// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Cyrix stuff, June 1998 by:
 * - Rafael R. Reilova (moved everything from head.S),
 *   <rreilova@ececs.uc.edu>
 * - Channing Corn (tests & fixes),
 * - Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>
#include <linux/pgtable.h>
#include <linux/bpf.h>
#include <linux/kvm_types.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/api.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/cpu_device_id.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>
#include <asm/tlbflush.h>
#include <asm/cpu.h>

#include "cpu.h"

/*
 * Speculation Vulnerability Handling
 *
 * Each vulnerability is handled with the following functions:
 *   <vuln>_select_mitigation() -- Selects a mitigation to use. This should
 *                                 take into account all relevant command line
 *                                 options.
 *   <vuln>_update_mitigation() -- This is called after all vulnerabilities have
 *                                 selected a mitigation, in case the selection
 *                                 may want to change based on other choices
 *                                 made. This function is optional.
 *   <vuln>_apply_mitigation() -- Enable the selected mitigation.
 *
 * The compile-time default mitigation in all cases should be AUTO. An explicit
 * command-line option can override AUTO. If no such option is provided,
 * <vuln>_select_mitigation() will override AUTO with the best mitigation
 * option.
 */

/* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base;

/* The current value of the SPEC_CTRL MSR with task-specific bits set */
DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);

/*
 * Set when the CPU has run a potentially malicious guest. An IBPB will
 * be needed before running userspace. That IBPB will flush the branch
 * predictor content.
 */
DEFINE_PER_CPU(bool, x86_ibpb_exit_to_user);
EXPORT_PER_CPU_SYMBOL_GPL(x86_ibpb_exit_to_user);

u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;

static u64 __ro_after_init x86_arch_cap_msr;

static DEFINE_MUTEX(spec_ctrl_mutex);

void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;

static void __init set_return_thunk(void *thunk)
{
	x86_return_thunk = thunk;

	pr_info("active return thunk: %ps\n", thunk);
}

/* Update SPEC_CTRL MSR and its cached copy unconditionally */
static void update_spec_ctrl(u64 val)
{
	this_cpu_write(x86_spec_ctrl_current, val);
	wrmsrq(MSR_IA32_SPEC_CTRL, val);
}

/*
 * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
 * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
 */
void update_spec_ctrl_cond(u64 val)
{
	if (this_cpu_read(x86_spec_ctrl_current) == val)
		return;

	this_cpu_write(x86_spec_ctrl_current, val);

	/*
	 * When KERNEL_IBRS is enabled, this MSR is written on return-to-user;
	 * unless forced, the update can be delayed until that time.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
		wrmsrq(MSR_IA32_SPEC_CTRL, val);
}

noinstr u64 spec_ctrl_current(void)
{
	return this_cpu_read(x86_spec_ctrl_current);
}
EXPORT_SYMBOL_GPL(spec_ctrl_current);

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Control IBPB on vCPU load */
DEFINE_STATIC_KEY_FALSE(switch_vcpu_ibpb);
EXPORT_SYMBOL_FOR_KVM(switch_vcpu_ibpb);

/* Control CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);

/*
 * Controls whether L1D flush based mitigations are enabled, based on HW
 * features and the admin setting via the boot parameter; defaults to false.
 */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

/*
 * Controls CPU Fill buffer clear before VMenter. This is a subset of
 * X86_FEATURE_CLEAR_CPU_BUF, and should only be enabled when KVM-only
 * mitigation is required.
 */
DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
EXPORT_SYMBOL_FOR_KVM(cpu_buf_vm_clear);

#undef pr_fmt
#define pr_fmt(fmt)	"mitigations: " fmt

static void __init cpu_print_attack_vectors(void)
{
	pr_info("Enabled attack vectors: ");

	if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL))
		pr_cont("user_kernel, ");

	if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER))
		pr_cont("user_user, ");

	if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST))
		pr_cont("guest_host, ");

	if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST))
		pr_cont("guest_guest, ");

	pr_cont("SMT mitigations: ");

	switch (smt_mitigations) {
	case SMT_MITIGATIONS_OFF:
		pr_cont("off\n");
		break;
	case SMT_MITIGATIONS_AUTO:
		pr_cont("auto\n");
		break;
	case SMT_MITIGATIONS_ON:
		pr_cont("on\n");
	}
}

/*
 * NOTE: This function is *only* called for SVM, since Intel uses
 * MSR_IA32_SPEC_CTRL for SSBD.
 */
void
x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 guestval, hostval;
	struct thread_info *ti = current_thread_info();

	/*
	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
	 */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		return;

	/*
	 * If the host has SSBD mitigation enabled, force it in the host's
	 * virtual MSR value. If it's not permanently enabled, evaluate
	 * current's TIF_SSBD thread flag.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
		hostval = SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);

	/* Sanitize the guest value */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

	if (hostval != guestval) {
		unsigned long tif;

		tif = setguest ?
ssbd_spec_ctrl_to_tif(guestval) : 224 ssbd_spec_ctrl_to_tif(hostval); 225 226 speculation_ctrl_update(tif); 227 } 228 } 229 EXPORT_SYMBOL_FOR_KVM(x86_virt_spec_ctrl); 230 231 static void x86_amd_ssb_disable(void) 232 { 233 u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask; 234 235 if (boot_cpu_has(X86_FEATURE_VIRT_SSBD)) 236 wrmsrq(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD); 237 else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD)) 238 wrmsrq(MSR_AMD64_LS_CFG, msrval); 239 } 240 241 #undef pr_fmt 242 #define pr_fmt(fmt) "MDS: " fmt 243 244 /* 245 * Returns true if vulnerability should be mitigated based on the 246 * selected attack vector controls. 247 * 248 * See Documentation/admin-guide/hw-vuln/attack_vector_controls.rst 249 */ 250 static bool __init should_mitigate_vuln(unsigned int bug) 251 { 252 switch (bug) { 253 /* 254 * The only runtime-selected spectre_v1 mitigations in the kernel are 255 * related to SWAPGS protection on kernel entry. Therefore, protection 256 * is only required for the user->kernel attack vector. 257 */ 258 case X86_BUG_SPECTRE_V1: 259 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL); 260 261 case X86_BUG_SPECTRE_V2: 262 case X86_BUG_RETBLEED: 263 case X86_BUG_L1TF: 264 case X86_BUG_ITS: 265 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) || 266 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST); 267 268 case X86_BUG_SPECTRE_V2_USER: 269 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) || 270 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST); 271 272 /* 273 * All the vulnerabilities below allow potentially leaking data 274 * across address spaces. Therefore, mitigation is required for 275 * any of these 4 attack vectors. 276 */ 277 case X86_BUG_MDS: 278 case X86_BUG_TAA: 279 case X86_BUG_MMIO_STALE_DATA: 280 case X86_BUG_RFDS: 281 case X86_BUG_SRBDS: 282 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) || 283 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) || 284 cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) || 285 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST); 286 287 case X86_BUG_GDS: 288 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) || 289 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) || 290 cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) || 291 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST) || 292 (smt_mitigations != SMT_MITIGATIONS_OFF); 293 294 case X86_BUG_SPEC_STORE_BYPASS: 295 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER); 296 297 case X86_BUG_VMSCAPE: 298 return cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST); 299 300 default: 301 WARN(1, "Unknown bug %x\n", bug); 302 return false; 303 } 304 } 305 306 /* Default mitigation for MDS-affected CPUs */ 307 static enum mds_mitigations mds_mitigation __ro_after_init = 308 IS_ENABLED(CONFIG_MITIGATION_MDS) ? MDS_MITIGATION_AUTO : MDS_MITIGATION_OFF; 309 static bool mds_nosmt __ro_after_init = false; 310 311 static const char * const mds_strings[] = { 312 [MDS_MITIGATION_OFF] = "Vulnerable", 313 [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers", 314 [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode", 315 }; 316 317 enum taa_mitigations { 318 TAA_MITIGATION_OFF, 319 TAA_MITIGATION_AUTO, 320 TAA_MITIGATION_UCODE_NEEDED, 321 TAA_MITIGATION_VERW, 322 TAA_MITIGATION_TSX_DISABLED, 323 }; 324 325 /* Default mitigation for TAA-affected CPUs */ 326 static enum taa_mitigations taa_mitigation __ro_after_init = 327 IS_ENABLED(CONFIG_MITIGATION_TAA) ? 
TAA_MITIGATION_AUTO : TAA_MITIGATION_OFF; 328 329 enum mmio_mitigations { 330 MMIO_MITIGATION_OFF, 331 MMIO_MITIGATION_AUTO, 332 MMIO_MITIGATION_UCODE_NEEDED, 333 MMIO_MITIGATION_VERW, 334 }; 335 336 /* Default mitigation for Processor MMIO Stale Data vulnerabilities */ 337 static enum mmio_mitigations mmio_mitigation __ro_after_init = 338 IS_ENABLED(CONFIG_MITIGATION_MMIO_STALE_DATA) ? MMIO_MITIGATION_AUTO : MMIO_MITIGATION_OFF; 339 340 enum rfds_mitigations { 341 RFDS_MITIGATION_OFF, 342 RFDS_MITIGATION_AUTO, 343 RFDS_MITIGATION_VERW, 344 RFDS_MITIGATION_UCODE_NEEDED, 345 }; 346 347 /* Default mitigation for Register File Data Sampling */ 348 static enum rfds_mitigations rfds_mitigation __ro_after_init = 349 IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_AUTO : RFDS_MITIGATION_OFF; 350 351 /* 352 * Set if any of MDS/TAA/MMIO/RFDS are going to enable VERW clearing 353 * through X86_FEATURE_CLEAR_CPU_BUF on kernel and guest entry. 354 */ 355 static bool verw_clear_cpu_buf_mitigation_selected __ro_after_init; 356 357 static void __init mds_select_mitigation(void) 358 { 359 if (!boot_cpu_has_bug(X86_BUG_MDS)) { 360 mds_mitigation = MDS_MITIGATION_OFF; 361 return; 362 } 363 364 if (mds_mitigation == MDS_MITIGATION_AUTO) { 365 if (should_mitigate_vuln(X86_BUG_MDS)) 366 mds_mitigation = MDS_MITIGATION_FULL; 367 else 368 mds_mitigation = MDS_MITIGATION_OFF; 369 } 370 371 if (mds_mitigation == MDS_MITIGATION_OFF) 372 return; 373 374 verw_clear_cpu_buf_mitigation_selected = true; 375 } 376 377 static void __init mds_update_mitigation(void) 378 { 379 if (!boot_cpu_has_bug(X86_BUG_MDS)) 380 return; 381 382 /* If TAA, MMIO, or RFDS are being mitigated, MDS gets mitigated too. */ 383 if (verw_clear_cpu_buf_mitigation_selected) 384 mds_mitigation = MDS_MITIGATION_FULL; 385 386 if (mds_mitigation == MDS_MITIGATION_FULL) { 387 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR)) 388 mds_mitigation = MDS_MITIGATION_VMWERV; 389 } 390 391 pr_info("%s\n", mds_strings[mds_mitigation]); 392 } 393 394 static void __init mds_apply_mitigation(void) 395 { 396 if (mds_mitigation == MDS_MITIGATION_FULL || 397 mds_mitigation == MDS_MITIGATION_VMWERV) { 398 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); 399 if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) && 400 (mds_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)) 401 cpu_smt_disable(false); 402 } 403 } 404 405 static int __init mds_cmdline(char *str) 406 { 407 if (!boot_cpu_has_bug(X86_BUG_MDS)) 408 return 0; 409 410 if (!str) 411 return -EINVAL; 412 413 if (!strcmp(str, "off")) 414 mds_mitigation = MDS_MITIGATION_OFF; 415 else if (!strcmp(str, "full")) 416 mds_mitigation = MDS_MITIGATION_FULL; 417 else if (!strcmp(str, "full,nosmt")) { 418 mds_mitigation = MDS_MITIGATION_FULL; 419 mds_nosmt = true; 420 } 421 422 return 0; 423 } 424 early_param("mds", mds_cmdline); 425 426 #undef pr_fmt 427 #define pr_fmt(fmt) "TAA: " fmt 428 429 static bool taa_nosmt __ro_after_init; 430 431 static const char * const taa_strings[] = { 432 [TAA_MITIGATION_OFF] = "Vulnerable", 433 [TAA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode", 434 [TAA_MITIGATION_VERW] = "Mitigation: Clear CPU buffers", 435 [TAA_MITIGATION_TSX_DISABLED] = "Mitigation: TSX disabled", 436 }; 437 438 static bool __init taa_vulnerable(void) 439 { 440 return boot_cpu_has_bug(X86_BUG_TAA) && boot_cpu_has(X86_FEATURE_RTM); 441 } 442 443 static void __init taa_select_mitigation(void) 444 { 445 if (!boot_cpu_has_bug(X86_BUG_TAA)) { 446 taa_mitigation = TAA_MITIGATION_OFF; 447 return; 448 } 449 450 /* 
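	 * TAA is only exploitable through TSX (RTM) transactions, so if RTM is
	 * not enumerated there is nothing left to select at runtime; a common
	 * case is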
TSX previously disabled by tsx=off */ 451 if (!boot_cpu_has(X86_FEATURE_RTM)) { 452 taa_mitigation = TAA_MITIGATION_TSX_DISABLED; 453 return; 454 } 455 456 /* Microcode will be checked in taa_update_mitigation(). */ 457 if (taa_mitigation == TAA_MITIGATION_AUTO) { 458 if (should_mitigate_vuln(X86_BUG_TAA)) 459 taa_mitigation = TAA_MITIGATION_VERW; 460 else 461 taa_mitigation = TAA_MITIGATION_OFF; 462 } 463 464 if (taa_mitigation != TAA_MITIGATION_OFF) 465 verw_clear_cpu_buf_mitigation_selected = true; 466 } 467 468 static void __init taa_update_mitigation(void) 469 { 470 if (!taa_vulnerable()) 471 return; 472 473 if (verw_clear_cpu_buf_mitigation_selected) 474 taa_mitigation = TAA_MITIGATION_VERW; 475 476 if (taa_mitigation == TAA_MITIGATION_VERW) { 477 /* Check if the requisite ucode is available. */ 478 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR)) 479 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED; 480 481 /* 482 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1. 483 * A microcode update fixes this behavior to clear CPU buffers. It also 484 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the 485 * ARCH_CAP_TSX_CTRL_MSR bit. 486 * 487 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode 488 * update is required. 489 */ 490 if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && 491 !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR)) 492 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED; 493 } 494 495 pr_info("%s\n", taa_strings[taa_mitigation]); 496 } 497 498 static void __init taa_apply_mitigation(void) 499 { 500 if (taa_mitigation == TAA_MITIGATION_VERW || 501 taa_mitigation == TAA_MITIGATION_UCODE_NEEDED) { 502 /* 503 * TSX is enabled, select alternate mitigation for TAA which is 504 * the same as MDS. Enable MDS static branch to clear CPU buffers. 505 * 506 * For guests that can't determine whether the correct microcode is 507 * present on host, enable the mitigation for UCODE_NEEDED as well. 508 */ 509 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); 510 511 if (taa_nosmt || smt_mitigations == SMT_MITIGATIONS_ON) 512 cpu_smt_disable(false); 513 } 514 } 515 516 static int __init tsx_async_abort_parse_cmdline(char *str) 517 { 518 if (!boot_cpu_has_bug(X86_BUG_TAA)) 519 return 0; 520 521 if (!str) 522 return -EINVAL; 523 524 if (!strcmp(str, "off")) { 525 taa_mitigation = TAA_MITIGATION_OFF; 526 } else if (!strcmp(str, "full")) { 527 taa_mitigation = TAA_MITIGATION_VERW; 528 } else if (!strcmp(str, "full,nosmt")) { 529 taa_mitigation = TAA_MITIGATION_VERW; 530 taa_nosmt = true; 531 } 532 533 return 0; 534 } 535 early_param("tsx_async_abort", tsx_async_abort_parse_cmdline); 536 537 #undef pr_fmt 538 #define pr_fmt(fmt) "MMIO Stale Data: " fmt 539 540 static bool mmio_nosmt __ro_after_init = false; 541 542 static const char * const mmio_strings[] = { 543 [MMIO_MITIGATION_OFF] = "Vulnerable", 544 [MMIO_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode", 545 [MMIO_MITIGATION_VERW] = "Mitigation: Clear CPU buffers", 546 }; 547 548 static void __init mmio_select_mitigation(void) 549 { 550 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) { 551 mmio_mitigation = MMIO_MITIGATION_OFF; 552 return; 553 } 554 555 /* Microcode will be checked in mmio_update_mitigation(). 
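	 * Selection here only decides between VERW and off based on the
	 * configured attack vectors; whether the loaded microcode can actually
	 * clear the affected buffers (FB_CLEAR, or MD_CLEAR plus L1D_FLUSH on
	 * parts without MDS_NO) is evaluated there and may demote the result
	 * to "ucode needed".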
*/ 556 if (mmio_mitigation == MMIO_MITIGATION_AUTO) { 557 if (should_mitigate_vuln(X86_BUG_MMIO_STALE_DATA)) 558 mmio_mitigation = MMIO_MITIGATION_VERW; 559 else 560 mmio_mitigation = MMIO_MITIGATION_OFF; 561 } 562 563 if (mmio_mitigation == MMIO_MITIGATION_OFF) 564 return; 565 566 /* 567 * Enable CPU buffer clear mitigation for host and VMM, if also affected 568 * by MDS or TAA. 569 */ 570 if (boot_cpu_has_bug(X86_BUG_MDS) || taa_vulnerable()) 571 verw_clear_cpu_buf_mitigation_selected = true; 572 } 573 574 static void __init mmio_update_mitigation(void) 575 { 576 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) 577 return; 578 579 if (verw_clear_cpu_buf_mitigation_selected) 580 mmio_mitigation = MMIO_MITIGATION_VERW; 581 582 if (mmio_mitigation == MMIO_MITIGATION_VERW) { 583 /* 584 * Check if the system has the right microcode. 585 * 586 * CPU Fill buffer clear mitigation is enumerated by either an explicit 587 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS 588 * affected systems. 589 */ 590 if (!((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) || 591 (boot_cpu_has(X86_FEATURE_MD_CLEAR) && 592 boot_cpu_has(X86_FEATURE_FLUSH_L1D) && 593 !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)))) 594 mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED; 595 } 596 597 pr_info("%s\n", mmio_strings[mmio_mitigation]); 598 } 599 600 static void __init mmio_apply_mitigation(void) 601 { 602 if (mmio_mitigation == MMIO_MITIGATION_OFF) 603 return; 604 605 /* 606 * Only enable the VMM mitigation if the CPU buffer clear mitigation is 607 * not being used. 608 */ 609 if (verw_clear_cpu_buf_mitigation_selected) { 610 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); 611 static_branch_disable(&cpu_buf_vm_clear); 612 } else { 613 static_branch_enable(&cpu_buf_vm_clear); 614 } 615 616 /* 617 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can 618 * be propagated to uncore buffers, clearing the Fill buffers on idle 619 * is required irrespective of SMT state. 
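	 * ARCH_CAP_FBSDP_NO advertises that Fill Buffer stale data is not
	 * propagated to such shared/uncore buffers, in which case the idle
	 * clearing below can be skipped.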
620 */ 621 if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) 622 static_branch_enable(&cpu_buf_idle_clear); 623 624 if (mmio_nosmt || smt_mitigations == SMT_MITIGATIONS_ON) 625 cpu_smt_disable(false); 626 } 627 628 static int __init mmio_stale_data_parse_cmdline(char *str) 629 { 630 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) 631 return 0; 632 633 if (!str) 634 return -EINVAL; 635 636 if (!strcmp(str, "off")) { 637 mmio_mitigation = MMIO_MITIGATION_OFF; 638 } else if (!strcmp(str, "full")) { 639 mmio_mitigation = MMIO_MITIGATION_VERW; 640 } else if (!strcmp(str, "full,nosmt")) { 641 mmio_mitigation = MMIO_MITIGATION_VERW; 642 mmio_nosmt = true; 643 } 644 645 return 0; 646 } 647 early_param("mmio_stale_data", mmio_stale_data_parse_cmdline); 648 649 #undef pr_fmt 650 #define pr_fmt(fmt) "Register File Data Sampling: " fmt 651 652 static const char * const rfds_strings[] = { 653 [RFDS_MITIGATION_OFF] = "Vulnerable", 654 [RFDS_MITIGATION_VERW] = "Mitigation: Clear Register File", 655 [RFDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", 656 }; 657 658 static inline bool __init verw_clears_cpu_reg_file(void) 659 { 660 return (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR); 661 } 662 663 static void __init rfds_select_mitigation(void) 664 { 665 if (!boot_cpu_has_bug(X86_BUG_RFDS)) { 666 rfds_mitigation = RFDS_MITIGATION_OFF; 667 return; 668 } 669 670 if (rfds_mitigation == RFDS_MITIGATION_AUTO) { 671 if (should_mitigate_vuln(X86_BUG_RFDS)) 672 rfds_mitigation = RFDS_MITIGATION_VERW; 673 else 674 rfds_mitigation = RFDS_MITIGATION_OFF; 675 } 676 677 if (rfds_mitigation == RFDS_MITIGATION_OFF) 678 return; 679 680 if (verw_clears_cpu_reg_file()) 681 verw_clear_cpu_buf_mitigation_selected = true; 682 } 683 684 static void __init rfds_update_mitigation(void) 685 { 686 if (!boot_cpu_has_bug(X86_BUG_RFDS)) 687 return; 688 689 if (verw_clear_cpu_buf_mitigation_selected) 690 rfds_mitigation = RFDS_MITIGATION_VERW; 691 692 if (rfds_mitigation == RFDS_MITIGATION_VERW) { 693 if (!verw_clears_cpu_reg_file()) 694 rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED; 695 } 696 697 pr_info("%s\n", rfds_strings[rfds_mitigation]); 698 } 699 700 static void __init rfds_apply_mitigation(void) 701 { 702 if (rfds_mitigation == RFDS_MITIGATION_VERW) 703 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); 704 } 705 706 static __init int rfds_parse_cmdline(char *str) 707 { 708 if (!str) 709 return -EINVAL; 710 711 if (!boot_cpu_has_bug(X86_BUG_RFDS)) 712 return 0; 713 714 if (!strcmp(str, "off")) 715 rfds_mitigation = RFDS_MITIGATION_OFF; 716 else if (!strcmp(str, "on")) 717 rfds_mitigation = RFDS_MITIGATION_VERW; 718 719 return 0; 720 } 721 early_param("reg_file_data_sampling", rfds_parse_cmdline); 722 723 #undef pr_fmt 724 #define pr_fmt(fmt) "SRBDS: " fmt 725 726 enum srbds_mitigations { 727 SRBDS_MITIGATION_OFF, 728 SRBDS_MITIGATION_AUTO, 729 SRBDS_MITIGATION_UCODE_NEEDED, 730 SRBDS_MITIGATION_FULL, 731 SRBDS_MITIGATION_TSX_OFF, 732 SRBDS_MITIGATION_HYPERVISOR, 733 }; 734 735 static enum srbds_mitigations srbds_mitigation __ro_after_init = 736 IS_ENABLED(CONFIG_MITIGATION_SRBDS) ? 
SRBDS_MITIGATION_AUTO : SRBDS_MITIGATION_OFF; 737 738 static const char * const srbds_strings[] = { 739 [SRBDS_MITIGATION_OFF] = "Vulnerable", 740 [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", 741 [SRBDS_MITIGATION_FULL] = "Mitigation: Microcode", 742 [SRBDS_MITIGATION_TSX_OFF] = "Mitigation: TSX disabled", 743 [SRBDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status", 744 }; 745 746 static bool srbds_off; 747 748 void update_srbds_msr(void) 749 { 750 u64 mcu_ctrl; 751 752 if (!boot_cpu_has_bug(X86_BUG_SRBDS)) 753 return; 754 755 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) 756 return; 757 758 if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED) 759 return; 760 761 /* 762 * A MDS_NO CPU for which SRBDS mitigation is not needed due to TSX 763 * being disabled and it hasn't received the SRBDS MSR microcode. 764 */ 765 if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL)) 766 return; 767 768 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); 769 770 switch (srbds_mitigation) { 771 case SRBDS_MITIGATION_OFF: 772 case SRBDS_MITIGATION_TSX_OFF: 773 mcu_ctrl |= RNGDS_MITG_DIS; 774 break; 775 case SRBDS_MITIGATION_FULL: 776 mcu_ctrl &= ~RNGDS_MITG_DIS; 777 break; 778 default: 779 break; 780 } 781 782 wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); 783 } 784 785 static void __init srbds_select_mitigation(void) 786 { 787 if (!boot_cpu_has_bug(X86_BUG_SRBDS)) { 788 srbds_mitigation = SRBDS_MITIGATION_OFF; 789 return; 790 } 791 792 if (srbds_mitigation == SRBDS_MITIGATION_AUTO) { 793 if (should_mitigate_vuln(X86_BUG_SRBDS)) 794 srbds_mitigation = SRBDS_MITIGATION_FULL; 795 else { 796 srbds_mitigation = SRBDS_MITIGATION_OFF; 797 return; 798 } 799 } 800 801 /* 802 * Check to see if this is one of the MDS_NO systems supporting TSX that 803 * are only exposed to SRBDS when TSX is enabled or when CPU is affected 804 * by Processor MMIO Stale Data vulnerability. 
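	 * The checks below are ordered by precedence: a TSX-disabled MDS_NO
	 * part needs no mitigation at all, a hypervisor hides the real
	 * microcode state, missing SRBDS_CTRL means the microcode cannot
	 * mitigate, and only then is an explicit "srbds=off" honored.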
805 */ 806 if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) && 807 !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) 808 srbds_mitigation = SRBDS_MITIGATION_TSX_OFF; 809 else if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) 810 srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR; 811 else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL)) 812 srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED; 813 else if (srbds_off) 814 srbds_mitigation = SRBDS_MITIGATION_OFF; 815 816 pr_info("%s\n", srbds_strings[srbds_mitigation]); 817 } 818 819 static void __init srbds_apply_mitigation(void) 820 { 821 update_srbds_msr(); 822 } 823 824 static int __init srbds_parse_cmdline(char *str) 825 { 826 if (!str) 827 return -EINVAL; 828 829 if (!boot_cpu_has_bug(X86_BUG_SRBDS)) 830 return 0; 831 832 srbds_off = !strcmp(str, "off"); 833 return 0; 834 } 835 early_param("srbds", srbds_parse_cmdline); 836 837 #undef pr_fmt 838 #define pr_fmt(fmt) "L1D Flush : " fmt 839 840 enum l1d_flush_mitigations { 841 L1D_FLUSH_OFF = 0, 842 L1D_FLUSH_ON, 843 }; 844 845 static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF; 846 847 static void __init l1d_flush_select_mitigation(void) 848 { 849 if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) 850 return; 851 852 static_branch_enable(&switch_mm_cond_l1d_flush); 853 pr_info("Conditional flush on switch_mm() enabled\n"); 854 } 855 856 static int __init l1d_flush_parse_cmdline(char *str) 857 { 858 if (!strcmp(str, "on")) 859 l1d_flush_mitigation = L1D_FLUSH_ON; 860 861 return 0; 862 } 863 early_param("l1d_flush", l1d_flush_parse_cmdline); 864 865 #undef pr_fmt 866 #define pr_fmt(fmt) "GDS: " fmt 867 868 enum gds_mitigations { 869 GDS_MITIGATION_OFF, 870 GDS_MITIGATION_AUTO, 871 GDS_MITIGATION_UCODE_NEEDED, 872 GDS_MITIGATION_FORCE, 873 GDS_MITIGATION_FULL, 874 GDS_MITIGATION_FULL_LOCKED, 875 GDS_MITIGATION_HYPERVISOR, 876 }; 877 878 static enum gds_mitigations gds_mitigation __ro_after_init = 879 IS_ENABLED(CONFIG_MITIGATION_GDS) ? GDS_MITIGATION_AUTO : GDS_MITIGATION_OFF; 880 881 static const char * const gds_strings[] = { 882 [GDS_MITIGATION_OFF] = "Vulnerable", 883 [GDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", 884 [GDS_MITIGATION_FORCE] = "Mitigation: AVX disabled, no microcode", 885 [GDS_MITIGATION_FULL] = "Mitigation: Microcode", 886 [GDS_MITIGATION_FULL_LOCKED] = "Mitigation: Microcode (locked)", 887 [GDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status", 888 }; 889 890 bool gds_ucode_mitigated(void) 891 { 892 return (gds_mitigation == GDS_MITIGATION_FULL || 893 gds_mitigation == GDS_MITIGATION_FULL_LOCKED); 894 } 895 EXPORT_SYMBOL_FOR_KVM(gds_ucode_mitigated); 896 897 void update_gds_msr(void) 898 { 899 u64 mcu_ctrl_after; 900 u64 mcu_ctrl; 901 902 switch (gds_mitigation) { 903 case GDS_MITIGATION_OFF: 904 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); 905 mcu_ctrl |= GDS_MITG_DIS; 906 break; 907 case GDS_MITIGATION_FULL_LOCKED: 908 /* 909 * The LOCKED state comes from the boot CPU. APs might not have 910 * the same state. Make sure the mitigation is enabled on all 911 * CPUs. 912 */ 913 case GDS_MITIGATION_FULL: 914 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); 915 mcu_ctrl &= ~GDS_MITG_DIS; 916 break; 917 case GDS_MITIGATION_FORCE: 918 case GDS_MITIGATION_UCODE_NEEDED: 919 case GDS_MITIGATION_HYPERVISOR: 920 case GDS_MITIGATION_AUTO: 921 return; 922 } 923 924 wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); 925 926 /* 927 * Check to make sure that the WRMSR value was not ignored. 
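	 * The MSR is read back below and compared against the value just
	 * written; a mismatch trips the WARN_ON_ONCE().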
Writes to 928 * GDS_MITG_DIS will be ignored if this processor is locked but the boot 929 * processor was not. 930 */ 931 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after); 932 WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after); 933 } 934 935 static void __init gds_select_mitigation(void) 936 { 937 u64 mcu_ctrl; 938 939 if (!boot_cpu_has_bug(X86_BUG_GDS)) 940 return; 941 942 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { 943 gds_mitigation = GDS_MITIGATION_HYPERVISOR; 944 return; 945 } 946 947 /* Will verify below that mitigation _can_ be disabled */ 948 if (gds_mitigation == GDS_MITIGATION_AUTO) { 949 if (should_mitigate_vuln(X86_BUG_GDS)) 950 gds_mitigation = GDS_MITIGATION_FULL; 951 else 952 gds_mitigation = GDS_MITIGATION_OFF; 953 } 954 955 /* No microcode */ 956 if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) { 957 if (gds_mitigation != GDS_MITIGATION_FORCE) 958 gds_mitigation = GDS_MITIGATION_UCODE_NEEDED; 959 return; 960 } 961 962 /* Microcode has mitigation, use it */ 963 if (gds_mitigation == GDS_MITIGATION_FORCE) 964 gds_mitigation = GDS_MITIGATION_FULL; 965 966 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); 967 if (mcu_ctrl & GDS_MITG_LOCKED) { 968 if (gds_mitigation == GDS_MITIGATION_OFF) 969 pr_warn("Mitigation locked. Disable failed.\n"); 970 971 /* 972 * The mitigation is selected from the boot CPU. All other CPUs 973 * _should_ have the same state. If the boot CPU isn't locked 974 * but others are then update_gds_msr() will WARN() of the state 975 * mismatch. If the boot CPU is locked update_gds_msr() will 976 * ensure the other CPUs have the mitigation enabled. 977 */ 978 gds_mitigation = GDS_MITIGATION_FULL_LOCKED; 979 } 980 } 981 982 static void __init gds_apply_mitigation(void) 983 { 984 if (!boot_cpu_has_bug(X86_BUG_GDS)) 985 return; 986 987 /* Microcode is present */ 988 if (x86_arch_cap_msr & ARCH_CAP_GDS_CTRL) 989 update_gds_msr(); 990 else if (gds_mitigation == GDS_MITIGATION_FORCE) { 991 /* 992 * This only needs to be done on the boot CPU so do it 993 * here rather than in update_gds_msr() 994 */ 995 setup_clear_cpu_cap(X86_FEATURE_AVX); 996 pr_warn("Microcode update needed! Disabling AVX as mitigation.\n"); 997 } 998 999 pr_info("%s\n", gds_strings[gds_mitigation]); 1000 } 1001 1002 static int __init gds_parse_cmdline(char *str) 1003 { 1004 if (!str) 1005 return -EINVAL; 1006 1007 if (!boot_cpu_has_bug(X86_BUG_GDS)) 1008 return 0; 1009 1010 if (!strcmp(str, "off")) 1011 gds_mitigation = GDS_MITIGATION_OFF; 1012 else if (!strcmp(str, "force")) 1013 gds_mitigation = GDS_MITIGATION_FORCE; 1014 1015 return 0; 1016 } 1017 early_param("gather_data_sampling", gds_parse_cmdline); 1018 1019 #undef pr_fmt 1020 #define pr_fmt(fmt) "Spectre V1 : " fmt 1021 1022 enum spectre_v1_mitigation { 1023 SPECTRE_V1_MITIGATION_NONE, 1024 SPECTRE_V1_MITIGATION_AUTO, 1025 }; 1026 1027 static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init = 1028 IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V1) ? 1029 SPECTRE_V1_MITIGATION_AUTO : SPECTRE_V1_MITIGATION_NONE; 1030 1031 static const char * const spectre_v1_strings[] = { 1032 [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers", 1033 [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization", 1034 }; 1035 1036 /* 1037 * Does SMAP provide full mitigation against speculative kernel access to 1038 * userspace? 
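	 * If it does, and FSGSBASE is not exposed to userspace, the conditional
	 * swapgs LFENCEs on kernel entry are unnecessary: a user-controlled GS
	 * base can only point at user memory, which SMAP then blocks even under
	 * speculation.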
1039 */ 1040 static bool smap_works_speculatively(void) 1041 { 1042 if (!boot_cpu_has(X86_FEATURE_SMAP)) 1043 return false; 1044 1045 /* 1046 * On CPUs which are vulnerable to Meltdown, SMAP does not 1047 * prevent speculative access to user data in the L1 cache. 1048 * Consider SMAP to be non-functional as a mitigation on these 1049 * CPUs. 1050 */ 1051 if (boot_cpu_has(X86_BUG_CPU_MELTDOWN)) 1052 return false; 1053 1054 return true; 1055 } 1056 1057 static void __init spectre_v1_select_mitigation(void) 1058 { 1059 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1)) 1060 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE; 1061 1062 if (!should_mitigate_vuln(X86_BUG_SPECTRE_V1)) 1063 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE; 1064 } 1065 1066 static void __init spectre_v1_apply_mitigation(void) 1067 { 1068 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1)) 1069 return; 1070 1071 if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) { 1072 /* 1073 * With Spectre v1, a user can speculatively control either 1074 * path of a conditional swapgs with a user-controlled GS 1075 * value. The mitigation is to add lfences to both code paths. 1076 * 1077 * If FSGSBASE is enabled, the user can put a kernel address in 1078 * GS, in which case SMAP provides no protection. 1079 * 1080 * If FSGSBASE is disabled, the user can only put a user space 1081 * address in GS. That makes an attack harder, but still 1082 * possible if there's no SMAP protection. 1083 */ 1084 if (boot_cpu_has(X86_FEATURE_FSGSBASE) || 1085 !smap_works_speculatively()) { 1086 /* 1087 * Mitigation can be provided from SWAPGS itself or 1088 * PTI as the CR3 write in the Meltdown mitigation 1089 * is serializing. 1090 * 1091 * If neither is there, mitigate with an LFENCE to 1092 * stop speculation through swapgs. 1093 */ 1094 if (boot_cpu_has_bug(X86_BUG_SWAPGS) && 1095 !boot_cpu_has(X86_FEATURE_PTI)) 1096 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER); 1097 1098 /* 1099 * Enable lfences in the kernel entry (non-swapgs) 1100 * paths, to prevent user entry from speculatively 1101 * skipping swapgs. 1102 */ 1103 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL); 1104 } 1105 } 1106 1107 pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]); 1108 } 1109 1110 static int __init nospectre_v1_cmdline(char *str) 1111 { 1112 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE; 1113 return 0; 1114 } 1115 early_param("nospectre_v1", nospectre_v1_cmdline); 1116 1117 enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE; 1118 1119 /* Depends on spectre_v2 mitigation selected already */ 1120 static inline bool cdt_possible(enum spectre_v2_mitigation mode) 1121 { 1122 if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING) || 1123 !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) 1124 return false; 1125 1126 if (mode == SPECTRE_V2_RETPOLINE || 1127 mode == SPECTRE_V2_EIBRS_RETPOLINE) 1128 return true; 1129 1130 return false; 1131 } 1132 1133 #undef pr_fmt 1134 #define pr_fmt(fmt) "RETBleed: " fmt 1135 1136 enum its_mitigation { 1137 ITS_MITIGATION_OFF, 1138 ITS_MITIGATION_AUTO, 1139 ITS_MITIGATION_VMEXIT_ONLY, 1140 ITS_MITIGATION_ALIGNED_THUNKS, 1141 ITS_MITIGATION_RETPOLINE_STUFF, 1142 }; 1143 1144 static enum its_mitigation its_mitigation __ro_after_init = 1145 IS_ENABLED(CONFIG_MITIGATION_ITS) ? 
ITS_MITIGATION_AUTO : ITS_MITIGATION_OFF; 1146 1147 enum retbleed_mitigation { 1148 RETBLEED_MITIGATION_NONE, 1149 RETBLEED_MITIGATION_AUTO, 1150 RETBLEED_MITIGATION_UNRET, 1151 RETBLEED_MITIGATION_IBPB, 1152 RETBLEED_MITIGATION_IBRS, 1153 RETBLEED_MITIGATION_EIBRS, 1154 RETBLEED_MITIGATION_STUFF, 1155 }; 1156 1157 static const char * const retbleed_strings[] = { 1158 [RETBLEED_MITIGATION_NONE] = "Vulnerable", 1159 [RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk", 1160 [RETBLEED_MITIGATION_IBPB] = "Mitigation: IBPB", 1161 [RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS", 1162 [RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS", 1163 [RETBLEED_MITIGATION_STUFF] = "Mitigation: Stuffing", 1164 }; 1165 1166 static enum retbleed_mitigation retbleed_mitigation __ro_after_init = 1167 IS_ENABLED(CONFIG_MITIGATION_RETBLEED) ? RETBLEED_MITIGATION_AUTO : RETBLEED_MITIGATION_NONE; 1168 1169 static int __ro_after_init retbleed_nosmt = false; 1170 1171 enum srso_mitigation { 1172 SRSO_MITIGATION_NONE, 1173 SRSO_MITIGATION_AUTO, 1174 SRSO_MITIGATION_UCODE_NEEDED, 1175 SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED, 1176 SRSO_MITIGATION_MICROCODE, 1177 SRSO_MITIGATION_NOSMT, 1178 SRSO_MITIGATION_SAFE_RET, 1179 SRSO_MITIGATION_IBPB, 1180 SRSO_MITIGATION_IBPB_ON_VMEXIT, 1181 SRSO_MITIGATION_BP_SPEC_REDUCE, 1182 }; 1183 1184 static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_AUTO; 1185 1186 static int __init retbleed_parse_cmdline(char *str) 1187 { 1188 if (!str) 1189 return -EINVAL; 1190 1191 while (str) { 1192 char *next = strchr(str, ','); 1193 if (next) { 1194 *next = 0; 1195 next++; 1196 } 1197 1198 if (!strcmp(str, "off")) { 1199 retbleed_mitigation = RETBLEED_MITIGATION_NONE; 1200 } else if (!strcmp(str, "auto")) { 1201 retbleed_mitigation = RETBLEED_MITIGATION_AUTO; 1202 } else if (!strcmp(str, "unret")) { 1203 retbleed_mitigation = RETBLEED_MITIGATION_UNRET; 1204 } else if (!strcmp(str, "ibpb")) { 1205 retbleed_mitigation = RETBLEED_MITIGATION_IBPB; 1206 } else if (!strcmp(str, "stuff")) { 1207 retbleed_mitigation = RETBLEED_MITIGATION_STUFF; 1208 } else if (!strcmp(str, "nosmt")) { 1209 retbleed_nosmt = true; 1210 } else if (!strcmp(str, "force")) { 1211 setup_force_cpu_bug(X86_BUG_RETBLEED); 1212 } else { 1213 pr_err("Ignoring unknown retbleed option (%s).", str); 1214 } 1215 1216 str = next; 1217 } 1218 1219 return 0; 1220 } 1221 early_param("retbleed", retbleed_parse_cmdline); 1222 1223 #define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n" 1224 #define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n" 1225 1226 static void __init retbleed_select_mitigation(void) 1227 { 1228 if (!boot_cpu_has_bug(X86_BUG_RETBLEED)) { 1229 retbleed_mitigation = RETBLEED_MITIGATION_NONE; 1230 return; 1231 } 1232 1233 switch (retbleed_mitigation) { 1234 case RETBLEED_MITIGATION_UNRET: 1235 if (!IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) { 1236 retbleed_mitigation = RETBLEED_MITIGATION_AUTO; 1237 pr_err("WARNING: kernel not compiled with MITIGATION_UNRET_ENTRY.\n"); 1238 } 1239 break; 1240 case RETBLEED_MITIGATION_IBPB: 1241 if (!boot_cpu_has(X86_FEATURE_IBPB)) { 1242 pr_err("WARNING: CPU does not support IBPB.\n"); 1243 retbleed_mitigation = RETBLEED_MITIGATION_AUTO; 1244 } else if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) { 1245 pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n"); 1246 retbleed_mitigation = RETBLEED_MITIGATION_AUTO; 
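			/*
			 * Falling back to AUTO lets the vendor-specific
			 * default selection further down pick a usable
			 * alternative instead.
			 */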
1247 } 1248 break; 1249 case RETBLEED_MITIGATION_STUFF: 1250 if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) { 1251 pr_err("WARNING: kernel not compiled with MITIGATION_CALL_DEPTH_TRACKING.\n"); 1252 retbleed_mitigation = RETBLEED_MITIGATION_AUTO; 1253 } else if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { 1254 pr_err("WARNING: retbleed=stuff only supported for Intel CPUs.\n"); 1255 retbleed_mitigation = RETBLEED_MITIGATION_AUTO; 1256 } 1257 break; 1258 default: 1259 break; 1260 } 1261 1262 if (retbleed_mitigation != RETBLEED_MITIGATION_AUTO) 1263 return; 1264 1265 if (!should_mitigate_vuln(X86_BUG_RETBLEED)) { 1266 retbleed_mitigation = RETBLEED_MITIGATION_NONE; 1267 return; 1268 } 1269 1270 /* Intel mitigation selected in retbleed_update_mitigation() */ 1271 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || 1272 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { 1273 if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) 1274 retbleed_mitigation = RETBLEED_MITIGATION_UNRET; 1275 else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY) && 1276 boot_cpu_has(X86_FEATURE_IBPB)) 1277 retbleed_mitigation = RETBLEED_MITIGATION_IBPB; 1278 else 1279 retbleed_mitigation = RETBLEED_MITIGATION_NONE; 1280 } else if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { 1281 /* Final mitigation depends on spectre-v2 selection */ 1282 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) 1283 retbleed_mitigation = RETBLEED_MITIGATION_EIBRS; 1284 else if (boot_cpu_has(X86_FEATURE_IBRS)) 1285 retbleed_mitigation = RETBLEED_MITIGATION_IBRS; 1286 else 1287 retbleed_mitigation = RETBLEED_MITIGATION_NONE; 1288 } 1289 } 1290 1291 static void __init retbleed_update_mitigation(void) 1292 { 1293 if (!boot_cpu_has_bug(X86_BUG_RETBLEED)) 1294 return; 1295 1296 /* ITS can also enable stuffing */ 1297 if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF) 1298 retbleed_mitigation = RETBLEED_MITIGATION_STUFF; 1299 1300 /* If SRSO is using IBPB, that works for retbleed too */ 1301 if (srso_mitigation == SRSO_MITIGATION_IBPB) 1302 retbleed_mitigation = RETBLEED_MITIGATION_IBPB; 1303 1304 if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF && 1305 !cdt_possible(spectre_v2_enabled)) { 1306 pr_err("WARNING: retbleed=stuff depends on retpoline\n"); 1307 retbleed_mitigation = RETBLEED_MITIGATION_NONE; 1308 } 1309 1310 /* 1311 * Let IBRS trump all on Intel without affecting the effects of the 1312 * retbleed= cmdline option except for call depth based stuffing 1313 */ 1314 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { 1315 switch (spectre_v2_enabled) { 1316 case SPECTRE_V2_IBRS: 1317 retbleed_mitigation = RETBLEED_MITIGATION_IBRS; 1318 break; 1319 case SPECTRE_V2_EIBRS: 1320 case SPECTRE_V2_EIBRS_RETPOLINE: 1321 case SPECTRE_V2_EIBRS_LFENCE: 1322 retbleed_mitigation = RETBLEED_MITIGATION_EIBRS; 1323 break; 1324 default: 1325 if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF) { 1326 if (retbleed_mitigation != RETBLEED_MITIGATION_NONE) 1327 pr_err(RETBLEED_INTEL_MSG); 1328 1329 retbleed_mitigation = RETBLEED_MITIGATION_NONE; 1330 } 1331 } 1332 } 1333 1334 pr_info("%s\n", retbleed_strings[retbleed_mitigation]); 1335 } 1336 1337 static void __init retbleed_apply_mitigation(void) 1338 { 1339 bool mitigate_smt = false; 1340 1341 switch (retbleed_mitigation) { 1342 case RETBLEED_MITIGATION_NONE: 1343 return; 1344 1345 case RETBLEED_MITIGATION_UNRET: 1346 setup_force_cpu_cap(X86_FEATURE_RETHUNK); 1347 setup_force_cpu_cap(X86_FEATURE_UNRET); 1348 1349 set_return_thunk(retbleed_return_thunk); 1350 1351 if (boot_cpu_data.x86_vendor != 
X86_VENDOR_AMD && 1352 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) 1353 pr_err(RETBLEED_UNTRAIN_MSG); 1354 1355 mitigate_smt = true; 1356 break; 1357 1358 case RETBLEED_MITIGATION_IBPB: 1359 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); 1360 setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); 1361 mitigate_smt = true; 1362 1363 /* 1364 * IBPB on entry already obviates the need for 1365 * software-based untraining so clear those in case some 1366 * other mitigation like SRSO has selected them. 1367 */ 1368 setup_clear_cpu_cap(X86_FEATURE_UNRET); 1369 setup_clear_cpu_cap(X86_FEATURE_RETHUNK); 1370 1371 /* 1372 * There is no need for RSB filling: write_ibpb() ensures 1373 * all predictions, including the RSB, are invalidated, 1374 * regardless of IBPB implementation. 1375 */ 1376 setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT); 1377 1378 break; 1379 1380 case RETBLEED_MITIGATION_STUFF: 1381 setup_force_cpu_cap(X86_FEATURE_RETHUNK); 1382 setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH); 1383 1384 set_return_thunk(call_depth_return_thunk); 1385 break; 1386 1387 default: 1388 break; 1389 } 1390 1391 if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) && 1392 (retbleed_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)) 1393 cpu_smt_disable(false); 1394 } 1395 1396 #undef pr_fmt 1397 #define pr_fmt(fmt) "ITS: " fmt 1398 1399 static const char * const its_strings[] = { 1400 [ITS_MITIGATION_OFF] = "Vulnerable", 1401 [ITS_MITIGATION_VMEXIT_ONLY] = "Mitigation: Vulnerable, KVM: Not affected", 1402 [ITS_MITIGATION_ALIGNED_THUNKS] = "Mitigation: Aligned branch/return thunks", 1403 [ITS_MITIGATION_RETPOLINE_STUFF] = "Mitigation: Retpolines, Stuffing RSB", 1404 }; 1405 1406 static int __init its_parse_cmdline(char *str) 1407 { 1408 if (!str) 1409 return -EINVAL; 1410 1411 if (!IS_ENABLED(CONFIG_MITIGATION_ITS)) { 1412 pr_err("Mitigation disabled at compile time, ignoring option (%s)", str); 1413 return 0; 1414 } 1415 1416 if (!strcmp(str, "off")) { 1417 its_mitigation = ITS_MITIGATION_OFF; 1418 } else if (!strcmp(str, "on")) { 1419 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; 1420 } else if (!strcmp(str, "force")) { 1421 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; 1422 setup_force_cpu_bug(X86_BUG_ITS); 1423 } else if (!strcmp(str, "vmexit")) { 1424 its_mitigation = ITS_MITIGATION_VMEXIT_ONLY; 1425 } else if (!strcmp(str, "stuff")) { 1426 its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF; 1427 } else { 1428 pr_err("Ignoring unknown indirect_target_selection option (%s).", str); 1429 } 1430 1431 return 0; 1432 } 1433 early_param("indirect_target_selection", its_parse_cmdline); 1434 1435 static void __init its_select_mitigation(void) 1436 { 1437 if (!boot_cpu_has_bug(X86_BUG_ITS)) { 1438 its_mitigation = ITS_MITIGATION_OFF; 1439 return; 1440 } 1441 1442 if (its_mitigation == ITS_MITIGATION_AUTO) { 1443 if (should_mitigate_vuln(X86_BUG_ITS)) 1444 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; 1445 else 1446 its_mitigation = ITS_MITIGATION_OFF; 1447 } 1448 1449 if (its_mitigation == ITS_MITIGATION_OFF) 1450 return; 1451 1452 if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) || 1453 !IS_ENABLED(CONFIG_MITIGATION_RETHUNK)) { 1454 pr_err("WARNING: ITS mitigation depends on retpoline and rethunk support\n"); 1455 its_mitigation = ITS_MITIGATION_OFF; 1456 return; 1457 } 1458 1459 if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) { 1460 pr_err("WARNING: ITS mitigation is not compatible with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B\n"); 1461 its_mitigation = ITS_MITIGATION_OFF; 1462 return; 1463 } 1464 1465 if (its_mitigation 
== ITS_MITIGATION_RETPOLINE_STUFF && 1466 !IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) { 1467 pr_err("RSB stuff mitigation not supported, using default\n"); 1468 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; 1469 } 1470 1471 if (its_mitigation == ITS_MITIGATION_VMEXIT_ONLY && 1472 !boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY)) 1473 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; 1474 } 1475 1476 static void __init its_update_mitigation(void) 1477 { 1478 if (!boot_cpu_has_bug(X86_BUG_ITS)) 1479 return; 1480 1481 switch (spectre_v2_enabled) { 1482 case SPECTRE_V2_NONE: 1483 if (its_mitigation != ITS_MITIGATION_OFF) 1484 pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n"); 1485 its_mitigation = ITS_MITIGATION_OFF; 1486 break; 1487 case SPECTRE_V2_RETPOLINE: 1488 case SPECTRE_V2_EIBRS_RETPOLINE: 1489 /* Retpoline+CDT mitigates ITS */ 1490 if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF) 1491 its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF; 1492 break; 1493 case SPECTRE_V2_LFENCE: 1494 case SPECTRE_V2_EIBRS_LFENCE: 1495 pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n"); 1496 its_mitigation = ITS_MITIGATION_OFF; 1497 break; 1498 default: 1499 break; 1500 } 1501 1502 if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF && 1503 !cdt_possible(spectre_v2_enabled)) 1504 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; 1505 1506 pr_info("%s\n", its_strings[its_mitigation]); 1507 } 1508 1509 static void __init its_apply_mitigation(void) 1510 { 1511 switch (its_mitigation) { 1512 case ITS_MITIGATION_OFF: 1513 case ITS_MITIGATION_AUTO: 1514 case ITS_MITIGATION_VMEXIT_ONLY: 1515 break; 1516 case ITS_MITIGATION_ALIGNED_THUNKS: 1517 if (!boot_cpu_has(X86_FEATURE_RETPOLINE)) 1518 setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS); 1519 1520 setup_force_cpu_cap(X86_FEATURE_RETHUNK); 1521 set_return_thunk(its_return_thunk); 1522 break; 1523 case ITS_MITIGATION_RETPOLINE_STUFF: 1524 setup_force_cpu_cap(X86_FEATURE_RETHUNK); 1525 setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH); 1526 set_return_thunk(call_depth_return_thunk); 1527 break; 1528 } 1529 } 1530 1531 #undef pr_fmt 1532 #define pr_fmt(fmt) "Transient Scheduler Attacks: " fmt 1533 1534 enum tsa_mitigations { 1535 TSA_MITIGATION_NONE, 1536 TSA_MITIGATION_AUTO, 1537 TSA_MITIGATION_UCODE_NEEDED, 1538 TSA_MITIGATION_USER_KERNEL, 1539 TSA_MITIGATION_VM, 1540 TSA_MITIGATION_FULL, 1541 }; 1542 1543 static const char * const tsa_strings[] = { 1544 [TSA_MITIGATION_NONE] = "Vulnerable", 1545 [TSA_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", 1546 [TSA_MITIGATION_USER_KERNEL] = "Mitigation: Clear CPU buffers: user/kernel boundary", 1547 [TSA_MITIGATION_VM] = "Mitigation: Clear CPU buffers: VM", 1548 [TSA_MITIGATION_FULL] = "Mitigation: Clear CPU buffers", 1549 }; 1550 1551 static enum tsa_mitigations tsa_mitigation __ro_after_init = 1552 IS_ENABLED(CONFIG_MITIGATION_TSA) ? 
TSA_MITIGATION_AUTO : TSA_MITIGATION_NONE; 1553 1554 static int __init tsa_parse_cmdline(char *str) 1555 { 1556 if (!str) 1557 return -EINVAL; 1558 1559 if (!strcmp(str, "off")) 1560 tsa_mitigation = TSA_MITIGATION_NONE; 1561 else if (!strcmp(str, "on")) 1562 tsa_mitigation = TSA_MITIGATION_FULL; 1563 else if (!strcmp(str, "user")) 1564 tsa_mitigation = TSA_MITIGATION_USER_KERNEL; 1565 else if (!strcmp(str, "vm")) 1566 tsa_mitigation = TSA_MITIGATION_VM; 1567 else 1568 pr_err("Ignoring unknown tsa=%s option.\n", str); 1569 1570 return 0; 1571 } 1572 early_param("tsa", tsa_parse_cmdline); 1573 1574 static void __init tsa_select_mitigation(void) 1575 { 1576 if (!boot_cpu_has_bug(X86_BUG_TSA)) { 1577 tsa_mitigation = TSA_MITIGATION_NONE; 1578 return; 1579 } 1580 1581 if (tsa_mitigation == TSA_MITIGATION_AUTO) { 1582 bool vm = false, uk = false; 1583 1584 tsa_mitigation = TSA_MITIGATION_NONE; 1585 1586 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) || 1587 cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER)) { 1588 tsa_mitigation = TSA_MITIGATION_USER_KERNEL; 1589 uk = true; 1590 } 1591 1592 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) || 1593 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) { 1594 tsa_mitigation = TSA_MITIGATION_VM; 1595 vm = true; 1596 } 1597 1598 if (uk && vm) 1599 tsa_mitigation = TSA_MITIGATION_FULL; 1600 } 1601 1602 if (tsa_mitigation == TSA_MITIGATION_NONE) 1603 return; 1604 1605 if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR)) 1606 tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED; 1607 1608 /* 1609 * No need to set verw_clear_cpu_buf_mitigation_selected - it 1610 * doesn't fit all cases here and it is not needed because this 1611 * is the only VERW-based mitigation on AMD. 1612 */ 1613 pr_info("%s\n", tsa_strings[tsa_mitigation]); 1614 } 1615 1616 static void __init tsa_apply_mitigation(void) 1617 { 1618 switch (tsa_mitigation) { 1619 case TSA_MITIGATION_USER_KERNEL: 1620 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); 1621 break; 1622 case TSA_MITIGATION_VM: 1623 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM); 1624 break; 1625 case TSA_MITIGATION_FULL: 1626 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); 1627 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM); 1628 break; 1629 default: 1630 break; 1631 } 1632 } 1633 1634 #undef pr_fmt 1635 #define pr_fmt(fmt) "Spectre V2 : " fmt 1636 1637 static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init = 1638 SPECTRE_V2_USER_NONE; 1639 static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init = 1640 SPECTRE_V2_USER_NONE; 1641 1642 #ifdef CONFIG_MITIGATION_RETPOLINE 1643 static bool spectre_v2_bad_module; 1644 1645 bool retpoline_module_ok(bool has_retpoline) 1646 { 1647 if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline) 1648 return true; 1649 1650 pr_err("System may be vulnerable to spectre v2\n"); 1651 spectre_v2_bad_module = true; 1652 return false; 1653 } 1654 1655 static inline const char *spectre_v2_module_string(void) 1656 { 1657 return spectre_v2_bad_module ? 
" - vulnerable module loaded" : ""; 1658 } 1659 #else 1660 static inline const char *spectre_v2_module_string(void) { return ""; } 1661 #endif 1662 1663 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n" 1664 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n" 1665 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n" 1666 #define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n" 1667 1668 #ifdef CONFIG_BPF_SYSCALL 1669 void unpriv_ebpf_notify(int new_state) 1670 { 1671 if (new_state) 1672 return; 1673 1674 /* Unprivileged eBPF is enabled */ 1675 1676 switch (spectre_v2_enabled) { 1677 case SPECTRE_V2_EIBRS: 1678 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); 1679 break; 1680 case SPECTRE_V2_EIBRS_LFENCE: 1681 if (sched_smt_active()) 1682 pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); 1683 break; 1684 default: 1685 break; 1686 } 1687 } 1688 #endif 1689 1690 /* The kernel command line selection for spectre v2 */ 1691 enum spectre_v2_mitigation_cmd { 1692 SPECTRE_V2_CMD_NONE, 1693 SPECTRE_V2_CMD_AUTO, 1694 SPECTRE_V2_CMD_FORCE, 1695 SPECTRE_V2_CMD_RETPOLINE, 1696 SPECTRE_V2_CMD_RETPOLINE_GENERIC, 1697 SPECTRE_V2_CMD_RETPOLINE_LFENCE, 1698 SPECTRE_V2_CMD_EIBRS, 1699 SPECTRE_V2_CMD_EIBRS_RETPOLINE, 1700 SPECTRE_V2_CMD_EIBRS_LFENCE, 1701 SPECTRE_V2_CMD_IBRS, 1702 }; 1703 1704 static enum spectre_v2_mitigation_cmd spectre_v2_cmd __ro_after_init = 1705 IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ? SPECTRE_V2_CMD_AUTO : SPECTRE_V2_CMD_NONE; 1706 1707 enum spectre_v2_user_mitigation_cmd { 1708 SPECTRE_V2_USER_CMD_NONE, 1709 SPECTRE_V2_USER_CMD_AUTO, 1710 SPECTRE_V2_USER_CMD_FORCE, 1711 SPECTRE_V2_USER_CMD_PRCTL, 1712 SPECTRE_V2_USER_CMD_PRCTL_IBPB, 1713 SPECTRE_V2_USER_CMD_SECCOMP, 1714 SPECTRE_V2_USER_CMD_SECCOMP_IBPB, 1715 }; 1716 1717 static enum spectre_v2_user_mitigation_cmd spectre_v2_user_cmd __ro_after_init = 1718 IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ? 
SPECTRE_V2_USER_CMD_AUTO : SPECTRE_V2_USER_CMD_NONE; 1719 1720 static const char * const spectre_v2_user_strings[] = { 1721 [SPECTRE_V2_USER_NONE] = "User space: Vulnerable", 1722 [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection", 1723 [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection", 1724 [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl", 1725 [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl", 1726 }; 1727 1728 static int __init spectre_v2_user_parse_cmdline(char *str) 1729 { 1730 if (!str) 1731 return -EINVAL; 1732 1733 if (!strcmp(str, "auto")) 1734 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_AUTO; 1735 else if (!strcmp(str, "off")) 1736 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_NONE; 1737 else if (!strcmp(str, "on")) 1738 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_FORCE; 1739 else if (!strcmp(str, "prctl")) 1740 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_PRCTL; 1741 else if (!strcmp(str, "prctl,ibpb")) 1742 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_PRCTL_IBPB; 1743 else if (!strcmp(str, "seccomp")) 1744 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_SECCOMP; 1745 else if (!strcmp(str, "seccomp,ibpb")) 1746 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_SECCOMP_IBPB; 1747 else 1748 pr_err("Ignoring unknown spectre_v2_user option (%s).", str); 1749 1750 return 0; 1751 } 1752 early_param("spectre_v2_user", spectre_v2_user_parse_cmdline); 1753 1754 static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode) 1755 { 1756 return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS; 1757 } 1758 1759 static void __init spectre_v2_user_select_mitigation(void) 1760 { 1761 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP)) 1762 return; 1763 1764 switch (spectre_v2_user_cmd) { 1765 case SPECTRE_V2_USER_CMD_NONE: 1766 return; 1767 case SPECTRE_V2_USER_CMD_FORCE: 1768 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; 1769 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT; 1770 break; 1771 case SPECTRE_V2_USER_CMD_AUTO: 1772 if (!should_mitigate_vuln(X86_BUG_SPECTRE_V2_USER)) 1773 break; 1774 spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL; 1775 if (smt_mitigations == SMT_MITIGATIONS_OFF) 1776 break; 1777 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL; 1778 break; 1779 case SPECTRE_V2_USER_CMD_PRCTL: 1780 spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL; 1781 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL; 1782 break; 1783 case SPECTRE_V2_USER_CMD_PRCTL_IBPB: 1784 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; 1785 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL; 1786 break; 1787 case SPECTRE_V2_USER_CMD_SECCOMP: 1788 if (IS_ENABLED(CONFIG_SECCOMP)) 1789 spectre_v2_user_ibpb = SPECTRE_V2_USER_SECCOMP; 1790 else 1791 spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL; 1792 spectre_v2_user_stibp = spectre_v2_user_ibpb; 1793 break; 1794 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB: 1795 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; 1796 if (IS_ENABLED(CONFIG_SECCOMP)) 1797 spectre_v2_user_stibp = SPECTRE_V2_USER_SECCOMP; 1798 else 1799 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL; 1800 break; 1801 } 1802 1803 /* 1804 * At this point, an STIBP mode other than "off" has been set. 1805 * If STIBP support is not being forced, check if STIBP always-on 1806 * is preferred. 
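	 * CPUs advertising AMD_STIBP_ALWAYS_ON prefer STIBP to be left enabled
	 * rather than toggled across context switches, so the prctl/seccomp
	 * modes are upgraded to STRICT_PREFERRED below.
	 *
	 * For reference, in the prctl-based modes an individual task opts into
	 * STIBP roughly like this (illustrative only, see
	 * Documentation/userspace-api/spec_ctrl.rst):
	 *
	 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
	 *	      PR_SPEC_DISABLE, 0, 0);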
1807 */ 1808 if ((spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || 1809 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) && 1810 boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON)) 1811 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED; 1812 1813 if (!boot_cpu_has(X86_FEATURE_IBPB)) 1814 spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE; 1815 1816 if (!boot_cpu_has(X86_FEATURE_STIBP)) 1817 spectre_v2_user_stibp = SPECTRE_V2_USER_NONE; 1818 } 1819 1820 static void __init spectre_v2_user_update_mitigation(void) 1821 { 1822 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP)) 1823 return; 1824 1825 /* The spectre_v2 cmd line can override spectre_v2_user options */ 1826 if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE) { 1827 spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE; 1828 spectre_v2_user_stibp = SPECTRE_V2_USER_NONE; 1829 } else if (spectre_v2_cmd == SPECTRE_V2_CMD_FORCE) { 1830 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; 1831 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT; 1832 } 1833 1834 /* 1835 * If no STIBP, Intel enhanced IBRS is enabled, or SMT impossible, STIBP 1836 * is not required. 1837 * 1838 * Intel's Enhanced IBRS also protects against cross-thread branch target 1839 * injection in user-mode as the IBRS bit remains always set which 1840 * implicitly enables cross-thread protections. However, in legacy IBRS 1841 * mode, the IBRS bit is set only on kernel entry and cleared on return 1842 * to userspace. AMD Automatic IBRS also does not protect userspace. 1843 * These modes therefore disable the implicit cross-thread protection, 1844 * so allow for STIBP to be selected in those cases. 1845 */ 1846 if (!boot_cpu_has(X86_FEATURE_STIBP) || 1847 !cpu_smt_possible() || 1848 (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && 1849 !boot_cpu_has(X86_FEATURE_AUTOIBRS))) { 1850 spectre_v2_user_stibp = SPECTRE_V2_USER_NONE; 1851 return; 1852 } 1853 1854 if (spectre_v2_user_stibp != SPECTRE_V2_USER_NONE && 1855 (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || 1856 retbleed_mitigation == RETBLEED_MITIGATION_IBPB)) { 1857 if (spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT && 1858 spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT_PREFERRED) 1859 pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n"); 1860 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED; 1861 } 1862 pr_info("%s\n", spectre_v2_user_strings[spectre_v2_user_stibp]); 1863 } 1864 1865 static void __init spectre_v2_user_apply_mitigation(void) 1866 { 1867 /* Initialize Indirect Branch Prediction Barrier */ 1868 if (spectre_v2_user_ibpb != SPECTRE_V2_USER_NONE) { 1869 static_branch_enable(&switch_vcpu_ibpb); 1870 1871 switch (spectre_v2_user_ibpb) { 1872 case SPECTRE_V2_USER_STRICT: 1873 static_branch_enable(&switch_mm_always_ibpb); 1874 break; 1875 case SPECTRE_V2_USER_PRCTL: 1876 case SPECTRE_V2_USER_SECCOMP: 1877 static_branch_enable(&switch_mm_cond_ibpb); 1878 break; 1879 default: 1880 break; 1881 } 1882 1883 pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n", 1884 static_key_enabled(&switch_mm_always_ibpb) ? 
1885 "always-on" : "conditional"); 1886 } 1887 } 1888 1889 static const char * const spectre_v2_strings[] = { 1890 [SPECTRE_V2_NONE] = "Vulnerable", 1891 [SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines", 1892 [SPECTRE_V2_LFENCE] = "Vulnerable: LFENCE", 1893 [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced / Automatic IBRS", 1894 [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced / Automatic IBRS + LFENCE", 1895 [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced / Automatic IBRS + Retpolines", 1896 [SPECTRE_V2_IBRS] = "Mitigation: IBRS", 1897 }; 1898 1899 static bool nospectre_v2 __ro_after_init; 1900 1901 static int __init nospectre_v2_parse_cmdline(char *str) 1902 { 1903 nospectre_v2 = true; 1904 spectre_v2_cmd = SPECTRE_V2_CMD_NONE; 1905 return 0; 1906 } 1907 early_param("nospectre_v2", nospectre_v2_parse_cmdline); 1908 1909 static int __init spectre_v2_parse_cmdline(char *str) 1910 { 1911 if (!str) 1912 return -EINVAL; 1913 1914 if (nospectre_v2) 1915 return 0; 1916 1917 if (!strcmp(str, "off")) { 1918 spectre_v2_cmd = SPECTRE_V2_CMD_NONE; 1919 } else if (!strcmp(str, "on")) { 1920 spectre_v2_cmd = SPECTRE_V2_CMD_FORCE; 1921 setup_force_cpu_bug(X86_BUG_SPECTRE_V2); 1922 setup_force_cpu_bug(X86_BUG_SPECTRE_V2_USER); 1923 } else if (!strcmp(str, "retpoline")) { 1924 spectre_v2_cmd = SPECTRE_V2_CMD_RETPOLINE; 1925 } else if (!strcmp(str, "retpoline,amd") || 1926 !strcmp(str, "retpoline,lfence")) { 1927 spectre_v2_cmd = SPECTRE_V2_CMD_RETPOLINE_LFENCE; 1928 } else if (!strcmp(str, "retpoline,generic")) { 1929 spectre_v2_cmd = SPECTRE_V2_CMD_RETPOLINE_GENERIC; 1930 } else if (!strcmp(str, "eibrs")) { 1931 spectre_v2_cmd = SPECTRE_V2_CMD_EIBRS; 1932 } else if (!strcmp(str, "eibrs,lfence")) { 1933 spectre_v2_cmd = SPECTRE_V2_CMD_EIBRS_LFENCE; 1934 } else if (!strcmp(str, "eibrs,retpoline")) { 1935 spectre_v2_cmd = SPECTRE_V2_CMD_EIBRS_RETPOLINE; 1936 } else if (!strcmp(str, "auto")) { 1937 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO; 1938 } else if (!strcmp(str, "ibrs")) { 1939 spectre_v2_cmd = SPECTRE_V2_CMD_IBRS; 1940 } else { 1941 pr_err("Ignoring unknown spectre_v2 option (%s).", str); 1942 } 1943 1944 return 0; 1945 } 1946 early_param("spectre_v2", spectre_v2_parse_cmdline); 1947 1948 static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void) 1949 { 1950 if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) { 1951 pr_err("Kernel not compiled with retpoline; no mitigation available!"); 1952 return SPECTRE_V2_NONE; 1953 } 1954 1955 return SPECTRE_V2_RETPOLINE; 1956 } 1957 1958 static bool __ro_after_init rrsba_disabled; 1959 1960 /* Disable in-kernel use of non-RSB RET predictors */ 1961 static void __init spec_ctrl_disable_kernel_rrsba(void) 1962 { 1963 if (rrsba_disabled) 1964 return; 1965 1966 if (!(x86_arch_cap_msr & ARCH_CAP_RRSBA)) { 1967 rrsba_disabled = true; 1968 return; 1969 } 1970 1971 if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL)) 1972 return; 1973 1974 x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S; 1975 update_spec_ctrl(x86_spec_ctrl_base); 1976 rrsba_disabled = true; 1977 } 1978 1979 static void __init spectre_v2_select_rsb_mitigation(enum spectre_v2_mitigation mode) 1980 { 1981 /* 1982 * WARNING! There are many subtleties to consider when changing *any* 1983 * code related to RSB-related mitigations. 
Before doing so, carefully 1984 * read the following document, and update if necessary: 1985 * 1986 * Documentation/admin-guide/hw-vuln/rsb.rst 1987 * 1988 * In an overly simplified nutshell: 1989 * 1990 * - User->user RSB attacks are conditionally mitigated during 1991 * context switches by cond_mitigation -> write_ibpb(). 1992 * 1993 * - User->kernel and guest->host attacks are mitigated by eIBRS or 1994 * RSB filling. 1995 * 1996 * Though, depending on config, note that other alternative 1997 * mitigations may end up getting used instead, e.g., IBPB on 1998 * entry/vmexit, call depth tracking, or return thunks. 1999 */ 2000 2001 switch (mode) { 2002 case SPECTRE_V2_NONE: 2003 break; 2004 2005 case SPECTRE_V2_EIBRS: 2006 case SPECTRE_V2_EIBRS_LFENCE: 2007 case SPECTRE_V2_EIBRS_RETPOLINE: 2008 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { 2009 pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n"); 2010 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE); 2011 } 2012 break; 2013 2014 case SPECTRE_V2_RETPOLINE: 2015 case SPECTRE_V2_LFENCE: 2016 case SPECTRE_V2_IBRS: 2017 pr_info("Spectre v2 / SpectreRSB: Filling RSB on context switch and VMEXIT\n"); 2018 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); 2019 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT); 2020 break; 2021 2022 default: 2023 pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation\n"); 2024 dump_stack(); 2025 break; 2026 } 2027 } 2028 2029 /* 2030 * Set BHI_DIS_S to prevent indirect branches in kernel to be influenced by 2031 * branch history in userspace. Not needed if BHI_NO is set. 2032 */ 2033 static bool __init spec_ctrl_bhi_dis(void) 2034 { 2035 if (!boot_cpu_has(X86_FEATURE_BHI_CTRL)) 2036 return false; 2037 2038 x86_spec_ctrl_base |= SPEC_CTRL_BHI_DIS_S; 2039 update_spec_ctrl(x86_spec_ctrl_base); 2040 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_HW); 2041 2042 return true; 2043 } 2044 2045 enum bhi_mitigations { 2046 BHI_MITIGATION_OFF, 2047 BHI_MITIGATION_AUTO, 2048 BHI_MITIGATION_ON, 2049 BHI_MITIGATION_VMEXIT_ONLY, 2050 }; 2051 2052 static enum bhi_mitigations bhi_mitigation __ro_after_init = 2053 IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? 
BHI_MITIGATION_AUTO : BHI_MITIGATION_OFF; 2054 2055 static int __init spectre_bhi_parse_cmdline(char *str) 2056 { 2057 if (!str) 2058 return -EINVAL; 2059 2060 if (!strcmp(str, "off")) 2061 bhi_mitigation = BHI_MITIGATION_OFF; 2062 else if (!strcmp(str, "on")) 2063 bhi_mitigation = BHI_MITIGATION_ON; 2064 else if (!strcmp(str, "vmexit")) 2065 bhi_mitigation = BHI_MITIGATION_VMEXIT_ONLY; 2066 else 2067 pr_err("Ignoring unknown spectre_bhi option (%s)", str); 2068 2069 return 0; 2070 } 2071 early_param("spectre_bhi", spectre_bhi_parse_cmdline); 2072 2073 static void __init bhi_select_mitigation(void) 2074 { 2075 if (!boot_cpu_has(X86_BUG_BHI)) 2076 bhi_mitigation = BHI_MITIGATION_OFF; 2077 2078 if (bhi_mitigation != BHI_MITIGATION_AUTO) 2079 return; 2080 2081 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST)) { 2082 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL)) 2083 bhi_mitigation = BHI_MITIGATION_ON; 2084 else 2085 bhi_mitigation = BHI_MITIGATION_VMEXIT_ONLY; 2086 } else { 2087 bhi_mitigation = BHI_MITIGATION_OFF; 2088 } 2089 } 2090 2091 static void __init bhi_update_mitigation(void) 2092 { 2093 if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE) 2094 bhi_mitigation = BHI_MITIGATION_OFF; 2095 } 2096 2097 static void __init bhi_apply_mitigation(void) 2098 { 2099 if (bhi_mitigation == BHI_MITIGATION_OFF) 2100 return; 2101 2102 /* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */ 2103 if (boot_cpu_has(X86_FEATURE_RETPOLINE) && 2104 !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) { 2105 spec_ctrl_disable_kernel_rrsba(); 2106 if (rrsba_disabled) 2107 return; 2108 } 2109 2110 if (!IS_ENABLED(CONFIG_X86_64)) 2111 return; 2112 2113 /* Mitigate in hardware if supported */ 2114 if (spec_ctrl_bhi_dis()) 2115 return; 2116 2117 if (bhi_mitigation == BHI_MITIGATION_VMEXIT_ONLY) { 2118 pr_info("Spectre BHI mitigation: SW BHB clearing on VM exit only\n"); 2119 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT); 2120 return; 2121 } 2122 2123 pr_info("Spectre BHI mitigation: SW BHB clearing on syscall and VM exit\n"); 2124 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP); 2125 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT); 2126 } 2127 2128 static void __init spectre_v2_select_mitigation(void) 2129 { 2130 if ((spectre_v2_cmd == SPECTRE_V2_CMD_RETPOLINE || 2131 spectre_v2_cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || 2132 spectre_v2_cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC || 2133 spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || 2134 spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && 2135 !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) { 2136 pr_err("RETPOLINE selected but not compiled in. Switching to AUTO select\n"); 2137 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO; 2138 } 2139 2140 if ((spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS || 2141 spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || 2142 spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && 2143 !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { 2144 pr_err("EIBRS selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n"); 2145 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO; 2146 } 2147 2148 if ((spectre_v2_cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || 2149 spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) && 2150 !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { 2151 pr_err("LFENCE selected, but CPU doesn't have a serializing LFENCE. 
Switching to AUTO select\n"); 2152 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO; 2153 } 2154 2155 if (spectre_v2_cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY)) { 2156 pr_err("IBRS selected but not compiled in. Switching to AUTO select\n"); 2157 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO; 2158 } 2159 2160 if (spectre_v2_cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { 2161 pr_err("IBRS selected but not Intel CPU. Switching to AUTO select\n"); 2162 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO; 2163 } 2164 2165 if (spectre_v2_cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) { 2166 pr_err("IBRS selected but CPU doesn't have IBRS. Switching to AUTO select\n"); 2167 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO; 2168 } 2169 2170 if (spectre_v2_cmd == SPECTRE_V2_CMD_IBRS && cpu_feature_enabled(X86_FEATURE_XENPV)) { 2171 pr_err("IBRS selected but running as XenPV guest. Switching to AUTO select\n"); 2172 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO; 2173 } 2174 2175 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) { 2176 spectre_v2_cmd = SPECTRE_V2_CMD_NONE; 2177 return; 2178 } 2179 2180 switch (spectre_v2_cmd) { 2181 case SPECTRE_V2_CMD_NONE: 2182 return; 2183 2184 case SPECTRE_V2_CMD_AUTO: 2185 if (!should_mitigate_vuln(X86_BUG_SPECTRE_V2)) 2186 break; 2187 fallthrough; 2188 case SPECTRE_V2_CMD_FORCE: 2189 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { 2190 spectre_v2_enabled = SPECTRE_V2_EIBRS; 2191 break; 2192 } 2193 2194 spectre_v2_enabled = spectre_v2_select_retpoline(); 2195 break; 2196 2197 case SPECTRE_V2_CMD_RETPOLINE_LFENCE: 2198 pr_err(SPECTRE_V2_LFENCE_MSG); 2199 spectre_v2_enabled = SPECTRE_V2_LFENCE; 2200 break; 2201 2202 case SPECTRE_V2_CMD_RETPOLINE_GENERIC: 2203 spectre_v2_enabled = SPECTRE_V2_RETPOLINE; 2204 break; 2205 2206 case SPECTRE_V2_CMD_RETPOLINE: 2207 spectre_v2_enabled = spectre_v2_select_retpoline(); 2208 break; 2209 2210 case SPECTRE_V2_CMD_IBRS: 2211 spectre_v2_enabled = SPECTRE_V2_IBRS; 2212 break; 2213 2214 case SPECTRE_V2_CMD_EIBRS: 2215 spectre_v2_enabled = SPECTRE_V2_EIBRS; 2216 break; 2217 2218 case SPECTRE_V2_CMD_EIBRS_LFENCE: 2219 spectre_v2_enabled = SPECTRE_V2_EIBRS_LFENCE; 2220 break; 2221 2222 case SPECTRE_V2_CMD_EIBRS_RETPOLINE: 2223 spectre_v2_enabled = SPECTRE_V2_EIBRS_RETPOLINE; 2224 break; 2225 } 2226 } 2227 2228 static void __init spectre_v2_update_mitigation(void) 2229 { 2230 if (spectre_v2_cmd == SPECTRE_V2_CMD_AUTO && 2231 !spectre_v2_in_eibrs_mode(spectre_v2_enabled)) { 2232 if (IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY) && 2233 boot_cpu_has_bug(X86_BUG_RETBLEED) && 2234 retbleed_mitigation != RETBLEED_MITIGATION_NONE && 2235 retbleed_mitigation != RETBLEED_MITIGATION_STUFF && 2236 boot_cpu_has(X86_FEATURE_IBRS) && 2237 boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { 2238 spectre_v2_enabled = SPECTRE_V2_IBRS; 2239 } 2240 } 2241 2242 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) 2243 pr_info("%s\n", spectre_v2_strings[spectre_v2_enabled]); 2244 } 2245 2246 static void __init spectre_v2_apply_mitigation(void) 2247 { 2248 if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) 2249 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); 2250 2251 if (spectre_v2_in_ibrs_mode(spectre_v2_enabled)) { 2252 if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) { 2253 msr_set_bit(MSR_EFER, _EFER_AUTOIBRS); 2254 } else { 2255 x86_spec_ctrl_base |= SPEC_CTRL_IBRS; 2256 update_spec_ctrl(x86_spec_ctrl_base); 2257 } 2258 } 2259 2260 switch (spectre_v2_enabled) { 2261 case SPECTRE_V2_NONE: 2262 return; 2263 2264 case SPECTRE_V2_EIBRS: 2265 break; 2266 2267 
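	/*
	 * Legacy IBRS is not a set-and-forget control like Enhanced /
	 * Automatic IBRS: with X86_FEATURE_KERNEL_IBRS, forced below, the
	 * entry code sets SPEC_CTRL.IBRS on kernel entry and clears it again
	 * on return to userspace (see the comment in
	 * spectre_v2_user_update_mitigation()).
	 */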
	case SPECTRE_V2_IBRS:
		setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
			pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
		break;

	case SPECTRE_V2_LFENCE:
	case SPECTRE_V2_EIBRS_LFENCE:
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
		fallthrough;

	case SPECTRE_V2_RETPOLINE:
	case SPECTRE_V2_EIBRS_RETPOLINE:
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
		break;
	}

	/*
	 * Disable alternate RSB predictions in the kernel when indirect CALLs
	 * and JMPs get protection against BHI and Intramode-BTI, but RET
	 * prediction from a non-RSB predictor is still a risk.
	 */
	if (spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE ||
	    spectre_v2_enabled == SPECTRE_V2_EIBRS_RETPOLINE ||
	    spectre_v2_enabled == SPECTRE_V2_RETPOLINE)
		spec_ctrl_disable_kernel_rrsba();

	spectre_v2_select_rsb_mitigation(spectre_v2_enabled);

	/*
	 * Retpoline protects the kernel, but doesn't protect firmware. IBRS
	 * and Enhanced IBRS protect firmware too, so enable IBRS around
	 * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't
	 * otherwise enabled.
	 *
	 * Use "spectre_v2_enabled" to check Enhanced IBRS instead of
	 * boot_cpu_has(), because the user might select retpoline on the
	 * kernel command line and, if the CPU supports Enhanced IBRS, the
	 * kernel might unintentionally not enable IBRS around firmware calls.
	 */
	if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
	    boot_cpu_has(X86_FEATURE_IBPB) &&
	    (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
	     boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {

		if (retbleed_mitigation != RETBLEED_MITIGATION_IBPB) {
			setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
			pr_info("Enabling Speculation Barrier for firmware calls\n");
		}

	} else if (boot_cpu_has(X86_FEATURE_IBRS) &&
		   !spectre_v2_in_ibrs_mode(spectre_v2_enabled)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
		pr_info("Enabling Restricted Speculation for firmware calls\n");
	}
}

static void update_stibp_msr(void * __unused)
{
	u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
	update_spec_ctrl(val);
}

/* Update x86_spec_ctrl_base in case SMT state changed. */
static void update_stibp_strict(void)
{
	u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;

	if (sched_smt_active())
		mask |= SPEC_CTRL_STIBP;

	if (mask == x86_spec_ctrl_base)
		return;

	pr_info("Update user space SMT mitigation: STIBP %s\n",
		mask & SPEC_CTRL_STIBP ? "always-on" : "off");
	x86_spec_ctrl_base = mask;
	on_each_cpu(update_stibp_msr, NULL, 1);
}

/* Update the static key controlling the evaluation of TIF_SPEC_IB */
static void update_indir_branch_cond(void)
{
	if (sched_smt_active())
		static_branch_enable(&switch_to_cond_stibp);
	else
		static_branch_disable(&switch_to_cond_stibp);
}

#undef pr_fmt
#define pr_fmt(fmt) fmt

/* Update the static key controlling the MDS CPU buffer clear in idle */
static void update_mds_branch_idle(void)
{
	/*
	 * Enable the idle clearing if SMT is active on CPUs which are
	 * affected only by MSBDS and not any other MDS variant.
2365 * 2366 * The other variants cannot be mitigated when SMT is enabled, so 2367 * clearing the buffers on idle just to prevent the Store Buffer 2368 * repartitioning leak would be a window dressing exercise. 2369 */ 2370 if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY)) 2371 return; 2372 2373 if (sched_smt_active()) { 2374 static_branch_enable(&cpu_buf_idle_clear); 2375 } else if (mmio_mitigation == MMIO_MITIGATION_OFF || 2376 (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) { 2377 static_branch_disable(&cpu_buf_idle_clear); 2378 } 2379 } 2380 2381 #undef pr_fmt 2382 #define pr_fmt(fmt) "Speculative Store Bypass: " fmt 2383 2384 static enum ssb_mitigation ssb_mode __ro_after_init = 2385 IS_ENABLED(CONFIG_MITIGATION_SSB) ? SPEC_STORE_BYPASS_AUTO : SPEC_STORE_BYPASS_NONE; 2386 2387 static const char * const ssb_strings[] = { 2388 [SPEC_STORE_BYPASS_NONE] = "Vulnerable", 2389 [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled", 2390 [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl", 2391 [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp", 2392 }; 2393 2394 static bool nossb __ro_after_init; 2395 2396 static int __init nossb_parse_cmdline(char *str) 2397 { 2398 nossb = true; 2399 ssb_mode = SPEC_STORE_BYPASS_NONE; 2400 return 0; 2401 } 2402 early_param("nospec_store_bypass_disable", nossb_parse_cmdline); 2403 2404 static int __init ssb_parse_cmdline(char *str) 2405 { 2406 if (!str) 2407 return -EINVAL; 2408 2409 if (nossb) 2410 return 0; 2411 2412 if (!strcmp(str, "auto")) 2413 ssb_mode = SPEC_STORE_BYPASS_AUTO; 2414 else if (!strcmp(str, "on")) 2415 ssb_mode = SPEC_STORE_BYPASS_DISABLE; 2416 else if (!strcmp(str, "off")) 2417 ssb_mode = SPEC_STORE_BYPASS_NONE; 2418 else if (!strcmp(str, "prctl")) 2419 ssb_mode = SPEC_STORE_BYPASS_PRCTL; 2420 else if (!strcmp(str, "seccomp")) 2421 ssb_mode = IS_ENABLED(CONFIG_SECCOMP) ? 2422 SPEC_STORE_BYPASS_SECCOMP : SPEC_STORE_BYPASS_PRCTL; 2423 else 2424 pr_err("Ignoring unknown spec_store_bypass_disable option (%s).\n", 2425 str); 2426 2427 return 0; 2428 } 2429 early_param("spec_store_bypass_disable", ssb_parse_cmdline); 2430 2431 static void __init ssb_select_mitigation(void) 2432 { 2433 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) { 2434 ssb_mode = SPEC_STORE_BYPASS_NONE; 2435 return; 2436 } 2437 2438 if (ssb_mode == SPEC_STORE_BYPASS_AUTO) { 2439 if (should_mitigate_vuln(X86_BUG_SPEC_STORE_BYPASS)) 2440 ssb_mode = SPEC_STORE_BYPASS_PRCTL; 2441 else 2442 ssb_mode = SPEC_STORE_BYPASS_NONE; 2443 } 2444 2445 if (!boot_cpu_has(X86_FEATURE_SSBD)) 2446 ssb_mode = SPEC_STORE_BYPASS_NONE; 2447 2448 pr_info("%s\n", ssb_strings[ssb_mode]); 2449 } 2450 2451 static void __init ssb_apply_mitigation(void) 2452 { 2453 /* 2454 * We have three CPU feature flags that are in play here: 2455 * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible. 2456 * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass 2457 * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation 2458 */ 2459 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) { 2460 setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE); 2461 /* 2462 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may 2463 * use a completely different MSR and bit dependent on family. 
2464 */ 2465 if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) && 2466 !static_cpu_has(X86_FEATURE_AMD_SSBD)) { 2467 x86_amd_ssb_disable(); 2468 } else { 2469 x86_spec_ctrl_base |= SPEC_CTRL_SSBD; 2470 update_spec_ctrl(x86_spec_ctrl_base); 2471 } 2472 } 2473 } 2474 2475 #undef pr_fmt 2476 #define pr_fmt(fmt) "Speculation prctl: " fmt 2477 2478 static void task_update_spec_tif(struct task_struct *tsk) 2479 { 2480 /* Force the update of the real TIF bits */ 2481 set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE); 2482 2483 /* 2484 * Immediately update the speculation control MSRs for the current 2485 * task, but for a non-current task delay setting the CPU 2486 * mitigation until it is scheduled next. 2487 * 2488 * This can only happen for SECCOMP mitigation. For PRCTL it's 2489 * always the current task. 2490 */ 2491 if (tsk == current) 2492 speculation_ctrl_update_current(); 2493 } 2494 2495 static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl) 2496 { 2497 2498 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush)) 2499 return -EPERM; 2500 2501 switch (ctrl) { 2502 case PR_SPEC_ENABLE: 2503 set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH); 2504 return 0; 2505 case PR_SPEC_DISABLE: 2506 clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH); 2507 return 0; 2508 default: 2509 return -ERANGE; 2510 } 2511 } 2512 2513 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) 2514 { 2515 if (ssb_mode != SPEC_STORE_BYPASS_PRCTL && 2516 ssb_mode != SPEC_STORE_BYPASS_SECCOMP) 2517 return -ENXIO; 2518 2519 switch (ctrl) { 2520 case PR_SPEC_ENABLE: 2521 /* If speculation is force disabled, enable is not allowed */ 2522 if (task_spec_ssb_force_disable(task)) 2523 return -EPERM; 2524 task_clear_spec_ssb_disable(task); 2525 task_clear_spec_ssb_noexec(task); 2526 task_update_spec_tif(task); 2527 break; 2528 case PR_SPEC_DISABLE: 2529 task_set_spec_ssb_disable(task); 2530 task_clear_spec_ssb_noexec(task); 2531 task_update_spec_tif(task); 2532 break; 2533 case PR_SPEC_FORCE_DISABLE: 2534 task_set_spec_ssb_disable(task); 2535 task_set_spec_ssb_force_disable(task); 2536 task_clear_spec_ssb_noexec(task); 2537 task_update_spec_tif(task); 2538 break; 2539 case PR_SPEC_DISABLE_NOEXEC: 2540 if (task_spec_ssb_force_disable(task)) 2541 return -EPERM; 2542 task_set_spec_ssb_disable(task); 2543 task_set_spec_ssb_noexec(task); 2544 task_update_spec_tif(task); 2545 break; 2546 default: 2547 return -ERANGE; 2548 } 2549 return 0; 2550 } 2551 2552 static bool is_spec_ib_user_controlled(void) 2553 { 2554 return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL || 2555 spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || 2556 spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || 2557 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP; 2558 } 2559 2560 static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) 2561 { 2562 switch (ctrl) { 2563 case PR_SPEC_ENABLE: 2564 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && 2565 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) 2566 return 0; 2567 2568 /* 2569 * With strict mode for both IBPB and STIBP, the instruction 2570 * code paths avoid checking this task flag and instead, 2571 * unconditionally run the instruction. However, STIBP and IBPB 2572 * are independent and either can be set to conditionally 2573 * enabled regardless of the mode of the other. 2574 * 2575 * If either is set to conditional, allow the task flag to be 2576 * updated, unless it was force-disabled by a previous prctl 2577 * call. 
Currently, this is possible on an AMD CPU which has the 2578 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the 2579 * kernel is booted with 'spectre_v2_user=seccomp', then 2580 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and 2581 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED. 2582 */ 2583 if (!is_spec_ib_user_controlled() || 2584 task_spec_ib_force_disable(task)) 2585 return -EPERM; 2586 2587 task_clear_spec_ib_disable(task); 2588 task_update_spec_tif(task); 2589 break; 2590 case PR_SPEC_DISABLE: 2591 case PR_SPEC_FORCE_DISABLE: 2592 /* 2593 * Indirect branch speculation is always allowed when 2594 * mitigation is force disabled. 2595 */ 2596 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && 2597 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) 2598 return -EPERM; 2599 2600 if (!is_spec_ib_user_controlled()) 2601 return 0; 2602 2603 task_set_spec_ib_disable(task); 2604 if (ctrl == PR_SPEC_FORCE_DISABLE) 2605 task_set_spec_ib_force_disable(task); 2606 task_update_spec_tif(task); 2607 if (task == current) 2608 indirect_branch_prediction_barrier(); 2609 break; 2610 default: 2611 return -ERANGE; 2612 } 2613 return 0; 2614 } 2615 2616 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, 2617 unsigned long ctrl) 2618 { 2619 switch (which) { 2620 case PR_SPEC_STORE_BYPASS: 2621 return ssb_prctl_set(task, ctrl); 2622 case PR_SPEC_INDIRECT_BRANCH: 2623 return ib_prctl_set(task, ctrl); 2624 case PR_SPEC_L1D_FLUSH: 2625 return l1d_flush_prctl_set(task, ctrl); 2626 default: 2627 return -ENODEV; 2628 } 2629 } 2630 2631 #ifdef CONFIG_SECCOMP 2632 void arch_seccomp_spec_mitigate(struct task_struct *task) 2633 { 2634 if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) 2635 ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); 2636 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || 2637 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) 2638 ib_prctl_set(task, PR_SPEC_FORCE_DISABLE); 2639 } 2640 #endif 2641 2642 static int l1d_flush_prctl_get(struct task_struct *task) 2643 { 2644 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush)) 2645 return PR_SPEC_FORCE_DISABLE; 2646 2647 if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH)) 2648 return PR_SPEC_PRCTL | PR_SPEC_ENABLE; 2649 else 2650 return PR_SPEC_PRCTL | PR_SPEC_DISABLE; 2651 } 2652 2653 static int ssb_prctl_get(struct task_struct *task) 2654 { 2655 switch (ssb_mode) { 2656 case SPEC_STORE_BYPASS_NONE: 2657 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) 2658 return PR_SPEC_ENABLE; 2659 return PR_SPEC_NOT_AFFECTED; 2660 case SPEC_STORE_BYPASS_DISABLE: 2661 return PR_SPEC_DISABLE; 2662 case SPEC_STORE_BYPASS_SECCOMP: 2663 case SPEC_STORE_BYPASS_PRCTL: 2664 case SPEC_STORE_BYPASS_AUTO: 2665 if (task_spec_ssb_force_disable(task)) 2666 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; 2667 if (task_spec_ssb_noexec(task)) 2668 return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC; 2669 if (task_spec_ssb_disable(task)) 2670 return PR_SPEC_PRCTL | PR_SPEC_DISABLE; 2671 return PR_SPEC_PRCTL | PR_SPEC_ENABLE; 2672 } 2673 BUG(); 2674 } 2675 2676 static int ib_prctl_get(struct task_struct *task) 2677 { 2678 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) 2679 return PR_SPEC_NOT_AFFECTED; 2680 2681 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && 2682 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) 2683 return PR_SPEC_ENABLE; 2684 else if (is_spec_ib_user_controlled()) { 2685 if (task_spec_ib_force_disable(task)) 2686 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; 2687 if (task_spec_ib_disable(task)) 2688 return 
PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	} else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
		   spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
		   spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
		return PR_SPEC_DISABLE;
	else
		return PR_SPEC_NOT_AFFECTED;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_get(task);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_get(task);
	case PR_SPEC_L1D_FLUSH:
		return l1d_flush_prctl_get(task);
	default:
		return -ENODEV;
	}
}

void x86_spec_ctrl_setup_ap(void)
{
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		update_spec_ctrl(x86_spec_ctrl_base);

	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
		x86_amd_ssb_disable();
}

bool itlb_multihit_kvm_mitigation;
EXPORT_SYMBOL_FOR_KVM(itlb_multihit_kvm_mitigation);

#undef pr_fmt
#define pr_fmt(fmt) "L1TF: " fmt

/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_L1TF) ? L1TF_MITIGATION_AUTO : L1TF_MITIGATION_OFF;
EXPORT_SYMBOL_FOR_KVM(l1tf_mitigation);
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_FOR_KVM(l1tf_vmx_mitigation);

/*
 * These CPUs all support a 44-bit physical address space internally in the
 * cache, but CPUID can report a smaller number of physical address bits.
 *
 * The L1TF mitigation uses the topmost address bit for the inversion of
 * non-present PTEs. When the installed memory reaches into that topmost bit
 * because of memory holes (observed on machines which report 36 physical
 * address bits but have 32G of RAM installed), the mitigation range check
 * in l1tf_select_mitigation() triggers. This is a false positive because
 * the mitigation is still possible: the cache uses 44 bits internally.
 * Therefore use the cache bits instead of the reported physical bits, and
 * adjust them to 44 on the affected machines if the reported value is less
 * than 44.
2747 */ 2748 static void override_cache_bits(struct cpuinfo_x86 *c) 2749 { 2750 if (c->x86 != 6) 2751 return; 2752 2753 switch (c->x86_vfm) { 2754 case INTEL_NEHALEM: 2755 case INTEL_WESTMERE: 2756 case INTEL_SANDYBRIDGE: 2757 case INTEL_IVYBRIDGE: 2758 case INTEL_HASWELL: 2759 case INTEL_HASWELL_L: 2760 case INTEL_HASWELL_G: 2761 case INTEL_BROADWELL: 2762 case INTEL_BROADWELL_G: 2763 case INTEL_SKYLAKE_L: 2764 case INTEL_SKYLAKE: 2765 case INTEL_KABYLAKE_L: 2766 case INTEL_KABYLAKE: 2767 if (c->x86_cache_bits < 44) 2768 c->x86_cache_bits = 44; 2769 break; 2770 } 2771 } 2772 2773 static void __init l1tf_select_mitigation(void) 2774 { 2775 if (!boot_cpu_has_bug(X86_BUG_L1TF)) { 2776 l1tf_mitigation = L1TF_MITIGATION_OFF; 2777 return; 2778 } 2779 2780 if (l1tf_mitigation != L1TF_MITIGATION_AUTO) 2781 return; 2782 2783 if (!should_mitigate_vuln(X86_BUG_L1TF)) { 2784 l1tf_mitigation = L1TF_MITIGATION_OFF; 2785 return; 2786 } 2787 2788 if (smt_mitigations == SMT_MITIGATIONS_ON) 2789 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; 2790 else 2791 l1tf_mitigation = L1TF_MITIGATION_FLUSH; 2792 } 2793 2794 static void __init l1tf_apply_mitigation(void) 2795 { 2796 u64 half_pa; 2797 2798 if (!boot_cpu_has_bug(X86_BUG_L1TF)) 2799 return; 2800 2801 override_cache_bits(&boot_cpu_data); 2802 2803 switch (l1tf_mitigation) { 2804 case L1TF_MITIGATION_OFF: 2805 case L1TF_MITIGATION_FLUSH_NOWARN: 2806 case L1TF_MITIGATION_FLUSH: 2807 case L1TF_MITIGATION_AUTO: 2808 break; 2809 case L1TF_MITIGATION_FLUSH_NOSMT: 2810 case L1TF_MITIGATION_FULL: 2811 cpu_smt_disable(false); 2812 break; 2813 case L1TF_MITIGATION_FULL_FORCE: 2814 cpu_smt_disable(true); 2815 break; 2816 } 2817 2818 #if CONFIG_PGTABLE_LEVELS == 2 2819 pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n"); 2820 return; 2821 #endif 2822 2823 half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; 2824 if (l1tf_mitigation != L1TF_MITIGATION_OFF && 2825 e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { 2826 pr_warn("System has more than MAX_PA/2 memory. 
L1TF mitigation not effective.\n"); 2827 pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n", 2828 half_pa); 2829 pr_info("However, doing so will make a part of your RAM unusable.\n"); 2830 pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n"); 2831 return; 2832 } 2833 2834 setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV); 2835 } 2836 2837 static int __init l1tf_cmdline(char *str) 2838 { 2839 if (!boot_cpu_has_bug(X86_BUG_L1TF)) 2840 return 0; 2841 2842 if (!str) 2843 return -EINVAL; 2844 2845 if (!strcmp(str, "off")) 2846 l1tf_mitigation = L1TF_MITIGATION_OFF; 2847 else if (!strcmp(str, "flush,nowarn")) 2848 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN; 2849 else if (!strcmp(str, "flush")) 2850 l1tf_mitigation = L1TF_MITIGATION_FLUSH; 2851 else if (!strcmp(str, "flush,nosmt")) 2852 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; 2853 else if (!strcmp(str, "full")) 2854 l1tf_mitigation = L1TF_MITIGATION_FULL; 2855 else if (!strcmp(str, "full,force")) 2856 l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE; 2857 2858 return 0; 2859 } 2860 early_param("l1tf", l1tf_cmdline); 2861 2862 #undef pr_fmt 2863 #define pr_fmt(fmt) "Speculative Return Stack Overflow: " fmt 2864 2865 static const char * const srso_strings[] = { 2866 [SRSO_MITIGATION_NONE] = "Vulnerable", 2867 [SRSO_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", 2868 [SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED] = "Vulnerable: Safe RET, no microcode", 2869 [SRSO_MITIGATION_MICROCODE] = "Vulnerable: Microcode, no safe RET", 2870 [SRSO_MITIGATION_NOSMT] = "Mitigation: SMT disabled", 2871 [SRSO_MITIGATION_SAFE_RET] = "Mitigation: Safe RET", 2872 [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB", 2873 [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only", 2874 [SRSO_MITIGATION_BP_SPEC_REDUCE] = "Mitigation: Reduced Speculation" 2875 }; 2876 2877 static int __init srso_parse_cmdline(char *str) 2878 { 2879 if (!str) 2880 return -EINVAL; 2881 2882 if (!strcmp(str, "off")) 2883 srso_mitigation = SRSO_MITIGATION_NONE; 2884 else if (!strcmp(str, "microcode")) 2885 srso_mitigation = SRSO_MITIGATION_MICROCODE; 2886 else if (!strcmp(str, "safe-ret")) 2887 srso_mitigation = SRSO_MITIGATION_SAFE_RET; 2888 else if (!strcmp(str, "ibpb")) 2889 srso_mitigation = SRSO_MITIGATION_IBPB; 2890 else if (!strcmp(str, "ibpb-vmexit")) 2891 srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT; 2892 else 2893 pr_err("Ignoring unknown SRSO option (%s).", str); 2894 2895 return 0; 2896 } 2897 early_param("spec_rstack_overflow", srso_parse_cmdline); 2898 2899 #define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options." 2900 2901 static void __init srso_select_mitigation(void) 2902 { 2903 if (!boot_cpu_has_bug(X86_BUG_SRSO)) { 2904 srso_mitigation = SRSO_MITIGATION_NONE; 2905 return; 2906 } 2907 2908 if (srso_mitigation == SRSO_MITIGATION_AUTO) { 2909 /* 2910 * Use safe-RET if user->kernel or guest->host protection is 2911 * required. Otherwise the 'microcode' mitigation is sufficient 2912 * to protect the user->user and guest->guest vectors. 
2913 */ 2914 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) || 2915 (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) && 2916 !boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO))) { 2917 srso_mitigation = SRSO_MITIGATION_SAFE_RET; 2918 } else if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) || 2919 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) { 2920 srso_mitigation = SRSO_MITIGATION_MICROCODE; 2921 } else { 2922 srso_mitigation = SRSO_MITIGATION_NONE; 2923 return; 2924 } 2925 } 2926 2927 /* Zen1/2 with SMT off aren't vulnerable to SRSO. */ 2928 if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) { 2929 srso_mitigation = SRSO_MITIGATION_NOSMT; 2930 return; 2931 } 2932 2933 if (!boot_cpu_has(X86_FEATURE_IBPB_BRTYPE)) { 2934 pr_warn("IBPB-extending microcode not applied!\n"); 2935 pr_warn(SRSO_NOTICE); 2936 2937 /* 2938 * Safe-RET provides partial mitigation without microcode, but 2939 * other mitigations require microcode to provide any 2940 * mitigations. 2941 */ 2942 if (srso_mitigation == SRSO_MITIGATION_SAFE_RET) 2943 srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED; 2944 else 2945 srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED; 2946 } 2947 2948 switch (srso_mitigation) { 2949 case SRSO_MITIGATION_SAFE_RET: 2950 case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED: 2951 if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO)) { 2952 srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT; 2953 goto ibpb_on_vmexit; 2954 } 2955 2956 if (!IS_ENABLED(CONFIG_MITIGATION_SRSO)) { 2957 pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n"); 2958 srso_mitigation = SRSO_MITIGATION_NONE; 2959 } 2960 break; 2961 ibpb_on_vmexit: 2962 case SRSO_MITIGATION_IBPB_ON_VMEXIT: 2963 if (boot_cpu_has(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) { 2964 pr_notice("Reducing speculation to address VM/HV SRSO attack vector.\n"); 2965 srso_mitigation = SRSO_MITIGATION_BP_SPEC_REDUCE; 2966 break; 2967 } 2968 fallthrough; 2969 case SRSO_MITIGATION_IBPB: 2970 if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) { 2971 pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n"); 2972 srso_mitigation = SRSO_MITIGATION_NONE; 2973 } 2974 break; 2975 default: 2976 break; 2977 } 2978 } 2979 2980 static void __init srso_update_mitigation(void) 2981 { 2982 if (!boot_cpu_has_bug(X86_BUG_SRSO)) 2983 return; 2984 2985 /* If retbleed is using IBPB, that works for SRSO as well */ 2986 if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB && 2987 boot_cpu_has(X86_FEATURE_IBPB_BRTYPE)) 2988 srso_mitigation = SRSO_MITIGATION_IBPB; 2989 2990 pr_info("%s\n", srso_strings[srso_mitigation]); 2991 } 2992 2993 static void __init srso_apply_mitigation(void) 2994 { 2995 /* 2996 * Clear the feature flag if this mitigation is not selected as that 2997 * feature flag controls the BpSpecReduce MSR bit toggling in KVM. 2998 */ 2999 if (srso_mitigation != SRSO_MITIGATION_BP_SPEC_REDUCE) 3000 setup_clear_cpu_cap(X86_FEATURE_SRSO_BP_SPEC_REDUCE); 3001 3002 if (srso_mitigation == SRSO_MITIGATION_NONE) { 3003 if (boot_cpu_has(X86_FEATURE_SBPB)) 3004 x86_pred_cmd = PRED_CMD_SBPB; 3005 return; 3006 } 3007 3008 switch (srso_mitigation) { 3009 case SRSO_MITIGATION_SAFE_RET: 3010 case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED: 3011 /* 3012 * Enable the return thunk for generated code 3013 * like ftrace, static_call, etc. 
3014 */ 3015 setup_force_cpu_cap(X86_FEATURE_RETHUNK); 3016 setup_force_cpu_cap(X86_FEATURE_UNRET); 3017 3018 if (boot_cpu_data.x86 == 0x19) { 3019 setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS); 3020 set_return_thunk(srso_alias_return_thunk); 3021 } else { 3022 setup_force_cpu_cap(X86_FEATURE_SRSO); 3023 set_return_thunk(srso_return_thunk); 3024 } 3025 break; 3026 case SRSO_MITIGATION_IBPB: 3027 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); 3028 /* 3029 * IBPB on entry already obviates the need for 3030 * software-based untraining so clear those in case some 3031 * other mitigation like Retbleed has selected them. 3032 */ 3033 setup_clear_cpu_cap(X86_FEATURE_UNRET); 3034 setup_clear_cpu_cap(X86_FEATURE_RETHUNK); 3035 fallthrough; 3036 case SRSO_MITIGATION_IBPB_ON_VMEXIT: 3037 setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); 3038 /* 3039 * There is no need for RSB filling: entry_ibpb() ensures 3040 * all predictions, including the RSB, are invalidated, 3041 * regardless of IBPB implementation. 3042 */ 3043 setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT); 3044 break; 3045 default: 3046 break; 3047 } 3048 } 3049 3050 #undef pr_fmt 3051 #define pr_fmt(fmt) "VMSCAPE: " fmt 3052 3053 enum vmscape_mitigations { 3054 VMSCAPE_MITIGATION_NONE, 3055 VMSCAPE_MITIGATION_AUTO, 3056 VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER, 3057 VMSCAPE_MITIGATION_IBPB_ON_VMEXIT, 3058 }; 3059 3060 static const char * const vmscape_strings[] = { 3061 [VMSCAPE_MITIGATION_NONE] = "Vulnerable", 3062 /* [VMSCAPE_MITIGATION_AUTO] */ 3063 [VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER] = "Mitigation: IBPB before exit to userspace", 3064 [VMSCAPE_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT", 3065 }; 3066 3067 static enum vmscape_mitigations vmscape_mitigation __ro_after_init = 3068 IS_ENABLED(CONFIG_MITIGATION_VMSCAPE) ? 
VMSCAPE_MITIGATION_AUTO : VMSCAPE_MITIGATION_NONE; 3069 3070 static int __init vmscape_parse_cmdline(char *str) 3071 { 3072 if (!str) 3073 return -EINVAL; 3074 3075 if (!strcmp(str, "off")) { 3076 vmscape_mitigation = VMSCAPE_MITIGATION_NONE; 3077 } else if (!strcmp(str, "ibpb")) { 3078 vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER; 3079 } else if (!strcmp(str, "force")) { 3080 setup_force_cpu_bug(X86_BUG_VMSCAPE); 3081 vmscape_mitigation = VMSCAPE_MITIGATION_AUTO; 3082 } else { 3083 pr_err("Ignoring unknown vmscape=%s option.\n", str); 3084 } 3085 3086 return 0; 3087 } 3088 early_param("vmscape", vmscape_parse_cmdline); 3089 3090 static void __init vmscape_select_mitigation(void) 3091 { 3092 if (!boot_cpu_has_bug(X86_BUG_VMSCAPE) || 3093 !boot_cpu_has(X86_FEATURE_IBPB)) { 3094 vmscape_mitigation = VMSCAPE_MITIGATION_NONE; 3095 return; 3096 } 3097 3098 if (vmscape_mitigation == VMSCAPE_MITIGATION_AUTO) { 3099 if (should_mitigate_vuln(X86_BUG_VMSCAPE)) 3100 vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER; 3101 else 3102 vmscape_mitigation = VMSCAPE_MITIGATION_NONE; 3103 } 3104 } 3105 3106 static void __init vmscape_update_mitigation(void) 3107 { 3108 if (!boot_cpu_has_bug(X86_BUG_VMSCAPE)) 3109 return; 3110 3111 if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB || 3112 srso_mitigation == SRSO_MITIGATION_IBPB_ON_VMEXIT) 3113 vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_ON_VMEXIT; 3114 3115 pr_info("%s\n", vmscape_strings[vmscape_mitigation]); 3116 } 3117 3118 static void __init vmscape_apply_mitigation(void) 3119 { 3120 if (vmscape_mitigation == VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER) 3121 setup_force_cpu_cap(X86_FEATURE_IBPB_EXIT_TO_USER); 3122 } 3123 3124 #undef pr_fmt 3125 #define pr_fmt(fmt) fmt 3126 3127 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n" 3128 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n" 3129 #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n" 3130 #define VMSCAPE_MSG_SMT "VMSCAPE: SMT on, STIBP is required for full protection. 
See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/vmscape.html for more details.\n" 3131 3132 void cpu_bugs_smt_update(void) 3133 { 3134 mutex_lock(&spec_ctrl_mutex); 3135 3136 if (sched_smt_active() && unprivileged_ebpf_enabled() && 3137 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) 3138 pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); 3139 3140 switch (spectre_v2_user_stibp) { 3141 case SPECTRE_V2_USER_NONE: 3142 break; 3143 case SPECTRE_V2_USER_STRICT: 3144 case SPECTRE_V2_USER_STRICT_PREFERRED: 3145 update_stibp_strict(); 3146 break; 3147 case SPECTRE_V2_USER_PRCTL: 3148 case SPECTRE_V2_USER_SECCOMP: 3149 update_indir_branch_cond(); 3150 break; 3151 } 3152 3153 switch (mds_mitigation) { 3154 case MDS_MITIGATION_FULL: 3155 case MDS_MITIGATION_AUTO: 3156 case MDS_MITIGATION_VMWERV: 3157 if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY)) 3158 pr_warn_once(MDS_MSG_SMT); 3159 update_mds_branch_idle(); 3160 break; 3161 case MDS_MITIGATION_OFF: 3162 break; 3163 } 3164 3165 switch (taa_mitigation) { 3166 case TAA_MITIGATION_VERW: 3167 case TAA_MITIGATION_AUTO: 3168 case TAA_MITIGATION_UCODE_NEEDED: 3169 if (sched_smt_active()) 3170 pr_warn_once(TAA_MSG_SMT); 3171 break; 3172 case TAA_MITIGATION_TSX_DISABLED: 3173 case TAA_MITIGATION_OFF: 3174 break; 3175 } 3176 3177 switch (mmio_mitigation) { 3178 case MMIO_MITIGATION_VERW: 3179 case MMIO_MITIGATION_AUTO: 3180 case MMIO_MITIGATION_UCODE_NEEDED: 3181 if (sched_smt_active()) 3182 pr_warn_once(MMIO_MSG_SMT); 3183 break; 3184 case MMIO_MITIGATION_OFF: 3185 break; 3186 } 3187 3188 switch (tsa_mitigation) { 3189 case TSA_MITIGATION_USER_KERNEL: 3190 case TSA_MITIGATION_VM: 3191 case TSA_MITIGATION_AUTO: 3192 case TSA_MITIGATION_FULL: 3193 /* 3194 * TSA-SQ can potentially lead to info leakage between 3195 * SMT threads. 3196 */ 3197 if (sched_smt_active()) 3198 static_branch_enable(&cpu_buf_idle_clear); 3199 else 3200 static_branch_disable(&cpu_buf_idle_clear); 3201 break; 3202 case TSA_MITIGATION_NONE: 3203 case TSA_MITIGATION_UCODE_NEEDED: 3204 break; 3205 } 3206 3207 switch (vmscape_mitigation) { 3208 case VMSCAPE_MITIGATION_NONE: 3209 case VMSCAPE_MITIGATION_AUTO: 3210 break; 3211 case VMSCAPE_MITIGATION_IBPB_ON_VMEXIT: 3212 case VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER: 3213 /* 3214 * Hypervisors can be attacked across-threads, warn for SMT when 3215 * STIBP is not already enabled system-wide. 3216 * 3217 * Intel eIBRS (!AUTOIBRS) implies STIBP on. 3218 */ 3219 if (!sched_smt_active() || 3220 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || 3221 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED || 3222 (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && 3223 !boot_cpu_has(X86_FEATURE_AUTOIBRS))) 3224 break; 3225 pr_warn_once(VMSCAPE_MSG_SMT); 3226 break; 3227 } 3228 3229 mutex_unlock(&spec_ctrl_mutex); 3230 } 3231 3232 void __init cpu_select_mitigations(void) 3233 { 3234 /* 3235 * Read the SPEC_CTRL MSR to account for reserved bits which may 3236 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD 3237 * init code as it is not enumerated and depends on the family. 3238 */ 3239 if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) { 3240 rdmsrq(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); 3241 3242 /* 3243 * Previously running kernel (kexec), may have some controls 3244 * turned ON. Clear them and let the mitigations setup below 3245 * rediscover them based on configuration. 
3246 */ 3247 x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK; 3248 } 3249 3250 x86_arch_cap_msr = x86_read_arch_cap_msr(); 3251 3252 cpu_print_attack_vectors(); 3253 3254 /* Select the proper CPU mitigations before patching alternatives: */ 3255 spectre_v1_select_mitigation(); 3256 spectre_v2_select_mitigation(); 3257 retbleed_select_mitigation(); 3258 spectre_v2_user_select_mitigation(); 3259 ssb_select_mitigation(); 3260 l1tf_select_mitigation(); 3261 mds_select_mitigation(); 3262 taa_select_mitigation(); 3263 mmio_select_mitigation(); 3264 rfds_select_mitigation(); 3265 srbds_select_mitigation(); 3266 l1d_flush_select_mitigation(); 3267 srso_select_mitigation(); 3268 gds_select_mitigation(); 3269 its_select_mitigation(); 3270 bhi_select_mitigation(); 3271 tsa_select_mitigation(); 3272 vmscape_select_mitigation(); 3273 3274 /* 3275 * After mitigations are selected, some may need to update their 3276 * choices. 3277 */ 3278 spectre_v2_update_mitigation(); 3279 /* 3280 * retbleed_update_mitigation() relies on the state set by 3281 * spectre_v2_update_mitigation(); specifically it wants to know about 3282 * spectre_v2=ibrs. 3283 */ 3284 retbleed_update_mitigation(); 3285 /* 3286 * its_update_mitigation() depends on spectre_v2_update_mitigation() 3287 * and retbleed_update_mitigation(). 3288 */ 3289 its_update_mitigation(); 3290 3291 /* 3292 * spectre_v2_user_update_mitigation() depends on 3293 * retbleed_update_mitigation(), specifically the STIBP 3294 * selection is forced for UNRET or IBPB. 3295 */ 3296 spectre_v2_user_update_mitigation(); 3297 mds_update_mitigation(); 3298 taa_update_mitigation(); 3299 mmio_update_mitigation(); 3300 rfds_update_mitigation(); 3301 bhi_update_mitigation(); 3302 /* srso_update_mitigation() depends on retbleed_update_mitigation(). */ 3303 srso_update_mitigation(); 3304 vmscape_update_mitigation(); 3305 3306 spectre_v1_apply_mitigation(); 3307 spectre_v2_apply_mitigation(); 3308 retbleed_apply_mitigation(); 3309 spectre_v2_user_apply_mitigation(); 3310 ssb_apply_mitigation(); 3311 l1tf_apply_mitigation(); 3312 mds_apply_mitigation(); 3313 taa_apply_mitigation(); 3314 mmio_apply_mitigation(); 3315 rfds_apply_mitigation(); 3316 srbds_apply_mitigation(); 3317 srso_apply_mitigation(); 3318 gds_apply_mitigation(); 3319 its_apply_mitigation(); 3320 bhi_apply_mitigation(); 3321 tsa_apply_mitigation(); 3322 vmscape_apply_mitigation(); 3323 } 3324 3325 #ifdef CONFIG_SYSFS 3326 3327 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion" 3328 3329 #if IS_ENABLED(CONFIG_KVM_INTEL) 3330 static const char * const l1tf_vmx_states[] = { 3331 [VMENTER_L1D_FLUSH_AUTO] = "auto", 3332 [VMENTER_L1D_FLUSH_NEVER] = "vulnerable", 3333 [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes", 3334 [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes", 3335 [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled", 3336 [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary" 3337 }; 3338 3339 static ssize_t l1tf_show_state(char *buf) 3340 { 3341 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) 3342 return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG); 3343 3344 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED || 3345 (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER && 3346 sched_smt_active())) { 3347 return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG, 3348 l1tf_vmx_states[l1tf_vmx_mitigation]); 3349 } 3350 3351 return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG, 3352 l1tf_vmx_states[l1tf_vmx_mitigation], 3353 sched_smt_active() ? 
"vulnerable" : "disabled"); 3354 } 3355 3356 static ssize_t itlb_multihit_show_state(char *buf) 3357 { 3358 if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) || 3359 !boot_cpu_has(X86_FEATURE_VMX)) 3360 return sysfs_emit(buf, "KVM: Mitigation: VMX unsupported\n"); 3361 else if (!(cr4_read_shadow() & X86_CR4_VMXE)) 3362 return sysfs_emit(buf, "KVM: Mitigation: VMX disabled\n"); 3363 else if (itlb_multihit_kvm_mitigation) 3364 return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n"); 3365 else 3366 return sysfs_emit(buf, "KVM: Vulnerable\n"); 3367 } 3368 #else 3369 static ssize_t l1tf_show_state(char *buf) 3370 { 3371 return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG); 3372 } 3373 3374 static ssize_t itlb_multihit_show_state(char *buf) 3375 { 3376 return sysfs_emit(buf, "Processor vulnerable\n"); 3377 } 3378 #endif 3379 3380 static ssize_t mds_show_state(char *buf) 3381 { 3382 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { 3383 return sysfs_emit(buf, "%s; SMT Host state unknown\n", 3384 mds_strings[mds_mitigation]); 3385 } 3386 3387 if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) { 3388 return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], 3389 (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" : 3390 sched_smt_active() ? "mitigated" : "disabled")); 3391 } 3392 3393 return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], 3394 sched_smt_active() ? "vulnerable" : "disabled"); 3395 } 3396 3397 static ssize_t tsx_async_abort_show_state(char *buf) 3398 { 3399 if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) || 3400 (taa_mitigation == TAA_MITIGATION_OFF)) 3401 return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]); 3402 3403 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { 3404 return sysfs_emit(buf, "%s; SMT Host state unknown\n", 3405 taa_strings[taa_mitigation]); 3406 } 3407 3408 return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation], 3409 sched_smt_active() ? "vulnerable" : "disabled"); 3410 } 3411 3412 static ssize_t mmio_stale_data_show_state(char *buf) 3413 { 3414 if (mmio_mitigation == MMIO_MITIGATION_OFF) 3415 return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]); 3416 3417 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { 3418 return sysfs_emit(buf, "%s; SMT Host state unknown\n", 3419 mmio_strings[mmio_mitigation]); 3420 } 3421 3422 return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation], 3423 sched_smt_active() ? 
"vulnerable" : "disabled"); 3424 } 3425 3426 static ssize_t rfds_show_state(char *buf) 3427 { 3428 return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]); 3429 } 3430 3431 static ssize_t old_microcode_show_state(char *buf) 3432 { 3433 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) 3434 return sysfs_emit(buf, "Unknown: running under hypervisor"); 3435 3436 return sysfs_emit(buf, "Vulnerable\n"); 3437 } 3438 3439 static ssize_t its_show_state(char *buf) 3440 { 3441 return sysfs_emit(buf, "%s\n", its_strings[its_mitigation]); 3442 } 3443 3444 static char *stibp_state(void) 3445 { 3446 if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && 3447 !boot_cpu_has(X86_FEATURE_AUTOIBRS)) 3448 return ""; 3449 3450 switch (spectre_v2_user_stibp) { 3451 case SPECTRE_V2_USER_NONE: 3452 return "; STIBP: disabled"; 3453 case SPECTRE_V2_USER_STRICT: 3454 return "; STIBP: forced"; 3455 case SPECTRE_V2_USER_STRICT_PREFERRED: 3456 return "; STIBP: always-on"; 3457 case SPECTRE_V2_USER_PRCTL: 3458 case SPECTRE_V2_USER_SECCOMP: 3459 if (static_key_enabled(&switch_to_cond_stibp)) 3460 return "; STIBP: conditional"; 3461 } 3462 return ""; 3463 } 3464 3465 static char *ibpb_state(void) 3466 { 3467 if (boot_cpu_has(X86_FEATURE_IBPB)) { 3468 if (static_key_enabled(&switch_mm_always_ibpb)) 3469 return "; IBPB: always-on"; 3470 if (static_key_enabled(&switch_mm_cond_ibpb)) 3471 return "; IBPB: conditional"; 3472 return "; IBPB: disabled"; 3473 } 3474 return ""; 3475 } 3476 3477 static char *pbrsb_eibrs_state(void) 3478 { 3479 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { 3480 if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) || 3481 boot_cpu_has(X86_FEATURE_RSB_VMEXIT)) 3482 return "; PBRSB-eIBRS: SW sequence"; 3483 else 3484 return "; PBRSB-eIBRS: Vulnerable"; 3485 } else { 3486 return "; PBRSB-eIBRS: Not affected"; 3487 } 3488 } 3489 3490 static const char *spectre_bhi_state(void) 3491 { 3492 if (!boot_cpu_has_bug(X86_BUG_BHI)) 3493 return "; BHI: Not affected"; 3494 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW)) 3495 return "; BHI: BHI_DIS_S"; 3496 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP)) 3497 return "; BHI: SW loop, KVM: SW loop"; 3498 else if (boot_cpu_has(X86_FEATURE_RETPOLINE) && 3499 !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) && 3500 rrsba_disabled) 3501 return "; BHI: Retpoline"; 3502 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_VMEXIT)) 3503 return "; BHI: Vulnerable, KVM: SW loop"; 3504 3505 return "; BHI: Vulnerable"; 3506 } 3507 3508 static ssize_t spectre_v2_show_state(char *buf) 3509 { 3510 if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) 3511 return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n"); 3512 3513 if (sched_smt_active() && unprivileged_ebpf_enabled() && 3514 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) 3515 return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n"); 3516 3517 return sysfs_emit(buf, "%s%s%s%s%s%s%s%s\n", 3518 spectre_v2_strings[spectre_v2_enabled], 3519 ibpb_state(), 3520 boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? "; IBRS_FW" : "", 3521 stibp_state(), 3522 boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? 
"; RSB filling" : "", 3523 pbrsb_eibrs_state(), 3524 spectre_bhi_state(), 3525 /* this should always be at the end */ 3526 spectre_v2_module_string()); 3527 } 3528 3529 static ssize_t srbds_show_state(char *buf) 3530 { 3531 return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]); 3532 } 3533 3534 static ssize_t retbleed_show_state(char *buf) 3535 { 3536 if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || 3537 retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { 3538 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && 3539 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) 3540 return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n"); 3541 3542 return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation], 3543 !sched_smt_active() ? "disabled" : 3544 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || 3545 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ? 3546 "enabled with STIBP protection" : "vulnerable"); 3547 } 3548 3549 return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]); 3550 } 3551 3552 static ssize_t srso_show_state(char *buf) 3553 { 3554 return sysfs_emit(buf, "%s\n", srso_strings[srso_mitigation]); 3555 } 3556 3557 static ssize_t gds_show_state(char *buf) 3558 { 3559 return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]); 3560 } 3561 3562 static ssize_t tsa_show_state(char *buf) 3563 { 3564 return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]); 3565 } 3566 3567 static ssize_t vmscape_show_state(char *buf) 3568 { 3569 return sysfs_emit(buf, "%s\n", vmscape_strings[vmscape_mitigation]); 3570 } 3571 3572 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, 3573 char *buf, unsigned int bug) 3574 { 3575 if (!boot_cpu_has_bug(bug)) 3576 return sysfs_emit(buf, "Not affected\n"); 3577 3578 switch (bug) { 3579 case X86_BUG_CPU_MELTDOWN: 3580 if (boot_cpu_has(X86_FEATURE_PTI)) 3581 return sysfs_emit(buf, "Mitigation: PTI\n"); 3582 3583 if (hypervisor_is_type(X86_HYPER_XEN_PV)) 3584 return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n"); 3585 3586 break; 3587 3588 case X86_BUG_SPECTRE_V1: 3589 return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]); 3590 3591 case X86_BUG_SPECTRE_V2: 3592 return spectre_v2_show_state(buf); 3593 3594 case X86_BUG_SPEC_STORE_BYPASS: 3595 return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]); 3596 3597 case X86_BUG_L1TF: 3598 if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV)) 3599 return l1tf_show_state(buf); 3600 break; 3601 3602 case X86_BUG_MDS: 3603 return mds_show_state(buf); 3604 3605 case X86_BUG_TAA: 3606 return tsx_async_abort_show_state(buf); 3607 3608 case X86_BUG_ITLB_MULTIHIT: 3609 return itlb_multihit_show_state(buf); 3610 3611 case X86_BUG_SRBDS: 3612 return srbds_show_state(buf); 3613 3614 case X86_BUG_MMIO_STALE_DATA: 3615 return mmio_stale_data_show_state(buf); 3616 3617 case X86_BUG_RETBLEED: 3618 return retbleed_show_state(buf); 3619 3620 case X86_BUG_SRSO: 3621 return srso_show_state(buf); 3622 3623 case X86_BUG_GDS: 3624 return gds_show_state(buf); 3625 3626 case X86_BUG_RFDS: 3627 return rfds_show_state(buf); 3628 3629 case X86_BUG_OLD_MICROCODE: 3630 return old_microcode_show_state(buf); 3631 3632 case X86_BUG_ITS: 3633 return its_show_state(buf); 3634 3635 case X86_BUG_TSA: 3636 return tsa_show_state(buf); 3637 3638 case X86_BUG_VMSCAPE: 3639 return vmscape_show_state(buf); 3640 3641 default: 3642 break; 3643 } 3644 3645 return sysfs_emit(buf, "Vulnerable\n"); 3646 } 3647 3648 
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) 3649 { 3650 return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN); 3651 } 3652 3653 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) 3654 { 3655 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1); 3656 } 3657 3658 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) 3659 { 3660 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2); 3661 } 3662 3663 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) 3664 { 3665 return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS); 3666 } 3667 3668 ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf) 3669 { 3670 return cpu_show_common(dev, attr, buf, X86_BUG_L1TF); 3671 } 3672 3673 ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf) 3674 { 3675 return cpu_show_common(dev, attr, buf, X86_BUG_MDS); 3676 } 3677 3678 ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf) 3679 { 3680 return cpu_show_common(dev, attr, buf, X86_BUG_TAA); 3681 } 3682 3683 ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf) 3684 { 3685 return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT); 3686 } 3687 3688 ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf) 3689 { 3690 return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS); 3691 } 3692 3693 ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf) 3694 { 3695 return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA); 3696 } 3697 3698 ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf) 3699 { 3700 return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED); 3701 } 3702 3703 ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf) 3704 { 3705 return cpu_show_common(dev, attr, buf, X86_BUG_SRSO); 3706 } 3707 3708 ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf) 3709 { 3710 return cpu_show_common(dev, attr, buf, X86_BUG_GDS); 3711 } 3712 3713 ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attribute *attr, char *buf) 3714 { 3715 return cpu_show_common(dev, attr, buf, X86_BUG_RFDS); 3716 } 3717 3718 ssize_t cpu_show_old_microcode(struct device *dev, struct device_attribute *attr, char *buf) 3719 { 3720 return cpu_show_common(dev, attr, buf, X86_BUG_OLD_MICROCODE); 3721 } 3722 3723 ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf) 3724 { 3725 return cpu_show_common(dev, attr, buf, X86_BUG_ITS); 3726 } 3727 3728 ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf) 3729 { 3730 return cpu_show_common(dev, attr, buf, X86_BUG_TSA); 3731 } 3732 3733 ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf) 3734 { 3735 return cpu_show_common(dev, attr, buf, X86_BUG_VMSCAPE); 3736 } 3737 #endif 3738 3739 void __warn_thunk(void) 3740 { 3741 WARN_ONCE(1, "Unpatched return thunk in use. This should not happen!\n"); 3742 } 3743