// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Cyrix stuff, June 1998 by:
 * - Rafael R. Reilova (moved everything from head.S),
 *   <rreilova@ececs.uc.edu>
 * - Channing Corn (tests & fixes),
 * - Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>
#include <linux/pgtable.h>
#include <linux/bpf.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/api.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/cpu_device_id.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>
#include <asm/tlbflush.h>
#include <asm/cpu.h>

#include "cpu.h"

/*
 * Speculation Vulnerability Handling
 *
 * Each vulnerability is handled with the following functions:
 *   <vuln>_select_mitigation() -- Selects a mitigation to use.  This should
 *                                 take into account all relevant command line
 *                                 options.
 *   <vuln>_update_mitigation() -- This is called after all vulnerabilities have
 *                                 selected a mitigation, in case the selection
 *                                 may want to change based on other choices
 *                                 made.  This function is optional.
 *   <vuln>_apply_mitigation()  -- Enable the selected mitigation.
 *
 * The compile-time mitigation in all cases should be AUTO.  An explicit
 * command-line option can override AUTO.  If no such option is
 * provided, <vuln>_select_mitigation() will override AUTO to the best
 * mitigation option.
 */

static void __init spectre_v1_select_mitigation(void);
static void __init spectre_v1_apply_mitigation(void);
static void __init spectre_v2_select_mitigation(void);
static void __init spectre_v2_update_mitigation(void);
static void __init spectre_v2_apply_mitigation(void);
static void __init retbleed_select_mitigation(void);
static void __init retbleed_update_mitigation(void);
static void __init retbleed_apply_mitigation(void);
static void __init spectre_v2_user_select_mitigation(void);
static void __init spectre_v2_user_update_mitigation(void);
static void __init spectre_v2_user_apply_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init ssb_apply_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init l1tf_apply_mitigation(void);
static void __init mds_select_mitigation(void);
static void __init mds_update_mitigation(void);
static void __init mds_apply_mitigation(void);
static void __init taa_select_mitigation(void);
static void __init taa_update_mitigation(void);
static void __init taa_apply_mitigation(void);
static void __init mmio_select_mitigation(void);
static void __init mmio_update_mitigation(void);
static void __init mmio_apply_mitigation(void);
static void __init rfds_select_mitigation(void);
static void __init rfds_update_mitigation(void);
static void __init rfds_apply_mitigation(void);
static void __init srbds_select_mitigation(void);
static void __init srbds_apply_mitigation(void);
static void __init l1d_flush_select_mitigation(void);
static void __init srso_select_mitigation(void);
static void __init srso_update_mitigation(void);
static void __init srso_apply_mitigation(void);
static void __init gds_select_mitigation(void);
static void __init gds_apply_mitigation(void);
static void __init bhi_select_mitigation(void);
static void __init bhi_update_mitigation(void);
static void __init bhi_apply_mitigation(void);
static void __init its_select_mitigation(void);
static void __init its_update_mitigation(void);
static void __init its_apply_mitigation(void);
static void __init tsa_select_mitigation(void);
static void __init tsa_apply_mitigation(void);

/* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);

/* The current value of the SPEC_CTRL MSR with task-specific bits set */
DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);

u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;

static u64 __ro_after_init x86_arch_cap_msr;

static DEFINE_MUTEX(spec_ctrl_mutex);

void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;

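/* Record the return thunk to be used by the return-sites patching and log which one is active. */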
static void __init set_return_thunk(void *thunk)
{
	x86_return_thunk = thunk;

	pr_info("active return thunk: %ps\n", thunk);
}

/* Update SPEC_CTRL MSR and its cached copy unconditionally */
static void update_spec_ctrl(u64 val)
{
	this_cpu_write(x86_spec_ctrl_current, val);
	wrmsrq(MSR_IA32_SPEC_CTRL, val);
}

/*
 * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
 * from x86_spec_ctrl_base due to STIBP/SSBD in __speculation_ctrl_update().
 */
void update_spec_ctrl_cond(u64 val)
{
	if (this_cpu_read(x86_spec_ctrl_current) == val)
		return;

	this_cpu_write(x86_spec_ctrl_current, val);

	/*
	 * When KERNEL_IBRS this MSR is written on return-to-user, unless
	 * forced the update can be delayed until that time.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
		wrmsrq(MSR_IA32_SPEC_CTRL, val);
}

noinstr u64 spec_ctrl_current(void)
{
	return this_cpu_read(x86_spec_ctrl_current);
}
EXPORT_SYMBOL_GPL(spec_ctrl_current);

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Control IBPB on vCPU load */
DEFINE_STATIC_KEY_FALSE(switch_vcpu_ibpb);
EXPORT_SYMBOL_GPL(switch_vcpu_ibpb);

/* Control CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);

/*
 * Controls whether l1d flush based mitigations are enabled,
 * based on hw features and admin setting via boot parameter
 * defaults to false
 */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

/*
 * Controls CPU Fill buffer clear before VMenter. This is a subset of
 * X86_FEATURE_CLEAR_CPU_BUF, and should only be enabled when KVM-only
 * mitigation is required.
 */
DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
EXPORT_SYMBOL_GPL(cpu_buf_vm_clear);

#undef pr_fmt
#define pr_fmt(fmt)	"mitigations: " fmt

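/* Log which attack vectors are being mitigated and the SMT mitigation policy. */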
static void __init cpu_print_attack_vectors(void)
{
	pr_info("Enabled attack vectors: ");

	if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL))
		pr_cont("user_kernel, ");

	if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER))
		pr_cont("user_user, ");

	if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST))
		pr_cont("guest_host, ");

	if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST))
		pr_cont("guest_guest, ");

	pr_cont("SMT mitigations: ");

	switch (smt_mitigations) {
	case SMT_MITIGATIONS_OFF:
		pr_cont("off\n");
		break;
	case SMT_MITIGATIONS_AUTO:
		pr_cont("auto\n");
		break;
	case SMT_MITIGATIONS_ON:
		pr_cont("on\n");
	}
}

void __init cpu_select_mitigations(void)
{
	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
		rdmsrq(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

		/*
		 * Previously running kernel (kexec), may have some controls
		 * turned ON. Clear them and let the mitigations setup below
		 * rediscover them based on configuration.
		 */
		x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
	}

	x86_arch_cap_msr = x86_read_arch_cap_msr();

	cpu_print_attack_vectors();

	/* Select the proper CPU mitigations before patching alternatives: */
	spectre_v1_select_mitigation();
	spectre_v2_select_mitigation();
	retbleed_select_mitigation();
	spectre_v2_user_select_mitigation();
	ssb_select_mitigation();
	l1tf_select_mitigation();
	mds_select_mitigation();
	taa_select_mitigation();
	mmio_select_mitigation();
	rfds_select_mitigation();
	srbds_select_mitigation();
	l1d_flush_select_mitigation();
	srso_select_mitigation();
	gds_select_mitigation();
	its_select_mitigation();
	bhi_select_mitigation();
	tsa_select_mitigation();

	/*
	 * After mitigations are selected, some may need to update their
	 * choices.
	 */
	spectre_v2_update_mitigation();
	/*
	 * retbleed_update_mitigation() relies on the state set by
	 * spectre_v2_update_mitigation(); specifically it wants to know about
	 * spectre_v2=ibrs.
	 */
	retbleed_update_mitigation();
	/*
	 * its_update_mitigation() depends on spectre_v2_update_mitigation()
	 * and retbleed_update_mitigation().
	 */
	its_update_mitigation();

	/*
	 * spectre_v2_user_update_mitigation() depends on
	 * retbleed_update_mitigation(), specifically the STIBP
	 * selection is forced for UNRET or IBPB.
	 */
	spectre_v2_user_update_mitigation();
	mds_update_mitigation();
	taa_update_mitigation();
	mmio_update_mitigation();
	rfds_update_mitigation();
	bhi_update_mitigation();
	/* srso_update_mitigation() depends on retbleed_update_mitigation(). */
	srso_update_mitigation();

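	/* All selections are final at this point: apply the chosen mitigations. */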
	spectre_v1_apply_mitigation();
	spectre_v2_apply_mitigation();
	retbleed_apply_mitigation();
	spectre_v2_user_apply_mitigation();
	ssb_apply_mitigation();
	l1tf_apply_mitigation();
	mds_apply_mitigation();
	taa_apply_mitigation();
	mmio_apply_mitigation();
	rfds_apply_mitigation();
	srbds_apply_mitigation();
	srso_apply_mitigation();
	gds_apply_mitigation();
	its_apply_mitigation();
	bhi_apply_mitigation();
	tsa_apply_mitigation();
}

/*
 * NOTE: This function is *only* called for SVM, since Intel uses
 * MSR_IA32_SPEC_CTRL for SSBD.
 */
void
x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 guestval, hostval;
	struct thread_info *ti = current_thread_info();

	/*
	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
	 */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		return;

	/*
	 * If the host has SSBD mitigation enabled, force it in the host's
	 * virtual MSR value. If it's not permanently enabled, evaluate
	 * current's TIF_SSBD thread flag.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
		hostval = SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);

	/* Sanitize the guest value */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

	if (hostval != guestval) {
		unsigned long tif;

		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
				 ssbd_spec_ctrl_to_tif(hostval);

		speculation_ctrl_update(tif);
	}
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);

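/* Enable SSBD via the virtualized SPEC_CTRL MSR when available, otherwise via the family-specific LS_CFG bit. */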
static void x86_amd_ssb_disable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
		wrmsrq(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		wrmsrq(MSR_AMD64_LS_CFG, msrval);
}

#undef pr_fmt
#define pr_fmt(fmt)	"MDS: " fmt

/*
 * Returns true if vulnerability should be mitigated based on the
 * selected attack vector controls.
 *
 * See Documentation/admin-guide/hw-vuln/attack_vector_controls.rst
 */
static bool __init should_mitigate_vuln(unsigned int bug)
{
	switch (bug) {
	/*
	 * The only runtime-selected spectre_v1 mitigations in the kernel are
	 * related to SWAPGS protection on kernel entry. Therefore, protection
	 * is only required for the user->kernel attack vector.
	 */
	case X86_BUG_SPECTRE_V1:
		return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL);

	case X86_BUG_SPECTRE_V2:
	case X86_BUG_RETBLEED:
	case X86_BUG_L1TF:
	case X86_BUG_ITS:
		return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
		       cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST);

	case X86_BUG_SPECTRE_V2_USER:
		return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
		       cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST);

	/*
	 * All the vulnerabilities below allow potentially leaking data
	 * across address spaces. Therefore, mitigation is required for
	 * any of these 4 attack vectors.
	 */
	case X86_BUG_MDS:
	case X86_BUG_TAA:
	case X86_BUG_MMIO_STALE_DATA:
	case X86_BUG_RFDS:
	case X86_BUG_SRBDS:
		return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
		       cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
		       cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
		       cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST);

	case X86_BUG_GDS:
		return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
		       cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
		       cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
		       cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST) ||
		       (smt_mitigations != SMT_MITIGATIONS_OFF);
	default:
		WARN(1, "Unknown bug %x\n", bug);
		return false;
	}
}

/* Default mitigation for MDS-affected CPUs */
static enum mds_mitigations mds_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_MDS) ? MDS_MITIGATION_AUTO : MDS_MITIGATION_OFF;
static bool mds_nosmt __ro_after_init = false;

static const char * const mds_strings[] = {
	[MDS_MITIGATION_OFF]	= "Vulnerable",
	[MDS_MITIGATION_FULL]	= "Mitigation: Clear CPU buffers",
	[MDS_MITIGATION_VMWERV]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
};

enum taa_mitigations {
	TAA_MITIGATION_OFF,
	TAA_MITIGATION_AUTO,
	TAA_MITIGATION_UCODE_NEEDED,
	TAA_MITIGATION_VERW,
	TAA_MITIGATION_TSX_DISABLED,
};

/* Default mitigation for TAA-affected CPUs */
static enum taa_mitigations taa_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_TAA) ? TAA_MITIGATION_AUTO : TAA_MITIGATION_OFF;

enum mmio_mitigations {
	MMIO_MITIGATION_OFF,
	MMIO_MITIGATION_AUTO,
	MMIO_MITIGATION_UCODE_NEEDED,
	MMIO_MITIGATION_VERW,
};

/* Default mitigation for Processor MMIO Stale Data vulnerabilities */
static enum mmio_mitigations mmio_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_MMIO_STALE_DATA) ? MMIO_MITIGATION_AUTO : MMIO_MITIGATION_OFF;

enum rfds_mitigations {
	RFDS_MITIGATION_OFF,
	RFDS_MITIGATION_AUTO,
	RFDS_MITIGATION_VERW,
	RFDS_MITIGATION_UCODE_NEEDED,
};

/* Default mitigation for Register File Data Sampling */
static enum rfds_mitigations rfds_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_AUTO : RFDS_MITIGATION_OFF;

/*
 * Set if any of MDS/TAA/MMIO/RFDS are going to enable VERW clearing
 * through X86_FEATURE_CLEAR_CPU_BUF on kernel and guest entry.
 */
static bool verw_clear_cpu_buf_mitigation_selected __ro_after_init;

static void __init mds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS)) {
		mds_mitigation = MDS_MITIGATION_OFF;
		return;
	}

	if (mds_mitigation == MDS_MITIGATION_AUTO) {
		if (should_mitigate_vuln(X86_BUG_MDS))
			mds_mitigation = MDS_MITIGATION_FULL;
		else
			mds_mitigation = MDS_MITIGATION_OFF;
	}

	if (mds_mitigation == MDS_MITIGATION_OFF)
		return;

	verw_clear_cpu_buf_mitigation_selected = true;
}

static void __init mds_update_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS))
		return;

	/* If TAA, MMIO, or RFDS are being mitigated, MDS gets mitigated too. */
	if (verw_clear_cpu_buf_mitigation_selected)
		mds_mitigation = MDS_MITIGATION_FULL;

	if (mds_mitigation == MDS_MITIGATION_FULL) {
		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
			mds_mitigation = MDS_MITIGATION_VMWERV;
	}

	pr_info("%s\n", mds_strings[mds_mitigation]);
}

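/* FULL and VMWERV both arm the VERW-based buffer clearing; VMWERV merely lacks the microcode that guarantees it works. */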
static void __init mds_apply_mitigation(void)
{
	if (mds_mitigation == MDS_MITIGATION_FULL ||
	    mds_mitigation == MDS_MITIGATION_VMWERV) {
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
		if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
		    (mds_nosmt || smt_mitigations == SMT_MITIGATIONS_ON))
			cpu_smt_disable(false);
	}
}

static int __init mds_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		mds_mitigation = MDS_MITIGATION_OFF;
	else if (!strcmp(str, "full"))
		mds_mitigation = MDS_MITIGATION_FULL;
	else if (!strcmp(str, "full,nosmt")) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_nosmt = true;
	}

	return 0;
}
early_param("mds", mds_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"TAA: " fmt

static bool taa_nosmt __ro_after_init;

static const char * const taa_strings[] = {
	[TAA_MITIGATION_OFF]		= "Vulnerable",
	[TAA_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
	[TAA_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
	[TAA_MITIGATION_TSX_DISABLED]	= "Mitigation: TSX disabled",
};

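/* TAA is only a concern when TSX (RTM) is actually enabled. */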
static bool __init taa_vulnerable(void)
{
	return boot_cpu_has_bug(X86_BUG_TAA) && boot_cpu_has(X86_FEATURE_RTM);
}

static void __init taa_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_TAA)) {
		taa_mitigation = TAA_MITIGATION_OFF;
		return;
	}

	/* TSX previously disabled by tsx=off */
	if (!boot_cpu_has(X86_FEATURE_RTM)) {
		taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
		return;
	}

	/* Microcode will be checked in taa_update_mitigation(). */
	if (taa_mitigation == TAA_MITIGATION_AUTO) {
		if (should_mitigate_vuln(X86_BUG_TAA))
			taa_mitigation = TAA_MITIGATION_VERW;
		else
			taa_mitigation = TAA_MITIGATION_OFF;
	}

	if (taa_mitigation != TAA_MITIGATION_OFF)
		verw_clear_cpu_buf_mitigation_selected = true;
}

static void __init taa_update_mitigation(void)
{
	if (!taa_vulnerable())
		return;

	if (verw_clear_cpu_buf_mitigation_selected)
		taa_mitigation = TAA_MITIGATION_VERW;

	if (taa_mitigation == TAA_MITIGATION_VERW) {
		/* Check if the requisite ucode is available. */
		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
			taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

		/*
		 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
		 * A microcode update fixes this behavior to clear CPU buffers. It also
		 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
		 * ARCH_CAP_TSX_CTRL_MSR bit.
		 *
		 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
		 * update is required.
		 */
		if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
		    !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
			taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
	}

	pr_info("%s\n", taa_strings[taa_mitigation]);
}

static void __init taa_apply_mitigation(void)
{
	if (taa_mitigation == TAA_MITIGATION_VERW ||
	    taa_mitigation == TAA_MITIGATION_UCODE_NEEDED) {
		/*
		 * TSX is enabled, select alternate mitigation for TAA which is
		 * the same as MDS. Enable MDS static branch to clear CPU buffers.
		 *
		 * For guests that can't determine whether the correct microcode is
		 * present on host, enable the mitigation for UCODE_NEEDED as well.
		 */
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);

		if (taa_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)
			cpu_smt_disable(false);
	}
}

static int __init tsx_async_abort_parse_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_TAA))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off")) {
		taa_mitigation = TAA_MITIGATION_OFF;
	} else if (!strcmp(str, "full")) {
		taa_mitigation = TAA_MITIGATION_VERW;
	} else if (!strcmp(str, "full,nosmt")) {
		taa_mitigation = TAA_MITIGATION_VERW;
		taa_nosmt = true;
	}

	return 0;
}
early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"MMIO Stale Data: " fmt

static bool mmio_nosmt __ro_after_init = false;

static const char * const mmio_strings[] = {
	[MMIO_MITIGATION_OFF]		= "Vulnerable",
	[MMIO_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
	[MMIO_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
};

static void __init mmio_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
	    cpu_mitigations_off()) {
		mmio_mitigation = MMIO_MITIGATION_OFF;
		return;
	}

	/* Microcode will be checked in mmio_update_mitigation(). */
	if (mmio_mitigation == MMIO_MITIGATION_AUTO) {
		if (should_mitigate_vuln(X86_BUG_MMIO_STALE_DATA))
			mmio_mitigation = MMIO_MITIGATION_VERW;
		else
			mmio_mitigation = MMIO_MITIGATION_OFF;
	}

	if (mmio_mitigation == MMIO_MITIGATION_OFF)
		return;

	/*
	 * Enable CPU buffer clear mitigation for host and VMM, if also affected
	 * by MDS or TAA.
	 */
	if (boot_cpu_has_bug(X86_BUG_MDS) || taa_vulnerable())
		verw_clear_cpu_buf_mitigation_selected = true;
}

static void __init mmio_update_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		return;

	if (verw_clear_cpu_buf_mitigation_selected)
		mmio_mitigation = MMIO_MITIGATION_VERW;

	if (mmio_mitigation == MMIO_MITIGATION_VERW) {
		/*
		 * Check if the system has the right microcode.
		 *
		 * CPU Fill buffer clear mitigation is enumerated by either an explicit
		 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
		 * affected systems.
		 */
		if (!((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
		      (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
		       boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
		       !(x86_arch_cap_msr & ARCH_CAP_MDS_NO))))
			mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
	}

	pr_info("%s\n", mmio_strings[mmio_mitigation]);
}

static void __init mmio_apply_mitigation(void)
{
	if (mmio_mitigation == MMIO_MITIGATION_OFF)
		return;

	/*
	 * Only enable the VMM mitigation if the CPU buffer clear mitigation is
	 * not being used.
	 */
	if (verw_clear_cpu_buf_mitigation_selected) {
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
		static_branch_disable(&cpu_buf_vm_clear);
	} else {
		static_branch_enable(&cpu_buf_vm_clear);
	}

	/*
	 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
	 * be propagated to uncore buffers, clearing the Fill buffers on idle
	 * is required irrespective of SMT state.
	 */
	if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
		static_branch_enable(&cpu_buf_idle_clear);

	if (mmio_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)
		cpu_smt_disable(false);
}

static int __init mmio_stale_data_parse_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off")) {
		mmio_mitigation = MMIO_MITIGATION_OFF;
	} else if (!strcmp(str, "full")) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
	} else if (!strcmp(str, "full,nosmt")) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
		mmio_nosmt = true;
	}

	return 0;
}
early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"Register File Data Sampling: " fmt

static const char * const rfds_strings[] = {
	[RFDS_MITIGATION_OFF]		= "Vulnerable",
	[RFDS_MITIGATION_VERW]		= "Mitigation: Clear Register File",
	[RFDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
};

static inline bool __init verw_clears_cpu_reg_file(void)
{
	return (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR);
}

static void __init rfds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_RFDS)) {
		rfds_mitigation = RFDS_MITIGATION_OFF;
		return;
	}

	if (rfds_mitigation == RFDS_MITIGATION_AUTO) {
		if (should_mitigate_vuln(X86_BUG_RFDS))
			rfds_mitigation = RFDS_MITIGATION_VERW;
		else
			rfds_mitigation = RFDS_MITIGATION_OFF;
	}

	if (rfds_mitigation == RFDS_MITIGATION_OFF)
		return;

	if (verw_clears_cpu_reg_file())
		verw_clear_cpu_buf_mitigation_selected = true;
}

static void __init rfds_update_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_RFDS))
		return;

	if (verw_clear_cpu_buf_mitigation_selected)
		rfds_mitigation = RFDS_MITIGATION_VERW;

	if (rfds_mitigation == RFDS_MITIGATION_VERW) {
		if (!verw_clears_cpu_reg_file())
			rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
	}

	pr_info("%s\n", rfds_strings[rfds_mitigation]);
}

static void __init rfds_apply_mitigation(void)
{
	if (rfds_mitigation == RFDS_MITIGATION_VERW)
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
}

static __init int rfds_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!boot_cpu_has_bug(X86_BUG_RFDS))
		return 0;

	if (!strcmp(str, "off"))
		rfds_mitigation = RFDS_MITIGATION_OFF;
	else if (!strcmp(str, "on"))
		rfds_mitigation = RFDS_MITIGATION_VERW;

	return 0;
}
early_param("reg_file_data_sampling", rfds_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"SRBDS: " fmt

enum srbds_mitigations {
	SRBDS_MITIGATION_OFF,
	SRBDS_MITIGATION_AUTO,
	SRBDS_MITIGATION_UCODE_NEEDED,
	SRBDS_MITIGATION_FULL,
	SRBDS_MITIGATION_TSX_OFF,
	SRBDS_MITIGATION_HYPERVISOR,
};

static enum srbds_mitigations srbds_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_SRBDS) ? SRBDS_MITIGATION_AUTO : SRBDS_MITIGATION_OFF;

static const char * const srbds_strings[] = {
	[SRBDS_MITIGATION_OFF]		= "Vulnerable",
	[SRBDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
	[SRBDS_MITIGATION_FULL]		= "Mitigation: Microcode",
	[SRBDS_MITIGATION_TSX_OFF]	= "Mitigation: TSX disabled",
	[SRBDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
};

static bool srbds_off;

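/* Program the RNGDS_MITG_DIS bit in MCU_OPT_CTRL to reflect the selected SRBDS mitigation. */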
void update_srbds_msr(void)
{
	u64 mcu_ctrl;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;

	if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
		return;

	/*
	 * An MDS_NO CPU for which SRBDS mitigation is not needed due to TSX
	 * being disabled and it hasn't received the SRBDS MSR microcode.
	 */
	if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
		return;

	rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);

	switch (srbds_mitigation) {
	case SRBDS_MITIGATION_OFF:
	case SRBDS_MITIGATION_TSX_OFF:
		mcu_ctrl |= RNGDS_MITG_DIS;
		break;
	case SRBDS_MITIGATION_FULL:
		mcu_ctrl &= ~RNGDS_MITG_DIS;
		break;
	default:
		break;
	}

	wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
}

static void __init srbds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_SRBDS)) {
		srbds_mitigation = SRBDS_MITIGATION_OFF;
		return;
	}

	if (srbds_mitigation == SRBDS_MITIGATION_AUTO) {
		if (should_mitigate_vuln(X86_BUG_SRBDS))
			srbds_mitigation = SRBDS_MITIGATION_FULL;
		else {
			srbds_mitigation = SRBDS_MITIGATION_OFF;
			return;
		}
	}

	/*
	 * Check to see if this is one of the MDS_NO systems supporting TSX that
	 * are only exposed to SRBDS when TSX is enabled or when CPU is affected
	 * by Processor MMIO Stale Data vulnerability.
	 */
	if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
	    !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
	else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
	else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
		srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
	else if (srbds_off)
		srbds_mitigation = SRBDS_MITIGATION_OFF;

	pr_info("%s\n", srbds_strings[srbds_mitigation]);
}

static void __init srbds_apply_mitigation(void)
{
	update_srbds_msr();
}

static int __init srbds_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return 0;

	srbds_off = !strcmp(str, "off");
	return 0;
}
early_param("srbds", srbds_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"L1D Flush : " fmt

enum l1d_flush_mitigations {
	L1D_FLUSH_OFF = 0,
	L1D_FLUSH_ON,
};

static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;

static void __init l1d_flush_select_mitigation(void)
{
	if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
		return;

	static_branch_enable(&switch_mm_cond_l1d_flush);
	pr_info("Conditional flush on switch_mm() enabled\n");
}

static int __init l1d_flush_parse_cmdline(char *str)
{
	if (!strcmp(str, "on"))
		l1d_flush_mitigation = L1D_FLUSH_ON;

	return 0;
}
early_param("l1d_flush", l1d_flush_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"GDS: " fmt

enum gds_mitigations {
	GDS_MITIGATION_OFF,
	GDS_MITIGATION_AUTO,
	GDS_MITIGATION_UCODE_NEEDED,
	GDS_MITIGATION_FORCE,
	GDS_MITIGATION_FULL,
	GDS_MITIGATION_FULL_LOCKED,
	GDS_MITIGATION_HYPERVISOR,
};

static enum gds_mitigations gds_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_GDS) ? GDS_MITIGATION_AUTO : GDS_MITIGATION_OFF;

static const char * const gds_strings[] = {
	[GDS_MITIGATION_OFF]		= "Vulnerable",
	[GDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
	[GDS_MITIGATION_FORCE]		= "Mitigation: AVX disabled, no microcode",
	[GDS_MITIGATION_FULL]		= "Mitigation: Microcode",
	[GDS_MITIGATION_FULL_LOCKED]	= "Mitigation: Microcode (locked)",
	[GDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
};

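/* True when the microcode-based GDS mitigation is in effect (full or locked). */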
bool gds_ucode_mitigated(void)
{
	return (gds_mitigation == GDS_MITIGATION_FULL ||
		gds_mitigation == GDS_MITIGATION_FULL_LOCKED);
}
EXPORT_SYMBOL_GPL(gds_ucode_mitigated);

void update_gds_msr(void)
{
	u64 mcu_ctrl_after;
	u64 mcu_ctrl;

	switch (gds_mitigation) {
	case GDS_MITIGATION_OFF:
		rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
		mcu_ctrl |= GDS_MITG_DIS;
		break;
	case GDS_MITIGATION_FULL_LOCKED:
		/*
		 * The LOCKED state comes from the boot CPU. APs might not have
		 * the same state. Make sure the mitigation is enabled on all
		 * CPUs.
		 */
	case GDS_MITIGATION_FULL:
		rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
		mcu_ctrl &= ~GDS_MITG_DIS;
		break;
	case GDS_MITIGATION_FORCE:
	case GDS_MITIGATION_UCODE_NEEDED:
	case GDS_MITIGATION_HYPERVISOR:
	case GDS_MITIGATION_AUTO:
		return;
	}

	wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);

	/*
	 * Check to make sure that the WRMSR value was not ignored. Writes to
	 * GDS_MITG_DIS will be ignored if this processor is locked but the boot
	 * processor was not.
	 */
	rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
	WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after);
}

static void __init gds_select_mitigation(void)
{
	u64 mcu_ctrl;

	if (!boot_cpu_has_bug(X86_BUG_GDS))
		return;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		gds_mitigation = GDS_MITIGATION_HYPERVISOR;
		return;
	}

	/* Will verify below that mitigation _can_ be disabled */
	if (gds_mitigation == GDS_MITIGATION_AUTO) {
		if (should_mitigate_vuln(X86_BUG_GDS))
			gds_mitigation = GDS_MITIGATION_FULL;
		else
			gds_mitigation = GDS_MITIGATION_OFF;
	}

	/* No microcode */
	if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
		if (gds_mitigation != GDS_MITIGATION_FORCE)
			gds_mitigation = GDS_MITIGATION_UCODE_NEEDED;
		return;
	}

	/* Microcode has mitigation, use it */
	if (gds_mitigation == GDS_MITIGATION_FORCE)
		gds_mitigation = GDS_MITIGATION_FULL;

	rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
	if (mcu_ctrl & GDS_MITG_LOCKED) {
		if (gds_mitigation == GDS_MITIGATION_OFF)
			pr_warn("Mitigation locked. Disable failed.\n");

		/*
		 * The mitigation is selected from the boot CPU. All other CPUs
		 * _should_ have the same state. If the boot CPU isn't locked
		 * but others are then update_gds_msr() will WARN() of the state
		 * mismatch. If the boot CPU is locked update_gds_msr() will
		 * ensure the other CPUs have the mitigation enabled.
		 */
		gds_mitigation = GDS_MITIGATION_FULL_LOCKED;
	}
}

static void __init gds_apply_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_GDS))
		return;

	/* Microcode is present */
	if (x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)
		update_gds_msr();
	else if (gds_mitigation == GDS_MITIGATION_FORCE) {
		/*
		 * This only needs to be done on the boot CPU so do it
		 * here rather than in update_gds_msr()
		 */
		setup_clear_cpu_cap(X86_FEATURE_AVX);
		pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
	}

	pr_info("%s\n", gds_strings[gds_mitigation]);
}

static int __init gds_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!boot_cpu_has_bug(X86_BUG_GDS))
		return 0;

	if (!strcmp(str, "off"))
		gds_mitigation = GDS_MITIGATION_OFF;
	else if (!strcmp(str, "force"))
		gds_mitigation = GDS_MITIGATION_FORCE;

	return 0;
}
early_param("gather_data_sampling", gds_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V1 : " fmt

enum spectre_v1_mitigation {
	SPECTRE_V1_MITIGATION_NONE,
	SPECTRE_V1_MITIGATION_AUTO,
};

static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V1) ? SPECTRE_V1_MITIGATION_AUTO : SPECTRE_V1_MITIGATION_NONE;

static const char * const spectre_v1_strings[] = {
	[SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
	[SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
};

/*
 * Does SMAP provide full mitigation against speculative kernel access to
 * userspace?
 */
static bool smap_works_speculatively(void)
{
	if (!boot_cpu_has(X86_FEATURE_SMAP))
		return false;

	/*
	 * On CPUs which are vulnerable to Meltdown, SMAP does not
	 * prevent speculative access to user data in the L1 cache.
	 * Consider SMAP to be non-functional as a mitigation on these
	 * CPUs.
	 */
	if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
		return false;

	return true;
}

static void __init spectre_v1_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
		spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;

	if (!should_mitigate_vuln(X86_BUG_SPECTRE_V1))
		spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
}

static void __init spectre_v1_apply_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
		return;

	if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
		/*
		 * With Spectre v1, a user can speculatively control either
		 * path of a conditional swapgs with a user-controlled GS
		 * value. The mitigation is to add lfences to both code paths.
		 *
		 * If FSGSBASE is enabled, the user can put a kernel address in
		 * GS, in which case SMAP provides no protection.
		 *
		 * If FSGSBASE is disabled, the user can only put a user space
		 * address in GS. That makes an attack harder, but still
		 * possible if there's no SMAP protection.
		 */
		if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
		    !smap_works_speculatively()) {
			/*
			 * Mitigation can be provided from SWAPGS itself or
			 * PTI as the CR3 write in the Meltdown mitigation
			 * is serializing.
			 *
			 * If neither is there, mitigate with an LFENCE to
			 * stop speculation through swapgs.
			 */
			if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
			    !boot_cpu_has(X86_FEATURE_PTI))
				setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);

			/*
			 * Enable lfences in the kernel entry (non-swapgs)
			 * paths, to prevent user entry from speculatively
			 * skipping swapgs.
			 */
			setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
		}
	}

	pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
}

static int __init nospectre_v1_cmdline(char *str)
{
	spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
	return 0;
}
early_param("nospectre_v1", nospectre_v1_cmdline);

enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE;

/* Depends on spectre_v2 mitigation selected already */
static inline bool cdt_possible(enum spectre_v2_mitigation mode)
{
	if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING) ||
	    !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE))
		return false;

	if (mode == SPECTRE_V2_RETPOLINE ||
	    mode == SPECTRE_V2_EIBRS_RETPOLINE)
		return true;

	return false;
}

#undef pr_fmt
#define pr_fmt(fmt)	"RETBleed: " fmt

enum its_mitigation {
	ITS_MITIGATION_OFF,
	ITS_MITIGATION_AUTO,
	ITS_MITIGATION_VMEXIT_ONLY,
	ITS_MITIGATION_ALIGNED_THUNKS,
	ITS_MITIGATION_RETPOLINE_STUFF,
};

static enum its_mitigation its_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_ITS) ? ITS_MITIGATION_AUTO : ITS_MITIGATION_OFF;

enum retbleed_mitigation {
	RETBLEED_MITIGATION_NONE,
	RETBLEED_MITIGATION_AUTO,
	RETBLEED_MITIGATION_UNRET,
	RETBLEED_MITIGATION_IBPB,
	RETBLEED_MITIGATION_IBRS,
	RETBLEED_MITIGATION_EIBRS,
	RETBLEED_MITIGATION_STUFF,
};

static const char * const retbleed_strings[] = {
	[RETBLEED_MITIGATION_NONE]	= "Vulnerable",
	[RETBLEED_MITIGATION_UNRET]	= "Mitigation: untrained return thunk",
	[RETBLEED_MITIGATION_IBPB]	= "Mitigation: IBPB",
	[RETBLEED_MITIGATION_IBRS]	= "Mitigation: IBRS",
	[RETBLEED_MITIGATION_EIBRS]	= "Mitigation: Enhanced IBRS",
	[RETBLEED_MITIGATION_STUFF]	= "Mitigation: Stuffing",
};

static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_RETBLEED) ? RETBLEED_MITIGATION_AUTO : RETBLEED_MITIGATION_NONE;

static int __ro_after_init retbleed_nosmt = false;

enum srso_mitigation {
	SRSO_MITIGATION_NONE,
	SRSO_MITIGATION_AUTO,
	SRSO_MITIGATION_UCODE_NEEDED,
	SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED,
	SRSO_MITIGATION_MICROCODE,
	SRSO_MITIGATION_NOSMT,
	SRSO_MITIGATION_SAFE_RET,
	SRSO_MITIGATION_IBPB,
	SRSO_MITIGATION_IBPB_ON_VMEXIT,
	SRSO_MITIGATION_BP_SPEC_REDUCE,
};

static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_AUTO;

static int __init retbleed_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	while (str) {
		char *next = strchr(str, ',');
		if (next) {
			*next = 0;
			next++;
		}

		if (!strcmp(str, "off")) {
			retbleed_mitigation = RETBLEED_MITIGATION_NONE;
		} else if (!strcmp(str, "auto")) {
			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
		} else if (!strcmp(str, "unret")) {
			retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
		} else if (!strcmp(str, "ibpb")) {
			retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
		} else if (!strcmp(str, "stuff")) {
			retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
		} else if (!strcmp(str, "nosmt")) {
			retbleed_nosmt = true;
		} else if (!strcmp(str, "force")) {
			setup_force_cpu_bug(X86_BUG_RETBLEED);
		} else {
			pr_err("Ignoring unknown retbleed option (%s).", str);
		}

		str = next;
	}

	return 0;
}
early_param("retbleed", retbleed_parse_cmdline);

#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"

static void __init retbleed_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_RETBLEED)) {
		retbleed_mitigation = RETBLEED_MITIGATION_NONE;
		return;
	}

	switch (retbleed_mitigation) {
	case RETBLEED_MITIGATION_UNRET:
		if (!IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) {
			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
			pr_err("WARNING: kernel not compiled with MITIGATION_UNRET_ENTRY.\n");
		}
		break;
	case RETBLEED_MITIGATION_IBPB:
		if (!boot_cpu_has(X86_FEATURE_IBPB)) {
			pr_err("WARNING: CPU does not support IBPB.\n");
			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
		} else if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
			pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
		}
		break;
	case RETBLEED_MITIGATION_STUFF:
		if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) {
			pr_err("WARNING: kernel not compiled with MITIGATION_CALL_DEPTH_TRACKING.\n");
			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
		} else if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
			pr_err("WARNING: retbleed=stuff only supported for Intel CPUs.\n");
			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
		}
		break;
	default:
		break;
	}

	if (retbleed_mitigation != RETBLEED_MITIGATION_AUTO)
		return;

	if (!should_mitigate_vuln(X86_BUG_RETBLEED)) {
		retbleed_mitigation = RETBLEED_MITIGATION_NONE;
		return;
	}

	/* Intel mitigation selected in retbleed_update_mitigation() */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY))
			retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
		else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY) &&
			 boot_cpu_has(X86_FEATURE_IBPB))
			retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
		else
			retbleed_mitigation = RETBLEED_MITIGATION_NONE;
	} else if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
		/* Final mitigation depends on spectre-v2 selection */
		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
			retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
		else if (boot_cpu_has(X86_FEATURE_IBRS))
			retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
		else
			retbleed_mitigation = RETBLEED_MITIGATION_NONE;
	}
}

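/* Re-evaluate retbleed once the spectre_v2, ITS and SRSO choices are known. */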
static void __init retbleed_update_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_RETBLEED))
		return;

	/* ITS can also enable stuffing */
	if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF)
		retbleed_mitigation = RETBLEED_MITIGATION_STUFF;

	/* If SRSO is using IBPB, that works for retbleed too */
	if (srso_mitigation == SRSO_MITIGATION_IBPB)
		retbleed_mitigation = RETBLEED_MITIGATION_IBPB;

	if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF &&
	    !cdt_possible(spectre_v2_enabled)) {
		pr_err("WARNING: retbleed=stuff depends on retpoline\n");
		retbleed_mitigation = RETBLEED_MITIGATION_NONE;
	}

	/*
	 * Let IBRS trump all on Intel without affecting the effects of the
	 * retbleed= cmdline option except for call depth based stuffing
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
		switch (spectre_v2_enabled) {
		case SPECTRE_V2_IBRS:
			retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
			break;
		case SPECTRE_V2_EIBRS:
		case SPECTRE_V2_EIBRS_RETPOLINE:
		case SPECTRE_V2_EIBRS_LFENCE:
			retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
			break;
		default:
			if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
				pr_err(RETBLEED_INTEL_MSG);
		}
	}

	pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
}

static void __init retbleed_apply_mitigation(void)
{
	bool mitigate_smt = false;

	switch (retbleed_mitigation) {
	case RETBLEED_MITIGATION_NONE:
		return;

	case RETBLEED_MITIGATION_UNRET:
		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
		setup_force_cpu_cap(X86_FEATURE_UNRET);

		set_return_thunk(retbleed_return_thunk);

		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
			pr_err(RETBLEED_UNTRAIN_MSG);

		mitigate_smt = true;
		break;

	case RETBLEED_MITIGATION_IBPB:
		setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
		setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
		mitigate_smt = true;

		/*
		 * IBPB on entry already obviates the need for
		 * software-based untraining so clear those in case some
		 * other mitigation like SRSO has selected them.
		 */
		setup_clear_cpu_cap(X86_FEATURE_UNRET);
		setup_clear_cpu_cap(X86_FEATURE_RETHUNK);

		/*
		 * There is no need for RSB filling: write_ibpb() ensures
		 * all predictions, including the RSB, are invalidated,
		 * regardless of IBPB implementation.
		 */
		setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);

		break;

	case RETBLEED_MITIGATION_STUFF:
		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
		setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);

		set_return_thunk(call_depth_return_thunk);
		break;

	default:
		break;
	}

	if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
	    (retbleed_nosmt || smt_mitigations == SMT_MITIGATIONS_ON))
		cpu_smt_disable(false);
}

#undef pr_fmt
#define pr_fmt(fmt)	"ITS: " fmt

static const char * const its_strings[] = {
	[ITS_MITIGATION_OFF]			= "Vulnerable",
	[ITS_MITIGATION_VMEXIT_ONLY]		= "Mitigation: Vulnerable, KVM: Not affected",
	[ITS_MITIGATION_ALIGNED_THUNKS]		= "Mitigation: Aligned branch/return thunks",
	[ITS_MITIGATION_RETPOLINE_STUFF]	= "Mitigation: Retpolines, Stuffing RSB",
};

static int __init its_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!IS_ENABLED(CONFIG_MITIGATION_ITS)) {
		pr_err("Mitigation disabled at compile time, ignoring option (%s)", str);
		return 0;
	}

	if (!strcmp(str, "off")) {
		its_mitigation = ITS_MITIGATION_OFF;
	} else if (!strcmp(str, "on")) {
		its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
	} else if (!strcmp(str, "force")) {
		its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
		setup_force_cpu_bug(X86_BUG_ITS);
	} else if (!strcmp(str, "vmexit")) {
		its_mitigation = ITS_MITIGATION_VMEXIT_ONLY;
	} else if (!strcmp(str, "stuff")) {
		its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
	} else {
		pr_err("Ignoring unknown indirect_target_selection option (%s).", str);
	}

	return 0;
}
early_param("indirect_target_selection", its_parse_cmdline);

static void __init its_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_ITS)) {
		its_mitigation = ITS_MITIGATION_OFF;
		return;
	}

	if (its_mitigation == ITS_MITIGATION_AUTO) {
		if (should_mitigate_vuln(X86_BUG_ITS))
			its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
		else
			its_mitigation = ITS_MITIGATION_OFF;
	}

	if (its_mitigation == ITS_MITIGATION_OFF)
		return;

	if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ||
	    !IS_ENABLED(CONFIG_MITIGATION_RETHUNK)) {
		pr_err("WARNING: ITS mitigation depends on retpoline and rethunk support\n");
		its_mitigation = ITS_MITIGATION_OFF;
		return;
	}

	if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) {
		pr_err("WARNING: ITS mitigation is not compatible with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B\n");
		its_mitigation = ITS_MITIGATION_OFF;
		return;
	}

	if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF &&
	    !IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) {
		pr_err("RSB stuff mitigation not supported, using default\n");
		its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
	}

	if (its_mitigation == ITS_MITIGATION_VMEXIT_ONLY &&
	    !boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY))
		its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
}

static void __init its_update_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_ITS))
		return;

	switch (spectre_v2_enabled) {
	case SPECTRE_V2_NONE:
		if (its_mitigation != ITS_MITIGATION_OFF)
			pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n");
		its_mitigation = ITS_MITIGATION_OFF;
		break;
	case SPECTRE_V2_RETPOLINE:
	case SPECTRE_V2_EIBRS_RETPOLINE:
		/* Retpoline+CDT mitigates ITS */
		if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF)
			its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
		break;
	case SPECTRE_V2_LFENCE:
	case SPECTRE_V2_EIBRS_LFENCE:
		pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n");
		its_mitigation = ITS_MITIGATION_OFF;
		break;
	default:
		break;
	}

	if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF &&
	    !cdt_possible(spectre_v2_enabled))
		its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;

	pr_info("%s\n", its_strings[its_mitigation]);
}

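/* Install the return thunk and feature bits required by the chosen ITS mitigation. */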
static void __init its_apply_mitigation(void)
{
	switch (its_mitigation) {
	case ITS_MITIGATION_OFF:
	case ITS_MITIGATION_AUTO:
	case ITS_MITIGATION_VMEXIT_ONLY:
		break;
	case ITS_MITIGATION_ALIGNED_THUNKS:
		if (!boot_cpu_has(X86_FEATURE_RETPOLINE))
			setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS);

		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
		set_return_thunk(its_return_thunk);
		break;
	case ITS_MITIGATION_RETPOLINE_STUFF:
		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
		setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
		set_return_thunk(call_depth_return_thunk);
		break;
	}
}

#undef pr_fmt
#define pr_fmt(fmt)	"Transient Scheduler Attacks: " fmt

enum tsa_mitigations {
	TSA_MITIGATION_NONE,
	TSA_MITIGATION_AUTO,
	TSA_MITIGATION_UCODE_NEEDED,
	TSA_MITIGATION_USER_KERNEL,
	TSA_MITIGATION_VM,
	TSA_MITIGATION_FULL,
};

static const char * const tsa_strings[] = {
	[TSA_MITIGATION_NONE]		= "Vulnerable",
	[TSA_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
	[TSA_MITIGATION_USER_KERNEL]	= "Mitigation: Clear CPU buffers: user/kernel boundary",
	[TSA_MITIGATION_VM]		= "Mitigation: Clear CPU buffers: VM",
	[TSA_MITIGATION_FULL]		= "Mitigation: Clear CPU buffers",
};

static enum tsa_mitigations tsa_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_TSA) ? TSA_MITIGATION_AUTO : TSA_MITIGATION_NONE;

static int __init tsa_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		tsa_mitigation = TSA_MITIGATION_NONE;
	else if (!strcmp(str, "on"))
		tsa_mitigation = TSA_MITIGATION_FULL;
	else if (!strcmp(str, "user"))
		tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
	else if (!strcmp(str, "vm"))
		tsa_mitigation = TSA_MITIGATION_VM;
	else
		pr_err("Ignoring unknown tsa=%s option.\n", str);

	return 0;
}
early_param("tsa", tsa_parse_cmdline);

static void __init tsa_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_TSA)) {
		tsa_mitigation = TSA_MITIGATION_NONE;
		return;
	}

	if (tsa_mitigation == TSA_MITIGATION_AUTO) {
		bool vm = false, uk = false;

		tsa_mitigation = TSA_MITIGATION_NONE;

		if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
		    cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER)) {
			tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
			uk = true;
		}

		if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
		    cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) {
			tsa_mitigation = TSA_MITIGATION_VM;
			vm = true;
		}

		if (uk && vm)
			tsa_mitigation = TSA_MITIGATION_FULL;
	}

	if (tsa_mitigation == TSA_MITIGATION_NONE)
		return;

	if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR))
		tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED;

	/*
	 * No need to set verw_clear_cpu_buf_mitigation_selected - it
	 * doesn't fit all cases here and it is not needed because this
	 * is the only VERW-based mitigation on AMD.
	 */
	pr_info("%s\n", tsa_strings[tsa_mitigation]);
}

static void __init tsa_apply_mitigation(void)
{
	switch (tsa_mitigation) {
	case TSA_MITIGATION_USER_KERNEL:
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
		break;
	case TSA_MITIGATION_VM:
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
		break;
	case TSA_MITIGATION_FULL:
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
		break;
	default:
		break;
	}
}

#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V2 : " fmt

static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
	SPECTRE_V2_USER_NONE;
static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
	SPECTRE_V2_USER_NONE;

#ifdef CONFIG_MITIGATION_RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
		return true;

	pr_err("System may be vulnerable to spectre v2\n");
	spectre_v2_bad_module = true;
	return false;
}

static inline const char *spectre_v2_module_string(void)
{
	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"

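/* Warn if unprivileged eBPF gets enabled while an eIBRS-based Spectre v2 mitigation is active. */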
#ifdef CONFIG_BPF_SYSCALL
void unpriv_ebpf_notify(int new_state)
{
	if (new_state)
		return;

	/* Unprivileged eBPF is enabled */

	switch (spectre_v2_enabled) {
	case SPECTRE_V2_EIBRS:
		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
		break;
	case SPECTRE_V2_EIBRS_LFENCE:
		if (sched_smt_active())
			pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
		break;
	default:
		break;
	}
}
#endif

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}

/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,
	SPECTRE_V2_CMD_AUTO,
	SPECTRE_V2_CMD_FORCE,
	SPECTRE_V2_CMD_RETPOLINE,
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
	SPECTRE_V2_CMD_RETPOLINE_LFENCE,
	SPECTRE_V2_CMD_EIBRS,
	SPECTRE_V2_CMD_EIBRS_RETPOLINE,
	SPECTRE_V2_CMD_EIBRS_LFENCE,
	SPECTRE_V2_CMD_IBRS,
};

static enum spectre_v2_mitigation_cmd spectre_v2_cmd __ro_after_init = SPECTRE_V2_CMD_AUTO;

enum spectre_v2_user_cmd {
	SPECTRE_V2_USER_CMD_NONE,
	SPECTRE_V2_USER_CMD_AUTO,
	SPECTRE_V2_USER_CMD_FORCE,
	SPECTRE_V2_USER_CMD_PRCTL,
	SPECTRE_V2_USER_CMD_PRCTL_IBPB,
	SPECTRE_V2_USER_CMD_SECCOMP,
	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
};

static const char * const spectre_v2_user_strings[] = {
	[SPECTRE_V2_USER_NONE]			= "User space: Vulnerable",
	[SPECTRE_V2_USER_STRICT]		= "User space: Mitigation: STIBP protection",
	[SPECTRE_V2_USER_STRICT_PREFERRED]	= "User space: Mitigation: STIBP always-on protection",
	[SPECTRE_V2_USER_PRCTL]			= "User space: Mitigation: STIBP via prctl",
	[SPECTRE_V2_USER_SECCOMP]		= "User space: Mitigation: STIBP via seccomp and prctl",
};

static const struct {
	const char			*option;
	enum spectre_v2_user_cmd	cmd;
	bool				secure;
} v2_user_options[] __initconst = {
	{ "auto",		SPECTRE_V2_USER_CMD_AUTO,		false },
	{ "off",		SPECTRE_V2_USER_CMD_NONE,		false },
	{ "on",			SPECTRE_V2_USER_CMD_FORCE,		true  },
	{ "prctl",		SPECTRE_V2_USER_CMD_PRCTL,		false },
	{ "prctl,ibpb",		SPECTRE_V2_USER_CMD_PRCTL_IBPB,		false },
	{ "seccomp",		SPECTRE_V2_USER_CMD_SECCOMP,		false },
	{ "seccomp,ibpb",	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,	false },
};

static void __init spec_v2_user_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}

" - vulnerable module loaded" : ""; 1774 } 1775 #else 1776 static inline const char *spectre_v2_module_string(void) { return ""; } 1777 #endif 1778 1779 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n" 1780 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n" 1781 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n" 1782 #define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n" 1783 1784 #ifdef CONFIG_BPF_SYSCALL 1785 void unpriv_ebpf_notify(int new_state) 1786 { 1787 if (new_state) 1788 return; 1789 1790 /* Unprivileged eBPF is enabled */ 1791 1792 switch (spectre_v2_enabled) { 1793 case SPECTRE_V2_EIBRS: 1794 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); 1795 break; 1796 case SPECTRE_V2_EIBRS_LFENCE: 1797 if (sched_smt_active()) 1798 pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); 1799 break; 1800 default: 1801 break; 1802 } 1803 } 1804 #endif 1805 1806 static inline bool match_option(const char *arg, int arglen, const char *opt) 1807 { 1808 int len = strlen(opt); 1809 1810 return len == arglen && !strncmp(arg, opt, len); 1811 } 1812 1813 /* The kernel command line selection for spectre v2 */ 1814 enum spectre_v2_mitigation_cmd { 1815 SPECTRE_V2_CMD_NONE, 1816 SPECTRE_V2_CMD_AUTO, 1817 SPECTRE_V2_CMD_FORCE, 1818 SPECTRE_V2_CMD_RETPOLINE, 1819 SPECTRE_V2_CMD_RETPOLINE_GENERIC, 1820 SPECTRE_V2_CMD_RETPOLINE_LFENCE, 1821 SPECTRE_V2_CMD_EIBRS, 1822 SPECTRE_V2_CMD_EIBRS_RETPOLINE, 1823 SPECTRE_V2_CMD_EIBRS_LFENCE, 1824 SPECTRE_V2_CMD_IBRS, 1825 }; 1826 1827 static enum spectre_v2_mitigation_cmd spectre_v2_cmd __ro_after_init = SPECTRE_V2_CMD_AUTO; 1828 1829 enum spectre_v2_user_cmd { 1830 SPECTRE_V2_USER_CMD_NONE, 1831 SPECTRE_V2_USER_CMD_AUTO, 1832 SPECTRE_V2_USER_CMD_FORCE, 1833 SPECTRE_V2_USER_CMD_PRCTL, 1834 SPECTRE_V2_USER_CMD_PRCTL_IBPB, 1835 SPECTRE_V2_USER_CMD_SECCOMP, 1836 SPECTRE_V2_USER_CMD_SECCOMP_IBPB, 1837 }; 1838 1839 static const char * const spectre_v2_user_strings[] = { 1840 [SPECTRE_V2_USER_NONE] = "User space: Vulnerable", 1841 [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection", 1842 [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection", 1843 [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl", 1844 [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl", 1845 }; 1846 1847 static const struct { 1848 const char *option; 1849 enum spectre_v2_user_cmd cmd; 1850 bool secure; 1851 } v2_user_options[] __initconst = { 1852 { "auto", SPECTRE_V2_USER_CMD_AUTO, false }, 1853 { "off", SPECTRE_V2_USER_CMD_NONE, false }, 1854 { "on", SPECTRE_V2_USER_CMD_FORCE, true }, 1855 { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false }, 1856 { "prctl,ibpb", SPECTRE_V2_USER_CMD_PRCTL_IBPB, false }, 1857 { "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false }, 1858 { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false }, 1859 }; 1860 1861 static void __init spec_v2_user_print_cond(const char *reason, bool secure) 1862 { 1863 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure) 1864 pr_info("spectre_v2_user=%s forced on command line.\n", reason); 1865 } 1866 1867 static enum spectre_v2_user_cmd __init spectre_v2_parse_user_cmdline(void) 1868 { 1869 char arg[20]; 1870 int ret, i; 
1871 1872 if (!IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2)) 1873 return SPECTRE_V2_USER_CMD_NONE; 1874 1875 ret = cmdline_find_option(boot_command_line, "spectre_v2_user", 1876 arg, sizeof(arg)); 1877 if (ret < 0) 1878 return SPECTRE_V2_USER_CMD_AUTO; 1879 1880 for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) { 1881 if (match_option(arg, ret, v2_user_options[i].option)) { 1882 spec_v2_user_print_cond(v2_user_options[i].option, 1883 v2_user_options[i].secure); 1884 return v2_user_options[i].cmd; 1885 } 1886 } 1887 1888 pr_err("Unknown user space protection option (%s). Switching to default\n", arg); 1889 return SPECTRE_V2_USER_CMD_AUTO; 1890 } 1891 1892 static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode) 1893 { 1894 return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS; 1895 } 1896 1897 static void __init spectre_v2_user_select_mitigation(void) 1898 { 1899 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP)) 1900 return; 1901 1902 switch (spectre_v2_parse_user_cmdline()) { 1903 case SPECTRE_V2_USER_CMD_NONE: 1904 return; 1905 case SPECTRE_V2_USER_CMD_FORCE: 1906 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; 1907 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT; 1908 break; 1909 case SPECTRE_V2_USER_CMD_AUTO: 1910 if (!should_mitigate_vuln(X86_BUG_SPECTRE_V2_USER)) 1911 break; 1912 spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL; 1913 if (smt_mitigations == SMT_MITIGATIONS_OFF) 1914 break; 1915 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL; 1916 break; 1917 case SPECTRE_V2_USER_CMD_PRCTL: 1918 spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL; 1919 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL; 1920 break; 1921 case SPECTRE_V2_USER_CMD_PRCTL_IBPB: 1922 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; 1923 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL; 1924 break; 1925 case SPECTRE_V2_USER_CMD_SECCOMP: 1926 if (IS_ENABLED(CONFIG_SECCOMP)) 1927 spectre_v2_user_ibpb = SPECTRE_V2_USER_SECCOMP; 1928 else 1929 spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL; 1930 spectre_v2_user_stibp = spectre_v2_user_ibpb; 1931 break; 1932 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB: 1933 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; 1934 if (IS_ENABLED(CONFIG_SECCOMP)) 1935 spectre_v2_user_stibp = SPECTRE_V2_USER_SECCOMP; 1936 else 1937 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL; 1938 break; 1939 } 1940 1941 /* 1942 * At this point, an STIBP mode other than "off" has been set. 1943 * If STIBP support is not being forced, check if STIBP always-on 1944 * is preferred. 
1945 */ 1946 if ((spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || 1947 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) && 1948 boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON)) 1949 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED; 1950 1951 if (!boot_cpu_has(X86_FEATURE_IBPB)) 1952 spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE; 1953 1954 if (!boot_cpu_has(X86_FEATURE_STIBP)) 1955 spectre_v2_user_stibp = SPECTRE_V2_USER_NONE; 1956 } 1957 1958 static void __init spectre_v2_user_update_mitigation(void) 1959 { 1960 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP)) 1961 return; 1962 1963 /* The spectre_v2 cmd line can override spectre_v2_user options */ 1964 if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE) { 1965 spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE; 1966 spectre_v2_user_stibp = SPECTRE_V2_USER_NONE; 1967 } else if (spectre_v2_cmd == SPECTRE_V2_CMD_FORCE) { 1968 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; 1969 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT; 1970 } 1971 1972 /* 1973 * If no STIBP, Intel enhanced IBRS is enabled, or SMT impossible, STIBP 1974 * is not required. 1975 * 1976 * Intel's Enhanced IBRS also protects against cross-thread branch target 1977 * injection in user-mode as the IBRS bit remains always set which 1978 * implicitly enables cross-thread protections. However, in legacy IBRS 1979 * mode, the IBRS bit is set only on kernel entry and cleared on return 1980 * to userspace. AMD Automatic IBRS also does not protect userspace. 1981 * These modes therefore disable the implicit cross-thread protection, 1982 * so allow for STIBP to be selected in those cases. 1983 */ 1984 if (!boot_cpu_has(X86_FEATURE_STIBP) || 1985 !cpu_smt_possible() || 1986 (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && 1987 !boot_cpu_has(X86_FEATURE_AUTOIBRS))) { 1988 spectre_v2_user_stibp = SPECTRE_V2_USER_NONE; 1989 return; 1990 } 1991 1992 if (spectre_v2_user_stibp != SPECTRE_V2_USER_NONE && 1993 (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || 1994 retbleed_mitigation == RETBLEED_MITIGATION_IBPB)) { 1995 if (spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT && 1996 spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT_PREFERRED) 1997 pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n"); 1998 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED; 1999 } 2000 pr_info("%s\n", spectre_v2_user_strings[spectre_v2_user_stibp]); 2001 } 2002 2003 static void __init spectre_v2_user_apply_mitigation(void) 2004 { 2005 /* Initialize Indirect Branch Prediction Barrier */ 2006 if (spectre_v2_user_ibpb != SPECTRE_V2_USER_NONE) { 2007 static_branch_enable(&switch_vcpu_ibpb); 2008 2009 switch (spectre_v2_user_ibpb) { 2010 case SPECTRE_V2_USER_STRICT: 2011 static_branch_enable(&switch_mm_always_ibpb); 2012 break; 2013 case SPECTRE_V2_USER_PRCTL: 2014 case SPECTRE_V2_USER_SECCOMP: 2015 static_branch_enable(&switch_mm_cond_ibpb); 2016 break; 2017 default: 2018 break; 2019 } 2020 2021 pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n", 2022 static_key_enabled(&switch_mm_always_ibpb) ? 
2023 "always-on" : "conditional"); 2024 } 2025 } 2026 2027 static const char * const spectre_v2_strings[] = { 2028 [SPECTRE_V2_NONE] = "Vulnerable", 2029 [SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines", 2030 [SPECTRE_V2_LFENCE] = "Mitigation: LFENCE", 2031 [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced / Automatic IBRS", 2032 [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced / Automatic IBRS + LFENCE", 2033 [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced / Automatic IBRS + Retpolines", 2034 [SPECTRE_V2_IBRS] = "Mitigation: IBRS", 2035 }; 2036 2037 static const struct { 2038 const char *option; 2039 enum spectre_v2_mitigation_cmd cmd; 2040 bool secure; 2041 } mitigation_options[] __initconst = { 2042 { "off", SPECTRE_V2_CMD_NONE, false }, 2043 { "on", SPECTRE_V2_CMD_FORCE, true }, 2044 { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false }, 2045 { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false }, 2046 { "retpoline,lfence", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false }, 2047 { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false }, 2048 { "eibrs", SPECTRE_V2_CMD_EIBRS, false }, 2049 { "eibrs,lfence", SPECTRE_V2_CMD_EIBRS_LFENCE, false }, 2050 { "eibrs,retpoline", SPECTRE_V2_CMD_EIBRS_RETPOLINE, false }, 2051 { "auto", SPECTRE_V2_CMD_AUTO, false }, 2052 { "ibrs", SPECTRE_V2_CMD_IBRS, false }, 2053 }; 2054 2055 static void __init spec_v2_print_cond(const char *reason, bool secure) 2056 { 2057 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure) 2058 pr_info("%s selected on command line.\n", reason); 2059 } 2060 2061 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) 2062 { 2063 enum spectre_v2_mitigation_cmd cmd; 2064 char arg[20]; 2065 int ret, i; 2066 2067 cmd = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ? SPECTRE_V2_CMD_AUTO : SPECTRE_V2_CMD_NONE; 2068 if (cmdline_find_option_bool(boot_command_line, "nospectre_v2")) 2069 return SPECTRE_V2_CMD_NONE; 2070 2071 ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); 2072 if (ret < 0) 2073 return cmd; 2074 2075 for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) { 2076 if (!match_option(arg, ret, mitigation_options[i].option)) 2077 continue; 2078 cmd = mitigation_options[i].cmd; 2079 break; 2080 } 2081 2082 if (i >= ARRAY_SIZE(mitigation_options)) { 2083 pr_err("unknown option (%s). Switching to default mode\n", arg); 2084 return cmd; 2085 } 2086 2087 if ((cmd == SPECTRE_V2_CMD_RETPOLINE || 2088 cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || 2089 cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC || 2090 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || 2091 cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && 2092 !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) { 2093 pr_err("%s selected but not compiled in. Switching to AUTO select\n", 2094 mitigation_options[i].option); 2095 return SPECTRE_V2_CMD_AUTO; 2096 } 2097 2098 if ((cmd == SPECTRE_V2_CMD_EIBRS || 2099 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || 2100 cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && 2101 !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { 2102 pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n", 2103 mitigation_options[i].option); 2104 return SPECTRE_V2_CMD_AUTO; 2105 } 2106 2107 if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || 2108 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) && 2109 !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { 2110 pr_err("%s selected, but CPU doesn't have a serializing LFENCE. 
Switching to AUTO select\n", 2111 mitigation_options[i].option); 2112 return SPECTRE_V2_CMD_AUTO; 2113 } 2114 2115 if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY)) { 2116 pr_err("%s selected but not compiled in. Switching to AUTO select\n", 2117 mitigation_options[i].option); 2118 return SPECTRE_V2_CMD_AUTO; 2119 } 2120 2121 if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { 2122 pr_err("%s selected but not Intel CPU. Switching to AUTO select\n", 2123 mitigation_options[i].option); 2124 return SPECTRE_V2_CMD_AUTO; 2125 } 2126 2127 if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) { 2128 pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n", 2129 mitigation_options[i].option); 2130 return SPECTRE_V2_CMD_AUTO; 2131 } 2132 2133 if (cmd == SPECTRE_V2_CMD_IBRS && cpu_feature_enabled(X86_FEATURE_XENPV)) { 2134 pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n", 2135 mitigation_options[i].option); 2136 return SPECTRE_V2_CMD_AUTO; 2137 } 2138 2139 spec_v2_print_cond(mitigation_options[i].option, 2140 mitigation_options[i].secure); 2141 return cmd; 2142 } 2143 2144 static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void) 2145 { 2146 if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) { 2147 pr_err("Kernel not compiled with retpoline; no mitigation available!"); 2148 return SPECTRE_V2_NONE; 2149 } 2150 2151 return SPECTRE_V2_RETPOLINE; 2152 } 2153 2154 static bool __ro_after_init rrsba_disabled; 2155 2156 /* Disable in-kernel use of non-RSB RET predictors */ 2157 static void __init spec_ctrl_disable_kernel_rrsba(void) 2158 { 2159 if (rrsba_disabled) 2160 return; 2161 2162 if (!(x86_arch_cap_msr & ARCH_CAP_RRSBA)) { 2163 rrsba_disabled = true; 2164 return; 2165 } 2166 2167 if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL)) 2168 return; 2169 2170 x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S; 2171 update_spec_ctrl(x86_spec_ctrl_base); 2172 rrsba_disabled = true; 2173 } 2174 2175 static void __init spectre_v2_select_rsb_mitigation(enum spectre_v2_mitigation mode) 2176 { 2177 /* 2178 * WARNING! There are many subtleties to consider when changing *any* 2179 * code related to RSB-related mitigations. Before doing so, carefully 2180 * read the following document, and update if necessary: 2181 * 2182 * Documentation/admin-guide/hw-vuln/rsb.rst 2183 * 2184 * In an overly simplified nutshell: 2185 * 2186 * - User->user RSB attacks are conditionally mitigated during 2187 * context switches by cond_mitigation -> write_ibpb(). 2188 * 2189 * - User->kernel and guest->host attacks are mitigated by eIBRS or 2190 * RSB filling. 2191 * 2192 * Though, depending on config, note that other alternative 2193 * mitigations may end up getting used instead, e.g., IBPB on 2194 * entry/vmexit, call depth tracking, or return thunks. 
2195 */ 2196 2197 switch (mode) { 2198 case SPECTRE_V2_NONE: 2199 break; 2200 2201 case SPECTRE_V2_EIBRS: 2202 case SPECTRE_V2_EIBRS_LFENCE: 2203 case SPECTRE_V2_EIBRS_RETPOLINE: 2204 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { 2205 pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n"); 2206 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE); 2207 } 2208 break; 2209 2210 case SPECTRE_V2_RETPOLINE: 2211 case SPECTRE_V2_LFENCE: 2212 case SPECTRE_V2_IBRS: 2213 pr_info("Spectre v2 / SpectreRSB: Filling RSB on context switch and VMEXIT\n"); 2214 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); 2215 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT); 2216 break; 2217 2218 default: 2219 pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation\n"); 2220 dump_stack(); 2221 break; 2222 } 2223 } 2224 2225 /* 2226 * Set BHI_DIS_S to prevent indirect branches in kernel to be influenced by 2227 * branch history in userspace. Not needed if BHI_NO is set. 2228 */ 2229 static bool __init spec_ctrl_bhi_dis(void) 2230 { 2231 if (!boot_cpu_has(X86_FEATURE_BHI_CTRL)) 2232 return false; 2233 2234 x86_spec_ctrl_base |= SPEC_CTRL_BHI_DIS_S; 2235 update_spec_ctrl(x86_spec_ctrl_base); 2236 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_HW); 2237 2238 return true; 2239 } 2240 2241 enum bhi_mitigations { 2242 BHI_MITIGATION_OFF, 2243 BHI_MITIGATION_AUTO, 2244 BHI_MITIGATION_ON, 2245 BHI_MITIGATION_VMEXIT_ONLY, 2246 }; 2247 2248 static enum bhi_mitigations bhi_mitigation __ro_after_init = 2249 IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_AUTO : BHI_MITIGATION_OFF; 2250 2251 static int __init spectre_bhi_parse_cmdline(char *str) 2252 { 2253 if (!str) 2254 return -EINVAL; 2255 2256 if (!strcmp(str, "off")) 2257 bhi_mitigation = BHI_MITIGATION_OFF; 2258 else if (!strcmp(str, "on")) 2259 bhi_mitigation = BHI_MITIGATION_ON; 2260 else if (!strcmp(str, "vmexit")) 2261 bhi_mitigation = BHI_MITIGATION_VMEXIT_ONLY; 2262 else 2263 pr_err("Ignoring unknown spectre_bhi option (%s)", str); 2264 2265 return 0; 2266 } 2267 early_param("spectre_bhi", spectre_bhi_parse_cmdline); 2268 2269 static void __init bhi_select_mitigation(void) 2270 { 2271 if (!boot_cpu_has(X86_BUG_BHI)) 2272 bhi_mitigation = BHI_MITIGATION_OFF; 2273 2274 if (bhi_mitigation != BHI_MITIGATION_AUTO) 2275 return; 2276 2277 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST)) { 2278 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL)) 2279 bhi_mitigation = BHI_MITIGATION_ON; 2280 else 2281 bhi_mitigation = BHI_MITIGATION_VMEXIT_ONLY; 2282 } else { 2283 bhi_mitigation = BHI_MITIGATION_OFF; 2284 } 2285 } 2286 2287 static void __init bhi_update_mitigation(void) 2288 { 2289 if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE) 2290 bhi_mitigation = BHI_MITIGATION_OFF; 2291 2292 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && 2293 spectre_v2_cmd == SPECTRE_V2_CMD_AUTO) 2294 bhi_mitigation = BHI_MITIGATION_OFF; 2295 } 2296 2297 static void __init bhi_apply_mitigation(void) 2298 { 2299 if (bhi_mitigation == BHI_MITIGATION_OFF) 2300 return; 2301 2302 /* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */ 2303 if (boot_cpu_has(X86_FEATURE_RETPOLINE) && 2304 !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) { 2305 spec_ctrl_disable_kernel_rrsba(); 2306 if (rrsba_disabled) 2307 return; 2308 } 2309 2310 if (!IS_ENABLED(CONFIG_X86_64)) 2311 return; 2312 2313 /* Mitigate in hardware if supported */ 2314 if (spec_ctrl_bhi_dis()) 2315 return; 2316 2317 if (bhi_mitigation == BHI_MITIGATION_VMEXIT_ONLY) { 2318 pr_info("Spectre BHI mitigation: SW BHB 
clearing on VM exit only\n"); 2319 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT); 2320 return; 2321 } 2322 2323 pr_info("Spectre BHI mitigation: SW BHB clearing on syscall and VM exit\n"); 2324 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP); 2325 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT); 2326 } 2327 2328 static void __init spectre_v2_select_mitigation(void) 2329 { 2330 spectre_v2_cmd = spectre_v2_parse_cmdline(); 2331 2332 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && 2333 (spectre_v2_cmd == SPECTRE_V2_CMD_NONE || spectre_v2_cmd == SPECTRE_V2_CMD_AUTO)) 2334 return; 2335 2336 switch (spectre_v2_cmd) { 2337 case SPECTRE_V2_CMD_NONE: 2338 return; 2339 2340 case SPECTRE_V2_CMD_AUTO: 2341 if (!should_mitigate_vuln(X86_BUG_SPECTRE_V2)) 2342 break; 2343 fallthrough; 2344 case SPECTRE_V2_CMD_FORCE: 2345 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { 2346 spectre_v2_enabled = SPECTRE_V2_EIBRS; 2347 break; 2348 } 2349 2350 spectre_v2_enabled = spectre_v2_select_retpoline(); 2351 break; 2352 2353 case SPECTRE_V2_CMD_RETPOLINE_LFENCE: 2354 pr_err(SPECTRE_V2_LFENCE_MSG); 2355 spectre_v2_enabled = SPECTRE_V2_LFENCE; 2356 break; 2357 2358 case SPECTRE_V2_CMD_RETPOLINE_GENERIC: 2359 spectre_v2_enabled = SPECTRE_V2_RETPOLINE; 2360 break; 2361 2362 case SPECTRE_V2_CMD_RETPOLINE: 2363 spectre_v2_enabled = spectre_v2_select_retpoline(); 2364 break; 2365 2366 case SPECTRE_V2_CMD_IBRS: 2367 spectre_v2_enabled = SPECTRE_V2_IBRS; 2368 break; 2369 2370 case SPECTRE_V2_CMD_EIBRS: 2371 spectre_v2_enabled = SPECTRE_V2_EIBRS; 2372 break; 2373 2374 case SPECTRE_V2_CMD_EIBRS_LFENCE: 2375 spectre_v2_enabled = SPECTRE_V2_EIBRS_LFENCE; 2376 break; 2377 2378 case SPECTRE_V2_CMD_EIBRS_RETPOLINE: 2379 spectre_v2_enabled = SPECTRE_V2_EIBRS_RETPOLINE; 2380 break; 2381 } 2382 } 2383 2384 static void __init spectre_v2_update_mitigation(void) 2385 { 2386 if (spectre_v2_cmd == SPECTRE_V2_CMD_AUTO && 2387 !spectre_v2_in_eibrs_mode(spectre_v2_enabled)) { 2388 if (IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY) && 2389 boot_cpu_has_bug(X86_BUG_RETBLEED) && 2390 retbleed_mitigation != RETBLEED_MITIGATION_NONE && 2391 retbleed_mitigation != RETBLEED_MITIGATION_STUFF && 2392 boot_cpu_has(X86_FEATURE_IBRS) && 2393 boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { 2394 spectre_v2_enabled = SPECTRE_V2_IBRS; 2395 } 2396 } 2397 2398 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) 2399 pr_info("%s\n", spectre_v2_strings[spectre_v2_enabled]); 2400 } 2401 2402 static void __init spectre_v2_apply_mitigation(void) 2403 { 2404 if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) 2405 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); 2406 2407 if (spectre_v2_in_ibrs_mode(spectre_v2_enabled)) { 2408 if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) { 2409 msr_set_bit(MSR_EFER, _EFER_AUTOIBRS); 2410 } else { 2411 x86_spec_ctrl_base |= SPEC_CTRL_IBRS; 2412 update_spec_ctrl(x86_spec_ctrl_base); 2413 } 2414 } 2415 2416 switch (spectre_v2_enabled) { 2417 case SPECTRE_V2_NONE: 2418 return; 2419 2420 case SPECTRE_V2_EIBRS: 2421 break; 2422 2423 case SPECTRE_V2_IBRS: 2424 setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS); 2425 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) 2426 pr_warn(SPECTRE_V2_IBRS_PERF_MSG); 2427 break; 2428 2429 case SPECTRE_V2_LFENCE: 2430 case SPECTRE_V2_EIBRS_LFENCE: 2431 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE); 2432 fallthrough; 2433 2434 case SPECTRE_V2_RETPOLINE: 2435 case SPECTRE_V2_EIBRS_RETPOLINE: 2436 setup_force_cpu_cap(X86_FEATURE_RETPOLINE); 2437 break; 2438 } 2439 2440 /* 2441 * Disable alternate RSB predictions in kernel 
when indirect CALLs and 2442 * JMPs gets protection against BHI and Intramode-BTI, but RET 2443 * prediction from a non-RSB predictor is still a risk. 2444 */ 2445 if (spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE || 2446 spectre_v2_enabled == SPECTRE_V2_EIBRS_RETPOLINE || 2447 spectre_v2_enabled == SPECTRE_V2_RETPOLINE) 2448 spec_ctrl_disable_kernel_rrsba(); 2449 2450 spectre_v2_select_rsb_mitigation(spectre_v2_enabled); 2451 2452 /* 2453 * Retpoline protects the kernel, but doesn't protect firmware. IBRS 2454 * and Enhanced IBRS protect firmware too, so enable IBRS around 2455 * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't 2456 * otherwise enabled. 2457 * 2458 * Use "spectre_v2_enabled" to check Enhanced IBRS instead of 2459 * boot_cpu_has(), because the user might select retpoline on the kernel 2460 * command line and if the CPU supports Enhanced IBRS, kernel might 2461 * un-intentionally not enable IBRS around firmware calls. 2462 */ 2463 if (boot_cpu_has_bug(X86_BUG_RETBLEED) && 2464 boot_cpu_has(X86_FEATURE_IBPB) && 2465 (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || 2466 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) { 2467 2468 if (retbleed_mitigation != RETBLEED_MITIGATION_IBPB) { 2469 setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW); 2470 pr_info("Enabling Speculation Barrier for firmware calls\n"); 2471 } 2472 2473 } else if (boot_cpu_has(X86_FEATURE_IBRS) && 2474 !spectre_v2_in_ibrs_mode(spectre_v2_enabled)) { 2475 setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); 2476 pr_info("Enabling Restricted Speculation for firmware calls\n"); 2477 } 2478 } 2479 2480 static void update_stibp_msr(void * __unused) 2481 { 2482 u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP); 2483 update_spec_ctrl(val); 2484 } 2485 2486 /* Update x86_spec_ctrl_base in case SMT state changed. */ 2487 static void update_stibp_strict(void) 2488 { 2489 u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP; 2490 2491 if (sched_smt_active()) 2492 mask |= SPEC_CTRL_STIBP; 2493 2494 if (mask == x86_spec_ctrl_base) 2495 return; 2496 2497 pr_info("Update user space SMT mitigation: STIBP %s\n", 2498 mask & SPEC_CTRL_STIBP ? "always-on" : "off"); 2499 x86_spec_ctrl_base = mask; 2500 on_each_cpu(update_stibp_msr, NULL, 1); 2501 } 2502 2503 /* Update the static key controlling the evaluation of TIF_SPEC_IB */ 2504 static void update_indir_branch_cond(void) 2505 { 2506 if (sched_smt_active()) 2507 static_branch_enable(&switch_to_cond_stibp); 2508 else 2509 static_branch_disable(&switch_to_cond_stibp); 2510 } 2511 2512 #undef pr_fmt 2513 #define pr_fmt(fmt) fmt 2514 2515 /* Update the static key controlling the MDS CPU buffer clear in idle */ 2516 static void update_mds_branch_idle(void) 2517 { 2518 /* 2519 * Enable the idle clearing if SMT is active on CPUs which are 2520 * affected only by MSBDS and not any other MDS variant. 2521 * 2522 * The other variants cannot be mitigated when SMT is enabled, so 2523 * clearing the buffers on idle just to prevent the Store Buffer 2524 * repartitioning leak would be a window dressing exercise. 2525 */ 2526 if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY)) 2527 return; 2528 2529 if (sched_smt_active()) { 2530 static_branch_enable(&cpu_buf_idle_clear); 2531 } else if (mmio_mitigation == MMIO_MITIGATION_OFF || 2532 (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) { 2533 static_branch_disable(&cpu_buf_idle_clear); 2534 } 2535 } 2536 2537 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. 
See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n" 2538 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n" 2539 #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n" 2540 2541 void cpu_bugs_smt_update(void) 2542 { 2543 mutex_lock(&spec_ctrl_mutex); 2544 2545 if (sched_smt_active() && unprivileged_ebpf_enabled() && 2546 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) 2547 pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); 2548 2549 switch (spectre_v2_user_stibp) { 2550 case SPECTRE_V2_USER_NONE: 2551 break; 2552 case SPECTRE_V2_USER_STRICT: 2553 case SPECTRE_V2_USER_STRICT_PREFERRED: 2554 update_stibp_strict(); 2555 break; 2556 case SPECTRE_V2_USER_PRCTL: 2557 case SPECTRE_V2_USER_SECCOMP: 2558 update_indir_branch_cond(); 2559 break; 2560 } 2561 2562 switch (mds_mitigation) { 2563 case MDS_MITIGATION_FULL: 2564 case MDS_MITIGATION_AUTO: 2565 case MDS_MITIGATION_VMWERV: 2566 if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY)) 2567 pr_warn_once(MDS_MSG_SMT); 2568 update_mds_branch_idle(); 2569 break; 2570 case MDS_MITIGATION_OFF: 2571 break; 2572 } 2573 2574 switch (taa_mitigation) { 2575 case TAA_MITIGATION_VERW: 2576 case TAA_MITIGATION_AUTO: 2577 case TAA_MITIGATION_UCODE_NEEDED: 2578 if (sched_smt_active()) 2579 pr_warn_once(TAA_MSG_SMT); 2580 break; 2581 case TAA_MITIGATION_TSX_DISABLED: 2582 case TAA_MITIGATION_OFF: 2583 break; 2584 } 2585 2586 switch (mmio_mitigation) { 2587 case MMIO_MITIGATION_VERW: 2588 case MMIO_MITIGATION_AUTO: 2589 case MMIO_MITIGATION_UCODE_NEEDED: 2590 if (sched_smt_active()) 2591 pr_warn_once(MMIO_MSG_SMT); 2592 break; 2593 case MMIO_MITIGATION_OFF: 2594 break; 2595 } 2596 2597 switch (tsa_mitigation) { 2598 case TSA_MITIGATION_USER_KERNEL: 2599 case TSA_MITIGATION_VM: 2600 case TSA_MITIGATION_AUTO: 2601 case TSA_MITIGATION_FULL: 2602 /* 2603 * TSA-SQ can potentially lead to info leakage between 2604 * SMT threads. 
2605 */ 2606 if (sched_smt_active()) 2607 static_branch_enable(&cpu_buf_idle_clear); 2608 else 2609 static_branch_disable(&cpu_buf_idle_clear); 2610 break; 2611 case TSA_MITIGATION_NONE: 2612 case TSA_MITIGATION_UCODE_NEEDED: 2613 break; 2614 } 2615 2616 mutex_unlock(&spec_ctrl_mutex); 2617 } 2618 2619 #undef pr_fmt 2620 #define pr_fmt(fmt) "Speculative Store Bypass: " fmt 2621 2622 static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE; 2623 2624 /* The kernel command line selection */ 2625 enum ssb_mitigation_cmd { 2626 SPEC_STORE_BYPASS_CMD_NONE, 2627 SPEC_STORE_BYPASS_CMD_AUTO, 2628 SPEC_STORE_BYPASS_CMD_ON, 2629 SPEC_STORE_BYPASS_CMD_PRCTL, 2630 SPEC_STORE_BYPASS_CMD_SECCOMP, 2631 }; 2632 2633 static const char * const ssb_strings[] = { 2634 [SPEC_STORE_BYPASS_NONE] = "Vulnerable", 2635 [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled", 2636 [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl", 2637 [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp", 2638 }; 2639 2640 static const struct { 2641 const char *option; 2642 enum ssb_mitigation_cmd cmd; 2643 } ssb_mitigation_options[] __initconst = { 2644 { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */ 2645 { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */ 2646 { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */ 2647 { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */ 2648 { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */ 2649 }; 2650 2651 static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void) 2652 { 2653 enum ssb_mitigation_cmd cmd; 2654 char arg[20]; 2655 int ret, i; 2656 2657 cmd = IS_ENABLED(CONFIG_MITIGATION_SSB) ? 2658 SPEC_STORE_BYPASS_CMD_AUTO : SPEC_STORE_BYPASS_CMD_NONE; 2659 if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") || 2660 cpu_mitigations_off()) { 2661 return SPEC_STORE_BYPASS_CMD_NONE; 2662 } else { 2663 ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable", 2664 arg, sizeof(arg)); 2665 if (ret < 0) 2666 return cmd; 2667 2668 for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) { 2669 if (!match_option(arg, ret, ssb_mitigation_options[i].option)) 2670 continue; 2671 2672 cmd = ssb_mitigation_options[i].cmd; 2673 break; 2674 } 2675 2676 if (i >= ARRAY_SIZE(ssb_mitigation_options)) { 2677 pr_err("unknown option (%s). Switching to default mode\n", arg); 2678 return cmd; 2679 } 2680 } 2681 2682 return cmd; 2683 } 2684 2685 static void __init ssb_select_mitigation(void) 2686 { 2687 enum ssb_mitigation_cmd cmd; 2688 2689 if (!boot_cpu_has(X86_FEATURE_SSBD)) 2690 goto out; 2691 2692 cmd = ssb_parse_cmdline(); 2693 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) && 2694 (cmd == SPEC_STORE_BYPASS_CMD_NONE || 2695 cmd == SPEC_STORE_BYPASS_CMD_AUTO)) 2696 return; 2697 2698 switch (cmd) { 2699 case SPEC_STORE_BYPASS_CMD_SECCOMP: 2700 /* 2701 * Choose prctl+seccomp as the default mode if seccomp is 2702 * enabled. 
2703 */ 2704 if (IS_ENABLED(CONFIG_SECCOMP)) 2705 ssb_mode = SPEC_STORE_BYPASS_SECCOMP; 2706 else 2707 ssb_mode = SPEC_STORE_BYPASS_PRCTL; 2708 break; 2709 case SPEC_STORE_BYPASS_CMD_ON: 2710 ssb_mode = SPEC_STORE_BYPASS_DISABLE; 2711 break; 2712 case SPEC_STORE_BYPASS_CMD_AUTO: 2713 case SPEC_STORE_BYPASS_CMD_PRCTL: 2714 ssb_mode = SPEC_STORE_BYPASS_PRCTL; 2715 break; 2716 case SPEC_STORE_BYPASS_CMD_NONE: 2717 break; 2718 } 2719 2720 out: 2721 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) 2722 pr_info("%s\n", ssb_strings[ssb_mode]); 2723 } 2724 2725 static void __init ssb_apply_mitigation(void) 2726 { 2727 /* 2728 * We have three CPU feature flags that are in play here: 2729 * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible. 2730 * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass 2731 * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation 2732 */ 2733 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) { 2734 setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE); 2735 /* 2736 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may 2737 * use a completely different MSR and bit dependent on family. 2738 */ 2739 if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) && 2740 !static_cpu_has(X86_FEATURE_AMD_SSBD)) { 2741 x86_amd_ssb_disable(); 2742 } else { 2743 x86_spec_ctrl_base |= SPEC_CTRL_SSBD; 2744 update_spec_ctrl(x86_spec_ctrl_base); 2745 } 2746 } 2747 } 2748 2749 #undef pr_fmt 2750 #define pr_fmt(fmt) "Speculation prctl: " fmt 2751 2752 static void task_update_spec_tif(struct task_struct *tsk) 2753 { 2754 /* Force the update of the real TIF bits */ 2755 set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE); 2756 2757 /* 2758 * Immediately update the speculation control MSRs for the current 2759 * task, but for a non-current task delay setting the CPU 2760 * mitigation until it is scheduled next. 2761 * 2762 * This can only happen for SECCOMP mitigation. For PRCTL it's 2763 * always the current task. 
2764 */ 2765 if (tsk == current) 2766 speculation_ctrl_update_current(); 2767 } 2768 2769 static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl) 2770 { 2771 2772 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush)) 2773 return -EPERM; 2774 2775 switch (ctrl) { 2776 case PR_SPEC_ENABLE: 2777 set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH); 2778 return 0; 2779 case PR_SPEC_DISABLE: 2780 clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH); 2781 return 0; 2782 default: 2783 return -ERANGE; 2784 } 2785 } 2786 2787 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) 2788 { 2789 if (ssb_mode != SPEC_STORE_BYPASS_PRCTL && 2790 ssb_mode != SPEC_STORE_BYPASS_SECCOMP) 2791 return -ENXIO; 2792 2793 switch (ctrl) { 2794 case PR_SPEC_ENABLE: 2795 /* If speculation is force disabled, enable is not allowed */ 2796 if (task_spec_ssb_force_disable(task)) 2797 return -EPERM; 2798 task_clear_spec_ssb_disable(task); 2799 task_clear_spec_ssb_noexec(task); 2800 task_update_spec_tif(task); 2801 break; 2802 case PR_SPEC_DISABLE: 2803 task_set_spec_ssb_disable(task); 2804 task_clear_spec_ssb_noexec(task); 2805 task_update_spec_tif(task); 2806 break; 2807 case PR_SPEC_FORCE_DISABLE: 2808 task_set_spec_ssb_disable(task); 2809 task_set_spec_ssb_force_disable(task); 2810 task_clear_spec_ssb_noexec(task); 2811 task_update_spec_tif(task); 2812 break; 2813 case PR_SPEC_DISABLE_NOEXEC: 2814 if (task_spec_ssb_force_disable(task)) 2815 return -EPERM; 2816 task_set_spec_ssb_disable(task); 2817 task_set_spec_ssb_noexec(task); 2818 task_update_spec_tif(task); 2819 break; 2820 default: 2821 return -ERANGE; 2822 } 2823 return 0; 2824 } 2825 2826 static bool is_spec_ib_user_controlled(void) 2827 { 2828 return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL || 2829 spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || 2830 spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || 2831 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP; 2832 } 2833 2834 static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) 2835 { 2836 switch (ctrl) { 2837 case PR_SPEC_ENABLE: 2838 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && 2839 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) 2840 return 0; 2841 2842 /* 2843 * With strict mode for both IBPB and STIBP, the instruction 2844 * code paths avoid checking this task flag and instead, 2845 * unconditionally run the instruction. However, STIBP and IBPB 2846 * are independent and either can be set to conditionally 2847 * enabled regardless of the mode of the other. 2848 * 2849 * If either is set to conditional, allow the task flag to be 2850 * updated, unless it was force-disabled by a previous prctl 2851 * call. Currently, this is possible on an AMD CPU which has the 2852 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the 2853 * kernel is booted with 'spectre_v2_user=seccomp', then 2854 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and 2855 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED. 2856 */ 2857 if (!is_spec_ib_user_controlled() || 2858 task_spec_ib_force_disable(task)) 2859 return -EPERM; 2860 2861 task_clear_spec_ib_disable(task); 2862 task_update_spec_tif(task); 2863 break; 2864 case PR_SPEC_DISABLE: 2865 case PR_SPEC_FORCE_DISABLE: 2866 /* 2867 * Indirect branch speculation is always allowed when 2868 * mitigation is force disabled. 
2869 */ 2870 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && 2871 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) 2872 return -EPERM; 2873 2874 if (!is_spec_ib_user_controlled()) 2875 return 0; 2876 2877 task_set_spec_ib_disable(task); 2878 if (ctrl == PR_SPEC_FORCE_DISABLE) 2879 task_set_spec_ib_force_disable(task); 2880 task_update_spec_tif(task); 2881 if (task == current) 2882 indirect_branch_prediction_barrier(); 2883 break; 2884 default: 2885 return -ERANGE; 2886 } 2887 return 0; 2888 } 2889 2890 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, 2891 unsigned long ctrl) 2892 { 2893 switch (which) { 2894 case PR_SPEC_STORE_BYPASS: 2895 return ssb_prctl_set(task, ctrl); 2896 case PR_SPEC_INDIRECT_BRANCH: 2897 return ib_prctl_set(task, ctrl); 2898 case PR_SPEC_L1D_FLUSH: 2899 return l1d_flush_prctl_set(task, ctrl); 2900 default: 2901 return -ENODEV; 2902 } 2903 } 2904 2905 #ifdef CONFIG_SECCOMP 2906 void arch_seccomp_spec_mitigate(struct task_struct *task) 2907 { 2908 if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) 2909 ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); 2910 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || 2911 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) 2912 ib_prctl_set(task, PR_SPEC_FORCE_DISABLE); 2913 } 2914 #endif 2915 2916 static int l1d_flush_prctl_get(struct task_struct *task) 2917 { 2918 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush)) 2919 return PR_SPEC_FORCE_DISABLE; 2920 2921 if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH)) 2922 return PR_SPEC_PRCTL | PR_SPEC_ENABLE; 2923 else 2924 return PR_SPEC_PRCTL | PR_SPEC_DISABLE; 2925 } 2926 2927 static int ssb_prctl_get(struct task_struct *task) 2928 { 2929 switch (ssb_mode) { 2930 case SPEC_STORE_BYPASS_NONE: 2931 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) 2932 return PR_SPEC_ENABLE; 2933 return PR_SPEC_NOT_AFFECTED; 2934 case SPEC_STORE_BYPASS_DISABLE: 2935 return PR_SPEC_DISABLE; 2936 case SPEC_STORE_BYPASS_SECCOMP: 2937 case SPEC_STORE_BYPASS_PRCTL: 2938 if (task_spec_ssb_force_disable(task)) 2939 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; 2940 if (task_spec_ssb_noexec(task)) 2941 return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC; 2942 if (task_spec_ssb_disable(task)) 2943 return PR_SPEC_PRCTL | PR_SPEC_DISABLE; 2944 return PR_SPEC_PRCTL | PR_SPEC_ENABLE; 2945 } 2946 BUG(); 2947 } 2948 2949 static int ib_prctl_get(struct task_struct *task) 2950 { 2951 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) 2952 return PR_SPEC_NOT_AFFECTED; 2953 2954 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && 2955 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) 2956 return PR_SPEC_ENABLE; 2957 else if (is_spec_ib_user_controlled()) { 2958 if (task_spec_ib_force_disable(task)) 2959 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; 2960 if (task_spec_ib_disable(task)) 2961 return PR_SPEC_PRCTL | PR_SPEC_DISABLE; 2962 return PR_SPEC_PRCTL | PR_SPEC_ENABLE; 2963 } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || 2964 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || 2965 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) 2966 return PR_SPEC_DISABLE; 2967 else 2968 return PR_SPEC_NOT_AFFECTED; 2969 } 2970 2971 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) 2972 { 2973 switch (which) { 2974 case PR_SPEC_STORE_BYPASS: 2975 return ssb_prctl_get(task); 2976 case PR_SPEC_INDIRECT_BRANCH: 2977 return ib_prctl_get(task); 2978 case PR_SPEC_L1D_FLUSH: 2979 return l1d_flush_prctl_get(task); 2980 default: 2981 return -ENODEV; 2982 } 2983 } 2984 2985 
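/*
 * Illustrative user-space sketch (an assumption for this document, not part
 * of the original file and not compiled here) of the prctl() interface that
 * ssb_prctl_set()/ib_prctl_set() above serve.  The PR_SPEC_* constants come
 * from <linux/prctl.h>:
 *
 *	#include <stdio.h>
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	int main(void)
 *	{
 *		// Query the current Speculative Store Bypass state; the
 *		// return value is a PR_SPEC_* bitmask, or -1 on error.
 *		int state = prctl(PR_GET_SPECULATION_CTRL,
 *				  PR_SPEC_STORE_BYPASS, 0, 0, 0);
 *
 *		// If the kernel selected the prctl/seccomp mode, opt this
 *		// task (and its future children) out of store bypass.
 *		if (state > 0 && (state & PR_SPEC_PRCTL))
 *			prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *			      PR_SPEC_DISABLE, 0, 0);
 *
 *		// The same calls work for PR_SPEC_INDIRECT_BRANCH and
 *		// PR_SPEC_L1D_FLUSH, mirroring arch_prctl_spec_ctrl_set().
 *		printf("SSB prctl state: 0x%x\n", state);
 *		return 0;
 *	}
 */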
void x86_spec_ctrl_setup_ap(void) 2986 { 2987 if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) 2988 update_spec_ctrl(x86_spec_ctrl_base); 2989 2990 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) 2991 x86_amd_ssb_disable(); 2992 } 2993 2994 bool itlb_multihit_kvm_mitigation; 2995 EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation); 2996 2997 #undef pr_fmt 2998 #define pr_fmt(fmt) "L1TF: " fmt 2999 3000 /* Default mitigation for L1TF-affected CPUs */ 3001 enum l1tf_mitigations l1tf_mitigation __ro_after_init = 3002 IS_ENABLED(CONFIG_MITIGATION_L1TF) ? L1TF_MITIGATION_AUTO : L1TF_MITIGATION_OFF; 3003 #if IS_ENABLED(CONFIG_KVM_INTEL) 3004 EXPORT_SYMBOL_GPL(l1tf_mitigation); 3005 #endif 3006 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; 3007 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); 3008 3009 /* 3010 * These CPUs all support 44bits physical address space internally in the 3011 * cache but CPUID can report a smaller number of physical address bits. 3012 * 3013 * The L1TF mitigation uses the top most address bit for the inversion of 3014 * non present PTEs. When the installed memory reaches into the top most 3015 * address bit due to memory holes, which has been observed on machines 3016 * which report 36bits physical address bits and have 32G RAM installed, 3017 * then the mitigation range check in l1tf_select_mitigation() triggers. 3018 * This is a false positive because the mitigation is still possible due to 3019 * the fact that the cache uses 44bit internally. Use the cache bits 3020 * instead of the reported physical bits and adjust them on the affected 3021 * machines to 44bit if the reported bits are less than 44. 3022 */ 3023 static void override_cache_bits(struct cpuinfo_x86 *c) 3024 { 3025 if (c->x86 != 6) 3026 return; 3027 3028 switch (c->x86_vfm) { 3029 case INTEL_NEHALEM: 3030 case INTEL_WESTMERE: 3031 case INTEL_SANDYBRIDGE: 3032 case INTEL_IVYBRIDGE: 3033 case INTEL_HASWELL: 3034 case INTEL_HASWELL_L: 3035 case INTEL_HASWELL_G: 3036 case INTEL_BROADWELL: 3037 case INTEL_BROADWELL_G: 3038 case INTEL_SKYLAKE_L: 3039 case INTEL_SKYLAKE: 3040 case INTEL_KABYLAKE_L: 3041 case INTEL_KABYLAKE: 3042 if (c->x86_cache_bits < 44) 3043 c->x86_cache_bits = 44; 3044 break; 3045 } 3046 } 3047 3048 static void __init l1tf_select_mitigation(void) 3049 { 3050 if (!boot_cpu_has_bug(X86_BUG_L1TF)) { 3051 l1tf_mitigation = L1TF_MITIGATION_OFF; 3052 return; 3053 } 3054 3055 if (l1tf_mitigation != L1TF_MITIGATION_AUTO) 3056 return; 3057 3058 if (!should_mitigate_vuln(X86_BUG_L1TF)) { 3059 l1tf_mitigation = L1TF_MITIGATION_OFF; 3060 return; 3061 } 3062 3063 if (smt_mitigations == SMT_MITIGATIONS_ON) 3064 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; 3065 else 3066 l1tf_mitigation = L1TF_MITIGATION_FLUSH; 3067 } 3068 3069 static void __init l1tf_apply_mitigation(void) 3070 { 3071 u64 half_pa; 3072 3073 if (!boot_cpu_has_bug(X86_BUG_L1TF)) 3074 return; 3075 3076 override_cache_bits(&boot_cpu_data); 3077 3078 switch (l1tf_mitigation) { 3079 case L1TF_MITIGATION_OFF: 3080 case L1TF_MITIGATION_FLUSH_NOWARN: 3081 case L1TF_MITIGATION_FLUSH: 3082 case L1TF_MITIGATION_AUTO: 3083 break; 3084 case L1TF_MITIGATION_FLUSH_NOSMT: 3085 case L1TF_MITIGATION_FULL: 3086 cpu_smt_disable(false); 3087 break; 3088 case L1TF_MITIGATION_FULL_FORCE: 3089 cpu_smt_disable(true); 3090 break; 3091 } 3092 3093 #if CONFIG_PGTABLE_LEVELS == 2 3094 pr_warn("Kernel not compiled for PAE. 
No mitigation for L1TF\n"); 3095 return; 3096 #endif 3097 3098 half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; 3099 if (l1tf_mitigation != L1TF_MITIGATION_OFF && 3100 e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { 3101 pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n"); 3102 pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n", 3103 half_pa); 3104 pr_info("However, doing so will make a part of your RAM unusable.\n"); 3105 pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n"); 3106 return; 3107 } 3108 3109 setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV); 3110 } 3111 3112 static int __init l1tf_cmdline(char *str) 3113 { 3114 if (!boot_cpu_has_bug(X86_BUG_L1TF)) 3115 return 0; 3116 3117 if (!str) 3118 return -EINVAL; 3119 3120 if (!strcmp(str, "off")) 3121 l1tf_mitigation = L1TF_MITIGATION_OFF; 3122 else if (!strcmp(str, "flush,nowarn")) 3123 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN; 3124 else if (!strcmp(str, "flush")) 3125 l1tf_mitigation = L1TF_MITIGATION_FLUSH; 3126 else if (!strcmp(str, "flush,nosmt")) 3127 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; 3128 else if (!strcmp(str, "full")) 3129 l1tf_mitigation = L1TF_MITIGATION_FULL; 3130 else if (!strcmp(str, "full,force")) 3131 l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE; 3132 3133 return 0; 3134 } 3135 early_param("l1tf", l1tf_cmdline); 3136 3137 #undef pr_fmt 3138 #define pr_fmt(fmt) "Speculative Return Stack Overflow: " fmt 3139 3140 static const char * const srso_strings[] = { 3141 [SRSO_MITIGATION_NONE] = "Vulnerable", 3142 [SRSO_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", 3143 [SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED] = "Vulnerable: Safe RET, no microcode", 3144 [SRSO_MITIGATION_MICROCODE] = "Vulnerable: Microcode, no safe RET", 3145 [SRSO_MITIGATION_NOSMT] = "Mitigation: SMT disabled", 3146 [SRSO_MITIGATION_SAFE_RET] = "Mitigation: Safe RET", 3147 [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB", 3148 [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only", 3149 [SRSO_MITIGATION_BP_SPEC_REDUCE] = "Mitigation: Reduced Speculation" 3150 }; 3151 3152 static int __init srso_parse_cmdline(char *str) 3153 { 3154 if (!str) 3155 return -EINVAL; 3156 3157 if (!strcmp(str, "off")) 3158 srso_mitigation = SRSO_MITIGATION_NONE; 3159 else if (!strcmp(str, "microcode")) 3160 srso_mitigation = SRSO_MITIGATION_MICROCODE; 3161 else if (!strcmp(str, "safe-ret")) 3162 srso_mitigation = SRSO_MITIGATION_SAFE_RET; 3163 else if (!strcmp(str, "ibpb")) 3164 srso_mitigation = SRSO_MITIGATION_IBPB; 3165 else if (!strcmp(str, "ibpb-vmexit")) 3166 srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT; 3167 else 3168 pr_err("Ignoring unknown SRSO option (%s).", str); 3169 3170 return 0; 3171 } 3172 early_param("spec_rstack_overflow", srso_parse_cmdline); 3173 3174 #define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options." 3175 3176 static void __init srso_select_mitigation(void) 3177 { 3178 if (!boot_cpu_has_bug(X86_BUG_SRSO)) { 3179 srso_mitigation = SRSO_MITIGATION_NONE; 3180 return; 3181 } 3182 3183 if (srso_mitigation == SRSO_MITIGATION_AUTO) { 3184 /* 3185 * Use safe-RET if user->kernel or guest->host protection is 3186 * required. Otherwise the 'microcode' mitigation is sufficient 3187 * to protect the user->user and guest->guest vectors. 
3188 */ 3189 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) || 3190 (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) && 3191 !boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO))) { 3192 srso_mitigation = SRSO_MITIGATION_SAFE_RET; 3193 } else if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) || 3194 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) { 3195 srso_mitigation = SRSO_MITIGATION_MICROCODE; 3196 } else { 3197 srso_mitigation = SRSO_MITIGATION_NONE; 3198 return; 3199 } 3200 } 3201 3202 /* Zen1/2 with SMT off aren't vulnerable to SRSO. */ 3203 if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) { 3204 srso_mitigation = SRSO_MITIGATION_NOSMT; 3205 return; 3206 } 3207 3208 if (!boot_cpu_has(X86_FEATURE_IBPB_BRTYPE)) { 3209 pr_warn("IBPB-extending microcode not applied!\n"); 3210 pr_warn(SRSO_NOTICE); 3211 3212 /* 3213 * Safe-RET provides partial mitigation without microcode, but 3214 * other mitigations require microcode to provide any 3215 * mitigations. 3216 */ 3217 if (srso_mitigation == SRSO_MITIGATION_SAFE_RET) 3218 srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED; 3219 else 3220 srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED; 3221 } 3222 3223 switch (srso_mitigation) { 3224 case SRSO_MITIGATION_SAFE_RET: 3225 case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED: 3226 if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO)) { 3227 srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT; 3228 goto ibpb_on_vmexit; 3229 } 3230 3231 if (!IS_ENABLED(CONFIG_MITIGATION_SRSO)) { 3232 pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n"); 3233 srso_mitigation = SRSO_MITIGATION_NONE; 3234 } 3235 break; 3236 ibpb_on_vmexit: 3237 case SRSO_MITIGATION_IBPB_ON_VMEXIT: 3238 if (boot_cpu_has(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) { 3239 pr_notice("Reducing speculation to address VM/HV SRSO attack vector.\n"); 3240 srso_mitigation = SRSO_MITIGATION_BP_SPEC_REDUCE; 3241 break; 3242 } 3243 fallthrough; 3244 case SRSO_MITIGATION_IBPB: 3245 if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) { 3246 pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n"); 3247 srso_mitigation = SRSO_MITIGATION_NONE; 3248 } 3249 break; 3250 default: 3251 break; 3252 } 3253 } 3254 3255 static void __init srso_update_mitigation(void) 3256 { 3257 /* If retbleed is using IBPB, that works for SRSO as well */ 3258 if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB && 3259 boot_cpu_has(X86_FEATURE_IBPB_BRTYPE)) 3260 srso_mitigation = SRSO_MITIGATION_IBPB; 3261 3262 if (boot_cpu_has_bug(X86_BUG_SRSO) && 3263 !cpu_mitigations_off()) 3264 pr_info("%s\n", srso_strings[srso_mitigation]); 3265 } 3266 3267 static void __init srso_apply_mitigation(void) 3268 { 3269 /* 3270 * Clear the feature flag if this mitigation is not selected as that 3271 * feature flag controls the BpSpecReduce MSR bit toggling in KVM. 3272 */ 3273 if (srso_mitigation != SRSO_MITIGATION_BP_SPEC_REDUCE) 3274 setup_clear_cpu_cap(X86_FEATURE_SRSO_BP_SPEC_REDUCE); 3275 3276 if (srso_mitigation == SRSO_MITIGATION_NONE) { 3277 if (boot_cpu_has(X86_FEATURE_SBPB)) 3278 x86_pred_cmd = PRED_CMD_SBPB; 3279 return; 3280 } 3281 3282 switch (srso_mitigation) { 3283 case SRSO_MITIGATION_SAFE_RET: 3284 case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED: 3285 /* 3286 * Enable the return thunk for generated code 3287 * like ftrace, static_call, etc. 
3288 */ 3289 setup_force_cpu_cap(X86_FEATURE_RETHUNK); 3290 setup_force_cpu_cap(X86_FEATURE_UNRET); 3291 3292 if (boot_cpu_data.x86 == 0x19) { 3293 setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS); 3294 set_return_thunk(srso_alias_return_thunk); 3295 } else { 3296 setup_force_cpu_cap(X86_FEATURE_SRSO); 3297 set_return_thunk(srso_return_thunk); 3298 } 3299 break; 3300 case SRSO_MITIGATION_IBPB: 3301 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); 3302 /* 3303 * IBPB on entry already obviates the need for 3304 * software-based untraining so clear those in case some 3305 * other mitigation like Retbleed has selected them. 3306 */ 3307 setup_clear_cpu_cap(X86_FEATURE_UNRET); 3308 setup_clear_cpu_cap(X86_FEATURE_RETHUNK); 3309 fallthrough; 3310 case SRSO_MITIGATION_IBPB_ON_VMEXIT: 3311 setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); 3312 /* 3313 * There is no need for RSB filling: entry_ibpb() ensures 3314 * all predictions, including the RSB, are invalidated, 3315 * regardless of IBPB implementation. 3316 */ 3317 setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT); 3318 break; 3319 default: 3320 break; 3321 } 3322 } 3323 3324 #undef pr_fmt 3325 #define pr_fmt(fmt) fmt 3326 3327 #ifdef CONFIG_SYSFS 3328 3329 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion" 3330 3331 #if IS_ENABLED(CONFIG_KVM_INTEL) 3332 static const char * const l1tf_vmx_states[] = { 3333 [VMENTER_L1D_FLUSH_AUTO] = "auto", 3334 [VMENTER_L1D_FLUSH_NEVER] = "vulnerable", 3335 [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes", 3336 [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes", 3337 [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled", 3338 [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary" 3339 }; 3340 3341 static ssize_t l1tf_show_state(char *buf) 3342 { 3343 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) 3344 return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG); 3345 3346 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED || 3347 (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER && 3348 sched_smt_active())) { 3349 return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG, 3350 l1tf_vmx_states[l1tf_vmx_mitigation]); 3351 } 3352 3353 return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG, 3354 l1tf_vmx_states[l1tf_vmx_mitigation], 3355 sched_smt_active() ? "vulnerable" : "disabled"); 3356 } 3357 3358 static ssize_t itlb_multihit_show_state(char *buf) 3359 { 3360 if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) || 3361 !boot_cpu_has(X86_FEATURE_VMX)) 3362 return sysfs_emit(buf, "KVM: Mitigation: VMX unsupported\n"); 3363 else if (!(cr4_read_shadow() & X86_CR4_VMXE)) 3364 return sysfs_emit(buf, "KVM: Mitigation: VMX disabled\n"); 3365 else if (itlb_multihit_kvm_mitigation) 3366 return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n"); 3367 else 3368 return sysfs_emit(buf, "KVM: Vulnerable\n"); 3369 } 3370 #else 3371 static ssize_t l1tf_show_state(char *buf) 3372 { 3373 return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG); 3374 } 3375 3376 static ssize_t itlb_multihit_show_state(char *buf) 3377 { 3378 return sysfs_emit(buf, "Processor vulnerable\n"); 3379 } 3380 #endif 3381 3382 static ssize_t mds_show_state(char *buf) 3383 { 3384 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { 3385 return sysfs_emit(buf, "%s; SMT Host state unknown\n", 3386 mds_strings[mds_mitigation]); 3387 } 3388 3389 if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) { 3390 return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], 3391 (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" : 3392 sched_smt_active() ? 
"mitigated" : "disabled")); 3393 } 3394 3395 return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], 3396 sched_smt_active() ? "vulnerable" : "disabled"); 3397 } 3398 3399 static ssize_t tsx_async_abort_show_state(char *buf) 3400 { 3401 if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) || 3402 (taa_mitigation == TAA_MITIGATION_OFF)) 3403 return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]); 3404 3405 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { 3406 return sysfs_emit(buf, "%s; SMT Host state unknown\n", 3407 taa_strings[taa_mitigation]); 3408 } 3409 3410 return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation], 3411 sched_smt_active() ? "vulnerable" : "disabled"); 3412 } 3413 3414 static ssize_t mmio_stale_data_show_state(char *buf) 3415 { 3416 if (mmio_mitigation == MMIO_MITIGATION_OFF) 3417 return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]); 3418 3419 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { 3420 return sysfs_emit(buf, "%s; SMT Host state unknown\n", 3421 mmio_strings[mmio_mitigation]); 3422 } 3423 3424 return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation], 3425 sched_smt_active() ? "vulnerable" : "disabled"); 3426 } 3427 3428 static ssize_t rfds_show_state(char *buf) 3429 { 3430 return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]); 3431 } 3432 3433 static ssize_t old_microcode_show_state(char *buf) 3434 { 3435 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) 3436 return sysfs_emit(buf, "Unknown: running under hypervisor"); 3437 3438 return sysfs_emit(buf, "Vulnerable\n"); 3439 } 3440 3441 static ssize_t its_show_state(char *buf) 3442 { 3443 return sysfs_emit(buf, "%s\n", its_strings[its_mitigation]); 3444 } 3445 3446 static char *stibp_state(void) 3447 { 3448 if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && 3449 !boot_cpu_has(X86_FEATURE_AUTOIBRS)) 3450 return ""; 3451 3452 switch (spectre_v2_user_stibp) { 3453 case SPECTRE_V2_USER_NONE: 3454 return "; STIBP: disabled"; 3455 case SPECTRE_V2_USER_STRICT: 3456 return "; STIBP: forced"; 3457 case SPECTRE_V2_USER_STRICT_PREFERRED: 3458 return "; STIBP: always-on"; 3459 case SPECTRE_V2_USER_PRCTL: 3460 case SPECTRE_V2_USER_SECCOMP: 3461 if (static_key_enabled(&switch_to_cond_stibp)) 3462 return "; STIBP: conditional"; 3463 } 3464 return ""; 3465 } 3466 3467 static char *ibpb_state(void) 3468 { 3469 if (boot_cpu_has(X86_FEATURE_IBPB)) { 3470 if (static_key_enabled(&switch_mm_always_ibpb)) 3471 return "; IBPB: always-on"; 3472 if (static_key_enabled(&switch_mm_cond_ibpb)) 3473 return "; IBPB: conditional"; 3474 return "; IBPB: disabled"; 3475 } 3476 return ""; 3477 } 3478 3479 static char *pbrsb_eibrs_state(void) 3480 { 3481 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { 3482 if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) || 3483 boot_cpu_has(X86_FEATURE_RSB_VMEXIT)) 3484 return "; PBRSB-eIBRS: SW sequence"; 3485 else 3486 return "; PBRSB-eIBRS: Vulnerable"; 3487 } else { 3488 return "; PBRSB-eIBRS: Not affected"; 3489 } 3490 } 3491 3492 static const char *spectre_bhi_state(void) 3493 { 3494 if (!boot_cpu_has_bug(X86_BUG_BHI)) 3495 return "; BHI: Not affected"; 3496 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW)) 3497 return "; BHI: BHI_DIS_S"; 3498 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP)) 3499 return "; BHI: SW loop, KVM: SW loop"; 3500 else if (boot_cpu_has(X86_FEATURE_RETPOLINE) && 3501 !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) && 3502 rrsba_disabled) 3503 return "; BHI: Retpoline"; 3504 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_VMEXIT)) 3505 return "; BHI: 
Vulnerable, KVM: SW loop"; 3506 3507 return "; BHI: Vulnerable"; 3508 } 3509 3510 static ssize_t spectre_v2_show_state(char *buf) 3511 { 3512 if (spectre_v2_enabled == SPECTRE_V2_LFENCE) 3513 return sysfs_emit(buf, "Vulnerable: LFENCE\n"); 3514 3515 if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) 3516 return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n"); 3517 3518 if (sched_smt_active() && unprivileged_ebpf_enabled() && 3519 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) 3520 return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n"); 3521 3522 return sysfs_emit(buf, "%s%s%s%s%s%s%s%s\n", 3523 spectre_v2_strings[spectre_v2_enabled], 3524 ibpb_state(), 3525 boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? "; IBRS_FW" : "", 3526 stibp_state(), 3527 boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? "; RSB filling" : "", 3528 pbrsb_eibrs_state(), 3529 spectre_bhi_state(), 3530 /* this should always be at the end */ 3531 spectre_v2_module_string()); 3532 } 3533 3534 static ssize_t srbds_show_state(char *buf) 3535 { 3536 return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]); 3537 } 3538 3539 static ssize_t retbleed_show_state(char *buf) 3540 { 3541 if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || 3542 retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { 3543 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && 3544 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) 3545 return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n"); 3546 3547 return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation], 3548 !sched_smt_active() ? "disabled" : 3549 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || 3550 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ? 
3551 "enabled with STIBP protection" : "vulnerable"); 3552 } 3553 3554 return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]); 3555 } 3556 3557 static ssize_t srso_show_state(char *buf) 3558 { 3559 return sysfs_emit(buf, "%s\n", srso_strings[srso_mitigation]); 3560 } 3561 3562 static ssize_t gds_show_state(char *buf) 3563 { 3564 return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]); 3565 } 3566 3567 static ssize_t tsa_show_state(char *buf) 3568 { 3569 return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]); 3570 } 3571 3572 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, 3573 char *buf, unsigned int bug) 3574 { 3575 if (!boot_cpu_has_bug(bug)) 3576 return sysfs_emit(buf, "Not affected\n"); 3577 3578 switch (bug) { 3579 case X86_BUG_CPU_MELTDOWN: 3580 if (boot_cpu_has(X86_FEATURE_PTI)) 3581 return sysfs_emit(buf, "Mitigation: PTI\n"); 3582 3583 if (hypervisor_is_type(X86_HYPER_XEN_PV)) 3584 return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n"); 3585 3586 break; 3587 3588 case X86_BUG_SPECTRE_V1: 3589 return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]); 3590 3591 case X86_BUG_SPECTRE_V2: 3592 return spectre_v2_show_state(buf); 3593 3594 case X86_BUG_SPEC_STORE_BYPASS: 3595 return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]); 3596 3597 case X86_BUG_L1TF: 3598 if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV)) 3599 return l1tf_show_state(buf); 3600 break; 3601 3602 case X86_BUG_MDS: 3603 return mds_show_state(buf); 3604 3605 case X86_BUG_TAA: 3606 return tsx_async_abort_show_state(buf); 3607 3608 case X86_BUG_ITLB_MULTIHIT: 3609 return itlb_multihit_show_state(buf); 3610 3611 case X86_BUG_SRBDS: 3612 return srbds_show_state(buf); 3613 3614 case X86_BUG_MMIO_STALE_DATA: 3615 return mmio_stale_data_show_state(buf); 3616 3617 case X86_BUG_RETBLEED: 3618 return retbleed_show_state(buf); 3619 3620 case X86_BUG_SRSO: 3621 return srso_show_state(buf); 3622 3623 case X86_BUG_GDS: 3624 return gds_show_state(buf); 3625 3626 case X86_BUG_RFDS: 3627 return rfds_show_state(buf); 3628 3629 case X86_BUG_OLD_MICROCODE: 3630 return old_microcode_show_state(buf); 3631 3632 case X86_BUG_ITS: 3633 return its_show_state(buf); 3634 3635 case X86_BUG_TSA: 3636 return tsa_show_state(buf); 3637 3638 default: 3639 break; 3640 } 3641 3642 return sysfs_emit(buf, "Vulnerable\n"); 3643 } 3644 3645 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) 3646 { 3647 return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN); 3648 } 3649 3650 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) 3651 { 3652 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1); 3653 } 3654 3655 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) 3656 { 3657 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2); 3658 } 3659 3660 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) 3661 { 3662 return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS); 3663 } 3664 3665 ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf) 3666 { 3667 return cpu_show_common(dev, attr, buf, X86_BUG_L1TF); 3668 } 3669 3670 ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf) 3671 { 3672 return cpu_show_common(dev, attr, buf, X86_BUG_MDS); 3673 } 3674 3675 ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute 
*attr, char *buf) 3676 { 3677 return cpu_show_common(dev, attr, buf, X86_BUG_TAA); 3678 } 3679 3680 ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf) 3681 { 3682 return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT); 3683 } 3684 3685 ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf) 3686 { 3687 return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS); 3688 } 3689 3690 ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf) 3691 { 3692 return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA); 3693 } 3694 3695 ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf) 3696 { 3697 return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED); 3698 } 3699 3700 ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf) 3701 { 3702 return cpu_show_common(dev, attr, buf, X86_BUG_SRSO); 3703 } 3704 3705 ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf) 3706 { 3707 return cpu_show_common(dev, attr, buf, X86_BUG_GDS); 3708 } 3709 3710 ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attribute *attr, char *buf) 3711 { 3712 return cpu_show_common(dev, attr, buf, X86_BUG_RFDS); 3713 } 3714 3715 ssize_t cpu_show_old_microcode(struct device *dev, struct device_attribute *attr, char *buf) 3716 { 3717 return cpu_show_common(dev, attr, buf, X86_BUG_OLD_MICROCODE); 3718 } 3719 3720 ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf) 3721 { 3722 return cpu_show_common(dev, attr, buf, X86_BUG_ITS); 3723 } 3724 3725 ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf) 3726 { 3727 return cpu_show_common(dev, attr, buf, X86_BUG_TSA); 3728 } 3729 #endif 3730 3731 void __warn_thunk(void) 3732 { 3733 WARN_ONCE(1, "Unpatched return thunk in use. This should not happen!\n"); 3734 } 3735
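/*
 * Illustrative only (not part of the original file): the cpu_show_*()
 * handlers above back the files under
 * /sys/devices/system/cpu/vulnerabilities/.  A minimal user-space reader
 * for one of them, assuming nothing beyond standard C I/O:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[256];
 *		FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");
 *
 *		if (f && fgets(line, sizeof(line), f))
 *			fputs(line, stdout);	// sysfs strings already end in '\n'
 *		if (f)
 *			fclose(f);
 *		return 0;
 *	}
 */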