1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 1994 Linus Torvalds 4 * 5 * Cyrix stuff, June 1998 by: 6 * - Rafael R. Reilova (moved everything from head.S), 7 * <rreilova@ececs.uc.edu> 8 * - Channing Corn (tests & fixes), 9 * - Andrew D. Balsa (code cleanup). 10 */ 11 #include <linux/init.h> 12 #include <linux/cpu.h> 13 #include <linux/module.h> 14 #include <linux/nospec.h> 15 #include <linux/prctl.h> 16 #include <linux/sched/smt.h> 17 #include <linux/pgtable.h> 18 #include <linux/bpf.h> 19 20 #include <asm/spec-ctrl.h> 21 #include <asm/cmdline.h> 22 #include <asm/bugs.h> 23 #include <asm/processor.h> 24 #include <asm/processor-flags.h> 25 #include <asm/fpu/api.h> 26 #include <asm/msr.h> 27 #include <asm/vmx.h> 28 #include <asm/paravirt.h> 29 #include <asm/cpu_device_id.h> 30 #include <asm/e820/api.h> 31 #include <asm/hypervisor.h> 32 #include <asm/tlbflush.h> 33 #include <asm/cpu.h> 34 35 #include "cpu.h" 36 37 /* 38 * Speculation Vulnerability Handling 39 * 40 * Each vulnerability is handled with the following functions: 41 * <vuln>_select_mitigation() -- Selects a mitigation to use. This should 42 * take into account all relevant command line 43 * options. 44 * <vuln>_update_mitigation() -- This is called after all vulnerabilities have 45 * selected a mitigation, in case the selection 46 * may want to change based on other choices 47 * made. This function is optional. 48 * <vuln>_apply_mitigation() -- Enable the selected mitigation. 49 * 50 * The compile-time mitigation in all cases should be AUTO. An explicit 51 * command-line option can override AUTO. If no such option is 52 * provided, <vuln>_select_mitigation() will override AUTO to the best 53 * mitigation option. 54 */ 55 56 static void __init spectre_v1_select_mitigation(void); 57 static void __init spectre_v1_apply_mitigation(void); 58 static void __init spectre_v2_select_mitigation(void); 59 static void __init spectre_v2_update_mitigation(void); 60 static void __init spectre_v2_apply_mitigation(void); 61 static void __init retbleed_select_mitigation(void); 62 static void __init retbleed_update_mitigation(void); 63 static void __init retbleed_apply_mitigation(void); 64 static void __init spectre_v2_user_select_mitigation(void); 65 static void __init spectre_v2_user_update_mitigation(void); 66 static void __init spectre_v2_user_apply_mitigation(void); 67 static void __init ssb_select_mitigation(void); 68 static void __init ssb_apply_mitigation(void); 69 static void __init l1tf_select_mitigation(void); 70 static void __init l1tf_apply_mitigation(void); 71 static void __init mds_select_mitigation(void); 72 static void __init mds_update_mitigation(void); 73 static void __init mds_apply_mitigation(void); 74 static void __init taa_select_mitigation(void); 75 static void __init taa_update_mitigation(void); 76 static void __init taa_apply_mitigation(void); 77 static void __init mmio_select_mitigation(void); 78 static void __init mmio_update_mitigation(void); 79 static void __init mmio_apply_mitigation(void); 80 static void __init rfds_select_mitigation(void); 81 static void __init rfds_update_mitigation(void); 82 static void __init rfds_apply_mitigation(void); 83 static void __init srbds_select_mitigation(void); 84 static void __init srbds_apply_mitigation(void); 85 static void __init l1d_flush_select_mitigation(void); 86 static void __init srso_select_mitigation(void); 87 static void __init srso_update_mitigation(void); 88 static void __init srso_apply_mitigation(void); 89 static void __init gds_select_mitigation(void); 
90 static void __init gds_apply_mitigation(void); 91 static void __init bhi_select_mitigation(void); 92 static void __init bhi_update_mitigation(void); 93 static void __init bhi_apply_mitigation(void); 94 static void __init its_select_mitigation(void); 95 static void __init its_update_mitigation(void); 96 static void __init its_apply_mitigation(void); 97 static void __init tsa_select_mitigation(void); 98 static void __init tsa_apply_mitigation(void); 99 100 /* The base value of the SPEC_CTRL MSR without task-specific bits set */ 101 u64 x86_spec_ctrl_base; 102 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base); 103 104 /* The current value of the SPEC_CTRL MSR with task-specific bits set */ 105 DEFINE_PER_CPU(u64, x86_spec_ctrl_current); 106 EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current); 107 108 u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB; 109 110 static u64 __ro_after_init x86_arch_cap_msr; 111 112 static DEFINE_MUTEX(spec_ctrl_mutex); 113 114 void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk; 115 116 static void __init set_return_thunk(void *thunk) 117 { 118 x86_return_thunk = thunk; 119 120 pr_info("active return thunk: %ps\n", thunk); 121 } 122 123 /* Update SPEC_CTRL MSR and its cached copy unconditionally */ 124 static void update_spec_ctrl(u64 val) 125 { 126 this_cpu_write(x86_spec_ctrl_current, val); 127 wrmsrq(MSR_IA32_SPEC_CTRL, val); 128 } 129 130 /* 131 * Keep track of the SPEC_CTRL MSR value for the current task, which may differ 132 * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update(). 133 */ 134 void update_spec_ctrl_cond(u64 val) 135 { 136 if (this_cpu_read(x86_spec_ctrl_current) == val) 137 return; 138 139 this_cpu_write(x86_spec_ctrl_current, val); 140 141 /* 142 * When KERNEL_IBRS this MSR is written on return-to-user, unless 143 * forced the update can be delayed until that time. 144 */ 145 if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS)) 146 wrmsrq(MSR_IA32_SPEC_CTRL, val); 147 } 148 149 noinstr u64 spec_ctrl_current(void) 150 { 151 return this_cpu_read(x86_spec_ctrl_current); 152 } 153 EXPORT_SYMBOL_GPL(spec_ctrl_current); 154 155 /* 156 * AMD specific MSR info for Speculative Store Bypass control. 157 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu(). 158 */ 159 u64 __ro_after_init x86_amd_ls_cfg_base; 160 u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask; 161 162 /* Control conditional STIBP in switch_to() */ 163 DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp); 164 /* Control conditional IBPB in switch_mm() */ 165 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); 166 /* Control unconditional IBPB in switch_mm() */ 167 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb); 168 169 /* Control IBPB on vCPU load */ 170 DEFINE_STATIC_KEY_FALSE(switch_vcpu_ibpb); 171 EXPORT_SYMBOL_GPL(switch_vcpu_ibpb); 172 173 /* Control CPU buffer clear before idling (halt, mwait) */ 174 DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear); 175 EXPORT_SYMBOL_GPL(cpu_buf_idle_clear); 176 177 /* 178 * Controls whether l1d flush based mitigations are enabled, 179 * based on hw features and admin setting via boot parameter 180 * defaults to false 181 */ 182 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush); 183 184 /* 185 * Controls CPU Fill buffer clear before VMenter. This is a subset of 186 * X86_FEATURE_CLEAR_CPU_BUF, and should only be enabled when KVM-only 187 * mitigation is required. 
188 */ 189 DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear); 190 EXPORT_SYMBOL_GPL(cpu_buf_vm_clear); 191 192 #undef pr_fmt 193 #define pr_fmt(fmt) "mitigations: " fmt 194 195 static void __init cpu_print_attack_vectors(void) 196 { 197 pr_info("Enabled attack vectors: "); 198 199 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL)) 200 pr_cont("user_kernel, "); 201 202 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER)) 203 pr_cont("user_user, "); 204 205 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST)) 206 pr_cont("guest_host, "); 207 208 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) 209 pr_cont("guest_guest, "); 210 211 pr_cont("SMT mitigations: "); 212 213 switch (smt_mitigations) { 214 case SMT_MITIGATIONS_OFF: 215 pr_cont("off\n"); 216 break; 217 case SMT_MITIGATIONS_AUTO: 218 pr_cont("auto\n"); 219 break; 220 case SMT_MITIGATIONS_ON: 221 pr_cont("on\n"); 222 } 223 } 224 225 void __init cpu_select_mitigations(void) 226 { 227 /* 228 * Read the SPEC_CTRL MSR to account for reserved bits which may 229 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD 230 * init code as it is not enumerated and depends on the family. 231 */ 232 if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) { 233 rdmsrq(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); 234 235 /* 236 * Previously running kernel (kexec), may have some controls 237 * turned ON. Clear them and let the mitigations setup below 238 * rediscover them based on configuration. 239 */ 240 x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK; 241 } 242 243 x86_arch_cap_msr = x86_read_arch_cap_msr(); 244 245 cpu_print_attack_vectors(); 246 247 /* Select the proper CPU mitigations before patching alternatives: */ 248 spectre_v1_select_mitigation(); 249 spectre_v2_select_mitigation(); 250 retbleed_select_mitigation(); 251 spectre_v2_user_select_mitigation(); 252 ssb_select_mitigation(); 253 l1tf_select_mitigation(); 254 mds_select_mitigation(); 255 taa_select_mitigation(); 256 mmio_select_mitigation(); 257 rfds_select_mitigation(); 258 srbds_select_mitigation(); 259 l1d_flush_select_mitigation(); 260 srso_select_mitigation(); 261 gds_select_mitigation(); 262 its_select_mitigation(); 263 bhi_select_mitigation(); 264 tsa_select_mitigation(); 265 266 /* 267 * After mitigations are selected, some may need to update their 268 * choices. 269 */ 270 spectre_v2_update_mitigation(); 271 /* 272 * retbleed_update_mitigation() relies on the state set by 273 * spectre_v2_update_mitigation(); specifically it wants to know about 274 * spectre_v2=ibrs. 275 */ 276 retbleed_update_mitigation(); 277 /* 278 * its_update_mitigation() depends on spectre_v2_update_mitigation() 279 * and retbleed_update_mitigation(). 280 */ 281 its_update_mitigation(); 282 283 /* 284 * spectre_v2_user_update_mitigation() depends on 285 * retbleed_update_mitigation(), specifically the STIBP 286 * selection is forced for UNRET or IBPB. 287 */ 288 spectre_v2_user_update_mitigation(); 289 mds_update_mitigation(); 290 taa_update_mitigation(); 291 mmio_update_mitigation(); 292 rfds_update_mitigation(); 293 bhi_update_mitigation(); 294 /* srso_update_mitigation() depends on retbleed_update_mitigation(). 
*/ 295 srso_update_mitigation(); 296 297 spectre_v1_apply_mitigation(); 298 spectre_v2_apply_mitigation(); 299 retbleed_apply_mitigation(); 300 spectre_v2_user_apply_mitigation(); 301 ssb_apply_mitigation(); 302 l1tf_apply_mitigation(); 303 mds_apply_mitigation(); 304 taa_apply_mitigation(); 305 mmio_apply_mitigation(); 306 rfds_apply_mitigation(); 307 srbds_apply_mitigation(); 308 srso_apply_mitigation(); 309 gds_apply_mitigation(); 310 its_apply_mitigation(); 311 bhi_apply_mitigation(); 312 tsa_apply_mitigation(); 313 } 314 315 /* 316 * NOTE: This function is *only* called for SVM, since Intel uses 317 * MSR_IA32_SPEC_CTRL for SSBD. 318 */ 319 void 320 x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest) 321 { 322 u64 guestval, hostval; 323 struct thread_info *ti = current_thread_info(); 324 325 /* 326 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update 327 * MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported. 328 */ 329 if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) && 330 !static_cpu_has(X86_FEATURE_VIRT_SSBD)) 331 return; 332 333 /* 334 * If the host has SSBD mitigation enabled, force it in the host's 335 * virtual MSR value. If its not permanently enabled, evaluate 336 * current's TIF_SSBD thread flag. 337 */ 338 if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE)) 339 hostval = SPEC_CTRL_SSBD; 340 else 341 hostval = ssbd_tif_to_spec_ctrl(ti->flags); 342 343 /* Sanitize the guest value */ 344 guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD; 345 346 if (hostval != guestval) { 347 unsigned long tif; 348 349 tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) : 350 ssbd_spec_ctrl_to_tif(hostval); 351 352 speculation_ctrl_update(tif); 353 } 354 } 355 EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl); 356 357 static void x86_amd_ssb_disable(void) 358 { 359 u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask; 360 361 if (boot_cpu_has(X86_FEATURE_VIRT_SSBD)) 362 wrmsrq(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD); 363 else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD)) 364 wrmsrq(MSR_AMD64_LS_CFG, msrval); 365 } 366 367 #undef pr_fmt 368 #define pr_fmt(fmt) "MDS: " fmt 369 370 /* 371 * Returns true if vulnerability should be mitigated based on the 372 * selected attack vector controls. 373 * 374 * See Documentation/admin-guide/hw-vuln/attack_vector_controls.rst 375 */ 376 static bool __init should_mitigate_vuln(unsigned int bug) 377 { 378 switch (bug) { 379 /* 380 * The only runtime-selected spectre_v1 mitigations in the kernel are 381 * related to SWAPGS protection on kernel entry. Therefore, protection 382 * is only required for the user->kernel attack vector. 383 */ 384 case X86_BUG_SPECTRE_V1: 385 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL); 386 387 case X86_BUG_SPECTRE_V2: 388 case X86_BUG_RETBLEED: 389 case X86_BUG_L1TF: 390 case X86_BUG_ITS: 391 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) || 392 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST); 393 394 case X86_BUG_SPECTRE_V2_USER: 395 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) || 396 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST); 397 398 /* 399 * All the vulnerabilities below allow potentially leaking data 400 * across address spaces. Therefore, mitigation is required for 401 * any of these 4 attack vectors. 
402 */ 403 case X86_BUG_MDS: 404 case X86_BUG_TAA: 405 case X86_BUG_MMIO_STALE_DATA: 406 case X86_BUG_RFDS: 407 case X86_BUG_SRBDS: 408 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) || 409 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) || 410 cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) || 411 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST); 412 413 case X86_BUG_GDS: 414 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) || 415 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) || 416 cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) || 417 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST) || 418 (smt_mitigations != SMT_MITIGATIONS_OFF); 419 default: 420 WARN(1, "Unknown bug %x\n", bug); 421 return false; 422 } 423 } 424 425 /* Default mitigation for MDS-affected CPUs */ 426 static enum mds_mitigations mds_mitigation __ro_after_init = 427 IS_ENABLED(CONFIG_MITIGATION_MDS) ? MDS_MITIGATION_AUTO : MDS_MITIGATION_OFF; 428 static bool mds_nosmt __ro_after_init = false; 429 430 static const char * const mds_strings[] = { 431 [MDS_MITIGATION_OFF] = "Vulnerable", 432 [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers", 433 [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode", 434 }; 435 436 enum taa_mitigations { 437 TAA_MITIGATION_OFF, 438 TAA_MITIGATION_AUTO, 439 TAA_MITIGATION_UCODE_NEEDED, 440 TAA_MITIGATION_VERW, 441 TAA_MITIGATION_TSX_DISABLED, 442 }; 443 444 /* Default mitigation for TAA-affected CPUs */ 445 static enum taa_mitigations taa_mitigation __ro_after_init = 446 IS_ENABLED(CONFIG_MITIGATION_TAA) ? TAA_MITIGATION_AUTO : TAA_MITIGATION_OFF; 447 448 enum mmio_mitigations { 449 MMIO_MITIGATION_OFF, 450 MMIO_MITIGATION_AUTO, 451 MMIO_MITIGATION_UCODE_NEEDED, 452 MMIO_MITIGATION_VERW, 453 }; 454 455 /* Default mitigation for Processor MMIO Stale Data vulnerabilities */ 456 static enum mmio_mitigations mmio_mitigation __ro_after_init = 457 IS_ENABLED(CONFIG_MITIGATION_MMIO_STALE_DATA) ? MMIO_MITIGATION_AUTO : MMIO_MITIGATION_OFF; 458 459 enum rfds_mitigations { 460 RFDS_MITIGATION_OFF, 461 RFDS_MITIGATION_AUTO, 462 RFDS_MITIGATION_VERW, 463 RFDS_MITIGATION_UCODE_NEEDED, 464 }; 465 466 /* Default mitigation for Register File Data Sampling */ 467 static enum rfds_mitigations rfds_mitigation __ro_after_init = 468 IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_AUTO : RFDS_MITIGATION_OFF; 469 470 /* 471 * Set if any of MDS/TAA/MMIO/RFDS are going to enable VERW clearing 472 * through X86_FEATURE_CLEAR_CPU_BUF on kernel and guest entry. 473 */ 474 static bool verw_clear_cpu_buf_mitigation_selected __ro_after_init; 475 476 static void __init mds_select_mitigation(void) 477 { 478 if (!boot_cpu_has_bug(X86_BUG_MDS)) { 479 mds_mitigation = MDS_MITIGATION_OFF; 480 return; 481 } 482 483 if (mds_mitigation == MDS_MITIGATION_AUTO) { 484 if (should_mitigate_vuln(X86_BUG_MDS)) 485 mds_mitigation = MDS_MITIGATION_FULL; 486 else 487 mds_mitigation = MDS_MITIGATION_OFF; 488 } 489 490 if (mds_mitigation == MDS_MITIGATION_OFF) 491 return; 492 493 verw_clear_cpu_buf_mitigation_selected = true; 494 } 495 496 static void __init mds_update_mitigation(void) 497 { 498 if (!boot_cpu_has_bug(X86_BUG_MDS)) 499 return; 500 501 /* If TAA, MMIO, or RFDS are being mitigated, MDS gets mitigated too. 
*/ 502 if (verw_clear_cpu_buf_mitigation_selected) 503 mds_mitigation = MDS_MITIGATION_FULL; 504 505 if (mds_mitigation == MDS_MITIGATION_FULL) { 506 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR)) 507 mds_mitigation = MDS_MITIGATION_VMWERV; 508 } 509 510 pr_info("%s\n", mds_strings[mds_mitigation]); 511 } 512 513 static void __init mds_apply_mitigation(void) 514 { 515 if (mds_mitigation == MDS_MITIGATION_FULL || 516 mds_mitigation == MDS_MITIGATION_VMWERV) { 517 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); 518 if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) && 519 (mds_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)) 520 cpu_smt_disable(false); 521 } 522 } 523 524 static int __init mds_cmdline(char *str) 525 { 526 if (!boot_cpu_has_bug(X86_BUG_MDS)) 527 return 0; 528 529 if (!str) 530 return -EINVAL; 531 532 if (!strcmp(str, "off")) 533 mds_mitigation = MDS_MITIGATION_OFF; 534 else if (!strcmp(str, "full")) 535 mds_mitigation = MDS_MITIGATION_FULL; 536 else if (!strcmp(str, "full,nosmt")) { 537 mds_mitigation = MDS_MITIGATION_FULL; 538 mds_nosmt = true; 539 } 540 541 return 0; 542 } 543 early_param("mds", mds_cmdline); 544 545 #undef pr_fmt 546 #define pr_fmt(fmt) "TAA: " fmt 547 548 static bool taa_nosmt __ro_after_init; 549 550 static const char * const taa_strings[] = { 551 [TAA_MITIGATION_OFF] = "Vulnerable", 552 [TAA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode", 553 [TAA_MITIGATION_VERW] = "Mitigation: Clear CPU buffers", 554 [TAA_MITIGATION_TSX_DISABLED] = "Mitigation: TSX disabled", 555 }; 556 557 static bool __init taa_vulnerable(void) 558 { 559 return boot_cpu_has_bug(X86_BUG_TAA) && boot_cpu_has(X86_FEATURE_RTM); 560 } 561 562 static void __init taa_select_mitigation(void) 563 { 564 if (!boot_cpu_has_bug(X86_BUG_TAA)) { 565 taa_mitigation = TAA_MITIGATION_OFF; 566 return; 567 } 568 569 /* TSX previously disabled by tsx=off */ 570 if (!boot_cpu_has(X86_FEATURE_RTM)) { 571 taa_mitigation = TAA_MITIGATION_TSX_DISABLED; 572 return; 573 } 574 575 /* Microcode will be checked in taa_update_mitigation(). */ 576 if (taa_mitigation == TAA_MITIGATION_AUTO) { 577 if (should_mitigate_vuln(X86_BUG_TAA)) 578 taa_mitigation = TAA_MITIGATION_VERW; 579 else 580 taa_mitigation = TAA_MITIGATION_OFF; 581 } 582 583 if (taa_mitigation != TAA_MITIGATION_OFF) 584 verw_clear_cpu_buf_mitigation_selected = true; 585 } 586 587 static void __init taa_update_mitigation(void) 588 { 589 if (!taa_vulnerable()) 590 return; 591 592 if (verw_clear_cpu_buf_mitigation_selected) 593 taa_mitigation = TAA_MITIGATION_VERW; 594 595 if (taa_mitigation == TAA_MITIGATION_VERW) { 596 /* Check if the requisite ucode is available. */ 597 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR)) 598 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED; 599 600 /* 601 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1. 602 * A microcode update fixes this behavior to clear CPU buffers. It also 603 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the 604 * ARCH_CAP_TSX_CTRL_MSR bit. 605 * 606 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode 607 * update is required. 
608 */ 609 if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && 610 !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR)) 611 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED; 612 } 613 614 pr_info("%s\n", taa_strings[taa_mitigation]); 615 } 616 617 static void __init taa_apply_mitigation(void) 618 { 619 if (taa_mitigation == TAA_MITIGATION_VERW || 620 taa_mitigation == TAA_MITIGATION_UCODE_NEEDED) { 621 /* 622 * TSX is enabled, select alternate mitigation for TAA which is 623 * the same as MDS. Enable MDS static branch to clear CPU buffers. 624 * 625 * For guests that can't determine whether the correct microcode is 626 * present on host, enable the mitigation for UCODE_NEEDED as well. 627 */ 628 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); 629 630 if (taa_nosmt || smt_mitigations == SMT_MITIGATIONS_ON) 631 cpu_smt_disable(false); 632 } 633 } 634 635 static int __init tsx_async_abort_parse_cmdline(char *str) 636 { 637 if (!boot_cpu_has_bug(X86_BUG_TAA)) 638 return 0; 639 640 if (!str) 641 return -EINVAL; 642 643 if (!strcmp(str, "off")) { 644 taa_mitigation = TAA_MITIGATION_OFF; 645 } else if (!strcmp(str, "full")) { 646 taa_mitigation = TAA_MITIGATION_VERW; 647 } else if (!strcmp(str, "full,nosmt")) { 648 taa_mitigation = TAA_MITIGATION_VERW; 649 taa_nosmt = true; 650 } 651 652 return 0; 653 } 654 early_param("tsx_async_abort", tsx_async_abort_parse_cmdline); 655 656 #undef pr_fmt 657 #define pr_fmt(fmt) "MMIO Stale Data: " fmt 658 659 static bool mmio_nosmt __ro_after_init = false; 660 661 static const char * const mmio_strings[] = { 662 [MMIO_MITIGATION_OFF] = "Vulnerable", 663 [MMIO_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode", 664 [MMIO_MITIGATION_VERW] = "Mitigation: Clear CPU buffers", 665 }; 666 667 static void __init mmio_select_mitigation(void) 668 { 669 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) || 670 cpu_mitigations_off()) { 671 mmio_mitigation = MMIO_MITIGATION_OFF; 672 return; 673 } 674 675 /* Microcode will be checked in mmio_update_mitigation(). */ 676 if (mmio_mitigation == MMIO_MITIGATION_AUTO) { 677 if (should_mitigate_vuln(X86_BUG_MMIO_STALE_DATA)) 678 mmio_mitigation = MMIO_MITIGATION_VERW; 679 else 680 mmio_mitigation = MMIO_MITIGATION_OFF; 681 } 682 683 if (mmio_mitigation == MMIO_MITIGATION_OFF) 684 return; 685 686 /* 687 * Enable CPU buffer clear mitigation for host and VMM, if also affected 688 * by MDS or TAA. 689 */ 690 if (boot_cpu_has_bug(X86_BUG_MDS) || taa_vulnerable()) 691 verw_clear_cpu_buf_mitigation_selected = true; 692 } 693 694 static void __init mmio_update_mitigation(void) 695 { 696 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) 697 return; 698 699 if (verw_clear_cpu_buf_mitigation_selected) 700 mmio_mitigation = MMIO_MITIGATION_VERW; 701 702 if (mmio_mitigation == MMIO_MITIGATION_VERW) { 703 /* 704 * Check if the system has the right microcode. 705 * 706 * CPU Fill buffer clear mitigation is enumerated by either an explicit 707 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS 708 * affected systems. 
709 */ 710 if (!((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) || 711 (boot_cpu_has(X86_FEATURE_MD_CLEAR) && 712 boot_cpu_has(X86_FEATURE_FLUSH_L1D) && 713 !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)))) 714 mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED; 715 } 716 717 pr_info("%s\n", mmio_strings[mmio_mitigation]); 718 } 719 720 static void __init mmio_apply_mitigation(void) 721 { 722 if (mmio_mitigation == MMIO_MITIGATION_OFF) 723 return; 724 725 /* 726 * Only enable the VMM mitigation if the CPU buffer clear mitigation is 727 * not being used. 728 */ 729 if (verw_clear_cpu_buf_mitigation_selected) { 730 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); 731 static_branch_disable(&cpu_buf_vm_clear); 732 } else { 733 static_branch_enable(&cpu_buf_vm_clear); 734 } 735 736 /* 737 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can 738 * be propagated to uncore buffers, clearing the Fill buffers on idle 739 * is required irrespective of SMT state. 740 */ 741 if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) 742 static_branch_enable(&cpu_buf_idle_clear); 743 744 if (mmio_nosmt || smt_mitigations == SMT_MITIGATIONS_ON) 745 cpu_smt_disable(false); 746 } 747 748 static int __init mmio_stale_data_parse_cmdline(char *str) 749 { 750 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) 751 return 0; 752 753 if (!str) 754 return -EINVAL; 755 756 if (!strcmp(str, "off")) { 757 mmio_mitigation = MMIO_MITIGATION_OFF; 758 } else if (!strcmp(str, "full")) { 759 mmio_mitigation = MMIO_MITIGATION_VERW; 760 } else if (!strcmp(str, "full,nosmt")) { 761 mmio_mitigation = MMIO_MITIGATION_VERW; 762 mmio_nosmt = true; 763 } 764 765 return 0; 766 } 767 early_param("mmio_stale_data", mmio_stale_data_parse_cmdline); 768 769 #undef pr_fmt 770 #define pr_fmt(fmt) "Register File Data Sampling: " fmt 771 772 static const char * const rfds_strings[] = { 773 [RFDS_MITIGATION_OFF] = "Vulnerable", 774 [RFDS_MITIGATION_VERW] = "Mitigation: Clear Register File", 775 [RFDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", 776 }; 777 778 static inline bool __init verw_clears_cpu_reg_file(void) 779 { 780 return (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR); 781 } 782 783 static void __init rfds_select_mitigation(void) 784 { 785 if (!boot_cpu_has_bug(X86_BUG_RFDS)) { 786 rfds_mitigation = RFDS_MITIGATION_OFF; 787 return; 788 } 789 790 if (rfds_mitigation == RFDS_MITIGATION_AUTO) { 791 if (should_mitigate_vuln(X86_BUG_RFDS)) 792 rfds_mitigation = RFDS_MITIGATION_VERW; 793 else 794 rfds_mitigation = RFDS_MITIGATION_OFF; 795 } 796 797 if (rfds_mitigation == RFDS_MITIGATION_OFF) 798 return; 799 800 if (verw_clears_cpu_reg_file()) 801 verw_clear_cpu_buf_mitigation_selected = true; 802 } 803 804 static void __init rfds_update_mitigation(void) 805 { 806 if (!boot_cpu_has_bug(X86_BUG_RFDS)) 807 return; 808 809 if (verw_clear_cpu_buf_mitigation_selected) 810 rfds_mitigation = RFDS_MITIGATION_VERW; 811 812 if (rfds_mitigation == RFDS_MITIGATION_VERW) { 813 if (!verw_clears_cpu_reg_file()) 814 rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED; 815 } 816 817 pr_info("%s\n", rfds_strings[rfds_mitigation]); 818 } 819 820 static void __init rfds_apply_mitigation(void) 821 { 822 if (rfds_mitigation == RFDS_MITIGATION_VERW) 823 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); 824 } 825 826 static __init int rfds_parse_cmdline(char *str) 827 { 828 if (!str) 829 return -EINVAL; 830 831 if (!boot_cpu_has_bug(X86_BUG_RFDS)) 832 return 0; 833 834 if (!strcmp(str, "off")) 835 rfds_mitigation = RFDS_MITIGATION_OFF; 836 else if (!strcmp(str, "on")) 837 
rfds_mitigation = RFDS_MITIGATION_VERW; 838 839 return 0; 840 } 841 early_param("reg_file_data_sampling", rfds_parse_cmdline); 842 843 #undef pr_fmt 844 #define pr_fmt(fmt) "SRBDS: " fmt 845 846 enum srbds_mitigations { 847 SRBDS_MITIGATION_OFF, 848 SRBDS_MITIGATION_AUTO, 849 SRBDS_MITIGATION_UCODE_NEEDED, 850 SRBDS_MITIGATION_FULL, 851 SRBDS_MITIGATION_TSX_OFF, 852 SRBDS_MITIGATION_HYPERVISOR, 853 }; 854 855 static enum srbds_mitigations srbds_mitigation __ro_after_init = 856 IS_ENABLED(CONFIG_MITIGATION_SRBDS) ? SRBDS_MITIGATION_AUTO : SRBDS_MITIGATION_OFF; 857 858 static const char * const srbds_strings[] = { 859 [SRBDS_MITIGATION_OFF] = "Vulnerable", 860 [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", 861 [SRBDS_MITIGATION_FULL] = "Mitigation: Microcode", 862 [SRBDS_MITIGATION_TSX_OFF] = "Mitigation: TSX disabled", 863 [SRBDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status", 864 }; 865 866 static bool srbds_off; 867 868 void update_srbds_msr(void) 869 { 870 u64 mcu_ctrl; 871 872 if (!boot_cpu_has_bug(X86_BUG_SRBDS)) 873 return; 874 875 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) 876 return; 877 878 if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED) 879 return; 880 881 /* 882 * A MDS_NO CPU for which SRBDS mitigation is not needed due to TSX 883 * being disabled and it hasn't received the SRBDS MSR microcode. 884 */ 885 if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL)) 886 return; 887 888 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); 889 890 switch (srbds_mitigation) { 891 case SRBDS_MITIGATION_OFF: 892 case SRBDS_MITIGATION_TSX_OFF: 893 mcu_ctrl |= RNGDS_MITG_DIS; 894 break; 895 case SRBDS_MITIGATION_FULL: 896 mcu_ctrl &= ~RNGDS_MITG_DIS; 897 break; 898 default: 899 break; 900 } 901 902 wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); 903 } 904 905 static void __init srbds_select_mitigation(void) 906 { 907 if (!boot_cpu_has_bug(X86_BUG_SRBDS)) { 908 srbds_mitigation = SRBDS_MITIGATION_OFF; 909 return; 910 } 911 912 if (srbds_mitigation == SRBDS_MITIGATION_AUTO) { 913 if (should_mitigate_vuln(X86_BUG_SRBDS)) 914 srbds_mitigation = SRBDS_MITIGATION_FULL; 915 else { 916 srbds_mitigation = SRBDS_MITIGATION_OFF; 917 return; 918 } 919 } 920 921 /* 922 * Check to see if this is one of the MDS_NO systems supporting TSX that 923 * are only exposed to SRBDS when TSX is enabled or when CPU is affected 924 * by Processor MMIO Stale Data vulnerability. 
925 */ 926 if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) && 927 !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) 928 srbds_mitigation = SRBDS_MITIGATION_TSX_OFF; 929 else if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) 930 srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR; 931 else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL)) 932 srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED; 933 else if (srbds_off) 934 srbds_mitigation = SRBDS_MITIGATION_OFF; 935 936 pr_info("%s\n", srbds_strings[srbds_mitigation]); 937 } 938 939 static void __init srbds_apply_mitigation(void) 940 { 941 update_srbds_msr(); 942 } 943 944 static int __init srbds_parse_cmdline(char *str) 945 { 946 if (!str) 947 return -EINVAL; 948 949 if (!boot_cpu_has_bug(X86_BUG_SRBDS)) 950 return 0; 951 952 srbds_off = !strcmp(str, "off"); 953 return 0; 954 } 955 early_param("srbds", srbds_parse_cmdline); 956 957 #undef pr_fmt 958 #define pr_fmt(fmt) "L1D Flush : " fmt 959 960 enum l1d_flush_mitigations { 961 L1D_FLUSH_OFF = 0, 962 L1D_FLUSH_ON, 963 }; 964 965 static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF; 966 967 static void __init l1d_flush_select_mitigation(void) 968 { 969 if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) 970 return; 971 972 static_branch_enable(&switch_mm_cond_l1d_flush); 973 pr_info("Conditional flush on switch_mm() enabled\n"); 974 } 975 976 static int __init l1d_flush_parse_cmdline(char *str) 977 { 978 if (!strcmp(str, "on")) 979 l1d_flush_mitigation = L1D_FLUSH_ON; 980 981 return 0; 982 } 983 early_param("l1d_flush", l1d_flush_parse_cmdline); 984 985 #undef pr_fmt 986 #define pr_fmt(fmt) "GDS: " fmt 987 988 enum gds_mitigations { 989 GDS_MITIGATION_OFF, 990 GDS_MITIGATION_AUTO, 991 GDS_MITIGATION_UCODE_NEEDED, 992 GDS_MITIGATION_FORCE, 993 GDS_MITIGATION_FULL, 994 GDS_MITIGATION_FULL_LOCKED, 995 GDS_MITIGATION_HYPERVISOR, 996 }; 997 998 static enum gds_mitigations gds_mitigation __ro_after_init = 999 IS_ENABLED(CONFIG_MITIGATION_GDS) ? GDS_MITIGATION_AUTO : GDS_MITIGATION_OFF; 1000 1001 static const char * const gds_strings[] = { 1002 [GDS_MITIGATION_OFF] = "Vulnerable", 1003 [GDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", 1004 [GDS_MITIGATION_FORCE] = "Mitigation: AVX disabled, no microcode", 1005 [GDS_MITIGATION_FULL] = "Mitigation: Microcode", 1006 [GDS_MITIGATION_FULL_LOCKED] = "Mitigation: Microcode (locked)", 1007 [GDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status", 1008 }; 1009 1010 bool gds_ucode_mitigated(void) 1011 { 1012 return (gds_mitigation == GDS_MITIGATION_FULL || 1013 gds_mitigation == GDS_MITIGATION_FULL_LOCKED); 1014 } 1015 EXPORT_SYMBOL_GPL(gds_ucode_mitigated); 1016 1017 void update_gds_msr(void) 1018 { 1019 u64 mcu_ctrl_after; 1020 u64 mcu_ctrl; 1021 1022 switch (gds_mitigation) { 1023 case GDS_MITIGATION_OFF: 1024 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); 1025 mcu_ctrl |= GDS_MITG_DIS; 1026 break; 1027 case GDS_MITIGATION_FULL_LOCKED: 1028 /* 1029 * The LOCKED state comes from the boot CPU. APs might not have 1030 * the same state. Make sure the mitigation is enabled on all 1031 * CPUs. 
1032 */ 1033 case GDS_MITIGATION_FULL: 1034 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); 1035 mcu_ctrl &= ~GDS_MITG_DIS; 1036 break; 1037 case GDS_MITIGATION_FORCE: 1038 case GDS_MITIGATION_UCODE_NEEDED: 1039 case GDS_MITIGATION_HYPERVISOR: 1040 case GDS_MITIGATION_AUTO: 1041 return; 1042 } 1043 1044 wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); 1045 1046 /* 1047 * Check to make sure that the WRMSR value was not ignored. Writes to 1048 * GDS_MITG_DIS will be ignored if this processor is locked but the boot 1049 * processor was not. 1050 */ 1051 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after); 1052 WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after); 1053 } 1054 1055 static void __init gds_select_mitigation(void) 1056 { 1057 u64 mcu_ctrl; 1058 1059 if (!boot_cpu_has_bug(X86_BUG_GDS)) 1060 return; 1061 1062 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { 1063 gds_mitigation = GDS_MITIGATION_HYPERVISOR; 1064 return; 1065 } 1066 1067 /* Will verify below that mitigation _can_ be disabled */ 1068 if (gds_mitigation == GDS_MITIGATION_AUTO) { 1069 if (should_mitigate_vuln(X86_BUG_GDS)) 1070 gds_mitigation = GDS_MITIGATION_FULL; 1071 else { 1072 gds_mitigation = GDS_MITIGATION_OFF; 1073 return; 1074 } 1075 } 1076 1077 /* No microcode */ 1078 if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) { 1079 if (gds_mitigation != GDS_MITIGATION_FORCE) 1080 gds_mitigation = GDS_MITIGATION_UCODE_NEEDED; 1081 return; 1082 } 1083 1084 /* Microcode has mitigation, use it */ 1085 if (gds_mitigation == GDS_MITIGATION_FORCE) 1086 gds_mitigation = GDS_MITIGATION_FULL; 1087 1088 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); 1089 if (mcu_ctrl & GDS_MITG_LOCKED) { 1090 if (gds_mitigation == GDS_MITIGATION_OFF) 1091 pr_warn("Mitigation locked. Disable failed.\n"); 1092 1093 /* 1094 * The mitigation is selected from the boot CPU. All other CPUs 1095 * _should_ have the same state. If the boot CPU isn't locked 1096 * but others are then update_gds_msr() will WARN() of the state 1097 * mismatch. If the boot CPU is locked update_gds_msr() will 1098 * ensure the other CPUs have the mitigation enabled. 1099 */ 1100 gds_mitigation = GDS_MITIGATION_FULL_LOCKED; 1101 } 1102 } 1103 1104 static void __init gds_apply_mitigation(void) 1105 { 1106 if (!boot_cpu_has_bug(X86_BUG_GDS)) 1107 return; 1108 1109 /* Microcode is present */ 1110 if (x86_arch_cap_msr & ARCH_CAP_GDS_CTRL) 1111 update_gds_msr(); 1112 else if (gds_mitigation == GDS_MITIGATION_FORCE) { 1113 /* 1114 * This only needs to be done on the boot CPU so do it 1115 * here rather than in update_gds_msr() 1116 */ 1117 setup_clear_cpu_cap(X86_FEATURE_AVX); 1118 pr_warn("Microcode update needed! Disabling AVX as mitigation.\n"); 1119 } 1120 1121 pr_info("%s\n", gds_strings[gds_mitigation]); 1122 } 1123 1124 static int __init gds_parse_cmdline(char *str) 1125 { 1126 if (!str) 1127 return -EINVAL; 1128 1129 if (!boot_cpu_has_bug(X86_BUG_GDS)) 1130 return 0; 1131 1132 if (!strcmp(str, "off")) 1133 gds_mitigation = GDS_MITIGATION_OFF; 1134 else if (!strcmp(str, "force")) 1135 gds_mitigation = GDS_MITIGATION_FORCE; 1136 1137 return 0; 1138 } 1139 early_param("gather_data_sampling", gds_parse_cmdline); 1140 1141 #undef pr_fmt 1142 #define pr_fmt(fmt) "Spectre V1 : " fmt 1143 1144 enum spectre_v1_mitigation { 1145 SPECTRE_V1_MITIGATION_NONE, 1146 SPECTRE_V1_MITIGATION_AUTO, 1147 }; 1148 1149 static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init = 1150 IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V1) ? 
1151 SPECTRE_V1_MITIGATION_AUTO : SPECTRE_V1_MITIGATION_NONE; 1152 1153 static const char * const spectre_v1_strings[] = { 1154 [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers", 1155 [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization", 1156 }; 1157 1158 /* 1159 * Does SMAP provide full mitigation against speculative kernel access to 1160 * userspace? 1161 */ 1162 static bool smap_works_speculatively(void) 1163 { 1164 if (!boot_cpu_has(X86_FEATURE_SMAP)) 1165 return false; 1166 1167 /* 1168 * On CPUs which are vulnerable to Meltdown, SMAP does not 1169 * prevent speculative access to user data in the L1 cache. 1170 * Consider SMAP to be non-functional as a mitigation on these 1171 * CPUs. 1172 */ 1173 if (boot_cpu_has(X86_BUG_CPU_MELTDOWN)) 1174 return false; 1175 1176 return true; 1177 } 1178 1179 static void __init spectre_v1_select_mitigation(void) 1180 { 1181 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1)) 1182 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE; 1183 1184 if (!should_mitigate_vuln(X86_BUG_SPECTRE_V1)) 1185 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE; 1186 } 1187 1188 static void __init spectre_v1_apply_mitigation(void) 1189 { 1190 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1)) 1191 return; 1192 1193 if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) { 1194 /* 1195 * With Spectre v1, a user can speculatively control either 1196 * path of a conditional swapgs with a user-controlled GS 1197 * value. The mitigation is to add lfences to both code paths. 1198 * 1199 * If FSGSBASE is enabled, the user can put a kernel address in 1200 * GS, in which case SMAP provides no protection. 1201 * 1202 * If FSGSBASE is disabled, the user can only put a user space 1203 * address in GS. That makes an attack harder, but still 1204 * possible if there's no SMAP protection. 1205 */ 1206 if (boot_cpu_has(X86_FEATURE_FSGSBASE) || 1207 !smap_works_speculatively()) { 1208 /* 1209 * Mitigation can be provided from SWAPGS itself or 1210 * PTI as the CR3 write in the Meltdown mitigation 1211 * is serializing. 1212 * 1213 * If neither is there, mitigate with an LFENCE to 1214 * stop speculation through swapgs. 1215 */ 1216 if (boot_cpu_has_bug(X86_BUG_SWAPGS) && 1217 !boot_cpu_has(X86_FEATURE_PTI)) 1218 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER); 1219 1220 /* 1221 * Enable lfences in the kernel entry (non-swapgs) 1222 * paths, to prevent user entry from speculatively 1223 * skipping swapgs. 
1224 */ 1225 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL); 1226 } 1227 } 1228 1229 pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]); 1230 } 1231 1232 static int __init nospectre_v1_cmdline(char *str) 1233 { 1234 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE; 1235 return 0; 1236 } 1237 early_param("nospectre_v1", nospectre_v1_cmdline); 1238 1239 enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE; 1240 1241 /* Depends on spectre_v2 mitigation selected already */ 1242 static inline bool cdt_possible(enum spectre_v2_mitigation mode) 1243 { 1244 if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING) || 1245 !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) 1246 return false; 1247 1248 if (mode == SPECTRE_V2_RETPOLINE || 1249 mode == SPECTRE_V2_EIBRS_RETPOLINE) 1250 return true; 1251 1252 return false; 1253 } 1254 1255 #undef pr_fmt 1256 #define pr_fmt(fmt) "RETBleed: " fmt 1257 1258 enum its_mitigation { 1259 ITS_MITIGATION_OFF, 1260 ITS_MITIGATION_AUTO, 1261 ITS_MITIGATION_VMEXIT_ONLY, 1262 ITS_MITIGATION_ALIGNED_THUNKS, 1263 ITS_MITIGATION_RETPOLINE_STUFF, 1264 }; 1265 1266 static enum its_mitigation its_mitigation __ro_after_init = 1267 IS_ENABLED(CONFIG_MITIGATION_ITS) ? ITS_MITIGATION_AUTO : ITS_MITIGATION_OFF; 1268 1269 enum retbleed_mitigation { 1270 RETBLEED_MITIGATION_NONE, 1271 RETBLEED_MITIGATION_AUTO, 1272 RETBLEED_MITIGATION_UNRET, 1273 RETBLEED_MITIGATION_IBPB, 1274 RETBLEED_MITIGATION_IBRS, 1275 RETBLEED_MITIGATION_EIBRS, 1276 RETBLEED_MITIGATION_STUFF, 1277 }; 1278 1279 static const char * const retbleed_strings[] = { 1280 [RETBLEED_MITIGATION_NONE] = "Vulnerable", 1281 [RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk", 1282 [RETBLEED_MITIGATION_IBPB] = "Mitigation: IBPB", 1283 [RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS", 1284 [RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS", 1285 [RETBLEED_MITIGATION_STUFF] = "Mitigation: Stuffing", 1286 }; 1287 1288 static enum retbleed_mitigation retbleed_mitigation __ro_after_init = 1289 IS_ENABLED(CONFIG_MITIGATION_RETBLEED) ? 
RETBLEED_MITIGATION_AUTO : RETBLEED_MITIGATION_NONE; 1290 1291 static int __ro_after_init retbleed_nosmt = false; 1292 1293 enum srso_mitigation { 1294 SRSO_MITIGATION_NONE, 1295 SRSO_MITIGATION_AUTO, 1296 SRSO_MITIGATION_UCODE_NEEDED, 1297 SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED, 1298 SRSO_MITIGATION_MICROCODE, 1299 SRSO_MITIGATION_NOSMT, 1300 SRSO_MITIGATION_SAFE_RET, 1301 SRSO_MITIGATION_IBPB, 1302 SRSO_MITIGATION_IBPB_ON_VMEXIT, 1303 SRSO_MITIGATION_BP_SPEC_REDUCE, 1304 }; 1305 1306 static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_AUTO; 1307 1308 static int __init retbleed_parse_cmdline(char *str) 1309 { 1310 if (!str) 1311 return -EINVAL; 1312 1313 while (str) { 1314 char *next = strchr(str, ','); 1315 if (next) { 1316 *next = 0; 1317 next++; 1318 } 1319 1320 if (!strcmp(str, "off")) { 1321 retbleed_mitigation = RETBLEED_MITIGATION_NONE; 1322 } else if (!strcmp(str, "auto")) { 1323 retbleed_mitigation = RETBLEED_MITIGATION_AUTO; 1324 } else if (!strcmp(str, "unret")) { 1325 retbleed_mitigation = RETBLEED_MITIGATION_UNRET; 1326 } else if (!strcmp(str, "ibpb")) { 1327 retbleed_mitigation = RETBLEED_MITIGATION_IBPB; 1328 } else if (!strcmp(str, "stuff")) { 1329 retbleed_mitigation = RETBLEED_MITIGATION_STUFF; 1330 } else if (!strcmp(str, "nosmt")) { 1331 retbleed_nosmt = true; 1332 } else if (!strcmp(str, "force")) { 1333 setup_force_cpu_bug(X86_BUG_RETBLEED); 1334 } else { 1335 pr_err("Ignoring unknown retbleed option (%s).", str); 1336 } 1337 1338 str = next; 1339 } 1340 1341 return 0; 1342 } 1343 early_param("retbleed", retbleed_parse_cmdline); 1344 1345 #define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n" 1346 #define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n" 1347 1348 static void __init retbleed_select_mitigation(void) 1349 { 1350 if (!boot_cpu_has_bug(X86_BUG_RETBLEED)) { 1351 retbleed_mitigation = RETBLEED_MITIGATION_NONE; 1352 return; 1353 } 1354 1355 switch (retbleed_mitigation) { 1356 case RETBLEED_MITIGATION_UNRET: 1357 if (!IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) { 1358 retbleed_mitigation = RETBLEED_MITIGATION_AUTO; 1359 pr_err("WARNING: kernel not compiled with MITIGATION_UNRET_ENTRY.\n"); 1360 } 1361 break; 1362 case RETBLEED_MITIGATION_IBPB: 1363 if (!boot_cpu_has(X86_FEATURE_IBPB)) { 1364 pr_err("WARNING: CPU does not support IBPB.\n"); 1365 retbleed_mitigation = RETBLEED_MITIGATION_AUTO; 1366 } else if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) { 1367 pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n"); 1368 retbleed_mitigation = RETBLEED_MITIGATION_AUTO; 1369 } 1370 break; 1371 case RETBLEED_MITIGATION_STUFF: 1372 if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) { 1373 pr_err("WARNING: kernel not compiled with MITIGATION_CALL_DEPTH_TRACKING.\n"); 1374 retbleed_mitigation = RETBLEED_MITIGATION_AUTO; 1375 } else if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { 1376 pr_err("WARNING: retbleed=stuff only supported for Intel CPUs.\n"); 1377 retbleed_mitigation = RETBLEED_MITIGATION_AUTO; 1378 } 1379 break; 1380 default: 1381 break; 1382 } 1383 1384 if (retbleed_mitigation != RETBLEED_MITIGATION_AUTO) 1385 return; 1386 1387 if (!should_mitigate_vuln(X86_BUG_RETBLEED)) { 1388 retbleed_mitigation = RETBLEED_MITIGATION_NONE; 1389 return; 1390 } 1391 1392 /* Intel mitigation selected in retbleed_update_mitigation() */ 1393 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || 1394 
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { 1395 if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) 1396 retbleed_mitigation = RETBLEED_MITIGATION_UNRET; 1397 else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY) && 1398 boot_cpu_has(X86_FEATURE_IBPB)) 1399 retbleed_mitigation = RETBLEED_MITIGATION_IBPB; 1400 else 1401 retbleed_mitigation = RETBLEED_MITIGATION_NONE; 1402 } else if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { 1403 /* Final mitigation depends on spectre-v2 selection */ 1404 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) 1405 retbleed_mitigation = RETBLEED_MITIGATION_EIBRS; 1406 else if (boot_cpu_has(X86_FEATURE_IBRS)) 1407 retbleed_mitigation = RETBLEED_MITIGATION_IBRS; 1408 else 1409 retbleed_mitigation = RETBLEED_MITIGATION_NONE; 1410 } 1411 } 1412 1413 static void __init retbleed_update_mitigation(void) 1414 { 1415 if (!boot_cpu_has_bug(X86_BUG_RETBLEED)) 1416 return; 1417 1418 /* ITS can also enable stuffing */ 1419 if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF) 1420 retbleed_mitigation = RETBLEED_MITIGATION_STUFF; 1421 1422 /* If SRSO is using IBPB, that works for retbleed too */ 1423 if (srso_mitigation == SRSO_MITIGATION_IBPB) 1424 retbleed_mitigation = RETBLEED_MITIGATION_IBPB; 1425 1426 if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF && 1427 !cdt_possible(spectre_v2_enabled)) { 1428 pr_err("WARNING: retbleed=stuff depends on retpoline\n"); 1429 retbleed_mitigation = RETBLEED_MITIGATION_NONE; 1430 } 1431 1432 /* 1433 * Let IBRS trump all on Intel without affecting the effects of the 1434 * retbleed= cmdline option except for call depth based stuffing 1435 */ 1436 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { 1437 switch (spectre_v2_enabled) { 1438 case SPECTRE_V2_IBRS: 1439 retbleed_mitigation = RETBLEED_MITIGATION_IBRS; 1440 break; 1441 case SPECTRE_V2_EIBRS: 1442 case SPECTRE_V2_EIBRS_RETPOLINE: 1443 case SPECTRE_V2_EIBRS_LFENCE: 1444 retbleed_mitigation = RETBLEED_MITIGATION_EIBRS; 1445 break; 1446 default: 1447 if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF) 1448 pr_err(RETBLEED_INTEL_MSG); 1449 } 1450 } 1451 1452 pr_info("%s\n", retbleed_strings[retbleed_mitigation]); 1453 } 1454 1455 static void __init retbleed_apply_mitigation(void) 1456 { 1457 bool mitigate_smt = false; 1458 1459 switch (retbleed_mitigation) { 1460 case RETBLEED_MITIGATION_NONE: 1461 return; 1462 1463 case RETBLEED_MITIGATION_UNRET: 1464 setup_force_cpu_cap(X86_FEATURE_RETHUNK); 1465 setup_force_cpu_cap(X86_FEATURE_UNRET); 1466 1467 set_return_thunk(retbleed_return_thunk); 1468 1469 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && 1470 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) 1471 pr_err(RETBLEED_UNTRAIN_MSG); 1472 1473 mitigate_smt = true; 1474 break; 1475 1476 case RETBLEED_MITIGATION_IBPB: 1477 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); 1478 setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); 1479 mitigate_smt = true; 1480 1481 /* 1482 * IBPB on entry already obviates the need for 1483 * software-based untraining so clear those in case some 1484 * other mitigation like SRSO has selected them. 1485 */ 1486 setup_clear_cpu_cap(X86_FEATURE_UNRET); 1487 setup_clear_cpu_cap(X86_FEATURE_RETHUNK); 1488 1489 /* 1490 * There is no need for RSB filling: write_ibpb() ensures 1491 * all predictions, including the RSB, are invalidated, 1492 * regardless of IBPB implementation. 
1493 */ 1494 setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT); 1495 1496 break; 1497 1498 case RETBLEED_MITIGATION_STUFF: 1499 setup_force_cpu_cap(X86_FEATURE_RETHUNK); 1500 setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH); 1501 1502 set_return_thunk(call_depth_return_thunk); 1503 break; 1504 1505 default: 1506 break; 1507 } 1508 1509 if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) && 1510 (retbleed_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)) 1511 cpu_smt_disable(false); 1512 } 1513 1514 #undef pr_fmt 1515 #define pr_fmt(fmt) "ITS: " fmt 1516 1517 static const char * const its_strings[] = { 1518 [ITS_MITIGATION_OFF] = "Vulnerable", 1519 [ITS_MITIGATION_VMEXIT_ONLY] = "Mitigation: Vulnerable, KVM: Not affected", 1520 [ITS_MITIGATION_ALIGNED_THUNKS] = "Mitigation: Aligned branch/return thunks", 1521 [ITS_MITIGATION_RETPOLINE_STUFF] = "Mitigation: Retpolines, Stuffing RSB", 1522 }; 1523 1524 static int __init its_parse_cmdline(char *str) 1525 { 1526 if (!str) 1527 return -EINVAL; 1528 1529 if (!IS_ENABLED(CONFIG_MITIGATION_ITS)) { 1530 pr_err("Mitigation disabled at compile time, ignoring option (%s)", str); 1531 return 0; 1532 } 1533 1534 if (!strcmp(str, "off")) { 1535 its_mitigation = ITS_MITIGATION_OFF; 1536 } else if (!strcmp(str, "on")) { 1537 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; 1538 } else if (!strcmp(str, "force")) { 1539 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; 1540 setup_force_cpu_bug(X86_BUG_ITS); 1541 } else if (!strcmp(str, "vmexit")) { 1542 its_mitigation = ITS_MITIGATION_VMEXIT_ONLY; 1543 } else if (!strcmp(str, "stuff")) { 1544 its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF; 1545 } else { 1546 pr_err("Ignoring unknown indirect_target_selection option (%s).", str); 1547 } 1548 1549 return 0; 1550 } 1551 early_param("indirect_target_selection", its_parse_cmdline); 1552 1553 static void __init its_select_mitigation(void) 1554 { 1555 if (!boot_cpu_has_bug(X86_BUG_ITS)) { 1556 its_mitigation = ITS_MITIGATION_OFF; 1557 return; 1558 } 1559 1560 if (its_mitigation == ITS_MITIGATION_AUTO) { 1561 if (should_mitigate_vuln(X86_BUG_ITS)) 1562 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; 1563 else 1564 its_mitigation = ITS_MITIGATION_OFF; 1565 } 1566 1567 if (its_mitigation == ITS_MITIGATION_OFF) 1568 return; 1569 1570 if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) || 1571 !IS_ENABLED(CONFIG_MITIGATION_RETHUNK)) { 1572 pr_err("WARNING: ITS mitigation depends on retpoline and rethunk support\n"); 1573 its_mitigation = ITS_MITIGATION_OFF; 1574 return; 1575 } 1576 1577 if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) { 1578 pr_err("WARNING: ITS mitigation is not compatible with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B\n"); 1579 its_mitigation = ITS_MITIGATION_OFF; 1580 return; 1581 } 1582 1583 if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF && 1584 !IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) { 1585 pr_err("RSB stuff mitigation not supported, using default\n"); 1586 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; 1587 } 1588 1589 if (its_mitigation == ITS_MITIGATION_VMEXIT_ONLY && 1590 !boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY)) 1591 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; 1592 } 1593 1594 static void __init its_update_mitigation(void) 1595 { 1596 if (!boot_cpu_has_bug(X86_BUG_ITS)) 1597 return; 1598 1599 switch (spectre_v2_enabled) { 1600 case SPECTRE_V2_NONE: 1601 if (its_mitigation != ITS_MITIGATION_OFF) 1602 pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n"); 1603 its_mitigation = ITS_MITIGATION_OFF; 1604 break; 1605 case 
SPECTRE_V2_RETPOLINE: 1606 case SPECTRE_V2_EIBRS_RETPOLINE: 1607 /* Retpoline+CDT mitigates ITS */ 1608 if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF) 1609 its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF; 1610 break; 1611 case SPECTRE_V2_LFENCE: 1612 case SPECTRE_V2_EIBRS_LFENCE: 1613 pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n"); 1614 its_mitigation = ITS_MITIGATION_OFF; 1615 break; 1616 default: 1617 break; 1618 } 1619 1620 if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF && 1621 !cdt_possible(spectre_v2_enabled)) 1622 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; 1623 1624 pr_info("%s\n", its_strings[its_mitigation]); 1625 } 1626 1627 static void __init its_apply_mitigation(void) 1628 { 1629 switch (its_mitigation) { 1630 case ITS_MITIGATION_OFF: 1631 case ITS_MITIGATION_AUTO: 1632 case ITS_MITIGATION_VMEXIT_ONLY: 1633 break; 1634 case ITS_MITIGATION_ALIGNED_THUNKS: 1635 if (!boot_cpu_has(X86_FEATURE_RETPOLINE)) 1636 setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS); 1637 1638 setup_force_cpu_cap(X86_FEATURE_RETHUNK); 1639 set_return_thunk(its_return_thunk); 1640 break; 1641 case ITS_MITIGATION_RETPOLINE_STUFF: 1642 setup_force_cpu_cap(X86_FEATURE_RETHUNK); 1643 setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH); 1644 set_return_thunk(call_depth_return_thunk); 1645 break; 1646 } 1647 } 1648 1649 #undef pr_fmt 1650 #define pr_fmt(fmt) "Transient Scheduler Attacks: " fmt 1651 1652 enum tsa_mitigations { 1653 TSA_MITIGATION_NONE, 1654 TSA_MITIGATION_AUTO, 1655 TSA_MITIGATION_UCODE_NEEDED, 1656 TSA_MITIGATION_USER_KERNEL, 1657 TSA_MITIGATION_VM, 1658 TSA_MITIGATION_FULL, 1659 }; 1660 1661 static const char * const tsa_strings[] = { 1662 [TSA_MITIGATION_NONE] = "Vulnerable", 1663 [TSA_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", 1664 [TSA_MITIGATION_USER_KERNEL] = "Mitigation: Clear CPU buffers: user/kernel boundary", 1665 [TSA_MITIGATION_VM] = "Mitigation: Clear CPU buffers: VM", 1666 [TSA_MITIGATION_FULL] = "Mitigation: Clear CPU buffers", 1667 }; 1668 1669 static enum tsa_mitigations tsa_mitigation __ro_after_init = 1670 IS_ENABLED(CONFIG_MITIGATION_TSA) ? 
TSA_MITIGATION_AUTO : TSA_MITIGATION_NONE; 1671 1672 static int __init tsa_parse_cmdline(char *str) 1673 { 1674 if (!str) 1675 return -EINVAL; 1676 1677 if (!strcmp(str, "off")) 1678 tsa_mitigation = TSA_MITIGATION_NONE; 1679 else if (!strcmp(str, "on")) 1680 tsa_mitigation = TSA_MITIGATION_FULL; 1681 else if (!strcmp(str, "user")) 1682 tsa_mitigation = TSA_MITIGATION_USER_KERNEL; 1683 else if (!strcmp(str, "vm")) 1684 tsa_mitigation = TSA_MITIGATION_VM; 1685 else 1686 pr_err("Ignoring unknown tsa=%s option.\n", str); 1687 1688 return 0; 1689 } 1690 early_param("tsa", tsa_parse_cmdline); 1691 1692 static void __init tsa_select_mitigation(void) 1693 { 1694 if (!boot_cpu_has_bug(X86_BUG_TSA)) { 1695 tsa_mitigation = TSA_MITIGATION_NONE; 1696 return; 1697 } 1698 1699 if (tsa_mitigation == TSA_MITIGATION_AUTO) { 1700 bool vm = false, uk = false; 1701 1702 tsa_mitigation = TSA_MITIGATION_NONE; 1703 1704 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) || 1705 cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER)) { 1706 tsa_mitigation = TSA_MITIGATION_USER_KERNEL; 1707 uk = true; 1708 } 1709 1710 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) || 1711 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) { 1712 tsa_mitigation = TSA_MITIGATION_VM; 1713 vm = true; 1714 } 1715 1716 if (uk && vm) 1717 tsa_mitigation = TSA_MITIGATION_FULL; 1718 } 1719 1720 if (tsa_mitigation == TSA_MITIGATION_NONE) 1721 return; 1722 1723 if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR)) 1724 tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED; 1725 1726 /* 1727 * No need to set verw_clear_cpu_buf_mitigation_selected - it 1728 * doesn't fit all cases here and it is not needed because this 1729 * is the only VERW-based mitigation on AMD. 1730 */ 1731 pr_info("%s\n", tsa_strings[tsa_mitigation]); 1732 } 1733 1734 static void __init tsa_apply_mitigation(void) 1735 { 1736 switch (tsa_mitigation) { 1737 case TSA_MITIGATION_USER_KERNEL: 1738 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); 1739 break; 1740 case TSA_MITIGATION_VM: 1741 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM); 1742 break; 1743 case TSA_MITIGATION_FULL: 1744 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); 1745 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM); 1746 break; 1747 default: 1748 break; 1749 } 1750 } 1751 1752 #undef pr_fmt 1753 #define pr_fmt(fmt) "Spectre V2 : " fmt 1754 1755 static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init = 1756 SPECTRE_V2_USER_NONE; 1757 static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init = 1758 SPECTRE_V2_USER_NONE; 1759 1760 #ifdef CONFIG_MITIGATION_RETPOLINE 1761 static bool spectre_v2_bad_module; 1762 1763 bool retpoline_module_ok(bool has_retpoline) 1764 { 1765 if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline) 1766 return true; 1767 1768 pr_err("System may be vulnerable to spectre v2\n"); 1769 spectre_v2_bad_module = true; 1770 return false; 1771 } 1772 1773 static inline const char *spectre_v2_module_string(void) 1774 { 1775 return spectre_v2_bad_module ? 
" - vulnerable module loaded" : ""; 1776 } 1777 #else 1778 static inline const char *spectre_v2_module_string(void) { return ""; } 1779 #endif 1780 1781 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n" 1782 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n" 1783 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n" 1784 #define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n" 1785 1786 #ifdef CONFIG_BPF_SYSCALL 1787 void unpriv_ebpf_notify(int new_state) 1788 { 1789 if (new_state) 1790 return; 1791 1792 /* Unprivileged eBPF is enabled */ 1793 1794 switch (spectre_v2_enabled) { 1795 case SPECTRE_V2_EIBRS: 1796 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); 1797 break; 1798 case SPECTRE_V2_EIBRS_LFENCE: 1799 if (sched_smt_active()) 1800 pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); 1801 break; 1802 default: 1803 break; 1804 } 1805 } 1806 #endif 1807 1808 static inline bool match_option(const char *arg, int arglen, const char *opt) 1809 { 1810 int len = strlen(opt); 1811 1812 return len == arglen && !strncmp(arg, opt, len); 1813 } 1814 1815 /* The kernel command line selection for spectre v2 */ 1816 enum spectre_v2_mitigation_cmd { 1817 SPECTRE_V2_CMD_NONE, 1818 SPECTRE_V2_CMD_AUTO, 1819 SPECTRE_V2_CMD_FORCE, 1820 SPECTRE_V2_CMD_RETPOLINE, 1821 SPECTRE_V2_CMD_RETPOLINE_GENERIC, 1822 SPECTRE_V2_CMD_RETPOLINE_LFENCE, 1823 SPECTRE_V2_CMD_EIBRS, 1824 SPECTRE_V2_CMD_EIBRS_RETPOLINE, 1825 SPECTRE_V2_CMD_EIBRS_LFENCE, 1826 SPECTRE_V2_CMD_IBRS, 1827 }; 1828 1829 static enum spectre_v2_mitigation_cmd spectre_v2_cmd __ro_after_init = SPECTRE_V2_CMD_AUTO; 1830 1831 enum spectre_v2_user_cmd { 1832 SPECTRE_V2_USER_CMD_NONE, 1833 SPECTRE_V2_USER_CMD_AUTO, 1834 SPECTRE_V2_USER_CMD_FORCE, 1835 SPECTRE_V2_USER_CMD_PRCTL, 1836 SPECTRE_V2_USER_CMD_PRCTL_IBPB, 1837 SPECTRE_V2_USER_CMD_SECCOMP, 1838 SPECTRE_V2_USER_CMD_SECCOMP_IBPB, 1839 }; 1840 1841 static const char * const spectre_v2_user_strings[] = { 1842 [SPECTRE_V2_USER_NONE] = "User space: Vulnerable", 1843 [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection", 1844 [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection", 1845 [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl", 1846 [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl", 1847 }; 1848 1849 static const struct { 1850 const char *option; 1851 enum spectre_v2_user_cmd cmd; 1852 bool secure; 1853 } v2_user_options[] __initconst = { 1854 { "auto", SPECTRE_V2_USER_CMD_AUTO, false }, 1855 { "off", SPECTRE_V2_USER_CMD_NONE, false }, 1856 { "on", SPECTRE_V2_USER_CMD_FORCE, true }, 1857 { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false }, 1858 { "prctl,ibpb", SPECTRE_V2_USER_CMD_PRCTL_IBPB, false }, 1859 { "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false }, 1860 { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false }, 1861 }; 1862 1863 static void __init spec_v2_user_print_cond(const char *reason, bool secure) 1864 { 1865 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure) 1866 pr_info("spectre_v2_user=%s forced on command line.\n", reason); 1867 } 1868 1869 static enum spectre_v2_user_cmd __init spectre_v2_parse_user_cmdline(void) 1870 { 1871 char arg[20]; 1872 int ret, i; 
1873 1874 if (!IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2)) 1875 return SPECTRE_V2_USER_CMD_NONE; 1876 1877 ret = cmdline_find_option(boot_command_line, "spectre_v2_user", 1878 arg, sizeof(arg)); 1879 if (ret < 0) 1880 return SPECTRE_V2_USER_CMD_AUTO; 1881 1882 for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) { 1883 if (match_option(arg, ret, v2_user_options[i].option)) { 1884 spec_v2_user_print_cond(v2_user_options[i].option, 1885 v2_user_options[i].secure); 1886 return v2_user_options[i].cmd; 1887 } 1888 } 1889 1890 pr_err("Unknown user space protection option (%s). Switching to default\n", arg); 1891 return SPECTRE_V2_USER_CMD_AUTO; 1892 } 1893 1894 static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode) 1895 { 1896 return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS; 1897 } 1898 1899 static void __init spectre_v2_user_select_mitigation(void) 1900 { 1901 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP)) 1902 return; 1903 1904 switch (spectre_v2_parse_user_cmdline()) { 1905 case SPECTRE_V2_USER_CMD_NONE: 1906 return; 1907 case SPECTRE_V2_USER_CMD_FORCE: 1908 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; 1909 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT; 1910 break; 1911 case SPECTRE_V2_USER_CMD_AUTO: 1912 if (!should_mitigate_vuln(X86_BUG_SPECTRE_V2_USER)) 1913 break; 1914 spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL; 1915 if (smt_mitigations == SMT_MITIGATIONS_OFF) 1916 break; 1917 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL; 1918 break; 1919 case SPECTRE_V2_USER_CMD_PRCTL: 1920 spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL; 1921 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL; 1922 break; 1923 case SPECTRE_V2_USER_CMD_PRCTL_IBPB: 1924 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; 1925 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL; 1926 break; 1927 case SPECTRE_V2_USER_CMD_SECCOMP: 1928 if (IS_ENABLED(CONFIG_SECCOMP)) 1929 spectre_v2_user_ibpb = SPECTRE_V2_USER_SECCOMP; 1930 else 1931 spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL; 1932 spectre_v2_user_stibp = spectre_v2_user_ibpb; 1933 break; 1934 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB: 1935 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; 1936 if (IS_ENABLED(CONFIG_SECCOMP)) 1937 spectre_v2_user_stibp = SPECTRE_V2_USER_SECCOMP; 1938 else 1939 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL; 1940 break; 1941 } 1942 1943 /* 1944 * At this point, an STIBP mode other than "off" has been set. 1945 * If STIBP support is not being forced, check if STIBP always-on 1946 * is preferred. 
1947 */ 1948 if ((spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || 1949 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) && 1950 boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON)) 1951 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED; 1952 1953 if (!boot_cpu_has(X86_FEATURE_IBPB)) 1954 spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE; 1955 1956 if (!boot_cpu_has(X86_FEATURE_STIBP)) 1957 spectre_v2_user_stibp = SPECTRE_V2_USER_NONE; 1958 } 1959 1960 static void __init spectre_v2_user_update_mitigation(void) 1961 { 1962 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP)) 1963 return; 1964 1965 /* The spectre_v2 cmd line can override spectre_v2_user options */ 1966 if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE) { 1967 spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE; 1968 spectre_v2_user_stibp = SPECTRE_V2_USER_NONE; 1969 } else if (spectre_v2_cmd == SPECTRE_V2_CMD_FORCE) { 1970 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; 1971 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT; 1972 } 1973 1974 /* 1975 * If no STIBP, Intel enhanced IBRS is enabled, or SMT impossible, STIBP 1976 * is not required. 1977 * 1978 * Intel's Enhanced IBRS also protects against cross-thread branch target 1979 * injection in user-mode as the IBRS bit remains always set which 1980 * implicitly enables cross-thread protections. However, in legacy IBRS 1981 * mode, the IBRS bit is set only on kernel entry and cleared on return 1982 * to userspace. AMD Automatic IBRS also does not protect userspace. 1983 * These modes therefore disable the implicit cross-thread protection, 1984 * so allow for STIBP to be selected in those cases. 1985 */ 1986 if (!boot_cpu_has(X86_FEATURE_STIBP) || 1987 !cpu_smt_possible() || 1988 (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && 1989 !boot_cpu_has(X86_FEATURE_AUTOIBRS))) { 1990 spectre_v2_user_stibp = SPECTRE_V2_USER_NONE; 1991 return; 1992 } 1993 1994 if (spectre_v2_user_stibp != SPECTRE_V2_USER_NONE && 1995 (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || 1996 retbleed_mitigation == RETBLEED_MITIGATION_IBPB)) { 1997 if (spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT && 1998 spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT_PREFERRED) 1999 pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n"); 2000 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED; 2001 } 2002 pr_info("%s\n", spectre_v2_user_strings[spectre_v2_user_stibp]); 2003 } 2004 2005 static void __init spectre_v2_user_apply_mitigation(void) 2006 { 2007 /* Initialize Indirect Branch Prediction Barrier */ 2008 if (spectre_v2_user_ibpb != SPECTRE_V2_USER_NONE) { 2009 static_branch_enable(&switch_vcpu_ibpb); 2010 2011 switch (spectre_v2_user_ibpb) { 2012 case SPECTRE_V2_USER_STRICT: 2013 static_branch_enable(&switch_mm_always_ibpb); 2014 break; 2015 case SPECTRE_V2_USER_PRCTL: 2016 case SPECTRE_V2_USER_SECCOMP: 2017 static_branch_enable(&switch_mm_cond_ibpb); 2018 break; 2019 default: 2020 break; 2021 } 2022 2023 pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n", 2024 static_key_enabled(&switch_mm_always_ibpb) ? 
2025 "always-on" : "conditional"); 2026 } 2027 } 2028 2029 static const char * const spectre_v2_strings[] = { 2030 [SPECTRE_V2_NONE] = "Vulnerable", 2031 [SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines", 2032 [SPECTRE_V2_LFENCE] = "Mitigation: LFENCE", 2033 [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced / Automatic IBRS", 2034 [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced / Automatic IBRS + LFENCE", 2035 [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced / Automatic IBRS + Retpolines", 2036 [SPECTRE_V2_IBRS] = "Mitigation: IBRS", 2037 }; 2038 2039 static const struct { 2040 const char *option; 2041 enum spectre_v2_mitigation_cmd cmd; 2042 bool secure; 2043 } mitigation_options[] __initconst = { 2044 { "off", SPECTRE_V2_CMD_NONE, false }, 2045 { "on", SPECTRE_V2_CMD_FORCE, true }, 2046 { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false }, 2047 { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false }, 2048 { "retpoline,lfence", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false }, 2049 { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false }, 2050 { "eibrs", SPECTRE_V2_CMD_EIBRS, false }, 2051 { "eibrs,lfence", SPECTRE_V2_CMD_EIBRS_LFENCE, false }, 2052 { "eibrs,retpoline", SPECTRE_V2_CMD_EIBRS_RETPOLINE, false }, 2053 { "auto", SPECTRE_V2_CMD_AUTO, false }, 2054 { "ibrs", SPECTRE_V2_CMD_IBRS, false }, 2055 }; 2056 2057 static void __init spec_v2_print_cond(const char *reason, bool secure) 2058 { 2059 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure) 2060 pr_info("%s selected on command line.\n", reason); 2061 } 2062 2063 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) 2064 { 2065 enum spectre_v2_mitigation_cmd cmd; 2066 char arg[20]; 2067 int ret, i; 2068 2069 cmd = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ? SPECTRE_V2_CMD_AUTO : SPECTRE_V2_CMD_NONE; 2070 if (cmdline_find_option_bool(boot_command_line, "nospectre_v2")) 2071 return SPECTRE_V2_CMD_NONE; 2072 2073 ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); 2074 if (ret < 0) 2075 return cmd; 2076 2077 for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) { 2078 if (!match_option(arg, ret, mitigation_options[i].option)) 2079 continue; 2080 cmd = mitigation_options[i].cmd; 2081 break; 2082 } 2083 2084 if (i >= ARRAY_SIZE(mitigation_options)) { 2085 pr_err("unknown option (%s). Switching to default mode\n", arg); 2086 return cmd; 2087 } 2088 2089 if ((cmd == SPECTRE_V2_CMD_RETPOLINE || 2090 cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || 2091 cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC || 2092 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || 2093 cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && 2094 !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) { 2095 pr_err("%s selected but not compiled in. Switching to AUTO select\n", 2096 mitigation_options[i].option); 2097 return SPECTRE_V2_CMD_AUTO; 2098 } 2099 2100 if ((cmd == SPECTRE_V2_CMD_EIBRS || 2101 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || 2102 cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && 2103 !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { 2104 pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n", 2105 mitigation_options[i].option); 2106 return SPECTRE_V2_CMD_AUTO; 2107 } 2108 2109 if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || 2110 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) && 2111 !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { 2112 pr_err("%s selected, but CPU doesn't have a serializing LFENCE. 
Switching to AUTO select\n", 2113 mitigation_options[i].option); 2114 return SPECTRE_V2_CMD_AUTO; 2115 } 2116 2117 if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY)) { 2118 pr_err("%s selected but not compiled in. Switching to AUTO select\n", 2119 mitigation_options[i].option); 2120 return SPECTRE_V2_CMD_AUTO; 2121 } 2122 2123 if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { 2124 pr_err("%s selected but not Intel CPU. Switching to AUTO select\n", 2125 mitigation_options[i].option); 2126 return SPECTRE_V2_CMD_AUTO; 2127 } 2128 2129 if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) { 2130 pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n", 2131 mitigation_options[i].option); 2132 return SPECTRE_V2_CMD_AUTO; 2133 } 2134 2135 if (cmd == SPECTRE_V2_CMD_IBRS && cpu_feature_enabled(X86_FEATURE_XENPV)) { 2136 pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n", 2137 mitigation_options[i].option); 2138 return SPECTRE_V2_CMD_AUTO; 2139 } 2140 2141 spec_v2_print_cond(mitigation_options[i].option, 2142 mitigation_options[i].secure); 2143 return cmd; 2144 } 2145 2146 static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void) 2147 { 2148 if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) { 2149 pr_err("Kernel not compiled with retpoline; no mitigation available!"); 2150 return SPECTRE_V2_NONE; 2151 } 2152 2153 return SPECTRE_V2_RETPOLINE; 2154 } 2155 2156 static bool __ro_after_init rrsba_disabled; 2157 2158 /* Disable in-kernel use of non-RSB RET predictors */ 2159 static void __init spec_ctrl_disable_kernel_rrsba(void) 2160 { 2161 if (rrsba_disabled) 2162 return; 2163 2164 if (!(x86_arch_cap_msr & ARCH_CAP_RRSBA)) { 2165 rrsba_disabled = true; 2166 return; 2167 } 2168 2169 if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL)) 2170 return; 2171 2172 x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S; 2173 update_spec_ctrl(x86_spec_ctrl_base); 2174 rrsba_disabled = true; 2175 } 2176 2177 static void __init spectre_v2_select_rsb_mitigation(enum spectre_v2_mitigation mode) 2178 { 2179 /* 2180 * WARNING! There are many subtleties to consider when changing *any* 2181 * code related to RSB-related mitigations. Before doing so, carefully 2182 * read the following document, and update if necessary: 2183 * 2184 * Documentation/admin-guide/hw-vuln/rsb.rst 2185 * 2186 * In an overly simplified nutshell: 2187 * 2188 * - User->user RSB attacks are conditionally mitigated during 2189 * context switches by cond_mitigation -> write_ibpb(). 2190 * 2191 * - User->kernel and guest->host attacks are mitigated by eIBRS or 2192 * RSB filling. 2193 * 2194 * Though, depending on config, note that other alternative 2195 * mitigations may end up getting used instead, e.g., IBPB on 2196 * entry/vmexit, call depth tracking, or return thunks. 
2197 */ 2198 2199 switch (mode) { 2200 case SPECTRE_V2_NONE: 2201 break; 2202 2203 case SPECTRE_V2_EIBRS: 2204 case SPECTRE_V2_EIBRS_LFENCE: 2205 case SPECTRE_V2_EIBRS_RETPOLINE: 2206 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { 2207 pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n"); 2208 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE); 2209 } 2210 break; 2211 2212 case SPECTRE_V2_RETPOLINE: 2213 case SPECTRE_V2_LFENCE: 2214 case SPECTRE_V2_IBRS: 2215 pr_info("Spectre v2 / SpectreRSB: Filling RSB on context switch and VMEXIT\n"); 2216 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); 2217 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT); 2218 break; 2219 2220 default: 2221 pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation\n"); 2222 dump_stack(); 2223 break; 2224 } 2225 } 2226 2227 /* 2228 * Set BHI_DIS_S to prevent indirect branches in kernel to be influenced by 2229 * branch history in userspace. Not needed if BHI_NO is set. 2230 */ 2231 static bool __init spec_ctrl_bhi_dis(void) 2232 { 2233 if (!boot_cpu_has(X86_FEATURE_BHI_CTRL)) 2234 return false; 2235 2236 x86_spec_ctrl_base |= SPEC_CTRL_BHI_DIS_S; 2237 update_spec_ctrl(x86_spec_ctrl_base); 2238 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_HW); 2239 2240 return true; 2241 } 2242 2243 enum bhi_mitigations { 2244 BHI_MITIGATION_OFF, 2245 BHI_MITIGATION_AUTO, 2246 BHI_MITIGATION_ON, 2247 BHI_MITIGATION_VMEXIT_ONLY, 2248 }; 2249 2250 static enum bhi_mitigations bhi_mitigation __ro_after_init = 2251 IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_AUTO : BHI_MITIGATION_OFF; 2252 2253 static int __init spectre_bhi_parse_cmdline(char *str) 2254 { 2255 if (!str) 2256 return -EINVAL; 2257 2258 if (!strcmp(str, "off")) 2259 bhi_mitigation = BHI_MITIGATION_OFF; 2260 else if (!strcmp(str, "on")) 2261 bhi_mitigation = BHI_MITIGATION_ON; 2262 else if (!strcmp(str, "vmexit")) 2263 bhi_mitigation = BHI_MITIGATION_VMEXIT_ONLY; 2264 else 2265 pr_err("Ignoring unknown spectre_bhi option (%s)", str); 2266 2267 return 0; 2268 } 2269 early_param("spectre_bhi", spectre_bhi_parse_cmdline); 2270 2271 static void __init bhi_select_mitigation(void) 2272 { 2273 if (!boot_cpu_has(X86_BUG_BHI)) 2274 bhi_mitigation = BHI_MITIGATION_OFF; 2275 2276 if (bhi_mitigation != BHI_MITIGATION_AUTO) 2277 return; 2278 2279 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST)) { 2280 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL)) 2281 bhi_mitigation = BHI_MITIGATION_ON; 2282 else 2283 bhi_mitigation = BHI_MITIGATION_VMEXIT_ONLY; 2284 } else { 2285 bhi_mitigation = BHI_MITIGATION_OFF; 2286 } 2287 } 2288 2289 static void __init bhi_update_mitigation(void) 2290 { 2291 if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE) 2292 bhi_mitigation = BHI_MITIGATION_OFF; 2293 2294 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && 2295 spectre_v2_cmd == SPECTRE_V2_CMD_AUTO) 2296 bhi_mitigation = BHI_MITIGATION_OFF; 2297 } 2298 2299 static void __init bhi_apply_mitigation(void) 2300 { 2301 if (bhi_mitigation == BHI_MITIGATION_OFF) 2302 return; 2303 2304 /* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */ 2305 if (boot_cpu_has(X86_FEATURE_RETPOLINE) && 2306 !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) { 2307 spec_ctrl_disable_kernel_rrsba(); 2308 if (rrsba_disabled) 2309 return; 2310 } 2311 2312 if (!IS_ENABLED(CONFIG_X86_64)) 2313 return; 2314 2315 /* Mitigate in hardware if supported */ 2316 if (spec_ctrl_bhi_dis()) 2317 return; 2318 2319 if (bhi_mitigation == BHI_MITIGATION_VMEXIT_ONLY) { 2320 pr_info("Spectre BHI mitigation: SW BHB 
clearing on VM exit only\n"); 2321 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT); 2322 return; 2323 } 2324 2325 pr_info("Spectre BHI mitigation: SW BHB clearing on syscall and VM exit\n"); 2326 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP); 2327 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT); 2328 } 2329 2330 static void __init spectre_v2_select_mitigation(void) 2331 { 2332 spectre_v2_cmd = spectre_v2_parse_cmdline(); 2333 2334 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && 2335 (spectre_v2_cmd == SPECTRE_V2_CMD_NONE || spectre_v2_cmd == SPECTRE_V2_CMD_AUTO)) 2336 return; 2337 2338 switch (spectre_v2_cmd) { 2339 case SPECTRE_V2_CMD_NONE: 2340 return; 2341 2342 case SPECTRE_V2_CMD_AUTO: 2343 if (!should_mitigate_vuln(X86_BUG_SPECTRE_V2)) 2344 break; 2345 fallthrough; 2346 case SPECTRE_V2_CMD_FORCE: 2347 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { 2348 spectre_v2_enabled = SPECTRE_V2_EIBRS; 2349 break; 2350 } 2351 2352 spectre_v2_enabled = spectre_v2_select_retpoline(); 2353 break; 2354 2355 case SPECTRE_V2_CMD_RETPOLINE_LFENCE: 2356 pr_err(SPECTRE_V2_LFENCE_MSG); 2357 spectre_v2_enabled = SPECTRE_V2_LFENCE; 2358 break; 2359 2360 case SPECTRE_V2_CMD_RETPOLINE_GENERIC: 2361 spectre_v2_enabled = SPECTRE_V2_RETPOLINE; 2362 break; 2363 2364 case SPECTRE_V2_CMD_RETPOLINE: 2365 spectre_v2_enabled = spectre_v2_select_retpoline(); 2366 break; 2367 2368 case SPECTRE_V2_CMD_IBRS: 2369 spectre_v2_enabled = SPECTRE_V2_IBRS; 2370 break; 2371 2372 case SPECTRE_V2_CMD_EIBRS: 2373 spectre_v2_enabled = SPECTRE_V2_EIBRS; 2374 break; 2375 2376 case SPECTRE_V2_CMD_EIBRS_LFENCE: 2377 spectre_v2_enabled = SPECTRE_V2_EIBRS_LFENCE; 2378 break; 2379 2380 case SPECTRE_V2_CMD_EIBRS_RETPOLINE: 2381 spectre_v2_enabled = SPECTRE_V2_EIBRS_RETPOLINE; 2382 break; 2383 } 2384 } 2385 2386 static void __init spectre_v2_update_mitigation(void) 2387 { 2388 if (spectre_v2_cmd == SPECTRE_V2_CMD_AUTO && 2389 !spectre_v2_in_eibrs_mode(spectre_v2_enabled)) { 2390 if (IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY) && 2391 boot_cpu_has_bug(X86_BUG_RETBLEED) && 2392 retbleed_mitigation != RETBLEED_MITIGATION_NONE && 2393 retbleed_mitigation != RETBLEED_MITIGATION_STUFF && 2394 boot_cpu_has(X86_FEATURE_IBRS) && 2395 boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { 2396 spectre_v2_enabled = SPECTRE_V2_IBRS; 2397 } 2398 } 2399 2400 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) 2401 pr_info("%s\n", spectre_v2_strings[spectre_v2_enabled]); 2402 } 2403 2404 static void __init spectre_v2_apply_mitigation(void) 2405 { 2406 if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) 2407 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); 2408 2409 if (spectre_v2_in_ibrs_mode(spectre_v2_enabled)) { 2410 if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) { 2411 msr_set_bit(MSR_EFER, _EFER_AUTOIBRS); 2412 } else { 2413 x86_spec_ctrl_base |= SPEC_CTRL_IBRS; 2414 update_spec_ctrl(x86_spec_ctrl_base); 2415 } 2416 } 2417 2418 switch (spectre_v2_enabled) { 2419 case SPECTRE_V2_NONE: 2420 return; 2421 2422 case SPECTRE_V2_EIBRS: 2423 break; 2424 2425 case SPECTRE_V2_IBRS: 2426 setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS); 2427 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) 2428 pr_warn(SPECTRE_V2_IBRS_PERF_MSG); 2429 break; 2430 2431 case SPECTRE_V2_LFENCE: 2432 case SPECTRE_V2_EIBRS_LFENCE: 2433 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE); 2434 fallthrough; 2435 2436 case SPECTRE_V2_RETPOLINE: 2437 case SPECTRE_V2_EIBRS_RETPOLINE: 2438 setup_force_cpu_cap(X86_FEATURE_RETPOLINE); 2439 break; 2440 } 2441 2442 /* 2443 * Disable alternate RSB predictions in kernel 
when indirect CALLs and 2444 * JMPs gets protection against BHI and Intramode-BTI, but RET 2445 * prediction from a non-RSB predictor is still a risk. 2446 */ 2447 if (spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE || 2448 spectre_v2_enabled == SPECTRE_V2_EIBRS_RETPOLINE || 2449 spectre_v2_enabled == SPECTRE_V2_RETPOLINE) 2450 spec_ctrl_disable_kernel_rrsba(); 2451 2452 spectre_v2_select_rsb_mitigation(spectre_v2_enabled); 2453 2454 /* 2455 * Retpoline protects the kernel, but doesn't protect firmware. IBRS 2456 * and Enhanced IBRS protect firmware too, so enable IBRS around 2457 * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't 2458 * otherwise enabled. 2459 * 2460 * Use "spectre_v2_enabled" to check Enhanced IBRS instead of 2461 * boot_cpu_has(), because the user might select retpoline on the kernel 2462 * command line and if the CPU supports Enhanced IBRS, kernel might 2463 * un-intentionally not enable IBRS around firmware calls. 2464 */ 2465 if (boot_cpu_has_bug(X86_BUG_RETBLEED) && 2466 boot_cpu_has(X86_FEATURE_IBPB) && 2467 (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || 2468 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) { 2469 2470 if (retbleed_mitigation != RETBLEED_MITIGATION_IBPB) { 2471 setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW); 2472 pr_info("Enabling Speculation Barrier for firmware calls\n"); 2473 } 2474 2475 } else if (boot_cpu_has(X86_FEATURE_IBRS) && 2476 !spectre_v2_in_ibrs_mode(spectre_v2_enabled)) { 2477 setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); 2478 pr_info("Enabling Restricted Speculation for firmware calls\n"); 2479 } 2480 } 2481 2482 static void update_stibp_msr(void * __unused) 2483 { 2484 u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP); 2485 update_spec_ctrl(val); 2486 } 2487 2488 /* Update x86_spec_ctrl_base in case SMT state changed. */ 2489 static void update_stibp_strict(void) 2490 { 2491 u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP; 2492 2493 if (sched_smt_active()) 2494 mask |= SPEC_CTRL_STIBP; 2495 2496 if (mask == x86_spec_ctrl_base) 2497 return; 2498 2499 pr_info("Update user space SMT mitigation: STIBP %s\n", 2500 mask & SPEC_CTRL_STIBP ? "always-on" : "off"); 2501 x86_spec_ctrl_base = mask; 2502 on_each_cpu(update_stibp_msr, NULL, 1); 2503 } 2504 2505 /* Update the static key controlling the evaluation of TIF_SPEC_IB */ 2506 static void update_indir_branch_cond(void) 2507 { 2508 if (sched_smt_active()) 2509 static_branch_enable(&switch_to_cond_stibp); 2510 else 2511 static_branch_disable(&switch_to_cond_stibp); 2512 } 2513 2514 #undef pr_fmt 2515 #define pr_fmt(fmt) fmt 2516 2517 /* Update the static key controlling the MDS CPU buffer clear in idle */ 2518 static void update_mds_branch_idle(void) 2519 { 2520 /* 2521 * Enable the idle clearing if SMT is active on CPUs which are 2522 * affected only by MSBDS and not any other MDS variant. 2523 * 2524 * The other variants cannot be mitigated when SMT is enabled, so 2525 * clearing the buffers on idle just to prevent the Store Buffer 2526 * repartitioning leak would be a window dressing exercise. 2527 */ 2528 if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY)) 2529 return; 2530 2531 if (sched_smt_active()) { 2532 static_branch_enable(&cpu_buf_idle_clear); 2533 } else if (mmio_mitigation == MMIO_MITIGATION_OFF || 2534 (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) { 2535 static_branch_disable(&cpu_buf_idle_clear); 2536 } 2537 } 2538 2539 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. 
See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n" 2540 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n" 2541 #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n" 2542 2543 void cpu_bugs_smt_update(void) 2544 { 2545 mutex_lock(&spec_ctrl_mutex); 2546 2547 if (sched_smt_active() && unprivileged_ebpf_enabled() && 2548 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) 2549 pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); 2550 2551 switch (spectre_v2_user_stibp) { 2552 case SPECTRE_V2_USER_NONE: 2553 break; 2554 case SPECTRE_V2_USER_STRICT: 2555 case SPECTRE_V2_USER_STRICT_PREFERRED: 2556 update_stibp_strict(); 2557 break; 2558 case SPECTRE_V2_USER_PRCTL: 2559 case SPECTRE_V2_USER_SECCOMP: 2560 update_indir_branch_cond(); 2561 break; 2562 } 2563 2564 switch (mds_mitigation) { 2565 case MDS_MITIGATION_FULL: 2566 case MDS_MITIGATION_AUTO: 2567 case MDS_MITIGATION_VMWERV: 2568 if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY)) 2569 pr_warn_once(MDS_MSG_SMT); 2570 update_mds_branch_idle(); 2571 break; 2572 case MDS_MITIGATION_OFF: 2573 break; 2574 } 2575 2576 switch (taa_mitigation) { 2577 case TAA_MITIGATION_VERW: 2578 case TAA_MITIGATION_AUTO: 2579 case TAA_MITIGATION_UCODE_NEEDED: 2580 if (sched_smt_active()) 2581 pr_warn_once(TAA_MSG_SMT); 2582 break; 2583 case TAA_MITIGATION_TSX_DISABLED: 2584 case TAA_MITIGATION_OFF: 2585 break; 2586 } 2587 2588 switch (mmio_mitigation) { 2589 case MMIO_MITIGATION_VERW: 2590 case MMIO_MITIGATION_AUTO: 2591 case MMIO_MITIGATION_UCODE_NEEDED: 2592 if (sched_smt_active()) 2593 pr_warn_once(MMIO_MSG_SMT); 2594 break; 2595 case MMIO_MITIGATION_OFF: 2596 break; 2597 } 2598 2599 switch (tsa_mitigation) { 2600 case TSA_MITIGATION_USER_KERNEL: 2601 case TSA_MITIGATION_VM: 2602 case TSA_MITIGATION_AUTO: 2603 case TSA_MITIGATION_FULL: 2604 /* 2605 * TSA-SQ can potentially lead to info leakage between 2606 * SMT threads. 
2607 */ 2608 if (sched_smt_active()) 2609 static_branch_enable(&cpu_buf_idle_clear); 2610 else 2611 static_branch_disable(&cpu_buf_idle_clear); 2612 break; 2613 case TSA_MITIGATION_NONE: 2614 case TSA_MITIGATION_UCODE_NEEDED: 2615 break; 2616 } 2617 2618 mutex_unlock(&spec_ctrl_mutex); 2619 } 2620 2621 #undef pr_fmt 2622 #define pr_fmt(fmt) "Speculative Store Bypass: " fmt 2623 2624 static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE; 2625 2626 /* The kernel command line selection */ 2627 enum ssb_mitigation_cmd { 2628 SPEC_STORE_BYPASS_CMD_NONE, 2629 SPEC_STORE_BYPASS_CMD_AUTO, 2630 SPEC_STORE_BYPASS_CMD_ON, 2631 SPEC_STORE_BYPASS_CMD_PRCTL, 2632 SPEC_STORE_BYPASS_CMD_SECCOMP, 2633 }; 2634 2635 static const char * const ssb_strings[] = { 2636 [SPEC_STORE_BYPASS_NONE] = "Vulnerable", 2637 [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled", 2638 [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl", 2639 [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp", 2640 }; 2641 2642 static const struct { 2643 const char *option; 2644 enum ssb_mitigation_cmd cmd; 2645 } ssb_mitigation_options[] __initconst = { 2646 { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */ 2647 { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */ 2648 { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */ 2649 { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */ 2650 { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */ 2651 }; 2652 2653 static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void) 2654 { 2655 enum ssb_mitigation_cmd cmd; 2656 char arg[20]; 2657 int ret, i; 2658 2659 cmd = IS_ENABLED(CONFIG_MITIGATION_SSB) ? 2660 SPEC_STORE_BYPASS_CMD_AUTO : SPEC_STORE_BYPASS_CMD_NONE; 2661 if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") || 2662 cpu_mitigations_off()) { 2663 return SPEC_STORE_BYPASS_CMD_NONE; 2664 } else { 2665 ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable", 2666 arg, sizeof(arg)); 2667 if (ret < 0) 2668 return cmd; 2669 2670 for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) { 2671 if (!match_option(arg, ret, ssb_mitigation_options[i].option)) 2672 continue; 2673 2674 cmd = ssb_mitigation_options[i].cmd; 2675 break; 2676 } 2677 2678 if (i >= ARRAY_SIZE(ssb_mitigation_options)) { 2679 pr_err("unknown option (%s). Switching to default mode\n", arg); 2680 return cmd; 2681 } 2682 } 2683 2684 return cmd; 2685 } 2686 2687 static void __init ssb_select_mitigation(void) 2688 { 2689 enum ssb_mitigation_cmd cmd; 2690 2691 if (!boot_cpu_has(X86_FEATURE_SSBD)) 2692 goto out; 2693 2694 cmd = ssb_parse_cmdline(); 2695 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) && 2696 (cmd == SPEC_STORE_BYPASS_CMD_NONE || 2697 cmd == SPEC_STORE_BYPASS_CMD_AUTO)) 2698 return; 2699 2700 switch (cmd) { 2701 case SPEC_STORE_BYPASS_CMD_SECCOMP: 2702 /* 2703 * Choose prctl+seccomp as the default mode if seccomp is 2704 * enabled. 
2705 */ 2706 if (IS_ENABLED(CONFIG_SECCOMP)) 2707 ssb_mode = SPEC_STORE_BYPASS_SECCOMP; 2708 else 2709 ssb_mode = SPEC_STORE_BYPASS_PRCTL; 2710 break; 2711 case SPEC_STORE_BYPASS_CMD_ON: 2712 ssb_mode = SPEC_STORE_BYPASS_DISABLE; 2713 break; 2714 case SPEC_STORE_BYPASS_CMD_AUTO: 2715 case SPEC_STORE_BYPASS_CMD_PRCTL: 2716 ssb_mode = SPEC_STORE_BYPASS_PRCTL; 2717 break; 2718 case SPEC_STORE_BYPASS_CMD_NONE: 2719 break; 2720 } 2721 2722 out: 2723 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) 2724 pr_info("%s\n", ssb_strings[ssb_mode]); 2725 } 2726 2727 static void __init ssb_apply_mitigation(void) 2728 { 2729 /* 2730 * We have three CPU feature flags that are in play here: 2731 * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible. 2732 * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass 2733 * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation 2734 */ 2735 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) { 2736 setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE); 2737 /* 2738 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may 2739 * use a completely different MSR and bit dependent on family. 2740 */ 2741 if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) && 2742 !static_cpu_has(X86_FEATURE_AMD_SSBD)) { 2743 x86_amd_ssb_disable(); 2744 } else { 2745 x86_spec_ctrl_base |= SPEC_CTRL_SSBD; 2746 update_spec_ctrl(x86_spec_ctrl_base); 2747 } 2748 } 2749 } 2750 2751 #undef pr_fmt 2752 #define pr_fmt(fmt) "Speculation prctl: " fmt 2753 2754 static void task_update_spec_tif(struct task_struct *tsk) 2755 { 2756 /* Force the update of the real TIF bits */ 2757 set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE); 2758 2759 /* 2760 * Immediately update the speculation control MSRs for the current 2761 * task, but for a non-current task delay setting the CPU 2762 * mitigation until it is scheduled next. 2763 * 2764 * This can only happen for SECCOMP mitigation. For PRCTL it's 2765 * always the current task. 
2766 */ 2767 if (tsk == current) 2768 speculation_ctrl_update_current(); 2769 } 2770 2771 static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl) 2772 { 2773 2774 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush)) 2775 return -EPERM; 2776 2777 switch (ctrl) { 2778 case PR_SPEC_ENABLE: 2779 set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH); 2780 return 0; 2781 case PR_SPEC_DISABLE: 2782 clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH); 2783 return 0; 2784 default: 2785 return -ERANGE; 2786 } 2787 } 2788 2789 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) 2790 { 2791 if (ssb_mode != SPEC_STORE_BYPASS_PRCTL && 2792 ssb_mode != SPEC_STORE_BYPASS_SECCOMP) 2793 return -ENXIO; 2794 2795 switch (ctrl) { 2796 case PR_SPEC_ENABLE: 2797 /* If speculation is force disabled, enable is not allowed */ 2798 if (task_spec_ssb_force_disable(task)) 2799 return -EPERM; 2800 task_clear_spec_ssb_disable(task); 2801 task_clear_spec_ssb_noexec(task); 2802 task_update_spec_tif(task); 2803 break; 2804 case PR_SPEC_DISABLE: 2805 task_set_spec_ssb_disable(task); 2806 task_clear_spec_ssb_noexec(task); 2807 task_update_spec_tif(task); 2808 break; 2809 case PR_SPEC_FORCE_DISABLE: 2810 task_set_spec_ssb_disable(task); 2811 task_set_spec_ssb_force_disable(task); 2812 task_clear_spec_ssb_noexec(task); 2813 task_update_spec_tif(task); 2814 break; 2815 case PR_SPEC_DISABLE_NOEXEC: 2816 if (task_spec_ssb_force_disable(task)) 2817 return -EPERM; 2818 task_set_spec_ssb_disable(task); 2819 task_set_spec_ssb_noexec(task); 2820 task_update_spec_tif(task); 2821 break; 2822 default: 2823 return -ERANGE; 2824 } 2825 return 0; 2826 } 2827 2828 static bool is_spec_ib_user_controlled(void) 2829 { 2830 return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL || 2831 spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || 2832 spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || 2833 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP; 2834 } 2835 2836 static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) 2837 { 2838 switch (ctrl) { 2839 case PR_SPEC_ENABLE: 2840 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && 2841 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) 2842 return 0; 2843 2844 /* 2845 * With strict mode for both IBPB and STIBP, the instruction 2846 * code paths avoid checking this task flag and instead, 2847 * unconditionally run the instruction. However, STIBP and IBPB 2848 * are independent and either can be set to conditionally 2849 * enabled regardless of the mode of the other. 2850 * 2851 * If either is set to conditional, allow the task flag to be 2852 * updated, unless it was force-disabled by a previous prctl 2853 * call. Currently, this is possible on an AMD CPU which has the 2854 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the 2855 * kernel is booted with 'spectre_v2_user=seccomp', then 2856 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and 2857 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED. 2858 */ 2859 if (!is_spec_ib_user_controlled() || 2860 task_spec_ib_force_disable(task)) 2861 return -EPERM; 2862 2863 task_clear_spec_ib_disable(task); 2864 task_update_spec_tif(task); 2865 break; 2866 case PR_SPEC_DISABLE: 2867 case PR_SPEC_FORCE_DISABLE: 2868 /* 2869 * Indirect branch speculation is always allowed when 2870 * mitigation is force disabled. 
2871 */ 2872 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && 2873 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) 2874 return -EPERM; 2875 2876 if (!is_spec_ib_user_controlled()) 2877 return 0; 2878 2879 task_set_spec_ib_disable(task); 2880 if (ctrl == PR_SPEC_FORCE_DISABLE) 2881 task_set_spec_ib_force_disable(task); 2882 task_update_spec_tif(task); 2883 if (task == current) 2884 indirect_branch_prediction_barrier(); 2885 break; 2886 default: 2887 return -ERANGE; 2888 } 2889 return 0; 2890 } 2891 2892 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, 2893 unsigned long ctrl) 2894 { 2895 switch (which) { 2896 case PR_SPEC_STORE_BYPASS: 2897 return ssb_prctl_set(task, ctrl); 2898 case PR_SPEC_INDIRECT_BRANCH: 2899 return ib_prctl_set(task, ctrl); 2900 case PR_SPEC_L1D_FLUSH: 2901 return l1d_flush_prctl_set(task, ctrl); 2902 default: 2903 return -ENODEV; 2904 } 2905 } 2906 2907 #ifdef CONFIG_SECCOMP 2908 void arch_seccomp_spec_mitigate(struct task_struct *task) 2909 { 2910 if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) 2911 ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); 2912 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || 2913 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) 2914 ib_prctl_set(task, PR_SPEC_FORCE_DISABLE); 2915 } 2916 #endif 2917 2918 static int l1d_flush_prctl_get(struct task_struct *task) 2919 { 2920 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush)) 2921 return PR_SPEC_FORCE_DISABLE; 2922 2923 if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH)) 2924 return PR_SPEC_PRCTL | PR_SPEC_ENABLE; 2925 else 2926 return PR_SPEC_PRCTL | PR_SPEC_DISABLE; 2927 } 2928 2929 static int ssb_prctl_get(struct task_struct *task) 2930 { 2931 switch (ssb_mode) { 2932 case SPEC_STORE_BYPASS_NONE: 2933 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) 2934 return PR_SPEC_ENABLE; 2935 return PR_SPEC_NOT_AFFECTED; 2936 case SPEC_STORE_BYPASS_DISABLE: 2937 return PR_SPEC_DISABLE; 2938 case SPEC_STORE_BYPASS_SECCOMP: 2939 case SPEC_STORE_BYPASS_PRCTL: 2940 if (task_spec_ssb_force_disable(task)) 2941 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; 2942 if (task_spec_ssb_noexec(task)) 2943 return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC; 2944 if (task_spec_ssb_disable(task)) 2945 return PR_SPEC_PRCTL | PR_SPEC_DISABLE; 2946 return PR_SPEC_PRCTL | PR_SPEC_ENABLE; 2947 } 2948 BUG(); 2949 } 2950 2951 static int ib_prctl_get(struct task_struct *task) 2952 { 2953 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) 2954 return PR_SPEC_NOT_AFFECTED; 2955 2956 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && 2957 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) 2958 return PR_SPEC_ENABLE; 2959 else if (is_spec_ib_user_controlled()) { 2960 if (task_spec_ib_force_disable(task)) 2961 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; 2962 if (task_spec_ib_disable(task)) 2963 return PR_SPEC_PRCTL | PR_SPEC_DISABLE; 2964 return PR_SPEC_PRCTL | PR_SPEC_ENABLE; 2965 } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || 2966 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || 2967 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) 2968 return PR_SPEC_DISABLE; 2969 else 2970 return PR_SPEC_NOT_AFFECTED; 2971 } 2972 2973 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) 2974 { 2975 switch (which) { 2976 case PR_SPEC_STORE_BYPASS: 2977 return ssb_prctl_get(task); 2978 case PR_SPEC_INDIRECT_BRANCH: 2979 return ib_prctl_get(task); 2980 case PR_SPEC_L1D_FLUSH: 2981 return l1d_flush_prctl_get(task); 2982 default: 2983 return -ENODEV; 2984 } 2985 } 2986 2987 
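/*
 * Propagate the boot CPU's speculation control setup to an AP: write the
 * accumulated SPEC_CTRL base value if the MSR is available, and re-apply
 * the AMD-specific (non-SPEC_CTRL) Speculative Store Bypass disable when
 * that mitigation mode was selected.
 */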
void x86_spec_ctrl_setup_ap(void) 2988 { 2989 if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) 2990 update_spec_ctrl(x86_spec_ctrl_base); 2991 2992 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) 2993 x86_amd_ssb_disable(); 2994 } 2995 2996 bool itlb_multihit_kvm_mitigation; 2997 EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation); 2998 2999 #undef pr_fmt 3000 #define pr_fmt(fmt) "L1TF: " fmt 3001 3002 /* Default mitigation for L1TF-affected CPUs */ 3003 enum l1tf_mitigations l1tf_mitigation __ro_after_init = 3004 IS_ENABLED(CONFIG_MITIGATION_L1TF) ? L1TF_MITIGATION_AUTO : L1TF_MITIGATION_OFF; 3005 #if IS_ENABLED(CONFIG_KVM_INTEL) 3006 EXPORT_SYMBOL_GPL(l1tf_mitigation); 3007 #endif 3008 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; 3009 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); 3010 3011 /* 3012 * These CPUs all support 44bits physical address space internally in the 3013 * cache but CPUID can report a smaller number of physical address bits. 3014 * 3015 * The L1TF mitigation uses the top most address bit for the inversion of 3016 * non present PTEs. When the installed memory reaches into the top most 3017 * address bit due to memory holes, which has been observed on machines 3018 * which report 36bits physical address bits and have 32G RAM installed, 3019 * then the mitigation range check in l1tf_select_mitigation() triggers. 3020 * This is a false positive because the mitigation is still possible due to 3021 * the fact that the cache uses 44bit internally. Use the cache bits 3022 * instead of the reported physical bits and adjust them on the affected 3023 * machines to 44bit if the reported bits are less than 44. 3024 */ 3025 static void override_cache_bits(struct cpuinfo_x86 *c) 3026 { 3027 if (c->x86 != 6) 3028 return; 3029 3030 switch (c->x86_vfm) { 3031 case INTEL_NEHALEM: 3032 case INTEL_WESTMERE: 3033 case INTEL_SANDYBRIDGE: 3034 case INTEL_IVYBRIDGE: 3035 case INTEL_HASWELL: 3036 case INTEL_HASWELL_L: 3037 case INTEL_HASWELL_G: 3038 case INTEL_BROADWELL: 3039 case INTEL_BROADWELL_G: 3040 case INTEL_SKYLAKE_L: 3041 case INTEL_SKYLAKE: 3042 case INTEL_KABYLAKE_L: 3043 case INTEL_KABYLAKE: 3044 if (c->x86_cache_bits < 44) 3045 c->x86_cache_bits = 44; 3046 break; 3047 } 3048 } 3049 3050 static void __init l1tf_select_mitigation(void) 3051 { 3052 if (!boot_cpu_has_bug(X86_BUG_L1TF)) { 3053 l1tf_mitigation = L1TF_MITIGATION_OFF; 3054 return; 3055 } 3056 3057 if (l1tf_mitigation != L1TF_MITIGATION_AUTO) 3058 return; 3059 3060 if (!should_mitigate_vuln(X86_BUG_L1TF)) { 3061 l1tf_mitigation = L1TF_MITIGATION_OFF; 3062 return; 3063 } 3064 3065 if (smt_mitigations == SMT_MITIGATIONS_ON) 3066 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; 3067 else 3068 l1tf_mitigation = L1TF_MITIGATION_FLUSH; 3069 } 3070 3071 static void __init l1tf_apply_mitigation(void) 3072 { 3073 u64 half_pa; 3074 3075 if (!boot_cpu_has_bug(X86_BUG_L1TF)) 3076 return; 3077 3078 override_cache_bits(&boot_cpu_data); 3079 3080 switch (l1tf_mitigation) { 3081 case L1TF_MITIGATION_OFF: 3082 case L1TF_MITIGATION_FLUSH_NOWARN: 3083 case L1TF_MITIGATION_FLUSH: 3084 case L1TF_MITIGATION_AUTO: 3085 break; 3086 case L1TF_MITIGATION_FLUSH_NOSMT: 3087 case L1TF_MITIGATION_FULL: 3088 cpu_smt_disable(false); 3089 break; 3090 case L1TF_MITIGATION_FULL_FORCE: 3091 cpu_smt_disable(true); 3092 break; 3093 } 3094 3095 #if CONFIG_PGTABLE_LEVELS == 2 3096 pr_warn("Kernel not compiled for PAE. 
No mitigation for L1TF\n"); 3097 return; 3098 #endif 3099 3100 half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; 3101 if (l1tf_mitigation != L1TF_MITIGATION_OFF && 3102 e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { 3103 pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n"); 3104 pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n", 3105 half_pa); 3106 pr_info("However, doing so will make a part of your RAM unusable.\n"); 3107 pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n"); 3108 return; 3109 } 3110 3111 setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV); 3112 } 3113 3114 static int __init l1tf_cmdline(char *str) 3115 { 3116 if (!boot_cpu_has_bug(X86_BUG_L1TF)) 3117 return 0; 3118 3119 if (!str) 3120 return -EINVAL; 3121 3122 if (!strcmp(str, "off")) 3123 l1tf_mitigation = L1TF_MITIGATION_OFF; 3124 else if (!strcmp(str, "flush,nowarn")) 3125 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN; 3126 else if (!strcmp(str, "flush")) 3127 l1tf_mitigation = L1TF_MITIGATION_FLUSH; 3128 else if (!strcmp(str, "flush,nosmt")) 3129 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; 3130 else if (!strcmp(str, "full")) 3131 l1tf_mitigation = L1TF_MITIGATION_FULL; 3132 else if (!strcmp(str, "full,force")) 3133 l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE; 3134 3135 return 0; 3136 } 3137 early_param("l1tf", l1tf_cmdline); 3138 3139 #undef pr_fmt 3140 #define pr_fmt(fmt) "Speculative Return Stack Overflow: " fmt 3141 3142 static const char * const srso_strings[] = { 3143 [SRSO_MITIGATION_NONE] = "Vulnerable", 3144 [SRSO_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", 3145 [SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED] = "Vulnerable: Safe RET, no microcode", 3146 [SRSO_MITIGATION_MICROCODE] = "Vulnerable: Microcode, no safe RET", 3147 [SRSO_MITIGATION_NOSMT] = "Mitigation: SMT disabled", 3148 [SRSO_MITIGATION_SAFE_RET] = "Mitigation: Safe RET", 3149 [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB", 3150 [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only", 3151 [SRSO_MITIGATION_BP_SPEC_REDUCE] = "Mitigation: Reduced Speculation" 3152 }; 3153 3154 static int __init srso_parse_cmdline(char *str) 3155 { 3156 if (!str) 3157 return -EINVAL; 3158 3159 if (!strcmp(str, "off")) 3160 srso_mitigation = SRSO_MITIGATION_NONE; 3161 else if (!strcmp(str, "microcode")) 3162 srso_mitigation = SRSO_MITIGATION_MICROCODE; 3163 else if (!strcmp(str, "safe-ret")) 3164 srso_mitigation = SRSO_MITIGATION_SAFE_RET; 3165 else if (!strcmp(str, "ibpb")) 3166 srso_mitigation = SRSO_MITIGATION_IBPB; 3167 else if (!strcmp(str, "ibpb-vmexit")) 3168 srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT; 3169 else 3170 pr_err("Ignoring unknown SRSO option (%s).", str); 3171 3172 return 0; 3173 } 3174 early_param("spec_rstack_overflow", srso_parse_cmdline); 3175 3176 #define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options." 3177 3178 static void __init srso_select_mitigation(void) 3179 { 3180 if (!boot_cpu_has_bug(X86_BUG_SRSO)) { 3181 srso_mitigation = SRSO_MITIGATION_NONE; 3182 return; 3183 } 3184 3185 if (srso_mitigation == SRSO_MITIGATION_AUTO) { 3186 /* 3187 * Use safe-RET if user->kernel or guest->host protection is 3188 * required. Otherwise the 'microcode' mitigation is sufficient 3189 * to protect the user->user and guest->guest vectors. 
3190 */ 3191 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) || 3192 (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) && 3193 !boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO))) { 3194 srso_mitigation = SRSO_MITIGATION_SAFE_RET; 3195 } else if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) || 3196 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) { 3197 srso_mitigation = SRSO_MITIGATION_MICROCODE; 3198 } else { 3199 srso_mitigation = SRSO_MITIGATION_NONE; 3200 return; 3201 } 3202 } 3203 3204 /* Zen1/2 with SMT off aren't vulnerable to SRSO. */ 3205 if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) { 3206 srso_mitigation = SRSO_MITIGATION_NOSMT; 3207 return; 3208 } 3209 3210 if (!boot_cpu_has(X86_FEATURE_IBPB_BRTYPE)) { 3211 pr_warn("IBPB-extending microcode not applied!\n"); 3212 pr_warn(SRSO_NOTICE); 3213 3214 /* 3215 * Safe-RET provides partial mitigation without microcode, but 3216 * other mitigations require microcode to provide any 3217 * mitigations. 3218 */ 3219 if (srso_mitigation == SRSO_MITIGATION_SAFE_RET) 3220 srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED; 3221 else 3222 srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED; 3223 } 3224 3225 switch (srso_mitigation) { 3226 case SRSO_MITIGATION_SAFE_RET: 3227 case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED: 3228 if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO)) { 3229 srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT; 3230 goto ibpb_on_vmexit; 3231 } 3232 3233 if (!IS_ENABLED(CONFIG_MITIGATION_SRSO)) { 3234 pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n"); 3235 srso_mitigation = SRSO_MITIGATION_NONE; 3236 } 3237 break; 3238 ibpb_on_vmexit: 3239 case SRSO_MITIGATION_IBPB_ON_VMEXIT: 3240 if (boot_cpu_has(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) { 3241 pr_notice("Reducing speculation to address VM/HV SRSO attack vector.\n"); 3242 srso_mitigation = SRSO_MITIGATION_BP_SPEC_REDUCE; 3243 break; 3244 } 3245 fallthrough; 3246 case SRSO_MITIGATION_IBPB: 3247 if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) { 3248 pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n"); 3249 srso_mitigation = SRSO_MITIGATION_NONE; 3250 } 3251 break; 3252 default: 3253 break; 3254 } 3255 } 3256 3257 static void __init srso_update_mitigation(void) 3258 { 3259 /* If retbleed is using IBPB, that works for SRSO as well */ 3260 if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB && 3261 boot_cpu_has(X86_FEATURE_IBPB_BRTYPE)) 3262 srso_mitigation = SRSO_MITIGATION_IBPB; 3263 3264 if (boot_cpu_has_bug(X86_BUG_SRSO) && 3265 !cpu_mitigations_off()) 3266 pr_info("%s\n", srso_strings[srso_mitigation]); 3267 } 3268 3269 static void __init srso_apply_mitigation(void) 3270 { 3271 /* 3272 * Clear the feature flag if this mitigation is not selected as that 3273 * feature flag controls the BpSpecReduce MSR bit toggling in KVM. 3274 */ 3275 if (srso_mitigation != SRSO_MITIGATION_BP_SPEC_REDUCE) 3276 setup_clear_cpu_cap(X86_FEATURE_SRSO_BP_SPEC_REDUCE); 3277 3278 if (srso_mitigation == SRSO_MITIGATION_NONE) { 3279 if (boot_cpu_has(X86_FEATURE_SBPB)) 3280 x86_pred_cmd = PRED_CMD_SBPB; 3281 return; 3282 } 3283 3284 switch (srso_mitigation) { 3285 case SRSO_MITIGATION_SAFE_RET: 3286 case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED: 3287 /* 3288 * Enable the return thunk for generated code 3289 * like ftrace, static_call, etc. 
3290 */ 3291 setup_force_cpu_cap(X86_FEATURE_RETHUNK); 3292 setup_force_cpu_cap(X86_FEATURE_UNRET); 3293 3294 if (boot_cpu_data.x86 == 0x19) { 3295 setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS); 3296 set_return_thunk(srso_alias_return_thunk); 3297 } else { 3298 setup_force_cpu_cap(X86_FEATURE_SRSO); 3299 set_return_thunk(srso_return_thunk); 3300 } 3301 break; 3302 case SRSO_MITIGATION_IBPB: 3303 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); 3304 /* 3305 * IBPB on entry already obviates the need for 3306 * software-based untraining so clear those in case some 3307 * other mitigation like Retbleed has selected them. 3308 */ 3309 setup_clear_cpu_cap(X86_FEATURE_UNRET); 3310 setup_clear_cpu_cap(X86_FEATURE_RETHUNK); 3311 fallthrough; 3312 case SRSO_MITIGATION_IBPB_ON_VMEXIT: 3313 setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); 3314 /* 3315 * There is no need for RSB filling: entry_ibpb() ensures 3316 * all predictions, including the RSB, are invalidated, 3317 * regardless of IBPB implementation. 3318 */ 3319 setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT); 3320 break; 3321 default: 3322 break; 3323 } 3324 } 3325 3326 #undef pr_fmt 3327 #define pr_fmt(fmt) fmt 3328 3329 #ifdef CONFIG_SYSFS 3330 3331 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion" 3332 3333 #if IS_ENABLED(CONFIG_KVM_INTEL) 3334 static const char * const l1tf_vmx_states[] = { 3335 [VMENTER_L1D_FLUSH_AUTO] = "auto", 3336 [VMENTER_L1D_FLUSH_NEVER] = "vulnerable", 3337 [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes", 3338 [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes", 3339 [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled", 3340 [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary" 3341 }; 3342 3343 static ssize_t l1tf_show_state(char *buf) 3344 { 3345 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) 3346 return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG); 3347 3348 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED || 3349 (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER && 3350 sched_smt_active())) { 3351 return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG, 3352 l1tf_vmx_states[l1tf_vmx_mitigation]); 3353 } 3354 3355 return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG, 3356 l1tf_vmx_states[l1tf_vmx_mitigation], 3357 sched_smt_active() ? "vulnerable" : "disabled"); 3358 } 3359 3360 static ssize_t itlb_multihit_show_state(char *buf) 3361 { 3362 if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) || 3363 !boot_cpu_has(X86_FEATURE_VMX)) 3364 return sysfs_emit(buf, "KVM: Mitigation: VMX unsupported\n"); 3365 else if (!(cr4_read_shadow() & X86_CR4_VMXE)) 3366 return sysfs_emit(buf, "KVM: Mitigation: VMX disabled\n"); 3367 else if (itlb_multihit_kvm_mitigation) 3368 return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n"); 3369 else 3370 return sysfs_emit(buf, "KVM: Vulnerable\n"); 3371 } 3372 #else 3373 static ssize_t l1tf_show_state(char *buf) 3374 { 3375 return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG); 3376 } 3377 3378 static ssize_t itlb_multihit_show_state(char *buf) 3379 { 3380 return sysfs_emit(buf, "Processor vulnerable\n"); 3381 } 3382 #endif 3383 3384 static ssize_t mds_show_state(char *buf) 3385 { 3386 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { 3387 return sysfs_emit(buf, "%s; SMT Host state unknown\n", 3388 mds_strings[mds_mitigation]); 3389 } 3390 3391 if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) { 3392 return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], 3393 (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" : 3394 sched_smt_active() ? 
"mitigated" : "disabled")); 3395 } 3396 3397 return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], 3398 sched_smt_active() ? "vulnerable" : "disabled"); 3399 } 3400 3401 static ssize_t tsx_async_abort_show_state(char *buf) 3402 { 3403 if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) || 3404 (taa_mitigation == TAA_MITIGATION_OFF)) 3405 return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]); 3406 3407 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { 3408 return sysfs_emit(buf, "%s; SMT Host state unknown\n", 3409 taa_strings[taa_mitigation]); 3410 } 3411 3412 return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation], 3413 sched_smt_active() ? "vulnerable" : "disabled"); 3414 } 3415 3416 static ssize_t mmio_stale_data_show_state(char *buf) 3417 { 3418 if (mmio_mitigation == MMIO_MITIGATION_OFF) 3419 return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]); 3420 3421 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { 3422 return sysfs_emit(buf, "%s; SMT Host state unknown\n", 3423 mmio_strings[mmio_mitigation]); 3424 } 3425 3426 return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation], 3427 sched_smt_active() ? "vulnerable" : "disabled"); 3428 } 3429 3430 static ssize_t rfds_show_state(char *buf) 3431 { 3432 return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]); 3433 } 3434 3435 static ssize_t old_microcode_show_state(char *buf) 3436 { 3437 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) 3438 return sysfs_emit(buf, "Unknown: running under hypervisor"); 3439 3440 return sysfs_emit(buf, "Vulnerable\n"); 3441 } 3442 3443 static ssize_t its_show_state(char *buf) 3444 { 3445 return sysfs_emit(buf, "%s\n", its_strings[its_mitigation]); 3446 } 3447 3448 static char *stibp_state(void) 3449 { 3450 if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && 3451 !boot_cpu_has(X86_FEATURE_AUTOIBRS)) 3452 return ""; 3453 3454 switch (spectre_v2_user_stibp) { 3455 case SPECTRE_V2_USER_NONE: 3456 return "; STIBP: disabled"; 3457 case SPECTRE_V2_USER_STRICT: 3458 return "; STIBP: forced"; 3459 case SPECTRE_V2_USER_STRICT_PREFERRED: 3460 return "; STIBP: always-on"; 3461 case SPECTRE_V2_USER_PRCTL: 3462 case SPECTRE_V2_USER_SECCOMP: 3463 if (static_key_enabled(&switch_to_cond_stibp)) 3464 return "; STIBP: conditional"; 3465 } 3466 return ""; 3467 } 3468 3469 static char *ibpb_state(void) 3470 { 3471 if (boot_cpu_has(X86_FEATURE_IBPB)) { 3472 if (static_key_enabled(&switch_mm_always_ibpb)) 3473 return "; IBPB: always-on"; 3474 if (static_key_enabled(&switch_mm_cond_ibpb)) 3475 return "; IBPB: conditional"; 3476 return "; IBPB: disabled"; 3477 } 3478 return ""; 3479 } 3480 3481 static char *pbrsb_eibrs_state(void) 3482 { 3483 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { 3484 if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) || 3485 boot_cpu_has(X86_FEATURE_RSB_VMEXIT)) 3486 return "; PBRSB-eIBRS: SW sequence"; 3487 else 3488 return "; PBRSB-eIBRS: Vulnerable"; 3489 } else { 3490 return "; PBRSB-eIBRS: Not affected"; 3491 } 3492 } 3493 3494 static const char *spectre_bhi_state(void) 3495 { 3496 if (!boot_cpu_has_bug(X86_BUG_BHI)) 3497 return "; BHI: Not affected"; 3498 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW)) 3499 return "; BHI: BHI_DIS_S"; 3500 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP)) 3501 return "; BHI: SW loop, KVM: SW loop"; 3502 else if (boot_cpu_has(X86_FEATURE_RETPOLINE) && 3503 !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) && 3504 rrsba_disabled) 3505 return "; BHI: Retpoline"; 3506 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_VMEXIT)) 3507 return "; BHI: 
Vulnerable, KVM: SW loop"; 3508 3509 return "; BHI: Vulnerable"; 3510 } 3511 3512 static ssize_t spectre_v2_show_state(char *buf) 3513 { 3514 if (spectre_v2_enabled == SPECTRE_V2_LFENCE) 3515 return sysfs_emit(buf, "Vulnerable: LFENCE\n"); 3516 3517 if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) 3518 return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n"); 3519 3520 if (sched_smt_active() && unprivileged_ebpf_enabled() && 3521 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) 3522 return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n"); 3523 3524 return sysfs_emit(buf, "%s%s%s%s%s%s%s%s\n", 3525 spectre_v2_strings[spectre_v2_enabled], 3526 ibpb_state(), 3527 boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? "; IBRS_FW" : "", 3528 stibp_state(), 3529 boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? "; RSB filling" : "", 3530 pbrsb_eibrs_state(), 3531 spectre_bhi_state(), 3532 /* this should always be at the end */ 3533 spectre_v2_module_string()); 3534 } 3535 3536 static ssize_t srbds_show_state(char *buf) 3537 { 3538 return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]); 3539 } 3540 3541 static ssize_t retbleed_show_state(char *buf) 3542 { 3543 if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || 3544 retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { 3545 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && 3546 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) 3547 return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n"); 3548 3549 return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation], 3550 !sched_smt_active() ? "disabled" : 3551 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || 3552 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ? 
3553 "enabled with STIBP protection" : "vulnerable"); 3554 } 3555 3556 return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]); 3557 } 3558 3559 static ssize_t srso_show_state(char *buf) 3560 { 3561 return sysfs_emit(buf, "%s\n", srso_strings[srso_mitigation]); 3562 } 3563 3564 static ssize_t gds_show_state(char *buf) 3565 { 3566 return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]); 3567 } 3568 3569 static ssize_t tsa_show_state(char *buf) 3570 { 3571 return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]); 3572 } 3573 3574 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, 3575 char *buf, unsigned int bug) 3576 { 3577 if (!boot_cpu_has_bug(bug)) 3578 return sysfs_emit(buf, "Not affected\n"); 3579 3580 switch (bug) { 3581 case X86_BUG_CPU_MELTDOWN: 3582 if (boot_cpu_has(X86_FEATURE_PTI)) 3583 return sysfs_emit(buf, "Mitigation: PTI\n"); 3584 3585 if (hypervisor_is_type(X86_HYPER_XEN_PV)) 3586 return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n"); 3587 3588 break; 3589 3590 case X86_BUG_SPECTRE_V1: 3591 return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]); 3592 3593 case X86_BUG_SPECTRE_V2: 3594 return spectre_v2_show_state(buf); 3595 3596 case X86_BUG_SPEC_STORE_BYPASS: 3597 return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]); 3598 3599 case X86_BUG_L1TF: 3600 if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV)) 3601 return l1tf_show_state(buf); 3602 break; 3603 3604 case X86_BUG_MDS: 3605 return mds_show_state(buf); 3606 3607 case X86_BUG_TAA: 3608 return tsx_async_abort_show_state(buf); 3609 3610 case X86_BUG_ITLB_MULTIHIT: 3611 return itlb_multihit_show_state(buf); 3612 3613 case X86_BUG_SRBDS: 3614 return srbds_show_state(buf); 3615 3616 case X86_BUG_MMIO_STALE_DATA: 3617 return mmio_stale_data_show_state(buf); 3618 3619 case X86_BUG_RETBLEED: 3620 return retbleed_show_state(buf); 3621 3622 case X86_BUG_SRSO: 3623 return srso_show_state(buf); 3624 3625 case X86_BUG_GDS: 3626 return gds_show_state(buf); 3627 3628 case X86_BUG_RFDS: 3629 return rfds_show_state(buf); 3630 3631 case X86_BUG_OLD_MICROCODE: 3632 return old_microcode_show_state(buf); 3633 3634 case X86_BUG_ITS: 3635 return its_show_state(buf); 3636 3637 case X86_BUG_TSA: 3638 return tsa_show_state(buf); 3639 3640 default: 3641 break; 3642 } 3643 3644 return sysfs_emit(buf, "Vulnerable\n"); 3645 } 3646 3647 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) 3648 { 3649 return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN); 3650 } 3651 3652 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) 3653 { 3654 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1); 3655 } 3656 3657 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) 3658 { 3659 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2); 3660 } 3661 3662 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) 3663 { 3664 return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS); 3665 } 3666 3667 ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf) 3668 { 3669 return cpu_show_common(dev, attr, buf, X86_BUG_L1TF); 3670 } 3671 3672 ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf) 3673 { 3674 return cpu_show_common(dev, attr, buf, X86_BUG_MDS); 3675 } 3676 3677 ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute 
*attr, char *buf) 3678 { 3679 return cpu_show_common(dev, attr, buf, X86_BUG_TAA); 3680 } 3681 3682 ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf) 3683 { 3684 return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT); 3685 } 3686 3687 ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf) 3688 { 3689 return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS); 3690 } 3691 3692 ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf) 3693 { 3694 return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA); 3695 } 3696 3697 ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf) 3698 { 3699 return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED); 3700 } 3701 3702 ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf) 3703 { 3704 return cpu_show_common(dev, attr, buf, X86_BUG_SRSO); 3705 } 3706 3707 ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf) 3708 { 3709 return cpu_show_common(dev, attr, buf, X86_BUG_GDS); 3710 } 3711 3712 ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attribute *attr, char *buf) 3713 { 3714 return cpu_show_common(dev, attr, buf, X86_BUG_RFDS); 3715 } 3716 3717 ssize_t cpu_show_old_microcode(struct device *dev, struct device_attribute *attr, char *buf) 3718 { 3719 return cpu_show_common(dev, attr, buf, X86_BUG_OLD_MICROCODE); 3720 } 3721 3722 ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf) 3723 { 3724 return cpu_show_common(dev, attr, buf, X86_BUG_ITS); 3725 } 3726 3727 ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf) 3728 { 3729 return cpu_show_common(dev, attr, buf, X86_BUG_TSA); 3730 } 3731 #endif 3732 3733 void __warn_thunk(void) 3734 { 3735 WARN_ONCE(1, "Unpatched return thunk in use. This should not happen!\n"); 3736 } 3737