Lines matching the full word "if" in arch/x86/kernel/cpu/bugs.c
52 * command-line option can override AUTO. If no such option is
100 if (this_cpu_read(x86_spec_ctrl_current) == val) in update_spec_ctrl_cond()
109 if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS)) in update_spec_ctrl_cond()
155 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL)) in cpu_print_attack_vectors()
158 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER)) in cpu_print_attack_vectors()
161 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST)) in cpu_print_attack_vectors()
164 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) in cpu_print_attack_vectors()
192 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update in x86_virt_spec_ctrl()
193 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported. in x86_virt_spec_ctrl()
195 if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) && in x86_virt_spec_ctrl()
200 * If the host has SSBD mitigation enabled, force it in the host's in x86_virt_spec_ctrl()
201 * virtual MSR value. If it's not permanently enabled, evaluate in x86_virt_spec_ctrl()
204 if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE)) in x86_virt_spec_ctrl()
212 if (hostval != guestval) { in x86_virt_spec_ctrl()
227 if (boot_cpu_has(X86_FEATURE_VIRT_SSBD)) in x86_amd_ssb_disable()
229 else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD)) in x86_amd_ssb_disable()
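The two branches above implement a preference order between the virtualized SSBD interface and AMD's non-architectural LS_CFG bit. A minimal sketch of the whole helper, assuming the MSR constants, the legacy wrmsrl() spelling, and the x86_amd_ls_cfg_* variables defined elsewhere in the kernel:

	static void x86_amd_ssb_disable(void)
	{
		u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

		/* Prefer the virtualized SSBD interface when available. */
		if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
			wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
		/* Otherwise fall back to the non-architectural LS_CFG bit. */
		else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
			wrmsrl(MSR_AMD64_LS_CFG, msrval);
	}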
237 * Returns true if vulnerability should be mitigated based on the
344 * Set if any of MDS/TAA/MMIO/RFDS are going to enable VERW clearing on exit to
351 if (!boot_cpu_has_bug(X86_BUG_MDS)) { in mds_select_mitigation()
356 if (mds_mitigation == MDS_MITIGATION_AUTO) { in mds_select_mitigation()
357 if (should_mitigate_vuln(X86_BUG_MDS)) in mds_select_mitigation()
363 if (mds_mitigation == MDS_MITIGATION_OFF) in mds_select_mitigation()
371 if (!boot_cpu_has_bug(X86_BUG_MDS)) in mds_update_mitigation()
374 /* If TAA, MMIO, or RFDS are being mitigated, MDS gets mitigated too. */ in mds_update_mitigation()
375 if (verw_clear_cpu_buf_mitigation_selected) in mds_update_mitigation()
378 if (mds_mitigation == MDS_MITIGATION_FULL) { in mds_update_mitigation()
379 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR)) in mds_update_mitigation()
388 if (mds_mitigation == MDS_MITIGATION_FULL || in mds_apply_mitigation()
392 if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) && in mds_apply_mitigation()
400 if (!boot_cpu_has_bug(X86_BUG_MDS)) in mds_cmdline()
403 if (!str) in mds_cmdline()
406 if (!strcmp(str, "off")) in mds_cmdline()
408 else if (!strcmp(str, "full")) in mds_cmdline()
410 else if (!strcmp(str, "full,nosmt")) { in mds_cmdline()
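The string compares above follow the kernel's usual early_param() pattern: bail out if the CPU is not affected, then map each option string onto an enum. A minimal sketch under that assumption (mds_nosmt is a file-local flag not shown in this listing):

	static int __init mds_cmdline(char *str)
	{
		if (!boot_cpu_has_bug(X86_BUG_MDS))
			return 0;

		if (!str)
			return -EINVAL;

		if (!strcmp(str, "off"))
			mds_mitigation = MDS_MITIGATION_OFF;
		else if (!strcmp(str, "full"))
			mds_mitigation = MDS_MITIGATION_FULL;
		else if (!strcmp(str, "full,nosmt")) {
			mds_mitigation = MDS_MITIGATION_FULL;
			mds_nosmt = true;	/* also forbid SMT */
		}

		return 0;
	}
	early_param("mds", mds_cmdline);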
438 if (!boot_cpu_has_bug(X86_BUG_TAA)) { in taa_select_mitigation()
444 if (!boot_cpu_has(X86_FEATURE_RTM)) { in taa_select_mitigation()
450 if (taa_mitigation == TAA_MITIGATION_AUTO) { in taa_select_mitigation()
451 if (should_mitigate_vuln(X86_BUG_TAA)) in taa_select_mitigation()
457 if (taa_mitigation != TAA_MITIGATION_OFF) in taa_select_mitigation()
463 if (!taa_vulnerable()) in taa_update_mitigation()
466 if (verw_clear_cpu_buf_mitigation_selected) in taa_update_mitigation()
469 if (taa_mitigation == TAA_MITIGATION_VERW) { in taa_update_mitigation()
470 /* Check if the requisite ucode is available. */ in taa_update_mitigation()
471 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR)) in taa_update_mitigation()
480 * On MDS_NO=1 CPUs, if ARCH_CAP_TSX_CTRL_MSR is not set, microcode in taa_update_mitigation()
483 if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && in taa_update_mitigation()
493 if (taa_mitigation == TAA_MITIGATION_VERW || in taa_apply_mitigation()
505 if (taa_nosmt || smt_mitigations == SMT_MITIGATIONS_ON) in taa_apply_mitigation()
512 if (!boot_cpu_has_bug(X86_BUG_TAA)) in tsx_async_abort_parse_cmdline()
515 if (!str) in tsx_async_abort_parse_cmdline()
518 if (!strcmp(str, "off")) { in tsx_async_abort_parse_cmdline()
520 } else if (!strcmp(str, "full")) { in tsx_async_abort_parse_cmdline()
522 } else if (!strcmp(str, "full,nosmt")) { in tsx_async_abort_parse_cmdline()
544 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) { in mmio_select_mitigation()
550 if (mmio_mitigation == MMIO_MITIGATION_AUTO) { in mmio_select_mitigation()
551 if (should_mitigate_vuln(X86_BUG_MMIO_STALE_DATA)) in mmio_select_mitigation()
557 if (mmio_mitigation == MMIO_MITIGATION_OFF) in mmio_select_mitigation()
561 * Enable CPU buffer clear mitigation for host and VMM, if also affected in mmio_select_mitigation()
564 if (boot_cpu_has_bug(X86_BUG_MDS) || taa_vulnerable()) in mmio_select_mitigation()
570 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) in mmio_update_mitigation()
573 if (verw_clear_cpu_buf_mitigation_selected) in mmio_update_mitigation()
576 if (mmio_mitigation == MMIO_MITIGATION_VERW) { in mmio_update_mitigation()
578 * Check if the system has the right microcode. in mmio_update_mitigation()
584 if (!((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) || in mmio_update_mitigation()
596 if (mmio_mitigation == MMIO_MITIGATION_OFF) in mmio_apply_mitigation()
600 * Only enable the VMM mitigation if the CPU buffer clear mitigation is in mmio_apply_mitigation()
603 if (verw_clear_cpu_buf_mitigation_selected) { in mmio_apply_mitigation()
611 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can in mmio_apply_mitigation()
615 if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) in mmio_apply_mitigation()
618 if (mmio_nosmt || smt_mitigations == SMT_MITIGATIONS_ON) in mmio_apply_mitigation()
624 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) in mmio_stale_data_parse_cmdline()
627 if (!str) in mmio_stale_data_parse_cmdline()
630 if (!strcmp(str, "off")) { in mmio_stale_data_parse_cmdline()
632 } else if (!strcmp(str, "full")) { in mmio_stale_data_parse_cmdline()
634 } else if (!strcmp(str, "full,nosmt")) { in mmio_stale_data_parse_cmdline()
659 if (!boot_cpu_has_bug(X86_BUG_RFDS)) { in rfds_select_mitigation()
664 if (rfds_mitigation == RFDS_MITIGATION_AUTO) { in rfds_select_mitigation()
665 if (should_mitigate_vuln(X86_BUG_RFDS)) in rfds_select_mitigation()
671 if (rfds_mitigation == RFDS_MITIGATION_OFF) in rfds_select_mitigation()
674 if (verw_clears_cpu_reg_file()) in rfds_select_mitigation()
680 if (!boot_cpu_has_bug(X86_BUG_RFDS)) in rfds_update_mitigation()
683 if (verw_clear_cpu_buf_mitigation_selected) in rfds_update_mitigation()
686 if (rfds_mitigation == RFDS_MITIGATION_VERW) { in rfds_update_mitigation()
687 if (!verw_clears_cpu_reg_file()) in rfds_update_mitigation()
696 if (rfds_mitigation == RFDS_MITIGATION_VERW) { in rfds_apply_mitigation()
704 if (!str) in rfds_parse_cmdline()
707 if (!boot_cpu_has_bug(X86_BUG_RFDS)) in rfds_parse_cmdline()
710 if (!strcmp(str, "off")) in rfds_parse_cmdline()
712 else if (!strcmp(str, "on")) in rfds_parse_cmdline()
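The four VERW-based bugs above (MDS, TAA, MMIO Stale Data, RFDS) all share a three-phase shape: select records a tentative choice, update reconciles it against the shared verw_clear_cpu_buf_mitigation_selected flag, and apply flips the actual control. A condensed sketch of that flow, with the driver that sequences these calls assumed from elsewhere in the file:

	/* Phase 1: pick a tentative mode, possibly setting the shared flag. */
	rfds_select_mitigation();

	/* Phase 2: reconcile with the other VERW users, e.g. (line 683):
	 *	if (verw_clear_cpu_buf_mitigation_selected)
	 *		rfds_mitigation = RFDS_MITIGATION_VERW;
	 */
	rfds_update_mitigation();

	/* Phase 3: only now is the real control flipped, e.g. (line 696;
	 * the feature name is an assumption):
	 *	if (rfds_mitigation == RFDS_MITIGATION_VERW)
	 *		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
	 */
	rfds_apply_mitigation();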
748 if (!boot_cpu_has_bug(X86_BUG_SRBDS)) in update_srbds_msr()
751 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) in update_srbds_msr()
754 if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED) in update_srbds_msr()
761 if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL)) in update_srbds_msr()
783 if (!boot_cpu_has_bug(X86_BUG_SRBDS)) { in srbds_select_mitigation()
788 if (srbds_mitigation == SRBDS_MITIGATION_AUTO) { in srbds_select_mitigation()
789 if (should_mitigate_vuln(X86_BUG_SRBDS)) in srbds_select_mitigation()
798 * Check to see if this is one of the MDS_NO systems supporting TSX that in srbds_select_mitigation()
802 if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) && in srbds_select_mitigation()
805 else if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) in srbds_select_mitigation()
807 else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL)) in srbds_select_mitigation()
809 else if (srbds_off) in srbds_select_mitigation()
822 if (!str) in srbds_parse_cmdline()
825 if (!boot_cpu_has_bug(X86_BUG_SRBDS)) in srbds_parse_cmdline()
845 if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) in l1d_flush_select_mitigation()
854 if (!strcmp(str, "on")) in l1d_flush_parse_cmdline()
924 * GDS_MITG_DIS will be ignored if this processor is locked but the boot in update_gds_msr()
935 if (!boot_cpu_has_bug(X86_BUG_GDS)) in gds_select_mitigation()
938 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { in gds_select_mitigation()
944 if (gds_mitigation == GDS_MITIGATION_AUTO) { in gds_select_mitigation()
945 if (should_mitigate_vuln(X86_BUG_GDS)) in gds_select_mitigation()
952 if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) { in gds_select_mitigation()
953 if (gds_mitigation != GDS_MITIGATION_FORCE) in gds_select_mitigation()
959 if (gds_mitigation == GDS_MITIGATION_FORCE) in gds_select_mitigation()
963 if (mcu_ctrl & GDS_MITG_LOCKED) { in gds_select_mitigation()
964 if (gds_mitigation == GDS_MITIGATION_OFF) in gds_select_mitigation()
969 * _should_ have the same state. If the boot CPU isn't locked in gds_select_mitigation()
971 * mismatch. If the boot CPU is locked update_gds_msr() will in gds_select_mitigation()
980 if (!boot_cpu_has_bug(X86_BUG_GDS)) in gds_apply_mitigation()
984 if (x86_arch_cap_msr & ARCH_CAP_GDS_CTRL) in gds_apply_mitigation()
986 else if (gds_mitigation == GDS_MITIGATION_FORCE) { in gds_apply_mitigation()
1000 if (!str) in gds_parse_cmdline()
1003 if (!boot_cpu_has_bug(X86_BUG_GDS)) in gds_parse_cmdline()
1006 if (!strcmp(str, "off")) in gds_parse_cmdline()
1008 else if (!strcmp(str, "force")) in gds_parse_cmdline()
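The GDS checks at lines 963-971 revolve around a hardware lock bit in MSR_IA32_MCU_OPT_CTRL: once GDS_MITG_LOCKED is set, GDS_MITG_DIS writes are ignored. A condensed sketch of that handling, with rdmsrl() and the GDS_MITIGATION_FULL_LOCKED state assumed from the surrounding kernel code:

	u64 mcu_ctrl;

	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
	if (mcu_ctrl & GDS_MITG_LOCKED) {
		/* A locked CPU cannot honor "off"; warn and stay mitigated. */
		if (gds_mitigation == GDS_MITIGATION_OFF)
			pr_warn("Mitigation locked. Disable failed.\n");

		/*
		 * All CPUs should share the boot CPU's lock state;
		 * update_gds_msr() catches a mismatch on secondaries.
		 */
		gds_mitigation = GDS_MITIGATION_FULL_LOCKED;
	}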
1038 if (!boot_cpu_has(X86_FEATURE_SMAP)) in smap_works_speculatively()
1047 if (boot_cpu_has(X86_BUG_CPU_MELTDOWN)) in smap_works_speculatively()
1055 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1)) in spectre_v1_select_mitigation()
1058 if (!should_mitigate_vuln(X86_BUG_SPECTRE_V1)) in spectre_v1_select_mitigation()
1064 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1)) in spectre_v1_apply_mitigation()
1067 if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) { in spectre_v1_apply_mitigation()
1073 * If FSGSBASE is enabled, the user can put a kernel address in in spectre_v1_apply_mitigation()
1076 * If FSGSBASE is disabled, the user can only put a user space in spectre_v1_apply_mitigation()
1078 * possible if there's no SMAP protection. in spectre_v1_apply_mitigation()
1080 if (boot_cpu_has(X86_FEATURE_FSGSBASE) || in spectre_v1_apply_mitigation()
1087 * If neither is there, mitigate with an LFENCE to in spectre_v1_apply_mitigation()
1090 if (boot_cpu_has_bug(X86_BUG_SWAPGS) && in spectre_v1_apply_mitigation()
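The FSGSBASE/SMAP comments above boil down to two fence decisions: a user-entry LFENCE whenever the GS base may be attacker-controlled, and a kernel-entry LFENCE only when SWAPGS is exploitable and PTI is off. A condensed sketch of the shape at lines 1080-1090 (the FENCE_SWAPGS feature names are assumptions taken from the kernel's cpufeatures list):

	if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
	    !smap_works_speculatively()) {
		/* User-controllable GS base: fence after user-entry SWAPGS. */
		setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);

		/* Kernel-entry fence only if SWAPGS is a bug and PTI is off. */
		if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
		    !boot_cpu_has(X86_FEATURE_PTI))
			setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
	}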
1118 if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING) || in cdt_possible()
1122 if (mode == SPECTRE_V2_RETPOLINE || in cdt_possible()
1184 if (!str) in retbleed_parse_cmdline()
1189 if (next) { in retbleed_parse_cmdline()
1194 if (!strcmp(str, "off")) { in retbleed_parse_cmdline()
1196 } else if (!strcmp(str, "auto")) { in retbleed_parse_cmdline()
1198 } else if (!strcmp(str, "unret")) { in retbleed_parse_cmdline()
1200 } else if (!strcmp(str, "ibpb")) { in retbleed_parse_cmdline()
1202 } else if (!strcmp(str, "stuff")) { in retbleed_parse_cmdline()
1204 } else if (!strcmp(str, "nosmt")) { in retbleed_parse_cmdline()
1206 } else if (!strcmp(str, "force")) { in retbleed_parse_cmdline()
1224 if (!boot_cpu_has_bug(X86_BUG_RETBLEED)) { in retbleed_select_mitigation()
1231 if (!IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) { in retbleed_select_mitigation()
1237 if (!boot_cpu_has(X86_FEATURE_IBPB)) { in retbleed_select_mitigation()
1240 } else if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) { in retbleed_select_mitigation()
1246 if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) { in retbleed_select_mitigation()
1249 } else if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { in retbleed_select_mitigation()
1258 if (retbleed_mitigation != RETBLEED_MITIGATION_AUTO) in retbleed_select_mitigation()
1261 if (!should_mitigate_vuln(X86_BUG_RETBLEED)) { in retbleed_select_mitigation()
1267 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || in retbleed_select_mitigation()
1269 if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) in retbleed_select_mitigation()
1271 else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY) && in retbleed_select_mitigation()
1276 } else if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { in retbleed_select_mitigation()
1278 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) in retbleed_select_mitigation()
1280 else if (boot_cpu_has(X86_FEATURE_IBRS)) in retbleed_select_mitigation()
1289 if (!boot_cpu_has_bug(X86_BUG_RETBLEED)) in retbleed_update_mitigation()
1293 if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF) in retbleed_update_mitigation()
1296 /* If SRSO is using IBPB, that works for retbleed too */ in retbleed_update_mitigation()
1297 if (srso_mitigation == SRSO_MITIGATION_IBPB) in retbleed_update_mitigation()
1300 if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF && in retbleed_update_mitigation()
1310 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { in retbleed_update_mitigation()
1321 if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF) { in retbleed_update_mitigation()
1322 if (retbleed_mitigation != RETBLEED_MITIGATION_NONE) in retbleed_update_mitigation()
1347 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && in retbleed_apply_mitigation()
1387 if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) && in retbleed_apply_mitigation()
1404 if (!str) in its_parse_cmdline()
1407 if (!IS_ENABLED(CONFIG_MITIGATION_ITS)) { in its_parse_cmdline()
1412 if (!strcmp(str, "off")) { in its_parse_cmdline()
1414 } else if (!strcmp(str, "on")) { in its_parse_cmdline()
1416 } else if (!strcmp(str, "force")) { in its_parse_cmdline()
1419 } else if (!strcmp(str, "vmexit")) { in its_parse_cmdline()
1421 } else if (!strcmp(str, "stuff")) { in its_parse_cmdline()
1433 if (!boot_cpu_has_bug(X86_BUG_ITS)) { in its_select_mitigation()
1438 if (its_mitigation == ITS_MITIGATION_AUTO) { in its_select_mitigation()
1439 if (should_mitigate_vuln(X86_BUG_ITS)) in its_select_mitigation()
1445 if (its_mitigation == ITS_MITIGATION_OFF) in its_select_mitigation()
1448 if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) || in its_select_mitigation()
1455 if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) { in its_select_mitigation()
1461 if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF && in its_select_mitigation()
1467 if (its_mitigation == ITS_MITIGATION_VMEXIT_ONLY && in its_select_mitigation()
1474 if (!boot_cpu_has_bug(X86_BUG_ITS)) in its_update_mitigation()
1479 if (its_mitigation != ITS_MITIGATION_OFF) in its_update_mitigation()
1486 if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF) in its_update_mitigation()
1498 if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF && in its_update_mitigation()
1513 if (!boot_cpu_has(X86_FEATURE_RETPOLINE)) in its_apply_mitigation()
1552 if (!str) in tsa_parse_cmdline()
1555 if (!strcmp(str, "off")) in tsa_parse_cmdline()
1557 else if (!strcmp(str, "on")) in tsa_parse_cmdline()
1559 else if (!strcmp(str, "user")) in tsa_parse_cmdline()
1561 else if (!strcmp(str, "vm")) in tsa_parse_cmdline()
1572 if (!boot_cpu_has_bug(X86_BUG_TSA)) { in tsa_select_mitigation()
1577 if (tsa_mitigation == TSA_MITIGATION_AUTO) { in tsa_select_mitigation()
1582 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) || in tsa_select_mitigation()
1588 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) || in tsa_select_mitigation()
1594 if (uk && vm) in tsa_select_mitigation()
1598 if (tsa_mitigation == TSA_MITIGATION_NONE) in tsa_select_mitigation()
1601 if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR)) in tsa_select_mitigation()
1643 if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline) in retpoline_module_ok()
1667 if (new_state) in unpriv_ebpf_notify()
1677 if (sched_smt_active()) in unpriv_ebpf_notify()
1726 if (!str) in spectre_v2_user_parse_cmdline()
1729 if (!strcmp(str, "auto")) in spectre_v2_user_parse_cmdline()
1731 else if (!strcmp(str, "off")) in spectre_v2_user_parse_cmdline()
1733 else if (!strcmp(str, "on")) in spectre_v2_user_parse_cmdline()
1735 else if (!strcmp(str, "prctl")) in spectre_v2_user_parse_cmdline()
1737 else if (!strcmp(str, "prctl,ibpb")) in spectre_v2_user_parse_cmdline()
1739 else if (!strcmp(str, "seccomp")) in spectre_v2_user_parse_cmdline()
1741 else if (!strcmp(str, "seccomp,ibpb")) in spectre_v2_user_parse_cmdline()
1757 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP)) in spectre_v2_user_select_mitigation()
1768 if (!should_mitigate_vuln(X86_BUG_SPECTRE_V2_USER)) in spectre_v2_user_select_mitigation()
1771 if (smt_mitigations == SMT_MITIGATIONS_OFF) in spectre_v2_user_select_mitigation()
1784 if (IS_ENABLED(CONFIG_SECCOMP)) in spectre_v2_user_select_mitigation()
1792 if (IS_ENABLED(CONFIG_SECCOMP)) in spectre_v2_user_select_mitigation()
1801 * If STIBP support is not being forced, check if STIBP always-on in spectre_v2_user_select_mitigation()
1804 if ((spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || in spectre_v2_user_select_mitigation()
1809 if (!boot_cpu_has(X86_FEATURE_IBPB)) in spectre_v2_user_select_mitigation()
1812 if (!boot_cpu_has(X86_FEATURE_STIBP)) in spectre_v2_user_select_mitigation()
1818 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP)) in spectre_v2_user_update_mitigation()
1822 if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE) { in spectre_v2_user_update_mitigation()
1825 } else if (spectre_v2_cmd == SPECTRE_V2_CMD_FORCE) { in spectre_v2_user_update_mitigation()
1831 * If no STIBP, Intel enhanced IBRS is enabled, or SMT impossible, STIBP in spectre_v2_user_update_mitigation()
1842 if (!boot_cpu_has(X86_FEATURE_STIBP) || in spectre_v2_user_update_mitigation()
1850 if (spectre_v2_user_stibp != SPECTRE_V2_USER_NONE && in spectre_v2_user_update_mitigation()
1853 if (spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT && in spectre_v2_user_update_mitigation()
1864 if (spectre_v2_user_ibpb != SPECTRE_V2_USER_NONE) { in spectre_v2_user_apply_mitigation()
1907 if (!str) in spectre_v2_parse_cmdline()
1910 if (nospectre_v2) in spectre_v2_parse_cmdline()
1913 if (!strcmp(str, "off")) { in spectre_v2_parse_cmdline()
1915 } else if (!strcmp(str, "on")) { in spectre_v2_parse_cmdline()
1919 } else if (!strcmp(str, "retpoline")) { in spectre_v2_parse_cmdline()
1921 } else if (!strcmp(str, "retpoline,amd") || in spectre_v2_parse_cmdline()
1924 } else if (!strcmp(str, "retpoline,generic")) { in spectre_v2_parse_cmdline()
1926 } else if (!strcmp(str, "eibrs")) { in spectre_v2_parse_cmdline()
1928 } else if (!strcmp(str, "eibrs,lfence")) { in spectre_v2_parse_cmdline()
1930 } else if (!strcmp(str, "eibrs,retpoline")) { in spectre_v2_parse_cmdline()
1932 } else if (!strcmp(str, "auto")) { in spectre_v2_parse_cmdline()
1934 } else if (!strcmp(str, "ibrs")) { in spectre_v2_parse_cmdline()
1946 if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) { in spectre_v2_select_retpoline()
1959 if (rrsba_disabled) in spec_ctrl_disable_kernel_rrsba()
1962 if (!(x86_arch_cap_msr & ARCH_CAP_RRSBA)) { in spec_ctrl_disable_kernel_rrsba()
1967 if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL)) in spec_ctrl_disable_kernel_rrsba()
1980 * read the following document, and update if necessary: in spectre_v2_select_rsb_mitigation()
2004 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { in spectre_v2_select_rsb_mitigation()
2027 * branch history in userspace. Not needed if BHI_NO is set.
2031 if (!boot_cpu_has(X86_FEATURE_BHI_CTRL)) in spec_ctrl_bhi_dis()
2053 if (!str) in spectre_bhi_parse_cmdline()
2056 if (!strcmp(str, "off")) in spectre_bhi_parse_cmdline()
2058 else if (!strcmp(str, "on")) in spectre_bhi_parse_cmdline()
2060 else if (!strcmp(str, "vmexit")) in spectre_bhi_parse_cmdline()
2071 if (!boot_cpu_has(X86_BUG_BHI)) in bhi_select_mitigation()
2074 if (bhi_mitigation != BHI_MITIGATION_AUTO) in bhi_select_mitigation()
2077 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST)) { in bhi_select_mitigation()
2078 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL)) in bhi_select_mitigation()
2089 if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE) in bhi_update_mitigation()
2095 if (bhi_mitigation == BHI_MITIGATION_OFF) in bhi_apply_mitigation()
2099 if (boot_cpu_has(X86_FEATURE_RETPOLINE) && in bhi_apply_mitigation()
2102 if (rrsba_disabled) in bhi_apply_mitigation()
2106 if (!IS_ENABLED(CONFIG_X86_64)) in bhi_apply_mitigation()
2109 /* Mitigate in hardware if supported */ in bhi_apply_mitigation()
2110 if (spec_ctrl_bhi_dis()) in bhi_apply_mitigation()
2113 if (bhi_mitigation == BHI_MITIGATION_VMEXIT_ONLY) { in bhi_apply_mitigation()
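Read together, lines 2095-2113 encode a fallback order for BHI: an eIBRS-compatible retpoline with RRSBA disabled already suffices, then the hardware BHI_DIS_S control, then the software clearing sequence. A condensed sketch, with the retpoline check simplified and the setup_force_cpu_cap() calls assumed:

	if (bhi_mitigation == BHI_MITIGATION_OFF)
		return;

	/* A retpoline with RRSBA disabled already covers BHI. */
	if (boot_cpu_has(X86_FEATURE_RETPOLINE) && rrsba_disabled)
		return;

	/* The software clearing sequence only exists on 64-bit. */
	if (!IS_ENABLED(CONFIG_X86_64))
		return;

	/* Prefer the hardware control (SPEC_CTRL_BHI_DIS_S via BHI_CTRL). */
	if (spec_ctrl_bhi_dis())
		return;

	/* Otherwise clear branch history in software, everywhere or
	 * only on VM exit depending on the selected mode. */
	if (bhi_mitigation == BHI_MITIGATION_VMEXIT_ONLY)
		setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT);
	else
		setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP);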
2126 if ((spectre_v2_cmd == SPECTRE_V2_CMD_RETPOLINE || in spectre_v2_select_mitigation()
2136 if ((spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS || in spectre_v2_select_mitigation()
2144 if ((spectre_v2_cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || in spectre_v2_select_mitigation()
2151 if (spectre_v2_cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY)) { in spectre_v2_select_mitigation()
2156 if (spectre_v2_cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { in spectre_v2_select_mitigation()
2161 if (spectre_v2_cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) { in spectre_v2_select_mitigation()
2166 if (spectre_v2_cmd == SPECTRE_V2_CMD_IBRS && cpu_feature_enabled(X86_FEATURE_XENPV)) { in spectre_v2_select_mitigation()
2171 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) { in spectre_v2_select_mitigation()
2181 if (!should_mitigate_vuln(X86_BUG_SPECTRE_V2)) in spectre_v2_select_mitigation()
2185 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { in spectre_v2_select_mitigation()
2226 if (spectre_v2_cmd == SPECTRE_V2_CMD_AUTO && in spectre_v2_update_mitigation()
2228 if (IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY) && in spectre_v2_update_mitigation()
2238 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) in spectre_v2_update_mitigation()
2244 if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) in spectre_v2_apply_mitigation()
2247 if (spectre_v2_in_ibrs_mode(spectre_v2_enabled)) { in spectre_v2_apply_mitigation()
2248 if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) { in spectre_v2_apply_mitigation()
2265 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) in spectre_v2_apply_mitigation()
2285 if (spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE || in spectre_v2_apply_mitigation()
2300 * command line and if the CPU supports Enhanced IBRS, kernel might in spectre_v2_apply_mitigation()
2303 if (boot_cpu_has_bug(X86_BUG_RETBLEED) && in spectre_v2_apply_mitigation()
2308 if (retbleed_mitigation != RETBLEED_MITIGATION_IBPB) { in spectre_v2_apply_mitigation()
2313 } else if (boot_cpu_has(X86_FEATURE_IBRS) && in spectre_v2_apply_mitigation()
2331 if (sched_smt_active()) in update_stibp_strict()
2334 if (mask == x86_spec_ctrl_base) in update_stibp_strict()
2346 if (sched_smt_active()) in update_indir_branch_cond()
2359 * Enable the idle clearing if SMT is active on CPUs which are in update_mds_branch_idle()
2366 if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY)) in update_mds_branch_idle()
2369 if (sched_smt_active()) { in update_mds_branch_idle()
2371 } else if (mmio_mitigation == MMIO_MITIGATION_OFF || in update_mds_branch_idle()
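The idle-clearing toggle above only matters for CPUs affected solely by the store-buffer variant of MDS, as the comment at line 2359 explains; on other parts SMT makes the buffers unclearable anyway. A sketch of the whole helper, assuming the mds_idle_clear static key defined elsewhere in the file:

	static void update_mds_branch_idle(void)
	{
		/* Only the MSBDS-only parts benefit from idle clearing. */
		if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
			return;

		if (sched_smt_active()) {
			static_branch_enable(&mds_idle_clear);
		} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
			   (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
			static_branch_disable(&mds_idle_clear);
		}
	}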
2402 if (!str) in ssb_parse_cmdline()
2405 if (nossb) in ssb_parse_cmdline()
2408 if (!strcmp(str, "auto")) in ssb_parse_cmdline()
2410 else if (!strcmp(str, "on")) in ssb_parse_cmdline()
2412 else if (!strcmp(str, "off")) in ssb_parse_cmdline()
2414 else if (!strcmp(str, "prctl")) in ssb_parse_cmdline()
2416 else if (!strcmp(str, "seccomp")) in ssb_parse_cmdline()
2429 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) { in ssb_select_mitigation()
2434 if (ssb_mode == SPEC_STORE_BYPASS_AUTO) { in ssb_select_mitigation()
2435 if (should_mitigate_vuln(X86_BUG_SPEC_STORE_BYPASS)) in ssb_select_mitigation()
2441 if (!boot_cpu_has(X86_FEATURE_SSBD)) in ssb_select_mitigation()
2455 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) { in ssb_apply_mitigation()
2461 if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) && in ssb_apply_mitigation()
2487 if (tsk == current) in task_update_spec_tif()
2494 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush)) in l1d_flush_prctl_set()
2511 if (ssb_mode != SPEC_STORE_BYPASS_PRCTL && in ssb_prctl_set()
2517 /* If speculation is force disabled, enable is not allowed */ in ssb_prctl_set()
2518 if (task_spec_ssb_force_disable(task)) in ssb_prctl_set()
2536 if (task_spec_ssb_force_disable(task)) in ssb_prctl_set()
2560 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && in ib_prctl_set()
2571 * If either is set to conditional, allow the task flag to be in ib_prctl_set()
2574 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the in ib_prctl_set()
2579 if (!is_spec_ib_user_controlled() || in ib_prctl_set()
2592 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && in ib_prctl_set()
2596 if (!is_spec_ib_user_controlled()) in ib_prctl_set()
2600 if (ctrl == PR_SPEC_FORCE_DISABLE) in ib_prctl_set()
2603 if (task == current) in ib_prctl_set()
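The prctl checks at lines 2560-2603 enforce two invariants spelled out in the comments above: strict or always-on modes cannot be relaxed per task, and a prior force-disable is sticky. A condensed sketch of the enable path only (the disable cases follow the same structure; the task_*_spec_ib_* helpers are the kernel's per-task flag accessors, assumed here):

	static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
	{
		switch (ctrl) {
		case PR_SPEC_ENABLE:
			/* Nothing to control if neither IBPB nor STIBP is on. */
			if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
			    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
				return 0;
			/* Strict / always-on modes cannot be relaxed per task. */
			if (!is_spec_ib_user_controlled())
				return -EPERM;
			/* A prior PR_SPEC_FORCE_DISABLE is sticky. */
			if (task_spec_ib_force_disable(task))
				return -EPERM;
			task_clear_spec_ib_disable(task);
			task_update_spec_tif(task);
			break;
		default:
			return -ERANGE;
		}
		return 0;
	}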
2630 if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) in arch_seccomp_spec_mitigate()
2632 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || in arch_seccomp_spec_mitigate()
2640 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush)) in l1d_flush_prctl_get()
2643 if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH)) in l1d_flush_prctl_get()
2653 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) in ssb_prctl_get()
2661 if (task_spec_ssb_force_disable(task)) in ssb_prctl_get()
2663 if (task_spec_ssb_noexec(task)) in ssb_prctl_get()
2665 if (task_spec_ssb_disable(task)) in ssb_prctl_get()
2674 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) in ib_prctl_get()
2677 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && in ib_prctl_get()
2680 else if (is_spec_ib_user_controlled()) { in ib_prctl_get()
2681 if (task_spec_ib_force_disable(task)) in ib_prctl_get()
2683 if (task_spec_ib_disable(task)) in ib_prctl_get()
2686 } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || in ib_prctl_get()
2710 if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) in x86_spec_ctrl_setup_ap()
2713 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) in x86_spec_ctrl_setup_ap()
2742 * machines to 44 bits if the reported bits are less than 44.
2746 if (c->x86 != 6) in override_cache_bits()
2763 if (c->x86_cache_bits < 44) in override_cache_bits()
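The two checks above implement the clamp described in the comment at line 2742: on affected Family 6 parts, CPUID may report fewer physical address bits than the cache actually uses, so the L1TF limit is pinned at 44 bits. A condensed sketch, with the affected-model list elided and the case label purely illustrative:

	static void override_cache_bits(struct cpuinfo_x86 *c)
	{
		if (c->x86 != 6)
			return;

		switch (c->x86_model) {
		/* ... full list of affected Family 6 models ... */
		case 0x2e:	/* illustrative model number only */
			if (c->x86_cache_bits < 44)
				c->x86_cache_bits = 44;
			break;
		}
	}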
2771 if (!boot_cpu_has_bug(X86_BUG_L1TF)) { in l1tf_select_mitigation()
2776 if (l1tf_mitigation != L1TF_MITIGATION_AUTO) in l1tf_select_mitigation()
2779 if (!should_mitigate_vuln(X86_BUG_L1TF)) { in l1tf_select_mitigation()
2784 if (smt_mitigations == SMT_MITIGATIONS_ON) in l1tf_select_mitigation()
2794 if (!boot_cpu_has_bug(X86_BUG_L1TF)) in l1tf_apply_mitigation()
2814 #if CONFIG_PGTABLE_LEVELS == 2 in l1tf_apply_mitigation()
2820 if (l1tf_mitigation != L1TF_MITIGATION_OFF && in l1tf_apply_mitigation()
2835 if (!boot_cpu_has_bug(X86_BUG_L1TF)) in l1tf_cmdline()
2838 if (!str) in l1tf_cmdline()
2841 if (!strcmp(str, "off")) in l1tf_cmdline()
2843 else if (!strcmp(str, "flush,nowarn")) in l1tf_cmdline()
2845 else if (!strcmp(str, "flush")) in l1tf_cmdline()
2847 else if (!strcmp(str, "flush,nosmt")) in l1tf_cmdline()
2849 else if (!strcmp(str, "full")) in l1tf_cmdline()
2851 else if (!strcmp(str, "full,force")) in l1tf_cmdline()
2875 if (!str) in srso_parse_cmdline()
2878 if (!strcmp(str, "off")) in srso_parse_cmdline()
2880 else if (!strcmp(str, "microcode")) in srso_parse_cmdline()
2882 else if (!strcmp(str, "safe-ret")) in srso_parse_cmdline()
2884 else if (!strcmp(str, "ibpb")) in srso_parse_cmdline()
2886 else if (!strcmp(str, "ibpb-vmexit")) in srso_parse_cmdline()
2899 if (!boot_cpu_has_bug(X86_BUG_SRSO)) { in srso_select_mitigation()
2904 if (srso_mitigation == SRSO_MITIGATION_AUTO) { in srso_select_mitigation()
2906 * Use safe-RET if user->kernel or guest->host protection is in srso_select_mitigation()
2910 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) || in srso_select_mitigation()
2914 } else if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) || in srso_select_mitigation()
2924 if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) { in srso_select_mitigation()
2929 if (!boot_cpu_has(X86_FEATURE_IBPB_BRTYPE)) { in srso_select_mitigation()
2938 if (srso_mitigation == SRSO_MITIGATION_SAFE_RET) in srso_select_mitigation()
2947 if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO)) { in srso_select_mitigation()
2952 if (!IS_ENABLED(CONFIG_MITIGATION_SRSO)) { in srso_select_mitigation()
2959 if (boot_cpu_has(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) { in srso_select_mitigation()
2966 if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) { in srso_select_mitigation()
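The AUTO resolution described at lines 2906-2914 maps attack vectors onto SRSO modes: safe-RET when a cross-privilege vector (user->kernel or guest->host) needs protection, IBPB on exit for the same-privilege vectors, nothing otherwise. A condensed sketch (the exact enum spellings beyond those in the listing are assumptions):

	if (srso_mitigation == SRSO_MITIGATION_AUTO) {
		if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
		    cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL))
			srso_mitigation = SRSO_MITIGATION_SAFE_RET;
		else if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
			 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST))
			srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
		else
			srso_mitigation = SRSO_MITIGATION_NONE;
	}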
2978 if (!boot_cpu_has_bug(X86_BUG_SRSO)) in srso_update_mitigation()
2981 /* If retbleed is using IBPB, that works for SRSO as well */ in srso_update_mitigation()
2982 if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB && in srso_update_mitigation()
2992 * Clear the feature flag if this mitigation is not selected as that in srso_apply_mitigation()
2995 if (srso_mitigation != SRSO_MITIGATION_BP_SPEC_REDUCE) in srso_apply_mitigation()
2998 if (srso_mitigation == SRSO_MITIGATION_NONE) { in srso_apply_mitigation()
2999 if (boot_cpu_has(X86_FEATURE_SBPB)) in srso_apply_mitigation()
3014 if (boot_cpu_data.x86 == 0x19) { in srso_apply_mitigation()
3068 if (!str) in vmscape_parse_cmdline()
3071 if (!strcmp(str, "off")) { in vmscape_parse_cmdline()
3073 } else if (!strcmp(str, "ibpb")) { in vmscape_parse_cmdline()
3075 } else if (!strcmp(str, "force")) { in vmscape_parse_cmdline()
3088 if (!boot_cpu_has_bug(X86_BUG_VMSCAPE) || in vmscape_select_mitigation()
3094 if (vmscape_mitigation == VMSCAPE_MITIGATION_AUTO) { in vmscape_select_mitigation()
3095 if (should_mitigate_vuln(X86_BUG_VMSCAPE)) in vmscape_select_mitigation()
3104 if (!boot_cpu_has_bug(X86_BUG_VMSCAPE)) in vmscape_update_mitigation()
3107 if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB || in vmscape_update_mitigation()
3116 if (vmscape_mitigation == VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER) in vmscape_apply_mitigation()
3132 if (sched_smt_active() && unprivileged_ebpf_enabled() && in cpu_bugs_smt_update()
3153 if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY)) in cpu_bugs_smt_update()
3165 if (sched_smt_active()) in cpu_bugs_smt_update()
3177 if (sched_smt_active()) in cpu_bugs_smt_update()
3193 if (sched_smt_active()) in cpu_bugs_smt_update()
3215 if (!sched_smt_active() || in cpu_bugs_smt_update()
3235 if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) { in cpu_select_mitigations()
3325 #if IS_ENABLED(CONFIG_KVM_INTEL)
3337 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) in l1tf_show_state()
3340 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED || in l1tf_show_state()
3354 if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) || in itlb_multihit_show_state()
3357 else if (!(cr4_read_shadow() & X86_CR4_VMXE)) in itlb_multihit_show_state()
3359 else if (itlb_multihit_kvm_mitigation) in itlb_multihit_show_state()
3378 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { in mds_show_state()
3383 if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) { in mds_show_state()
3395 if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) || in tsx_async_abort_show_state()
3399 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { in tsx_async_abort_show_state()
3410 if (mmio_mitigation == MMIO_MITIGATION_OFF) in mmio_stale_data_show_state()
3413 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { in mmio_stale_data_show_state()
3429 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) in old_microcode_show_state()
3442 if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && in stibp_state()
3455 if (static_key_enabled(&switch_to_cond_stibp)) in stibp_state()
3463 if (boot_cpu_has(X86_FEATURE_IBPB)) { in ibpb_state()
3464 if (static_key_enabled(&switch_mm_always_ibpb)) in ibpb_state()
3466 if (static_key_enabled(&switch_mm_cond_ibpb)) in ibpb_state()
3475 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { in pbrsb_eibrs_state()
3476 if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) || in pbrsb_eibrs_state()
3488 if (!boot_cpu_has_bug(X86_BUG_BHI)) in spectre_bhi_state()
3490 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW)) in spectre_bhi_state()
3492 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP)) in spectre_bhi_state()
3494 else if (boot_cpu_has(X86_FEATURE_RETPOLINE) && in spectre_bhi_state()
3498 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_VMEXIT)) in spectre_bhi_state()
3506 if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) in spectre_v2_show_state()
3509 if (sched_smt_active() && unprivileged_ebpf_enabled() && in spectre_v2_show_state()
3532 if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || in retbleed_show_state()
3534 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && in retbleed_show_state()
3571 if (!boot_cpu_has_bug(bug)) in cpu_show_common()
3576 if (boot_cpu_has(X86_FEATURE_PTI)) in cpu_show_common()
3579 if (hypervisor_is_type(X86_HYPER_XEN_PV)) in cpu_show_common()
3594 if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV)) in cpu_show_common()
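The *_show_state() helpers above all hang off one sysfs dispatcher, whose shape lines 3571-3594 outline: report "Not affected" early, then switch per bug. A minimal sketch of that entry point, with the meltdown case shown because its PTI check appears at line 3576 (the other cases are elided):

	static ssize_t cpu_show_common(struct device *dev,
				       struct device_attribute *attr,
				       char *buf, unsigned int bug)
	{
		if (!boot_cpu_has_bug(bug))
			return sysfs_emit(buf, "Not affected\n");

		switch (bug) {
		case X86_BUG_CPU_MELTDOWN:
			if (boot_cpu_has(X86_FEATURE_PTI))
				return sysfs_emit(buf, "Mitigation: PTI\n");
			break;
		/* ... one case per enumerated bug ... */
		}
		return sysfs_emit(buf, "Vulnerable\n");
	}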