1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 1994 Linus Torvalds 4 * 5 * Cyrix stuff, June 1998 by: 6 * - Rafael R. Reilova (moved everything from head.S), 7 * <rreilova@ececs.uc.edu> 8 * - Channing Corn (tests & fixes), 9 * - Andrew D. Balsa (code cleanup). 10 */ 11 #include <linux/init.h> 12 #include <linux/utsname.h> 13 #include <linux/cpu.h> 14 #include <linux/module.h> 15 16 #include <asm/nospec-branch.h> 17 #include <asm/cmdline.h> 18 #include <asm/bugs.h> 19 #include <asm/processor.h> 20 #include <asm/processor-flags.h> 21 #include <asm/fpu/internal.h> 22 #include <asm/msr.h> 23 #include <asm/paravirt.h> 24 #include <asm/alternative.h> 25 #include <asm/pgtable.h> 26 #include <asm/set_memory.h> 27 #include <asm/intel-family.h> 28 29 static void __init spectre_v2_select_mitigation(void); 30 31 void __init check_bugs(void) 32 { 33 identify_boot_cpu(); 34 35 if (!IS_ENABLED(CONFIG_SMP)) { 36 pr_info("CPU: "); 37 print_cpu_info(&boot_cpu_data); 38 } 39 40 /* Select the proper spectre mitigation before patching alternatives */ 41 spectre_v2_select_mitigation(); 42 43 #ifdef CONFIG_X86_32 44 /* 45 * Check whether we are able to run this kernel safely on SMP. 46 * 47 * - i386 is no longer supported. 48 * - In order to run on anything without a TSC, we need to be 49 * compiled for a i486. 50 */ 51 if (boot_cpu_data.x86 < 4) 52 panic("Kernel requires i486+ for 'invlpg' and other features"); 53 54 init_utsname()->machine[1] = 55 '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86); 56 alternative_instructions(); 57 58 fpu__init_check_bugs(); 59 #else /* CONFIG_X86_64 */ 60 alternative_instructions(); 61 62 /* 63 * Make sure the first 2MB area is not mapped by huge pages 64 * There are typically fixed size MTRRs in there and overlapping 65 * MTRRs into large pages causes slow downs. 66 * 67 * Right now we don't do that with gbpages because there seems 68 * very little benefit for that case. 69 */ 70 if (!direct_gbpages) 71 set_memory_4k((unsigned long)__va(0), 1); 72 #endif 73 } 74 75 /* The kernel command line selection */ 76 enum spectre_v2_mitigation_cmd { 77 SPECTRE_V2_CMD_NONE, 78 SPECTRE_V2_CMD_AUTO, 79 SPECTRE_V2_CMD_FORCE, 80 SPECTRE_V2_CMD_RETPOLINE, 81 SPECTRE_V2_CMD_RETPOLINE_GENERIC, 82 SPECTRE_V2_CMD_RETPOLINE_AMD, 83 }; 84 85 static const char *spectre_v2_strings[] = { 86 [SPECTRE_V2_NONE] = "Vulnerable", 87 [SPECTRE_V2_RETPOLINE_MINIMAL] = "Vulnerable: Minimal generic ASM retpoline", 88 [SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline", 89 [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline", 90 [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline", 91 }; 92 93 #undef pr_fmt 94 #define pr_fmt(fmt) "Spectre V2 : " fmt 95 96 static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE; 97 98 #ifdef RETPOLINE 99 static bool spectre_v2_bad_module; 100 101 bool retpoline_module_ok(bool has_retpoline) 102 { 103 if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline) 104 return true; 105 106 pr_err("System may be vunerable to spectre v2\n"); 107 spectre_v2_bad_module = true; 108 return false; 109 } 110 111 static inline const char *spectre_v2_module_string(void) 112 { 113 return spectre_v2_bad_module ? " - vulnerable module loaded" : ""; 114 } 115 #else 116 static inline const char *spectre_v2_module_string(void) { return ""; } 117 #endif 118 119 static void __init spec2_print_if_insecure(const char *reason) 120 { 121 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) 122 pr_info("%s\n", reason); 123 } 124 125 static void __init spec2_print_if_secure(const char *reason) 126 { 127 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) 128 pr_info("%s\n", reason); 129 } 130 131 static inline bool retp_compiler(void) 132 { 133 return __is_defined(RETPOLINE); 134 } 135 136 static inline bool match_option(const char *arg, int arglen, const char *opt) 137 { 138 int len = strlen(opt); 139 140 return len == arglen && !strncmp(arg, opt, len); 141 } 142 143 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) 144 { 145 char arg[20]; 146 int ret; 147 148 ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, 149 sizeof(arg)); 150 if (ret > 0) { 151 if (match_option(arg, ret, "off")) { 152 goto disable; 153 } else if (match_option(arg, ret, "on")) { 154 spec2_print_if_secure("force enabled on command line."); 155 return SPECTRE_V2_CMD_FORCE; 156 } else if (match_option(arg, ret, "retpoline")) { 157 spec2_print_if_insecure("retpoline selected on command line."); 158 return SPECTRE_V2_CMD_RETPOLINE; 159 } else if (match_option(arg, ret, "retpoline,amd")) { 160 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) { 161 pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n"); 162 return SPECTRE_V2_CMD_AUTO; 163 } 164 spec2_print_if_insecure("AMD retpoline selected on command line."); 165 return SPECTRE_V2_CMD_RETPOLINE_AMD; 166 } else if (match_option(arg, ret, "retpoline,generic")) { 167 spec2_print_if_insecure("generic retpoline selected on command line."); 168 return SPECTRE_V2_CMD_RETPOLINE_GENERIC; 169 } else if (match_option(arg, ret, "auto")) { 170 return SPECTRE_V2_CMD_AUTO; 171 } 172 } 173 174 if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2")) 175 return SPECTRE_V2_CMD_AUTO; 176 disable: 177 spec2_print_if_insecure("disabled on command line."); 178 return SPECTRE_V2_CMD_NONE; 179 } 180 181 /* Check for Skylake-like CPUs (for RSB handling) */ 182 static bool __init is_skylake_era(void) 183 { 184 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && 185 boot_cpu_data.x86 == 6) { 186 switch (boot_cpu_data.x86_model) { 187 case INTEL_FAM6_SKYLAKE_MOBILE: 188 case INTEL_FAM6_SKYLAKE_DESKTOP: 189 case INTEL_FAM6_SKYLAKE_X: 190 case INTEL_FAM6_KABYLAKE_MOBILE: 191 case INTEL_FAM6_KABYLAKE_DESKTOP: 192 return true; 193 } 194 } 195 return false; 196 } 197 198 static void __init spectre_v2_select_mitigation(void) 199 { 200 enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); 201 enum spectre_v2_mitigation mode = SPECTRE_V2_NONE; 202 203 /* 204 * If the CPU is not affected and the command line mode is NONE or AUTO 205 * then nothing to do. 206 */ 207 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && 208 (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO)) 209 return; 210 211 switch (cmd) { 212 case SPECTRE_V2_CMD_NONE: 213 return; 214 215 case SPECTRE_V2_CMD_FORCE: 216 /* FALLTRHU */ 217 case SPECTRE_V2_CMD_AUTO: 218 goto retpoline_auto; 219 220 case SPECTRE_V2_CMD_RETPOLINE_AMD: 221 if (IS_ENABLED(CONFIG_RETPOLINE)) 222 goto retpoline_amd; 223 break; 224 case SPECTRE_V2_CMD_RETPOLINE_GENERIC: 225 if (IS_ENABLED(CONFIG_RETPOLINE)) 226 goto retpoline_generic; 227 break; 228 case SPECTRE_V2_CMD_RETPOLINE: 229 if (IS_ENABLED(CONFIG_RETPOLINE)) 230 goto retpoline_auto; 231 break; 232 } 233 pr_err("kernel not compiled with retpoline; no mitigation available!"); 234 return; 235 236 retpoline_auto: 237 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { 238 retpoline_amd: 239 if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { 240 pr_err("LFENCE not serializing. Switching to generic retpoline\n"); 241 goto retpoline_generic; 242 } 243 mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD : 244 SPECTRE_V2_RETPOLINE_MINIMAL_AMD; 245 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD); 246 setup_force_cpu_cap(X86_FEATURE_RETPOLINE); 247 } else { 248 retpoline_generic: 249 mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC : 250 SPECTRE_V2_RETPOLINE_MINIMAL; 251 setup_force_cpu_cap(X86_FEATURE_RETPOLINE); 252 } 253 254 spectre_v2_enabled = mode; 255 pr_info("%s\n", spectre_v2_strings[mode]); 256 257 /* 258 * If neither SMEP or KPTI are available, there is a risk of 259 * hitting userspace addresses in the RSB after a context switch 260 * from a shallow call stack to a deeper one. To prevent this fill 261 * the entire RSB, even when using IBRS. 262 * 263 * Skylake era CPUs have a separate issue with *underflow* of the 264 * RSB, when they will predict 'ret' targets from the generic BTB. 265 * The proper mitigation for this is IBRS. If IBRS is not supported 266 * or deactivated in favour of retpolines the RSB fill on context 267 * switch is required. 268 */ 269 if ((!boot_cpu_has(X86_FEATURE_PTI) && 270 !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) { 271 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); 272 pr_info("Filling RSB on context switch\n"); 273 } 274 275 /* Initialize Indirect Branch Prediction Barrier if supported */ 276 if (boot_cpu_has(X86_FEATURE_IBPB)) { 277 setup_force_cpu_cap(X86_FEATURE_USE_IBPB); 278 pr_info("Enabling Indirect Branch Prediction Barrier\n"); 279 } 280 } 281 282 #undef pr_fmt 283 284 #ifdef CONFIG_SYSFS 285 ssize_t cpu_show_meltdown(struct device *dev, 286 struct device_attribute *attr, char *buf) 287 { 288 if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) 289 return sprintf(buf, "Not affected\n"); 290 if (boot_cpu_has(X86_FEATURE_PTI)) 291 return sprintf(buf, "Mitigation: PTI\n"); 292 return sprintf(buf, "Vulnerable\n"); 293 } 294 295 ssize_t cpu_show_spectre_v1(struct device *dev, 296 struct device_attribute *attr, char *buf) 297 { 298 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1)) 299 return sprintf(buf, "Not affected\n"); 300 return sprintf(buf, "Vulnerable\n"); 301 } 302 303 ssize_t cpu_show_spectre_v2(struct device *dev, 304 struct device_attribute *attr, char *buf) 305 { 306 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) 307 return sprintf(buf, "Not affected\n"); 308 309 return sprintf(buf, "%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], 310 boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "", 311 spectre_v2_module_string()); 312 } 313 #endif 314 315 void __ibp_barrier(void) 316 { 317 __wrmsr(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, 0); 318 } 319 EXPORT_SYMBOL_GPL(__ibp_barrier); 320