1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 1994 Linus Torvalds 4 * 5 * Cyrix stuff, June 1998 by: 6 * - Rafael R. Reilova (moved everything from head.S), 7 * <rreilova@ececs.uc.edu> 8 * - Channing Corn (tests & fixes), 9 * - Andrew D. Balsa (code cleanup). 10 */ 11 #include <linux/init.h> 12 #include <linux/utsname.h> 13 #include <linux/cpu.h> 14 #include <linux/module.h> 15 #include <linux/nospec.h> 16 #include <linux/prctl.h> 17 18 #include <asm/spec-ctrl.h> 19 #include <asm/cmdline.h> 20 #include <asm/bugs.h> 21 #include <asm/processor.h> 22 #include <asm/processor-flags.h> 23 #include <asm/fpu/internal.h> 24 #include <asm/msr.h> 25 #include <asm/paravirt.h> 26 #include <asm/alternative.h> 27 #include <asm/pgtable.h> 28 #include <asm/set_memory.h> 29 #include <asm/intel-family.h> 30 31 static void __init spectre_v2_select_mitigation(void); 32 static void __init ssb_select_mitigation(void); 33 34 /* 35 * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any 36 * writes to SPEC_CTRL contain whatever reserved bits have been set. 37 */ 38 u64 __ro_after_init x86_spec_ctrl_base; 39 40 /* 41 * The vendor and possibly platform specific bits which can be modified in 42 * x86_spec_ctrl_base. 43 */ 44 static u64 __ro_after_init x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS; 45 46 /* 47 * AMD specific MSR info for Speculative Store Bypass control. 48 * x86_amd_ls_cfg_rds_mask is initialized in identify_boot_cpu(). 49 */ 50 u64 __ro_after_init x86_amd_ls_cfg_base; 51 u64 __ro_after_init x86_amd_ls_cfg_rds_mask; 52 53 void __init check_bugs(void) 54 { 55 identify_boot_cpu(); 56 57 if (!IS_ENABLED(CONFIG_SMP)) { 58 pr_info("CPU: "); 59 print_cpu_info(&boot_cpu_data); 60 } 61 62 /* 63 * Read the SPEC_CTRL MSR to account for reserved bits which may 64 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD 65 * init code as it is not enumerated and depends on the family. 66 */ 67 if (boot_cpu_has(X86_FEATURE_IBRS)) 68 rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); 69 70 /* Select the proper spectre mitigation before patching alternatives */ 71 spectre_v2_select_mitigation(); 72 73 /* 74 * Select proper mitigation for any exposure to the Speculative Store 75 * Bypass vulnerability. 76 */ 77 ssb_select_mitigation(); 78 79 #ifdef CONFIG_X86_32 80 /* 81 * Check whether we are able to run this kernel safely on SMP. 82 * 83 * - i386 is no longer supported. 84 * - In order to run on anything without a TSC, we need to be 85 * compiled for a i486. 86 */ 87 if (boot_cpu_data.x86 < 4) 88 panic("Kernel requires i486+ for 'invlpg' and other features"); 89 90 init_utsname()->machine[1] = 91 '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86); 92 alternative_instructions(); 93 94 fpu__init_check_bugs(); 95 #else /* CONFIG_X86_64 */ 96 alternative_instructions(); 97 98 /* 99 * Make sure the first 2MB area is not mapped by huge pages 100 * There are typically fixed size MTRRs in there and overlapping 101 * MTRRs into large pages causes slow downs. 102 * 103 * Right now we don't do that with gbpages because there seems 104 * very little benefit for that case. 105 */ 106 if (!direct_gbpages) 107 set_memory_4k((unsigned long)__va(0), 1); 108 #endif 109 } 110 111 /* The kernel command line selection */ 112 enum spectre_v2_mitigation_cmd { 113 SPECTRE_V2_CMD_NONE, 114 SPECTRE_V2_CMD_AUTO, 115 SPECTRE_V2_CMD_FORCE, 116 SPECTRE_V2_CMD_RETPOLINE, 117 SPECTRE_V2_CMD_RETPOLINE_GENERIC, 118 SPECTRE_V2_CMD_RETPOLINE_AMD, 119 }; 120 121 static const char *spectre_v2_strings[] = { 122 [SPECTRE_V2_NONE] = "Vulnerable", 123 [SPECTRE_V2_RETPOLINE_MINIMAL] = "Vulnerable: Minimal generic ASM retpoline", 124 [SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline", 125 [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline", 126 [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline", 127 }; 128 129 #undef pr_fmt 130 #define pr_fmt(fmt) "Spectre V2 : " fmt 131 132 static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = 133 SPECTRE_V2_NONE; 134 135 void x86_spec_ctrl_set(u64 val) 136 { 137 if (val & x86_spec_ctrl_mask) 138 WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val); 139 else 140 wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val); 141 } 142 EXPORT_SYMBOL_GPL(x86_spec_ctrl_set); 143 144 u64 x86_spec_ctrl_get_default(void) 145 { 146 u64 msrval = x86_spec_ctrl_base; 147 148 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) 149 msrval |= rds_tif_to_spec_ctrl(current_thread_info()->flags); 150 return msrval; 151 } 152 EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default); 153 154 void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl) 155 { 156 u64 host = x86_spec_ctrl_base; 157 158 if (!boot_cpu_has(X86_FEATURE_IBRS)) 159 return; 160 161 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) 162 host |= rds_tif_to_spec_ctrl(current_thread_info()->flags); 163 164 if (host != guest_spec_ctrl) 165 wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl); 166 } 167 EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest); 168 169 void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl) 170 { 171 u64 host = x86_spec_ctrl_base; 172 173 if (!boot_cpu_has(X86_FEATURE_IBRS)) 174 return; 175 176 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) 177 host |= rds_tif_to_spec_ctrl(current_thread_info()->flags); 178 179 if (host != guest_spec_ctrl) 180 wrmsrl(MSR_IA32_SPEC_CTRL, host); 181 } 182 EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host); 183 184 static void x86_amd_rds_enable(void) 185 { 186 u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_rds_mask; 187 188 if (boot_cpu_has(X86_FEATURE_AMD_RDS)) 189 wrmsrl(MSR_AMD64_LS_CFG, msrval); 190 } 191 192 #ifdef RETPOLINE 193 static bool spectre_v2_bad_module; 194 195 bool retpoline_module_ok(bool has_retpoline) 196 { 197 if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline) 198 return true; 199 200 pr_err("System may be vulnerable to spectre v2\n"); 201 spectre_v2_bad_module = true; 202 return false; 203 } 204 205 static inline const char *spectre_v2_module_string(void) 206 { 207 return spectre_v2_bad_module ? " - vulnerable module loaded" : ""; 208 } 209 #else 210 static inline const char *spectre_v2_module_string(void) { return ""; } 211 #endif 212 213 static void __init spec2_print_if_insecure(const char *reason) 214 { 215 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) 216 pr_info("%s selected on command line.\n", reason); 217 } 218 219 static void __init spec2_print_if_secure(const char *reason) 220 { 221 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) 222 pr_info("%s selected on command line.\n", reason); 223 } 224 225 static inline bool retp_compiler(void) 226 { 227 return __is_defined(RETPOLINE); 228 } 229 230 static inline bool match_option(const char *arg, int arglen, const char *opt) 231 { 232 int len = strlen(opt); 233 234 return len == arglen && !strncmp(arg, opt, len); 235 } 236 237 static const struct { 238 const char *option; 239 enum spectre_v2_mitigation_cmd cmd; 240 bool secure; 241 } mitigation_options[] = { 242 { "off", SPECTRE_V2_CMD_NONE, false }, 243 { "on", SPECTRE_V2_CMD_FORCE, true }, 244 { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false }, 245 { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_AMD, false }, 246 { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false }, 247 { "auto", SPECTRE_V2_CMD_AUTO, false }, 248 }; 249 250 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) 251 { 252 char arg[20]; 253 int ret, i; 254 enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO; 255 256 if (cmdline_find_option_bool(boot_command_line, "nospectre_v2")) 257 return SPECTRE_V2_CMD_NONE; 258 else { 259 ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); 260 if (ret < 0) 261 return SPECTRE_V2_CMD_AUTO; 262 263 for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) { 264 if (!match_option(arg, ret, mitigation_options[i].option)) 265 continue; 266 cmd = mitigation_options[i].cmd; 267 break; 268 } 269 270 if (i >= ARRAY_SIZE(mitigation_options)) { 271 pr_err("unknown option (%s). Switching to AUTO select\n", arg); 272 return SPECTRE_V2_CMD_AUTO; 273 } 274 } 275 276 if ((cmd == SPECTRE_V2_CMD_RETPOLINE || 277 cmd == SPECTRE_V2_CMD_RETPOLINE_AMD || 278 cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) && 279 !IS_ENABLED(CONFIG_RETPOLINE)) { 280 pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option); 281 return SPECTRE_V2_CMD_AUTO; 282 } 283 284 if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD && 285 boot_cpu_data.x86_vendor != X86_VENDOR_AMD) { 286 pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n"); 287 return SPECTRE_V2_CMD_AUTO; 288 } 289 290 if (mitigation_options[i].secure) 291 spec2_print_if_secure(mitigation_options[i].option); 292 else 293 spec2_print_if_insecure(mitigation_options[i].option); 294 295 return cmd; 296 } 297 298 /* Check for Skylake-like CPUs (for RSB handling) */ 299 static bool __init is_skylake_era(void) 300 { 301 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && 302 boot_cpu_data.x86 == 6) { 303 switch (boot_cpu_data.x86_model) { 304 case INTEL_FAM6_SKYLAKE_MOBILE: 305 case INTEL_FAM6_SKYLAKE_DESKTOP: 306 case INTEL_FAM6_SKYLAKE_X: 307 case INTEL_FAM6_KABYLAKE_MOBILE: 308 case INTEL_FAM6_KABYLAKE_DESKTOP: 309 return true; 310 } 311 } 312 return false; 313 } 314 315 static void __init spectre_v2_select_mitigation(void) 316 { 317 enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); 318 enum spectre_v2_mitigation mode = SPECTRE_V2_NONE; 319 320 /* 321 * If the CPU is not affected and the command line mode is NONE or AUTO 322 * then nothing to do. 323 */ 324 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && 325 (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO)) 326 return; 327 328 switch (cmd) { 329 case SPECTRE_V2_CMD_NONE: 330 return; 331 332 case SPECTRE_V2_CMD_FORCE: 333 case SPECTRE_V2_CMD_AUTO: 334 if (IS_ENABLED(CONFIG_RETPOLINE)) 335 goto retpoline_auto; 336 break; 337 case SPECTRE_V2_CMD_RETPOLINE_AMD: 338 if (IS_ENABLED(CONFIG_RETPOLINE)) 339 goto retpoline_amd; 340 break; 341 case SPECTRE_V2_CMD_RETPOLINE_GENERIC: 342 if (IS_ENABLED(CONFIG_RETPOLINE)) 343 goto retpoline_generic; 344 break; 345 case SPECTRE_V2_CMD_RETPOLINE: 346 if (IS_ENABLED(CONFIG_RETPOLINE)) 347 goto retpoline_auto; 348 break; 349 } 350 pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!"); 351 return; 352 353 retpoline_auto: 354 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { 355 retpoline_amd: 356 if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { 357 pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n"); 358 goto retpoline_generic; 359 } 360 mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD : 361 SPECTRE_V2_RETPOLINE_MINIMAL_AMD; 362 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD); 363 setup_force_cpu_cap(X86_FEATURE_RETPOLINE); 364 } else { 365 retpoline_generic: 366 mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC : 367 SPECTRE_V2_RETPOLINE_MINIMAL; 368 setup_force_cpu_cap(X86_FEATURE_RETPOLINE); 369 } 370 371 spectre_v2_enabled = mode; 372 pr_info("%s\n", spectre_v2_strings[mode]); 373 374 /* 375 * If neither SMEP nor PTI are available, there is a risk of 376 * hitting userspace addresses in the RSB after a context switch 377 * from a shallow call stack to a deeper one. To prevent this fill 378 * the entire RSB, even when using IBRS. 379 * 380 * Skylake era CPUs have a separate issue with *underflow* of the 381 * RSB, when they will predict 'ret' targets from the generic BTB. 382 * The proper mitigation for this is IBRS. If IBRS is not supported 383 * or deactivated in favour of retpolines the RSB fill on context 384 * switch is required. 385 */ 386 if ((!boot_cpu_has(X86_FEATURE_PTI) && 387 !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) { 388 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); 389 pr_info("Spectre v2 mitigation: Filling RSB on context switch\n"); 390 } 391 392 /* Initialize Indirect Branch Prediction Barrier if supported */ 393 if (boot_cpu_has(X86_FEATURE_IBPB)) { 394 setup_force_cpu_cap(X86_FEATURE_USE_IBPB); 395 pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n"); 396 } 397 398 /* 399 * Retpoline means the kernel is safe because it has no indirect 400 * branches. But firmware isn't, so use IBRS to protect that. 401 */ 402 if (boot_cpu_has(X86_FEATURE_IBRS)) { 403 setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); 404 pr_info("Enabling Restricted Speculation for firmware calls\n"); 405 } 406 } 407 408 #undef pr_fmt 409 #define pr_fmt(fmt) "Speculative Store Bypass: " fmt 410 411 static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE; 412 413 /* The kernel command line selection */ 414 enum ssb_mitigation_cmd { 415 SPEC_STORE_BYPASS_CMD_NONE, 416 SPEC_STORE_BYPASS_CMD_AUTO, 417 SPEC_STORE_BYPASS_CMD_ON, 418 SPEC_STORE_BYPASS_CMD_PRCTL, 419 }; 420 421 static const char *ssb_strings[] = { 422 [SPEC_STORE_BYPASS_NONE] = "Vulnerable", 423 [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled", 424 [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl" 425 }; 426 427 static const struct { 428 const char *option; 429 enum ssb_mitigation_cmd cmd; 430 } ssb_mitigation_options[] = { 431 { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */ 432 { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */ 433 { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */ 434 { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */ 435 }; 436 437 static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void) 438 { 439 enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO; 440 char arg[20]; 441 int ret, i; 442 443 if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) { 444 return SPEC_STORE_BYPASS_CMD_NONE; 445 } else { 446 ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable", 447 arg, sizeof(arg)); 448 if (ret < 0) 449 return SPEC_STORE_BYPASS_CMD_AUTO; 450 451 for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) { 452 if (!match_option(arg, ret, ssb_mitigation_options[i].option)) 453 continue; 454 455 cmd = ssb_mitigation_options[i].cmd; 456 break; 457 } 458 459 if (i >= ARRAY_SIZE(ssb_mitigation_options)) { 460 pr_err("unknown option (%s). Switching to AUTO select\n", arg); 461 return SPEC_STORE_BYPASS_CMD_AUTO; 462 } 463 } 464 465 return cmd; 466 } 467 468 static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void) 469 { 470 enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE; 471 enum ssb_mitigation_cmd cmd; 472 473 if (!boot_cpu_has(X86_FEATURE_RDS)) 474 return mode; 475 476 cmd = ssb_parse_cmdline(); 477 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) && 478 (cmd == SPEC_STORE_BYPASS_CMD_NONE || 479 cmd == SPEC_STORE_BYPASS_CMD_AUTO)) 480 return mode; 481 482 switch (cmd) { 483 case SPEC_STORE_BYPASS_CMD_AUTO: 484 /* Choose prctl as the default mode */ 485 mode = SPEC_STORE_BYPASS_PRCTL; 486 break; 487 case SPEC_STORE_BYPASS_CMD_ON: 488 mode = SPEC_STORE_BYPASS_DISABLE; 489 break; 490 case SPEC_STORE_BYPASS_CMD_PRCTL: 491 mode = SPEC_STORE_BYPASS_PRCTL; 492 break; 493 case SPEC_STORE_BYPASS_CMD_NONE: 494 break; 495 } 496 497 /* 498 * We have three CPU feature flags that are in play here: 499 * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible. 500 * - X86_FEATURE_RDS - CPU is able to turn off speculative store bypass 501 * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation 502 */ 503 if (mode == SPEC_STORE_BYPASS_DISABLE) { 504 setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE); 505 /* 506 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses 507 * a completely different MSR and bit dependent on family. 508 */ 509 switch (boot_cpu_data.x86_vendor) { 510 case X86_VENDOR_INTEL: 511 x86_spec_ctrl_base |= SPEC_CTRL_RDS; 512 x86_spec_ctrl_mask &= ~SPEC_CTRL_RDS; 513 x86_spec_ctrl_set(SPEC_CTRL_RDS); 514 break; 515 case X86_VENDOR_AMD: 516 x86_amd_rds_enable(); 517 break; 518 } 519 } 520 521 return mode; 522 } 523 524 static void ssb_select_mitigation() 525 { 526 ssb_mode = __ssb_select_mitigation(); 527 528 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) 529 pr_info("%s\n", ssb_strings[ssb_mode]); 530 } 531 532 #undef pr_fmt 533 534 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) 535 { 536 bool update; 537 538 if (ssb_mode != SPEC_STORE_BYPASS_PRCTL) 539 return -ENXIO; 540 541 switch (ctrl) { 542 case PR_SPEC_ENABLE: 543 /* If speculation is force disabled, enable is not allowed */ 544 if (task_spec_ssb_force_disable(task)) 545 return -EPERM; 546 task_clear_spec_ssb_disable(task); 547 update = test_and_clear_tsk_thread_flag(task, TIF_RDS); 548 break; 549 case PR_SPEC_DISABLE: 550 task_set_spec_ssb_disable(task); 551 update = !test_and_set_tsk_thread_flag(task, TIF_RDS); 552 break; 553 case PR_SPEC_FORCE_DISABLE: 554 task_set_spec_ssb_disable(task); 555 task_set_spec_ssb_force_disable(task); 556 update = !test_and_set_tsk_thread_flag(task, TIF_RDS); 557 break; 558 default: 559 return -ERANGE; 560 } 561 562 /* 563 * If being set on non-current task, delay setting the CPU 564 * mitigation until it is next scheduled. 565 */ 566 if (task == current && update) 567 speculative_store_bypass_update(); 568 569 return 0; 570 } 571 572 static int ssb_prctl_get(struct task_struct *task) 573 { 574 switch (ssb_mode) { 575 case SPEC_STORE_BYPASS_DISABLE: 576 return PR_SPEC_DISABLE; 577 case SPEC_STORE_BYPASS_PRCTL: 578 if (task_spec_ssb_force_disable(task)) 579 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; 580 if (task_spec_ssb_disable(task)) 581 return PR_SPEC_PRCTL | PR_SPEC_DISABLE; 582 return PR_SPEC_PRCTL | PR_SPEC_ENABLE; 583 default: 584 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) 585 return PR_SPEC_ENABLE; 586 return PR_SPEC_NOT_AFFECTED; 587 } 588 } 589 590 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, 591 unsigned long ctrl) 592 { 593 switch (which) { 594 case PR_SPEC_STORE_BYPASS: 595 return ssb_prctl_set(task, ctrl); 596 default: 597 return -ENODEV; 598 } 599 } 600 601 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) 602 { 603 switch (which) { 604 case PR_SPEC_STORE_BYPASS: 605 return ssb_prctl_get(task); 606 default: 607 return -ENODEV; 608 } 609 } 610 611 void x86_spec_ctrl_setup_ap(void) 612 { 613 if (boot_cpu_has(X86_FEATURE_IBRS)) 614 x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask); 615 616 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) 617 x86_amd_rds_enable(); 618 } 619 620 #ifdef CONFIG_SYSFS 621 622 ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, 623 char *buf, unsigned int bug) 624 { 625 if (!boot_cpu_has_bug(bug)) 626 return sprintf(buf, "Not affected\n"); 627 628 switch (bug) { 629 case X86_BUG_CPU_MELTDOWN: 630 if (boot_cpu_has(X86_FEATURE_PTI)) 631 return sprintf(buf, "Mitigation: PTI\n"); 632 633 break; 634 635 case X86_BUG_SPECTRE_V1: 636 return sprintf(buf, "Mitigation: __user pointer sanitization\n"); 637 638 case X86_BUG_SPECTRE_V2: 639 return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], 640 boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "", 641 boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", 642 spectre_v2_module_string()); 643 644 case X86_BUG_SPEC_STORE_BYPASS: 645 return sprintf(buf, "%s\n", ssb_strings[ssb_mode]); 646 647 default: 648 break; 649 } 650 651 return sprintf(buf, "Vulnerable\n"); 652 } 653 654 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) 655 { 656 return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN); 657 } 658 659 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) 660 { 661 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1); 662 } 663 664 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) 665 { 666 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2); 667 } 668 669 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) 670 { 671 return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS); 672 } 673 #endif 674