// SPDX-License-Identifier: GPL-2.0-only
/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/memblock.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/sched/mm.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/sched/smt.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/kgdb.h>
#include <linux/mem_encrypt.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>
#include <linux/pgtable.h>
#include <linux/stackprotector.h>
#include <linux/utsname.h>

#include <asm/alternative.h>
#include <asm/cmdline.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/doublefault.h>
#include <asm/archrandom.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>
#include <asm/sections.h>
#include <asm/vsyscall.h>
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/fpu/api.h>
#include <asm/mtrr.h>
#include <asm/hwcap2.h>
#include <linux/numa.h>
#include <asm/numa.h>
#include <asm/asm.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/cacheinfo.h>
#include <asm/memtype.h>
#include <asm/microcode.h>
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
#include <asm/fred.h>
#include <asm/uv/uv.h>
#include <asm/ia32.h>
#include <asm/set_memory.h>
#include <asm/traps.h>
#include <asm/sev.h>
#include <asm/tdx.h>
#include <asm/posted_intr.h>
#include <asm/runtime-const.h>

#include "cpu.h"

DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

u32 elf_hwcap2 __read_mostly;

/* Number of siblings per CPU package */
unsigned int __max_threads_per_core __ro_after_init = 1;
EXPORT_SYMBOL(__max_threads_per_core);

unsigned int __max_dies_per_package __ro_after_init = 1;
EXPORT_SYMBOL(__max_dies_per_package);

unsigned int __max_logical_packages __ro_after_init = 1;
EXPORT_SYMBOL(__max_logical_packages);

unsigned int __num_cores_per_package __ro_after_init = 1;
EXPORT_SYMBOL(__num_cores_per_package);

unsigned int __num_threads_per_package __ro_after_init = 1;
EXPORT_SYMBOL(__num_threads_per_package);

static struct ppin_info {
	int	feature;
	int	msr_ppin_ctl;
	int	msr_ppin;
} ppin_info[] = {
	[X86_VENDOR_INTEL] = {
		.feature = X86_FEATURE_INTEL_PPIN,
		.msr_ppin_ctl = MSR_PPIN_CTL,
		.msr_ppin = MSR_PPIN
	},
	[X86_VENDOR_AMD] = {
		.feature = X86_FEATURE_AMD_PPIN,
		.msr_ppin_ctl = MSR_AMD_PPIN_CTL,
		.msr_ppin = MSR_AMD_PPIN
	},
};

static const struct x86_cpu_id ppin_cpuids[] = {
	X86_MATCH_FEATURE(X86_FEATURE_AMD_PPIN, &ppin_info[X86_VENDOR_AMD]),
	X86_MATCH_FEATURE(X86_FEATURE_INTEL_PPIN, &ppin_info[X86_VENDOR_INTEL]),

	/* Legacy models without CPUID enumeration */
	X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &ppin_info[X86_VENDOR_INTEL]),
	X86_MATCH_VFM(INTEL_HASWELL_X, &ppin_info[X86_VENDOR_INTEL]),
	X86_MATCH_VFM(INTEL_BROADWELL_D, &ppin_info[X86_VENDOR_INTEL]),
	X86_MATCH_VFM(INTEL_BROADWELL_X, &ppin_info[X86_VENDOR_INTEL]),
	X86_MATCH_VFM(INTEL_SKYLAKE_X, &ppin_info[X86_VENDOR_INTEL]),
	X86_MATCH_VFM(INTEL_ICELAKE_X, &ppin_info[X86_VENDOR_INTEL]),
	X86_MATCH_VFM(INTEL_ICELAKE_D, &ppin_info[X86_VENDOR_INTEL]),
	X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &ppin_info[X86_VENDOR_INTEL]),
	X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &ppin_info[X86_VENDOR_INTEL]),
	X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &ppin_info[X86_VENDOR_INTEL]),
	X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &ppin_info[X86_VENDOR_INTEL]),

	{}
};
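
/*
 * PPIN_CTL bit layout, as relied on by ppin_init() below: bit 0 is
 * LockOut and bit 1 is Enable.  A raw value of 01b means the PPIN has
 * been locked in the disabled state until the next reset, hence the
 * "(val & 3UL) == 1UL" bail-out and the "val | 2UL" enable attempt.
 */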

static void ppin_init(struct cpuinfo_x86 *c)
{
	const struct x86_cpu_id *id;
	unsigned long long val;
	struct ppin_info *info;

	id = x86_match_cpu(ppin_cpuids);
	if (!id)
		return;

	/*
	 * Testing the presence of the MSR is not enough. Need to check
	 * that the PPIN_CTL allows reading of the PPIN.
	 */
	info = (struct ppin_info *)id->driver_data;

	if (rdmsrl_safe(info->msr_ppin_ctl, &val))
		goto clear_ppin;

	if ((val & 3UL) == 1UL) {
		/* PPIN locked in disabled mode */
		goto clear_ppin;
	}

	/* If PPIN is disabled, try to enable */
	if (!(val & 2UL)) {
		wrmsrl_safe(info->msr_ppin_ctl, val | 2UL);
		rdmsrl_safe(info->msr_ppin_ctl, &val);
	}

	/* Is the enable bit set? */
	if (val & 2UL) {
		c->ppin = __rdmsr(info->msr_ppin);
		set_cpu_cap(c, info->feature);
		return;
	}

clear_ppin:
	setup_clear_cpu_cap(info->feature);
}

static void default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	cpu_detect_cache_sizes(c);
#else
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
#endif
}

static const struct cpu_dev default_cpu = {
	.c_init		= default_init,
	.c_vendor	= "Unknown",
	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
};

static const struct cpu_dev *this_cpu = &default_cpu;

DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
	/*
	 * We need valid kernel segments for data and code in long mode too
	 * IRET will check the segment types  kkeil 2000/10/28
	 * Also sysret mandates a special GDT layout
	 *
	 * TLS descriptors are currently at a different place compared to i386.
	 * Hopefully nobody expects them at a fixed place (Wine?)
	 */
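	/*
	 * Note on the user segment order below: SYSRET derives the
	 * selectors it loads from MSR_STAR[63:48] at fixed offsets --
	 * 32-bit user CS at +0, user SS/DS at +8 and 64-bit user CS at
	 * +16 -- which is why USER_DS has to sit between the two user
	 * CS entries.
	 */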
	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(DESC_CODE32, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(DESC_CODE64, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(DESC_DATA64, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(DESC_CODE32 | DESC_USER, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(DESC_DATA64 | DESC_USER, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(DESC_CODE64 | DESC_USER, 0, 0xfffff),
#else
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(DESC_CODE32, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(DESC_DATA32, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(DESC_CODE32 | DESC_USER, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(DESC_DATA32 | DESC_USER, 0, 0xfffff),
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * The code segments and data segments have fixed 64k limits,
	 * the transfer segment sizes are set at run time.
	 */
	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(DESC_CODE32_BIOS, 0, 0xffff),
	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(DESC_CODE16, 0, 0xffff),
	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(DESC_DATA16, 0, 0xffff),
	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(DESC_DATA16, 0, 0),
	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(DESC_DATA16, 0, 0),
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(DESC_CODE32_BIOS, 0, 0xffff),
	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(DESC_CODE16, 0, 0xffff),
	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(DESC_DATA32_BIOS, 0, 0xffff),

	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(DESC_DATA32, 0, 0xfffff),
	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(DESC_DATA32, 0, 0xfffff),
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

#ifdef CONFIG_X86_64
static int __init x86_nopcid_setup(char *s)
{
	/* nopcid doesn't accept parameters */
	if (s)
		return -EINVAL;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_PCID))
		return 0;

	setup_clear_cpu_cap(X86_FEATURE_PCID);
	pr_info("nopcid: PCID feature disabled\n");
	return 0;
}
early_param("nopcid", x86_nopcid_setup);
#endif

static int __init x86_noinvpcid_setup(char *s)
{
	/* noinvpcid doesn't accept parameters */
	if (s)
		return -EINVAL;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_INVPCID))
		return 0;

	setup_clear_cpu_cap(X86_FEATURE_INVPCID);
	pr_info("noinvpcid: INVPCID feature disabled\n");
	return 0;
}
early_param("noinvpcid", x86_noinvpcid_setup);

/* Standard macro to see if a specific flag is changeable */
static inline bool flag_is_changeable_p(unsigned long flag)
{
	unsigned long f1, f2;

	if (!IS_ENABLED(CONFIG_X86_32))
		return true;

	/*
	 * Cyrix and IDT cpus allow disabling of CPUID
	 * so the code below may return different results
	 * when it is executed before and after enabling
	 * the CPUID. Add "volatile" to not allow gcc to
	 * optimize the subsequent calls to this function.
	 */
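	/*
	 * Save EFLAGS twice, flip @flag in the saved copy via push/popfl,
	 * then read EFLAGS back: if the bit reads back flipped, the flag
	 * is software-changeable.  For X86_EFLAGS_ID a changeable bit
	 * implies that the CPUID instruction is implemented.
	 */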
	asm volatile ("pushfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "movl %0, %1	\n\t"
		      "xorl %2, %0	\n\t"
		      "pushl %0		\n\t"
		      "popfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "popfl		\n\t"

		      : "=&r" (f1), "=&r" (f2)
		      : "ir" (flag));

	return (f1 ^ f2) & flag;
}

#ifdef CONFIG_X86_32
static int cachesize_override = -1;
static int disable_x86_serial_nr = 1;

static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);

/* Probe for the CPUID instruction */
bool have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
		return;

	/* Disable processor serial number: */

	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
	lo |= 0x200000;
	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);

	pr_notice("CPU serial number disabled.\n");
	clear_cpu_cap(c, X86_FEATURE_PN);

	/* Disabling the serial number may affect the cpuid level */
	c->cpuid_level = cpuid_eax(0);
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
}
#endif

static __always_inline void setup_smep(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_SMEP))
		cr4_set_bits(X86_CR4_SMEP);
}

static __always_inline void setup_smap(struct cpuinfo_x86 *c)
{
	unsigned long eflags = native_save_fl();

	/* This should have been cleared long ago */
	BUG_ON(eflags & X86_EFLAGS_AC);

	if (cpu_has(c, X86_FEATURE_SMAP))
		cr4_set_bits(X86_CR4_SMAP);
}

static __always_inline void setup_umip(struct cpuinfo_x86 *c)
{
	/* Check the boot processor, plus build option for UMIP. */
	if (!cpu_feature_enabled(X86_FEATURE_UMIP))
		goto out;

	/* Check the current processor's cpuid bits. */
	if (!cpu_has(c, X86_FEATURE_UMIP))
		goto out;

	cr4_set_bits(X86_CR4_UMIP);

	pr_info_once("x86/cpu: User Mode Instruction Prevention (UMIP) activated\n");

	return;

out:
	/*
	 * Make sure UMIP is disabled in case it was enabled in a
	 * previous boot (e.g., via kexec).
	 */
	cr4_clear_bits(X86_CR4_UMIP);
}

/* These bits should not change their value after CPU init is finished. */
static const unsigned long cr4_pinned_mask = X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP |
					     X86_CR4_FSGSBASE | X86_CR4_CET | X86_CR4_FRED;
static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
static unsigned long cr4_pinned_bits __ro_after_init;
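
/*
 * The CR0/CR4 write paths below re-assert pinned bits before warning:
 * if something (an exploit or a bug) cleared a pinned bit, the register
 * is rewritten with the bit restored first, so that the window in which
 * e.g. CR0.WP is clear stays as small as possible.
 */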

void native_write_cr0(unsigned long val)
{
	unsigned long bits_missing = 0;

set_register:
	asm volatile("mov %0,%%cr0": "+r" (val) : : "memory");

	if (static_branch_likely(&cr_pinning)) {
		if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) {
			bits_missing = X86_CR0_WP;
			val |= bits_missing;
			goto set_register;
		}
		/* Warn after we've set the missing bits. */
		WARN_ONCE(bits_missing, "CR0 WP bit went missing!?\n");
	}
}
EXPORT_SYMBOL(native_write_cr0);

void __no_profile native_write_cr4(unsigned long val)
{
	unsigned long bits_changed = 0;

set_register:
	asm volatile("mov %0,%%cr4": "+r" (val) : : "memory");

	if (static_branch_likely(&cr_pinning)) {
		if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
			bits_changed = (val & cr4_pinned_mask) ^ cr4_pinned_bits;
			val = (val & ~cr4_pinned_mask) | cr4_pinned_bits;
			goto set_register;
		}
		/* Warn after we've corrected the changed bits. */
		WARN_ONCE(bits_changed, "pinned CR4 bits changed: 0x%lx!?\n",
			  bits_changed);
	}
}
#if IS_MODULE(CONFIG_LKDTM)
EXPORT_SYMBOL_GPL(native_write_cr4);
#endif

void cr4_update_irqsoff(unsigned long set, unsigned long clear)
{
	unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);

	lockdep_assert_irqs_disabled();

	newval = (cr4 & ~clear) | set;
	if (newval != cr4) {
		this_cpu_write(cpu_tlbstate.cr4, newval);
		__write_cr4(newval);
	}
}
EXPORT_SYMBOL(cr4_update_irqsoff);

/* Read the CR4 shadow. */
unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}
EXPORT_SYMBOL_GPL(cr4_read_shadow);

void cr4_init(void)
{
	unsigned long cr4 = __read_cr4();

	if (boot_cpu_has(X86_FEATURE_PCID))
		cr4 |= X86_CR4_PCIDE;
	if (static_branch_likely(&cr_pinning))
		cr4 = (cr4 & ~cr4_pinned_mask) | cr4_pinned_bits;

	__write_cr4(cr4);

	/* Initialize cr4 shadow for this CPU. */
	this_cpu_write(cpu_tlbstate.cr4, cr4);
}

/*
 * Once CPU feature detection is finished (and boot params have been
 * parsed), record any of the sensitive CR bits that are set, and
 * enable CR pinning.
 */
static void __init setup_cr_pinning(void)
{
	cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & cr4_pinned_mask;
	static_key_enable(&cr_pinning.key);
}

static __init int x86_nofsgsbase_setup(char *arg)
{
	/* Require an exact match without trailing characters. */
	if (strlen(arg))
		return 0;

	/* Do not emit a message if the feature is not present. */
	if (!boot_cpu_has(X86_FEATURE_FSGSBASE))
		return 1;

	setup_clear_cpu_cap(X86_FEATURE_FSGSBASE);
	pr_info("FSGSBASE disabled via kernel command line\n");
	return 1;
}
__setup("nofsgsbase", x86_nofsgsbase_setup);

/*
 * Protection Keys are not available in 32-bit mode.
 */
static bool pku_disabled;

static __always_inline void setup_pku(struct cpuinfo_x86 *c)
{
	if (c == &boot_cpu_data) {
		if (pku_disabled || !cpu_feature_enabled(X86_FEATURE_PKU))
			return;
		/*
		 * Setting CR4.PKE will cause the X86_FEATURE_OSPKE cpuid
		 * bit to be set. Enforce it.
		 */
		setup_force_cpu_cap(X86_FEATURE_OSPKE);

	} else if (!cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		return;
	}

	cr4_set_bits(X86_CR4_PKE);
	/* Load the default PKRU value */
	pkru_write_default();
}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static __init int setup_disable_pku(char *arg)
{
	/*
	 * Do not clear the X86_FEATURE_PKU bit. All of the
	 * runtime checks are against OSPKE so clearing the
	 * bit does nothing.
	 *
	 * This way, we will see "pku" in cpuinfo, but not
	 * "ospke", which is exactly what we want. It shows
	 * that the CPU has PKU, but the OS has not enabled it.
	 * This happens to be exactly how a system would look
	 * if we disabled the config option.
	 */
	pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n");
	pku_disabled = true;
	return 1;
}
__setup("nopku", setup_disable_pku);
#endif

#ifdef CONFIG_X86_KERNEL_IBT

__noendbr u64 ibt_save(bool disable)
{
	u64 msr = 0;

	if (cpu_feature_enabled(X86_FEATURE_IBT)) {
		rdmsrl(MSR_IA32_S_CET, msr);
		if (disable)
			wrmsrl(MSR_IA32_S_CET, msr & ~CET_ENDBR_EN);
	}

	return msr;
}

__noendbr void ibt_restore(u64 save)
{
	u64 msr;

	if (cpu_feature_enabled(X86_FEATURE_IBT)) {
		rdmsrl(MSR_IA32_S_CET, msr);
		msr &= ~CET_ENDBR_EN;
		msr |= (save & CET_ENDBR_EN);
		wrmsrl(MSR_IA32_S_CET, msr);
	}
}

#endif

static __always_inline void setup_cet(struct cpuinfo_x86 *c)
{
	bool user_shstk, kernel_ibt;

	if (!IS_ENABLED(CONFIG_X86_CET))
		return;

	kernel_ibt = HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT);
	user_shstk = cpu_feature_enabled(X86_FEATURE_SHSTK) &&
		     IS_ENABLED(CONFIG_X86_USER_SHADOW_STACK);

	if (!kernel_ibt && !user_shstk)
		return;

	if (user_shstk)
		set_cpu_cap(c, X86_FEATURE_USER_SHSTK);

	if (kernel_ibt)
		wrmsrl(MSR_IA32_S_CET, CET_ENDBR_EN);
	else
		wrmsrl(MSR_IA32_S_CET, 0);

	cr4_set_bits(X86_CR4_CET);

	if (kernel_ibt && ibt_selftest()) {
		pr_err("IBT selftest: Failed!\n");
		wrmsrl(MSR_IA32_S_CET, 0);
		setup_clear_cpu_cap(X86_FEATURE_IBT);
	}
}

__noendbr void cet_disable(void)
{
	if (!(cpu_feature_enabled(X86_FEATURE_IBT) ||
	      cpu_feature_enabled(X86_FEATURE_SHSTK)))
		return;

	wrmsrl(MSR_IA32_S_CET, 0);
	wrmsrl(MSR_IA32_U_CET, 0);
}

/*
 * Some CPU features depend on higher CPUID levels, which may not always
 * be available due to CPUID level capping or broken virtualization
 * software.  Add those features to this table to auto-disable them.
 */
struct cpuid_dependent_feature {
	u32 feature;
	u32 level;
};

static const struct cpuid_dependent_feature
cpuid_dependent_features[] = {
	{ X86_FEATURE_MWAIT,		0x00000005 },
	{ X86_FEATURE_DCA,		0x00000009 },
	{ X86_FEATURE_XSAVE,		0x0000000d },
	{ 0, 0 }
};
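
/*
 * Example: X86_FEATURE_MWAIT is enumerated in CPUID leaf 0x5.  If a
 * hypervisor or a BIOS "CPUID limit" option caps the basic CPUID level
 * below 0x5, filter_cpuid_features() clears the feature bit so the
 * kernel never tries to use MONITOR/MWAIT.
 */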

static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{
	const struct cpuid_dependent_feature *df;

	for (df = cpuid_dependent_features; df->feature; df++) {

		if (!cpu_has(c, df->feature))
			continue;
		/*
		 * Note: cpuid_level is set to -1 if unavailable, but
		 * extended_cpuid_level is set to 0 if unavailable
		 * and the legitimate extended levels are all negative
		 * when signed; hence the weird messing around with
		 * signs here...
		 */
		if (!((s32)df->level < 0 ?
		     (u32)df->level > (u32)c->extended_cpuid_level :
		     (s32)df->level > (s32)c->cpuid_level))
			continue;

		clear_cpu_cap(c, df->feature);
		if (!warn)
			continue;

		pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
			x86_cap_flag(df->feature), df->level);
	}
}

/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the
 * model name; in particular, if CPUID levels 0x80000002..4 are
 * supported, this isn't used.
 */

/* Look up CPU names by table lookup. */
static const char *table_lookup_model(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	const struct legacy_cpu_model_info *info;

	if (c->x86_model >= 16)
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->legacy_models;

	while (info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
#endif
	return NULL;		/* Not found */
}

/* Aligned to unsigned long to avoid split lock in atomic bitmap ops */
__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
__u32 cpu_caps_set[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));

#ifdef CONFIG_X86_32
/* The 32-bit entry code needs to find cpu_entry_area. */
DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
#endif

/* Load the original GDT from the per-cpu structure */
void load_direct_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_rw(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
EXPORT_SYMBOL_GPL(load_direct_gdt);

/* Load a fixmap remapping of the per-cpu GDT */
void load_fixmap_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_ro(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
EXPORT_SYMBOL_GPL(load_fixmap_gdt);
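
/*
 * Note: the fixmap alias from get_cpu_gdt_ro() is what normally stays
 * loaded at runtime, so stray writes through the active GDT cannot
 * corrupt descriptors; the writable direct mapping is loaded where the
 * GDT may need to be written, e.g. during early bringup.
 */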

/**
 * switch_gdt_and_percpu_base - Switch to direct GDT and runtime per CPU base
 * @cpu:	The CPU number for which this is invoked
 *
 * Invoked during early boot to switch from early GDT and early per CPU to
 * the direct GDT and the runtime per CPU area. On 32-bit the percpu base
 * switch is implicit by loading the direct GDT. On 64bit this requires
 * GSBASE to be updated.
 */
void __init switch_gdt_and_percpu_base(int cpu)
{
	load_direct_gdt(cpu);

#ifdef CONFIG_X86_64
	/*
	 * No need to load %gs. It is already correct.
	 *
	 * Writing %gs on 64bit would zero GSBASE which would make any per
	 * CPU operation up to the point of the wrmsrl() fault.
	 *
	 * Set GSBASE to the new offset. Until the wrmsrl() happens the
	 * early mapping is still valid. That means the GSBASE update will
	 * lose any prior per CPU data which was not copied over in
	 * setup_per_cpu_areas().
	 *
	 * This works even with stackprotector enabled because the
	 * per CPU stack canary is 0 in both per CPU areas.
	 */
	wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
#else
	/*
	 * %fs is already set to __KERNEL_PERCPU, but after switching GDT
	 * it is required to load FS again so that the 'hidden' part is
	 * updated from the new GDT. Up to this point the early per CPU
	 * translation is active. Any content of the early per CPU data
	 * which was not copied over in setup_per_cpu_areas() is lost.
	 */
	loadsegment(fs, __KERNEL_PERCPU);
#endif
}

static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q, *s;

	if (c->extended_cpuid_level < 0x80000004)
		return;

	v = (unsigned int *)c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Trim whitespace */
	p = q = s = &c->x86_model_id[0];

	while (*p == ' ')
		p++;

	while (*p) {
		/* Note the last non-whitespace index */
		if (!isspace(*p))
			s = q;

		*q++ = *p++;
	}

	*(s + 1) = '\0';
}
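
/*
 * CPUID leaves 0x80000002..0x80000004 each return 16 bytes of the
 * processor brand string, 48 bytes total, which is why x86_model_id
 * above is terminated at index 48 before the whitespace trim.
 */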

void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ebx, ecx, edx, l2size;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
#endif
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
	l2size = ecx >> 16;

#ifdef CONFIG_X86_64
	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
	/* do processor-specific cache resizing */
	if (this_cpu->legacy_cache_size)
		l2size = this_cpu->legacy_cache_size(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */
#endif

	c->x86_cache_size = l2size;
}

u16 __read_mostly tlb_lli_4k[NR_INFO];
u16 __read_mostly tlb_lli_2m[NR_INFO];
u16 __read_mostly tlb_lli_4m[NR_INFO];
u16 __read_mostly tlb_lld_4k[NR_INFO];
u16 __read_mostly tlb_lld_2m[NR_INFO];
u16 __read_mostly tlb_lld_4m[NR_INFO];
u16 __read_mostly tlb_lld_1g[NR_INFO];

static void cpu_detect_tlb(struct cpuinfo_x86 *c)
{
	if (this_cpu->c_detect_tlb)
		this_cpu->c_detect_tlb(c);

	pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
		tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
		tlb_lli_4m[ENTRIES]);

	pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
		tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
		tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
}

static void get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;
	int i;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (!cpu_devs[i])
			break;

		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
		    (cpu_devs[i]->c_ident[1] &&
		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {

			this_cpu = cpu_devs[i];
			c->x86_vendor = this_cpu->c_x86_vendor;
			return;
		}
	}

	pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
		    "CPU: Your system may be unstable.\n", v);

	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}

void cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	c->x86 = 4;
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;

		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86		= x86_family(tfms);
		c->x86_model	= x86_model(tfms);
		c->x86_stepping	= x86_stepping(tfms);

		if (cap0 & (1<<19)) {
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
		}
	}
}

static void apply_forced_caps(struct cpuinfo_x86 *c)
{
	int i;

	for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
		c->x86_capability[i] &= ~cpu_caps_cleared[i];
		c->x86_capability[i] |= cpu_caps_set[i];
	}
}

static void init_speculation_control(struct cpuinfo_x86 *c)
{
	/*
	 * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
	 * and they also have a different bit for STIBP support. Also,
	 * a hypervisor might have set the individual AMD bits even on
	 * Intel CPUs, for finer-grained selection of what's available.
	 */
	if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
		set_cpu_cap(c, X86_FEATURE_IBRS);
		set_cpu_cap(c, X86_FEATURE_IBPB);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	}

	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
		set_cpu_cap(c, X86_FEATURE_STIBP);

	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
	    cpu_has(c, X86_FEATURE_VIRT_SSBD))
		set_cpu_cap(c, X86_FEATURE_SSBD);

	if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
		set_cpu_cap(c, X86_FEATURE_IBRS);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	}

	if (cpu_has(c, X86_FEATURE_AMD_IBPB))
		set_cpu_cap(c, X86_FEATURE_IBPB);

	if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
		set_cpu_cap(c, X86_FEATURE_STIBP);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	}

	if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
		set_cpu_cap(c, X86_FEATURE_SSBD);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
		clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
	}
}
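
/*
 * get_cpu_cap() fills in the x86_capability[] words: each CPUID output
 * register of interest is stored at a fixed index (CPUID_1_ECX,
 * CPUID_7_0_EBX, ...), so cpu_has() and friends reduce to a simple
 * word-plus-bit test.
 */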

void get_cpu_cap(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_1_ECX] = ecx;
		c->x86_capability[CPUID_1_EDX] = edx;
	}

	/* Thermal and Power Management Leaf: level 0x00000006 (eax) */
	if (c->cpuid_level >= 0x00000006)
		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);

	/* Additional Intel-defined flags: level 0x00000007 */
	if (c->cpuid_level >= 0x00000007) {
		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_7_0_EBX] = ebx;
		c->x86_capability[CPUID_7_ECX] = ecx;
		c->x86_capability[CPUID_7_EDX] = edx;

		/* Check valid sub-leaf index before accessing it */
		if (eax >= 1) {
			cpuid_count(0x00000007, 1, &eax, &ebx, &ecx, &edx);
			c->x86_capability[CPUID_7_1_EAX] = eax;
		}
	}

	/* Extended state features: level 0x0000000d */
	if (c->cpuid_level >= 0x0000000d) {
		cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_D_1_EAX] = eax;
	}

	/* AMD-defined flags: level 0x80000001 */
	eax = cpuid_eax(0x80000000);
	c->extended_cpuid_level = eax;

	if ((eax & 0xffff0000) == 0x80000000) {
		if (eax >= 0x80000001) {
			cpuid(0x80000001, &eax, &ebx, &ecx, &edx);

			c->x86_capability[CPUID_8000_0001_ECX] = ecx;
			c->x86_capability[CPUID_8000_0001_EDX] = edx;
		}
	}

	if (c->extended_cpuid_level >= 0x80000007) {
		cpuid(0x80000007, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_8000_0007_EBX] = ebx;
		c->x86_power = edx;
	}

	if (c->extended_cpuid_level >= 0x80000008) {
		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
	}

	if (c->extended_cpuid_level >= 0x8000000a)
		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);

	if (c->extended_cpuid_level >= 0x8000001f)
		c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f);

	if (c->extended_cpuid_level >= 0x80000021)
		c->x86_capability[CPUID_8000_0021_EAX] = cpuid_eax(0x80000021);

	init_scattered_cpuid_features(c);
	init_speculation_control(c);

	/*
	 * Clear/Set all flags overridden by options, after probe.
	 * This needs to happen each time we re-probe, which may happen
	 * several times during CPU initialization.
	 */
	apply_forced_caps(c);
}

void get_cpu_address_sizes(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	if (!cpu_has(c, X86_FEATURE_CPUID) ||
	    (c->extended_cpuid_level < 0x80000008)) {
		if (IS_ENABLED(CONFIG_X86_64)) {
			c->x86_clflush_size = 64;
			c->x86_phys_bits = 36;
			c->x86_virt_bits = 48;
		} else {
			c->x86_clflush_size = 32;
			c->x86_virt_bits = 32;
			c->x86_phys_bits = 32;

			if (cpu_has(c, X86_FEATURE_PAE) ||
			    cpu_has(c, X86_FEATURE_PSE36))
				c->x86_phys_bits = 36;
		}
	} else {
		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);

		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;

		/* Provide a sane default if not enumerated: */
		if (!c->x86_clflush_size)
			c->x86_clflush_size = 32;
	}

	c->x86_cache_bits = c->x86_phys_bits;
	c->x86_cache_alignment = c->x86_clflush_size;
}

static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
	int i;

	/*
	 * First of all, decide if this is a 486 or higher
	 * It's a 486 if we can modify the AC flag
	 */
	if (flag_is_changeable_p(X86_EFLAGS_AC))
		c->x86 = 4;
	else
		c->x86 = 3;

	for (i = 0; i < X86_VENDOR_NUM; i++)
		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
			c->x86_vendor_id[0] = 0;
			cpu_devs[i]->c_identify(c);
			if (c->x86_vendor_id[0]) {
				get_cpu_vendor(c);
				break;
			}
		}
}

#define NO_SPECULATION		BIT(0)
#define NO_MELTDOWN		BIT(1)
#define NO_SSB			BIT(2)
#define NO_L1TF			BIT(3)
#define NO_MDS			BIT(4)
#define MSBDS_ONLY		BIT(5)
#define NO_SWAPGS		BIT(6)
#define NO_ITLB_MULTIHIT	BIT(7)
#define NO_SPECTRE_V2		BIT(8)
#define NO_MMIO			BIT(9)
#define NO_EIBRS_PBRSB		BIT(10)
#define NO_BHI			BIT(11)

#define VULNWL(vendor, family, model, whitelist)	\
	X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)

#define VULNWL_INTEL(vfm, whitelist)		\
	X86_MATCH_VFM(vfm, whitelist)

#define VULNWL_AMD(family, whitelist)		\
	VULNWL(AMD, family, X86_MODEL_ANY, whitelist)

#define VULNWL_HYGON(family, whitelist)		\
	VULNWL(HYGON, family, X86_MODEL_ANY, whitelist)
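
/*
 * Each whitelist entry below ORs together the NO_* bits for the
 * vulnerabilities a CPU is known not to be affected by;
 * cpu_set_bug_bits() consults the table via cpu_matches() before
 * forcing the corresponding X86_BUG_* bits.
 */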

static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
	VULNWL(ANY,	4, X86_MODEL_ANY,	NO_SPECULATION),
	VULNWL(CENTAUR,	5, X86_MODEL_ANY,	NO_SPECULATION),
	VULNWL(INTEL,	5, X86_MODEL_ANY,	NO_SPECULATION),
	VULNWL(NSC,	5, X86_MODEL_ANY,	NO_SPECULATION),
	VULNWL(VORTEX,	5, X86_MODEL_ANY,	NO_SPECULATION),
	VULNWL(VORTEX,	6, X86_MODEL_ANY,	NO_SPECULATION),

	/* Intel Family 6 */
	VULNWL_INTEL(INTEL_TIGERLAKE,		NO_MMIO),
	VULNWL_INTEL(INTEL_TIGERLAKE_L,		NO_MMIO),
	VULNWL_INTEL(INTEL_ALDERLAKE,		NO_MMIO),
	VULNWL_INTEL(INTEL_ALDERLAKE_L,		NO_MMIO),

	VULNWL_INTEL(INTEL_ATOM_SALTWELL,	NO_SPECULATION | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(INTEL_ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(INTEL_ATOM_SALTWELL_MID,	NO_SPECULATION | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(INTEL_ATOM_BONNELL,	NO_SPECULATION | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(INTEL_ATOM_BONNELL_MID,	NO_SPECULATION | NO_ITLB_MULTIHIT),

	VULNWL_INTEL(INTEL_ATOM_SILVERMONT,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(INTEL_ATOM_SILVERMONT_D,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(INTEL_ATOM_SILVERMONT_MID,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(INTEL_ATOM_AIRMONT,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(INTEL_XEON_PHI_KNL,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(INTEL_XEON_PHI_KNM,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),

	VULNWL_INTEL(INTEL_CORE_YONAH,		NO_SSB),

	VULNWL_INTEL(INTEL_ATOM_AIRMONT_MID,	NO_SSB | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | MSBDS_ONLY),
	VULNWL_INTEL(INTEL_ATOM_AIRMONT_NP,	NO_SSB | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),

	VULNWL_INTEL(INTEL_ATOM_GOLDMONT,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
	VULNWL_INTEL(INTEL_ATOM_GOLDMONT_D,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
	VULNWL_INTEL(INTEL_ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),

	/*
	 * Technically, swapgs isn't serializing on AMD (despite it previously
	 * being documented as such in the APM).  But according to AMD, %gs is
	 * updated non-speculatively, and the issuing of %gs-relative memory
	 * operands will be blocked until the %gs update completes, which is
	 * good enough for our purposes.
	 */

	VULNWL_INTEL(INTEL_ATOM_TREMONT,	NO_EIBRS_PBRSB),
	VULNWL_INTEL(INTEL_ATOM_TREMONT_L,	NO_EIBRS_PBRSB),
	VULNWL_INTEL(INTEL_ATOM_TREMONT_D,	NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),

	/* AMD Family 0xf - 0x12 */
	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),

	/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB | NO_BHI),
	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB | NO_BHI),

	/* Zhaoxin Family 7 */
	VULNWL(CENTAUR,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO | NO_BHI),
	VULNWL(ZHAOXIN,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO | NO_BHI),
	{}
};

#define VULNBL(vendor, family, model, blacklist)	\
	X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, blacklist)

#define VULNBL_INTEL_STEPPINGS(vfm, steppings, issues)	\
	X86_MATCH_VFM_STEPPINGS(vfm, steppings, issues)

#define VULNBL_AMD(family, blacklist)		\
	VULNBL(AMD, family, X86_MODEL_ANY, blacklist)

#define VULNBL_HYGON(family, blacklist)		\
	VULNBL(HYGON, family, X86_MODEL_ANY, blacklist)

#define SRBDS		BIT(0)
/* CPU is affected by X86_BUG_MMIO_STALE_DATA */
#define MMIO		BIT(1)
/* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */
#define MMIO_SBDS	BIT(2)
/* CPU is affected by RETbleed, speculating where you would not expect it */
#define RETBLEED	BIT(3)
/* CPU is affected by SMT (cross-thread) return predictions */
#define SMT_RSB		BIT(4)
/* CPU is affected by SRSO */
#define SRSO		BIT(5)
/* CPU is affected by GDS */
#define GDS		BIT(6)
/* CPU is affected by Register File Data Sampling */
#define RFDS		BIT(7)

static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
	VULNBL_INTEL_STEPPINGS(INTEL_IVYBRIDGE,		X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(INTEL_HASWELL,		X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(INTEL_HASWELL_L,		X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(INTEL_HASWELL_G,		X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(INTEL_HASWELL_X,		X86_STEPPING_ANY,		MMIO),
	VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_D,	X86_STEPPING_ANY,		MMIO),
	VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_G,	X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_X,	X86_STEPPING_ANY,		MMIO),
	VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL,		X86_STEPPING_ANY,		SRBDS),
	VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_X,		X86_STEPPING_ANY,		MMIO | RETBLEED | GDS),
	VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_L,		X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS),
	VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE,		X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS),
	VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE_L,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS),
	VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE,		X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS),
	VULNBL_INTEL_STEPPINGS(INTEL_CANNONLAKE_L,	X86_STEPPING_ANY,		RETBLEED),
	VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_L,		X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED | GDS),
	VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_D,		X86_STEPPING_ANY,		MMIO | GDS),
	VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_X,		X86_STEPPING_ANY,		MMIO | GDS),
	VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE,		X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED | GDS),
	VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE_L,	X86_STEPPINGS(0x0, 0x0),	MMIO | RETBLEED),
	VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED | GDS),
	VULNBL_INTEL_STEPPINGS(INTEL_TIGERLAKE_L,	X86_STEPPING_ANY,		GDS),
	VULNBL_INTEL_STEPPINGS(INTEL_TIGERLAKE,		X86_STEPPING_ANY,		GDS),
	VULNBL_INTEL_STEPPINGS(INTEL_LAKEFIELD,		X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED),
	VULNBL_INTEL_STEPPINGS(INTEL_ROCKETLAKE,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS),
	VULNBL_INTEL_STEPPINGS(INTEL_ALDERLAKE,		X86_STEPPING_ANY,		RFDS),
	VULNBL_INTEL_STEPPINGS(INTEL_ALDERLAKE_L,	X86_STEPPING_ANY,		RFDS),
	VULNBL_INTEL_STEPPINGS(INTEL_RAPTORLAKE,	X86_STEPPING_ANY,		RFDS),
	VULNBL_INTEL_STEPPINGS(INTEL_RAPTORLAKE_P,	X86_STEPPING_ANY,		RFDS),
	VULNBL_INTEL_STEPPINGS(INTEL_RAPTORLAKE_S,	X86_STEPPING_ANY,		RFDS),
	VULNBL_INTEL_STEPPINGS(INTEL_ATOM_GRACEMONT,	X86_STEPPING_ANY,		RFDS),
	VULNBL_INTEL_STEPPINGS(INTEL_ATOM_TREMONT,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RFDS),
	VULNBL_INTEL_STEPPINGS(INTEL_ATOM_TREMONT_D,	X86_STEPPING_ANY,		MMIO | RFDS),
	VULNBL_INTEL_STEPPINGS(INTEL_ATOM_TREMONT_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RFDS),
	VULNBL_INTEL_STEPPINGS(INTEL_ATOM_GOLDMONT,	X86_STEPPING_ANY,		RFDS),
	VULNBL_INTEL_STEPPINGS(INTEL_ATOM_GOLDMONT_D,	X86_STEPPING_ANY,		RFDS),
	VULNBL_INTEL_STEPPINGS(INTEL_ATOM_GOLDMONT_PLUS, X86_STEPPING_ANY,		RFDS),

	VULNBL_AMD(0x15, RETBLEED),
	VULNBL_AMD(0x16, RETBLEED),
	VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
	VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO),
	VULNBL_AMD(0x19, SRSO),
	{}
};

static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which)
{
	const struct x86_cpu_id *m = x86_match_cpu(table);

	return m && !!(m->driver_data & which);
}

u64 x86_read_arch_cap_msr(void)
{
	u64 x86_arch_cap_msr = 0;

	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr);

	return x86_arch_cap_msr;
}

static bool arch_cap_mmio_immune(u64 x86_arch_cap_msr)
{
	return (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO &&
		x86_arch_cap_msr & ARCH_CAP_PSDP_NO &&
		x86_arch_cap_msr & ARCH_CAP_SBDR_SSDP_NO);
}

static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
{
	/* The "immunity" bit trumps everything else: */
	if (x86_arch_cap_msr & ARCH_CAP_RFDS_NO)
		return false;

	/*
	 * VMMs set ARCH_CAP_RFDS_CLEAR for processors not in the blacklist to
	 * indicate that mitigation is needed because the guest is running on
	 * vulnerable hardware or may migrate to such hardware:
	 */
	if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
		return true;

	/* Only consult the blacklist when there is no enumeration: */
	return cpu_matches(cpu_vuln_blacklist, RFDS);
}
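
/*
 * cpu_set_bug_bits() below combines several sources of truth: "not
 * affected" enumeration in MSR_IA32_ARCH_CAPABILITIES where it exists,
 * plus the whitelist and blacklist tables above.  The MSR bits are
 * preferred where defined, since they also cover steppings and
 * microcode newer than the tables.
 */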

static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
	u64 x86_arch_cap_msr = x86_read_arch_cap_msr();

	/* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
	if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
	    !(x86_arch_cap_msr & ARCH_CAP_PSCHANGE_MC_NO))
		setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);

	if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
		return;

	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);

	if (!cpu_matches(cpu_vuln_whitelist, NO_SPECTRE_V2))
		setup_force_cpu_bug(X86_BUG_SPECTRE_V2);

	if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
	    !(x86_arch_cap_msr & ARCH_CAP_SSB_NO) &&
	    !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);

	/*
	 * AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature
	 * flag and protect from vendor-specific bugs via the whitelist.
	 *
	 * Don't use AutoIBRS when SNP is enabled because it degrades host
	 * userspace indirect branch performance.
	 */
	if ((x86_arch_cap_msr & ARCH_CAP_IBRS_ALL) ||
	    (cpu_has(c, X86_FEATURE_AUTOIBRS) &&
	     !cpu_feature_enabled(X86_FEATURE_SEV_SNP))) {
		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
		if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
		    !(x86_arch_cap_msr & ARCH_CAP_PBRSB_NO))
			setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
	}

	if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
	    !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)) {
		setup_force_cpu_bug(X86_BUG_MDS);
		if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
			setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
	}

	if (!cpu_matches(cpu_vuln_whitelist, NO_SWAPGS))
		setup_force_cpu_bug(X86_BUG_SWAPGS);

	/*
	 * When the CPU is not mitigated for TAA (TAA_NO=0) set TAA bug when:
	 *	- TSX is supported or
	 *	- TSX_CTRL is present
	 *
	 * TSX_CTRL check is needed for cases when TSX could be disabled before
	 * the kernel boot e.g. kexec.
	 * TSX_CTRL check alone is not sufficient for cases when the microcode
	 * update is not present or when running as a guest that doesn't get
	 * TSX_CTRL.
	 */
	if (!(x86_arch_cap_msr & ARCH_CAP_TAA_NO) &&
	    (cpu_has(c, X86_FEATURE_RTM) ||
	     (x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR)))
		setup_force_cpu_bug(X86_BUG_TAA);

	/*
	 * SRBDS affects CPUs which support RDRAND or RDSEED and are listed
	 * in the vulnerability blacklist.
	 *
	 * Some of the implications and mitigation of Shared Buffers Data
	 * Sampling (SBDS) are similar to SRBDS. Give SBDS same treatment as
	 * SRBDS.
	 */
	if ((cpu_has(c, X86_FEATURE_RDRAND) ||
	     cpu_has(c, X86_FEATURE_RDSEED)) &&
	    cpu_matches(cpu_vuln_blacklist, SRBDS | MMIO_SBDS))
		setup_force_cpu_bug(X86_BUG_SRBDS);

	/*
	 * Processor MMIO Stale Data bug enumeration
	 *
	 * The affected CPU list is generally enough to enumerate the
	 * vulnerability, but in the virtualization case also check the
	 * ARCH_CAP MSR bits; the VMM may not want the guest to enumerate
	 * the bug.
	 *
	 * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
	 * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
	 */
	if (!arch_cap_mmio_immune(x86_arch_cap_msr)) {
		if (cpu_matches(cpu_vuln_blacklist, MMIO))
			setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
		else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
			setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN);
	}

	if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
		if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (x86_arch_cap_msr & ARCH_CAP_RSBA))
			setup_force_cpu_bug(X86_BUG_RETBLEED);
	}

	if (cpu_matches(cpu_vuln_blacklist, SMT_RSB))
		setup_force_cpu_bug(X86_BUG_SMT_RSB);

	if (!cpu_has(c, X86_FEATURE_SRSO_NO)) {
		if (cpu_matches(cpu_vuln_blacklist, SRSO))
			setup_force_cpu_bug(X86_BUG_SRSO);
	}

	/*
	 * Check if CPU is vulnerable to GDS. If running in a virtual machine on
	 * an affected processor, the VMM may have disabled the use of GATHER by
	 * disabling AVX2. The only way to do this in HW is to clear XCR0[2],
	 * which means that AVX will be disabled.
	 */
	if (cpu_matches(cpu_vuln_blacklist, GDS) && !(x86_arch_cap_msr & ARCH_CAP_GDS_NO) &&
	    boot_cpu_has(X86_FEATURE_AVX))
		setup_force_cpu_bug(X86_BUG_GDS);

	if (vulnerable_to_rfds(x86_arch_cap_msr))
		setup_force_cpu_bug(X86_BUG_RFDS);

	/* When virtualized, eIBRS could be hidden, assume vulnerable */
	if (!(x86_arch_cap_msr & ARCH_CAP_BHI_NO) &&
	    !cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
	    (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
	     boot_cpu_has(X86_FEATURE_HYPERVISOR)))
		setup_force_cpu_bug(X86_BUG_BHI);

	if (cpu_has(c, X86_FEATURE_AMD_IBPB) && !cpu_has(c, X86_FEATURE_AMD_IBPB_RET))
		setup_force_cpu_bug(X86_BUG_IBPB_NO_RET);

	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
		return;

	/* Rogue Data Cache Load? No! */
	if (x86_arch_cap_msr & ARCH_CAP_RDCL_NO)
		return;

	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);

	if (cpu_matches(cpu_vuln_whitelist, NO_L1TF))
		return;

	setup_force_cpu_bug(X86_BUG_L1TF);
}

/*
 * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
 * unfortunately, that's not true in practice because of early VIA
 * chips and (more importantly) broken virtualizers that are not easy
 * to detect. In the latter case it doesn't even *fail* reliably, so
 * probing for it doesn't even work. Disable it completely on 32-bit
 * unless we can find a reliable way to detect all the broken cases.
 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
 */
static void detect_nopl(void)
{
#ifdef CONFIG_X86_32
	setup_clear_cpu_cap(X86_FEATURE_NOPL);
#else
	setup_force_cpu_cap(X86_FEATURE_NOPL);
#endif
}
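
/*
 * Example of the clearcpuid= handling in cpu_parse_early_param() below:
 * "clearcpuid=smap,123" clears X86_FEATURE_SMAP by name and capability
 * bit 123 by number (the values here are purely illustrative); either
 * form taints the kernel with TAINT_CPU_OUT_OF_SPEC.
 */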

/*
 * We parse cpu parameters early because fpu__init_system() is executed
 * before parse_early_param().
 */
static void __init cpu_parse_early_param(void)
{
	char arg[128];
	char *argptr = arg, *opt;
	int arglen, taint = 0;

#ifdef CONFIG_X86_32
	if (cmdline_find_option_bool(boot_command_line, "no387"))
#ifdef CONFIG_MATH_EMULATION
		setup_clear_cpu_cap(X86_FEATURE_FPU);
#else
		pr_err("Option 'no387' requires CONFIG_MATH_EMULATION to be enabled.\n");
#endif

	if (cmdline_find_option_bool(boot_command_line, "nofxsr"))
		setup_clear_cpu_cap(X86_FEATURE_FXSR);
#endif

	if (cmdline_find_option_bool(boot_command_line, "noxsave"))
		setup_clear_cpu_cap(X86_FEATURE_XSAVE);

	if (cmdline_find_option_bool(boot_command_line, "noxsaveopt"))
		setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);

	if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
		setup_clear_cpu_cap(X86_FEATURE_XSAVES);

	if (cmdline_find_option_bool(boot_command_line, "nousershstk"))
		setup_clear_cpu_cap(X86_FEATURE_USER_SHSTK);

	/*
	 * FRED stays disabled unless the exact option "fred=on" is given;
	 * clearing the feature this early minimizes the window between
	 * FRED being available and it being available but disabled.
	 */
	arglen = cmdline_find_option(boot_command_line, "fred", arg, sizeof(arg));
	if (arglen != 2 || strncmp(arg, "on", 2))
		setup_clear_cpu_cap(X86_FEATURE_FRED);

	arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg));
	if (arglen <= 0)
		return;

	pr_info("Clearing CPUID bits:");

	while (argptr) {
		bool found __maybe_unused = false;
		unsigned int bit;

		opt = strsep(&argptr, ",");

		/*
		 * Handle naked numbers first for feature flags which don't
		 * have names.
		 */
		if (!kstrtouint(opt, 10, &bit)) {
			if (bit < NCAPINTS * 32) {

				/* empty-string, i.e., ""-defined feature flags */
				if (!x86_cap_flags[bit])
					pr_cont(" " X86_CAP_FMT_NUM, x86_cap_flag_num(bit));
				else
					pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit));

				setup_clear_cpu_cap(bit);
				taint++;
			}
			/*
			 * The assumption is that there are no feature names with only
			 * numbers in the name thus go to the next argument.
			 */
			continue;
		}

		for (bit = 0; bit < 32 * NCAPINTS; bit++) {
			if (!x86_cap_flag(bit))
				continue;

			if (strcmp(x86_cap_flag(bit), opt))
				continue;

			pr_cont(" %s", opt);
			setup_clear_cpu_cap(bit);
			taint++;
			found = true;
			break;
		}

		if (!found)
			pr_cont(" (unknown: %s)", opt);
	}
	pr_cont("\n");

	if (taint)
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
}

/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the boot CPU.  Don't add code
 * here that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* cyrix could have cpuid enabled via c_identify() */
	if (have_cpuid_p()) {
		cpu_detect(c);
		get_cpu_vendor(c);
		intel_unlock_cpuid_leafs(c);
		get_cpu_cap(c);
		setup_force_cpu_cap(X86_FEATURE_CPUID);
		get_cpu_address_sizes(c);
		cpu_parse_early_param();

		cpu_init_topology(c);

		if (this_cpu->c_early_init)
			this_cpu->c_early_init(c);

		c->cpu_index = 0;
		filter_cpuid_features(c, false);

		if (this_cpu->c_bsp_init)
			this_cpu->c_bsp_init(c);
	} else {
		setup_clear_cpu_cap(X86_FEATURE_CPUID);
		get_cpu_address_sizes(c);
		cpu_init_topology(c);
	}

	setup_force_cpu_cap(X86_FEATURE_ALWAYS);

	cpu_set_bug_bits(c);

	sld_setup(c);

#ifdef CONFIG_X86_32
	/*
	 * Regardless of whether PCID is enumerated, the SDM says
	 * that it can't be enabled in 32-bit mode.
	 */
	setup_clear_cpu_cap(X86_FEATURE_PCID);
#endif

	/*
	 * Later in the boot process pgtable_l5_enabled() relies on
	 * cpu_feature_enabled(X86_FEATURE_LA57). If 5-level paging is not
	 * enabled by this point we need to clear the feature bit to avoid
	 * false-positives at the later stage.
	 *
	 * pgtable_l5_enabled() can be false here for several reasons:
	 *  - 5-level paging is disabled compile-time;
	 *  - it's 32-bit kernel;
	 *  - machine doesn't support 5-level paging;
	 *  - user specified 'no5lvl' in kernel command line.
	 */
	if (!pgtable_l5_enabled())
		setup_clear_cpu_cap(X86_FEATURE_LA57);

	detect_nopl();
}

void __init early_cpu_init(void)
{
	const struct cpu_dev *const *cdev;
	int count = 0;

#ifdef CONFIG_PROCESSOR_SELECT
	pr_info("KERNEL supported cpus:\n");
#endif

	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
		const struct cpu_dev *cpudev = *cdev;

		if (count >= X86_VENDOR_NUM)
			break;
		cpu_devs[count] = cpudev;
		count++;

#ifdef CONFIG_PROCESSOR_SELECT
		{
			unsigned int j;

			for (j = 0; j < 2; j++) {
				if (!cpudev->c_ident[j])
					continue;
				pr_info("  %s %s\n", cpudev->c_vendor,
					cpudev->c_ident[j]);
			}
		}
#endif
	}
	early_identify_cpu(&boot_cpu_data);
}

static bool detect_null_seg_behavior(void)
{
	/*
	 * Empirically, writing zero to a segment selector on AMD does
	 * not clear the base, whereas writing zero to a segment
	 * selector on Intel does clear the base.  Intel's behavior
	 * allows slightly faster context switches in the common case
	 * where GS is unused by the prev and next threads.
	 *
	 * Since neither vendor documents this anywhere that I can see,
	 * detect it directly instead of hard-coding the choice by
	 * vendor.
	 *
	 * I've designated AMD's behavior as the "bug" because it's
	 * counterintuitive and less friendly.
	 */

	unsigned long old_base, tmp;
	rdmsrl(MSR_FS_BASE, old_base);
	wrmsrl(MSR_FS_BASE, 1);
	loadsegment(fs, 0);
	rdmsrl(MSR_FS_BASE, tmp);
	wrmsrl(MSR_FS_BASE, old_base);
	return tmp == 0;
}

void check_null_seg_clears_base(struct cpuinfo_x86 *c)
{
	/* BUG_NULL_SEG is only relevant with 64bit userspace */
	if (!IS_ENABLED(CONFIG_X86_64))
		return;

	if (cpu_has(c, X86_FEATURE_NULL_SEL_CLR_BASE))
		return;

	/*
	 * CPUID bit above wasn't set. If this kernel is still running
	 * as a HV guest, then the HV has decided not to advertise
	 * that CPUID bit for whatever reason.  For example, one
	 * member of the migration pool might be vulnerable.  Which
	 * means, the bug is present: set the BUG flag and return.
	 */
	if (cpu_has(c, X86_FEATURE_HYPERVISOR)) {
		set_cpu_bug(c, X86_BUG_NULL_SEG);
		return;
	}

	/*
	 * Zen2 CPUs also have this behaviour, but no CPUID bit.
	 * 0x18 is the respective family for Hygon.
	 */
	if ((c->x86 == 0x17 || c->x86 == 0x18) &&
	    detect_null_seg_behavior())
		return;

	/* All the remaining ones are affected */
	set_cpu_bug(c, X86_BUG_NULL_SEG);
}

static void generic_identify(struct cpuinfo_x86 *c)
{
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* cyrix could have cpuid enabled via c_identify() */
	if (!have_cpuid_p())
		return;

	cpu_detect(c);

	get_cpu_vendor(c);
	intel_unlock_cpuid_leafs(c);
	get_cpu_cap(c);

	get_cpu_address_sizes(c);

	get_model_name(c); /* Default name */

	/*
	 * ESPFIX is a strange bug.  All real CPUs have it.  Paravirt
	 * systems that run Linux at CPL > 0 may or may not have the
	 * issue, but, even if they have the issue, there's absolutely
	 * nothing we can do about it because we can't use the real IRET
	 * instruction.
	 *
	 * NB: For the time being, only 32-bit kernels support
	 * X86_BUG_ESPFIX as such.  64-bit kernels directly choose
	 * whether to apply espfix using paravirt hooks.  If any
	 * non-paravirt system ever shows up that does *not* have the
	 * ESPFIX issue, we can change this.
	 */
#ifdef CONFIG_X86_32
	set_cpu_bug(c, X86_BUG_ESPFIX);
#endif
}
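
/*
 * identify_cpu() below runs on the boot CPU and on every secondary:
 * generic detection first, then the vendor ->c_init() fixups, then the
 * generic filtering; for secondaries the feature words are finally
 * ANDed into boot_cpu_data so it only reports features common to all
 * CPUs.
 */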
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = 0;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_stepping = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0';		/* Unset */
	c->x86_model_id[0] = '\0';		/* Unset */
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->cpuid_level = -1;			/* CPUID not detected */
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;
	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
#ifdef CONFIG_X86_VMX_FEATURE_NAMES
	memset(&c->vmx_capability, 0, sizeof(c->vmx_capability));
#endif

	generic_identify(c);

	cpu_parse_topology(c);

	if (this_cpu->c_identify)
		this_cpu->c_identify(c);

	/* Clear/Set all flags overridden by options, after probe */
	apply_forced_caps(c);

	/*
	 * Set default APIC and TSC_DEADLINE MSR fencing flag. AMD and
	 * Hygon will clear it in ->c_init() below.
	 */
	set_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);

	/*
	 * Vendor-specific initialization. In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	bus_lock_init();

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/* Set up SMEP/SMAP/UMIP */
	setup_smep(c);
	setup_smap(c);
	setup_umip(c);

	/* Enable FSGSBASE instructions if available. */
	if (cpu_has(c, X86_FEATURE_FSGSBASE)) {
		cr4_set_bits(X86_CR4_FSGSBASE);
		elf_hwcap2 |= HWCAP2_FSGSBASE;
	}

	/*
	 * The vendor-specific functions might have changed features.
	 * Now we do "generic changes."
	 */

	/* Filter out anything that depends on CPUID levels we don't have */
	filter_cpuid_features(c, true);

	/* If the model name is still unset, do a table lookup. */
	if (!c->x86_model_id[0]) {
		const char *p;
		p = table_lookup_model(c);
		if (p)
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86, c->x86_model);
	}

	x86_init_rdrand(c);
	setup_pku(c);
	setup_cet(c);

	/*
	 * Clear/Set all flags overridden by options. This needs to be
	 * done before the SMP all-CPUs capability AND below.
	 */
	apply_forced_caps(c);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs. The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];

		/* OR, i.e. replicate the bug flags */
		for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
			c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
	}

	ppin_init(c);

	/* Init Machine Check Exception if available. */
	mcheck_cpu_init(c);

	numa_add_cpu(smp_processor_id());
}
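/*
 * Worked example of the capability merge above (illustrative feature
 * choice, not from any real CPU): if the boot CPU reports
 * X86_FEATURE_FSGSBASE but a later AP does not, the AND loop clears
 * the bit in boot_cpu_data, so the kernel-wide view only keeps
 * features common to every CPU. Bug bits go the other way: the OR
 * loop copies any bug already recorded in boot_cpu_data into the AP's
 * cpuinfo, so a bug found on one CPU is assumed for all of them.
 */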
/*
 * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
 * on 32-bit kernels:
 */
#ifdef CONFIG_X86_32
void enable_sep_cpu(void)
{
	struct tss_struct *tss;
	int cpu;

	if (!boot_cpu_has(X86_FEATURE_SEP))
		return;

	cpu = get_cpu();
	tss = &per_cpu(cpu_tss_rw, cpu);

	/*
	 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
	 * see the big comment in struct x86_hw_tss's definition.
	 */

	tss->x86_tss.ss1 = __KERNEL_CS;
	wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
	wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0);
	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);

	put_cpu();
}
#endif

static __init void identify_boot_cpu(void)
{
	identify_cpu(&boot_cpu_data);
	if (HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT))
		pr_info("CET detected: Indirect Branch Tracking enabled\n");
#ifdef CONFIG_X86_32
	enable_sep_cpu();
#endif
	cpu_detect_tlb(&boot_cpu_data);
	setup_cr_pinning();

	tsx_init();
	tdx_init();
	lkgs_init();
}

void identify_secondary_cpu(struct cpuinfo_x86 *c)
{
	BUG_ON(c == &boot_cpu_data);
	identify_cpu(c);
#ifdef CONFIG_X86_32
	enable_sep_cpu();
#endif
	x86_spec_ctrl_setup_ap();
	update_srbds_msr();
	if (boot_cpu_has_bug(X86_BUG_GDS))
		update_gds_msr();

	tsx_ap_init();
}

void print_cpu_info(struct cpuinfo_x86 *c)
{
	const char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM) {
		vendor = this_cpu->c_vendor;
	} else {
		if (c->cpuid_level >= 0)
			vendor = c->x86_vendor_id;
	}

	if (vendor && !strstr(c->x86_model_id, vendor))
		pr_cont("%s ", vendor);

	if (c->x86_model_id[0])
		pr_cont("%s", c->x86_model_id);
	else
		pr_cont("%d86", c->x86);

	pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);

	if (c->x86_stepping || c->cpuid_level >= 0)
		pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
	else
		pr_cont(")\n");
}

/*
 * clearcpuid= was already parsed in cpu_parse_early_param(). This dummy
 * function prevents it from becoming an environment variable for init.
 */
static __init int setup_clearcpuid(char *arg)
{
	return 1;
}
__setup("clearcpuid=", setup_clearcpuid);

DEFINE_PER_CPU_ALIGNED(struct pcpu_hot, pcpu_hot) = {
	.current_task	= &init_task,
	.preempt_count	= INIT_PREEMPT_COUNT,
	.top_of_stack	= TOP_OF_INIT_STACK,
};
EXPORT_PER_CPU_SYMBOL(pcpu_hot);
EXPORT_PER_CPU_SYMBOL(const_pcpu_hot);

#ifdef CONFIG_X86_64
DEFINE_PER_CPU_FIRST(struct fixed_percpu_data,
		     fixed_percpu_data) __aligned(PAGE_SIZE) __visible;
EXPORT_PER_CPU_SYMBOL_GPL(fixed_percpu_data);
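/*
 * Background for the MSR writes below, summarized here as a reading
 * aid (architectural behaviour, stated from the SDM/APM in broad
 * strokes): on a 64-bit SYSCALL the CPU loads RIP from MSR_LSTAR
 * (MSR_CSTAR for a compat-mode SYSCALL on AMD), saves RFLAGS in R11,
 * clears the RFLAGS bits selected by MSR_SYSCALL_MASK, and takes the
 * kernel CS/SS selectors from MSR_STAR. SYSRET reverses the selector
 * arithmetic using the user fields of MSR_STAR.
 */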
static void wrmsrl_cstar(unsigned long val)
{
	/*
	 * Intel CPUs do not support 32-bit SYSCALL. Writing to MSR_CSTAR
	 * is so far ignored by the CPU, but raises a #VE trap in a TDX
	 * guest. Avoid the pointless write on all Intel CPUs.
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		wrmsrl(MSR_CSTAR, val);
}

static inline void idt_syscall_init(void)
{
	wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);

	if (ia32_enabled()) {
		wrmsrl_cstar((unsigned long)entry_SYSCALL_compat);
		/*
		 * This only works on Intel CPUs.
		 * On AMD CPUs these MSRs are 32-bit, and the CPU truncates
		 * MSR_IA32_SYSENTER_EIP.
		 * This does not cause SYSENTER to jump to the wrong location,
		 * because AMD doesn't allow SYSENTER in long mode (either 32-
		 * or 64-bit).
		 */
		wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
		wrmsrl_safe(MSR_IA32_SYSENTER_ESP,
			    (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1));
		wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
	} else {
		wrmsrl_cstar((unsigned long)entry_SYSCALL32_ignore);
		wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
		wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
		wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
	}

	/*
	 * Flags to clear on syscall; clear as much as possible
	 * to minimize user space-kernel interference.
	 */
	wrmsrl(MSR_SYSCALL_MASK,
	       X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF|
	       X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_TF|
	       X86_EFLAGS_IF|X86_EFLAGS_DF|X86_EFLAGS_OF|
	       X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_RF|
	       X86_EFLAGS_AC|X86_EFLAGS_ID);
}

/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
	/* The default user and kernel segments */
	wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);

	/*
	 * Except for the IA32_STAR MSR, there is NO need to set up SYSCALL
	 * and SYSENTER MSRs for FRED, because FRED uses the ring 3 FRED
	 * entrypoint for SYSCALL and SYSENTER, and ERETU is the only legit
	 * instruction to return to ring 3 (both sysexit and sysret cause
	 * #UD when FRED is enabled).
	 */
	if (!cpu_feature_enabled(X86_FEATURE_FRED))
		idt_syscall_init();
}

#else /* CONFIG_X86_64 */

#ifdef CONFIG_STACKPROTECTOR
DEFINE_PER_CPU(unsigned long, __stack_chk_guard);
#ifndef CONFIG_SMP
EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
#endif
#endif

#endif /* CONFIG_X86_64 */

/*
 * Clear all 6 debug registers:
 */
static void clear_all_debug_regs(void)
{
	int i;

	for (i = 0; i < 8; i++) {
		/* Ignore DR4 and DR5, they are reserved */
		if ((i == 4) || (i == 5))
			continue;

		set_debugreg(0, i);
	}
}

#ifdef CONFIG_KGDB
/*
 * Restore debug regs if using kgdbwait and you have a kernel debugger
 * connection established.
 */
static void dbg_restore_debug_regs(void)
{
	if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
		arch_kgdb_ops.correct_hw_break();
}
#else /* ! CONFIG_KGDB */
#define dbg_restore_debug_regs()
#endif /* ! CONFIG_KGDB */
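/*
 * setup_getcpu() below stashes the CPU and node number in the limit
 * field of a user-visible GDT segment (GDT_ENTRY_CPUNODE) and, when
 * RDTSCP/RDPID are available, in MSR_TSC_AUX. A hedged sketch of how
 * user space can read it back without a syscall (mirroring what the
 * vDSO does; the exact bit split is an implementation detail of
 * vdso_encode_cpunode()):
 *
 *	unsigned long p;
 *	asm("lsl %1, %0" : "=r" (p) : "r" (GDT_ENTRY_CPUNODE*8 + 3));
 *	cpu  = p & VDSO_CPUNODE_MASK;
 *	node = p >> VDSO_CPUNODE_BITS;
 */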
static inline void setup_getcpu(int cpu)
{
	unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
	struct desc_struct d = { };

	if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID))
		wrmsr(MSR_TSC_AUX, cpudata, 0);

	/* Store CPU and node number in limit. */
	d.limit0 = cpudata;
	d.limit1 = cpudata >> 16;

	d.type = 5;	/* RO data, expand down, accessed */
	d.dpl = 3;	/* Visible to user code */
	d.s = 1;	/* Not a system segment */
	d.p = 1;	/* Present */
	d.d = 1;	/* 32-bit */

	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPUNODE, &d, DESCTYPE_S);
}

#ifdef CONFIG_X86_64
static inline void tss_setup_ist(struct tss_struct *tss)
{
	/* Set up the per-CPU TSS IST stacks */
	tss->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF);
	tss->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI);
	tss->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB);
	tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE);
	/* Only mapped when SEV-ES is active */
	tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC);
}
#else /* CONFIG_X86_64 */
static inline void tss_setup_ist(struct tss_struct *tss) { }
#endif /* !CONFIG_X86_64 */

static inline void tss_setup_io_bitmap(struct tss_struct *tss)
{
	tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET_INVALID;

#ifdef CONFIG_X86_IOPL_IOPERM
	tss->io_bitmap.prev_max = 0;
	tss->io_bitmap.prev_sequence = 0;
	memset(tss->io_bitmap.bitmap, 0xff, sizeof(tss->io_bitmap.bitmap));
	/*
	 * Invalidate the extra array entry past the end of the
	 * all-permission bitmap as required by the hardware.
	 */
	tss->io_bitmap.mapall[IO_BITMAP_LONGS] = ~0UL;
#endif
}

/*
 * Set up everything needed to handle exceptions from the IDT, including the
 * IST exceptions which use paranoid_entry().
 */
void cpu_init_exception_handling(bool boot_cpu)
{
	struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
	int cpu = raw_smp_processor_id();

	/* paranoid_entry() gets the CPU number from the GDT */
	setup_getcpu(cpu);

	/* For IDT mode, IST vectors need to be set in TSS. */
	if (!cpu_feature_enabled(X86_FEATURE_FRED))
		tss_setup_ist(tss);
	tss_setup_io_bitmap(tss);
	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);

	load_TR_desc();

	/* GHCB needs to be set up to handle #VC. */
	setup_ghcb();

	if (cpu_feature_enabled(X86_FEATURE_FRED)) {
		/* The boot CPU has enabled FRED during early boot */
		if (!boot_cpu)
			cpu_init_fred_exceptions();

		cpu_init_fred_rsps();
	} else {
		load_current_idt();
	}
}

void __init cpu_init_replace_early_idt(void)
{
	if (cpu_feature_enabled(X86_FEATURE_FRED))
		cpu_init_fred_exceptions();
	else
		idt_setup_early_pf();
}
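/*
 * Note on tss_setup_io_bitmap() above (architectural background, as a
 * reading aid): the CPU only consults the I/O permission bitmap if
 * io_bitmap_base points inside the TSS segment limit. Parking it at
 * IO_BITMAP_OFFSET_INVALID places it past the limit, so all I/O port
 * accesses from ring 3 fault by default until ioperm()/iopl() installs
 * a real bitmap.
 */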
/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT. We
 * reload it nevertheless; this function acts as a 'CPU state barrier':
 * nothing should get across.
 */
void cpu_init(void)
{
	struct task_struct *cur = current;
	int cpu = raw_smp_processor_id();

#ifdef CONFIG_NUMA
	if (this_cpu_read(numa_node) == 0 &&
	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
		set_numa_node(early_cpu_to_node(cpu));
#endif
	pr_debug("Initializing CPU#%d\n", cpu);

	if (IS_ENABLED(CONFIG_X86_64) || cpu_feature_enabled(X86_FEATURE_VME) ||
	    boot_cpu_has(X86_FEATURE_TSC) || boot_cpu_has(X86_FEATURE_DE))
		cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	if (IS_ENABLED(CONFIG_X86_64)) {
		loadsegment(fs, 0);
		memset(cur->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
		syscall_init();

		wrmsrl(MSR_FS_BASE, 0);
		wrmsrl(MSR_KERNEL_GS_BASE, 0);
		barrier();

		x2apic_setup();

		intel_posted_msi_init();
	}

	mmgrab(&init_mm);
	cur->active_mm = &init_mm;
	BUG_ON(cur->mm);
	initialize_tlbstate_and_flush();
	enter_lazy_tlb(&init_mm, cur);

	/*
	 * sp0 points to the entry trampoline stack regardless of what task
	 * is running.
	 */
	load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));

	load_mm_ldt(&init_mm);

	clear_all_debug_regs();
	dbg_restore_debug_regs();

	doublefault_init_cpu_tss();

	if (is_uv_system())
		uv_cpu_init();

	load_fixmap_gdt(cpu);
}

#ifdef CONFIG_MICROCODE_LATE_LOADING
/**
 * store_cpu_caps() - Store a snapshot of CPU capabilities
 * @curr_info: Pointer to the cpuinfo_x86 structure in which to store it
 *
 * Return: None
 */
void store_cpu_caps(struct cpuinfo_x86 *curr_info)
{
	/* Reload CPUID max function as it might've changed. */
	curr_info->cpuid_level = cpuid_eax(0);

	/* Copy all capability leaves and pick up the synthetic ones. */
	memcpy(&curr_info->x86_capability, &boot_cpu_data.x86_capability,
	       sizeof(curr_info->x86_capability));

	/* Get the hardware CPUID leaves */
	get_cpu_cap(curr_info);
}
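/*
 * Typical usage of store_cpu_caps() above and microcode_check() below
 * by the late loader (a hedged sketch reconstructed from these
 * interfaces, not a verbatim copy of the loader code):
 *
 *	struct cpuinfo_x86 prev_info;
 *
 *	store_cpu_caps(&prev_info);	// snapshot before the update
 *	...apply the microcode update...
 *	microcode_check(&prev_info);	// warn if feature bits changed
 */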
/**
 * microcode_check() - Check if any CPU capabilities changed after an update.
 * @prev_info: CPU capabilities stored before an update.
 *
 * The microcode loader calls this upon late microcode load to recheck
 * features, only when microcode has been updated. Caller holds the CPU
 * hotplug lock.
 *
 * Return: None
 */
void microcode_check(struct cpuinfo_x86 *prev_info)
{
	struct cpuinfo_x86 curr_info;

	perf_check_microcode();

	amd_check_microcode();

	store_cpu_caps(&curr_info);

	if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability,
		    sizeof(prev_info->x86_capability)))
		return;

	pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
	pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
}
#endif

/*
 * Invoked from core CPU hotplug code after hotplug operations
 */
void arch_smt_update(void)
{
	/* Handle the speculative execution misfeatures */
	cpu_bugs_smt_update();
	/* Check whether IPI broadcasting can be enabled */
	apic_smt_update();
}

void __init arch_cpu_finalize_init(void)
{
	struct cpuinfo_x86 *c = this_cpu_ptr(&cpu_info);

	identify_boot_cpu();

	select_idle_routine();

	/*
	 * identify_boot_cpu() initialized SMT support information, let the
	 * core code know.
	 */
	cpu_smt_set_num_threads(__max_threads_per_core, __max_threads_per_core);

	if (!IS_ENABLED(CONFIG_SMP)) {
		pr_info("CPU: ");
		print_cpu_info(&boot_cpu_data);
	}

	cpu_select_mitigations();

	arch_smt_update();

	if (IS_ENABLED(CONFIG_X86_32)) {
		/*
		 * Check whether this is a real i386, which is no longer
		 * supported, and fix up the utsname.
		 */
		if (boot_cpu_data.x86 < 4)
			panic("Kernel requires i486+ for 'invlpg' and other features");

		init_utsname()->machine[1] =
			'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
	}

	/*
	 * Must be before alternatives because it might set or clear
	 * feature bits.
	 */
	fpu__init_system();
	fpu__init_cpu();

	/*
	 * Ensure that access to the per-CPU representation has the initial
	 * boot CPU configuration.
	 */
	*c = boot_cpu_data;
	c->initialized = true;

	alternative_instructions();

	if (IS_ENABLED(CONFIG_X86_64)) {
		unsigned long USER_PTR_MAX = TASK_SIZE_MAX;

		/*
		 * Enable this when LAM is gated on LASS support
		if (cpu_feature_enabled(X86_FEATURE_LAM))
			USER_PTR_MAX = (1ul << 63) - PAGE_SIZE;
		 */
		runtime_const_init(ptr, USER_PTR_MAX);

		/*
		 * Make sure the first 2MB area is not mapped by huge pages.
		 * There are typically fixed-size MTRRs in there and
		 * overlapping MTRRs into large pages causes slowdowns.
		 *
		 * Right now we don't do that with gbpages because there
		 * seems to be very little benefit for that case.
		 */
		if (!direct_gbpages)
			set_memory_4k((unsigned long)__va(0), 1);
	} else {
		fpu__init_check_bugs();
	}

	/*
	 * This needs to be called before any devices perform DMA
	 * operations that might use the SWIOTLB bounce buffers. It will
	 * mark the bounce buffers as decrypted so that their usage will
	 * not cause "plain-text" data to be decrypted when accessed. It
	 * must be called after late_time_init() so that Hyper-V x86/x64
	 * hypercalls work when the SWIOTLB bounce buffers are decrypted.
	 */
	mem_encrypt_init();
}