// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <linux/topology.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cacheinfo.h>
#include <asm/cpu.h>
#include <asm/cpu_device_id.h>
#include <asm/spec-ctrl.h>
#include <asm/smp.h>
#include <asm/numa.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>
#include <asm/debugreg.h>
#include <asm/resctrl.h>
#include <asm/sev.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
#endif

#include "cpu.h"

u16 invlpgb_count_max __ro_after_init;

static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return wrmsr_safe_regs(gprs);
}

/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See http://www.multimania.com/poulot/k6bug.html
 * and section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 * (Publication # 21266 Issue Date: August 1998)
 *
 * The following test is erm.. interesting. AMD neglected to up
 * the chip setting when fixing the bug but they also tweaked some
 * performance at the same time..
 */

#ifdef CONFIG_X86_32
extern __visible void vide(void);
__asm__(".text\n"
	".globl vide\n"
	".type vide, @function\n"
	".align 4\n"
	"vide: ret\n");
#endif

static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround : Remove the unneeded alias.
 */
#define CBAR		(0xfffc) /* Configuration Base Address (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0X000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
#endif
}

static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;
	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_stepping == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		u64 d, d2;

		pr_info("AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */
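
		/*
		 * Rough heuristic: fixed parts presumably retire an indirect
		 * call/ret pair in well under 20 TSC ticks, so an average
		 * above 20 ticks per iteration of the loop below is treated
		 * as pre-B9730xxxx silicon that still has the erratum.
		 */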
		n = K6_BUG_LOOP;
		f_vide = vide;
		OPTIMIZER_HIDE_VAR(f_vide);
		d = rdtsc();
		while (n--)
			f_vide();
		d2 = rdtsc();
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
		else
			pr_cont("probably OK (after B9730xxxx).\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	    (c->x86_model == 8 && c->x86_stepping < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_stepping > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
#endif
}

static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;

	/*
	 * Bit 15 of the Athlon-specific MSR 15 needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPU's.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			pr_info("Enabling disabled K7/SSE Support.\n");
			msr_clear_bit(MSR_K7_HWCR, 15);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
	 * as per AMD technical note 27212 0.2.
	 */
	if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	/* Is this call from identify_secondary_cpu()? */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
	    (c->x86_stepping == 1)))
		return;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_stepping == 0))
		return;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have the capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XP's have the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has(c, X86_FEATURE_MP))
			return;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}

#ifdef CONFIG_NUMA
/*
 * To work around a broken NUMA config. Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->topo.apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu_llc_id(cpu);

	/*
	 * On a multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fix up some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created. In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in. Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids. If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming a certain relationship
		 * between APIC ID, HT node ID and NUMA topology. As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->topo.initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void bsp_determine_snp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
	cc_vendor = CC_VENDOR_AMD;

	if (cpu_has(c, X86_FEATURE_SEV_SNP)) {
		/*
		 * RMP table entry format is not architectural and is defined by the
		 * per-processor PPR. Restrict SNP support on the known CPU models
		 * for which the RMP table entry format is currently defined or for
		 * processors which support the architecturally defined RMPREAD
		 * instruction.
		 */
		if (!cpu_has(c, X86_FEATURE_HYPERVISOR) &&
		    (cpu_feature_enabled(X86_FEATURE_ZEN3) ||
		     cpu_feature_enabled(X86_FEATURE_ZEN4) ||
		     cpu_feature_enabled(X86_FEATURE_RMPREAD)) &&
		    snp_probe_rmptable_info()) {
			cc_platform_set(CC_ATTR_HOST_SEV_SNP);
		} else {
			setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
			cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
		}
	}
#endif
}

static void bsp_init_amd(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
		}
	}
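
	/*
	 * CPUID 0x80000005 EDX describes the L1 instruction cache: bits
	 * 31:24 give the size in KB and bits 23:16 the associativity, so
	 * (size << 10) / assoc below is the way size in bytes. Aligning
	 * the mmap/ASLR slice to that boundary presumably avoids L1I
	 * aliasing penalties between the two cores sharing a family 0x15
	 * compute unit.
	 */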
	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;

		cpuid	 = cpuid_edx(0x80000005);
		assoc	 = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;

		va_align.mask	= (upperbit - 1) & PAGE_MASK;
		va_align.flags	= ALIGN_VA_32 | ALIGN_VA_64;

		/* A random value per boot for bit slice [12:upper_bit) */
		va_align.bits = get_random_u32() & va_align.mask;
	}

	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();

	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
	    c->x86 >= 0x15 && c->x86 <= 0x17) {
		unsigned int bit;

		switch (c->x86) {
		case 0x15: bit = 54; break;
		case 0x16: bit = 33; break;
		case 0x17: bit = 10; break;
		default: return;
		}
		/*
		 * Try to cache the base value so further operations can
		 * avoid RMW. If that faults, do not enable SSBD.
		 */
		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
			setup_force_cpu_cap(X86_FEATURE_SSBD);
			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
		}
	}

	resctrl_cpu_detect(c);

	/* Figure out Zen generations: */
	switch (c->x86) {
	case 0x17:
		switch (c->x86_model) {
		case 0x00 ... 0x2f:
		case 0x50 ... 0x5f:
			setup_force_cpu_cap(X86_FEATURE_ZEN1);
			break;
		case 0x30 ... 0x4f:
		case 0x60 ... 0x7f:
		case 0x90 ... 0x91:
		case 0xa0 ... 0xaf:
			setup_force_cpu_cap(X86_FEATURE_ZEN2);
			break;
		default:
			goto warn;
		}
		break;

	case 0x19:
		switch (c->x86_model) {
		case 0x00 ... 0x0f:
		case 0x20 ... 0x5f:
			setup_force_cpu_cap(X86_FEATURE_ZEN3);
			break;
		case 0x10 ... 0x1f:
		case 0x60 ... 0xaf:
			setup_force_cpu_cap(X86_FEATURE_ZEN4);
			break;
		default:
			goto warn;
		}
		break;

	case 0x1a:
		switch (c->x86_model) {
		case 0x00 ... 0x2f:
		case 0x40 ... 0x4f:
		case 0x60 ... 0x7f:
			setup_force_cpu_cap(X86_FEATURE_ZEN5);
			break;
		default:
			goto warn;
		}
		break;

	default:
		break;
	}

	bsp_determine_snp(c);
	return;

warn:
	WARN_ONCE(1, "Family 0x%x, model: 0x%x??\n", c->x86, c->x86_model);
}

static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
{
	u64 msr;

	/*
	 * BIOS support is required for SME and SEV.
	 *   For SME: If BIOS has enabled SME then adjust x86_phys_bits by
	 *	      the SME physical address space reduction value.
	 *	      If BIOS has not enabled SME then don't advertise the
	 *	      SME feature (set in scattered.c).
	 *	      If the kernel has not enabled SME via any means then
	 *	      don't advertise the SME feature.
	 *   For SEV: If BIOS has not enabled SEV then don't advertise SEV and
	 *	      any additional functionality based on it.
	 *
	 * In all cases, since support for SME and SEV requires long mode,
	 * don't advertise the feature under CONFIG_X86_32.
	 */
	if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
		/* Check if memory encryption is enabled */
		rdmsrl(MSR_AMD64_SYSCFG, msr);
		if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
			goto clear_all;

		/*
		 * Always adjust physical address bits. Even though this
		 * will be a value above 32-bits this is still done for
		 * CONFIG_X86_32 so that accurate values are reported.
		 */
		c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;

		if (IS_ENABLED(CONFIG_X86_32))
			goto clear_all;

		if (!sme_me_mask)
			setup_clear_cpu_cap(X86_FEATURE_SME);

		rdmsrl(MSR_K7_HWCR, msr);
		if (!(msr & MSR_K7_HWCR_SMMLOCK))
			goto clear_sev;

		return;

clear_all:
		setup_clear_cpu_cap(X86_FEATURE_SME);
clear_sev:
		setup_clear_cpu_cap(X86_FEATURE_SEV);
		setup_clear_cpu_cap(X86_FEATURE_SEV_ES);
		setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
	}
}

static void early_init_amd(struct cpuinfo_x86 *c)
{
	u32 dummy;

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
	if (c->x86_power & BIT(12))
		set_cpu_cap(c, X86_FEATURE_ACC_POWER);

	/* Bit 14 indicates the Runtime Average Power Limit interface. */
	if (c->x86_power & BIT(14))
		set_cpu_cap(c, X86_FEATURE_RAPL);

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_stepping >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
	 * after 16h.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC)) {
		if (c->x86 > 0x16)
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		else if (c->x86 >= 0xf) {
			/* check CPU config space for extended APIC ID */
			unsigned int val;

			val = read_pci_config(0, 24, 0, 0x68);
			if ((val >> 17 & 0x3) == 0x3)
				set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		}
	}
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL. VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	/* F16h erratum 793, CVE-2013-6885 */
	if (c->x86 == 0x16 && c->x86_model <= 0xf)
		msr_set_bit(MSR_AMD64_LS_CFG, 15);

	early_detect_mem_encrypt(c);

	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_IBPB_BRTYPE)) {
		if (c->x86 == 0x17 && boot_cpu_has(X86_FEATURE_AMD_IBPB))
			setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
		else if (c->x86 >= 0x19 && !wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
			setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
			setup_force_cpu_cap(X86_FEATURE_SBPB);
		}
	}
}

static void init_amd_k8(struct cpuinfo_x86 *c)
{
	u32 level;
	u64 value;

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/*
	 * Some BIOSes incorrectly force this feature, but only K8 revision D
	 * (model = 0x14) and later actually support it.
	 * (AMD Erratum #110, docId: 25759).
	 */
	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM) && !cpu_has(c, X86_FEATURE_HYPERVISOR)) {
		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
			value &= ~BIT_64(32);
			wrmsrl_amd_safe(0xc001100d, value);
		}
	}

	if (!c->x86_model_id[0])
		strcpy(c->x86_model_id, "Hammer");

#ifdef CONFIG_SMP
	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	msr_set_bit(MSR_K7_HWCR, 6);
#endif
	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);

	/*
	 * Check models and steppings affected by erratum 400. This is
	 * used to select the proper idle routine and to enable the
	 * check whether the machine is affected in arch_post_acpi_subsys_init()
	 * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
	 */
	if (c->x86_model > 0x41 ||
	    (c->x86_model == 0x41 && c->x86_stepping >= 0x2))
		setup_force_cpu_bug(X86_BUG_AMD_E400);
}

static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_MMCONF_FAM10H
	/* do this for boot cpu */
	if (c == &boot_cpu_data)
		check_enable_amd_mmconf_dmi();

	fam10h_check_enable_mmcfg();
#endif

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
	 * is always needed when GART is enabled, even in a kernel which has no
	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
	 * If it doesn't, we do it here as suggested by the BKDG.
	 *
	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
	 */
	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

	/*
	 * On family 10h BIOS may not have properly enabled WC+ support, causing
	 * it to be converted to CD memtype. This may result in performance
	 * degradation for certain nested-paging guests. Prevent this conversion
	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
	 *
	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
	 * guests on older kvm hosts.
	 */
	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

	set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);

	/*
	 * Check models and steppings affected by erratum 400. This is
	 * used to select the proper idle routine and to enable the
	 * check whether the machine is affected in arch_post_acpi_subsys_init()
	 * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
	 */
	if (c->x86_model > 0x2 ||
	    (c->x86_model == 0x2 && c->x86_stepping >= 0x1))
		setup_force_cpu_bug(X86_BUG_AMD_E400);
}

static void init_amd_ln(struct cpuinfo_x86 *c)
{
	/*
	 * Apply erratum 665 fix unconditionally so machines without a BIOS
	 * fix work.
	 */
	msr_set_bit(MSR_AMD64_DE_CFG, 31);
}

static bool rdrand_force;

static int __init rdrand_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "force"))
		rdrand_force = true;
	else
		return -EINVAL;

	return 0;
}
early_param("rdrand", rdrand_cmdline);

static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
{
	/*
	 * Saving of the MSR used to hide the RDRAND support during
	 * suspend/resume is done by arch/x86/power/cpu.c, which is
	 * dependent on CONFIG_PM_SLEEP.
	 */
	if (!IS_ENABLED(CONFIG_PM_SLEEP))
		return;

	/*
	 * The self-test can clear X86_FEATURE_RDRAND, so check for
	 * RDRAND support using the CPUID function directly.
	 */
	if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
		return;

	msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);

	/*
	 * Verify that the CPUID change has occurred in case the kernel is
	 * running virtualized and the hypervisor doesn't support the MSR.
	 */
	if (cpuid_ecx(1) & BIT(30)) {
		pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
		return;
	}

	clear_cpu_cap(c, X86_FEATURE_RDRAND);
	pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
}

static void init_amd_jg(struct cpuinfo_x86 *c)
{
	/*
	 * Some BIOS implementations do not restore proper RDRAND support
	 * across suspend and resume. Check whether to hide the RDRAND
	 * instruction support via CPUID.
	 */
	clear_rdrand_cpuid_bit(c);
}

static void init_amd_bd(struct cpuinfo_x86 *c)
{
	u64 value;

	/*
	 * The way access filter has a performance penalty on some workloads.
	 * Disable it on the affected CPUs.
	 */
	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
		if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrl_safe(MSR_F15H_IC_CFG, value);
		}
	}

	/*
	 * Some BIOS implementations do not restore proper RDRAND support
	 * across suspend and resume. Check whether to hide the RDRAND
	 * instruction support via CPUID.
	 */
	clear_rdrand_cpuid_bit(c);
}
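
/*
 * First microcode revisions that, as far as is known, carry the fix for
 * erratum 1386 on each affected family/model/stepping; consumed by
 * x86_match_min_microcode_rev() in fix_erratum_1386() below.
 */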
static const struct x86_cpu_id erratum_1386_microcode[] = {
	X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, 0x17, 0x01), 0x2, 0x2, 0x0800126e),
	X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, 0x17, 0x31), 0x0, 0x0, 0x08301052),
	{}
};

static void fix_erratum_1386(struct cpuinfo_x86 *c)
{
	/*
	 * Work around Erratum 1386. The XSAVES instruction malfunctions in
	 * certain circumstances on Zen1/2 uarch, and not all parts have had
	 * updated microcode at the time of writing (March 2023).
	 *
	 * Affected parts all have no supervisor XSAVE states, meaning that
	 * the XSAVEC instruction (which works fine) is equivalent.
	 *
	 * Clear the feature flag only on microcode revisions which
	 * don't have the fix.
	 */
	if (x86_match_min_microcode_rev(erratum_1386_microcode))
		return;

	clear_cpu_cap(c, X86_FEATURE_XSAVES);
}

void init_spectral_chicken(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_MITIGATION_UNRET_ENTRY
	u64 value;

	/*
	 * On Zen2 we offer this chicken (bit) on the altar of Speculation.
	 *
	 * This suppresses speculation from the middle of a basic block, i.e. it
	 * suppresses non-branch predictions.
	 */
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
		if (!rdmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) {
			value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT;
			wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
		}
	}
#endif
}

static void init_amd_zen_common(void)
{
	setup_force_cpu_cap(X86_FEATURE_ZEN);
#ifdef CONFIG_NUMA
	node_reclaim_distance = 32;
#endif
}

static void init_amd_zen1(struct cpuinfo_x86 *c)
{
	fix_erratum_1386(c);

	/* Fix up CPUID bits, but only if not virtualised. */
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {

		/* Erratum 1076: CPB feature bit not being set in CPUID. */
		if (!cpu_has(c, X86_FEATURE_CPB))
			set_cpu_cap(c, X86_FEATURE_CPB);
	}

	pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
	setup_force_cpu_bug(X86_BUG_DIV0);

	/*
	 * Turn off the Instructions Retired free counter on machines that are
	 * susceptible to erratum #1054 "Instructions Retired Performance
	 * Counter May Be Inaccurate".
	 */
	if (c->x86_model < 0x30) {
		msr_clear_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
		clear_cpu_cap(c, X86_FEATURE_IRPERF);
	}
}

static bool cpu_has_zenbleed_microcode(void)
{
	u32 good_rev = 0;

	switch (boot_cpu_data.x86_model) {
	case 0x30 ... 0x3f: good_rev = 0x0830107b; break;
	case 0x60 ... 0x67: good_rev = 0x0860010c; break;
	case 0x68 ... 0x6f: good_rev = 0x08608107; break;
	case 0x70 ... 0x7f: good_rev = 0x08701033; break;
	case 0xa0 ... 0xaf: good_rev = 0x08a00009; break;

	default:
		return false;
	}

	if (boot_cpu_data.microcode < good_rev)
		return false;

	return true;
}

static void zen2_zenbleed_check(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
		return;

	if (!cpu_has(c, X86_FEATURE_AVX))
		return;

	if (!cpu_has_zenbleed_microcode()) {
		pr_notice_once("Zenbleed: please update your microcode for the most optimal fix\n");
		msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
	} else {
		msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
	}
}

static void init_amd_zen2(struct cpuinfo_x86 *c)
{
	init_spectral_chicken(c);
	fix_erratum_1386(c);
	zen2_zenbleed_check(c);
}

static void init_amd_zen3(struct cpuinfo_x86 *c)
{
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
		/*
		 * Zen3 (Fam19 model < 0x10) parts are not susceptible to
		 * Branch Type Confusion, but predate the allocation of the
		 * BTC_NO bit.
		 */
		if (!cpu_has(c, X86_FEATURE_BTC_NO))
			set_cpu_cap(c, X86_FEATURE_BTC_NO);
	}
}

static void init_amd_zen4(struct cpuinfo_x86 *c)
{
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR))
		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);

	/*
	 * These Zen4 SoCs advertise support for virtualized VMLOAD/VMSAVE
	 * in some BIOS versions but they can lead to random host reboots.
	 */
	switch (c->x86_model) {
	case 0x18 ... 0x1f:
	case 0x60 ... 0x7f:
		clear_cpu_cap(c, X86_FEATURE_V_VMSAVE_VMLOAD);
		break;
	}
}

static void init_amd_zen5(struct cpuinfo_x86 *c)
{
}

static void init_amd(struct cpuinfo_x86 *c)
{
	u64 vm_cr;

	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* AMD FSRM also implies FSRS */
	if (cpu_has(c, X86_FEATURE_FSRM))
		set_cpu_cap(c, X86_FEATURE_FSRS);

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);

	switch (c->x86) {
	case 4:    init_amd_k5(c); break;
	case 5:    init_amd_k6(c); break;
	case 6:	   init_amd_k7(c); break;
	case 0xf:  init_amd_k8(c); break;
	case 0x10: init_amd_gh(c); break;
	case 0x12: init_amd_ln(c); break;
	case 0x15: init_amd_bd(c); break;
	case 0x16: init_amd_jg(c); break;
	}

	/*
	 * Save up on some future enablement work and do common Zen
	 * settings.
	 */
	if (c->x86 >= 0x17)
		init_amd_zen_common();

	if (boot_cpu_has(X86_FEATURE_ZEN1))
		init_amd_zen1(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN2))
		init_amd_zen2(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN3))
		init_amd_zen3(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN4))
		init_amd_zen4(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN5))
		init_amd_zen5(c);

	/*
	 * Enable workaround for FXSAVE leak on CPUs
	 * without the XSaveErPtr feature
	 */
	if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

	cpu_detect_cache_sizes(c);

	srat_detect_node(c);

	init_amd_cacheinfo(c);

	if (cpu_has(c, X86_FEATURE_SVM)) {
		rdmsrl(MSR_VM_CR, vm_cr);
		if (vm_cr & SVM_VM_CR_SVM_DIS_MASK) {
			pr_notice_once("SVM disabled (by BIOS) in MSR_VM_CR\n");
			clear_cpu_cap(c, X86_FEATURE_SVM);
		}
	}

	if (!cpu_has(c, X86_FEATURE_LFENCE_RDTSC) && cpu_has(c, X86_FEATURE_XMM2)) {
		/*
		 * Use LFENCE for execution serialization. On families which
		 * don't have that MSR, LFENCE is already serializing.
		 * msr_set_bit() uses the safe accessors, too, even if the MSR
		 * is not present.
		 */
		msr_set_bit(MSR_AMD64_DE_CFG,
			    MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);

		/* A serializing LFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	}

	/*
	 * Family 0x12 and above processors have an APIC timer
	 * that keeps running in deep C states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	/* 3DNow or LM implies PREFETCHW */
	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
	if (!cpu_feature_enabled(X86_FEATURE_XENPV))
		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);

	/* Enable the Instructions Retired free counter */
	if (cpu_has(c, X86_FEATURE_IRPERF))
		msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);

	check_null_seg_clears_base(c);

	/*
	 * Make sure EFER[AIBRSE - Automatic IBRS Enable] is set. The APs are brought up
	 * using the trampoline code and as part of it, MSR_EFER gets prepared there in
	 * order to be replicated onto them. Regardless, set it here again, if not set,
	 * to protect against any future refactoring/code reorganization which might
	 * miss setting this important bit.
	 */
	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
	    cpu_has(c, X86_FEATURE_AUTOIBRS))
		WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS) < 0);

	/* AMD CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */
	clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);

	/* Enable Translation Cache Extension */
	if (cpu_has(c, X86_FEATURE_TCE))
		msr_set_bit(MSR_EFER, _EFER_TCE);
}

#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_stepping == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
		    (c->x86_stepping == 0 || c->x86_stepping == 1))
			size = 256;
	}
	return size;
}
#endif
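
/*
 * Populate the software TLB size counters from CPUID leaf 0x80000006 (the L2
 * TLB; 12-bit entry counts, instruction in the low halves of EAX/EBX, data in
 * bits 27:16), falling back to leaf 0x80000005 (the L1 TLB, 8-bit fields)
 * where the L2 values are unusable, as noted in the comments below.
 */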
static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->x86 < 0xf)
		return;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	tlb_lld_4k = (ebx >> 16) & mask;
	tlb_lli_4k = ebx & mask;

	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m = tlb_lld_2m >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* Erratum 658 */
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m = eax & 0xff;
		}
	} else
		tlb_lli_2m = eax & mask;

	tlb_lli_4m = tlb_lli_2m >> 1;

	/* Max number of pages INVLPGB can invalidate in one shot */
	if (cpu_has(c, X86_FEATURE_INVLPGB))
		invlpgb_count_max = (cpuid_edx(0x80000008) & 0xffff) + 1;
}

static const struct cpu_dev amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.legacy_cache_size = amd_size_cache,
#endif
	.c_early_init	= early_init_amd,
	.c_detect_tlb	= cpu_detect_tlb_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[4], amd_dr_addr_mask);

static unsigned int amd_msr_dr_addr_masks[] = {
	MSR_F16H_DR0_ADDR_MASK,
	MSR_F16H_DR1_ADDR_MASK,
	MSR_F16H_DR1_ADDR_MASK + 1,
	MSR_F16H_DR1_ADDR_MASK + 2
};

void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr)
{
	int cpu = smp_processor_id();

	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
		return;

	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
		return;

	if (per_cpu(amd_dr_addr_mask, cpu)[dr] == mask)
		return;

	wrmsr(amd_msr_dr_addr_masks[dr], mask, 0);
	per_cpu(amd_dr_addr_mask, cpu)[dr] = mask;
}

unsigned long amd_get_dr_addr_mask(unsigned int dr)
{
	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
		return 0;

	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
		return 0;

	return per_cpu(amd_dr_addr_mask[dr], smp_processor_id());
}
EXPORT_SYMBOL_GPL(amd_get_dr_addr_mask);

static void zenbleed_check_cpu(void *unused)
{
	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());

	zen2_zenbleed_check(c);
}

void amd_check_microcode(void)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return;

	if (cpu_feature_enabled(X86_FEATURE_ZEN2))
		on_each_cpu(zenbleed_check_cpu, NULL, 1);
}