// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <linux/topology.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cacheinfo.h>
#include <asm/cpu.h>
#include <asm/spec-ctrl.h>
#include <asm/smp.h>
#include <asm/numa.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>
#include <asm/debugreg.h>
#include <asm/resctrl.h>
#include <asm/sev.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
#endif

#include "cpu.h"

static inline int rdmsrl_amd_safe(unsigned int msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrl_amd_safe(unsigned int msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return wrmsr_safe_regs(gprs);
}

/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See http://www.multimania.com/poulot/k6bug.html
 * and section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 * (Publication # 21266 Issue Date: August 1998)
 *
 * The following test is erm.. interesting. AMD neglected to bump
 * the chip stepping when fixing the bug but they also tweaked some
 * performance at the same time..
 */

#ifdef CONFIG_X86_32
extern __visible void vide(void);
__asm__(".text\n"
	".globl vide\n"
	".type vide, @function\n"
	".align 4\n"
	"vide: ret\n");
#endif

static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround: remove the unneeded alias.
 */
#define CBAR		(0xfffc)	/* Configuration Base Address (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0x000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
#endif
}

static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;
	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_stepping == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		u64 d, d2;

		pr_info("AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */
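		/*
		 * Time one million indirect calls to vide() with RDTSC: on
		 * fixed (post-B9730xxxx) parts an indirect call is cheap, so
		 * averaging more than 20 cycles per call indicates the buggy
		 * stepping.
		 */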
		n = K6_BUG_LOOP;
		f_vide = vide;
		OPTIMIZER_HIDE_VAR(f_vide);
		d = rdtsc();
		while (n--)
			f_vide();
		d2 = rdtsc();
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
		else
			pr_cont("probably OK (after B9730xxxx).\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	    (c->x86_model == 8 && c->x86_stepping < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;

			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_stepping > 7) ||
	    c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;

			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
#endif
}

static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;

	/*
	 * Bit 15 of the Athlon-specific MSR C001_0015 (HWCR) needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPUs.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			pr_info("Enabling disabled K7/SSE Support.\n");
			msr_clear_bit(MSR_K7_HWCR, 15);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
	 * as per AMD technical note 27212 0.2.
	 */
	if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	/*
	 * The boot CPU (index 0) does not come through
	 * identify_secondary_cpu() and needs no MP check.
	 */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
	    (c->x86_stepping == 1)))
		return;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_stepping == 0))
		return;

	/*
	 * Athlon 662, Duron 671, and Athlons beyond model 7 have the MP
	 * capability bit. It's worth noting that the A5 stepping (662) of
	 * some Athlon XPs have the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
	    (c->x86_model > 7))
		if (cpu_has(c, X86_FEATURE_MP))
			return;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD processors is not suitable for SMP.\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}

#ifdef CONFIG_NUMA
/*
 * To work around a broken NUMA config. Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned int apicid = c->topo.apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu_llc_id(cpu);

	/*
	 * On a multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created. In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in. Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids. If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming a certain relationship
		 * between APIC ID, HT node ID and NUMA topology. As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->topo.initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void bsp_determine_snp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
	cc_vendor = CC_VENDOR_AMD;

	if (cpu_has(c, X86_FEATURE_SEV_SNP)) {
		/*
		 * The RMP table entry format is not architectural and is
		 * defined by the per-processor PPR. Restrict SNP support
		 * to the known CPU models for which the RMP table entry
		 * format is currently defined.
		 */
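		/*
		 * The RMP is also only under kernel control on bare metal;
		 * when running as a guest, the host owns it, hence the
		 * HYPERVISOR check below.
		 */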
		if (!cpu_has(c, X86_FEATURE_HYPERVISOR) &&
		    c->x86 >= 0x19 && snp_probe_rmptable_info()) {
			cc_platform_set(CC_ATTR_HOST_SEV_SNP);
		} else {
			setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
			cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
		}
	}
#endif
}

static void bsp_init_amd(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
		}
	}

	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;

		cpuid = cpuid_edx(0x80000005);
		assoc = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;

		va_align.mask = (upperbit - 1) & PAGE_MASK;
		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;

		/* A random value per boot for bit slice [12:upper_bit) */
		va_align.bits = get_random_u32() & va_align.mask;
	}

	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();

	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
	    c->x86 >= 0x15 && c->x86 <= 0x17) {
		unsigned int bit;

		switch (c->x86) {
		case 0x15: bit = 54; break;
		case 0x16: bit = 33; break;
		case 0x17: bit = 10; break;
		default: return;
		}
		/*
		 * Try to cache the base value so further operations can
		 * avoid RMW. If that faults, do not enable SSBD.
		 */
		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
			setup_force_cpu_cap(X86_FEATURE_SSBD);
			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
		}
	}

	resctrl_cpu_detect(c);

	/* Figure out Zen generations: */
	switch (c->x86) {
	case 0x17:
		switch (c->x86_model) {
		case 0x00 ... 0x2f:
		case 0x50 ... 0x5f:
			setup_force_cpu_cap(X86_FEATURE_ZEN1);
			break;
		case 0x30 ... 0x4f:
		case 0x60 ... 0x7f:
		case 0x90 ... 0x91:
		case 0xa0 ... 0xaf:
			setup_force_cpu_cap(X86_FEATURE_ZEN2);
			break;
		default:
			goto warn;
		}
		break;

	case 0x19:
		switch (c->x86_model) {
		case 0x00 ... 0x0f:
		case 0x20 ... 0x5f:
			setup_force_cpu_cap(X86_FEATURE_ZEN3);
			break;
		case 0x10 ... 0x1f:
		case 0x60 ... 0xaf:
			setup_force_cpu_cap(X86_FEATURE_ZEN4);
			break;
		default:
			goto warn;
		}
		break;

	case 0x1a:
		switch (c->x86_model) {
		case 0x00 ... 0x0f:
		case 0x20 ... 0x2f:
		case 0x40 ... 0x4f:
		case 0x70 ... 0x7f:
			setup_force_cpu_cap(X86_FEATURE_ZEN5);
			break;
		default:
			goto warn;
		}
		break;

	default:
		break;
	}

	bsp_determine_snp(c);
	return;

warn:
	WARN_ONCE(1, "Family 0x%x, model: 0x%x??\n", c->x86, c->x86_model);
}

static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
{
	u64 msr;

	/*
	 * BIOS support is required for SME and SEV.
	 *   For SME: If BIOS has enabled SME then adjust x86_phys_bits by
	 *	      the SME physical address space reduction value.
	 *	      If BIOS has not enabled SME then don't advertise the
	 *	      SME feature (set in scattered.c).
	 *	      If the kernel has not enabled SME via any means then
	 *	      don't advertise the SME feature.
	 *   For SEV: If BIOS has not enabled SEV then don't advertise SEV and
	 *	      any additional functionality based on it.
	 *
	 * In all cases, since support for SME and SEV requires long mode,
	 * don't advertise the feature under CONFIG_X86_32.
	 */
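	/*
	 * CPUID Fn8000_001F_EBX[11:6] reports by how many bits the physical
	 * address space shrinks when memory encryption is enabled, and
	 * MSR_AMD64_SYSCFG says whether the BIOS actually enabled it; both
	 * are consulted below.
	 */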
	if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
		/* Check if memory encryption is enabled */
		rdmsrl(MSR_AMD64_SYSCFG, msr);
		if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
			goto clear_all;

		/*
		 * Always adjust physical address bits. Even though this
		 * will be a value above 32-bits this is still done for
		 * CONFIG_X86_32 so that accurate values are reported.
		 */
		c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;

		if (IS_ENABLED(CONFIG_X86_32))
			goto clear_all;

		if (!sme_me_mask)
			setup_clear_cpu_cap(X86_FEATURE_SME);

		rdmsrl(MSR_K7_HWCR, msr);
		if (!(msr & MSR_K7_HWCR_SMMLOCK))
			goto clear_sev;

		return;

clear_all:
		setup_clear_cpu_cap(X86_FEATURE_SME);
clear_sev:
		setup_clear_cpu_cap(X86_FEATURE_SEV);
		setup_clear_cpu_cap(X86_FEATURE_SEV_ES);
		setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
	}
}

static void early_init_amd(struct cpuinfo_x86 *c)
{
	u32 dummy;

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
	if (c->x86_power & BIT(12))
		set_cpu_cap(c, X86_FEATURE_ACC_POWER);

	/* Bit 14 indicates the Runtime Average Power Limit interface. */
	if (c->x86_power & BIT(14))
		set_cpu_cap(c, X86_FEATURE_RAPL);

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_stepping >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
	 * after 16h.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC)) {
		if (c->x86 > 0x16)
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		else if (c->x86 >= 0xf) {
			/* check CPU config space for extended APIC ID */
			unsigned int val;

			val = read_pci_config(0, 24, 0, 0x68);
			if ((val >> 17 & 0x3) == 0x3)
				set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		}
	}
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL. VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	/* F16h erratum 793, CVE-2013-6885 */
	if (c->x86 == 0x16 && c->x86_model <= 0xf)
		msr_set_bit(MSR_AMD64_LS_CFG, 15);

	early_detect_mem_encrypt(c);

	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_IBPB_BRTYPE)) {
		if (c->x86 == 0x17 && boot_cpu_has(X86_FEATURE_AMD_IBPB))
			setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
		else if (c->x86 >= 0x19 && !wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
			setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
			setup_force_cpu_cap(X86_FEATURE_SBPB);
		}
	}
}

static void init_amd_k8(struct cpuinfo_x86 *c)
{
	u32 level;
	u64 value;

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/*
	 * Some BIOSes incorrectly force this feature, but only K8 revision D
	 * (model = 0x14) and later actually support it.
	 * (AMD Erratum #110, docId: 25759).
	 */
	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
			value &= ~BIT_64(32);
			wrmsrl_amd_safe(0xc001100d, value);
		}
	}

	if (!c->x86_model_id[0])
		strcpy(c->x86_model_id, "Hammer");

#ifdef CONFIG_SMP
	/*
	 * Disable the TLB flush filter by setting HWCR.FFDIS
	 * (bit 6 of MSR C001_0015) on K8.
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	msr_set_bit(MSR_K7_HWCR, 6);
#endif
	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);

	/*
	 * Check models and steppings affected by erratum 400. This is
	 * used to select the proper idle routine and to enable the
	 * check whether the machine is affected in arch_post_acpi_subsys_init()
	 * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
	 */
	if (c->x86_model > 0x41 ||
	    (c->x86_model == 0x41 && c->x86_stepping >= 0x2))
		setup_force_cpu_bug(X86_BUG_AMD_E400);
}

static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_MMCONF_FAM10H
	/* do this for boot cpu */
	if (c == &boot_cpu_data)
		check_enable_amd_mmconf_dmi();

	fam10h_check_enable_mmcfg();
#endif

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
	 * is always needed when GART is enabled, even in a kernel which has no
	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
	 * If it doesn't, we do it here as suggested by the BKDG.
	 *
	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
	 */
	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

	/*
	 * On family 10h BIOS may not have properly enabled WC+ support, causing
	 * it to be converted to CD memtype. This may result in performance
	 * degradation for certain nested-paging guests. Prevent this conversion
	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
	 *
	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
	 * guests on older kvm hosts.
	 */
	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

	set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);

	/*
	 * Check models and steppings affected by erratum 400. This is
	 * used to select the proper idle routine and to enable the
	 * check whether the machine is affected in arch_post_acpi_subsys_init()
	 * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
	 */
	if (c->x86_model > 0x2 ||
	    (c->x86_model == 0x2 && c->x86_stepping >= 0x1))
		setup_force_cpu_bug(X86_BUG_AMD_E400);
}

static void init_amd_ln(struct cpuinfo_x86 *c)
{
	/*
	 * Apply erratum 665 fix unconditionally so machines without a BIOS
	 * fix work.
	 */
	msr_set_bit(MSR_AMD64_DE_CFG, 31);
}

static bool rdrand_force;

static int __init rdrand_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "force"))
		rdrand_force = true;
	else
		return -EINVAL;

	return 0;
}
early_param("rdrand", rdrand_cmdline);

static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
{
	/*
	 * Saving of the MSR used to hide the RDRAND support during
	 * suspend/resume is done by arch/x86/power/cpu.c, which is
	 * dependent on CONFIG_PM_SLEEP.
	 */
	if (!IS_ENABLED(CONFIG_PM_SLEEP))
		return;

	/*
	 * The self-test can clear X86_FEATURE_RDRAND, so check for
	 * RDRAND support using the CPUID function directly.
	 */
	if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
		return;

	msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);

	/*
	 * Verify that the CPUID change has occurred in case the kernel is
	 * running virtualized and the hypervisor doesn't support the MSR.
	 */
	if (cpuid_ecx(1) & BIT(30)) {
		pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
		return;
	}

	clear_cpu_cap(c, X86_FEATURE_RDRAND);
	pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
}

static void init_amd_jg(struct cpuinfo_x86 *c)
{
	/*
	 * Some BIOS implementations do not restore proper RDRAND support
	 * across suspend and resume. Check on whether to hide the RDRAND
	 * instruction support via CPUID.
	 */
	clear_rdrand_cpuid_bit(c);
}

static void init_amd_bd(struct cpuinfo_x86 *c)
{
	u64 value;

	/*
	 * The Way Access Filter has a performance penalty on some workloads.
	 * Disable it on the affected CPUs.
	 */
	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
		if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrl_safe(MSR_F15H_IC_CFG, value);
		}
	}

	/*
	 * Some BIOS implementations do not restore proper RDRAND support
	 * across suspend and resume. Check on whether to hide the RDRAND
	 * instruction support via CPUID.
	 */
	clear_rdrand_cpuid_bit(c);
}

static void fix_erratum_1386(struct cpuinfo_x86 *c)
{
	/*
	 * Work around Erratum 1386. The XSAVES instruction malfunctions in
	 * certain circumstances on Zen1/2 uarch, and not all parts have had
	 * updated microcode at the time of writing (March 2023).
	 *
	 * Affected parts all have no supervisor XSAVE states, meaning that
	 * the XSAVEC instruction (which works fine) is equivalent.
	 */
	clear_cpu_cap(c, X86_FEATURE_XSAVES);
}

void init_spectral_chicken(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_MITIGATION_UNRET_ENTRY
	u64 value;

	/*
	 * On Zen2 we offer this chicken (bit) on the altar of Speculation.
	 *
	 * This suppresses speculation from the middle of a basic block, i.e. it
	 * suppresses non-branch predictions.
	 */
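	/*
	 * Poke the MSR on bare metal only and use the _safe() accessors:
	 * a hypervisor may not implement MSR_ZEN2_SPECTRAL_CHICKEN, and
	 * rdmsrl_safe() turns a #GP into an error return instead of an oops.
	 */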
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
		if (!rdmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) {
			value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT;
			wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
		}
	}
#endif
}

static void init_amd_zen_common(void)
{
	setup_force_cpu_cap(X86_FEATURE_ZEN);
#ifdef CONFIG_NUMA
	node_reclaim_distance = 32;
#endif
}

static void init_amd_zen1(struct cpuinfo_x86 *c)
{
	fix_erratum_1386(c);

	/* Fix up CPUID bits, but only if not virtualised. */
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {

		/* Erratum 1076: CPB feature bit not being set in CPUID. */
		if (!cpu_has(c, X86_FEATURE_CPB))
			set_cpu_cap(c, X86_FEATURE_CPB);
	}

	pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
	setup_force_cpu_bug(X86_BUG_DIV0);
}

static bool cpu_has_zenbleed_microcode(void)
{
	u32 good_rev = 0;

	switch (boot_cpu_data.x86_model) {
	case 0x30 ... 0x3f: good_rev = 0x0830107b; break;
	case 0x60 ... 0x67: good_rev = 0x0860010c; break;
	case 0x68 ... 0x6f: good_rev = 0x08608107; break;
	case 0x70 ... 0x7f: good_rev = 0x08701033; break;
	case 0xa0 ... 0xaf: good_rev = 0x08a00009; break;

	default:
		return false;
	}

	if (boot_cpu_data.microcode < good_rev)
		return false;

	return true;
}

static void zen2_zenbleed_check(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
		return;

	if (!cpu_has(c, X86_FEATURE_AVX))
		return;

	if (!cpu_has_zenbleed_microcode()) {
		pr_notice_once("Zenbleed: please update your microcode for the optimal fix\n");
		msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
	} else {
		msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
	}
}

static void init_amd_zen2(struct cpuinfo_x86 *c)
{
	init_spectral_chicken(c);
	fix_erratum_1386(c);
	zen2_zenbleed_check(c);
}

static void init_amd_zen3(struct cpuinfo_x86 *c)
{
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
		/*
		 * Zen3 (Fam19 model < 0x10) parts are not susceptible to
		 * Branch Type Confusion, but predate the allocation of the
		 * BTC_NO bit.
		 */
		if (!cpu_has(c, X86_FEATURE_BTC_NO))
			set_cpu_cap(c, X86_FEATURE_BTC_NO);
	}
}

static void init_amd_zen4(struct cpuinfo_x86 *c)
{
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR))
		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);
}

static void init_amd_zen5(struct cpuinfo_x86 *c)
{
}

static void init_amd(struct cpuinfo_x86 *c)
{
	u64 vm_cr;

	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* AMD FSRM also implies FSRS */
	if (cpu_has(c, X86_FEATURE_FSRM))
		set_cpu_cap(c, X86_FEATURE_FSRS);

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);

	switch (c->x86) {
	case 4:    init_amd_k5(c); break;
	case 5:    init_amd_k6(c); break;
	case 6:	   init_amd_k7(c); break;
	case 0xf:  init_amd_k8(c); break;
	case 0x10: init_amd_gh(c); break;
	case 0x12: init_amd_ln(c); break;
	case 0x15: init_amd_bd(c); break;
	case 0x16: init_amd_jg(c); break;
	}

	/*
	 * Save up on some future enablement work and do the common Zen
	 * settings here.
	 */
	if (c->x86 >= 0x17)
		init_amd_zen_common();

	if (boot_cpu_has(X86_FEATURE_ZEN1))
		init_amd_zen1(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN2))
		init_amd_zen2(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN3))
		init_amd_zen3(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN4))
		init_amd_zen4(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN5))
		init_amd_zen5(c);

	/*
	 * Enable the workaround for the FXSAVE leak on CPUs
	 * without the XSaveErPtr feature.
	 */
	if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

	cpu_detect_cache_sizes(c);

	srat_detect_node(c);

	init_amd_cacheinfo(c);

	if (cpu_has(c, X86_FEATURE_SVM)) {
		rdmsrl(MSR_VM_CR, vm_cr);
		if (vm_cr & SVM_VM_CR_SVM_DIS_MASK) {
			pr_notice_once("SVM disabled (by BIOS) in MSR_VM_CR\n");
			clear_cpu_cap(c, X86_FEATURE_SVM);
		}
	}

	if (!cpu_has(c, X86_FEATURE_LFENCE_RDTSC) && cpu_has(c, X86_FEATURE_XMM2)) {
		/*
		 * Use LFENCE for execution serialization. On families which
		 * don't have that MSR, LFENCE is already serializing.
		 * msr_set_bit() uses the safe accessors, too, even if the MSR
		 * is not present.
		 */
		msr_set_bit(MSR_AMD64_DE_CFG,
			    MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);

		/* A serializing LFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	}

	/*
	 * Family 0x12 and above processors have an APIC timer
	 * that keeps running in deep C-states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	/* 3DNow or LM implies PREFETCHW */
	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
	if (!cpu_feature_enabled(X86_FEATURE_XENPV))
		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);

	/*
	 * Turn on the Instructions Retired free counter on machines not
	 * susceptible to erratum #1054 "Instructions Retired Performance
	 * Counter May Be Inaccurate".
	 */
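	/*
	 * Erratum #1054 affects Zen1 parts only (family 0x17, models
	 * 00h-2Fh), hence the Zen1/model check below.
	 */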
	if (cpu_has(c, X86_FEATURE_IRPERF) &&
	    !(boot_cpu_has(X86_FEATURE_ZEN1) && c->x86_model <= 0x2f))
		msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);

	check_null_seg_clears_base(c);

	/*
	 * Make sure EFER[AIBRSE - Automatic IBRS Enable] is set. The APs are brought up
	 * using the trampoline code and as part of it, MSR_EFER gets prepared there in
	 * order to be replicated onto them. Regardless, set it here again, if not set,
	 * to protect against any future refactoring/code reorganization which might
	 * miss setting this important bit.
	 */
	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
	    cpu_has(c, X86_FEATURE_AUTOIBRS))
		WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS));

	/* AMD CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */
	clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
}

#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_stepping == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
		    (c->x86_stepping == 0 || c->x86_stepping == 1))
			size = 256;
	}
	return size;
}
#endif

static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->x86 < 0xf)
		return;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
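	/*
	 * CPUID 0x80000006 EAX describes the L2 TLB for 2M/4M pages the same
	 * way EBX above does for 4K pages: ITLB in the low 16 bits, DTLB in
	 * the high 16 bits, with a 12-bit entry count in each half. On K8
	 * the 8-bit L1 fields of CPUID 0x80000005 are used instead, hence
	 * the 0xff mask.
	 */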
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* Erratum 658 */
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m[ENTRIES] = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m[ENTRIES] = eax & 0xff;
		}
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}

static const struct cpu_dev amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.legacy_cache_size = amd_size_cache,
#endif
	.c_early_init	= early_init_amd,
	.c_detect_tlb	= cpu_detect_tlb_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[4], amd_dr_addr_mask);

static unsigned int amd_msr_dr_addr_masks[] = {
	MSR_F16H_DR0_ADDR_MASK,
	MSR_F16H_DR1_ADDR_MASK,
	MSR_F16H_DR1_ADDR_MASK + 1,
	MSR_F16H_DR1_ADDR_MASK + 2
};

void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr)
{
	int cpu = smp_processor_id();

	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
		return;

	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
		return;

	if (per_cpu(amd_dr_addr_mask, cpu)[dr] == mask)
		return;

	wrmsr(amd_msr_dr_addr_masks[dr], mask, 0);
	per_cpu(amd_dr_addr_mask, cpu)[dr] = mask;
}

unsigned long amd_get_dr_addr_mask(unsigned int dr)
{
	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
		return 0;

	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
		return 0;

	return per_cpu(amd_dr_addr_mask[dr], smp_processor_id());
}
EXPORT_SYMBOL_GPL(amd_get_dr_addr_mask);

u32 amd_get_highest_perf(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86 == 0x17 && ((c->x86_model >= 0x30 && c->x86_model < 0x40) ||
			       (c->x86_model >= 0x70 && c->x86_model < 0x80)))
		return 166;

	if (c->x86 == 0x19 && ((c->x86_model >= 0x20 && c->x86_model < 0x30) ||
			       (c->x86_model >= 0x40 && c->x86_model < 0x70)))
		return 166;

	return 255;
}
EXPORT_SYMBOL_GPL(amd_get_highest_perf);

static void zenbleed_check_cpu(void *unused)
{
	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());

	zen2_zenbleed_check(c);
}

void amd_check_microcode(void)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return;

	on_each_cpu(zenbleed_check_cpu, NULL, 1);
}

/*
 * Issue a DIV 0/1 insn to clear any division data from previous DIV
 * operations.
 */
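/*
 * The ALTERNATIVE patches the DIV in only on parts with X86_BUG_DIV0 set;
 * everywhere else the function body stays empty. Dividing 0 by 1 cannot
 * fault but still flushes the divider's internal state.
 */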
void noinstr amd_clear_divider(void)
{
	asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0)
		     :: "a" (0), "d" (0), "r" (1));
}
EXPORT_SYMBOL_GPL(amd_clear_divider);