// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <linux/topology.h>
#include <linux/platform_data/x86/amd-fch.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cacheinfo.h>
#include <asm/cpu.h>
#include <asm/cpu_device_id.h>
#include <asm/spec-ctrl.h>
#include <asm/smp.h>
#include <asm/numa.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>
#include <asm/debugreg.h>
#include <asm/resctrl.h>
#include <asm/msr.h>
#include <asm/sev.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
#endif

#include "cpu.h"

u16 invlpgb_count_max __ro_after_init = 1;

static inline int rdmsrq_amd_safe(unsigned msr, u64 *p)
{
	u32 gprs[8] = { 0 };
	int err;

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	/*
	 * gprs[] is handed to rdmsr_safe_regs() in register order:
	 * [0]=EAX, [1]=ECX (MSR number), [2]=EDX, [7]=EDI.  EDI must hold
	 * the AMD "passcode" 0x9c5a203a to access these K8-only MSRs.
	 */
	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrq_amd_safe(unsigned msr, u64 val)
{
	u32 gprs[8] = { 0 };

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return wrmsr_safe_regs(gprs);
}

/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See http://www.multimania.com/poulot/k6bug.html
 * and section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 * (Publication # 21266 Issue Date: August 1998)
 *
 * The following test is erm.. interesting. AMD neglected to up
 * the chip setting when fixing the bug but they also tweaked some
 * performance at the same time..
 */

#ifdef CONFIG_X86_32
extern __visible void vide(void);
__asm__(".text\n"
	".globl vide\n"
	".type vide, @function\n"
	".align 4\n"
	"vide: ret\n");
#endif

static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround : Remove the unneeded alias.
 */
#define CBAR		(0xfffc) /* Configuration Base Address (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0x000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
#endif
}

static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;
	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_stepping == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		u64 d, d2;

		pr_info("AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */

		n = K6_BUG_LOOP;
		f_vide = vide;
		OPTIMIZER_HIDE_VAR(f_vide);
		d = rdtsc();
		while (n--)
			f_vide();
		d2 = rdtsc();
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
		else
			pr_cont("probably OK (after B9730xxxx).\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	    (c->x86_model == 8 && c->x86_stepping < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_stepping > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
#endif
}

static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;

	/*
	 * Bit 15 of Athlon specific MSR 15, needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPUs.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			pr_info("Enabling disabled K7/SSE Support.\n");
			msr_clear_bit(MSR_K7_HWCR, 15);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
	 * as per AMD technical note 27212 0.2.
	 */
	if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	/* Is this call coming from identify_secondary_cpu()? */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
	    (c->x86_stepping == 1)))
		return;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_stepping == 0))
		return;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XPs have the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has(c, X86_FEATURE_MP))
			return;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}

#ifdef CONFIG_NUMA
/*
 * To workaround broken NUMA config.  Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->topo.apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu_llc_id(cpu);

	/*
	 * On a multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created. In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in. Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids. If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming a certain relationship
		 * between APIC ID, HT node ID and NUMA topology. As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->topo.initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void bsp_determine_snp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
	cc_vendor = CC_VENDOR_AMD;

	if (cpu_has(c, X86_FEATURE_SEV_SNP)) {
		/*
		 * RMP table entry format is not architectural and is defined by the
		 * per-processor PPR. Restrict SNP support on the known CPU models
		 * for which the RMP table entry format is currently defined or for
		 * processors which support the architecturally defined RMPREAD
		 * instruction.
		 */
		if (!cpu_has(c, X86_FEATURE_HYPERVISOR) &&
		    (cpu_feature_enabled(X86_FEATURE_ZEN3) ||
		     cpu_feature_enabled(X86_FEATURE_ZEN4) ||
		     cpu_feature_enabled(X86_FEATURE_RMPREAD)) &&
		    snp_probe_rmptable_info()) {
			cc_platform_set(CC_ATTR_HOST_SEV_SNP);
		} else {
			setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
			cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
		}
	}
#endif
}

#define ZEN_MODEL_STEP_UCODE(fam, model, step, ucode) \
	X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, fam, model), \
			    step, step, ucode)

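/*
 * Minimum microcode revisions, per Zen3/Zen4 model/stepping, which carry
 * the fix for the Transient Scheduler Attacks (TSA) issue.  tsa_init()
 * below consults this table to decide whether X86_FEATURE_VERW_CLEAR can
 * be advertised for the running microcode.
 */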
static const struct x86_cpu_id amd_tsa_microcode[] = {
	ZEN_MODEL_STEP_UCODE(0x19, 0x01, 0x1, 0x0a0011d7),
	ZEN_MODEL_STEP_UCODE(0x19, 0x01, 0x2, 0x0a00123b),
	ZEN_MODEL_STEP_UCODE(0x19, 0x08, 0x2, 0x0a00820d),
	ZEN_MODEL_STEP_UCODE(0x19, 0x11, 0x1, 0x0a10114c),
	ZEN_MODEL_STEP_UCODE(0x19, 0x11, 0x2, 0x0a10124c),
	ZEN_MODEL_STEP_UCODE(0x19, 0x18, 0x1, 0x0a108109),
	ZEN_MODEL_STEP_UCODE(0x19, 0x21, 0x0, 0x0a20102e),
	ZEN_MODEL_STEP_UCODE(0x19, 0x21, 0x2, 0x0a201211),
	ZEN_MODEL_STEP_UCODE(0x19, 0x44, 0x1, 0x0a404108),
	ZEN_MODEL_STEP_UCODE(0x19, 0x50, 0x0, 0x0a500012),
	ZEN_MODEL_STEP_UCODE(0x19, 0x61, 0x2, 0x0a60120a),
	ZEN_MODEL_STEP_UCODE(0x19, 0x74, 0x1, 0x0a704108),
	ZEN_MODEL_STEP_UCODE(0x19, 0x75, 0x2, 0x0a705208),
	ZEN_MODEL_STEP_UCODE(0x19, 0x78, 0x0, 0x0a708008),
	ZEN_MODEL_STEP_UCODE(0x19, 0x7c, 0x0, 0x0a70c008),
	ZEN_MODEL_STEP_UCODE(0x19, 0xa0, 0x2, 0x0aa00216),
	{},
};

static void tsa_init(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
		return;

	if (cpu_has(c, X86_FEATURE_ZEN3) ||
	    cpu_has(c, X86_FEATURE_ZEN4)) {
		if (x86_match_min_microcode_rev(amd_tsa_microcode))
			setup_force_cpu_cap(X86_FEATURE_VERW_CLEAR);
		else
			pr_debug("%s: current revision: 0x%x\n", __func__, c->microcode);
	} else {
		setup_force_cpu_cap(X86_FEATURE_TSA_SQ_NO);
		setup_force_cpu_cap(X86_FEATURE_TSA_L1_NO);
	}
}

static void bsp_init_amd(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrq(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
		}
	}

	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;

		cpuid	 = cpuid_edx(0x80000005);
		assoc	 = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;

		va_align.mask  = (upperbit - 1) & PAGE_MASK;
		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;

		/* A random value per boot for bit slice [12:upper_bit) */
		va_align.bits = get_random_u32() & va_align.mask;
	}

	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();

	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
	    c->x86 >= 0x15 && c->x86 <= 0x17) {
		unsigned int bit;

		switch (c->x86) {
		case 0x15: bit = 54; break;
		case 0x16: bit = 33; break;
		case 0x17: bit = 10; break;
		default: return;
		}
		/*
		 * Try to cache the base value so further operations can
		 * avoid RMW. If that faults, do not enable SSBD.
		 */
		if (!rdmsrq_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
			setup_force_cpu_cap(X86_FEATURE_SSBD);
			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
		}
	}

	resctrl_cpu_detect(c);

	/* Figure out Zen generations: */
	switch (c->x86) {
	case 0x17:
		switch (c->x86_model) {
		case 0x00 ... 0x2f:
		case 0x50 ... 0x5f:
			setup_force_cpu_cap(X86_FEATURE_ZEN1);
			break;
		case 0x30 ... 0x4f:
		case 0x60 ... 0x7f:
		case 0x90 ... 0x91:
		case 0xa0 ... 0xaf:
			setup_force_cpu_cap(X86_FEATURE_ZEN2);
			break;
		default:
			goto warn;
		}
		break;

	case 0x19:
		switch (c->x86_model) {
		case 0x00 ... 0x0f:
		case 0x20 ... 0x5f:
			setup_force_cpu_cap(X86_FEATURE_ZEN3);
			break;
		case 0x10 ... 0x1f:
		case 0x60 ... 0xaf:
			setup_force_cpu_cap(X86_FEATURE_ZEN4);
			break;
		default:
			goto warn;
		}
		break;

	case 0x1a:
		switch (c->x86_model) {
		case 0x00 ... 0x2f:
		case 0x40 ... 0x4f:
		case 0x60 ... 0x7f:
			setup_force_cpu_cap(X86_FEATURE_ZEN5);
			break;
		case 0x50 ... 0x5f:
		case 0x90 ... 0xaf:
		case 0xc0 ... 0xcf:
			setup_force_cpu_cap(X86_FEATURE_ZEN6);
			break;
		default:
			goto warn;
		}
		break;

	default:
		break;
	}

	bsp_determine_snp(c);
	tsa_init(c);

	if (cpu_has(c, X86_FEATURE_GP_ON_USER_CPUID))
		setup_force_cpu_cap(X86_FEATURE_CPUID_FAULT);

	return;

warn:
	WARN_ONCE(1, "Family 0x%x, model: 0x%x??\n", c->x86, c->x86_model);
}

static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
{
	u64 msr;

	/*
	 * BIOS support is required for SME and SEV.
	 *   For SME: If BIOS has enabled SME then adjust x86_phys_bits by
	 *            the SME physical address space reduction value.
	 *            If BIOS has not enabled SME then don't advertise the
	 *            SME feature (set in scattered.c).
	 *            If the kernel has not enabled SME via any means then
	 *            don't advertise the SME feature.
	 *   For SEV: If BIOS has not enabled SEV then don't advertise SEV and
	 *            any additional functionality based on it.
	 *
	 * In all cases, since support for SME and SEV requires long mode,
	 * don't advertise the feature under CONFIG_X86_32.
	 */
	if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
		/* Check if memory encryption is enabled */
		rdmsrq(MSR_AMD64_SYSCFG, msr);
		if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
			goto clear_all;

		/*
		 * Always adjust physical address bits. Even though this
		 * will be a value above 32-bits this is still done for
		 * CONFIG_X86_32 so that accurate values are reported.
		 */
		c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;

		if (IS_ENABLED(CONFIG_X86_32))
			goto clear_all;

		if (!sme_me_mask)
			setup_clear_cpu_cap(X86_FEATURE_SME);

		rdmsrq(MSR_K7_HWCR, msr);
		if (!(msr & MSR_K7_HWCR_SMMLOCK))
			goto clear_sev;

		return;

clear_all:
		setup_clear_cpu_cap(X86_FEATURE_SME);
clear_sev:
		setup_clear_cpu_cap(X86_FEATURE_SEV);
		setup_clear_cpu_cap(X86_FEATURE_SEV_ES);
		setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
	}
}

static void early_init_amd(struct cpuinfo_x86 *c)
{
	u32 dummy;

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
	if (c->x86_power & BIT(12))
		set_cpu_cap(c, X86_FEATURE_ACC_POWER);

	/* Bit 14 indicates the Runtime Average Power Limit interface. */
	if (c->x86_power & BIT(14))
		set_cpu_cap(c, X86_FEATURE_RAPL);

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_stepping >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
	 * after 16h.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC)) {
		if (c->x86 > 0x16)
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		else if (c->x86 >= 0xf) {
			/* check CPU config space for extended APIC ID */
			unsigned int val;

			val = read_pci_config(0, 24, 0, 0x68);
			if ((val >> 17 & 0x3) == 0x3)
				set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		}
	}
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL.  VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	/* F16h erratum 793, CVE-2013-6885 */
	if (c->x86 == 0x16 && c->x86_model <= 0xf)
		msr_set_bit(MSR_AMD64_LS_CFG, 15);

	early_detect_mem_encrypt(c);

	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_IBPB_BRTYPE)) {
		if (c->x86 == 0x17 && boot_cpu_has(X86_FEATURE_AMD_IBPB))
			setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
		else if (c->x86 >= 0x19 && !wrmsrq_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
			setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
			setup_force_cpu_cap(X86_FEATURE_SBPB);
		}
	}
}

static void init_amd_k8(struct cpuinfo_x86 *c)
{
	u32 level;
	u64 value;

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/*
	 * Some BIOSes incorrectly force this feature, but only K8 revision D
	 * (model = 0x14) and later actually support it.
	 * (AMD Erratum #110, docId: 25759).
	 */
	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM) && !cpu_has(c, X86_FEATURE_HYPERVISOR)) {
		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
		if (!rdmsrq_amd_safe(0xc001100d, &value)) {
			value &= ~BIT_64(32);
			wrmsrq_amd_safe(0xc001100d, value);
		}
	}

	if (!c->x86_model_id[0])
		strscpy(c->x86_model_id, "Hammer");

#ifdef CONFIG_SMP
	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	msr_set_bit(MSR_K7_HWCR, 6);
#endif
	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);

	/*
	 * Check models and steppings affected by erratum 400. This is
	 * used to select the proper idle routine and to enable the
	 * check whether the machine is affected in arch_post_acpi_subsys_init()
	 * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
	 */
	if (c->x86_model > 0x41 ||
	    (c->x86_model == 0x41 && c->x86_stepping >= 0x2))
		setup_force_cpu_bug(X86_BUG_AMD_E400);
}

static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_MMCONF_FAM10H
	/* do this for boot cpu */
	if (c == &boot_cpu_data)
		check_enable_amd_mmconf_dmi();

	fam10h_check_enable_mmcfg();
#endif

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
	 * is always needed when GART is enabled, even in a kernel which has no
	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
	 * If it doesn't, we do it here as suggested by the BKDG.
	 *
	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
	 */
	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

	/*
	 * On family 10h BIOS may not have properly enabled WC+ support, causing
	 * it to be converted to CD memtype. This may result in performance
	 * degradation for certain nested-paging guests. Prevent this conversion
	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
	 *
	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
	 * guests on older kvm hosts.
	 */
	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

	set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);

	/*
	 * Check models and steppings affected by erratum 400. This is
	 * used to select the proper idle routine and to enable the
	 * check whether the machine is affected in arch_post_acpi_subsys_init()
	 * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
	 */
	if (c->x86_model > 0x2 ||
	    (c->x86_model == 0x2 && c->x86_stepping >= 0x1))
		setup_force_cpu_bug(X86_BUG_AMD_E400);
}

static void init_amd_ln(struct cpuinfo_x86 *c)
{
	/*
	 * Apply erratum 665 fix unconditionally so machines without a BIOS
	 * fix work.
	 */
	msr_set_bit(MSR_AMD64_DE_CFG, 31);
}

static bool rdrand_force;

static int __init rdrand_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "force"))
		rdrand_force = true;
	else
		return -EINVAL;

	return 0;
}
early_param("rdrand", rdrand_cmdline);

static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
{
	/*
	 * Saving of the MSR used to hide the RDRAND support during
	 * suspend/resume is done by arch/x86/power/cpu.c, which is
	 * dependent on CONFIG_PM_SLEEP.
	 */
	if (!IS_ENABLED(CONFIG_PM_SLEEP))
		return;

	/*
	 * The self-test can clear X86_FEATURE_RDRAND, so check for
	 * RDRAND support using the CPUID function directly.
	 */
	if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
		return;

	msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);

	/*
	 * Verify that the CPUID change has occurred in case the kernel is
	 * running virtualized and the hypervisor doesn't support the MSR.
	 */
	if (cpuid_ecx(1) & BIT(30)) {
		pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
		return;
	}

	clear_cpu_cap(c, X86_FEATURE_RDRAND);
	pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
}

static void init_amd_jg(struct cpuinfo_x86 *c)
{
	/*
	 * Some BIOS implementations do not restore proper RDRAND support
	 * across suspend and resume. Check on whether to hide the RDRAND
	 * instruction support via CPUID.
	 */
	clear_rdrand_cpuid_bit(c);
}

static void init_amd_bd(struct cpuinfo_x86 *c)
{
	u64 value;

	/*
	 * The way access filter has a performance penalty on some workloads.
	 * Disable it on the affected CPUs.
	 */
	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
		if (!rdmsrq_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrq_safe(MSR_F15H_IC_CFG, value);
		}
	}

	/*
	 * Some BIOS implementations do not restore proper RDRAND support
	 * across suspend and resume. Check on whether to hide the RDRAND
	 * instruction support via CPUID.
	 */
	clear_rdrand_cpuid_bit(c);
}

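/*
 * Microcode revisions, per model/stepping, from which erratum 1386 is
 * fixed.  fix_erratum_1386() below clears X86_FEATURE_XSAVES only when
 * running on an older revision.
 */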
static const struct x86_cpu_id erratum_1386_microcode[] = {
	X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, 0x17, 0x01), 0x2, 0x2, 0x0800126e),
	X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, 0x17, 0x31), 0x0, 0x0, 0x08301052),
	{}
};

static void fix_erratum_1386(struct cpuinfo_x86 *c)
{
	/*
	 * Work around Erratum 1386.  The XSAVES instruction malfunctions in
	 * certain circumstances on Zen1/2 uarch, and not all parts have had
	 * updated microcode at the time of writing (March 2023).
	 *
	 * Affected parts all have no supervisor XSAVE states, meaning that
	 * the XSAVEC instruction (which works fine) is equivalent.
	 *
	 * Clear the feature flag only on microcode revisions which
	 * don't have the fix.
	 */
	if (x86_match_min_microcode_rev(erratum_1386_microcode))
		return;

	clear_cpu_cap(c, X86_FEATURE_XSAVES);
}

void init_spectral_chicken(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_MITIGATION_UNRET_ENTRY
	u64 value;

	/*
	 * On Zen2 we offer this chicken (bit) on the altar of Speculation.
	 *
	 * This suppresses speculation from the middle of a basic block, i.e. it
	 * suppresses non-branch predictions.
	 */
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
		if (!rdmsrq_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) {
			value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT;
			wrmsrq_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
		}
	}
#endif
}

static void init_amd_zen_common(void)
{
	setup_force_cpu_cap(X86_FEATURE_ZEN);
#ifdef CONFIG_NUMA
	node_reclaim_distance = 32;
#endif
}

static void init_amd_zen1(struct cpuinfo_x86 *c)
{
	fix_erratum_1386(c);

	/* Fix up CPUID bits, but only if not virtualised. */
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {

		/* Erratum 1076: CPB feature bit not being set in CPUID. */
		if (!cpu_has(c, X86_FEATURE_CPB))
			set_cpu_cap(c, X86_FEATURE_CPB);
	}

	pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
	setup_force_cpu_bug(X86_BUG_DIV0);

	/*
	 * Turn off the Instructions Retired free counter on machines that are
	 * susceptible to erratum #1054 "Instructions Retired Performance
	 * Counter May Be Inaccurate".
	 */
	if (c->x86_model < 0x30) {
		msr_clear_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
		clear_cpu_cap(c, X86_FEATURE_IRPERF);
	}
}

static bool cpu_has_zenbleed_microcode(void)
{
	u32 good_rev = 0;

	switch (boot_cpu_data.x86_model) {
	case 0x30 ... 0x3f: good_rev = 0x0830107b; break;
	case 0x60 ... 0x67: good_rev = 0x0860010c; break;
	case 0x68 ... 0x6f: good_rev = 0x08608107; break;
	case 0x70 ... 0x7f: good_rev = 0x08701033; break;
	case 0xa0 ... 0xaf: good_rev = 0x08a00009; break;

	default:
		return false;
	}

	if (boot_cpu_data.microcode < good_rev)
		return false;

	return true;
}

static void zen2_zenbleed_check(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
		return;

	if (!cpu_has(c, X86_FEATURE_AVX))
		return;

	if (!cpu_has_zenbleed_microcode()) {
		pr_notice_once("Zenbleed: please update your microcode for the most optimal fix\n");
		msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
	} else {
		msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
	}
}

static void init_amd_zen2(struct cpuinfo_x86 *c)
{
	init_spectral_chicken(c);
	fix_erratum_1386(c);
	zen2_zenbleed_check(c);

	/* Disable RDSEED on AMD Cyan Skillfish because of an error. */
	if (c->x86_model == 0x47 && c->x86_stepping == 0x0) {
		clear_cpu_cap(c, X86_FEATURE_RDSEED);
		msr_clear_bit(MSR_AMD64_CPUID_FN_7, 18);
		pr_emerg("RDSEED is not reliable on this platform; disabling.\n");
	}

	/* Correct misconfigured CPUID on some clients. */
	clear_cpu_cap(c, X86_FEATURE_INVLPGB);
}

static void init_amd_zen3(struct cpuinfo_x86 *c)
{
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
		/*
		 * Zen3 (Fam19 model < 0x10) parts are not susceptible to
		 * Branch Type Confusion, but predate the allocation of the
		 * BTC_NO bit.
		 */
		if (!cpu_has(c, X86_FEATURE_BTC_NO))
			set_cpu_cap(c, X86_FEATURE_BTC_NO);
	}
}

static void init_amd_zen4(struct cpuinfo_x86 *c)
{
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR))
		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);

	/*
	 * These Zen4 SoCs advertise support for virtualized VMLOAD/VMSAVE
	 * in some BIOS versions but they can lead to random host reboots.
	 */
	switch (c->x86_model) {
	case 0x18 ... 0x1f:
	case 0x60 ... 0x7f:
		clear_cpu_cap(c, X86_FEATURE_V_VMSAVE_VMLOAD);
		break;
	}
}

static void init_amd_zen5(struct cpuinfo_x86 *c)
{
}

static void init_amd(struct cpuinfo_x86 *c)
{
	u64 vm_cr;

	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* AMD FSRM also implies FSRS */
	if (cpu_has(c, X86_FEATURE_FSRM))
		set_cpu_cap(c, X86_FEATURE_FSRS);

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);

	switch (c->x86) {
	case 4:    init_amd_k5(c); break;
	case 5:    init_amd_k6(c); break;
	case 6:    init_amd_k7(c); break;
	case 0xf:  init_amd_k8(c); break;
	case 0x10: init_amd_gh(c); break;
	case 0x12: init_amd_ln(c); break;
	case 0x15: init_amd_bd(c); break;
	case 0x16: init_amd_jg(c); break;
	}

	/*
	 * Save up on some future enablement work and do common Zen
	 * settings.
	 */
	if (c->x86 >= 0x17)
		init_amd_zen_common();

	if (boot_cpu_has(X86_FEATURE_ZEN1))
		init_amd_zen1(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN2))
		init_amd_zen2(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN3))
		init_amd_zen3(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN4))
		init_amd_zen4(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN5))
		init_amd_zen5(c);

	/*
	 * Enable workaround for FXSAVE leak on CPUs
	 * without the XSaveErPtr feature
	 */
	if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

	cpu_detect_cache_sizes(c);

	srat_detect_node(c);

	init_amd_cacheinfo(c);

	if (cpu_has(c, X86_FEATURE_SVM)) {
		rdmsrq(MSR_VM_CR, vm_cr);
		if (vm_cr & SVM_VM_CR_SVM_DIS_MASK) {
			pr_notice_once("SVM disabled (by BIOS) in MSR_VM_CR\n");
			clear_cpu_cap(c, X86_FEATURE_SVM);
		}
	}

	if (!cpu_has(c, X86_FEATURE_LFENCE_RDTSC) && cpu_has(c, X86_FEATURE_XMM2)) {
		/*
		 * Use LFENCE for execution serialization.  On families which
		 * don't have that MSR, LFENCE is already serializing.
		 * msr_set_bit() uses the safe accessors, too, even if the MSR
		 * is not present.
		 */
		msr_set_bit(MSR_AMD64_DE_CFG,
			    MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);

		/* A serializing LFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	}

	/*
	 * Family 0x12 and above processors have APIC timer
	 * running in deep C states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	/* 3DNow or LM implies PREFETCHW */
	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
	if (!cpu_feature_enabled(X86_FEATURE_XENPV))
		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);

	/* Enable the Instructions Retired free counter */
	if (cpu_has(c, X86_FEATURE_IRPERF))
		msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);

	check_null_seg_clears_base(c);

	/*
	 * Make sure EFER[AIBRSE - Automatic IBRS Enable] is set. The APs are brought up
	 * using the trampoline code and as part of it, MSR_EFER gets prepared there in
	 * order to be replicated onto them. Regardless, set it here again, if not set,
	 * to protect against any future refactoring/code reorganization which might
	 * miss setting this important bit.
	 */
	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
	    cpu_has(c, X86_FEATURE_AUTOIBRS))
		WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS) < 0);

	/* AMD CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */
	clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);

	/* Enable Translation Cache Extension */
	if (cpu_has(c, X86_FEATURE_TCE))
		msr_set_bit(MSR_EFER, _EFER_TCE);
}

#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_stepping == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
		    (c->x86_stepping == 0 || c->x86_stepping == 1))
			size = 256;
	}
	return size;
}
#endif

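/*
 * Read the L2 TLB entry counts for 4K and 2M/4M pages from CPUID
 * Fn8000_0006.  Where the L2 TLB is disabled or not reported, fall back
 * to the L1 TLB figures from Fn8000_0005.
 */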
static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->x86 < 0xf)
		return;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	tlb_lld_4k = (ebx >> 16) & mask;
	tlb_lli_4k = ebx & mask;

	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m = tlb_lld_2m >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* Erratum 658 */
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m = eax & 0xff;
		}
	} else
		tlb_lli_2m = eax & mask;

	tlb_lli_4m = tlb_lli_2m >> 1;

	/* Max number of pages INVLPGB can invalidate in one shot */
	if (cpu_has(c, X86_FEATURE_INVLPGB))
		invlpgb_count_max = (cpuid_edx(0x80000008) & 0xffff) + 1;
}

static const struct cpu_dev amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.legacy_cache_size = amd_size_cache,
#endif
	.c_early_init	= early_init_amd,
	.c_detect_tlb	= cpu_detect_tlb_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

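/*
 * Per-CPU cache of the DR0-DR3 breakpoint address-mask MSRs provided by
 * X86_FEATURE_BPEXT.  Address bits set in a mask are excluded from the
 * breakpoint address comparison, allowing range breakpoints; the cached
 * value avoids rewriting an MSR that already holds the requested mask.
 */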
static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[4], amd_dr_addr_mask);

static unsigned int amd_msr_dr_addr_masks[] = {
	MSR_F16H_DR0_ADDR_MASK,
	MSR_F16H_DR1_ADDR_MASK,
	MSR_F16H_DR1_ADDR_MASK + 1,
	MSR_F16H_DR1_ADDR_MASK + 2
};

void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr)
{
	int cpu = smp_processor_id();

	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
		return;

	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
		return;

	if (per_cpu(amd_dr_addr_mask, cpu)[dr] == mask)
		return;

	wrmsrq(amd_msr_dr_addr_masks[dr], mask);
	per_cpu(amd_dr_addr_mask, cpu)[dr] = mask;
}

unsigned long amd_get_dr_addr_mask(unsigned int dr)
{
	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
		return 0;

	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
		return 0;

	return per_cpu(amd_dr_addr_mask[dr], smp_processor_id());
}
EXPORT_SYMBOL_GPL(amd_get_dr_addr_mask);

static void zenbleed_check_cpu(void *unused)
{
	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());

	zen2_zenbleed_check(c);
}

void amd_check_microcode(void)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return;

	if (cpu_feature_enabled(X86_FEATURE_ZEN2))
		on_each_cpu(zenbleed_check_cpu, NULL, 1);
}

static const char * const s5_reset_reason_txt[] = {
	[0]  = "thermal pin BP_THERMTRIP_L was tripped",
	[1]  = "power button was pressed for 4 seconds",
	[2]  = "shutdown pin was tripped",
	[4]  = "remote ASF power off command was received",
	[9]  = "internal CPU thermal limit was tripped",
	[16] = "system reset pin BP_SYS_RST_L was tripped",
	[17] = "software issued PCI reset",
	[18] = "software wrote 0x4 to reset control register 0xCF9",
	[19] = "software wrote 0x6 to reset control register 0xCF9",
	[20] = "software wrote 0xE to reset control register 0xCF9",
	[21] = "ACPI power state transition occurred",
	[22] = "keyboard reset pin KB_RST_L was tripped",
	[23] = "internal CPU shutdown event occurred",
	[24] = "system failed to boot before failed boot timer expired",
	[25] = "hardware watchdog timer expired",
	[26] = "remote ASF reset command was received",
	[27] = "an uncorrected error caused a data fabric sync flood event",
	[29] = "FCH and MP1 failed warm reset handshake",
	[30] = "a parity error occurred",
	[31] = "a software sync flood event occurred",
};

static __init int print_s5_reset_status_mmio(void)
{
	unsigned long value;
	void __iomem *addr;
	int i;

	if (!cpu_feature_enabled(X86_FEATURE_ZEN))
		return 0;

	addr = ioremap(FCH_PM_BASE + FCH_PM_S5_RESET_STATUS, sizeof(value));
	if (!addr)
		return 0;

	value = ioread32(addr);
	iounmap(addr);

	for (i = 0; i < ARRAY_SIZE(s5_reset_reason_txt); i++) {
		if (!(value & BIT(i)))
			continue;

		if (s5_reset_reason_txt[i]) {
			pr_info("x86/amd: Previous system reset reason [0x%08lx]: %s\n",
				value, s5_reset_reason_txt[i]);
		}
	}

	return 0;
}
late_initcall(print_s5_reset_status_mmio);