// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/dmi.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/kvm_types.h>
#include <linux/io.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <linux/topology.h>
#include <linux/platform_data/x86/amd-fch.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cacheinfo.h>
#include <asm/cpu.h>
#include <asm/cpu_device_id.h>
#include <asm/spec-ctrl.h>
#include <asm/smp.h>
#include <asm/numa.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>
#include <asm/debugreg.h>
#include <asm/resctrl.h>
#include <asm/msr.h>
#include <asm/sev.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
#endif

#include "cpu.h"

u16 invlpgb_count_max __ro_after_init = 1;

static inline int rdmsrq_amd_safe(unsigned msr, u64 *p)
{
	u32 gprs[8] = { 0 };
	int err;

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrq_amd_safe(unsigned msr, u64 val)
{
	u32 gprs[8] = { 0 };

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return wrmsr_safe_regs(gprs);
}
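
/*
 * Note on the two helpers above (a sketch of the rdmsr_safe_regs()
 * calling convention, for illustration): the gprs[] array maps to the
 * GPRs in EAX, ECX, EDX, EBX, ESP, EBP, ESI, EDI order, so gprs[1] is
 * the MSR index in ECX and gprs[0]/gprs[2] are the EAX/EDX value
 * halves. The 0x9c5a203a constant placed in EDI appears to be the
 * pass-key AMD requires for accessing these K8 northbridge MSRs.
 */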

/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See http://www.multimania.com/poulot/k6bug.html
 * and section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 * (Publication # 21266 Issue Date: August 1998)
 *
 * The following test is erm.. interesting. AMD neglected to up
 * the chip setting when fixing the bug but they also tweaked some
 * performance at the same time..
 */

#ifdef CONFIG_X86_32
extern __visible void vide(void);
__asm__(".text\n"
	".globl vide\n"
	".type vide, @function\n"
	".align 4\n"
	"vide: ret\n");
#endif

static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround : Remove the unneeded alias.
 */
#define CBAR		(0xfffc) /* Configuration Base Address (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0X000000CB)
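	/*
	 * For illustration: writing CBAR with the unlock key (0xCB) in the
	 * low byte while leaving CBAR_ENB (bit 31) clear disables the
	 * alias, so nothing can change the CPU speed through it afterwards.
	 */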
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
#endif
}

static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;
	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_stepping == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		u64 d, d2;

		pr_info("AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */

		n = K6_BUG_LOOP;
		f_vide = vide;
		OPTIMIZER_HIDE_VAR(f_vide);
		d = rdtsc();
		while (n--)
			f_vide();
		d2 = rdtsc();
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
		else
			pr_cont("probably OK (after B9730xxxx).\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	   (c->x86_model == 8 && c->x86_stepping < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

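		/*
		 * Rough sketch of the old-style WHCR encoding used below:
		 * bit 0 enables write allocation and the bits above it hold
		 * the limit in 4 MB units, so e.g. mbytes = 508 gives
		 * (1 << 0) | ((508 / 4) << 1) = 0xff.
		 */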
		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_stepping > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

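		/*
		 * Sketch of the new-style WHCR encoding used below: the
		 * limit moves to bits 31:22 (again in 4 MB units) and the
		 * enable bit to bit 16, so e.g. mbytes = 4092 gives
		 * ((4092 >> 2) << 22) | (1 << 16).
		 */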
		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
#endif
}

static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;

	/*
	 * Bit 15 of the Athlon-specific MSR 15 needs to be 0 to enable SSE
	 * on Palomino/Morgan/Barton CPUs.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			pr_info("Enabling disabled K7/SSE Support.\n");
			msr_clear_bit(MSR_K7_HWCR, 15);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
	 * As per AMD technical note 27212 0.2
	 */
	if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	/* calling is from identify_secondary_cpu() ? */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
	    (c->x86_stepping == 1)))
		return;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_stepping == 0))
		return;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XP's have the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has(c, X86_FEATURE_MP))
			return;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}

#ifdef CONFIG_NUMA
/*
 * To work around broken NUMA configs. Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->topo.apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu_llc_id(cpu);

	/*
	 * On multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created. In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in. Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids. If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming certain relationship
		 * between APIC ID, HT node ID and NUMA topology. As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->topo.initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void bsp_determine_snp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
	cc_vendor = CC_VENDOR_AMD;

	if (cpu_has(c, X86_FEATURE_SEV_SNP)) {
		/*
		 * RMP table entry format is not architectural and is defined by the
		 * per-processor PPR. Restrict SNP support on the known CPU models
		 * for which the RMP table entry format is currently defined or for
		 * processors which support the architecturally defined RMPREAD
		 * instruction.
		 */
		if (!cpu_has(c, X86_FEATURE_HYPERVISOR) &&
		    (cpu_feature_enabled(X86_FEATURE_ZEN3) ||
		     cpu_feature_enabled(X86_FEATURE_ZEN4) ||
		     cpu_feature_enabled(X86_FEATURE_RMPREAD)) &&
		    snp_probe_rmptable_info()) {
			cc_platform_set(CC_ATTR_HOST_SEV_SNP);
		} else {
			setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
			cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
		}
	}
#endif
}

#define ZEN_MODEL_STEP_UCODE(fam, model, step, ucode) \
	X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, fam, model), \
			    step, step, ucode)
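
/*
 * For illustration: each table entry built with the macro above matches
 * one family/model at exactly one stepping and carries the minimum
 * microcode revision for it, e.g. the first amd_tsa_microcode entry
 * below reads "family 0x19, model 0x01, stepping 1 needs at least
 * revision 0x0a0011d7".
 */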

static const struct x86_cpu_id amd_tsa_microcode[] = {
	ZEN_MODEL_STEP_UCODE(0x19, 0x01, 0x1, 0x0a0011d7),
	ZEN_MODEL_STEP_UCODE(0x19, 0x01, 0x2, 0x0a00123b),
	ZEN_MODEL_STEP_UCODE(0x19, 0x08, 0x2, 0x0a00820d),
	ZEN_MODEL_STEP_UCODE(0x19, 0x11, 0x1, 0x0a10114c),
	ZEN_MODEL_STEP_UCODE(0x19, 0x11, 0x2, 0x0a10124c),
	ZEN_MODEL_STEP_UCODE(0x19, 0x18, 0x1, 0x0a108109),
	ZEN_MODEL_STEP_UCODE(0x19, 0x21, 0x0, 0x0a20102e),
	ZEN_MODEL_STEP_UCODE(0x19, 0x21, 0x2, 0x0a201211),
	ZEN_MODEL_STEP_UCODE(0x19, 0x44, 0x1, 0x0a404108),
	ZEN_MODEL_STEP_UCODE(0x19, 0x50, 0x0, 0x0a500012),
	ZEN_MODEL_STEP_UCODE(0x19, 0x61, 0x2, 0x0a60120a),
	ZEN_MODEL_STEP_UCODE(0x19, 0x74, 0x1, 0x0a704108),
	ZEN_MODEL_STEP_UCODE(0x19, 0x75, 0x2, 0x0a705208),
	ZEN_MODEL_STEP_UCODE(0x19, 0x78, 0x0, 0x0a708008),
	ZEN_MODEL_STEP_UCODE(0x19, 0x7c, 0x0, 0x0a70c008),
	ZEN_MODEL_STEP_UCODE(0x19, 0xa0, 0x2, 0x0aa00216),
	{},
};

static void tsa_init(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
		return;

	if (cpu_has(c, X86_FEATURE_ZEN3) ||
	    cpu_has(c, X86_FEATURE_ZEN4)) {
		if (x86_match_min_microcode_rev(amd_tsa_microcode))
			setup_force_cpu_cap(X86_FEATURE_VERW_CLEAR);
		else
			pr_debug("%s: current revision: 0x%x\n", __func__, c->microcode);
	} else {
		setup_force_cpu_cap(X86_FEATURE_TSA_SQ_NO);
		setup_force_cpu_cap(X86_FEATURE_TSA_L1_NO);
	}
}

static void bsp_init_amd(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrq(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
		}
	}

	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;

		cpuid = cpuid_edx(0x80000005);
		assoc = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;

		va_align.mask = (upperbit - 1) & PAGE_MASK;
		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;

		/* A random value per boot for bit slice [12:upper_bit) */
		va_align.bits = get_random_u32() & va_align.mask;
	}
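
	/*
	 * Rough sketch of the CPUID 0x80000005 EDX layout relied on above:
	 * bits 31:24 hold the L1I cache size in KB and bits 23:16 its
	 * associativity. E.g. a 64K 2-way L1I (values assumed purely for
	 * illustration) yields upperbit = (64 << 10) / 2 = 32768 and
	 * va_align.mask = 0x7000, i.e. mmap() addresses get consistent
	 * bits 14:12 to avoid aliasing within the way-sized slice.
	 */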

	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();

	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
	    c->x86 >= 0x15 && c->x86 <= 0x17) {
		unsigned int bit;

		switch (c->x86) {
		case 0x15: bit = 54; break;
		case 0x16: bit = 33; break;
		case 0x17: bit = 10; break;
		default: return;
		}
		/*
		 * Try to cache the base value so further operations can
		 * avoid RMW. If that faults, do not enable SSBD.
		 */
		if (!rdmsrq_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
			setup_force_cpu_cap(X86_FEATURE_SSBD);
			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
		}
	}

	resctrl_cpu_detect(c);

	/* Figure out Zen generations: */
	switch (c->x86) {
	case 0x17:
		switch (c->x86_model) {
		case 0x00 ... 0x2f:
		case 0x50 ... 0x5f:
			setup_force_cpu_cap(X86_FEATURE_ZEN1);
			break;
		case 0x30 ... 0x4f:
		case 0x60 ... 0x7f:
		case 0x90 ... 0x91:
		case 0xa0 ... 0xaf:
			setup_force_cpu_cap(X86_FEATURE_ZEN2);
			break;
		default:
			goto warn;
		}
		break;

	case 0x19:
		switch (c->x86_model) {
		case 0x00 ... 0x0f:
		case 0x20 ... 0x5f:
			setup_force_cpu_cap(X86_FEATURE_ZEN3);
			break;
		case 0x10 ... 0x1f:
		case 0x60 ... 0xaf:
			setup_force_cpu_cap(X86_FEATURE_ZEN4);
			break;
		default:
			goto warn;
		}
		break;

	case 0x1a:
		switch (c->x86_model) {
		case 0x00 ... 0x2f:
		case 0x40 ... 0x4f:
		case 0x60 ... 0x7f:
			setup_force_cpu_cap(X86_FEATURE_ZEN5);
			break;
		case 0x50 ... 0x5f:
		case 0x80 ... 0xaf:
		case 0xc0 ... 0xcf:
			setup_force_cpu_cap(X86_FEATURE_ZEN6);
			break;
		default:
			goto warn;
		}
		break;

	default:
		break;
	}

	bsp_determine_snp(c);
	tsa_init(c);

	if (cpu_has(c, X86_FEATURE_GP_ON_USER_CPUID))
		setup_force_cpu_cap(X86_FEATURE_CPUID_FAULT);

	return;

warn:
	WARN_ONCE(1, "Family 0x%x, model: 0x%x??\n", c->x86, c->x86_model);
}

static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
{
	u64 msr;

	/*
	 * Mark that WBINVD is needed during kexec on processors that
	 * support SME. This provides support for performing a successful
	 * kexec when going from SME inactive to SME active (or vice-versa).
	 *
	 * The cache must be cleared so that if there are entries with the
	 * same physical address, both with and without the encryption bit,
	 * they don't race each other when flushed and potentially end up
	 * with the wrong entry being committed to memory.
	 *
	 * Test the CPUID bit directly because with mem_encrypt=off the
	 * BSP will clear the X86_FEATURE_SME bit and the APs will not
	 * see it set after that.
	 */
	if (c->extended_cpuid_level >= 0x8000001f && (cpuid_eax(0x8000001f) & BIT(0)))
		__this_cpu_write(cache_state_incoherent, true);

	/*
	 * BIOS support is required for SME and SEV.
	 *   For SME: If BIOS has enabled SME then adjust x86_phys_bits by
	 *	      the SME physical address space reduction value.
	 *	      If BIOS has not enabled SME then don't advertise the
	 *	      SME feature (set in scattered.c).
	 *	      If the kernel has not enabled SME via any means then
	 *	      don't advertise the SME feature.
	 *   For SEV: If BIOS has not enabled SEV then don't advertise SEV and
	 *	      any additional functionality based on it.
	 *
	 * In all cases, since support for SME and SEV requires long mode,
	 * don't advertise the feature under CONFIG_X86_32.
	 */
	if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
		/* Check if memory encryption is enabled */
		rdmsrq(MSR_AMD64_SYSCFG, msr);
		if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
			goto clear_all;

		/*
		 * Always adjust physical address bits. Even though this
		 * will be a value above 32-bits this is still done for
		 * CONFIG_X86_32 so that accurate values are reported.
		 */
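		/*
		 * Sketch of the CPUID 0x8000001f EBX layout (per the SEV
		 * leaf): bits 5:0 hold the C-bit position and bits 11:6
		 * the physical address space reduction, hence the ">> 6"
		 * and 0x3f mask below.
		 */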
		c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;

		if (IS_ENABLED(CONFIG_X86_32))
			goto clear_all;

		if (!sme_me_mask)
			setup_clear_cpu_cap(X86_FEATURE_SME);

		rdmsrq(MSR_K7_HWCR, msr);
		if (!(msr & MSR_K7_HWCR_SMMLOCK))
			goto clear_sev;

		return;

clear_all:
		setup_clear_cpu_cap(X86_FEATURE_SME);
clear_sev:
		setup_clear_cpu_cap(X86_FEATURE_SEV);
		setup_clear_cpu_cap(X86_FEATURE_SEV_ES);
		setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
	}
}

static void early_init_amd(struct cpuinfo_x86 *c)
{
	u32 dummy;

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
	if (c->x86_power & BIT(12))
		set_cpu_cap(c, X86_FEATURE_ACC_POWER);

	/* Bit 14 indicates the Runtime Average Power Limit interface. */
	if (c->x86_power & BIT(14))
		set_cpu_cap(c, X86_FEATURE_RAPL);

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_stepping >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
	 * >= 0x14, but even old K8s came out of reset with version 0x10. So, we
	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
	 * after 16h.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC)) {
		if (c->x86 > 0x16)
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		else if (c->x86 >= 0xf) {
			/* check CPU config space for extended APIC ID */
			unsigned int val;

			val = read_pci_config(0, 24, 0, 0x68);
			if ((val >> 17 & 0x3) == 0x3)
				set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		}
	}
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL. VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	/* F16h erratum 793, CVE-2013-6885 */
	if (c->x86 == 0x16 && c->x86_model <= 0xf)
		msr_set_bit(MSR_AMD64_LS_CFG, 15);

	early_detect_mem_encrypt(c);

	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_IBPB_BRTYPE)) {
		if (c->x86 == 0x17 && boot_cpu_has(X86_FEATURE_AMD_IBPB))
			setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
		else if (c->x86 >= 0x19 && !wrmsrq_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
			setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
			setup_force_cpu_cap(X86_FEATURE_SBPB);
		}
	}
}

static void init_amd_k8(struct cpuinfo_x86 *c)
{
	u32 level;
	u64 value;

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/*
	 * Some BIOSes incorrectly force this feature, but only K8 revision D
	 * (model = 0x14) and later actually support it.
	 * (AMD Erratum #110, docId: 25759).
	 */
	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM) && !cpu_has(c, X86_FEATURE_HYPERVISOR)) {
		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
		if (!rdmsrq_amd_safe(0xc001100d, &value)) {
			value &= ~BIT_64(32);
			wrmsrq_amd_safe(0xc001100d, value);
		}
	}

	if (!c->x86_model_id[0])
		strscpy(c->x86_model_id, "Hammer");

#ifdef CONFIG_SMP
	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	msr_set_bit(MSR_K7_HWCR, 6);
#endif
	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);

	/*
	 * Check models and steppings affected by erratum 400. This is
	 * used to select the proper idle routine and to enable the
	 * check whether the machine is affected in arch_post_acpi_subsys_init()
	 * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
	 */
	if (c->x86_model > 0x41 ||
	    (c->x86_model == 0x41 && c->x86_stepping >= 0x2))
		setup_force_cpu_bug(X86_BUG_AMD_E400);
}

static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_MMCONF_FAM10H
	/* do this for boot cpu */
	if (c == &boot_cpu_data)
		check_enable_amd_mmconf_dmi();

	fam10h_check_enable_mmcfg();
#endif

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
	 * is always needed when GART is enabled, even in a kernel which has no
	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
	 * If it doesn't, we do it here as suggested by the BKDG.
	 *
	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
	 */
	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

	/*
	 * On family 10h BIOS may not have properly enabled WC+ support, causing
	 * it to be converted to CD memtype. This may result in performance
	 * degradation for certain nested-paging guests. Prevent this conversion
	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
	 *
	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
	 * guests on older kvm hosts.
	 */
	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

	set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);

	/*
	 * Check models and steppings affected by erratum 400. This is
	 * used to select the proper idle routine and to enable the
	 * check whether the machine is affected in arch_post_acpi_subsys_init()
	 * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
	 */
	if (c->x86_model > 0x2 ||
	    (c->x86_model == 0x2 && c->x86_stepping >= 0x1))
		setup_force_cpu_bug(X86_BUG_AMD_E400);
}

static void init_amd_ln(struct cpuinfo_x86 *c)
{
	/*
	 * Apply erratum 665 fix unconditionally so machines without a BIOS
	 * fix work.
	 */
	msr_set_bit(MSR_AMD64_DE_CFG, 31);
}

static bool rdrand_force;

static int __init rdrand_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "force"))
		rdrand_force = true;
	else
		return -EINVAL;

	return 0;
}
early_param("rdrand", rdrand_cmdline);
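
/*
 * Usage note: booting with "rdrand=force" keeps RDRAND advertised even on
 * systems whose BIOS may not restore it correctly across suspend/resume
 * (see clear_rdrand_cpuid_bit() below).
 */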

static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
{
	/*
	 * Saving of the MSR used to hide the RDRAND support during
	 * suspend/resume is done by arch/x86/power/cpu.c, which is
	 * dependent on CONFIG_PM_SLEEP.
	 */
	if (!IS_ENABLED(CONFIG_PM_SLEEP))
		return;

	/*
	 * The self-test can clear X86_FEATURE_RDRAND, so check for
	 * RDRAND support using the CPUID function directly.
	 */
	if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
		return;

	msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);

	/*
	 * Verify that the CPUID change has occurred in case the kernel is
	 * running virtualized and the hypervisor doesn't support the MSR.
	 */
	if (cpuid_ecx(1) & BIT(30)) {
		pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
		return;
	}

	clear_cpu_cap(c, X86_FEATURE_RDRAND);
	pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
}

static void init_amd_jg(struct cpuinfo_x86 *c)
{
	/*
	 * Some BIOS implementations do not restore proper RDRAND support
	 * across suspend and resume. Check on whether to hide the RDRAND
	 * instruction support via CPUID.
	 */
	clear_rdrand_cpuid_bit(c);
}

static void init_amd_bd(struct cpuinfo_x86 *c)
{
	u64 value;

	/*
	 * The way access filter has a performance penalty on some workloads.
	 * Disable it on the affected CPUs.
	 */
	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
		if (!rdmsrq_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrq_safe(MSR_F15H_IC_CFG, value);
		}
	}

	/*
	 * Some BIOS implementations do not restore proper RDRAND support
	 * across suspend and resume. Check on whether to hide the RDRAND
	 * instruction support via CPUID.
	 */
	clear_rdrand_cpuid_bit(c);
}

static const struct x86_cpu_id erratum_1386_microcode[] = {
	ZEN_MODEL_STEP_UCODE(0x17, 0x01, 0x2, 0x0800126e),
	ZEN_MODEL_STEP_UCODE(0x17, 0x31, 0x0, 0x08301052),
	{}
};

static void fix_erratum_1386(struct cpuinfo_x86 *c)
{
	/*
	 * Work around Erratum 1386. The XSAVES instruction malfunctions in
	 * certain circumstances on Zen1/2 uarch, and not all parts have had
	 * updated microcode at the time of writing (March 2023).
	 *
	 * Affected parts all have no supervisor XSAVE states, meaning that
	 * the XSAVEC instruction (which works fine) is equivalent.
	 *
	 * Clear the feature flag only on microcode revisions which
	 * don't have the fix.
	 */
	if (x86_match_min_microcode_rev(erratum_1386_microcode))
		return;

	clear_cpu_cap(c, X86_FEATURE_XSAVES);
}

void init_spectral_chicken(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_MITIGATION_UNRET_ENTRY
	/*
	 * On Zen2 we offer this chicken (bit) on the altar of Speculation.
	 *
	 * This suppresses speculation from the middle of a basic block, i.e. it
	 * suppresses non-branch predictions.
	 */
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR))
		msr_set_bit(MSR_ZEN2_SPECTRAL_CHICKEN, MSR_ZEN2_SPECTRAL_CHICKEN_BIT);
#endif
}

static void init_amd_zen_common(void)
{
	setup_force_cpu_cap(X86_FEATURE_ZEN);
#ifdef CONFIG_NUMA
	node_reclaim_distance = 32;
#endif
}

static void init_amd_zen1(struct cpuinfo_x86 *c)
{
	fix_erratum_1386(c);

	/* Fix up CPUID bits, but only if not virtualised. */
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {

		/* Erratum 1076: CPB feature bit not being set in CPUID. */
		if (!cpu_has(c, X86_FEATURE_CPB))
			set_cpu_cap(c, X86_FEATURE_CPB);
	}

	pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
	setup_force_cpu_bug(X86_BUG_DIV0);

	/*
	 * Turn off the Instructions Retired free counter on machines that are
	 * susceptible to erratum #1054 "Instructions Retired Performance
	 * Counter May Be Inaccurate".
	 */
	if (c->x86_model < 0x30) {
		msr_clear_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
		clear_cpu_cap(c, X86_FEATURE_IRPERF);
	}

	pr_notice_once("AMD Zen1 FPDSS bug detected, enabling mitigation.\n");
	msr_set_bit(MSR_AMD64_FP_CFG, MSR_AMD64_FP_CFG_ZEN1_DENORM_FIX_BIT);
}

static const struct x86_cpu_id amd_zenbleed_microcode[] = {
	ZEN_MODEL_STEP_UCODE(0x17, 0x31, 0x0, 0x0830107b),
	ZEN_MODEL_STEP_UCODE(0x17, 0x60, 0x1, 0x0860010c),
	ZEN_MODEL_STEP_UCODE(0x17, 0x68, 0x1, 0x08608107),
	ZEN_MODEL_STEP_UCODE(0x17, 0x71, 0x0, 0x08701033),
	ZEN_MODEL_STEP_UCODE(0x17, 0xa0, 0x0, 0x08a00009),
	{}
};

static void zen2_zenbleed_check(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
		return;

	if (!cpu_has(c, X86_FEATURE_AVX))
		return;

	if (!x86_match_min_microcode_rev(amd_zenbleed_microcode)) {
		pr_notice_once("Zenbleed: please update your microcode for the most optimal fix\n");
		msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
	} else {
		msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
	}
}
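
/*
 * Note on the DE_CFG bit toggled above: it is, roughly speaking, the
 * chicken bit that disables the floating-point register-file optimization
 * abused by Zenbleed (CVE-2023-20593). Fixed microcode makes the bit
 * unnecessary, so it is cleared again to avoid the performance cost.
 */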

static void init_amd_zen2(struct cpuinfo_x86 *c)
{
	init_spectral_chicken(c);
	fix_erratum_1386(c);
	zen2_zenbleed_check(c);

	/* Disable RDSEED on AMD Cyan Skillfish because of an error. */
	if (c->x86_model == 0x47 && c->x86_stepping == 0x0) {
		clear_cpu_cap(c, X86_FEATURE_RDSEED);
		msr_clear_bit(MSR_AMD64_CPUID_FN_7, 18);
		pr_emerg("RDSEED is not reliable on this platform; disabling.\n");
	}

	/* Correct misconfigured CPUID on some clients. */
	clear_cpu_cap(c, X86_FEATURE_INVLPGB);

	if (!cpu_has(c, X86_FEATURE_HYPERVISOR))
		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN2_BP_CFG_BUG_FIX_BIT);
}

static void init_amd_zen3(struct cpuinfo_x86 *c)
{
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
		/*
		 * Zen3 (Fam19 model < 0x10) parts are not susceptible to
		 * Branch Type Confusion, but predate the allocation of the
		 * BTC_NO bit.
		 */
		if (!cpu_has(c, X86_FEATURE_BTC_NO))
			set_cpu_cap(c, X86_FEATURE_BTC_NO);
	}
}

static void init_amd_zen4(struct cpuinfo_x86 *c)
{
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR))
		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);

	/*
	 * These Zen4 SoCs advertise support for virtualized VMLOAD/VMSAVE
	 * in some BIOS versions but they can lead to random host reboots.
	 */
	switch (c->x86_model) {
	case 0x18 ... 0x1f:
	case 0x60 ... 0x7f:
		clear_cpu_cap(c, X86_FEATURE_V_VMSAVE_VMLOAD);
		break;
	}
}

static const struct x86_cpu_id zen5_rdseed_microcode[] = {
	ZEN_MODEL_STEP_UCODE(0x1a, 0x02, 0x1, 0x0b00215a),
	ZEN_MODEL_STEP_UCODE(0x1a, 0x08, 0x1, 0x0b008121),
	ZEN_MODEL_STEP_UCODE(0x1a, 0x11, 0x0, 0x0b101054),
	ZEN_MODEL_STEP_UCODE(0x1a, 0x24, 0x0, 0x0b204037),
	ZEN_MODEL_STEP_UCODE(0x1a, 0x44, 0x0, 0x0b404035),
	ZEN_MODEL_STEP_UCODE(0x1a, 0x44, 0x1, 0x0b404108),
	ZEN_MODEL_STEP_UCODE(0x1a, 0x60, 0x0, 0x0b600037),
	ZEN_MODEL_STEP_UCODE(0x1a, 0x68, 0x0, 0x0b608038),
	ZEN_MODEL_STEP_UCODE(0x1a, 0x70, 0x0, 0x0b700037),
	{},
};

static void init_amd_zen5(struct cpuinfo_x86 *c)
{
	if (!x86_match_min_microcode_rev(zen5_rdseed_microcode)) {
		clear_cpu_cap(c, X86_FEATURE_RDSEED);
		msr_clear_bit(MSR_AMD64_CPUID_FN_7, 18);
		pr_emerg_once("RDSEED32 is broken. Disabling the corresponding CPUID bit.\n");
	}
}

static void init_amd(struct cpuinfo_x86 *c)
{
	u64 vm_cr;

	early_init_amd(c);

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* AMD FSRM also implies FSRS */
	if (cpu_has(c, X86_FEATURE_FSRM))
		set_cpu_cap(c, X86_FEATURE_FSRS);

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);

	switch (c->x86) {
	case 4:    init_amd_k5(c); break;
	case 5:    init_amd_k6(c); break;
	case 6:    init_amd_k7(c); break;
	case 0xf:  init_amd_k8(c); break;
	case 0x10: init_amd_gh(c); break;
	case 0x12: init_amd_ln(c); break;
	case 0x15: init_amd_bd(c); break;
	case 0x16: init_amd_jg(c); break;
	}

	/*
	 * Save up on some future enablement work and do common Zen
	 * settings.
	 */
	if (c->x86 >= 0x17)
		init_amd_zen_common();

	if (boot_cpu_has(X86_FEATURE_ZEN1))
		init_amd_zen1(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN2))
		init_amd_zen2(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN3))
		init_amd_zen3(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN4))
		init_amd_zen4(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN5))
		init_amd_zen5(c);

	/*
	 * Enable workaround for FXSAVE leak on CPUs
	 * without a XSaveErPtr feature
	 */
	if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

	cpu_detect_cache_sizes(c);

	srat_detect_node(c);

	init_amd_cacheinfo(c);

	if (cpu_has(c, X86_FEATURE_SVM)) {
		rdmsrq(MSR_VM_CR, vm_cr);
		if (vm_cr & SVM_VM_CR_SVM_DIS_MASK) {
			pr_notice_once("SVM disabled (by BIOS) in MSR_VM_CR\n");
			clear_cpu_cap(c, X86_FEATURE_SVM);
		}
	}

	if (!cpu_has(c, X86_FEATURE_LFENCE_RDTSC) && cpu_has(c, X86_FEATURE_XMM2)) {
		/*
		 * Use LFENCE for execution serialization. On families which
		 * don't have that MSR, LFENCE is already serializing.
		 * msr_set_bit() uses the safe accessors, too, even if the MSR
		 * is not present.
		 */
		msr_set_bit(MSR_AMD64_DE_CFG,
			    MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);

		/* A serializing LFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	}

	/*
	 * Family 0x12 and above processors have APIC timer
	 * running in deep C states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	/* 3DNow or LM implies PREFETCHW */
	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
	if (!cpu_feature_enabled(X86_FEATURE_XENPV))
		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);

	/* Enable the Instructions Retired free counter */
	if (cpu_has(c, X86_FEATURE_IRPERF))
		msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);

	check_null_seg_clears_base(c);

	/*
	 * Make sure EFER[AIBRSE - Automatic IBRS Enable] is set. The APs are brought up
	 * using the trampoline code and as part of it, MSR_EFER gets prepared there in
	 * order to be replicated onto them. Regardless, set it here again, if not set,
	 * to protect against any future refactoring/code reorganization which might
	 * miss setting this important bit.
	 */
	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
	    cpu_has(c, X86_FEATURE_AUTOIBRS))
		WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS) < 0);

	/* AMD CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */
	clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);

	/* Enable Translation Cache Extension */
	if (cpu_has(c, X86_FEATURE_TCE))
		msr_set_bit(MSR_EFER, _EFER_TCE);
}

#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_stepping == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
		    (c->x86_stepping == 0 || c->x86_stepping == 1))
			size = 256;
	}
	return size;
}
#endif

static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->x86 < 0xf)
		return;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	tlb_lld_4k = (ebx >> 16) & mask;
	tlb_lli_4k = ebx & mask;
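
	/*
	 * Rough sketch of the CPUID 0x80000006 layout relied on here:
	 * EBX[27:16] is the L2 dTLB entry count for 4K pages and EBX[11:0]
	 * the iTLB count; EAX carries the same split for 2M/4M pages,
	 * hence the shared 12-bit mask.
	 */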

	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m = tlb_lld_2m >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* Erratum 658 */
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m = eax & 0xff;
		}
	} else
		tlb_lli_2m = eax & mask;

	tlb_lli_4m = tlb_lli_2m >> 1;

	/* Max number of pages INVLPGB can invalidate in one shot */
	if (cpu_has(c, X86_FEATURE_INVLPGB))
		invlpgb_count_max = (cpuid_edx(0x80000008) & 0xffff) + 1;
}

static const struct cpu_dev amd_cpu_dev = {
	.c_vendor = "AMD",
	.c_ident = { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			[3] = "486 DX/2",
			[7] = "486 DX/2-WB",
			[8] = "486 DX/4",
			[9] = "486 DX/4-WB",
			[14] = "Am5x86-WT",
			[15] = "Am5x86-WB"
		  }
		},
	},
	.legacy_cache_size = amd_size_cache,
#endif
	.c_early_init = early_init_amd,
	.c_detect_tlb = cpu_detect_tlb_amd,
	.c_bsp_init = bsp_init_amd,
	.c_init = init_amd,
	.c_x86_vendor = X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[4], amd_dr_addr_mask);

static unsigned int amd_msr_dr_addr_masks[] = {
	MSR_F16H_DR0_ADDR_MASK,
	MSR_F16H_DR1_ADDR_MASK,
	MSR_F16H_DR1_ADDR_MASK + 1,
	MSR_F16H_DR1_ADDR_MASK + 2
};
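
/*
 * The table layout above relies on the DR1-DR3 address mask MSRs being
 * numbered contiguously, which is why the DR2/DR3 entries are expressed
 * as MSR_F16H_DR1_ADDR_MASK + 1 and + 2, while DR0 has its own, separate
 * MSR constant.
 */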

void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr)
{
	int cpu = smp_processor_id();

	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
		return;

	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
		return;

	if (per_cpu(amd_dr_addr_mask, cpu)[dr] == mask)
		return;

	wrmsrq(amd_msr_dr_addr_masks[dr], mask);
	per_cpu(amd_dr_addr_mask, cpu)[dr] = mask;
}

unsigned long amd_get_dr_addr_mask(unsigned int dr)
{
	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
		return 0;

	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
		return 0;

	return per_cpu(amd_dr_addr_mask[dr], smp_processor_id());
}
EXPORT_SYMBOL_FOR_KVM(amd_get_dr_addr_mask);

static void zenbleed_check_cpu(void *unused)
{
	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());

	zen2_zenbleed_check(c);
}

void amd_check_microcode(void)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return;

	if (cpu_feature_enabled(X86_FEATURE_ZEN2))
		on_each_cpu(zenbleed_check_cpu, NULL, 1);
}

static const char * const s5_reset_reason_txt[] = {
	[0] = "thermal pin BP_THERMTRIP_L was tripped",
	[1] = "power button was pressed for 4 seconds",
	[2] = "shutdown pin was tripped",
	[4] = "remote ASF power off command was received",
	[9] = "internal CPU thermal limit was tripped",
	[16] = "system reset pin BP_SYS_RST_L was tripped",
	[17] = "software issued PCI reset",
	[18] = "software wrote 0x4 to reset control register 0xCF9",
	[19] = "software wrote 0x6 to reset control register 0xCF9",
	[20] = "software wrote 0xE to reset control register 0xCF9",
	[21] = "ACPI power state transition occurred",
	[22] = "keyboard reset pin KB_RST_L was tripped",
	[23] = "internal CPU shutdown event occurred",
	[24] = "system failed to boot before failed boot timer expired",
	[25] = "hardware watchdog timer expired",
	[26] = "remote ASF reset command was received",
	[27] = "an uncorrected error caused a data fabric sync flood event",
	[29] = "FCH and MP1 failed warm reset handshake",
	[30] = "a parity error occurred",
	[31] = "a software sync flood event occurred",
};

static __init int print_s5_reset_status_mmio(void)
{
	void __iomem *addr;
	u32 value;
	int i;

	if (!cpu_feature_enabled(X86_FEATURE_ZEN))
		return 0;

	addr = ioremap(FCH_PM_BASE + FCH_PM_S5_RESET_STATUS, sizeof(value));
	if (!addr)
		return 0;

	value = ioread32(addr);

	/* Value with "all bits set" is an error response and should be ignored. */
	if (value == U32_MAX) {
		iounmap(addr);
		return 0;
	}

	/*
	 * Clear all reason bits so they won't be retained if the next reset
	 * does not update the register. Besides, some bits are never cleared by
	 * hardware so it's software's responsibility to clear them.
	 *
	 * Writing the value back effectively clears all reason bits as they are
	 * write-1-to-clear.
	 */
	iowrite32(value, addr);
	iounmap(addr);

	for (i = 0; i < ARRAY_SIZE(s5_reset_reason_txt); i++) {
		if (!(value & BIT(i)))
			continue;

		if (s5_reset_reason_txt[i]) {
			pr_info("x86/amd: Previous system reset reason [0x%08x]: %s\n",
				value, s5_reset_reason_txt[i]);
		}
	}

	return 0;
}
late_initcall(print_s5_reset_status_mmio);

static void __init dmi_scan_additional(const struct dmi_header *d, void *p)
{
	struct dmi_a_info *info = (struct dmi_a_info *)d;
	void *next, *end;

	if (!IS_ENABLED(CONFIG_DMI))
		return;

	if (info->header.type != DMI_ENTRY_ADDITIONAL ||
	    info->header.length < DMI_A_INFO_MIN_SIZE ||
	    info->count < 1)
		return;

	next = (void *)(info + 1);
	end = (void *)info + info->header.length;

	do {
		struct dmi_a_info_entry *entry;
		const char *string_ptr;

		entry = (struct dmi_a_info_entry *)next;

		/*
		 * Not much can be done to validate data. At least the entry
		 * length shouldn't be 0.
		 */
		if (!entry->length)
			return;

		string_ptr = dmi_string_nosave(&info->header, entry->str_num);

		/* Sample string: AGESA!V9 StrixKrackanPI-FP8 1.1.0.0c */
		if (!strncmp(string_ptr, "AGESA", 5)) {
			pr_info("AGESA: %s\n", string_ptr);
			break;
		}

		next += entry->length;
	} while (end - next >= DMI_A_INFO_ENT_MIN_SIZE);
}

static __init int print_dmi_agesa(void)
{
	dmi_walk(dmi_scan_additional, NULL);
	return 0;
}
late_initcall(print_dmi_agesa);