xref: /linux/arch/x86/kernel/cpu/amd.c (revision 334fbe734e687404f346eba7d5d96ed2b44d35ab)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/export.h>
3 #include <linux/bitops.h>
4 #include <linux/dmi.h>
5 #include <linux/elf.h>
6 #include <linux/mm.h>
7 #include <linux/kvm_types.h>
8 #include <linux/io.h>
9 #include <linux/sched.h>
10 #include <linux/sched/clock.h>
11 #include <linux/random.h>
12 #include <linux/topology.h>
13 #include <linux/platform_data/x86/amd-fch.h>
14 #include <asm/processor.h>
15 #include <asm/apic.h>
16 #include <asm/cacheinfo.h>
17 #include <asm/cpu.h>
18 #include <asm/cpu_device_id.h>
19 #include <asm/spec-ctrl.h>
20 #include <asm/smp.h>
21 #include <asm/numa.h>
22 #include <asm/pci-direct.h>
23 #include <asm/delay.h>
24 #include <asm/debugreg.h>
25 #include <asm/resctrl.h>
26 #include <asm/msr.h>
27 #include <asm/sev.h>
28 
29 #ifdef CONFIG_X86_64
30 # include <asm/mmconfig.h>
31 #endif
32 
33 #include "cpu.h"
34 
35 u16 invlpgb_count_max __ro_after_init = 1;
36 
37 static inline int rdmsrq_amd_safe(unsigned int msr, u64 *p)
38 {
39 	u32 gprs[8] = { 0 };
40 	int err;
41 
42 	WARN_ONCE((boot_cpu_data.x86 != 0xf),
43 		  "%s should only be used on K8!\n", __func__);
44 
45 	gprs[1] = msr;
46 	gprs[7] = 0x9c5a203a;
47 
48 	err = rdmsr_safe_regs(gprs);
49 
50 	*p = gprs[0] | ((u64)gprs[2] << 32);
51 
52 	return err;
53 }
54 
55 static inline int wrmsrq_amd_safe(unsigned int msr, u64 val)
56 {
57 	u32 gprs[8] = { 0 };
58 
59 	WARN_ONCE((boot_cpu_data.x86 != 0xf),
60 		  "%s should only be used on K8!\n", __func__);
61 
62 	gprs[0] = (u32)val;
63 	gprs[1] = msr;
64 	gprs[2] = val >> 32;
65 	gprs[7] = 0x9c5a203a;
66 
67 	return wrmsr_safe_regs(gprs);
68 }
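/*
 * Illustrative sketch (an assumed example, not wired into the init path) of
 * how the passcode-protected accessors above are used: gprs[1] carries the
 * MSR number in %ecx and gprs[7] the 0x9c5a203a passcode in %edi.
 * init_amd_k8() below performs exactly this read-modify-write on the
 * erratum-110 MSR 0xc001100d; the function name here is made up.
 */
static inline void __maybe_unused k8_errata_msr_rmw_sketch(void)
{
	u64 val;

	/* Tolerate a faulting MSR access; only write back on a clean read. */
	if (!rdmsrq_amd_safe(0xc001100d, &val)) {
		val &= ~BIT_64(32);
		wrmsrq_amd_safe(0xc001100d, val);
	}
}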
69 
70 /*
71  *	B step AMD K6 before B 9730xxxx have hardware bugs that can cause
72  *	misexecution of code under Linux. Owners of such processors should
73  *	contact AMD for precise details and a CPU swap.
74  *
75  *	See	http://www.multimania.com/poulot/k6bug.html
76  *	and	section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
77  *		(Publication # 21266  Issue Date: August 1998)
78  *
79  *	The following test is erm.. interesting. AMD neglected to up
80  *	the chip setting when fixing the bug but they also tweaked some
81  *	performance at the same time..
82  */
83 
84 #ifdef CONFIG_X86_32
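/*
 * Minimal out-of-line indirect-call target: vide() does nothing but return.
 * It is used by the timing test in init_amd_k6() below.
 */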
85 extern __visible void vide(void);
86 __asm__(".text\n"
87 	".globl vide\n"
88 	".type vide, @function\n"
89 	".align 4\n"
90 	"vide: ret\n");
91 #endif
92 
93 static void init_amd_k5(struct cpuinfo_x86 *c)
94 {
95 #ifdef CONFIG_X86_32
96 /*
97  * General Systems BIOSen alias the cpu frequency registers
98  * of the Elan at 0x000df000. Unfortunately, one of the Linux
99  * drivers subsequently pokes it, and changes the CPU speed.
100  * Workaround: remove the unneeded alias.
101  */
102 #define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
103 #define CBAR_ENB	(0x80000000)
104 #define CBAR_KEY	(0x000000CB)
105 	if (c->x86_model == 9 || c->x86_model == 10) {
106 		if (inl(CBAR) & CBAR_ENB)
107 			outl(0 | CBAR_KEY, CBAR);
108 	}
109 #endif
110 }
111 
112 static void init_amd_k6(struct cpuinfo_x86 *c)
113 {
114 #ifdef CONFIG_X86_32
115 	u32 l, h;
116 	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);
117 
118 	if (c->x86_model < 6) {
119 		/* Based on AMD doc 20734R - June 2000 */
120 		if (c->x86_model == 0) {
121 			clear_cpu_cap(c, X86_FEATURE_APIC);
122 			set_cpu_cap(c, X86_FEATURE_PGE);
123 		}
124 		return;
125 	}
126 
127 	if (c->x86_model == 6 && c->x86_stepping == 1) {
128 		const int K6_BUG_LOOP = 1000000;
129 		int n;
130 		void (*f_vide)(void);
131 		u64 d, d2;
132 
133 		pr_info("AMD K6 stepping B detected - ");
134 
135 		/*
136 		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
137 		 * calls at the same time.
138 		 */
139 
140 		n = K6_BUG_LOOP;
141 		f_vide = vide;
142 		OPTIMIZER_HIDE_VAR(f_vide);
143 		d = rdtsc();
144 		while (n--)
145 			f_vide();
146 		d2 = rdtsc();
147 		d = d2-d;
148 
149 		if (d > 20*K6_BUG_LOOP)
150 			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
151 		else
152 			pr_cont("probably OK (after B9730xxxx).\n");
153 	}
154 
155 	/* K6 with old style WHCR */
156 	if (c->x86_model < 8 ||
157 	   (c->x86_model == 8 && c->x86_stepping < 8)) {
158 		/* We can only write-allocate on the low 508 MB */
159 		if (mbytes > 508)
160 			mbytes = 508;
161 
162 		rdmsr(MSR_K6_WHCR, l, h);
163 		if ((l&0x0000FFFF) == 0) {
164 			unsigned long flags;
165 			l = (1<<0)|((mbytes/4)<<1);
166 			local_irq_save(flags);
167 			wbinvd();
168 			wrmsr(MSR_K6_WHCR, l, h);
169 			local_irq_restore(flags);
170 			pr_info("Enabling old style K6 write allocation for %d Mb\n",
171 				mbytes);
172 		}
173 		return;
174 	}
175 
176 	if ((c->x86_model == 8 && c->x86_stepping > 7) ||
177 	     c->x86_model == 9 || c->x86_model == 13) {
178 		/* The more serious chips .. */
179 
180 		if (mbytes > 4092)
181 			mbytes = 4092;
182 
183 		rdmsr(MSR_K6_WHCR, l, h);
184 		if ((l&0xFFFF0000) == 0) {
185 			unsigned long flags;
186 			l = ((mbytes>>2)<<22)|(1<<16);
187 			local_irq_save(flags);
188 			wbinvd();
189 			wrmsr(MSR_K6_WHCR, l, h);
190 			local_irq_restore(flags);
191 			pr_info("Enabling new style K6 write allocation for %d Mb\n",
192 				mbytes);
193 		}
194 
195 		return;
196 	}
197 
198 	if (c->x86_model == 10) {
199 		/* AMD Geode LX is model 10 */
200 		/* placeholder for any needed mods */
201 		return;
202 	}
203 #endif
204 }
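/*
 * Worked example of the WHCR arithmetic above (illustrative numbers): with
 * 512 MB installed, an old-style part clamps mbytes to 508, giving
 * l = (1 << 0) | ((508 / 4) << 1) = 0xff; a new-style part with 4 GB clamps
 * mbytes to 4092, giving l = ((4092 >> 2) << 22) | (1 << 16) = 0xffc10000.
 */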
205 
206 static void init_amd_k7(struct cpuinfo_x86 *c)
207 {
208 #ifdef CONFIG_X86_32
209 	u32 l, h;
210 
211 	/*
212 	 * Bit 15 of the Athlon-specific MSR_K7_HWCR needs to be 0
213 	 * to enable SSE on Palomino/Morgan/Barton CPUs.
214 	 * If the BIOS didn't enable it already, enable it here.
215 	 */
216 	if (c->x86_model >= 6 && c->x86_model <= 10) {
217 		if (!cpu_has(c, X86_FEATURE_XMM)) {
218 			pr_info("Enabling disabled K7/SSE Support.\n");
219 			msr_clear_bit(MSR_K7_HWCR, 15);
220 			set_cpu_cap(c, X86_FEATURE_XMM);
221 		}
222 	}
223 
224 	/*
225 	 * It's been determined by AMD that Athlons since model 8 stepping 1
226 	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
227 	 * as per AMD technical note 27212 0.2.
228 	 */
229 	if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
230 		rdmsr(MSR_K7_CLK_CTL, l, h);
231 		if ((l & 0xfff00000) != 0x20000000) {
232 			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
233 				l, ((l & 0x000fffff)|0x20000000));
234 			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
235 		}
236 	}
237 
238 	/* Called from identify_secondary_cpu()? The boot CPU needs no MP check. */
239 	if (!c->cpu_index)
240 		return;
241 
242 	/*
243 	 * Certain Athlons might work (for various values of 'work') in SMP
244 	 * but they are not certified as MP capable.
245 	 */
246 	/* Athlon 660/661 is valid. */
247 	if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
248 	    (c->x86_stepping == 1)))
249 		return;
250 
251 	/* Duron 670 is valid */
252 	if ((c->x86_model == 7) && (c->x86_stepping == 0))
253 		return;
254 
255 	/*
256 	 * Athlon 662, Duron 671, and Athlons above model 7 have the MP
257 	 * capability bit. It's worth noting that the A5 stepping (662) of
258 	 * some Athlon XPs has the MP bit set.
259 	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
260 	 * more.
261 	 */
262 	if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
263 	    ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
264 	     (c->x86_model > 7))
265 		if (cpu_has(c, X86_FEATURE_MP))
266 			return;
267 
268 	/* If we get here, not a certified SMP capable AMD system. */
269 
270 	/*
271 	 * Don't taint if we are running an SMP kernel on a single
272 	 * non-MP-approved Athlon.
273 	 */
274 	WARN_ONCE(1, "WARNING: This combination of AMD"
275 		" processors is not suitable for SMP.\n");
276 	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
277 #endif
278 }
279 
280 #ifdef CONFIG_NUMA
281 /*
282  * Works around broken NUMA configs.  Read the comment in
283  * srat_detect_node().
284  */
285 static int nearby_node(int apicid)
286 {
287 	int i, node;
288 
289 	for (i = apicid - 1; i >= 0; i--) {
290 		node = __apicid_to_node[i];
291 		if (node != NUMA_NO_NODE && node_online(node))
292 			return node;
293 	}
294 	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
295 		node = __apicid_to_node[i];
296 		if (node != NUMA_NO_NODE && node_online(node))
297 			return node;
298 	}
299 	return first_node(node_online_map); /* Shouldn't happen */
300 }
301 #endif
302 
303 static void srat_detect_node(struct cpuinfo_x86 *c)
304 {
305 #ifdef CONFIG_NUMA
306 	int cpu = smp_processor_id();
307 	int node;
308 	unsigned apicid = c->topo.apicid;
309 
310 	node = numa_cpu_node(cpu);
311 	if (node == NUMA_NO_NODE)
312 		node = per_cpu_llc_id(cpu);
313 
314 	/*
315 	 * On multi-fabric platforms (e.g. Numascale NumaChip), a
316 	 * platform-specific handler needs to be called to fix up some
317 	 * of the CPU's IDs.
318 	 */
319 	if (x86_cpuinit.fixup_cpu_id)
320 		x86_cpuinit.fixup_cpu_id(c, node);
321 
322 	if (!node_online(node)) {
323 		/*
324 		 * Two possibilities here:
325 		 *
326 		 * - The CPU is missing memory and no node was created.  In
327 		 *   that case try picking one from a nearby CPU.
328 		 *
329 		 * - The APIC IDs differ from the HyperTransport node IDs
330 		 *   which the K8 northbridge parsing fills in.  Assume
331 		 *   they are all increased by a constant offset, but in
332 		 *   the same order as the HT nodeids.  If that doesn't
333 		 *   result in a usable node fall back to the path for the
334 		 *   previous case.
335 		 *
336 		 * This workaround operates directly on the mapping between
337 		 * APIC ID and NUMA node, assuming certain relationship
338 		 * between APIC ID, HT node ID and NUMA topology.  As going
339 		 * through CPU mapping may alter the outcome, directly
340 		 * access __apicid_to_node[].
341 		 */
342 		int ht_nodeid = c->topo.initial_apicid;
343 
344 		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
345 			node = __apicid_to_node[ht_nodeid];
346 		/* Pick a nearby node */
347 		if (!node_online(node))
348 			node = nearby_node(apicid);
349 	}
350 	numa_set_node(cpu, node);
351 #endif
352 }
353 
354 static void bsp_determine_snp(struct cpuinfo_x86 *c)
355 {
356 #ifdef CONFIG_ARCH_HAS_CC_PLATFORM
357 	cc_vendor = CC_VENDOR_AMD;
358 
359 	if (cpu_has(c, X86_FEATURE_SEV_SNP)) {
360 		/*
361 		 * RMP table entry format is not architectural and is defined by the
362 		 * per-processor PPR. Restrict SNP support on the known CPU models
363 		 * for which the RMP table entry format is currently defined or for
364 		 * processors which support the architecturally defined RMPREAD
365 		 * instruction.
366 		 */
367 		if (!cpu_has(c, X86_FEATURE_HYPERVISOR) &&
368 		    (cpu_feature_enabled(X86_FEATURE_ZEN3) ||
369 		     cpu_feature_enabled(X86_FEATURE_ZEN4) ||
370 		     cpu_feature_enabled(X86_FEATURE_RMPREAD)) &&
371 		    snp_probe_rmptable_info()) {
372 			cc_platform_set(CC_ATTR_HOST_SEV_SNP);
373 		} else {
374 			setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
375 			cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
376 		}
377 	}
378 #endif
379 }
380 
381 #define ZEN_MODEL_STEP_UCODE(fam, model, step, ucode) \
382 	X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, fam, model), \
383 			    step, step, ucode)
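/*
 * Each table entry matches one family/model/stepping and carries the
 * minimum microcode revision containing the fix;
 * x86_match_min_microcode_rev() returns true when the running CPU matches
 * an entry and its microcode revision is at least the listed value.
 */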
384 
385 static const struct x86_cpu_id amd_tsa_microcode[] = {
386 	ZEN_MODEL_STEP_UCODE(0x19, 0x01, 0x1, 0x0a0011d7),
387 	ZEN_MODEL_STEP_UCODE(0x19, 0x01, 0x2, 0x0a00123b),
388 	ZEN_MODEL_STEP_UCODE(0x19, 0x08, 0x2, 0x0a00820d),
389 	ZEN_MODEL_STEP_UCODE(0x19, 0x11, 0x1, 0x0a10114c),
390 	ZEN_MODEL_STEP_UCODE(0x19, 0x11, 0x2, 0x0a10124c),
391 	ZEN_MODEL_STEP_UCODE(0x19, 0x18, 0x1, 0x0a108109),
392 	ZEN_MODEL_STEP_UCODE(0x19, 0x21, 0x0, 0x0a20102e),
393 	ZEN_MODEL_STEP_UCODE(0x19, 0x21, 0x2, 0x0a201211),
394 	ZEN_MODEL_STEP_UCODE(0x19, 0x44, 0x1, 0x0a404108),
395 	ZEN_MODEL_STEP_UCODE(0x19, 0x50, 0x0, 0x0a500012),
396 	ZEN_MODEL_STEP_UCODE(0x19, 0x61, 0x2, 0x0a60120a),
397 	ZEN_MODEL_STEP_UCODE(0x19, 0x74, 0x1, 0x0a704108),
398 	ZEN_MODEL_STEP_UCODE(0x19, 0x75, 0x2, 0x0a705208),
399 	ZEN_MODEL_STEP_UCODE(0x19, 0x78, 0x0, 0x0a708008),
400 	ZEN_MODEL_STEP_UCODE(0x19, 0x7c, 0x0, 0x0a70c008),
401 	ZEN_MODEL_STEP_UCODE(0x19, 0xa0, 0x2, 0x0aa00216),
402 	{},
403 };
404 
405 static void tsa_init(struct cpuinfo_x86 *c)
406 {
407 	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
408 		return;
409 
410 	if (cpu_has(c, X86_FEATURE_ZEN3) ||
411 	    cpu_has(c, X86_FEATURE_ZEN4)) {
412 		if (x86_match_min_microcode_rev(amd_tsa_microcode))
413 			setup_force_cpu_cap(X86_FEATURE_VERW_CLEAR);
414 		else
415 			pr_debug("%s: current revision: 0x%x\n", __func__, c->microcode);
416 	} else {
417 		setup_force_cpu_cap(X86_FEATURE_TSA_SQ_NO);
418 		setup_force_cpu_cap(X86_FEATURE_TSA_L1_NO);
419 	}
420 }
421 
422 static void bsp_init_amd(struct cpuinfo_x86 *c)
423 {
424 	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
425 
426 		if (c->x86 > 0x10 ||
427 		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
428 			u64 val;
429 
430 			rdmsrq(MSR_K7_HWCR, val);
431 			if (!(val & BIT(24)))
432 				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
433 		}
434 	}
435 
436 	if (c->x86 == 0x15) {
437 		unsigned long upperbit;
438 		u32 cpuid, assoc;
439 
440 		cpuid	 = cpuid_edx(0x80000005);
441 		assoc	 = cpuid >> 16 & 0xff;
442 		upperbit = ((cpuid >> 24) << 10) / assoc;
443 
444 		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
445 		va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;
446 
447 		/* A random value per boot for bit slice [12:upper_bit) */
448 		va_align.bits = get_random_u32() & va_align.mask;
449 	}
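	/*
	 * Example with illustrative numbers: a 64 KB, 2-way L1 I-cache
	 * reports 0x40 in CPUID 0x80000005 EDX[31:24] and 2 in EDX[23:16],
	 * so upperbit = (64 << 10) / 2 = 0x8000 and va_align.mask = 0x7000,
	 * i.e. bits [14:12] of mmap addresses get the per-boot random value.
	 */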
450 
451 	if (cpu_has(c, X86_FEATURE_MWAITX))
452 		use_mwaitx_delay();
453 
454 	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
455 	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
456 	    c->x86 >= 0x15 && c->x86 <= 0x17) {
457 		unsigned int bit;
458 
459 		switch (c->x86) {
460 		case 0x15: bit = 54; break;
461 		case 0x16: bit = 33; break;
462 		case 0x17: bit = 10; break;
463 		default: return;
464 		}
465 		/*
466 		 * Try to cache the base value so further operations can
467 		 * avoid RMW. If that faults, do not enable SSBD.
468 		 */
469 		if (!rdmsrq_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
470 			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
471 			setup_force_cpu_cap(X86_FEATURE_SSBD);
472 			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
473 		}
474 	}
475 
476 	resctrl_cpu_detect(c);
477 
478 	/* Figure out Zen generations: */
479 	switch (c->x86) {
480 	case 0x17:
481 		switch (c->x86_model) {
482 		case 0x00 ... 0x2f:
483 		case 0x50 ... 0x5f:
484 			setup_force_cpu_cap(X86_FEATURE_ZEN1);
485 			break;
486 		case 0x30 ... 0x4f:
487 		case 0x60 ... 0x7f:
488 		case 0x90 ... 0x91:
489 		case 0xa0 ... 0xaf:
490 			setup_force_cpu_cap(X86_FEATURE_ZEN2);
491 			break;
492 		default:
493 			goto warn;
494 		}
495 		break;
496 
497 	case 0x19:
498 		switch (c->x86_model) {
499 		case 0x00 ... 0x0f:
500 		case 0x20 ... 0x5f:
501 			setup_force_cpu_cap(X86_FEATURE_ZEN3);
502 			break;
503 		case 0x10 ... 0x1f:
504 		case 0x60 ... 0xaf:
505 			setup_force_cpu_cap(X86_FEATURE_ZEN4);
506 			break;
507 		default:
508 			goto warn;
509 		}
510 		break;
511 
512 	case 0x1a:
513 		switch (c->x86_model) {
514 		case 0x00 ... 0x2f:
515 		case 0x40 ... 0x4f:
516 		case 0x60 ... 0x7f:
517 			setup_force_cpu_cap(X86_FEATURE_ZEN5);
518 			break;
519 		case 0x50 ... 0x5f:
520 		case 0x80 ... 0xaf:
521 		case 0xc0 ... 0xcf:
522 			setup_force_cpu_cap(X86_FEATURE_ZEN6);
523 			break;
524 		default:
525 			goto warn;
526 		}
527 		break;
528 
529 	default:
530 		break;
531 	}
532 
533 	bsp_determine_snp(c);
534 	tsa_init(c);
535 
536 	if (cpu_has(c, X86_FEATURE_GP_ON_USER_CPUID))
537 		setup_force_cpu_cap(X86_FEATURE_CPUID_FAULT);
538 
539 	return;
540 
541 warn:
542 	WARN_ONCE(1, "Family 0x%x, model: 0x%x??\n", c->x86, c->x86_model);
543 }
544 
545 static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
546 {
547 	u64 msr;
548 
549 	/*
550 	 * Mark that WBINVD is needed during kexec on processors that
551 	 * support SME. This provides support for performing a successful
552 	 * kexec when going from SME inactive to SME active (or vice-versa).
553 	 *
554 	 * The cache must be cleared so that if there are entries with the
555 	 * same physical address, both with and without the encryption bit,
556 	 * they don't race each other when flushed and potentially end up
557 	 * with the wrong entry being committed to memory.
558 	 *
559 	 * Test the CPUID bit directly because with mem_encrypt=off the
560 	 * BSP will clear the X86_FEATURE_SME bit and the APs will not
561 	 * see it set after that.
562 	 */
563 	if (c->extended_cpuid_level >= 0x8000001f && (cpuid_eax(0x8000001f) & BIT(0)))
564 		__this_cpu_write(cache_state_incoherent, true);
565 
566 	/*
567 	 * BIOS support is required for SME and SEV.
568 	 *   For SME: If BIOS has enabled SME then adjust x86_phys_bits by
569 	 *	      the SME physical address space reduction value.
570 	 *	      If BIOS has not enabled SME then don't advertise the
571 	 *	      SME feature (set in scattered.c).
572 	 *	      If the kernel has not enabled SME via any means then
573 	 *	      don't advertise the SME feature.
574 	 *   For SEV: If BIOS has not enabled SEV then don't advertise SEV and
575 	 *	      any additional functionality based on it.
576 	 *
577 	 *   In all cases, since support for SME and SEV requires long mode,
578 	 *   don't advertise the feature under CONFIG_X86_32.
579 	 */
580 	if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
581 		/* Check if memory encryption is enabled */
582 		rdmsrq(MSR_AMD64_SYSCFG, msr);
583 		if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
584 			goto clear_all;
585 
586 		/*
587 		 * Always adjust physical address bits. Even though this
588 		 * will be a value above 32 bits, this is still done for
589 		 * CONFIG_X86_32 so that accurate values are reported.
590 		 */
591 		c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;
592 
593 		if (IS_ENABLED(CONFIG_X86_32))
594 			goto clear_all;
595 
596 		if (!sme_me_mask)
597 			setup_clear_cpu_cap(X86_FEATURE_SME);
598 
599 		rdmsrq(MSR_K7_HWCR, msr);
600 		if (!(msr & MSR_K7_HWCR_SMMLOCK))
601 			goto clear_sev;
602 
603 		return;
604 
605 clear_all:
606 		setup_clear_cpu_cap(X86_FEATURE_SME);
607 clear_sev:
608 		setup_clear_cpu_cap(X86_FEATURE_SEV);
609 		setup_clear_cpu_cap(X86_FEATURE_SEV_ES);
610 		setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
611 	}
612 }
613 
614 static void early_init_amd(struct cpuinfo_x86 *c)
615 {
616 	u32 dummy;
617 
618 	if (c->x86 >= 0xf)
619 		set_cpu_cap(c, X86_FEATURE_K8);
620 
621 	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
622 
623 	/*
624 	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
625 	 * with P/T states and does not stop in deep C-states
626 	 */
627 	if (c->x86_power & (1 << 8)) {
628 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
629 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
630 	}
631 
632 	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
633 	if (c->x86_power & BIT(12))
634 		set_cpu_cap(c, X86_FEATURE_ACC_POWER);
635 
636 	/* Bit 14 indicates the Runtime Average Power Limit interface. */
637 	if (c->x86_power & BIT(14))
638 		set_cpu_cap(c, X86_FEATURE_RAPL);
639 
640 #ifdef CONFIG_X86_64
641 	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
642 #else
643 	/*  Set MTRR capability flag if appropriate */
644 	if (c->x86 == 5)
645 		if (c->x86_model == 13 || c->x86_model == 9 ||
646 		    (c->x86_model == 8 && c->x86_stepping >= 8))
647 			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
648 #endif
649 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
650 	/*
651 	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
652 	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
653 	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
654 	 * after 16h.
655 	 */
656 	if (boot_cpu_has(X86_FEATURE_APIC)) {
657 		if (c->x86 > 0x16)
658 			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
659 		else if (c->x86 >= 0xf) {
660 			/* check CPU config space for extended APIC ID */
661 			unsigned int val;
662 
663 			val = read_pci_config(0, 24, 0, 0x68);
664 			if ((val >> 17 & 0x3) == 0x3)
665 				set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
666 		}
667 	}
668 #endif
669 
670 	/*
671 	 * This is only needed to tell the kernel whether to use VMCALL
672 	 * and VMMCALL.  VMMCALL is never executed except under virt, so
673 	 * we can set it unconditionally.
674 	 */
675 	set_cpu_cap(c, X86_FEATURE_VMMCALL);
676 
677 	/* F16h erratum 793, CVE-2013-6885 */
678 	if (c->x86 == 0x16 && c->x86_model <= 0xf)
679 		msr_set_bit(MSR_AMD64_LS_CFG, 15);
680 
681 	early_detect_mem_encrypt(c);
682 
683 	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_IBPB_BRTYPE)) {
684 		if (c->x86 == 0x17 && boot_cpu_has(X86_FEATURE_AMD_IBPB))
685 			setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
686 		else if (c->x86 >= 0x19 && !wrmsrq_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
687 			setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
688 			setup_force_cpu_cap(X86_FEATURE_SBPB);
689 		}
690 	}
691 }
692 
693 static void init_amd_k8(struct cpuinfo_x86 *c)
694 {
695 	u32 level;
696 	u64 value;
697 
698 	/* On C+ stepping K8 rep microcode works well for copy/memset */
699 	level = cpuid_eax(1);
700 	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
701 		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
702 
703 	/*
704 	 * Some BIOSes incorrectly force this feature, but only K8 revision D
705 	 * (model = 0x14) and later actually support it.
706 	 * (AMD Erratum #110, docId: 25759).
707 	 */
708 	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM) && !cpu_has(c, X86_FEATURE_HYPERVISOR)) {
709 		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
710 		if (!rdmsrq_amd_safe(0xc001100d, &value)) {
711 			value &= ~BIT_64(32);
712 			wrmsrq_amd_safe(0xc001100d, value);
713 		}
714 	}
715 
716 	if (!c->x86_model_id[0])
717 		strscpy(c->x86_model_id, "Hammer");
718 
719 #ifdef CONFIG_SMP
720 	/*
721 	 * Disable the TLB flush filter by setting HWCR.FFDIS on K8
722 	 * (bit 6 of MSR C001_0015).
723 	 *
724 	 * Errata 63 for SH-B3 steppings
725 	 * Errata 122 for all steppings (F+ have it disabled by default)
726 	 */
727 	msr_set_bit(MSR_K7_HWCR, 6);
728 #endif
729 	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);
730 
731 	/*
732 	 * Check models and steppings affected by erratum 400. This is
733 	 * used to select the proper idle routine and to enable the
734 	 * check whether the machine is affected in arch_post_acpi_subsys_init()
735 	 * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
736 	 */
737 	if (c->x86_model > 0x41 ||
738 	    (c->x86_model == 0x41 && c->x86_stepping >= 0x2))
739 		setup_force_cpu_bug(X86_BUG_AMD_E400);
740 }
741 
742 static void init_amd_gh(struct cpuinfo_x86 *c)
743 {
744 #ifdef CONFIG_MMCONF_FAM10H
745 	/* Do this only on the boot CPU */
746 	if (c == &boot_cpu_data)
747 		check_enable_amd_mmconf_dmi();
748 
749 	fam10h_check_enable_mmcfg();
750 #endif
751 
752 	/*
753 	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
754 	 * is always needed when GART is enabled, even in a kernel which has no
755 	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
756 	 * If it doesn't, we do it here as suggested by the BKDG.
757 	 *
758 	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
759 	 */
760 	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);
761 
762 	/*
763 	 * On family 10h BIOS may not have properly enabled WC+ support, causing
764 	 * it to be converted to CD memtype. This may result in performance
765 	 * degradation for certain nested-paging guests. Prevent this conversion
766 	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
767 	 *
768 	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
769 	 * guests on older kvm hosts.
770 	 */
771 	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);
772 
773 	set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
774 
775 	/*
776 	 * Check models and steppings affected by erratum 400. This is
777 	 * used to select the proper idle routine and to enable the
778 	 * check whether the machine is affected in arch_post_acpi_subsys_init()
779 	 * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
780 	 */
781 	if (c->x86_model > 0x2 ||
782 	    (c->x86_model == 0x2 && c->x86_stepping >= 0x1))
783 		setup_force_cpu_bug(X86_BUG_AMD_E400);
784 }
785 
786 static void init_amd_ln(struct cpuinfo_x86 *c)
787 {
788 	/*
789 	 * Apply erratum 665 fix unconditionally so machines without a BIOS
790 	 * fix work.
791 	 */
792 	msr_set_bit(MSR_AMD64_DE_CFG, 31);
793 }
794 
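/*
 * "rdrand=force" on the kernel command line skips the RDRAND-hiding quirk
 * in clear_rdrand_cpuid_bit() below, for use when the BIOS is known to
 * restore RDRAND properly across suspend/resume.
 */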
795 static bool rdrand_force;
796 
797 static int __init rdrand_cmdline(char *str)
798 {
799 	if (!str)
800 		return -EINVAL;
801 
802 	if (!strcmp(str, "force"))
803 		rdrand_force = true;
804 	else
805 		return -EINVAL;
806 
807 	return 0;
808 }
809 early_param("rdrand", rdrand_cmdline);
810 
811 static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
812 {
813 	/*
814 	 * Saving of the MSR used to hide the RDRAND support during
815 	 * suspend/resume is done by arch/x86/power/cpu.c, which is
816 	 * dependent on CONFIG_PM_SLEEP.
817 	 */
818 	if (!IS_ENABLED(CONFIG_PM_SLEEP))
819 		return;
820 
821 	/*
822 	 * The self-test can clear X86_FEATURE_RDRAND, so check for
823 	 * RDRAND support using the CPUID function directly.
824 	 */
825 	if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
826 		return;
827 
828 	msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
829 
830 	/*
831 	 * Verify that the CPUID change has occurred in case the kernel is
832 	 * running virtualized and the hypervisor doesn't support the MSR.
833 	 */
834 	if (cpuid_ecx(1) & BIT(30)) {
835 		pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
836 		return;
837 	}
838 
839 	clear_cpu_cap(c, X86_FEATURE_RDRAND);
840 	pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
841 }
842 
843 static void init_amd_jg(struct cpuinfo_x86 *c)
844 {
845 	/*
846 	 * Some BIOS implementations do not restore proper RDRAND support
847 	 * across suspend and resume. Check whether to hide the RDRAND
848 	 * instruction support via CPUID.
849 	 */
850 	clear_rdrand_cpuid_bit(c);
851 }
852 
853 static void init_amd_bd(struct cpuinfo_x86 *c)
854 {
855 	u64 value;
856 
857 	/*
858 	 * The way access filter has a performance penalty on some workloads.
859 	 * Disable it on the affected CPUs.
860 	 */
861 	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
862 		if (!rdmsrq_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
863 			value |= 0x1E;
864 			wrmsrq_safe(MSR_F15H_IC_CFG, value);
865 		}
866 	}
867 
868 	/*
869 	 * Some BIOS implementations do not restore proper RDRAND support
870 	 * across suspend and resume. Check whether to hide the RDRAND
871 	 * instruction support via CPUID.
872 	 */
873 	clear_rdrand_cpuid_bit(c);
874 }
875 
876 static const struct x86_cpu_id erratum_1386_microcode[] = {
877 	ZEN_MODEL_STEP_UCODE(0x17, 0x01, 0x2, 0x0800126e),
878 	ZEN_MODEL_STEP_UCODE(0x17, 0x31, 0x0, 0x08301052),
879 	{}
880 };
881 
882 static void fix_erratum_1386(struct cpuinfo_x86 *c)
883 {
884 	/*
885 	 * Work around Erratum 1386.  The XSAVES instruction malfunctions in
886 	 * certain circumstances on Zen1/2 uarch, and not all parts have had
887 	 * updated microcode at the time of writing (March 2023).
888 	 *
889 	 * Affected parts all have no supervisor XSAVE states, meaning that
890 	 * the XSAVEC instruction (which works fine) is equivalent.
891 	 *
892 	 * Clear the feature flag only on microcode revisions which
893 	 * don't have the fix.
894 	 */
895 	if (x86_match_min_microcode_rev(erratum_1386_microcode))
896 		return;
897 
898 	clear_cpu_cap(c, X86_FEATURE_XSAVES);
899 }
900 
901 void init_spectral_chicken(struct cpuinfo_x86 *c)
902 {
903 #ifdef CONFIG_MITIGATION_UNRET_ENTRY
904 	/*
905 	 * On Zen2 we offer this chicken (bit) on the altar of Speculation.
906 	 *
907 	 * This suppresses speculation from the middle of a basic block, i.e. it
908 	 * suppresses non-branch predictions.
909 	 */
910 	if (!cpu_has(c, X86_FEATURE_HYPERVISOR))
911 		msr_set_bit(MSR_ZEN2_SPECTRAL_CHICKEN, MSR_ZEN2_SPECTRAL_CHICKEN_BIT);
912 #endif
913 }
914 
915 static void init_amd_zen_common(void)
916 {
917 	setup_force_cpu_cap(X86_FEATURE_ZEN);
918 #ifdef CONFIG_NUMA
919 	node_reclaim_distance = 32;
920 #endif
921 }
922 
923 static void init_amd_zen1(struct cpuinfo_x86 *c)
924 {
925 	fix_erratum_1386(c);
926 
927 	/* Fix up CPUID bits, but only if not virtualised. */
928 	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
929 
930 		/* Erratum 1076: CPB feature bit not being set in CPUID. */
931 		if (!cpu_has(c, X86_FEATURE_CPB))
932 			set_cpu_cap(c, X86_FEATURE_CPB);
933 	}
934 
935 	pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
936 	setup_force_cpu_bug(X86_BUG_DIV0);
937 
938 	/*
939 	 * Turn off the Instructions Retired free counter on machines that are
940 	 * susceptible to erratum #1054 "Instructions Retired Performance
941 	 * Counter May Be Inaccurate".
942 	 */
943 	if (c->x86_model < 0x30) {
944 		msr_clear_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
945 		clear_cpu_cap(c, X86_FEATURE_IRPERF);
946 	}
947 }
948 
949 static const struct x86_cpu_id amd_zenbleed_microcode[] = {
950 	ZEN_MODEL_STEP_UCODE(0x17, 0x31, 0x0, 0x0830107b),
951 	ZEN_MODEL_STEP_UCODE(0x17, 0x60, 0x1, 0x0860010c),
952 	ZEN_MODEL_STEP_UCODE(0x17, 0x68, 0x1, 0x08608107),
953 	ZEN_MODEL_STEP_UCODE(0x17, 0x71, 0x0, 0x08701033),
954 	ZEN_MODEL_STEP_UCODE(0x17, 0xa0, 0x0, 0x08a00009),
955 	{}
956 };
957 
958 static void zen2_zenbleed_check(struct cpuinfo_x86 *c)
959 {
960 	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
961 		return;
962 
963 	if (!cpu_has(c, X86_FEATURE_AVX))
964 		return;
965 
966 	if (!x86_match_min_microcode_rev(amd_zenbleed_microcode)) {
967 		pr_notice_once("Zenbleed: please update your microcode for the most optimal fix\n");
968 		msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
969 	} else {
970 		msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
971 	}
972 }
973 
974 static void init_amd_zen2(struct cpuinfo_x86 *c)
975 {
976 	init_spectral_chicken(c);
977 	fix_erratum_1386(c);
978 	zen2_zenbleed_check(c);
979 
980 	/* Disable RDSEED on AMD Cyan Skillfish because it is not reliable. */
981 	if (c->x86_model == 0x47 && c->x86_stepping == 0x0) {
982 		clear_cpu_cap(c, X86_FEATURE_RDSEED);
983 		msr_clear_bit(MSR_AMD64_CPUID_FN_7, 18);
984 		pr_emerg("RDSEED is not reliable on this platform; disabling.\n");
985 	}
986 
987 	/* Correct misconfigured CPUID on some clients. */
988 	clear_cpu_cap(c, X86_FEATURE_INVLPGB);
989 }
990 
991 static void init_amd_zen3(struct cpuinfo_x86 *c)
992 {
993 	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
994 		/*
995 		 * Zen3 (Fam19 model < 0x10) parts are not susceptible to
996 		 * Branch Type Confusion, but predate the allocation of the
997 		 * BTC_NO bit.
998 		 */
999 		if (!cpu_has(c, X86_FEATURE_BTC_NO))
1000 			set_cpu_cap(c, X86_FEATURE_BTC_NO);
1001 	}
1002 }
1003 
1004 static void init_amd_zen4(struct cpuinfo_x86 *c)
1005 {
1006 	if (!cpu_has(c, X86_FEATURE_HYPERVISOR))
1007 		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);
1008 
1009 	/*
1010 	 * These Zen4 SoCs advertise support for virtualized VMLOAD/VMSAVE
1011 	 * in some BIOS versions but they can lead to random host reboots.
1012 	 */
1013 	switch (c->x86_model) {
1014 	case 0x18 ... 0x1f:
1015 	case 0x60 ... 0x7f:
1016 		clear_cpu_cap(c, X86_FEATURE_V_VMSAVE_VMLOAD);
1017 		break;
1018 	}
1019 }
1020 
1021 static const struct x86_cpu_id zen5_rdseed_microcode[] = {
1022 	ZEN_MODEL_STEP_UCODE(0x1a, 0x02, 0x1, 0x0b00215a),
1023 	ZEN_MODEL_STEP_UCODE(0x1a, 0x08, 0x1, 0x0b008121),
1024 	ZEN_MODEL_STEP_UCODE(0x1a, 0x11, 0x0, 0x0b101054),
1025 	ZEN_MODEL_STEP_UCODE(0x1a, 0x24, 0x0, 0x0b204037),
1026 	ZEN_MODEL_STEP_UCODE(0x1a, 0x44, 0x0, 0x0b404035),
1027 	ZEN_MODEL_STEP_UCODE(0x1a, 0x44, 0x1, 0x0b404108),
1028 	ZEN_MODEL_STEP_UCODE(0x1a, 0x60, 0x0, 0x0b600037),
1029 	ZEN_MODEL_STEP_UCODE(0x1a, 0x68, 0x0, 0x0b608038),
1030 	ZEN_MODEL_STEP_UCODE(0x1a, 0x70, 0x0, 0x0b700037),
1031 	{},
1032 };
1033 
1034 static void init_amd_zen5(struct cpuinfo_x86 *c)
1035 {
1036 	if (!x86_match_min_microcode_rev(zen5_rdseed_microcode)) {
1037 		clear_cpu_cap(c, X86_FEATURE_RDSEED);
1038 		msr_clear_bit(MSR_AMD64_CPUID_FN_7, 18);
1039 		pr_emerg_once("RDSEED32 is broken. Disabling the corresponding CPUID bit.\n");
1040 	}
1041 }
1042 
1043 static void init_amd(struct cpuinfo_x86 *c)
1044 {
1045 	u64 vm_cr;
1046 
1047 	early_init_amd(c);
1048 
1049 	if (c->x86 >= 0x10)
1050 		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
1051 
1052 	/* AMD FSRM also implies FSRS */
1053 	if (cpu_has(c, X86_FEATURE_FSRM))
1054 		set_cpu_cap(c, X86_FEATURE_FSRS);
1055 
1056 	/* K6s report MCEs but don't actually have all the MSRs */
1057 	if (c->x86 < 6)
1058 		clear_cpu_cap(c, X86_FEATURE_MCE);
1059 
1060 	switch (c->x86) {
1061 	case 4:    init_amd_k5(c); break;
1062 	case 5:    init_amd_k6(c); break;
1063 	case 6:	   init_amd_k7(c); break;
1064 	case 0xf:  init_amd_k8(c); break;
1065 	case 0x10: init_amd_gh(c); break;
1066 	case 0x12: init_amd_ln(c); break;
1067 	case 0x15: init_amd_bd(c); break;
1068 	case 0x16: init_amd_jg(c); break;
1069 	}
1070 
1071 	/*
1072 	 * Save up on some future enablement work and do common Zen
1073 	 * settings.
1074 	 */
1075 	if (c->x86 >= 0x17)
1076 		init_amd_zen_common();
1077 
1078 	if (boot_cpu_has(X86_FEATURE_ZEN1))
1079 		init_amd_zen1(c);
1080 	else if (boot_cpu_has(X86_FEATURE_ZEN2))
1081 		init_amd_zen2(c);
1082 	else if (boot_cpu_has(X86_FEATURE_ZEN3))
1083 		init_amd_zen3(c);
1084 	else if (boot_cpu_has(X86_FEATURE_ZEN4))
1085 		init_amd_zen4(c);
1086 	else if (boot_cpu_has(X86_FEATURE_ZEN5))
1087 		init_amd_zen5(c);
1088 
1089 	/*
1090 	 * Enable the workaround for the FXSAVE leak on CPUs
1091 	 * without the XSaveErPtr feature.
1092 	 */
1093 	if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
1094 		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);
1095 
1096 	cpu_detect_cache_sizes(c);
1097 
1098 	srat_detect_node(c);
1099 
1100 	init_amd_cacheinfo(c);
1101 
1102 	if (cpu_has(c, X86_FEATURE_SVM)) {
1103 		rdmsrq(MSR_VM_CR, vm_cr);
1104 		if (vm_cr & SVM_VM_CR_SVM_DIS_MASK) {
1105 			pr_notice_once("SVM disabled (by BIOS) in MSR_VM_CR\n");
1106 			clear_cpu_cap(c, X86_FEATURE_SVM);
1107 		}
1108 	}
1109 
1110 	if (!cpu_has(c, X86_FEATURE_LFENCE_RDTSC) && cpu_has(c, X86_FEATURE_XMM2)) {
1111 		/*
1112 		 * Use LFENCE for execution serialization.  On families which
1113 		 * don't have that MSR, LFENCE is already serializing.
1114 		 * msr_set_bit() uses the safe accessors, too, even if the MSR
1115 		 * is not present.
1116 		 */
1117 		msr_set_bit(MSR_AMD64_DE_CFG,
1118 			    MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);
1119 
1120 		/* A serializing LFENCE stops RDTSC speculation */
1121 		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
1122 	}
1123 
1124 	/*
1125 	 * Family 0x12 and above processors have the APIC timer
1126 	 * running in deep C-states.
1127 	 */
1128 	if (c->x86 > 0x11)
1129 		set_cpu_cap(c, X86_FEATURE_ARAT);
1130 
1131 	/* 3DNow or LM implies PREFETCHW */
1132 	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
1133 		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
1134 			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);
1135 
1136 	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
1137 	if (!cpu_feature_enabled(X86_FEATURE_XENPV))
1138 		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
1139 
1140 	/* Enable the Instructions Retired free counter */
1141 	if (cpu_has(c, X86_FEATURE_IRPERF))
1142 		msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
1143 
1144 	check_null_seg_clears_base(c);
1145 
1146 	/*
1147 	 * Make sure EFER[AIBRSE - Automatic IBRS Enable] is set. The APs are brought up
1148 	 * using the trampoline code and as part of it, MSR_EFER gets prepared there in
1149 	 * order to be replicated onto them. Regardless, set it here again, if not set,
1150 	 * to protect against any future refactoring/code reorganization which might
1151 	 * miss setting this important bit.
1152 	 */
1153 	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
1154 	    cpu_has(c, X86_FEATURE_AUTOIBRS))
1155 		WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS) < 0);
1156 
1157 	/* AMD CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */
1158 	clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
1159 
1160 	/* Enable Translation Cache Extension */
1161 	if (cpu_has(c, X86_FEATURE_TCE))
1162 		msr_set_bit(MSR_EFER, _EFER_TCE);
1163 }
1164 
1165 #ifdef CONFIG_X86_32
1166 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
1167 {
1168 	/* AMD errata T13 (order #21922) */
1169 	if (c->x86 == 6) {
1170 		/* Duron Rev A0 */
1171 		if (c->x86_model == 3 && c->x86_stepping == 0)
1172 			size = 64;
1173 		/* Tbird rev A1/A2 */
1174 		if (c->x86_model == 4 &&
1175 			(c->x86_stepping == 0 || c->x86_stepping == 1))
1176 			size = 256;
1177 	}
1178 	return size;
1179 }
1180 #endif
1181 
1182 static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
1183 {
1184 	u32 ebx, eax, ecx, edx;
1185 	u16 mask = 0xfff;
1186 
1187 	if (c->x86 < 0xf)
1188 		return;
1189 
1190 	if (c->extended_cpuid_level < 0x80000006)
1191 		return;
1192 
1193 	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);
1194 
1195 	tlb_lld_4k = (ebx >> 16) & mask;
1196 	tlb_lli_4k = ebx & mask;
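	/*
	 * Hypothetical encoding: EBX = 0x08000400 would mean 0x800 (2048)
	 * L2 dTLB entries and 0x400 (1024) L2 iTLB entries for 4K pages.
	 */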
1197 
1198 	/*
1199 	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
1200 	 * characteristics from the CPUID function 0x80000005 instead.
1201 	 */
1202 	if (c->x86 == 0xf) {
1203 		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
1204 		mask = 0xff;
1205 	}
1206 
1207 	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
1208 	if (!((eax >> 16) & mask))
1209 		tlb_lld_2m = (cpuid_eax(0x80000005) >> 16) & 0xff;
1210 	else
1211 		tlb_lld_2m = (eax >> 16) & mask;
1212 
1213 	/* a 4M entry uses two 2M entries */
1214 	tlb_lld_4m = tlb_lld_2m >> 1;
1215 
1216 	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
1217 	if (!(eax & mask)) {
1218 		/* Erratum 658 */
1219 		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
1220 			tlb_lli_2m = 1024;
1221 		} else {
1222 			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
1223 			tlb_lli_2m = eax & 0xff;
1224 		}
1225 	} else
1226 		tlb_lli_2m = eax & mask;
1227 
1228 	tlb_lli_4m = tlb_lli_2m >> 1;
1229 
1230 	/* Max number of pages INVLPGB can invalidate in one shot */
1231 	if (cpu_has(c, X86_FEATURE_INVLPGB))
1232 		invlpgb_count_max = (cpuid_edx(0x80000008) & 0xffff) + 1;
1233 }
1234 
1235 static const struct cpu_dev amd_cpu_dev = {
1236 	.c_vendor	= "AMD",
1237 	.c_ident	= { "AuthenticAMD" },
1238 #ifdef CONFIG_X86_32
1239 	.legacy_models = {
1240 		{ .family = 4, .model_names =
1241 		  {
1242 			  [3] = "486 DX/2",
1243 			  [7] = "486 DX/2-WB",
1244 			  [8] = "486 DX/4",
1245 			  [9] = "486 DX/4-WB",
1246 			  [14] = "Am5x86-WT",
1247 			  [15] = "Am5x86-WB"
1248 		  }
1249 		},
1250 	},
1251 	.legacy_cache_size = amd_size_cache,
1252 #endif
1253 	.c_early_init   = early_init_amd,
1254 	.c_detect_tlb	= cpu_detect_tlb_amd,
1255 	.c_bsp_init	= bsp_init_amd,
1256 	.c_init		= init_amd,
1257 	.c_x86_vendor	= X86_VENDOR_AMD,
1258 };
1259 
1260 cpu_dev_register(amd_cpu_dev);
1261 
1262 static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[4], amd_dr_addr_mask);
1263 
1264 static unsigned int amd_msr_dr_addr_masks[] = {
1265 	MSR_F16H_DR0_ADDR_MASK,
1266 	MSR_F16H_DR1_ADDR_MASK,
1267 	MSR_F16H_DR1_ADDR_MASK + 1,
1268 	MSR_F16H_DR1_ADDR_MASK + 2
1269 };
1270 
1271 void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr)
1272 {
1273 	int cpu = smp_processor_id();
1274 
1275 	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
1276 		return;
1277 
1278 	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
1279 		return;
1280 
1281 	if (per_cpu(amd_dr_addr_mask, cpu)[dr] == mask)
1282 		return;
1283 
1284 	wrmsrq(amd_msr_dr_addr_masks[dr], mask);
1285 	per_cpu(amd_dr_addr_mask, cpu)[dr] = mask;
1286 }
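/*
 * Illustrative pairing (the hw_breakpoint core is the real caller): a data
 * breakpoint in DR0 plus a 4K address mask makes the breakpoint fire on any
 * access within the aligned 4 KB region containing a watched address addr:
 *
 *	set_debugreg(addr, 0);
 *	amd_set_dr_addr_mask(0xfff, 0);
 */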
1287 
1288 unsigned long amd_get_dr_addr_mask(unsigned int dr)
1289 {
1290 	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
1291 		return 0;
1292 
1293 	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
1294 		return 0;
1295 
1296 	return per_cpu(amd_dr_addr_mask[dr], smp_processor_id());
1297 }
1298 EXPORT_SYMBOL_FOR_KVM(amd_get_dr_addr_mask);
1299 
1300 static void zenbleed_check_cpu(void *unused)
1301 {
1302 	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
1303 
1304 	zen2_zenbleed_check(c);
1305 }
1306 
1307 void amd_check_microcode(void)
1308 {
1309 	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
1310 		return;
1311 
1312 	if (cpu_feature_enabled(X86_FEATURE_ZEN2))
1313 		on_each_cpu(zenbleed_check_cpu, NULL, 1);
1314 }
1315 
1316 static const char * const s5_reset_reason_txt[] = {
1317 	[0]  = "thermal pin BP_THERMTRIP_L was tripped",
1318 	[1]  = "power button was pressed for 4 seconds",
1319 	[2]  = "shutdown pin was tripped",
1320 	[4]  = "remote ASF power off command was received",
1321 	[9]  = "internal CPU thermal limit was tripped",
1322 	[16] = "system reset pin BP_SYS_RST_L was tripped",
1323 	[17] = "software issued PCI reset",
1324 	[18] = "software wrote 0x4 to reset control register 0xCF9",
1325 	[19] = "software wrote 0x6 to reset control register 0xCF9",
1326 	[20] = "software wrote 0xE to reset control register 0xCF9",
1327 	[21] = "ACPI power state transition occurred",
1328 	[22] = "keyboard reset pin KB_RST_L was tripped",
1329 	[23] = "internal CPU shutdown event occurred",
1330 	[24] = "system failed to boot before failed boot timer expired",
1331 	[25] = "hardware watchdog timer expired",
1332 	[26] = "remote ASF reset command was received",
1333 	[27] = "an uncorrected error caused a data fabric sync flood event",
1334 	[29] = "FCH and MP1 failed warm reset handshake",
1335 	[30] = "a parity error occurred",
1336 	[31] = "a software sync flood event occurred",
1337 };
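/*
 * A reset via the 0xCF9 register (bit 18), for example, is reported by
 * print_s5_reset_status_mmio() below as:
 *
 *   x86/amd: Previous system reset reason [0x00040000]: software wrote 0x4 to reset control register 0xCF9
 */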
1338 
1339 static __init int print_s5_reset_status_mmio(void)
1340 {
1341 	void __iomem *addr;
1342 	u32 value;
1343 	int i;
1344 
1345 	if (!cpu_feature_enabled(X86_FEATURE_ZEN))
1346 		return 0;
1347 
1348 	addr = ioremap(FCH_PM_BASE + FCH_PM_S5_RESET_STATUS, sizeof(value));
1349 	if (!addr)
1350 		return 0;
1351 
1352 	value = ioread32(addr);
1353 
1354 	/* Value with "all bits set" is an error response and should be ignored. */
1355 	if (value == U32_MAX) {
1356 		iounmap(addr);
1357 		return 0;
1358 	}
1359 
1360 	/*
1361 	 * Clear all reason bits so they won't be retained if the next reset
1362 	 * does not update the register. Besides, some bits are never cleared by
1363 	 * hardware so it's software's responsibility to clear them.
1364 	 *
1365 	 * Writing the value back effectively clears all reason bits as they are
1366 	 * write-1-to-clear.
1367 	 */
1368 	iowrite32(value, addr);
1369 	iounmap(addr);
1370 
1371 	for (i = 0; i < ARRAY_SIZE(s5_reset_reason_txt); i++) {
1372 		if (!(value & BIT(i)))
1373 			continue;
1374 
1375 		if (s5_reset_reason_txt[i]) {
1376 			pr_info("x86/amd: Previous system reset reason [0x%08x]: %s\n",
1377 				value, s5_reset_reason_txt[i]);
1378 		}
1379 	}
1380 
1381 	return 0;
1382 }
1383 late_initcall(print_s5_reset_status_mmio);
1384 
1385 static void __init dmi_scan_additional(const struct dmi_header *d, void *p)
1386 {
1387 	struct dmi_a_info *info = (struct dmi_a_info *)d;
1388 	void *next, *end;
1389 
1390 	if (!IS_ENABLED(CONFIG_DMI))
1391 		return;
1392 
1393 	if (info->header.type != DMI_ENTRY_ADDITIONAL ||
1394 	    info->header.length < DMI_A_INFO_MIN_SIZE ||
1395 	    info->count < 1)
1396 		return;
1397 
1398 	next = (void *)(info + 1);
1399 	end  = (void *)info + info->header.length;
1400 
1401 	do {
1402 		struct dmi_a_info_entry *entry;
1403 		const char *string_ptr;
1404 
1405 		entry = (struct dmi_a_info_entry *)next;
1406 
1407 		/*
1408 		 * Not much can be done to validate the data. At least the entry
1409 		 * length shouldn't be 0.
1410 		 */
1411 		if (!entry->length)
1412 			return;
1413 
1414 		string_ptr = dmi_string_nosave(&info->header, entry->str_num);
1415 
1416 		/* Sample string: AGESA!V9 StrixKrackanPI-FP8 1.1.0.0c */
1417 		if (!strncmp(string_ptr, "AGESA", 5)) {
1418 			pr_info("AGESA: %s\n", string_ptr);
1419 			break;
1420 		}
1421 
1422 		next += entry->length;
1423 	} while (end - next >= DMI_A_INFO_ENT_MIN_SIZE);
1424 }
1425 
1426 static __init int print_dmi_agesa(void)
1427 {
1428 	dmi_walk(dmi_scan_additional, NULL);
1429 	return 0;
1430 }
1431 late_initcall(print_dmi_agesa);
1432