xref: /linux/arch/x86/kernel/cpu/amd.c (revision 0526b56cbc3c489642bd6a5fe4b718dea7ef0ee8)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/export.h>
3 #include <linux/bitops.h>
4 #include <linux/elf.h>
5 #include <linux/mm.h>
6 
7 #include <linux/io.h>
8 #include <linux/sched.h>
9 #include <linux/sched/clock.h>
10 #include <linux/random.h>
11 #include <linux/topology.h>
12 #include <asm/processor.h>
13 #include <asm/apic.h>
14 #include <asm/cacheinfo.h>
15 #include <asm/cpu.h>
16 #include <asm/spec-ctrl.h>
17 #include <asm/smp.h>
18 #include <asm/numa.h>
19 #include <asm/pci-direct.h>
20 #include <asm/delay.h>
21 #include <asm/debugreg.h>
22 #include <asm/resctrl.h>
23 
24 #ifdef CONFIG_X86_64
25 # include <asm/mmconfig.h>
26 #endif
27 
28 #include "cpu.h"
29 
30 static const int amd_erratum_383[];
31 static const int amd_erratum_400[];
32 static const int amd_erratum_1054[];
33 static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
34 
35 /*
36  * nodes_per_socket: Stores the number of nodes per socket.
37  * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
38  * Node Identifiers[10:8]
39  */
40 static u32 nodes_per_socket = 1;
41 
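/*
 * A note on the helpers below (a sketch of the calling convention, based on
 * rdmsr_safe_regs()/wrmsr_safe_regs()): the gprs[] slots map to registers,
 * with gprs[0] being EAX, gprs[1] ECX (the MSR number), gprs[2] EDX and
 * gprs[7] EDI.  The 0x9c5a203a value placed in EDI acts as a key that K8
 * expects before it exposes these northbridge MSRs, which is why the plain
 * rdmsrl()/wrmsrl() accessors cannot be used here.
 */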
42 static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
43 {
44 	u32 gprs[8] = { 0 };
45 	int err;
46 
47 	WARN_ONCE((boot_cpu_data.x86 != 0xf),
48 		  "%s should only be used on K8!\n", __func__);
49 
50 	gprs[1] = msr;
51 	gprs[7] = 0x9c5a203a;
52 
53 	err = rdmsr_safe_regs(gprs);
54 
55 	*p = gprs[0] | ((u64)gprs[2] << 32);
56 
57 	return err;
58 }
59 
60 static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
61 {
62 	u32 gprs[8] = { 0 };
63 
64 	WARN_ONCE((boot_cpu_data.x86 != 0xf),
65 		  "%s should only be used on K8!\n", __func__);
66 
67 	gprs[0] = (u32)val;
68 	gprs[1] = msr;
69 	gprs[2] = val >> 32;
70 	gprs[7] = 0x9c5a203a;
71 
72 	return wrmsr_safe_regs(gprs);
73 }
74 
75 /*
76  *	B-step AMD K6 CPUs before B 9730xxxx have hardware bugs that can cause
77  *	misexecution of code under Linux. Owners of such processors should
78  *	contact AMD for precise details and a CPU swap.
79  *
80  *	See	http://www.multimania.com/poulot/k6bug.html
81  *	and	section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
82  *		(Publication # 21266  Issue Date: August 1998)
83  *
84  *	The following test is, erm, interesting. AMD neglected to bump
85  *	the chip setting when fixing the bug, but they also tweaked some
86  *	performance at the same time.
87  */
88 
89 #ifdef CONFIG_X86_32
90 extern __visible void vide(void);
91 __asm__(".text\n"
92 	".globl vide\n"
93 	".type vide, @function\n"
94 	".align 4\n"
95 	"vide: ret\n");
96 #endif
97 
98 static void init_amd_k5(struct cpuinfo_x86 *c)
99 {
100 #ifdef CONFIG_X86_32
101 /*
102  * General Systems BIOSen alias the cpu frequency registers
103  * of the Elan at 0x000df000. Unfortunately, one of the Linux
104  * drivers subsequently pokes it, and changes the CPU speed.
105  * Workaround : Remove the unneeded alias.
106  */
107 #define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
108 #define CBAR_ENB	(0x80000000)
109 #define CBAR_KEY	(0X000000CB)
110 	if (c->x86_model == 9 || c->x86_model == 10) {
111 		if (inl(CBAR) & CBAR_ENB)
112 			outl(0 | CBAR_KEY, CBAR);
113 	}
114 #endif
115 }
116 
117 static void init_amd_k6(struct cpuinfo_x86 *c)
118 {
119 #ifdef CONFIG_X86_32
120 	u32 l, h;
121 	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);
122 
123 	if (c->x86_model < 6) {
124 		/* Based on AMD doc 20734R - June 2000 */
125 		if (c->x86_model == 0) {
126 			clear_cpu_cap(c, X86_FEATURE_APIC);
127 			set_cpu_cap(c, X86_FEATURE_PGE);
128 		}
129 		return;
130 	}
131 
132 	if (c->x86_model == 6 && c->x86_stepping == 1) {
133 		const int K6_BUG_LOOP = 1000000;
134 		int n;
135 		void (*f_vide)(void);
136 		u64 d, d2;
137 
138 		pr_info("AMD K6 stepping B detected - ");
139 
140 		/*
141 		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
142 		 * calls at the same time.
143 		 */
144 
145 		n = K6_BUG_LOOP;
146 		f_vide = vide;
147 		OPTIMIZER_HIDE_VAR(f_vide);
148 		d = rdtsc();
149 		while (n--)
150 			f_vide();
151 		d2 = rdtsc();
152 		d = d2-d;
153 
154 		if (d > 20*K6_BUG_LOOP)
155 			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
156 		else
157 			pr_cont("probably OK (after B9730xxxx).\n");
158 	}
159 
160 	/* K6 with old style WHCR */
161 	if (c->x86_model < 8 ||
162 	   (c->x86_model == 8 && c->x86_stepping < 8)) {
163 		/* We can only write allocate on the low 508Mb */
164 		if (mbytes > 508)
165 			mbytes = 508;
166 
167 		rdmsr(MSR_K6_WHCR, l, h);
168 		if ((l&0x0000FFFF) == 0) {
169 			unsigned long flags;
170 			l = (1<<0)|((mbytes/4)<<1);
171 			local_irq_save(flags);
172 			wbinvd();
173 			wrmsr(MSR_K6_WHCR, l, h);
174 			local_irq_restore(flags);
175 			pr_info("Enabling old style K6 write allocation for %d Mb\n",
176 				mbytes);
177 		}
178 		return;
179 	}
180 
181 	if ((c->x86_model == 8 && c->x86_stepping > 7) ||
182 	     c->x86_model == 9 || c->x86_model == 13) {
183 		/* The more serious chips .. */
184 
185 		if (mbytes > 4092)
186 			mbytes = 4092;
187 
188 		rdmsr(MSR_K6_WHCR, l, h);
189 		if ((l&0xFFFF0000) == 0) {
190 			unsigned long flags;
191 			l = ((mbytes>>2)<<22)|(1<<16);
192 			local_irq_save(flags);
193 			wbinvd();
194 			wrmsr(MSR_K6_WHCR, l, h);
195 			local_irq_restore(flags);
196 			pr_info("Enabling new style K6 write allocation for %d Mb\n",
197 				mbytes);
198 		}
199 
200 		return;
201 	}
202 
203 	if (c->x86_model == 10) {
204 		/* AMD Geode LX is model 10 */
205 		/* placeholder for any needed mods */
206 		return;
207 	}
208 #endif
209 }
210 
211 static void init_amd_k7(struct cpuinfo_x86 *c)
212 {
213 #ifdef CONFIG_X86_32
214 	u32 l, h;
215 
216 	/*
217 	 * Bit 15 of the Athlon-specific MSR 15 needs to be 0
218 	 * to enable SSE on Palomino/Morgan/Barton CPUs.
219 	 * If the BIOS didn't enable it already, enable it here.
220 	 */
221 	if (c->x86_model >= 6 && c->x86_model <= 10) {
222 		if (!cpu_has(c, X86_FEATURE_XMM)) {
223 			pr_info("Enabling disabled K7/SSE Support.\n");
224 			msr_clear_bit(MSR_K7_HWCR, 15);
225 			set_cpu_cap(c, X86_FEATURE_XMM);
226 		}
227 	}
228 
229 	/*
230 	 * It's been determined by AMD that Athlons since model 8 stepping 1
231 	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
232 	 * as per AMD technical note 27212 0.2.
233 	 */
234 	if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
235 		rdmsr(MSR_K7_CLK_CTL, l, h);
236 		if ((l & 0xfff00000) != 0x20000000) {
237 			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
238 				l, ((l & 0x000fffff)|0x20000000));
239 			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
240 		}
241 	}
242 
243 	/* Is this call coming from identify_secondary_cpu()? */
244 	if (!c->cpu_index)
245 		return;
246 
247 	/*
248 	 * Certain Athlons might work (for various values of 'work') in SMP
249 	 * but they are not certified as MP capable.
250 	 */
251 	/* Athlon 660/661 is valid. */
252 	if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
253 	    (c->x86_stepping == 1)))
254 		return;
255 
256 	/* Duron 670 is valid */
257 	if ((c->x86_model == 7) && (c->x86_stepping == 0))
258 		return;
259 
260 	/*
261 	 * Athlon 662, Duron 671, and Athlons with model > 7 have the MP
262 	 * capability bit. It's worth noting that the A5 stepping (662) of
263 	 * some Athlon XPs has the MP bit set.
264 	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
265 	 * more.
266 	 */
267 	if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
268 	    ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
269 	     (c->x86_model > 7))
270 		if (cpu_has(c, X86_FEATURE_MP))
271 			return;
272 
273 	/* If we get here, not a certified SMP capable AMD system. */
274 
275 	/*
276 	 * Don't taint if we are running an SMP kernel on a single non-MP
277 	 * approved Athlon.
278 	 */
279 	WARN_ONCE(1, "WARNING: This combination of AMD"
280 		" processors is not suitable for SMP.\n");
281 	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
282 #endif
283 }
284 
285 #ifdef CONFIG_NUMA
286 /*
286  * To work around a broken NUMA config.  Read the comment in
288  * srat_detect_node().
289  */
290 static int nearby_node(int apicid)
291 {
292 	int i, node;
293 
294 	for (i = apicid - 1; i >= 0; i--) {
295 		node = __apicid_to_node[i];
296 		if (node != NUMA_NO_NODE && node_online(node))
297 			return node;
298 	}
299 	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
300 		node = __apicid_to_node[i];
301 		if (node != NUMA_NO_NODE && node_online(node))
302 			return node;
303 	}
304 	return first_node(node_online_map); /* Shouldn't happen */
305 }
306 #endif
307 
308 /*
309  * Fix up cpu_core_id for pre-F17h systems to be in the
310  * [0 .. cores_per_node - 1] range. Not really needed but
311  * kept so as not to break existing setups.
312  */
313 static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
314 {
315 	u32 cus_per_node;
316 
317 	if (c->x86 >= 0x17)
318 		return;
319 
320 	cus_per_node = c->x86_max_cores / nodes_per_socket;
321 	c->cpu_core_id %= cus_per_node;
322 }
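
/*
 * As an illustration: on a two-node pre-F17h package with x86_max_cores == 12,
 * cus_per_node is 12 / 2 = 6, so the APIC-derived core ids 0..11 are folded
 * into the 0..5 range within each node.
 */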
323 
324 /*
325  * Fixup core topology information for
326  * (1) AMD multi-node processors
327  *     Assumption: Number of cores in each internal node is the same.
328  * (2) AMD processors supporting compute units
329  */
330 static void amd_get_topology(struct cpuinfo_x86 *c)
331 {
332 	int cpu = smp_processor_id();
333 
334 	/* get information required for multi-node processors */
335 	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
336 		int err;
337 		u32 eax, ebx, ecx, edx;
338 
339 		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
340 
341 		c->cpu_die_id  = ecx & 0xff;
342 
343 		if (c->x86 == 0x15)
344 			c->cu_id = ebx & 0xff;
345 
346 		if (c->x86 >= 0x17) {
347 			c->cpu_core_id = ebx & 0xff;
348 
349 			if (smp_num_siblings > 1)
350 				c->x86_max_cores /= smp_num_siblings;
351 		}
352 
353 		/*
354 		 * If CPUID leaf 0xB is available, use it to derive
355 		 * topology information.
356 		 */
357 		err = detect_extended_topology(c);
358 		if (!err)
359 			c->x86_coreid_bits = get_count_order(c->x86_max_cores);
360 
361 		cacheinfo_amd_init_llc_id(c, cpu);
362 
363 	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
364 		u64 value;
365 
366 		rdmsrl(MSR_FAM10H_NODE_ID, value);
367 		c->cpu_die_id = value & 7;
368 
369 		per_cpu(cpu_llc_id, cpu) = c->cpu_die_id;
370 	} else
371 		return;
372 
373 	if (nodes_per_socket > 1) {
374 		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
375 		legacy_fixup_core_id(c);
376 	}
377 }
378 
379 /*
380  * On an AMD dual-core setup, the lower bits of the APIC ID distinguish the cores.
381  * Assumes number of cores is a power of two.
382  */
383 static void amd_detect_cmp(struct cpuinfo_x86 *c)
384 {
385 	unsigned bits;
386 	int cpu = smp_processor_id();
387 
388 	bits = c->x86_coreid_bits;
389 	/* Low order bits define the core id (index of core in socket) */
390 	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
391 	/* Convert the initial APIC ID into the socket ID */
392 	c->phys_proc_id = c->initial_apicid >> bits;
393 	/* use socket ID also for last level cache */
394 	per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id;
395 }
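
/*
 * For example, with c->x86_coreid_bits == 3 an initial APIC ID of 0x1a
 * (0b11010) yields cpu_core_id = 0b010 = 2 and phys_proc_id = 0b11 = 3;
 * that socket ID is then reused as the last level cache ID above.
 */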
396 
397 u32 amd_get_nodes_per_socket(void)
398 {
399 	return nodes_per_socket;
400 }
401 EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);
402 
403 static void srat_detect_node(struct cpuinfo_x86 *c)
404 {
405 #ifdef CONFIG_NUMA
406 	int cpu = smp_processor_id();
407 	int node;
408 	unsigned apicid = c->apicid;
409 
410 	node = numa_cpu_node(cpu);
411 	if (node == NUMA_NO_NODE)
412 		node = get_llc_id(cpu);
413 
414 	/*
415 	 * On a multi-fabric platform (e.g. Numascale NumaChip) a
416 	 * platform-specific handler needs to be called to fix up some
417 	 * IDs of the CPU.
418 	 */
419 	if (x86_cpuinit.fixup_cpu_id)
420 		x86_cpuinit.fixup_cpu_id(c, node);
421 
422 	if (!node_online(node)) {
423 		/*
424 		 * Two possibilities here:
425 		 *
426 		 * - The CPU is missing memory and no node was created.  In
427 		 *   that case try picking one from a nearby CPU.
428 		 *
429 		 * - The APIC IDs differ from the HyperTransport node IDs
430 		 *   which the K8 northbridge parsing fills in.  Assume
431 		 *   they are all increased by a constant offset, but in
432 		 *   the same order as the HT nodeids.  If that doesn't
433 		 *   result in a usable node fall back to the path for the
434 		 *   previous case.
435 		 *
436 		 * This workaround operates directly on the mapping between
437 		 * APIC ID and NUMA node, assuming certain relationship
438 		 * between APIC ID, HT node ID and NUMA topology.  As going
439 		 * through CPU mapping may alter the outcome, directly
440 		 * access __apicid_to_node[].
441 		 */
442 		int ht_nodeid = c->initial_apicid;
443 
444 		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
445 			node = __apicid_to_node[ht_nodeid];
446 		/* Pick a nearby node */
447 		if (!node_online(node))
448 			node = nearby_node(apicid);
449 	}
450 	numa_set_node(cpu, node);
451 #endif
452 }
453 
454 static void early_init_amd_mc(struct cpuinfo_x86 *c)
455 {
456 #ifdef CONFIG_SMP
457 	unsigned bits, ecx;
458 
459 	/* Multi core CPU? */
460 	if (c->extended_cpuid_level < 0x80000008)
461 		return;
462 
463 	ecx = cpuid_ecx(0x80000008);
464 
465 	c->x86_max_cores = (ecx & 0xff) + 1;
466 
467 	/* Is the CPU telling us the core ID bit shift? */
468 	bits = (ecx >> 12) & 0xF;
469 
470 	/* Otherwise recompute */
471 	if (bits == 0) {
472 		while ((1 << bits) < c->x86_max_cores)
473 			bits++;
474 	}
475 
476 	c->x86_coreid_bits = bits;
477 #endif
478 }
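
/*
 * For example, an 8-core part reporting ECX = 0x0007 in CPUID 0x80000008
 * gives x86_max_cores = 8 with an ApicIdCoreIdSize field of 0, so the
 * fallback loop derives x86_coreid_bits = 3; a part that reports the field
 * directly (e.g. ECX[15:12] = 4) uses that value as-is.
 */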
479 
480 static void bsp_init_amd(struct cpuinfo_x86 *c)
481 {
482 	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
483 
484 		if (c->x86 > 0x10 ||
485 		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
486 			u64 val;
487 
488 			rdmsrl(MSR_K7_HWCR, val);
489 			if (!(val & BIT(24)))
490 				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
491 		}
492 	}
493 
494 	if (c->x86 == 0x15) {
495 		unsigned long upperbit;
496 		u32 cpuid, assoc;
497 
498 		cpuid	 = cpuid_edx(0x80000005);
499 		assoc	 = cpuid >> 16 & 0xff;
500 		upperbit = ((cpuid >> 24) << 10) / assoc;
501 
502 		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
503 		va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;
504 
505 		/* A random value per boot for bit slice [12:upper_bit) */
506 		va_align.bits = get_random_u32() & va_align.mask;
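
		/*
		 * For example, a part whose CPUID 0x80000005 EDX reports a
		 * 64 KB, 2-way L1I gives upperbit = (64 << 10) / 2 = 32768,
		 * so va_align.mask ends up as 0x7000 and the bit slice
		 * [12:15) of aligned mmap addresses gets that single random
		 * per-boot value.
		 */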
507 	}
508 
509 	if (cpu_has(c, X86_FEATURE_MWAITX))
510 		use_mwaitx_delay();
511 
512 	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
513 		u32 ecx;
514 
515 		ecx = cpuid_ecx(0x8000001e);
516 		__max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1;
517 	} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
518 		u64 value;
519 
520 		rdmsrl(MSR_FAM10H_NODE_ID, value);
521 		__max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1;
522 	}
523 
524 	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
525 	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
526 	    c->x86 >= 0x15 && c->x86 <= 0x17) {
527 		unsigned int bit;
528 
529 		switch (c->x86) {
530 		case 0x15: bit = 54; break;
531 		case 0x16: bit = 33; break;
532 		case 0x17: bit = 10; break;
533 		default: return;
534 		}
535 		/*
536 		 * Try to cache the base value so further operations can
537 		 * avoid RMW. If that faults, do not enable SSBD.
538 		 */
539 		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
540 			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
541 			setup_force_cpu_cap(X86_FEATURE_SSBD);
542 			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
543 		}
544 	}
545 
546 	resctrl_cpu_detect(c);
547 }
548 
549 static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
550 {
551 	u64 msr;
552 
553 	/*
554 	 * BIOS support is required for SME and SEV.
555 	 *   For SME: If BIOS has enabled SME then adjust x86_phys_bits by
556 	 *	      the SME physical address space reduction value.
557 	 *	      If BIOS has not enabled SME then don't advertise the
558 	 *	      SME feature (set in scattered.c).
559 	 *	      If the kernel has not enabled SME via any means then
560 	 *	      don't advertise the SME feature.
561 	 *   For SEV: If BIOS has not enabled SEV then don't advertise the
562 	 *            SEV and SEV_ES feature (set in scattered.c).
563 	 *
564 	 *   In all cases, since support for SME and SEV requires long mode,
565 	 *   don't advertise the feature under CONFIG_X86_32.
566 	 */
567 	if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
568 		/* Check if memory encryption is enabled */
569 		rdmsrl(MSR_AMD64_SYSCFG, msr);
570 		if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
571 			goto clear_all;
572 
573 		/*
574 		 * Always adjust physical address bits. Even though this
575 		 * will be a value above 32-bits this is still done for
576 		 * CONFIG_X86_32 so that accurate values are reported.
577 		 */
578 		c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;
579 
580 		if (IS_ENABLED(CONFIG_X86_32))
581 			goto clear_all;
582 
583 		if (!sme_me_mask)
584 			setup_clear_cpu_cap(X86_FEATURE_SME);
585 
586 		rdmsrl(MSR_K7_HWCR, msr);
587 		if (!(msr & MSR_K7_HWCR_SMMLOCK))
588 			goto clear_sev;
589 
590 		return;
591 
592 clear_all:
593 		setup_clear_cpu_cap(X86_FEATURE_SME);
594 clear_sev:
595 		setup_clear_cpu_cap(X86_FEATURE_SEV);
596 		setup_clear_cpu_cap(X86_FEATURE_SEV_ES);
597 	}
598 }
599 
600 static void early_init_amd(struct cpuinfo_x86 *c)
601 {
602 	u64 value;
603 	u32 dummy;
604 
605 	early_init_amd_mc(c);
606 
607 	if (c->x86 >= 0xf)
608 		set_cpu_cap(c, X86_FEATURE_K8);
609 
610 	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
611 
612 	/*
613 	 * c->x86_power is 8000_0007 edx. Bit 8 means the TSC runs at a constant
614 	 * rate with P/T states and does not stop in deep C-states.
615 	 */
616 	if (c->x86_power & (1 << 8)) {
617 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
618 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
619 	}
620 
621 	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
622 	if (c->x86_power & BIT(12))
623 		set_cpu_cap(c, X86_FEATURE_ACC_POWER);
624 
625 	/* Bit 14 indicates the Runtime Average Power Limit interface. */
626 	if (c->x86_power & BIT(14))
627 		set_cpu_cap(c, X86_FEATURE_RAPL);
628 
629 #ifdef CONFIG_X86_64
630 	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
631 #else
632 	/*  Set MTRR capability flag if appropriate */
633 	if (c->x86 == 5)
634 		if (c->x86_model == 13 || c->x86_model == 9 ||
635 		    (c->x86_model == 8 && c->x86_stepping >= 8))
636 			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
637 #endif
638 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
639 	/*
640 	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
641 	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
642 	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
643 	 * after 16h.
644 	 */
645 	if (boot_cpu_has(X86_FEATURE_APIC)) {
646 		if (c->x86 > 0x16)
647 			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
648 		else if (c->x86 >= 0xf) {
649 			/* check CPU config space for extended APIC ID */
650 			unsigned int val;
651 
652 			val = read_pci_config(0, 24, 0, 0x68);
653 			if ((val >> 17 & 0x3) == 0x3)
654 				set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
655 		}
656 	}
657 #endif
658 
659 	/*
660 	 * This is only needed to tell the kernel whether to use VMCALL
661 	 * and VMMCALL.  VMMCALL is never executed except under virt, so
662 	 * we can set it unconditionally.
663 	 */
664 	set_cpu_cap(c, X86_FEATURE_VMMCALL);
665 
666 	/* F16h erratum 793, CVE-2013-6885 */
667 	if (c->x86 == 0x16 && c->x86_model <= 0xf)
668 		msr_set_bit(MSR_AMD64_LS_CFG, 15);
669 
670 	/*
671 	 * Check whether the machine is affected by erratum 400. This is
672 	 * used to select the proper idle routine and to enable, in
673 	 * arch_post_acpi_init(), the check that sets the X86_BUG_AMD_APIC_C1E
674 	 * bug depending on the MSR check.
675 	 */
676 	if (cpu_has_amd_erratum(c, amd_erratum_400))
677 		set_cpu_bug(c, X86_BUG_AMD_E400);
678 
679 	early_detect_mem_encrypt(c);
680 
681 	/* Re-enable TopologyExtensions if switched off by BIOS */
682 	if (c->x86 == 0x15 &&
683 	    (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
684 	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {
685 
686 		if (msr_set_bit(0xc0011005, 54) > 0) {
687 			rdmsrl(0xc0011005, value);
688 			if (value & BIT_64(54)) {
689 				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
690 				pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
691 			}
692 		}
693 	}
694 
695 	if (cpu_has(c, X86_FEATURE_TOPOEXT))
696 		smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
697 }
698 
699 static void init_amd_k8(struct cpuinfo_x86 *c)
700 {
701 	u32 level;
702 	u64 value;
703 
704 	/* On C+ stepping K8 rep microcode works well for copy/memset */
705 	level = cpuid_eax(1);
706 	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
707 		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
708 
709 	/*
710 	 * Some BIOSes incorrectly force this feature, but only K8 revision D
711 	 * (model = 0x14) and later actually support it.
712 	 * (AMD Erratum #110, docId: 25759).
713 	 */
714 	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
715 		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
716 		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
717 			value &= ~BIT_64(32);
718 			wrmsrl_amd_safe(0xc001100d, value);
719 		}
720 	}
721 
722 	if (!c->x86_model_id[0])
723 		strcpy(c->x86_model_id, "Hammer");
724 
725 #ifdef CONFIG_SMP
726 	/*
727 	 * Disable the TLB flush filter on K8 by setting HWCR.FFDIS
728 	 * (bit 6 of MSR C001_0015).
729 	 *
730 	 * Errata 63 for SH-B3 steppings
731 	 * Errata 122 for all steppings (F+ have it disabled by default)
732 	 */
733 	msr_set_bit(MSR_K7_HWCR, 6);
734 #endif
735 	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);
736 }
737 
738 static void init_amd_gh(struct cpuinfo_x86 *c)
739 {
740 #ifdef CONFIG_MMCONF_FAM10H
741 	/* do this for boot cpu */
742 	if (c == &boot_cpu_data)
743 		check_enable_amd_mmconf_dmi();
744 
745 	fam10h_check_enable_mmcfg();
746 #endif
747 
748 	/*
749 	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
750 	 * is always needed when GART is enabled, even in a kernel which has no
751 	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
752 	 * If it doesn't, we do it here as suggested by the BKDG.
753 	 *
754 	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
755 	 */
756 	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);
757 
758 	/*
759 	 * On family 10h BIOS may not have properly enabled WC+ support, causing
760 	 * it to be converted to CD memtype. This may result in performance
761 	 * degradation for certain nested-paging guests. Prevent this conversion
762 	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
763 	 *
764 	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
765 	 * guests on older kvm hosts.
766 	 */
767 	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);
768 
769 	if (cpu_has_amd_erratum(c, amd_erratum_383))
770 		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
771 }
772 
773 static void init_amd_ln(struct cpuinfo_x86 *c)
774 {
775 	/*
776 	 * Apply erratum 665 fix unconditionally so machines without a BIOS
777 	 * fix work.
778 	 */
779 	msr_set_bit(MSR_AMD64_DE_CFG, 31);
780 }
781 
782 static bool rdrand_force;
783 
784 static int __init rdrand_cmdline(char *str)
785 {
786 	if (!str)
787 		return -EINVAL;
788 
789 	if (!strcmp(str, "force"))
790 		rdrand_force = true;
791 	else
792 		return -EINVAL;
793 
794 	return 0;
795 }
796 early_param("rdrand", rdrand_cmdline);
797 
798 static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
799 {
800 	/*
801 	 * Saving of the MSR used to hide the RDRAND support during
802 	 * suspend/resume is done by arch/x86/power/cpu.c, which is
803 	 * dependent on CONFIG_PM_SLEEP.
804 	 */
805 	if (!IS_ENABLED(CONFIG_PM_SLEEP))
806 		return;
807 
808 	/*
809 	 * The self-test can clear X86_FEATURE_RDRAND, so check for
810 	 * RDRAND support using the CPUID function directly.
811 	 */
812 	if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
813 		return;
814 
815 	msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
816 
817 	/*
818 	 * Verify that the CPUID change has occurred in case the kernel is
819 	 * running virtualized and the hypervisor doesn't support the MSR.
820 	 */
821 	if (cpuid_ecx(1) & BIT(30)) {
822 		pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
823 		return;
824 	}
825 
826 	clear_cpu_cap(c, X86_FEATURE_RDRAND);
827 	pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
828 }
829 
830 static void init_amd_jg(struct cpuinfo_x86 *c)
831 {
832 	/*
833 	 * Some BIOS implementations do not restore proper RDRAND support
834 	 * across suspend and resume. Check on whether to hide the RDRAND
835 	 * instruction support via CPUID.
836 	 */
837 	clear_rdrand_cpuid_bit(c);
838 }
839 
840 static void init_amd_bd(struct cpuinfo_x86 *c)
841 {
842 	u64 value;
843 
844 	/*
845 	 * The way access filter has a performance penalty on some workloads.
846 	 * Disable it on the affected CPUs.
847 	 */
848 	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
849 		if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
850 			value |= 0x1E;
851 			wrmsrl_safe(MSR_F15H_IC_CFG, value);
852 		}
853 	}
854 
855 	/*
856 	 * Some BIOS implementations do not restore proper RDRAND support
857 	 * across suspend and resume. Check on whether to hide the RDRAND
858 	 * instruction support via CPUID.
859 	 */
860 	clear_rdrand_cpuid_bit(c);
861 }
862 
863 void init_spectral_chicken(struct cpuinfo_x86 *c)
864 {
865 #ifdef CONFIG_CPU_UNRET_ENTRY
866 	u64 value;
867 
868 	/*
869 	 * On Zen2 we offer this chicken (bit) on the altar of Speculation.
870 	 *
871 	 * This suppresses speculation from the middle of a basic block, i.e. it
872 	 * suppresses non-branch predictions.
873 	 *
874 	 * We use STIBP as a heuristic to filter out Zen2 from the rest of F17H
875 	 */
876 	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_AMD_STIBP)) {
877 		if (!rdmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) {
878 			value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT;
879 			wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
880 		}
881 	}
882 #endif
883 	/*
884 	 * Work around Erratum 1386.  The XSAVES instruction malfunctions in
885 	 * certain circumstances on Zen1/2 uarch, and not all parts have had
886 	 * updated microcode at the time of writing (March 2023).
887 	 *
888 	 * Affected parts all have no supervisor XSAVE states, meaning that
889 	 * the XSAVEC instruction (which works fine) is equivalent.
890 	 */
891 	clear_cpu_cap(c, X86_FEATURE_XSAVES);
892 }
893 
894 static void init_amd_zn(struct cpuinfo_x86 *c)
895 {
896 	set_cpu_cap(c, X86_FEATURE_ZEN);
897 
898 #ifdef CONFIG_NUMA
899 	node_reclaim_distance = 32;
900 #endif
901 
902 	/* Fix up CPUID bits, but only if not virtualised. */
903 	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
904 
905 		/* Erratum 1076: CPB feature bit not being set in CPUID. */
906 		if (!cpu_has(c, X86_FEATURE_CPB))
907 			set_cpu_cap(c, X86_FEATURE_CPB);
908 
909 		/*
910 		 * Zen3 (Fam19 model < 0x10) parts are not susceptible to
911 		 * Branch Type Confusion, but predate the allocation of the
912 		 * BTC_NO bit.
913 		 */
914 		if (c->x86 == 0x19 && !cpu_has(c, X86_FEATURE_BTC_NO))
915 			set_cpu_cap(c, X86_FEATURE_BTC_NO);
916 	}
917 }
918 
919 static void init_amd(struct cpuinfo_x86 *c)
920 {
921 	early_init_amd(c);
922 
923 	/*
924 	 * Bit 31 in the standard CPUID leaf is used for a nonstandard 3DNow ID;
925 	 * 3DNow is identified by bit 31 in the extended CPUID leaf (1*32+31) anyway.
926 	 */
927 	clear_cpu_cap(c, 0*32+31);
928 
929 	if (c->x86 >= 0x10)
930 		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
931 
932 	/* AMD FSRM also implies FSRS */
933 	if (cpu_has(c, X86_FEATURE_FSRM))
934 		set_cpu_cap(c, X86_FEATURE_FSRS);
935 
936 	/* get apicid instead of initial apic id from cpuid */
937 	c->apicid = hard_smp_processor_id();
938 
939 	/* K6s report MCEs but don't actually have all the MSRs */
940 	if (c->x86 < 6)
941 		clear_cpu_cap(c, X86_FEATURE_MCE);
942 
943 	switch (c->x86) {
944 	case 4:    init_amd_k5(c); break;
945 	case 5:    init_amd_k6(c); break;
946 	case 6:	   init_amd_k7(c); break;
947 	case 0xf:  init_amd_k8(c); break;
948 	case 0x10: init_amd_gh(c); break;
949 	case 0x12: init_amd_ln(c); break;
950 	case 0x15: init_amd_bd(c); break;
951 	case 0x16: init_amd_jg(c); break;
952 	case 0x17: init_spectral_chicken(c);
953 		   fallthrough;
954 	case 0x19: init_amd_zn(c); break;
955 	}
956 
957 	/*
958 	 * Enable the workaround for the FXSAVE leak on CPUs
959 	 * without the XSaveErPtr feature.
960 	 */
961 	if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
962 		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);
963 
964 	cpu_detect_cache_sizes(c);
965 
966 	amd_detect_cmp(c);
967 	amd_get_topology(c);
968 	srat_detect_node(c);
969 
970 	init_amd_cacheinfo(c);
971 
972 	if (!cpu_has(c, X86_FEATURE_LFENCE_RDTSC) && cpu_has(c, X86_FEATURE_XMM2)) {
973 		/*
974 		 * Use LFENCE for execution serialization.  On families which
975 		 * don't have that MSR, LFENCE is already serializing.
976 		 * msr_set_bit() uses the safe accessors, too, even if the MSR
977 		 * is not present.
978 		 */
979 		msr_set_bit(MSR_AMD64_DE_CFG,
980 			    MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);
981 
982 		/* A serializing LFENCE stops RDTSC speculation */
983 		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
984 	}
985 
986 	/*
987 	 * Family 0x12 and above processors have the APIC timer
988 	 * running in deep C-states.
989 	 */
990 	if (c->x86 > 0x11)
991 		set_cpu_cap(c, X86_FEATURE_ARAT);
992 
993 	/* 3DNow or LM implies PREFETCHW */
994 	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
995 		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
996 			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);
997 
998 	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
999 	if (!cpu_feature_enabled(X86_FEATURE_XENPV))
1000 		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
1001 
1002 	/*
1003 	 * Turn on the Instructions Retired free counter on machines not
1004 	 * susceptible to erratum #1054 "Instructions Retired Performance
1005 	 * Counter May Be Inaccurate".
1006 	 */
1007 	if (cpu_has(c, X86_FEATURE_IRPERF) &&
1008 	    !cpu_has_amd_erratum(c, amd_erratum_1054))
1009 		msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
1010 
1011 	check_null_seg_clears_base(c);
1012 
1013 	/*
1014 	 * Make sure EFER[AIBRSE - Automatic IBRS Enable] is set. The APs are brought up
1015 	 * using the trampoline code and as part of it, MSR_EFER gets prepared there in
1016 	 * order to be replicated onto them. Regardless, set it here again, if not set,
1017 	 * to protect against any future refactoring/code reorganization which might
1018 	 * miss setting this important bit.
1019 	 */
1020 	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
1021 	    cpu_has(c, X86_FEATURE_AUTOIBRS))
1022 		WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS));
1023 }
1024 
1025 #ifdef CONFIG_X86_32
1026 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
1027 {
1028 	/* AMD errata T13 (order #21922) */
1029 	if (c->x86 == 6) {
1030 		/* Duron Rev A0 */
1031 		if (c->x86_model == 3 && c->x86_stepping == 0)
1032 			size = 64;
1033 		/* Tbird rev A1/A2 */
1034 		if (c->x86_model == 4 &&
1035 			(c->x86_stepping == 0 || c->x86_stepping == 1))
1036 			size = 256;
1037 	}
1038 	return size;
1039 }
1040 #endif
1041 
1042 static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
1043 {
1044 	u32 ebx, eax, ecx, edx;
1045 	u16 mask = 0xfff;
1046 
1047 	if (c->x86 < 0xf)
1048 		return;
1049 
1050 	if (c->extended_cpuid_level < 0x80000006)
1051 		return;
1052 
1053 	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);
1054 
1055 	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
1056 	tlb_lli_4k[ENTRIES] = ebx & mask;
1057 
1058 	/*
1059 	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
1060 	 * characteristics from the CPUID function 0x80000005 instead.
1061 	 */
1062 	if (c->x86 == 0xf) {
1063 		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
1064 		mask = 0xff;
1065 	}
1066 
1067 	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
1068 	if (!((eax >> 16) & mask))
1069 		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
1070 	else
1071 		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;
1072 
1073 	/* a 4M entry uses two 2M entries */
1074 	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;
1075 
1076 	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
1077 	if (!(eax & mask)) {
1078 		/* Erratum 658 */
1079 		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
1080 			tlb_lli_2m[ENTRIES] = 1024;
1081 		} else {
1082 			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
1083 			tlb_lli_2m[ENTRIES] = eax & 0xff;
1084 		}
1085 	} else
1086 		tlb_lli_2m[ENTRIES] = eax & mask;
1087 
1088 	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
1089 }
1090 
1091 static const struct cpu_dev amd_cpu_dev = {
1092 	.c_vendor	= "AMD",
1093 	.c_ident	= { "AuthenticAMD" },
1094 #ifdef CONFIG_X86_32
1095 	.legacy_models = {
1096 		{ .family = 4, .model_names =
1097 		  {
1098 			  [3] = "486 DX/2",
1099 			  [7] = "486 DX/2-WB",
1100 			  [8] = "486 DX/4",
1101 			  [9] = "486 DX/4-WB",
1102 			  [14] = "Am5x86-WT",
1103 			  [15] = "Am5x86-WB"
1104 		  }
1105 		},
1106 	},
1107 	.legacy_cache_size = amd_size_cache,
1108 #endif
1109 	.c_early_init   = early_init_amd,
1110 	.c_detect_tlb	= cpu_detect_tlb_amd,
1111 	.c_bsp_init	= bsp_init_amd,
1112 	.c_init		= init_amd,
1113 	.c_x86_vendor	= X86_VENDOR_AMD,
1114 };
1115 
1116 cpu_dev_register(amd_cpu_dev);
1117 
1118 /*
1119  * AMD errata checking
1120  *
1121  * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
1122  * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
1123  * have an OSVW id assigned, which it takes as first argument. Both take a
1124  * variable number of family-specific model-stepping ranges created by
1125  * AMD_MODEL_RANGE().
1126  *
1127  * Example:
1128  *
1129  * const int amd_erratum_319[] =
1130  *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
1131  *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
1132  *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
1133  */
1134 
1135 #define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
1136 #define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
1137 #define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
1138 	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
1139 #define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
1140 #define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
1141 #define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)
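
/*
 * For example, AMD_MODEL_RANGE(0x17, 0x0, 0x0, 0x2f, 0xf) encodes to
 * 0x170002ff: family 0x17 in bits 31-24, the (model, stepping) start value
 * 0x000 in bits 23-12 and the end value 0x2ff in bits 11-0.
 * cpu_has_amd_erratum() packs the running CPU's (model, stepping) the same
 * way, as ms = (model << 4) | stepping, and tests it against [start, end].
 */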
1142 
1143 static const int amd_erratum_400[] =
1144 	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
1145 			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
1146 
1147 static const int amd_erratum_383[] =
1148 	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
1149 
1150 /* #1054: Instructions Retired Performance Counter May Be Inaccurate */
1151 static const int amd_erratum_1054[] =
1152 	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
1153 
1154 static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
1155 {
1156 	int osvw_id = *erratum++;
1157 	u32 range;
1158 	u32 ms;
1159 
1160 	if (osvw_id >= 0 && osvw_id < 65536 &&
1161 	    cpu_has(cpu, X86_FEATURE_OSVW)) {
1162 		u64 osvw_len;
1163 
1164 		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
1165 		if (osvw_id < osvw_len) {
1166 			u64 osvw_bits;
1167 
1168 			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
1169 			    osvw_bits);
1170 			return osvw_bits & (1ULL << (osvw_id & 0x3f));
1171 		}
1172 	}
1173 
1174 	/* OSVW unavailable or ID unknown, match family-model-stepping range */
1175 	ms = (cpu->x86_model << 4) | cpu->x86_stepping;
1176 	while ((range = *erratum++))
1177 		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
1178 		    (ms >= AMD_MODEL_RANGE_START(range)) &&
1179 		    (ms <= AMD_MODEL_RANGE_END(range)))
1180 			return true;
1181 
1182 	return false;
1183 }
1184 
1185 static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[4], amd_dr_addr_mask);
1186 
1187 static unsigned int amd_msr_dr_addr_masks[] = {
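/*
 * The DR1-DR3 address mask MSRs sit at consecutive addresses, whereas the
 * DR0 mask MSR is not adjacent to them; hence the table below starts at
 * MSR_F16H_DR0_ADDR_MASK and derives the rest from MSR_F16H_DR1_ADDR_MASK.
 */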
1188 	MSR_F16H_DR0_ADDR_MASK,
1189 	MSR_F16H_DR1_ADDR_MASK,
1190 	MSR_F16H_DR1_ADDR_MASK + 1,
1191 	MSR_F16H_DR1_ADDR_MASK + 2
1192 };
1193 
1194 void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr)
1195 {
1196 	int cpu = smp_processor_id();
1197 
1198 	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
1199 		return;
1200 
1201 	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
1202 		return;
1203 
1204 	if (per_cpu(amd_dr_addr_mask, cpu)[dr] == mask)
1205 		return;
1206 
1207 	wrmsr(amd_msr_dr_addr_masks[dr], mask, 0);
1208 	per_cpu(amd_dr_addr_mask, cpu)[dr] = mask;
1209 }
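
/*
 * A usage sketch (the real callers live in the hw_breakpoint code): each
 * mask write is paired with the breakpoint installed in the matching debug
 * register, e.g.:
 *
 *	amd_set_dr_addr_mask(0xfff, 0);	(DR0 ignores address bits 11:0)
 *	...
 *	amd_set_dr_addr_mask(0, 0);	(restore exact address matching)
 */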
1210 
1211 unsigned long amd_get_dr_addr_mask(unsigned int dr)
1212 {
1213 	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
1214 		return 0;
1215 
1216 	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
1217 		return 0;
1218 
1219 	return per_cpu(amd_dr_addr_mask[dr], smp_processor_id());
1220 }
1221 EXPORT_SYMBOL_GPL(amd_get_dr_addr_mask);
1222 
1223 u32 amd_get_highest_perf(void)
1224 {
1225 	struct cpuinfo_x86 *c = &boot_cpu_data;
1226 
1227 	if (c->x86 == 0x17 && ((c->x86_model >= 0x30 && c->x86_model < 0x40) ||
1228 			       (c->x86_model >= 0x70 && c->x86_model < 0x80)))
1229 		return 166;
1230 
1231 	if (c->x86 == 0x19 && ((c->x86_model >= 0x20 && c->x86_model < 0x30) ||
1232 			       (c->x86_model >= 0x40 && c->x86_model < 0x70)))
1233 		return 166;
1234 
1235 	return 255;
1236 }
1237 EXPORT_SYMBOL_GPL(amd_get_highest_perf);
1238