xref: /linux/arch/x86/kernel/cpu/amd.c (revision 44b111b519160e33fdc41eadb39af86a24707edf)
#include <linux/export.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/pci-direct.h>

#ifdef CONFIG_X86_64
# include <asm/numa_64.h>
# include <asm/mmconfig.h>
# include <asm/cacheflush.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_32
/*
 *	B-step AMD K6 CPUs before B 9730xxxx have hardware bugs that can cause
 *	misexecution of code under Linux. Owners of such processors should
 *	contact AMD for precise details and a CPU swap.
 *
 *	See	http://www.multimania.com/poulot/k6bug.html
 *		http://www.amd.com/K6/k6docs/revgd.html
 *
 *	The following test is, erm, interesting. AMD neglected to bump the
 *	chip stepping when fixing the bug, but they also tweaked some
 *	performance at the same time, so the fix has to be detected by timing.
 */

extern void vide(void);
__asm__(".align 4\nvide: ret");
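
/*
 * vide ("empty") is just an aligned RET: a minimal indirect-call target.
 * init_amd_k6() below times a million indirect calls through it with the
 * TSC; on fixed (post-B9730xxxx) parts the loop is fast, on buggy ones it
 * is dramatically slower.  The 20-cycles-per-call threshold used there is
 * a heuristic cutoff, not an architecturally defined number.
 */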

static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
{
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it and changes the CPU speed.
 * Workaround: remove the unneeded alias.
 */
#define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0X000000CB)
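	/*
	 * As programmed below: writing just the unlock key with CBAR_ENB
	 * clear tears down the 0x000df000 alias on Elan models 9 and 10,
	 * so nothing can poke the frequency registers through it anymore.
	 */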
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
}


static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int mbytes = num_physpages >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_mask == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		unsigned long d, d2;

		printk(KERN_INFO "AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */

		n = K6_BUG_LOOP;
		f_vide = vide;
		rdtscl(d);
		while (n--)
			f_vide();
		rdtscl(d2);
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			printk(KERN_CONT
				"system stability may be impaired when more than 32 MB are used.\n");
		else
			printk(KERN_CONT "probably OK (after B9730xxxx).\n");
		printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	   (c->x86_model == 8 && c->x86_mask < 8)) {
		/* We can only write allocate on the low 508MB */
		if (mbytes > 508)
			mbytes = 508;
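		/*
		 * Old-style WHCR, as programmed below: bit 0 enables write
		 * allocation and the bits above it hold the limit in 4MB
		 * units (hence mbytes/4; 508MB is the largest limit
		 * programmed here).  Treat the exact field width as per
		 * AMD's K6 documentation rather than this comment.
		 */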

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling old style K6 write allocation for %d MB\n",
				mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_mask > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;
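		/*
		 * New-style WHCR, as programmed below: bit 16 enables write
		 * allocation and bits 31:22 hold the limit in 4MB units
		 * (mbytes>>2; 4092MB = 1023 * 4MB is the largest value the
		 * 10-bit field can encode).
		 */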

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling new style K6 write allocation for %d MB\n",
				mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
}

static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	/* Only check secondary CPUs; cpu_index 0 is the boot CPU. */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
	    (c->x86_mask == 1)))
		goto valid_k7;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_mask == 0))
		goto valid_k7;

	/*
	 * Athlon 662, Duron 671, and Athlons above model 7 have the MP
	 * capability bit.  It's worth noting that the A5 stepping (662) of
	 * some Athlon XPs has the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has_mp)
			goto valid_k7;

	/* If we get here, it is not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running an SMP kernel on a single non-MP
	 * approved Athlon.
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	if (!test_taint(TAINT_UNSAFE_SMP))
		add_taint(TAINT_UNSAFE_SMP);

valid_k7:
	;
#endif
}

static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
{
	u32 l, h;

	/*
	 * Bit 15 of the Athlon-specific MSR 15 (MSR_K7_HWCR) needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPUs.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
			rdmsr(MSR_K7_HWCR, l, h);
			l &= ~0x00008000;
			wrmsr(MSR_K7_HWCR, l, h);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
	 * as per AMD technical note 27212 0.2.
	 */
	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			printk(KERN_INFO
			    "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
					l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	set_cpu_cap(c, X86_FEATURE_K7);

	amd_k7_smp_check(c);
}
#endif

#ifdef CONFIG_NUMA
/*
 * Works around a broken NUMA config.  Read the comment in
 * srat_detect_node().
 */
static int __cpuinit nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * Fixup core topology information for
 * (1) AMD multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) AMD processors supporting compute units
 */
#ifdef CONFIG_X86_HT
static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
{
	u32 nodes, cores_per_cu = 1;
	u8 node_id;
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		nodes = ((ecx >> 8) & 7) + 1;
		node_id = ecx & 7;
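		/*
		 * As decoded here from CPUID leaf 0x8000001e: ECX[2:0] is
		 * this core's node id and ECX[10:8] is the number of nodes
		 * per processor minus one; EBX[7:0] is the compute unit id
		 * and EBX[9:8] the cores per compute unit minus one (used
		 * just below).
		 */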

		/* get compute unit information */
		smp_num_siblings = ((ebx >> 8) & 3) + 1;
		c->compute_unit_id = ebx & 0xff;
		cores_per_cu += ((ebx >> 8) & 3);
	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		nodes = ((value >> 3) & 7) + 1;
		node_id = value & 7;
	} else
		return;

	/* fixup multi-node processor information */
	if (nodes > 1) {
		u32 cores_per_node;
		u32 cus_per_node;

		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
		cores_per_node = c->x86_max_cores / nodes;
		cus_per_node = cores_per_node / cores_per_cu;

		/* store NodeID, use llc_shared_map to store sibling info */
		per_cpu(cpu_llc_id, cpu) = node_id;

		/* core id has to be in the [0 .. cores_per_node - 1] range */
		c->cpu_core_id %= cores_per_node;
		c->compute_unit_id %= cus_per_node;
	}
}
#endif

/*
 * On an AMD dual-core setup, the lower bits of the APIC ID distinguish the
 * cores.  Assumes the number of cores is a power of two.
 */
static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
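	/*
	 * Worked example with made-up numbers: with x86_coreid_bits == 2
	 * (up to four cores per socket), initial APIC ID 0x6 (0b110) yields
	 * cpu_core_id = 0b10 = 2 and phys_proc_id = 0b1 = 1, i.e. core 2
	 * in socket 1.
	 */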
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
	amd_get_topology(c);
#endif
}

int amd_get_nb_id(int cpu)
{
	int id = 0;
#ifdef CONFIG_SMP
	id = per_cpu(cpu_llc_id, cpu);
#endif
	return id;
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);

static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);

	/*
	 * If core numbers are inconsistent, it's likely a multi-fabric
	 * platform, so invoke the platform-specific handler.
	 */
	if (c->phys_proc_id != node)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created.  In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in.  Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids.  If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming a certain relationship
		 * between APIC ID, HT node ID and NUMA topology.  As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (ht_nodeid >= 0 &&
		    __apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;
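	/*
	 * ECX[15:12] of CPUID 0x80000008 is AMD's ApicIdCoreIdSize field;
	 * zero means the CPU predates the field, so the width is derived
	 * from the core count below instead.
	 */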

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}

static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				printk(KERN_WARNING FW_BUG "TSC doesn't count "
					"with P0 frequency!\n");
		}
	}

	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;

		cpuid	 = cpuid_edx(0x80000005);
		assoc	 = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;
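		/*
		 * EDX of CPUID 0x80000005 describes the L1 instruction
		 * cache: bits 31:24 are its size in KB, bits 23:16 its
		 * associativity.  upperbit is therefore the way size in
		 * bytes (e.g. a 64KB 2-way cache gives 32KB), and the
		 * resulting page-granular mask is used to align mmap()ed
		 * regions so they don't alias in the L1I.
		 */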

		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
		va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;
	}
}

static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd_mc(c);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 means the TSC runs at a
	 * constant rate across P/T-states and does not stop in deep
	 * C-states.
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_mask >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/* check CPU config space for extended APIC ID */
	if (cpu_has_apic && c->x86 >= 0xf) {
		unsigned int val;
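		/*
		 * Bus 0, device 0x18 (24), function 0 is the first AMD
		 * northbridge.  As checked here, bits 17 and 18 of config
		 * register 0x68 must both be set for the extended APIC ID
		 * to be usable; the exact bit semantics are assumed to
		 * follow the family 0x10 BKDG.
		 */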
		val = read_pci_config(0, 24, 0, 0x68);
		if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
	}
#endif
}

static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
	u32 dummy;

#ifdef CONFIG_SMP
	unsigned long long value;

	/*
	 * Disable the TLB flush filter by setting HWCR.FFDIS on K8:
	 * bit 6 of MSR C001_0015.
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 0xf) {
		rdmsrl(MSR_K7_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K7_HWCR, value);
	}
#endif

	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID is used for a nonstandard 3DNow ID;
	 * 3DNow is IDed by bit 31 in extended CPUID (1*32+31) anyway.
	 */
	clear_cpu_cap(c, 0*32+31);

#ifdef CONFIG_X86_64
	/* On C+ stepping K8, rep microcode works well for copy/memset */
	if (c->x86 == 0xf) {
		u32 level;

		level = cpuid_eax(1);
		if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
			set_cpu_cap(c, X86_FEATURE_REP_GOOD);

		/*
		 * Some BIOSes incorrectly force this feature, but only K8
		 * revision D (model = 0x14) and later actually support it.
		 * (AMD Erratum #110, docId: 25759).
		 */
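		/*
		 * MSR 0xc001100d is an undocumented feature-control MSR;
		 * clearing bit 32 is assumed here to hide the LAHF/SAHF
		 * capability from CPUID, so the BIOS-forced bit stays off
		 * after the cap is cleared below.
		 */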
		if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
			u64 val;

			clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
			if (!rdmsrl_amd_safe(0xc001100d, &val)) {
				val &= ~(1ULL << 32);
				wrmsrl_amd_safe(0xc001100d, val);
			}
		}

	}
	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();
#else

	/*
	 *	FIXME: We should handle the K5 here. Set up the write
	 *	range and also turn on MSR 83 bits 4 and 31 (write alloc,
	 *	no bus pipeline)
	 */

	switch (c->x86) {
	case 4:
		init_amd_k5(c);
		break;
	case 5:
		init_amd_k6(c);
		break;
	case 6: /* An Athlon/Duron */
		init_amd_k7(c);
		break;
	}

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);
#endif

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);

	if (!c->x86_model_id[0]) {
		switch (c->x86) {
		case 0xf:
			/* Should distinguish models here, but this is only
			   a fallback anyway. */
			strcpy(c->x86_model_id, "Hammer");
			break;
		}
	}

	cpu_detect_cache_sizes(c);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008) {
		amd_detect_cmp(c);
		srat_detect_node(c);
	}

#ifdef CONFIG_X86_32
	detect_ht(c);
#endif

	if (c->extended_cpuid_level >= 0x80000006) {
		if (cpuid_edx(0x80000006) & 0xf000)
			num_cache_leaves = 4;
		else
			num_cache_leaves = 3;
	}

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	if (cpu_has_xmm2) {
		/* MFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
	}

#ifdef CONFIG_X86_64
	if (c->x86 == 0x10) {
		/* only do this for the boot CPU */
		if (c == &boot_cpu_data)
			check_enable_amd_mmconf_dmi();

		fam10h_check_enable_mmcfg();
	}

	if (c == &boot_cpu_data && c->x86 >= 0xf) {
		unsigned long long tseg;

		/*
		 * Split up direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems very little
		 * benefit in doing so.
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
			printk(KERN_DEBUG "tseg: %010llx\n", tseg);
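			/*
			 * The comparisons below are in 2MB (PMD) units:
			 * tseg>>PMD_SHIFT is the index of TSEG's 2MB page,
			 * and the pfn limits are shifted by
			 * PMD_SHIFT-PAGE_SHIFT to match.  Split if TSEG
			 * lies in the low direct mapping, or above 4GB but
			 * below max_pfn_mapped.
			 */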
			if ((tseg>>PMD_SHIFT) <
				(max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
				((tseg>>PMD_SHIFT) <
				(max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
				(tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
				set_memory_4k((unsigned long)__va(tseg), 1);
		}
	}
#endif

	/*
	 * Family 0x12 and above processors have an APIC timer that keeps
	 * running in deep C-states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here
	 * because this is always needed when GART is enabled, even in a
	 * kernel which has no MCE support built in.
	 */
	if (c->x86 == 0x10) {
		/*
		 * The BIOS should disable GartTlbWlk errors itself. If it
		 * doesn't, do it here, as suggested by the BKDG.
		 *
		 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
		 */
		u64 mask;
		int err;

		err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask);
		if (err == 0) {
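			/*
			 * Bit 10 of the MC4 mask MSR gates GartTlbWlkErr
			 * reporting (bit position taken from this code;
			 * see the BKDG reference above).
			 */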
			mask |= (1 << 10);
			checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
		}
	}

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
}

#ifdef CONFIG_X86_32
static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
							unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_mask == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
			(c->x86_mask == 0 || c->x86_mask == 1))
			size = 256;
	}
	return size;
}
#endif

static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.c_models = {
		{ .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.c_size_cache	= amd_size_cache,
#endif
	.c_early_init   = early_init_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE(). Each erratum also has to be declared as extern const
 * int[] in arch/x86/include/asm/processor.h.
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
EXPORT_SYMBOL_GPL(amd_erratum_400);
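
/*
 * A minimal usage sketch (hypothetical call site; actual callers live
 * elsewhere in the tree):
 *
 *	if (cpu_has_amd_erratum(amd_erratum_400))
 *		apply_e400_workaround();
 *
 * where apply_e400_workaround() is a made-up name for whatever handling
 * the caller needs.
 */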

const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
EXPORT_SYMBOL_GPL(amd_erratum_383);

bool cpu_has_amd_erratum(const int *erratum)
{
	struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
	int osvw_id = *erratum++;
	u32 range;
	u32 ms;

	/*
	 * If called early enough that current_cpu_data hasn't been initialized
	 * yet, fall back to boot_cpu_data.
	 */
	if (cpu->x86 == 0)
		cpu = &boot_cpu_data;

	if (cpu->x86_vendor != X86_VENDOR_AMD)
		return false;

	if (osvw_id >= 0 && osvw_id < 65536 &&
	    cpu_has(cpu, X86_FEATURE_OSVW)) {
		u64 osvw_len;

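		/*
		 * OSVW_ID_LENGTH reports how many OSVW status bits are
		 * valid.  The status bits are packed 64 per MSR starting
		 * at MSR_AMD64_OSVW_STATUS, hence the (osvw_id >> 6) MSR
		 * offset and the (osvw_id & 0x3f) bit index below.
		 */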
		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
		if (osvw_id < osvw_len) {
			u64 osvw_bits;

			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
			    osvw_bits);
			return osvw_bits & (1ULL << (osvw_id & 0x3f));
		}
	}

	/* OSVW unavailable or ID unknown, match family-model-stepping range */
	ms = (cpu->x86_model << 4) | cpu->x86_mask;
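	/*
	 * ms packs model and stepping into one value using the same
	 * encoding as AMD_MODEL_RANGE(): e.g. model 0x2 stepping 0x1 gives
	 * ms = 0x21, which lies inside
	 * AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf) on family 0x10.
	 */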
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
		    (ms <= AMD_MODEL_RANGE_END(range)))
			return true;

	return false;
}
EXPORT_SYMBOL_GPL(cpu_has_amd_erratum);
783