xref: /titanic_50/usr/src/uts/i86pc/os/cpuid.c (revision 05fa0d51e3dcc60bf87a28d2fd544362e368a474)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * Various routines to handle identification
30  * and classification of x86 processors.
31  */
32 
33 #include <sys/types.h>
34 #include <sys/archsystm.h>
35 #include <sys/x86_archext.h>
36 #include <sys/kmem.h>
37 #include <sys/systm.h>
38 #include <sys/cmn_err.h>
39 #include <sys/sunddi.h>
40 #include <sys/sunndi.h>
41 #include <sys/cpuvar.h>
42 #include <sys/processor.h>
43 #include <sys/chip.h>
44 #include <sys/fp.h>
45 #include <sys/controlregs.h>
46 #include <sys/auxv_386.h>
47 #include <sys/bitmap.h>
48 #include <sys/controlregs.h>
49 #include <sys/memnode.h>
50 
51 /*
52  * Pass 0 of cpuid feature analysis happens in locore. It contains special code
53  * to recognize Cyrix processors that are not cpuid-compliant, and to deal with
54  * them accordingly. For most modern processors, feature detection occurs here
55  * in pass 1.
56  *
57  * Pass 1 of cpuid feature analysis happens just at the beginning of mlsetup()
58  * for the boot CPU and does the basic analysis that the early kernel needs.
59  * x86_feature is set based on the return value of cpuid_pass1() of the boot
60  * CPU.
61  *
62  * Pass 1 includes:
63  *
64  *	o Determining vendor/model/family/stepping and setting x86_type and
65  *	  x86_vendor accordingly.
66  *	o Processing the feature flags returned by the cpuid instruction while
67  *	  applying any workarounds or tricks for the specific processor.
68  *	o Mapping the feature flags into Solaris feature bits (X86_*).
69  *	o Processing extended feature flags if supported by the processor,
70  *	  again while applying specific processor knowledge.
71  *	o Determining the CMT characteristics of the system.
72  *
73  * Pass 1 is done on non-boot CPUs during their initialization and the results
74  * are used only as a meager attempt at ensuring that all processors within the
75  * system support the same features.
76  *
77  * Pass 2 of cpuid feature analysis happens just at the beginning
78  * of startup().  It just copies in and corrects the remainder
79  * of the cpuid data we depend on: standard cpuid functions that we didn't
80  * need for pass1 feature analysis, and extended cpuid functions beyond the
81  * simple feature processing done in pass1.
82  *
83  * Pass 3 of cpuid analysis is invoked after basic kernel services; in
84  * particular kernel memory allocation has been made available. It creates a
85  * readable brand string based on the data collected in the first two passes.
86  *
87  * Pass 4 of cpuid analysis is invoked after post_startup() when all
88  * the support infrastructure for various hardware features has been
89  * initialized. It determines which processor features will be reported
90  * to userland via the aux vector.
91  *
92  * All passes are executed on all CPUs, but only the boot CPU determines what
93  * features the kernel will use.
94  *
95  * Much of the worst junk in this file is for the support of processors
96  * that didn't really implement the cpuid instruction properly.
97  *
98  * NOTE: The accessor functions (cpuid_get*) are aware of, and ASSERT upon,
99  * the pass numbers.  Accordingly, changes to the pass code may require changes
100  * to the accessor code.
101  */
102 
103 uint_t x86_feature = 0;
104 uint_t x86_vendor = X86_VENDOR_IntelClone;
105 uint_t x86_type = X86_TYPE_OTHER;
106 
107 ulong_t cr4_value;
108 uint_t pentiumpro_bug4046376;
109 uint_t pentiumpro_bug4064495;
110 
111 uint_t enable486;
112 
113 /*
114  * This set of strings are for processors rumored to support the cpuid
115  * instruction, and is used by locore.s to figure out how to set x86_vendor
116  */
117 const char CyrixInstead[] = "CyrixInstead";
118 
119 /*
120  * These constants determine how many of the elements of the
121  * cpuid we cache in the cpuid_info data structure; the
122  * remaining elements are accessible via the cpuid instruction.
123  */
124 
125 #define	NMAX_CPI_STD	6		/* eax = 0 .. 5 */
126 #define	NMAX_CPI_EXTD	9		/* eax = 0x80000000 .. 0x80000008 */
127 
128 struct cpuid_info {
129 	uint_t cpi_pass;		/* last pass completed */
130 	/*
131 	 * standard function information
132 	 */
133 	uint_t cpi_maxeax;		/* fn 0: %eax */
134 	char cpi_vendorstr[13];		/* fn 0: %ebx:%ecx:%edx */
135 	uint_t cpi_vendor;		/* enum of cpi_vendorstr */
136 
137 	uint_t cpi_family;		/* fn 1: extended family */
138 	uint_t cpi_model;		/* fn 1: extended model */
139 	uint_t cpi_step;		/* fn 1: stepping */
140 	chipid_t cpi_chipid;		/* fn 1: %ebx: chip # on ht cpus */
141 	uint_t cpi_brandid;		/* fn 1: %ebx: brand ID */
142 	int cpi_clogid;			/* fn 1: %ebx: thread # */
143 	uint_t cpi_ncpu_per_chip;	/* fn 1: %ebx: logical cpu count */
144 	uint8_t cpi_cacheinfo[16];	/* fn 2: intel-style cache desc */
145 	uint_t cpi_ncache;		/* fn 2: number of elements */
146 	struct cpuid_regs cpi_std[NMAX_CPI_STD];	/* 0 .. 5 */
147 	/*
148 	 * extended function information
149 	 */
150 	uint_t cpi_xmaxeax;		/* fn 0x80000000: %eax */
151 	char cpi_brandstr[49];		/* fn 0x8000000[234] */
152 	uint8_t cpi_pabits;		/* fn 0x80000006: %eax */
153 	uint8_t cpi_vabits;		/* fn 0x80000006: %eax */
154 	struct cpuid_regs cpi_extd[NMAX_CPI_EXTD]; /* 0x8000000[0-8] */
155 	id_t cpi_coreid;
156 	uint_t cpi_ncore_per_chip;	/* AMD: fn 0x80000008: %ecx[7-0] */
157 					/* Intel: fn 4: %eax[31-26] */
158 	/*
159 	 * supported feature information
160 	 */
161 	uint32_t cpi_support[4];
162 #define	STD_EDX_FEATURES	0
163 #define	AMD_EDX_FEATURES	1
164 #define	TM_EDX_FEATURES		2
165 #define	STD_ECX_FEATURES	3
166 
167 };
168 
169 
170 static struct cpuid_info cpuid_info0;
171 
172 /*
173  * These bit fields are defined by the Intel Application Note AP-485
174  * "Intel Processor Identification and the CPUID Instruction"
175  */
176 #define	CPI_FAMILY_XTD(cpi)	BITX((cpi)->cpi_std[1].cp_eax, 27, 20)
177 #define	CPI_MODEL_XTD(cpi)	BITX((cpi)->cpi_std[1].cp_eax, 19, 16)
178 #define	CPI_TYPE(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 13, 12)
179 #define	CPI_FAMILY(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 11, 8)
180 #define	CPI_STEP(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 3, 0)
181 #define	CPI_MODEL(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 7, 4)
182 
183 #define	CPI_FEATURES_EDX(cpi)		((cpi)->cpi_std[1].cp_edx)
184 #define	CPI_FEATURES_ECX(cpi)		((cpi)->cpi_std[1].cp_ecx)
185 #define	CPI_FEATURES_XTD_EDX(cpi)	((cpi)->cpi_extd[1].cp_edx)
186 #define	CPI_FEATURES_XTD_ECX(cpi)	((cpi)->cpi_extd[1].cp_ecx)
187 
188 #define	CPI_BRANDID(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 7, 0)
189 #define	CPI_CHUNKS(cpi)		BITX((cpi)->cpi_std[1].cp_ebx, 15, 7)
190 #define	CPI_CPU_COUNT(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 23, 16)
191 #define	CPI_APIC_ID(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 31, 24)
192 
193 #define	CPI_MAXEAX_MAX		0x100		/* sanity control */
194 #define	CPI_XMAXEAX_MAX		0x80000100
195 
196 /*
197  *  Some undocumented ways of patching the results of the cpuid
198  *  instruction to permit running Solaris 10 on future cpus that
199  *  we don't currently support.  Could be set to non-zero values
200  *  via settings in eeprom.
201  */
202 
203 uint32_t cpuid_feature_ecx_include;
204 uint32_t cpuid_feature_ecx_exclude;
205 uint32_t cpuid_feature_edx_include;
206 uint32_t cpuid_feature_edx_exclude;
207 
208 uint_t
209 cpuid_pass1(cpu_t *cpu)
210 {
211 	uint32_t mask_ecx, mask_edx;
212 	uint_t feature = X86_CPUID;
213 	struct cpuid_info *cpi;
214 	struct cpuid_regs *cp;
215 	int xcpuid;
216 
217 	/*
218 	 * By convention, cpu0 is the boot cpu, which is called
219 	 * before memory allocation is available.  Other cpus are
220 	 * initialized when memory becomes available.
221 	 */
222 	if (cpu->cpu_id == 0)
223 		cpu->cpu_m.mcpu_cpi = cpi = &cpuid_info0;
224 	else
225 		cpu->cpu_m.mcpu_cpi = cpi =
226 		    kmem_zalloc(sizeof (*cpi), KM_SLEEP);
227 
228 	cp = &cpi->cpi_std[0];
229 	cp->cp_eax = 0;
230 	cpi->cpi_maxeax = __cpuid_insn(cp);
231 	{
232 		uint32_t *iptr = (uint32_t *)cpi->cpi_vendorstr;
233 		*iptr++ = cp->cp_ebx;
234 		*iptr++ = cp->cp_edx;
235 		*iptr++ = cp->cp_ecx;
236 		*(char *)&cpi->cpi_vendorstr[12] = '\0';
237 	}
238 
239 	/*
240 	 * Map the vendor string to a type code
241 	 */
242 	if (strcmp(cpi->cpi_vendorstr, "GenuineIntel") == 0)
243 		cpi->cpi_vendor = X86_VENDOR_Intel;
244 	else if (strcmp(cpi->cpi_vendorstr, "AuthenticAMD") == 0)
245 		cpi->cpi_vendor = X86_VENDOR_AMD;
246 	else if (strcmp(cpi->cpi_vendorstr, "GenuineTMx86") == 0)
247 		cpi->cpi_vendor = X86_VENDOR_TM;
248 	else if (strcmp(cpi->cpi_vendorstr, CyrixInstead) == 0)
249 		/*
250 		 * CyrixInstead is a variable used by the Cyrix detection code
251 		 * in locore.
252 		 */
253 		cpi->cpi_vendor = X86_VENDOR_Cyrix;
254 	else if (strcmp(cpi->cpi_vendorstr, "UMC UMC UMC ") == 0)
255 		cpi->cpi_vendor = X86_VENDOR_UMC;
256 	else if (strcmp(cpi->cpi_vendorstr, "NexGenDriven") == 0)
257 		cpi->cpi_vendor = X86_VENDOR_NexGen;
258 	else if (strcmp(cpi->cpi_vendorstr, "CentaurHauls") == 0)
259 		cpi->cpi_vendor = X86_VENDOR_Centaur;
260 	else if (strcmp(cpi->cpi_vendorstr, "RiseRiseRise") == 0)
261 		cpi->cpi_vendor = X86_VENDOR_Rise;
262 	else if (strcmp(cpi->cpi_vendorstr, "SiS SiS SiS ") == 0)
263 		cpi->cpi_vendor = X86_VENDOR_SiS;
264 	else if (strcmp(cpi->cpi_vendorstr, "Geode by NSC") == 0)
265 		cpi->cpi_vendor = X86_VENDOR_NSC;
266 	else
267 		cpi->cpi_vendor = X86_VENDOR_IntelClone;
268 
269 	x86_vendor = cpi->cpi_vendor; /* for compatibility */
270 
271 	/*
272 	 * Limit the range in case of weird hardware
273 	 */
274 	if (cpi->cpi_maxeax > CPI_MAXEAX_MAX)
275 		cpi->cpi_maxeax = CPI_MAXEAX_MAX;
276 	if (cpi->cpi_maxeax < 1)
277 		goto pass1_done;
278 
279 	cp = &cpi->cpi_std[1];
280 	cp->cp_eax = 1;
281 	(void) __cpuid_insn(cp);
282 
283 	/*
284 	 * Extract identifying constants for easy access.
285 	 */
286 	cpi->cpi_model = CPI_MODEL(cpi);
287 	cpi->cpi_family = CPI_FAMILY(cpi);
288 
289 	if (cpi->cpi_family == 0xf) {
290 		cpi->cpi_family += CPI_FAMILY_XTD(cpi);
291 		cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
292 	}
293 
294 	cpi->cpi_step = CPI_STEP(cpi);
295 	cpi->cpi_brandid = CPI_BRANDID(cpi);
296 
297 	/*
298 	 * *default* assumptions:
299 	 * - believe %edx feature word
300 	 * - ignore %ecx feature word
301 	 * - 32-bit virtual and physical addressing
302 	 */
303 	mask_edx = 0xffffffff;
304 	mask_ecx = 0;
305 
306 	cpi->cpi_pabits = cpi->cpi_vabits = 32;
307 
308 	switch (cpi->cpi_vendor) {
309 	case X86_VENDOR_Intel:
310 		if (cpi->cpi_family == 5)
311 			x86_type = X86_TYPE_P5;
312 		else if (cpi->cpi_family == 6) {
313 			x86_type = X86_TYPE_P6;
314 			pentiumpro_bug4046376 = 1;
315 			pentiumpro_bug4064495 = 1;
316 			/*
317 			 * Clear the SEP bit when it was set erroneously
318 			 */
319 			if (cpi->cpi_model < 3 && cpi->cpi_step < 3)
320 				cp->cp_edx &= ~CPUID_INTC_EDX_SEP;
321 		} else if (cpi->cpi_family == 0xf) {
322 			x86_type = X86_TYPE_P4;
323 			/*
324 			 * We don't currently depend on any of the %ecx
325 			 * features until Prescott, so we'll only check
326 			 * this from P4 onwards.  We might want to revisit
327 			 * that idea later.
328 			 */
329 			mask_ecx = 0xffffffff;
330 		} else if (cpi->cpi_family > 0xf)
331 			mask_ecx = 0xffffffff;
332 		break;
333 	case X86_VENDOR_IntelClone:
334 	default:
335 		break;
336 	case X86_VENDOR_AMD:
337 #if defined(OPTERON_ERRATUM_108)
338 		if (cpi->cpi_family == 0xf && cpi->cpi_model == 0xe) {
339 			cp->cp_eax = (0xf0f & cp->cp_eax) | 0xc0;
340 			cpi->cpi_model = 0xc;
341 		} else
342 #endif
343 		if (cpi->cpi_family == 5) {
344 			/*
345 			 * AMD K5 and K6
346 			 *
347 			 * These CPUs have an incomplete implementation
348 			 * of MCA/MCE which we mask away.
349 			 */
350 			mask_edx &= ~(CPUID_INTC_EDX_MCE | CPUID_INTC_EDX_MCA);
351 
352 			/*
353 			 * Model 0 uses the wrong (APIC) bit
354 			 * to indicate PGE.  Fix it here.
355 			 */
356 			if (cpi->cpi_model == 0) {
357 				if (cp->cp_edx & 0x200) {
358 					cp->cp_edx &= ~0x200;
359 					cp->cp_edx |= CPUID_INTC_EDX_PGE;
360 				}
361 			}
362 
363 			/*
364 			 * Early models had problems w/ MMX; disable.
365 			 */
366 			if (cpi->cpi_model < 6)
367 				mask_edx &= ~CPUID_INTC_EDX_MMX;
368 		}
369 
370 		/*
371 		 * For newer families, SSE3 and CX16, at least, are valid;
372 		 * enable all
373 		 */
374 		if (cpi->cpi_family >= 0xf)
375 			mask_ecx = 0xffffffff;
376 		break;
377 	case X86_VENDOR_TM:
378 		/*
379 		 * workaround the NT workaround in CMS 4.1
380 		 */
381 		if (cpi->cpi_family == 5 && cpi->cpi_model == 4 &&
382 		    (cpi->cpi_step == 2 || cpi->cpi_step == 3))
383 			cp->cp_edx |= CPUID_INTC_EDX_CX8;
384 		break;
385 	case X86_VENDOR_Centaur:
386 		/*
387 		 * workaround the NT workarounds again
388 		 */
389 		if (cpi->cpi_family == 6)
390 			cp->cp_edx |= CPUID_INTC_EDX_CX8;
391 		break;
392 	case X86_VENDOR_Cyrix:
393 		/*
394 		 * We rely heavily on the probing in locore
395 		 * to actually figure out what parts, if any,
396 		 * of the Cyrix cpuid instruction to believe.
397 		 */
398 		switch (x86_type) {
399 		case X86_TYPE_CYRIX_486:
400 			mask_edx = 0;
401 			break;
402 		case X86_TYPE_CYRIX_6x86:
403 			mask_edx = 0;
404 			break;
405 		case X86_TYPE_CYRIX_6x86L:
406 			mask_edx =
407 			    CPUID_INTC_EDX_DE |
408 			    CPUID_INTC_EDX_CX8;
409 			break;
410 		case X86_TYPE_CYRIX_6x86MX:
411 			mask_edx =
412 			    CPUID_INTC_EDX_DE |
413 			    CPUID_INTC_EDX_MSR |
414 			    CPUID_INTC_EDX_CX8 |
415 			    CPUID_INTC_EDX_PGE |
416 			    CPUID_INTC_EDX_CMOV |
417 			    CPUID_INTC_EDX_MMX;
418 			break;
419 		case X86_TYPE_CYRIX_GXm:
420 			mask_edx =
421 			    CPUID_INTC_EDX_MSR |
422 			    CPUID_INTC_EDX_CX8 |
423 			    CPUID_INTC_EDX_CMOV |
424 			    CPUID_INTC_EDX_MMX;
425 			break;
426 		case X86_TYPE_CYRIX_MediaGX:
427 			break;
428 		case X86_TYPE_CYRIX_MII:
429 		case X86_TYPE_VIA_CYRIX_III:
430 			mask_edx =
431 			    CPUID_INTC_EDX_DE |
432 			    CPUID_INTC_EDX_TSC |
433 			    CPUID_INTC_EDX_MSR |
434 			    CPUID_INTC_EDX_CX8 |
435 			    CPUID_INTC_EDX_PGE |
436 			    CPUID_INTC_EDX_CMOV |
437 			    CPUID_INTC_EDX_MMX;
438 			break;
439 		default:
440 			break;
441 		}
442 		break;
443 	}
444 
445 	/*
446 	 * Now we've figured out the masks that determine
447 	 * which bits we choose to believe, apply the masks
448 	 * to the feature words, then map the kernel's view
449 	 * of these feature words into its feature word.
450 	 */
451 	cp->cp_edx &= mask_edx;
452 	cp->cp_ecx &= mask_ecx;
453 
454 	/*
455 	 * fold in fix ups
456 	 */
457 
458 	cp->cp_edx |= cpuid_feature_edx_include;
459 	cp->cp_edx &= ~cpuid_feature_edx_exclude;
460 
461 
462 	cp->cp_ecx |= cpuid_feature_ecx_include;
463 	cp->cp_ecx &= ~cpuid_feature_ecx_exclude;
464 
465 	if (cp->cp_edx & CPUID_INTC_EDX_PSE)
466 		feature |= X86_LARGEPAGE;
467 	if (cp->cp_edx & CPUID_INTC_EDX_TSC)
468 		feature |= X86_TSC;
469 	if (cp->cp_edx & CPUID_INTC_EDX_MSR)
470 		feature |= X86_MSR;
471 	if (cp->cp_edx & CPUID_INTC_EDX_MTRR)
472 		feature |= X86_MTRR;
473 	if (cp->cp_edx & CPUID_INTC_EDX_PGE)
474 		feature |= X86_PGE;
475 	if (cp->cp_edx & CPUID_INTC_EDX_CMOV)
476 		feature |= X86_CMOV;
477 	if (cp->cp_edx & CPUID_INTC_EDX_MMX)
478 		feature |= X86_MMX;
479 	if ((cp->cp_edx & CPUID_INTC_EDX_MCE) != 0 &&
480 	    (cp->cp_edx & CPUID_INTC_EDX_MCA) != 0)
481 		feature |= X86_MCA;
482 	if (cp->cp_edx & CPUID_INTC_EDX_PAE)
483 		feature |= X86_PAE;
484 	if (cp->cp_edx & CPUID_INTC_EDX_CX8)
485 		feature |= X86_CX8;
486 	/*
487 	 * Once this bit was thought questionable, but it looks like it's
488 	 * back, as of Application Note 485 March 2005 (24161829.pdf)
489 	 */
490 	if (cp->cp_ecx & CPUID_INTC_ECX_CX16)
491 		feature |= X86_CX16;
492 	if (cp->cp_edx & CPUID_INTC_EDX_PAT)
493 		feature |= X86_PAT;
494 	if (cp->cp_edx & CPUID_INTC_EDX_SEP)
495 		feature |= X86_SEP;
496 	if (cp->cp_edx & CPUID_INTC_EDX_FXSR) {
497 		/*
498 		 * In our implementation, fxsave/fxrstor
499 		 * are prerequisites before we'll even
500 		 * try and do SSE things.
501 		 */
502 		if (cp->cp_edx & CPUID_INTC_EDX_SSE)
503 			feature |= X86_SSE;
504 		if (cp->cp_edx & CPUID_INTC_EDX_SSE2)
505 			feature |= X86_SSE2;
506 		if (cp->cp_ecx & CPUID_INTC_ECX_SSE3)
507 			feature |= X86_SSE3;
508 	}
509 	if (cp->cp_edx & CPUID_INTC_EDX_DE)
510 		cr4_value |= CR4_DE;
511 
512 	if (feature & X86_PAE)
513 		cpi->cpi_pabits = 36;
514 
515 	/*
516 	 * Hyperthreading configuration is slightly tricky on Intel
517 	 * and pure clones, and even trickier on AMD.
518 	 *
519 	 * (AMD chose to set the HTT bit on their CMP processors,
520 	 * even though they're not actually hyperthreaded.  Thus it
521 	 * takes a bit more work to figure out what's really going
522 	 * on ... see the handling of the CMP_LEGACY bit below)
523 	 */
524 	if (cp->cp_edx & CPUID_INTC_EDX_HTT) {
525 		cpi->cpi_ncpu_per_chip = CPI_CPU_COUNT(cpi);
526 		if (cpi->cpi_ncpu_per_chip > 1)
527 			feature |= X86_HTT;
528 	} else {
529 		cpi->cpi_ncpu_per_chip = 1;
530 	}
531 
532 	/*
533 	 * Work on the "extended" feature information, doing
534 	 * some basic initialization for cpuid_pass2()
535 	 */
536 	xcpuid = 0;
537 	switch (cpi->cpi_vendor) {
538 	case X86_VENDOR_Intel:
539 		if (cpi->cpi_family >= 0xf)
540 			xcpuid++;
541 		break;
542 	case X86_VENDOR_AMD:
543 		if (cpi->cpi_family > 5 ||
544 		    (cpi->cpi_family == 5 && cpi->cpi_model >= 1))
545 			xcpuid++;
546 		break;
547 	case X86_VENDOR_Cyrix:
548 		/*
549 		 * Only these Cyrix CPUs are -known- to support
550 		 * extended cpuid operations.
551 		 */
552 		if (x86_type == X86_TYPE_VIA_CYRIX_III ||
553 		    x86_type == X86_TYPE_CYRIX_GXm)
554 			xcpuid++;
555 		break;
556 	case X86_VENDOR_Centaur:
557 	case X86_VENDOR_TM:
558 	default:
559 		xcpuid++;
560 		break;
561 	}
562 
563 	if (xcpuid) {
564 		cp = &cpi->cpi_extd[0];
565 		cp->cp_eax = 0x80000000;
566 		cpi->cpi_xmaxeax = __cpuid_insn(cp);
567 	}
568 
569 	if (cpi->cpi_xmaxeax & 0x80000000) {
570 
571 		if (cpi->cpi_xmaxeax > CPI_XMAXEAX_MAX)
572 			cpi->cpi_xmaxeax = CPI_XMAXEAX_MAX;
573 
574 		switch (cpi->cpi_vendor) {
575 		case X86_VENDOR_Intel:
576 		case X86_VENDOR_AMD:
577 			if (cpi->cpi_xmaxeax < 0x80000001)
578 				break;
579 			cp = &cpi->cpi_extd[1];
580 			cp->cp_eax = 0x80000001;
581 			(void) __cpuid_insn(cp);
582 			if (cpi->cpi_vendor == X86_VENDOR_AMD &&
583 			    cpi->cpi_family == 5 &&
584 			    cpi->cpi_model == 6 &&
585 			    cpi->cpi_step == 6) {
586 				/*
587 				 * K6 model 6 uses bit 10 to indicate SYSC
588 				 * Later models use bit 11. Fix it here.
589 				 */
590 				if (cp->cp_edx & 0x400) {
591 					cp->cp_edx &= ~0x400;
592 					cp->cp_edx |= CPUID_AMD_EDX_SYSC;
593 				}
594 			}
595 
596 			/*
597 			 * Compute the additions to the kernel's feature word.
598 			 */
599 			if (cp->cp_edx & CPUID_AMD_EDX_NX)
600 				feature |= X86_NX;
601 
602 			/*
603 			 * If both the HTT and CMP_LEGACY bits are set,
604 			 * then we're not actually HyperThreaded.  Read
605 			 * "AMD CPUID Specification" for more details.
606 			 */
607 			if (cpi->cpi_vendor == X86_VENDOR_AMD &&
608 			    (feature & X86_HTT) &&
609 			    (cp->cp_ecx & CPUID_AMD_ECX_CMP_LEGACY)) {
610 				feature &= ~X86_HTT;
611 				feature |= X86_CMP;
612 			}
613 #if defined(_LP64)
614 			/*
615 			 * It's really tricky to support syscall/sysret in
616 			 * the i386 kernel; we rely on sysenter/sysexit
617 			 * instead.  In the amd64 kernel, things are -way-
618 			 * better.
619 			 */
620 			if (cp->cp_edx & CPUID_AMD_EDX_SYSC)
621 				feature |= X86_ASYSC;
622 
623 			/*
624 			 * While we're thinking about system calls, note
625 			 * that AMD processors don't support sysenter
626 			 * in long mode at all, so don't try to program them.
627 			 */
628 			if (x86_vendor == X86_VENDOR_AMD)
629 				feature &= ~X86_SEP;
630 #endif
631 			break;
632 		default:
633 			break;
634 		}
635 
636 		/*
637 		 * Get CPUID data about processor cores and hyperthreads.
638 		 */
639 		switch (cpi->cpi_vendor) {
640 		case X86_VENDOR_Intel:
641 			if (cpi->cpi_maxeax >= 4) {
642 				cp = &cpi->cpi_std[4];
643 				cp->cp_eax = 4;
644 				cp->cp_ecx = 0;
645 				(void) __cpuid_insn(cp);
646 			}
647 			/*FALLTHROUGH*/
648 		case X86_VENDOR_AMD:
649 			if (cpi->cpi_xmaxeax < 0x80000008)
650 				break;
651 			cp = &cpi->cpi_extd[8];
652 			cp->cp_eax = 0x80000008;
653 			(void) __cpuid_insn(cp);
654 			/*
655 			 * Virtual and physical address limits from
656 			 * cpuid override previously guessed values.
657 			 */
658 			cpi->cpi_pabits = BITX(cp->cp_eax, 7, 0);
659 			cpi->cpi_vabits = BITX(cp->cp_eax, 15, 8);
660 			break;
661 		default:
662 			break;
663 		}
664 
665 		switch (cpi->cpi_vendor) {
666 		case X86_VENDOR_Intel:
667 			if (cpi->cpi_maxeax < 4) {
668 				cpi->cpi_ncore_per_chip = 1;
669 				break;
670 			} else {
671 				cpi->cpi_ncore_per_chip =
672 				    BITX((cpi)->cpi_std[4].cp_eax, 31, 26) + 1;
673 			}
674 			break;
675 		case X86_VENDOR_AMD:
676 			if (cpi->cpi_xmaxeax < 0x80000008) {
677 				cpi->cpi_ncore_per_chip = 1;
678 				break;
679 			} else {
680 				cpi->cpi_ncore_per_chip =
681 				    BITX((cpi)->cpi_extd[8].cp_ecx, 7, 0) + 1;
682 			}
683 			break;
684 		default:
685 			cpi->cpi_ncore_per_chip = 1;
686 			break;
687 		}
688 
689 	}
690 
691 	/*
692 	 * If more than one core, then this processor is CMP.
693 	 */
694 	if (cpi->cpi_ncore_per_chip > 1)
695 		feature |= X86_CMP;
696 	/*
697 	 * If the number of cores is the same as the number
698 	 * of CPUs, then we cannot have HyperThreading.
699 	 */
700 	if (cpi->cpi_ncpu_per_chip == cpi->cpi_ncore_per_chip)
701 		feature &= ~X86_HTT;
702 
703 	if ((feature & (X86_HTT | X86_CMP)) == 0) {
704 		/*
705 		 * Single-core single-threaded processors.
706 		 */
707 		cpi->cpi_chipid = -1;
708 		cpi->cpi_clogid = 0;
709 		cpi->cpi_coreid = cpu->cpu_id;
710 	} else if (cpi->cpi_ncpu_per_chip > 1) {
711 		uint_t i;
712 		uint_t chipid_shift = 0;
713 		uint_t coreid_shift = 0;
714 		uint_t apic_id = CPI_APIC_ID(cpi);
715 
716 		for (i = 1; i < cpi->cpi_ncpu_per_chip; i <<= 1)
717 			chipid_shift++;
718 		cpi->cpi_chipid = apic_id >> chipid_shift;
719 		cpi->cpi_clogid = apic_id & ((1 << chipid_shift) - 1);
720 
721 		if (cpi->cpi_vendor == X86_VENDOR_Intel) {
722 			if (feature & X86_CMP) {
723 				/*
724 				 * Multi-core (and possibly multi-threaded)
725 				 * processors.
726 				 */
727 				uint_t ncpu_per_core;
728 				if (cpi->cpi_ncore_per_chip == 1)
729 					ncpu_per_core = cpi->cpi_ncpu_per_chip;
730 				else if (cpi->cpi_ncore_per_chip > 1)
731 					ncpu_per_core = cpi->cpi_ncpu_per_chip /
732 					    cpi->cpi_ncore_per_chip;
733 				/*
734 				 * 8bit APIC IDs on dual core Pentiums
735 				 * look like this:
736 				 *
737 				 * +-----------------------+------+------+
738 				 * | Physical Package ID   |  MC  |  HT  |
739 				 * +-----------------------+------+------+
740 				 * <------- chipid -------->
741 				 * <------- coreid --------------->
742 				 *			   <--- clogid -->
743 				 *
744 				 * Where the number of bits necessary to
745 				 * represent MC and HT fields together equals
746 				 * to the minimum number of bits necessary to
747 				 * store the value of cpi->cpi_ncpu_per_chip.
748 				 * Of those bits, the MC part uses the number
749 				 * of bits necessary to store the value of
750 				 * cpi->cpi_ncore_per_chip.
751 				 */
752 				for (i = 1; i < ncpu_per_core; i <<= 1)
753 					coreid_shift++;
754 				cpi->cpi_coreid =
755 				    apic_id & ((1 << coreid_shift) - 1);
756 			} else if (feature & X86_HTT) {
757 				/*
758 				 * Single-core multi-threaded processors.
759 				 */
760 				cpi->cpi_coreid = cpi->cpi_chipid;
761 			}
762 		} else if (cpi->cpi_vendor == X86_VENDOR_AMD) {
763 			/*
764 			 * AMD currently only has dual-core processors with
765 			 * single-threaded cores.  If they ever release
766 			 * multi-threaded processors, then this code
767 			 * will have to be updated.
768 			 */
769 			cpi->cpi_coreid = cpu->cpu_id;
770 		} else {
771 			/*
772 			 * All other processors are currently
773 			 * assumed to have single cores.
774 			 */
775 			cpi->cpi_coreid = cpi->cpi_chipid;
776 		}
777 	}
778 
779 pass1_done:
780 	cpi->cpi_pass = 1;
781 	return (feature);
782 }
783 
784 /*
785  * Make copies of the cpuid table entries we depend on, in
786  * part for ease of parsing now, in part so that we have only
787  * one place to correct any of it, in part for ease of
788  * later export to userland, and in part so we can look at
789  * this stuff in a crash dump.
790  */
791 
792 /*ARGSUSED*/
793 void
794 cpuid_pass2(cpu_t *cpu)
795 {
796 	uint_t n, nmax;
797 	int i;
798 	struct cpuid_regs *cp;
799 	uint8_t *dp;
800 	uint32_t *iptr;
801 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
802 
803 	ASSERT(cpi->cpi_pass == 1);
804 
805 	if (cpi->cpi_maxeax < 1)
806 		goto pass2_done;
807 
808 	if ((nmax = cpi->cpi_maxeax + 1) > NMAX_CPI_STD)
809 		nmax = NMAX_CPI_STD;
810 	/*
811 	 * (We already handled n == 0 and n == 1 in pass 1)
812 	 */
813 	for (n = 2, cp = &cpi->cpi_std[2]; n < nmax; n++, cp++) {
814 		cp->cp_eax = n;
815 		(void) __cpuid_insn(cp);
816 		switch (n) {
817 		case 2:
818 			/*
819 			 * "the lower 8 bits of the %eax register
820 			 * contain a value that identifies the number
821 			 * of times the cpuid [instruction] has to be
822 			 * executed to obtain a complete image of the
823 			 * processor's caching systems."
824 			 *
825 			 * How *do* they make this stuff up?
826 			 */
827 			cpi->cpi_ncache = sizeof (*cp) *
828 			    BITX(cp->cp_eax, 7, 0);
829 			if (cpi->cpi_ncache == 0)
830 				break;
831 			cpi->cpi_ncache--;	/* skip count byte */
832 
833 			/*
834 			 * Well, for now, rather than attempt to implement
835 			 * this slightly dubious algorithm, we just look
836 			 * at the first 15 ..
837 			 */
838 			if (cpi->cpi_ncache > (sizeof (*cp) - 1))
839 				cpi->cpi_ncache = sizeof (*cp) - 1;
840 
841 			dp = cpi->cpi_cacheinfo;
842 			if (BITX(cp->cp_eax, 31, 31) == 0) {
843 				uint8_t *p = (void *)&cp->cp_eax;
844 				for (i = 1; i < 3; i++)
845 					if (p[i] != 0)
846 						*dp++ = p[i];
847 			}
848 			if (BITX(cp->cp_ebx, 31, 31) == 0) {
849 				uint8_t *p = (void *)&cp->cp_ebx;
850 				for (i = 0; i < 4; i++)
851 					if (p[i] != 0)
852 						*dp++ = p[i];
853 			}
854 			if (BITX(cp->cp_ecx, 31, 31) == 0) {
855 				uint8_t *p = (void *)&cp->cp_ecx;
856 				for (i = 0; i < 4; i++)
857 					if (p[i] != 0)
858 						*dp++ = p[i];
859 			}
860 			if (BITX(cp->cp_edx, 31, 31) == 0) {
861 				uint8_t *p = (void *)&cp->cp_edx;
862 				for (i = 0; i < 4; i++)
863 					if (p[i] != 0)
864 						*dp++ = p[i];
865 			}
866 			break;
867 		case 3:	/* Processor serial number, if PSN supported */
868 		case 4:	/* Deterministic cache parameters */
869 		case 5:	/* Monitor/Mwait parameters */
870 		default:
871 			break;
872 		}
873 	}
874 
875 	if ((cpi->cpi_xmaxeax & 0x80000000) == 0)
876 		goto pass2_done;
877 
878 	if ((nmax = cpi->cpi_xmaxeax - 0x80000000 + 1) > NMAX_CPI_EXTD)
879 		nmax = NMAX_CPI_EXTD;
880 	/*
881 	 * Copy the extended properties, fixing them as we go.
882 	 * (We already handled n == 0 and n == 1 in pass 1)
883 	 */
884 	iptr = (void *)cpi->cpi_brandstr;
885 	for (n = 2, cp = &cpi->cpi_extd[2]; n < nmax; cp++, n++) {
886 		cp->cp_eax = 0x80000000 + n;
887 		(void) __cpuid_insn(cp);
888 		switch (n) {
889 		case 2:
890 		case 3:
891 		case 4:
892 			/*
893 			 * Extract the brand string
894 			 */
895 			*iptr++ = cp->cp_eax;
896 			*iptr++ = cp->cp_ebx;
897 			*iptr++ = cp->cp_ecx;
898 			*iptr++ = cp->cp_edx;
899 			break;
900 		case 5:
901 			switch (cpi->cpi_vendor) {
902 			case X86_VENDOR_AMD:
903 				/*
904 				 * The Athlon and Duron were the first
905 				 * parts to report the sizes of the
906 				 * TLB for large pages. Before then,
907 				 * we don't trust the data.
908 				 */
909 				if (cpi->cpi_family < 6 ||
910 				    (cpi->cpi_family == 6 &&
911 				    cpi->cpi_model < 1))
912 					cp->cp_eax = 0;
913 				break;
914 			default:
915 				break;
916 			}
917 			break;
918 		case 6:
919 			switch (cpi->cpi_vendor) {
920 			case X86_VENDOR_AMD:
921 				/*
922 				 * The Athlon and Duron were the first
923 				 * AMD parts with L2 TLB's.
924 				 * Before then, don't trust the data.
925 				 */
926 				if (cpi->cpi_family < 6 ||
927 				    cpi->cpi_family == 6 &&
928 				    cpi->cpi_model < 1)
929 					cp->cp_eax = cp->cp_ebx = 0;
930 				/*
931 				 * AMD Duron rev A0 reports L2
932 				 * cache size incorrectly as 1K
933 				 * when it is really 64K
934 				 */
935 				if (cpi->cpi_family == 6 &&
936 				    cpi->cpi_model == 3 &&
937 				    cpi->cpi_step == 0) {
938 					cp->cp_ecx &= 0xffff;
939 					cp->cp_ecx |= 0x400000;
940 				}
941 				break;
942 			case X86_VENDOR_Cyrix:	/* VIA C3 */
943 				/*
944 				 * VIA C3 processors are a bit messed
945 				 * up w.r.t. encoding cache sizes in %ecx
946 				 */
947 				if (cpi->cpi_family != 6)
948 					break;
949 				/*
950 				 * model 7 and 8 were incorrectly encoded
951 				 *
952 				 * xxx is model 8 really broken?
953 				 */
954 				if (cpi->cpi_model == 7 ||
955 				    cpi->cpi_model == 8)
956 					cp->cp_ecx =
957 					    BITX(cp->cp_ecx, 31, 24) << 16 |
958 					    BITX(cp->cp_ecx, 23, 16) << 12 |
959 					    BITX(cp->cp_ecx, 15, 8) << 8 |
960 					    BITX(cp->cp_ecx, 7, 0);
961 				/*
962 				 * model 9 stepping 1 has wrong associativity
963 				 */
964 				if (cpi->cpi_model == 9 && cpi->cpi_step == 1)
965 					cp->cp_ecx |= 8 << 12;
966 				break;
967 			case X86_VENDOR_Intel:
968 				/*
969 				 * Extended L2 Cache features function.
970 				 * First appeared on Prescott.
971 				 */
972 			default:
973 				break;
974 			}
975 			break;
976 		default:
977 			break;
978 		}
979 	}
980 
981 pass2_done:
982 	cpi->cpi_pass = 2;
983 }
984 
985 static const char *
986 intel_cpubrand(const struct cpuid_info *cpi)
987 {
988 	int i;
989 
990 	if ((x86_feature & X86_CPUID) == 0 ||
991 	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5)
992 		return ("i486");
993 
994 	switch (cpi->cpi_family) {
995 	case 5:
996 		return ("Intel Pentium(r)");
997 	case 6:
998 		switch (cpi->cpi_model) {
999 			uint_t celeron, xeon;
1000 			const struct cpuid_regs *cp;
1001 		case 0:
1002 		case 1:
1003 		case 2:
1004 			return ("Intel Pentium(r) Pro");
1005 		case 3:
1006 		case 4:
1007 			return ("Intel Pentium(r) II");
1008 		case 6:
1009 			return ("Intel Celeron(r)");
1010 		case 5:
1011 		case 7:
1012 			celeron = xeon = 0;
1013 			cp = &cpi->cpi_std[2];	/* cache info */
1014 
1015 			for (i = 1; i < 3; i++) {
1016 				uint_t tmp;
1017 
1018 				tmp = (cp->cp_eax >> (8 * i)) & 0xff;
1019 				if (tmp == 0x40)
1020 					celeron++;
1021 				if (tmp >= 0x44 && tmp <= 0x45)
1022 					xeon++;
1023 			}
1024 
1025 			for (i = 0; i < 2; i++) {
1026 				uint_t tmp;
1027 
1028 				tmp = (cp->cp_ebx >> (8 * i)) & 0xff;
1029 				if (tmp == 0x40)
1030 					celeron++;
1031 				else if (tmp >= 0x44 && tmp <= 0x45)
1032 					xeon++;
1033 			}
1034 
1035 			for (i = 0; i < 4; i++) {
1036 				uint_t tmp;
1037 
1038 				tmp = (cp->cp_ecx >> (8 * i)) & 0xff;
1039 				if (tmp == 0x40)
1040 					celeron++;
1041 				else if (tmp >= 0x44 && tmp <= 0x45)
1042 					xeon++;
1043 			}
1044 
1045 			for (i = 0; i < 4; i++) {
1046 				uint_t tmp;
1047 
1048 				tmp = (cp->cp_edx >> (8 * i)) & 0xff;
1049 				if (tmp == 0x40)
1050 					celeron++;
1051 				else if (tmp >= 0x44 && tmp <= 0x45)
1052 					xeon++;
1053 			}
1054 
1055 			if (celeron)
1056 				return ("Intel Celeron(r)");
1057 			if (xeon)
1058 				return (cpi->cpi_model == 5 ?
1059 				    "Intel Pentium(r) II Xeon(tm)" :
1060 				    "Intel Pentium(r) III Xeon(tm)");
1061 			return (cpi->cpi_model == 5 ?
1062 			    "Intel Pentium(r) II or Pentium(r) II Xeon(tm)" :
1063 			    "Intel Pentium(r) III or Pentium(r) III Xeon(tm)");
1064 		default:
1065 			break;
1066 		}
1067 	default:
1068 		break;
1069 	}
1070 
1071 	if (cpi->cpi_family <= 0xf && cpi->cpi_model <= 0xf &&
1072 	    cpi->cpi_brandid != 0) {
1073 		static const struct {
1074 			uint_t bt_bid;
1075 			const char *bt_str;
1076 		} brand_tbl[] = {
1077 			{ 0x1,	"Intel(r) Celeron(r)" },
1078 			{ 0x2,	"Intel(r) Pentium(r) III" },
1079 			{ 0x3,	"Intel(r) Pentium(r) III Xeon(tm)" },
1080 			{ 0x4,	"Intel(r) Pentium(r) III" },
1081 			{ 0x6,	"Mobile Intel(r) Pentium(r) III" },
1082 			{ 0x7,	"Mobile Intel(r) Celeron(r)" },
1083 			{ 0x8,	"Intel(r) Pentium(r) 4" },
1084 			{ 0x9,	"Intel(r) Pentium(r) 4" },
1085 			{ 0xa,	"Intel(r) Celeron(r)" },
1086 			{ 0xb,	"Intel(r) Xeon(tm)" },
1087 			{ 0xc,	"Intel(r) Xeon(tm) MP" },
1088 			{ 0xe,	"Mobile Intel(r) Pentium(r) 4" },
1089 			{ 0xf,	"Mobile Intel(r) Celeron(r)" }
1090 		};
1091 		uint_t btblmax = sizeof (brand_tbl) / sizeof (brand_tbl[0]);
1092 		uint_t sgn;
1093 
1094 		sgn = (cpi->cpi_family << 8) |
1095 		    (cpi->cpi_model << 4) | cpi->cpi_step;
1096 
1097 		for (i = 0; i < btblmax; i++)
1098 			if (brand_tbl[i].bt_bid == cpi->cpi_brandid)
1099 				break;
1100 		if (i < btblmax) {
1101 			if (sgn == 0x6b1 && cpi->cpi_brandid == 3)
1102 				return ("Intel(r) Celeron(r)");
1103 			if (sgn < 0xf13 && cpi->cpi_brandid == 0xb)
1104 				return ("Intel(r) Xeon(tm) MP");
1105 			if (sgn < 0xf13 && cpi->cpi_brandid == 0xe)
1106 				return ("Intel(r) Xeon(tm)");
1107 			return (brand_tbl[i].bt_str);
1108 		}
1109 	}
1110 
1111 	return (NULL);
1112 }
1113 
1114 static const char *
1115 amd_cpubrand(const struct cpuid_info *cpi)
1116 {
1117 	if ((x86_feature & X86_CPUID) == 0 ||
1118 	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5)
1119 		return ("i486 compatible");
1120 
1121 	switch (cpi->cpi_family) {
1122 	case 5:
1123 		switch (cpi->cpi_model) {
1124 		case 0:
1125 		case 1:
1126 		case 2:
1127 		case 3:
1128 		case 4:
1129 		case 5:
1130 			return ("AMD-K5(r)");
1131 		case 6:
1132 		case 7:
1133 			return ("AMD-K6(r)");
1134 		case 8:
1135 			return ("AMD-K6(r)-2");
1136 		case 9:
1137 			return ("AMD-K6(r)-III");
1138 		default:
1139 			return ("AMD (family 5)");
1140 		}
1141 	case 6:
1142 		switch (cpi->cpi_model) {
1143 		case 1:
1144 			return ("AMD-K7(tm)");
1145 		case 0:
1146 		case 2:
1147 		case 4:
1148 			return ("AMD Athlon(tm)");
1149 		case 3:
1150 		case 7:
1151 			return ("AMD Duron(tm)");
1152 		case 6:
1153 		case 8:
1154 		case 10:
1155 			/*
1156 			 * Use the L2 cache size to distinguish
1157 			 */
1158 			return ((cpi->cpi_extd[6].cp_ecx >> 16) >= 256 ?
1159 			    "AMD Athlon(tm)" : "AMD Duron(tm)");
1160 		default:
1161 			return ("AMD (family 6)");
1162 		}
1163 	default:
1164 		break;
1165 	}
1166 
1167 	if (cpi->cpi_family == 0xf && cpi->cpi_model == 5 &&
1168 	    cpi->cpi_brandid != 0) {
1169 		switch (BITX(cpi->cpi_brandid, 7, 5)) {
1170 		case 3:
1171 			return ("AMD Opteron(tm) UP 1xx");
1172 		case 4:
1173 			return ("AMD Opteron(tm) DP 2xx");
1174 		case 5:
1175 			return ("AMD Opteron(tm) MP 8xx");
1176 		default:
1177 			return ("AMD Opteron(tm)");
1178 		}
1179 	}
1180 
1181 	return (NULL);
1182 }
1183 
1184 static const char *
1185 cyrix_cpubrand(struct cpuid_info *cpi, uint_t type)
1186 {
1187 	if ((x86_feature & X86_CPUID) == 0 ||
1188 	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5 ||
1189 	    type == X86_TYPE_CYRIX_486)
1190 		return ("i486 compatible");
1191 
1192 	switch (type) {
1193 	case X86_TYPE_CYRIX_6x86:
1194 		return ("Cyrix 6x86");
1195 	case X86_TYPE_CYRIX_6x86L:
1196 		return ("Cyrix 6x86L");
1197 	case X86_TYPE_CYRIX_6x86MX:
1198 		return ("Cyrix 6x86MX");
1199 	case X86_TYPE_CYRIX_GXm:
1200 		return ("Cyrix GXm");
1201 	case X86_TYPE_CYRIX_MediaGX:
1202 		return ("Cyrix MediaGX");
1203 	case X86_TYPE_CYRIX_MII:
1204 		return ("Cyrix M2");
1205 	case X86_TYPE_VIA_CYRIX_III:
1206 		return ("VIA Cyrix M3");
1207 	default:
1208 		/*
1209 		 * Have another wild guess ..
1210 		 */
1211 		if (cpi->cpi_family == 4 && cpi->cpi_model == 9)
1212 			return ("Cyrix 5x86");
1213 		else if (cpi->cpi_family == 5) {
1214 			switch (cpi->cpi_model) {
1215 			case 2:
1216 				return ("Cyrix 6x86");	/* Cyrix M1 */
1217 			case 4:
1218 				return ("Cyrix MediaGX");
1219 			default:
1220 				break;
1221 			}
1222 		} else if (cpi->cpi_family == 6) {
1223 			switch (cpi->cpi_model) {
1224 			case 0:
1225 				return ("Cyrix 6x86MX"); /* Cyrix M2? */
1226 			case 5:
1227 			case 6:
1228 			case 7:
1229 			case 8:
1230 			case 9:
1231 				return ("VIA C3");
1232 			default:
1233 				break;
1234 			}
1235 		}
1236 		break;
1237 	}
1238 	return (NULL);
1239 }
1240 
1241 /*
1242  * This only gets called in the case that the CPU extended
1243  * feature brand string (0x80000002, 0x80000003, 0x80000004)
1244  * aren't available, or contain null bytes for some reason.
1245  */
1246 static void
1247 fabricate_brandstr(struct cpuid_info *cpi)
1248 {
1249 	const char *brand = NULL;
1250 
1251 	switch (cpi->cpi_vendor) {
1252 	case X86_VENDOR_Intel:
1253 		brand = intel_cpubrand(cpi);
1254 		break;
1255 	case X86_VENDOR_AMD:
1256 		brand = amd_cpubrand(cpi);
1257 		break;
1258 	case X86_VENDOR_Cyrix:
1259 		brand = cyrix_cpubrand(cpi, x86_type);
1260 		break;
1261 	case X86_VENDOR_NexGen:
1262 		if (cpi->cpi_family == 5 && cpi->cpi_model == 0)
1263 			brand = "NexGen Nx586";
1264 		break;
1265 	case X86_VENDOR_Centaur:
1266 		if (cpi->cpi_family == 5)
1267 			switch (cpi->cpi_model) {
1268 			case 4:
1269 				brand = "Centaur C6";
1270 				break;
1271 			case 8:
1272 				brand = "Centaur C2";
1273 				break;
1274 			case 9:
1275 				brand = "Centaur C3";
1276 				break;
1277 			default:
1278 				break;
1279 			}
1280 		break;
1281 	case X86_VENDOR_Rise:
1282 		if (cpi->cpi_family == 5 &&
1283 		    (cpi->cpi_model == 0 || cpi->cpi_model == 2))
1284 			brand = "Rise mP6";
1285 		break;
1286 	case X86_VENDOR_SiS:
1287 		if (cpi->cpi_family == 5 && cpi->cpi_model == 0)
1288 			brand = "SiS 55x";
1289 		break;
1290 	case X86_VENDOR_TM:
1291 		if (cpi->cpi_family == 5 && cpi->cpi_model == 4)
1292 			brand = "Transmeta Crusoe TM3x00 or TM5x00";
1293 		break;
1294 	case X86_VENDOR_NSC:
1295 	case X86_VENDOR_UMC:
1296 	default:
1297 		break;
1298 	}
1299 	if (brand) {
1300 		(void) strcpy((char *)cpi->cpi_brandstr, brand);
1301 		return;
1302 	}
1303 
1304 	/*
1305 	 * If all else fails ...
1306 	 */
1307 	(void) snprintf(cpi->cpi_brandstr, sizeof (cpi->cpi_brandstr),
1308 	    "%s %d.%d.%d", cpi->cpi_vendorstr, cpi->cpi_family,
1309 	    cpi->cpi_model, cpi->cpi_step);
1310 }
1311 
1312 /*
1313  * This routine is called just after kernel memory allocation
1314  * becomes available on cpu0, and as part of mp_startup() on
1315  * the other cpus.
1316  *
1317  * Fixup the brand string.
1318  */
1319 /*ARGSUSED*/
1320 void
1321 cpuid_pass3(cpu_t *cpu)
1322 {
1323 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
1324 
1325 	ASSERT(cpi->cpi_pass == 2);
1326 
1327 	if ((cpi->cpi_xmaxeax & 0x80000000) == 0) {
1328 		fabricate_brandstr(cpi);
1329 		goto pass3_done;
1330 	}
1331 
1332 	/*
1333 	 * If we successfully extracted a brand string from the cpuid
1334 	 * instruction, clean it up by removing leading spaces and
1335 	 * similar junk.
1336 	 */
1337 	if (cpi->cpi_brandstr[0]) {
1338 		size_t maxlen = sizeof (cpi->cpi_brandstr);
1339 		char *src, *dst;
1340 
1341 		dst = src = (char *)cpi->cpi_brandstr;
1342 		src[maxlen - 1] = '\0';
1343 		/*
1344 		 * strip leading spaces
1345 		 */
1346 		while (*src == ' ')
1347 			src++;
1348 		/*
1349 		 * Remove any 'Genuine' or "Authentic" prefixes
1350 		 */
1351 		if (strncmp(src, "Genuine ", 8) == 0)
1352 			src += 8;
1353 		if (strncmp(src, "Authentic ", 10) == 0)
1354 			src += 10;
1355 
1356 		/*
1357 		 * Now do an in-place copy.
1358 		 * Map (R) to (r) and (TM) to (tm).
1359 		 * The era of teletypes is long gone, and there's
1360 		 * -really- no need to shout.
1361 		 */
1362 		while (*src != '\0') {
1363 			if (src[0] == '(') {
1364 				if (strncmp(src + 1, "R)", 2) == 0) {
1365 					(void) strncpy(dst, "(r)", 3);
1366 					src += 3;
1367 					dst += 3;
1368 					continue;
1369 				}
1370 				if (strncmp(src + 1, "TM)", 3) == 0) {
1371 					(void) strncpy(dst, "(tm)", 4);
1372 					src += 4;
1373 					dst += 4;
1374 					continue;
1375 				}
1376 			}
1377 			*dst++ = *src++;
1378 		}
1379 		*dst = '\0';
1380 
1381 		/*
1382 		 * Finally, remove any trailing spaces
1383 		 */
1384 		while (--dst > cpi->cpi_brandstr)
1385 			if (*dst == ' ')
1386 				*dst = '\0';
1387 			else
1388 				break;
1389 	} else
1390 		fabricate_brandstr(cpi);
1391 
1392 pass3_done:
1393 	cpi->cpi_pass = 3;
1394 }
1395 
1396 /*
1397  * This routine is called out of bind_hwcap() much later in the life
1398  * of the kernel (post_startup()).  The job of this routine is to resolve
1399  * the hardware feature support and kernel support for those features into
1400  * what we're actually going to tell applications via the aux vector.
1401  */
1402 uint_t
1403 cpuid_pass4(cpu_t *cpu)
1404 {
1405 	struct cpuid_info *cpi;
1406 	uint_t hwcap_flags = 0;
1407 
1408 	if (cpu == NULL)
1409 		cpu = CPU;
1410 	cpi = cpu->cpu_m.mcpu_cpi;
1411 
1412 	ASSERT(cpi->cpi_pass == 3);
1413 
1414 	if (cpi->cpi_maxeax >= 1) {
1415 		uint32_t *edx = &cpi->cpi_support[STD_EDX_FEATURES];
1416 		uint32_t *ecx = &cpi->cpi_support[STD_ECX_FEATURES];
1417 
1418 		*edx = CPI_FEATURES_EDX(cpi);
1419 		*ecx = CPI_FEATURES_ECX(cpi);
1420 
1421 		/*
1422 		 * [these require explicit kernel support]
1423 		 */
1424 		if ((x86_feature & X86_SEP) == 0)
1425 			*edx &= ~CPUID_INTC_EDX_SEP;
1426 
1427 		if ((x86_feature & X86_SSE) == 0)
1428 			*edx &= ~(CPUID_INTC_EDX_FXSR|CPUID_INTC_EDX_SSE);
1429 		if ((x86_feature & X86_SSE2) == 0)
1430 			*edx &= ~CPUID_INTC_EDX_SSE2;
1431 
1432 		if ((x86_feature & X86_HTT) == 0)
1433 			*edx &= ~CPUID_INTC_EDX_HTT;
1434 
1435 		if ((x86_feature & X86_SSE3) == 0)
1436 			*ecx &= ~CPUID_INTC_ECX_SSE3;
1437 
1438 		/*
1439 		 * [no explicit support required beyond x87 fp context]
1440 		 */
1441 		if (!fpu_exists)
1442 			*edx &= ~(CPUID_INTC_EDX_FPU | CPUID_INTC_EDX_MMX);
1443 
1444 		/*
1445 		 * Now map the supported feature vector to things that we
1446 		 * think userland will care about.
1447 		 */
1448 		if (*edx & CPUID_INTC_EDX_SEP)
1449 			hwcap_flags |= AV_386_SEP;
1450 		if (*edx & CPUID_INTC_EDX_SSE)
1451 			hwcap_flags |= AV_386_FXSR | AV_386_SSE;
1452 		if (*edx & CPUID_INTC_EDX_SSE2)
1453 			hwcap_flags |= AV_386_SSE2;
1454 		if (*ecx & CPUID_INTC_ECX_SSE3)
1455 			hwcap_flags |= AV_386_SSE3;
1456 
1457 		if (*edx & CPUID_INTC_EDX_FPU)
1458 			hwcap_flags |= AV_386_FPU;
1459 		if (*edx & CPUID_INTC_EDX_MMX)
1460 			hwcap_flags |= AV_386_MMX;
1461 
1462 		if (*edx & CPUID_INTC_EDX_TSC)
1463 			hwcap_flags |= AV_386_TSC;
1464 		if (*edx & CPUID_INTC_EDX_CX8)
1465 			hwcap_flags |= AV_386_CX8;
1466 		if (*edx & CPUID_INTC_EDX_CMOV)
1467 			hwcap_flags |= AV_386_CMOV;
1468 		if (*ecx & CPUID_INTC_ECX_MON)
1469 			hwcap_flags |= AV_386_MON;
1470 #if defined(CPUID_INTC_ECX_CX16)
1471 		if (*ecx & CPUID_INTC_ECX_CX16)
1472 			hwcap_flags |= AV_386_CX16;
1473 #endif
1474 	}
1475 
1476 	if (x86_feature & X86_HTT)
1477 		hwcap_flags |= AV_386_PAUSE;
1478 
1479 	if (cpi->cpi_xmaxeax < 0x80000001)
1480 		goto pass4_done;
1481 
1482 	switch (cpi->cpi_vendor) {
1483 		struct cpuid_regs cp;
1484 		uint32_t *edx;
1485 
1486 	case X86_VENDOR_Intel:	/* sigh */
1487 	case X86_VENDOR_AMD:
1488 		edx = &cpi->cpi_support[AMD_EDX_FEATURES];
1489 
1490 		*edx = CPI_FEATURES_XTD_EDX(cpi);
1491 
1492 		/*
1493 		 * [no explicit support required beyond
1494 		 * x87 fp context and exception handlers]
1495 		 */
1496 		if (!fpu_exists)
1497 			*edx &= ~(CPUID_AMD_EDX_MMXamd |
1498 			    CPUID_AMD_EDX_3DNow | CPUID_AMD_EDX_3DNowx);
1499 
1500 		if ((x86_feature & X86_ASYSC) == 0)
1501 			*edx &= ~CPUID_AMD_EDX_SYSC;
1502 		if ((x86_feature & X86_NX) == 0)
1503 			*edx &= ~CPUID_AMD_EDX_NX;
1504 #if !defined(_LP64)
1505 		*edx &= ~CPUID_AMD_EDX_LM;
1506 #endif
1507 		/*
1508 		 * Now map the supported feature vector to
1509 		 * things that we think userland will care about.
1510 		 */
1511 		if (*edx & CPUID_AMD_EDX_SYSC)
1512 			hwcap_flags |= AV_386_AMD_SYSC;
1513 		if (*edx & CPUID_AMD_EDX_MMXamd)
1514 			hwcap_flags |= AV_386_AMD_MMX;
1515 		if (*edx & CPUID_AMD_EDX_3DNow)
1516 			hwcap_flags |= AV_386_AMD_3DNow;
1517 		if (*edx & CPUID_AMD_EDX_3DNowx)
1518 			hwcap_flags |= AV_386_AMD_3DNowx;
1519 		break;
1520 
1521 	case X86_VENDOR_TM:
1522 		cp.cp_eax = 0x80860001;
1523 		(void) __cpuid_insn(&cp);
1524 		cpi->cpi_support[TM_EDX_FEATURES] = cp.cp_edx;
1525 		break;
1526 
1527 	default:
1528 		break;
1529 	}
1530 
1531 pass4_done:
1532 	cpi->cpi_pass = 4;
1533 	return (hwcap_flags);
1534 }
1535 
1536 
1537 /*
1538  * Simulate the cpuid instruction using the data we previously
1539  * captured about this CPU.  We try our best to return the truth
1540  * about the hardware, independently of kernel support.
1541  */
1542 uint32_t
1543 cpuid_insn(cpu_t *cpu, struct cpuid_regs *cp)
1544 {
1545 	struct cpuid_info *cpi;
1546 	struct cpuid_regs *xcp;
1547 
1548 	if (cpu == NULL)
1549 		cpu = CPU;
1550 	cpi = cpu->cpu_m.mcpu_cpi;
1551 
1552 	ASSERT(cpuid_checkpass(cpu, 3));
1553 
1554 	/*
1555 	 * CPUID data is cached in two separate places: cpi_std for standard
1556 	 * CPUID functions, and cpi_extd for extended CPUID functions.
1557 	 */
1558 	if (cp->cp_eax <= cpi->cpi_maxeax && cp->cp_eax < NMAX_CPI_STD)
1559 		xcp = &cpi->cpi_std[cp->cp_eax];
1560 	else if (cp->cp_eax >= 0x80000000 && cp->cp_eax <= cpi->cpi_xmaxeax &&
1561 	    cp->cp_eax < 0x80000000 + NMAX_CPI_EXTD)
1562 		xcp = &cpi->cpi_extd[cp->cp_eax - 0x80000000];
1563 	else
1564 		/*
1565 		 * The caller is asking for data from an input parameter which
1566 		 * the kernel has not cached.  In this case we go fetch from
1567 		 * the hardware and return the data directly to the user.
1568 		 */
1569 		return (__cpuid_insn(cp));
1570 
1571 	cp->cp_eax = xcp->cp_eax;
1572 	cp->cp_ebx = xcp->cp_ebx;
1573 	cp->cp_ecx = xcp->cp_ecx;
1574 	cp->cp_edx = xcp->cp_edx;
1575 	return (cp->cp_eax);
1576 }
1577 
1578 int
1579 cpuid_checkpass(cpu_t *cpu, int pass)
1580 {
1581 	return (cpu != NULL && cpu->cpu_m.mcpu_cpi != NULL &&
1582 	    cpu->cpu_m.mcpu_cpi->cpi_pass >= pass);
1583 }
1584 
1585 int
1586 cpuid_getbrandstr(cpu_t *cpu, char *s, size_t n)
1587 {
1588 	ASSERT(cpuid_checkpass(cpu, 3));
1589 
1590 	return (snprintf(s, n, "%s", cpu->cpu_m.mcpu_cpi->cpi_brandstr));
1591 }
1592 
1593 int
1594 cpuid_is_cmt(cpu_t *cpu)
1595 {
1596 	if (cpu == NULL)
1597 		cpu = CPU;
1598 
1599 	ASSERT(cpuid_checkpass(cpu, 1));
1600 
1601 	return (cpu->cpu_m.mcpu_cpi->cpi_chipid >= 0);
1602 }
1603 
1604 /*
1605  * AMD and Intel both implement the 64-bit variant of the syscall
1606  * instruction (syscallq), so if there's -any- support for syscall,
1607  * cpuid currently says "yes, we support this".
1608  *
1609  * However, Intel decided to -not- implement the 32-bit variant of the
1610  * syscall instruction, so we provide a predicate to allow our caller
1611  * to test that subtlety here.
1612  */
1613 /*ARGSUSED*/
1614 int
1615 cpuid_syscall32_insn(cpu_t *cpu)
1616 {
1617 	ASSERT(cpuid_checkpass((cpu == NULL ? CPU : cpu), 1));
1618 
1619 	if (x86_feature & X86_ASYSC)
1620 		return (x86_vendor != X86_VENDOR_Intel);
1621 	return (0);
1622 }
1623 
1624 int
1625 cpuid_getidstr(cpu_t *cpu, char *s, size_t n)
1626 {
1627 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
1628 
1629 	static const char fmt[] =
1630 	    "x86 (%s family %d model %d step %d clock %d MHz)";
1631 	static const char fmt_ht[] =
1632 	    "x86 (chipid 0x%x %s family %d model %d step %d clock %d MHz)";
1633 
1634 	ASSERT(cpuid_checkpass(cpu, 1));
1635 
1636 	if (cpuid_is_cmt(cpu))
1637 		return (snprintf(s, n, fmt_ht, cpi->cpi_chipid,
1638 		    cpi->cpi_vendorstr, cpi->cpi_family, cpi->cpi_model,
1639 		    cpi->cpi_step, cpu->cpu_type_info.pi_clock));
1640 	return (snprintf(s, n, fmt,
1641 	    cpi->cpi_vendorstr, cpi->cpi_family, cpi->cpi_model,
1642 	    cpi->cpi_step, cpu->cpu_type_info.pi_clock));
1643 }
1644 
1645 const char *
1646 cpuid_getvendorstr(cpu_t *cpu)
1647 {
1648 	ASSERT(cpuid_checkpass(cpu, 1));
1649 	return ((const char *)cpu->cpu_m.mcpu_cpi->cpi_vendorstr);
1650 }
1651 
1652 uint_t
1653 cpuid_getvendor(cpu_t *cpu)
1654 {
1655 	ASSERT(cpuid_checkpass(cpu, 1));
1656 	return (cpu->cpu_m.mcpu_cpi->cpi_vendor);
1657 }
1658 
1659 uint_t
1660 cpuid_getfamily(cpu_t *cpu)
1661 {
1662 	ASSERT(cpuid_checkpass(cpu, 1));
1663 	return (cpu->cpu_m.mcpu_cpi->cpi_family);
1664 }
1665 
1666 uint_t
1667 cpuid_getmodel(cpu_t *cpu)
1668 {
1669 	ASSERT(cpuid_checkpass(cpu, 1));
1670 	return (cpu->cpu_m.mcpu_cpi->cpi_model);
1671 }
1672 
1673 uint_t
1674 cpuid_get_ncpu_per_chip(cpu_t *cpu)
1675 {
1676 	ASSERT(cpuid_checkpass(cpu, 1));
1677 	return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_per_chip);
1678 }
1679 
1680 uint_t
1681 cpuid_get_ncore_per_chip(cpu_t *cpu)
1682 {
1683 	ASSERT(cpuid_checkpass(cpu, 1));
1684 	return (cpu->cpu_m.mcpu_cpi->cpi_ncore_per_chip);
1685 }
1686 
1687 uint_t
1688 cpuid_getstep(cpu_t *cpu)
1689 {
1690 	ASSERT(cpuid_checkpass(cpu, 1));
1691 	return (cpu->cpu_m.mcpu_cpi->cpi_step);
1692 }
1693 
1694 chipid_t
1695 chip_plat_get_chipid(cpu_t *cpu)
1696 {
1697 	ASSERT(cpuid_checkpass(cpu, 1));
1698 
1699 	if (cpuid_is_cmt(cpu))
1700 		return (cpu->cpu_m.mcpu_cpi->cpi_chipid);
1701 	return (cpu->cpu_id);
1702 }
1703 
1704 id_t
1705 chip_plat_get_coreid(cpu_t *cpu)
1706 {
1707 	ASSERT(cpuid_checkpass(cpu, 1));
1708 	return (cpu->cpu_m.mcpu_cpi->cpi_coreid);
1709 }
1710 
1711 int
1712 chip_plat_get_clogid(cpu_t *cpu)
1713 {
1714 	ASSERT(cpuid_checkpass(cpu, 1));
1715 	return (cpu->cpu_m.mcpu_cpi->cpi_clogid);
1716 }
1717 
1718 void
1719 cpuid_get_addrsize(cpu_t *cpu, uint_t *pabits, uint_t *vabits)
1720 {
1721 	struct cpuid_info *cpi;
1722 
1723 	if (cpu == NULL)
1724 		cpu = CPU;
1725 	cpi = cpu->cpu_m.mcpu_cpi;
1726 
1727 	ASSERT(cpuid_checkpass(cpu, 1));
1728 
1729 	if (pabits)
1730 		*pabits = cpi->cpi_pabits;
1731 	if (vabits)
1732 		*vabits = cpi->cpi_vabits;
1733 }
1734 
1735 /*
1736  * Returns the number of data TLB entries for a corresponding
1737  * pagesize.  If it can't be computed, or isn't known, the
1738  * routine returns zero.  If you ask about an architecturally
1739  * impossible pagesize, the routine will panic (so that the
1740  * hat implementor knows that things are inconsistent.)
1741  */
1742 uint_t
1743 cpuid_get_dtlb_nent(cpu_t *cpu, size_t pagesize)
1744 {
1745 	struct cpuid_info *cpi;
1746 	uint_t dtlb_nent = 0;
1747 
1748 	if (cpu == NULL)
1749 		cpu = CPU;
1750 	cpi = cpu->cpu_m.mcpu_cpi;
1751 
1752 	ASSERT(cpuid_checkpass(cpu, 1));
1753 
1754 	/*
1755 	 * Check the L2 TLB info
1756 	 */
1757 	if (cpi->cpi_xmaxeax >= 0x80000006) {
1758 		struct cpuid_regs *cp = &cpi->cpi_extd[6];
1759 
1760 		switch (pagesize) {
1761 
1762 		case 4 * 1024:
1763 			/*
1764 			 * All zero in the top 16 bits of the register
1765 			 * indicates a unified TLB. Size is in low 16 bits.
1766 			 */
1767 			if ((cp->cp_ebx & 0xffff0000) == 0)
1768 				dtlb_nent = cp->cp_ebx & 0x0000ffff;
1769 			else
1770 				dtlb_nent = BITX(cp->cp_ebx, 27, 16);
1771 			break;
1772 
1773 		case 2 * 1024 * 1024:
1774 			if ((cp->cp_eax & 0xffff0000) == 0)
1775 				dtlb_nent = cp->cp_eax & 0x0000ffff;
1776 			else
1777 				dtlb_nent = BITX(cp->cp_eax, 27, 16);
1778 			break;
1779 
1780 		default:
1781 			panic("unknown L2 pagesize");
1782 			/*NOTREACHED*/
1783 		}
1784 	}
1785 
1786 	if (dtlb_nent != 0)
1787 		return (dtlb_nent);
1788 
1789 	/*
1790 	 * No L2 TLB support for this size, try L1.
1791 	 */
1792 	if (cpi->cpi_xmaxeax >= 0x80000005) {
1793 		struct cpuid_regs *cp = &cpi->cpi_extd[5];
1794 
1795 		switch (pagesize) {
1796 		case 4 * 1024:
1797 			dtlb_nent = BITX(cp->cp_ebx, 23, 16);
1798 			break;
1799 		case 2 * 1024 * 1024:
1800 			dtlb_nent = BITX(cp->cp_eax, 23, 16);
1801 			break;
1802 		default:
1803 			panic("unknown L1 d-TLB pagesize");
1804 			/*NOTREACHED*/
1805 		}
1806 	}
1807 
1808 	return (dtlb_nent);
1809 }
1810 
1811 /*
1812  * Return 0 if the erratum is not present or not applicable, positive
1813  * if it is, and negative if the status of the erratum is unknown.
1814  *
1815  * See "Revision Guide for AMD Athlon(tm) 64 and AMD Opteron(tm)
1816  * Processors" #25759, Rev 3.57, August 2005
1817  */
1818 int
1819 cpuid_opteron_erratum(cpu_t *cpu, uint_t erratum)
1820 {
1821 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
1822 	uint_t eax;
1823 
1824 	if (cpi->cpi_vendor != X86_VENDOR_AMD)
1825 		return (0);
1826 
1827 	eax = cpi->cpi_std[1].cp_eax;
1828 
1829 #define	SH_B0(eax)	(eax == 0xf40 || eax == 0xf50)
1830 #define	SH_B3(eax) 	(eax == 0xf51)
1831 #define	B(eax)		(SH_B0(eax) || SH_B3(eax))
1832 
1833 #define	SH_C0(eax)	(eax == 0xf48 || eax == 0xf58)
1834 
1835 #define	SH_CG(eax)	(eax == 0xf4a || eax == 0xf5a || eax == 0xf7a)
1836 #define	DH_CG(eax)	(eax == 0xfc0 || eax == 0xfe0 || eax == 0xff0)
1837 #define	CH_CG(eax)	(eax == 0xf82 || eax == 0xfb2)
1838 #define	CG(eax)		(SH_CG(eax) || DH_CG(eax) || CH_CG(eax))
1839 
1840 #define	SH_D0(eax)	(eax == 0x10f40 || eax == 0x10f50 || eax == 0x10f70)
1841 #define	DH_D0(eax)	(eax == 0x10fc0 || eax == 0x10ff0)
1842 #define	CH_D0(eax)	(eax == 0x10f80 || eax == 0x10fb0)
1843 #define	D0(eax)		(SH_D0(eax) || DH_D0(eax) || CH_D0(eax))
1844 
1845 #define	SH_E0(eax)	(eax == 0x20f50 || eax == 0x20f40 || eax == 0x20f70)
1846 #define	JH_E1(eax)	(eax == 0x20f10)	/* JH8_E0 had 0x20f30 */
1847 #define	DH_E3(eax)	(eax == 0x20fc0 || eax == 0x20ff0)
1848 #define	SH_E4(eax)	(eax == 0x20f51 || eax == 0x20f71)
1849 #define	BH_E4(eax)	(eax == 0x20fb1)
1850 #define	SH_E5(eax)	(eax == 0x20f42)
1851 #define	DH_E6(eax)	(eax == 0x20ff2 || eax == 0x20fc2)
1852 #define	JH_E6(eax)	(eax == 0x20f12 || eax == 0x20f32)
1853 #define	EX(eax)		(SH_E0(eax) || JH_E1(eax) || DH_E3(eax) || \
1854 			    SH_E4(eax) || BH_E4(eax) || SH_E5(eax) || \
1855 			    DH_E6(eax) || JH_E6(eax))
1856 
1857 	switch (erratum) {
1858 	case 1:
1859 		return (1);
1860 	case 51:	/* what does the asterisk mean? */
1861 		return (B(eax) || SH_C0(eax) || CG(eax));
1862 	case 52:
1863 		return (B(eax));
1864 	case 57:
1865 		return (1);
1866 	case 58:
1867 		return (B(eax));
1868 	case 60:
1869 		return (1);
1870 	case 61:
1871 	case 62:
1872 	case 63:
1873 	case 64:
1874 	case 65:
1875 	case 66:
1876 	case 68:
1877 	case 69:
1878 	case 70:
1879 	case 71:
1880 		return (B(eax));
1881 	case 72:
1882 		return (SH_B0(eax));
1883 	case 74:
1884 		return (B(eax));
1885 	case 75:
1886 		return (1);
1887 	case 76:
1888 		return (B(eax));
1889 	case 77:
1890 		return (1);
1891 	case 78:
1892 		return (B(eax) || SH_C0(eax));
1893 	case 79:
1894 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
1895 	case 80:
1896 	case 81:
1897 	case 82:
1898 		return (B(eax));
1899 	case 83:
1900 		return (B(eax) || SH_C0(eax) || CG(eax));
1901 	case 85:
1902 		return (1);
1903 	case 86:
1904 		return (SH_C0(eax) || CG(eax));
1905 	case 88:
1906 #if !defined(__amd64)
1907 		return (0);
1908 #else
1909 		return (B(eax) || SH_C0(eax));
1910 #endif
1911 	case 89:
1912 		return (1);
1913 	case 90:
1914 		return (B(eax) || SH_C0(eax) || CG(eax));
1915 	case 91:
1916 	case 92:
1917 		return (B(eax) || SH_C0(eax));
1918 	case 93:
1919 		return (SH_C0(eax));
1920 	case 94:
1921 		return (B(eax) || SH_C0(eax) || CG(eax));
1922 	case 95:
1923 #if !defined(__amd64)
1924 		return (0);
1925 #else
1926 		return (B(eax) || SH_C0(eax));
1927 #endif
1928 	case 96:
1929 		return (B(eax) || SH_C0(eax) || CG(eax));
1930 	case 97:
1931 	case 98:
1932 		return (SH_C0(eax) || CG(eax));
1933 	case 99:
1934 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
1935 	case 100:
1936 		return (B(eax) || SH_C0(eax));
1937 	case 101:
1938 	case 103:
1939 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
1940 	case 104:
1941 		return (SH_C0(eax) || CG(eax) || D0(eax));
1942 	case 105:
1943 	case 106:
1944 	case 107:
1945 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
1946 	case 108:
1947 		return (DH_CG(eax));
1948 	case 109:
1949 		return (SH_C0(eax) || CG(eax) || D0(eax));
1950 	case 110:
1951 		return (D0(eax) || EX(eax));
1952 	case 111:
1953 		return (CG(eax));
1954 	case 112:
1955 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
1956 	case 113:
1957 		return (eax == 0x20fc0);
1958 	case 114:
1959 		return (SH_E0(eax) || JH_E1(eax) || DH_E3(eax));
1960 	case 115:
1961 		return (SH_E0(eax) || JH_E1(eax));
1962 	case 116:
1963 		return (SH_E0(eax) || JH_E1(eax) || DH_E3(eax));
1964 	case 117:
1965 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
1966 	case 118:
1967 		return (SH_E0(eax) || JH_E1(eax) || SH_E4(eax) || BH_E4(eax) ||
1968 		    JH_E6(eax));
1969 	case 121:
1970 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
1971 	case 122:
1972 		return (SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
1973 	case 123:
1974 		return (JH_E1(eax) || BH_E4(eax) || JH_E6(eax));
1975 	case 131:
1976 		return (1);
1977 	case 6336786:
1978 		/*
1979 		 * Test for AdvPowerMgmtInfo.TscPStateInvariant
1980 		 * if this is a K8 family processor
1981 		 */
1982 		if (CPI_FAMILY(cpi) == 0xf) {
1983 			struct cpuid_regs regs;
1984 			regs.cp_eax = 0x80000007;
1985 			(void) __cpuid_insn(&regs);
1986 			return (!(regs.cp_edx & 0x100));
1987 		}
1988 		return (0);
1989 	case 6323525:
1990 		return (((((eax >> 12) & 0xff00) + (eax & 0xf00)) |
1991 		    (((eax >> 4) & 0xf) | ((eax >> 12) & 0xf0))) < 0xf40);
1992 
1993 	default:
1994 		return (-1);
1995 	}
1996 }
1997 
1998 static const char assoc_str[] = "associativity";
1999 static const char line_str[] = "line-size";
2000 static const char size_str[] = "size";
2001 
2002 static void
2003 add_cache_prop(dev_info_t *devi, const char *label, const char *type,
2004     uint32_t val)
2005 {
2006 	char buf[128];
2007 
2008 	/*
2009 	 * ndi_prop_update_int() is used because it is desirable for
2010 	 * DDI_PROP_HW_DEF and DDI_PROP_DONTSLEEP to be set.
2011 	 */
2012 	if (snprintf(buf, sizeof (buf), "%s-%s", label, type) < sizeof (buf))
2013 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, devi, buf, val);
2014 }
2015 
2016 /*
2017  * Intel-style cache/tlb description
2018  *
2019  * Standard cpuid level 2 gives a randomly ordered
2020  * selection of tags that index into a table that describes
2021  * cache and tlb properties.
2022  */
2023 
2024 static const char l1_icache_str[] = "l1-icache";
2025 static const char l1_dcache_str[] = "l1-dcache";
2026 static const char l2_cache_str[] = "l2-cache";
2027 static const char itlb4k_str[] = "itlb-4K";
2028 static const char dtlb4k_str[] = "dtlb-4K";
2029 static const char itlb4M_str[] = "itlb-4M";
2030 static const char dtlb4M_str[] = "dtlb-4M";
2031 static const char itlb424_str[] = "itlb-4K-2M-4M";
2032 static const char dtlb44_str[] = "dtlb-4K-4M";
2033 static const char sl1_dcache_str[] = "sectored-l1-dcache";
2034 static const char sl2_cache_str[] = "sectored-l2-cache";
2035 static const char itrace_str[] = "itrace-cache";
2036 static const char sl3_cache_str[] = "sectored-l3-cache";
2037 
2038 static const struct cachetab {
2039 	uint8_t 	ct_code;
2040 	uint8_t		ct_assoc;
2041 	uint16_t 	ct_line_size;
2042 	size_t		ct_size;
2043 	const char	*ct_label;
2044 } intel_ctab[] = {
2045 	/* maintain descending order! */
2046 	{ 0xb3, 4, 0, 128, dtlb4k_str },
2047 	{ 0xb0, 4, 0, 128, itlb4k_str },
2048 	{ 0x87, 8, 64, 1024*1024, l2_cache_str},
2049 	{ 0x86, 4, 64, 512*1024, l2_cache_str},
2050 	{ 0x85, 8, 32, 2*1024*1024, l2_cache_str},
2051 	{ 0x84, 8, 32, 1024*1024, l2_cache_str},
2052 	{ 0x83, 8, 32, 512*1024, l2_cache_str},
2053 	{ 0x82, 8, 32, 256*1024, l2_cache_str},
2054 	{ 0x81, 8, 32, 128*1024, l2_cache_str},		/* suspect! */
2055 	{ 0x7f, 2, 64, 512*1024, l2_cache_str},
2056 	{ 0x7d, 8, 64, 2*1024*1024, sl2_cache_str},
2057 	{ 0x7c, 8, 64, 1024*1024, sl2_cache_str},
2058 	{ 0x7b, 8, 64, 512*1024, sl2_cache_str},
2059 	{ 0x7a, 8, 64, 256*1024, sl2_cache_str},
2060 	{ 0x79, 8, 64, 128*1024, sl2_cache_str},
2061 	{ 0x78, 8, 64, 1024*1024, l2_cache_str},
2062 	{ 0x72, 8, 0, 32*1024, itrace_str},
2063 	{ 0x71, 8, 0, 16*1024, itrace_str},
2064 	{ 0x70, 8, 0, 12*1024, itrace_str},
2065 	{ 0x68, 4, 64, 32*1024, sl1_dcache_str},
2066 	{ 0x67, 4, 64, 16*1024, sl1_dcache_str},
2067 	{ 0x66, 4, 64, 8*1024, sl1_dcache_str},
2068 	{ 0x60, 8, 64, 16*1024, sl1_dcache_str},
2069 	{ 0x5d, 0, 0, 256, dtlb44_str},
2070 	{ 0x5c, 0, 0, 128, dtlb44_str},
2071 	{ 0x5b, 0, 0, 64, dtlb44_str},
2072 	{ 0x52, 0, 0, 256, itlb424_str},
2073 	{ 0x51, 0, 0, 128, itlb424_str},
2074 	{ 0x50, 0, 0, 64, itlb424_str},
2075 	{ 0x45, 4, 32, 2*1024*1024, l2_cache_str},
2076 	{ 0x44, 4, 32, 1024*1024, l2_cache_str},
2077 	{ 0x43, 4, 32, 512*1024, l2_cache_str},
2078 	{ 0x42, 4, 32, 256*1024, l2_cache_str},
2079 	{ 0x41, 4, 32, 128*1024, l2_cache_str},
2080 	{ 0x3c, 4, 64, 256*1024, sl2_cache_str},
2081 	{ 0x3b, 2, 64, 128*1024, sl2_cache_str},
2082 	{ 0x39, 4, 64, 128*1024, sl2_cache_str},
2083 	{ 0x30, 8, 64, 32*1024, l1_icache_str},
2084 	{ 0x2c, 8, 64, 32*1024, l1_dcache_str},
2085 	{ 0x29, 8, 64, 4096*1024, sl3_cache_str},
2086 	{ 0x25, 8, 64, 2048*1024, sl3_cache_str},
2087 	{ 0x23, 8, 64, 1024*1024, sl3_cache_str},
2088 	{ 0x22, 4, 64, 512*1024, sl3_cache_str},
2089 	{ 0x0c, 4, 32, 16*1024, l1_dcache_str},
2090 	{ 0x0a, 2, 32, 8*1024, l1_dcache_str},
2091 	{ 0x08, 4, 32, 16*1024, l1_icache_str},
2092 	{ 0x06, 4, 32, 8*1024, l1_icache_str},
2093 	{ 0x04, 4, 0, 8, dtlb4M_str},
2094 	{ 0x03, 4, 0, 64, dtlb4k_str},
2095 	{ 0x02, 4, 0, 2, itlb4M_str},
2096 	{ 0x01, 4, 0, 32, itlb4k_str},
2097 	{ 0 }
2098 };
2099 
2100 static const struct cachetab cyrix_ctab[] = {
2101 	{ 0x70, 4, 0, 32, "tlb-4K" },
2102 	{ 0x80, 4, 16, 16*1024, "l1-cache" },
2103 	{ 0 }
2104 };
2105 
2106 /*
2107  * Search a cache table for a matching entry
2108  */
2109 static const struct cachetab *
2110 find_cacheent(const struct cachetab *ct, uint_t code)
2111 {
2112 	if (code != 0) {
2113 		for (; ct->ct_code != 0; ct++)
2114 			if (ct->ct_code <= code)
2115 				break;
2116 		if (ct->ct_code == code)
2117 			return (ct);
2118 	}
2119 	return (NULL);
2120 }
2121 
2122 /*
2123  * Walk the cacheinfo descriptor, applying 'func' to every valid element
2124  * The walk is terminated if the walker returns non-zero.
2125  */
2126 static void
2127 intel_walk_cacheinfo(struct cpuid_info *cpi,
2128     void *arg, int (*func)(void *, const struct cachetab *))
2129 {
2130 	const struct cachetab *ct;
2131 	uint8_t *dp;
2132 	int i;
2133 
2134 	if ((dp = cpi->cpi_cacheinfo) == NULL)
2135 		return;
2136 	for (i = 0; i < cpi->cpi_ncache; i++, dp++)
2137 		if ((ct = find_cacheent(intel_ctab, *dp)) != NULL) {
2138 			if (func(arg, ct) != 0)
2139 				break;
2140 		}
2141 }
2142 
2143 /*
2144  * (Like the Intel one, except for Cyrix CPUs)
2145  */
2146 static void
2147 cyrix_walk_cacheinfo(struct cpuid_info *cpi,
2148     void *arg, int (*func)(void *, const struct cachetab *))
2149 {
2150 	const struct cachetab *ct;
2151 	uint8_t *dp;
2152 	int i;
2153 
2154 	if ((dp = cpi->cpi_cacheinfo) == NULL)
2155 		return;
2156 	for (i = 0; i < cpi->cpi_ncache; i++, dp++) {
2157 		/*
2158 		 * Search Cyrix-specific descriptor table first ..
2159 		 */
2160 		if ((ct = find_cacheent(cyrix_ctab, *dp)) != NULL) {
2161 			if (func(arg, ct) != 0)
2162 				break;
2163 			continue;
2164 		}
2165 		/*
2166 		 * .. else fall back to the Intel one
2167 		 */
2168 		if ((ct = find_cacheent(intel_ctab, *dp)) != NULL) {
2169 			if (func(arg, ct) != 0)
2170 				break;
2171 			continue;
2172 		}
2173 	}
2174 }
2175 
2176 /*
2177  * A cacheinfo walker that adds associativity, line-size, and size properties
2178  * to the devinfo node it is passed as an argument.
2179  */
2180 static int
2181 add_cacheent_props(void *arg, const struct cachetab *ct)
2182 {
2183 	dev_info_t *devi = arg;
2184 
2185 	add_cache_prop(devi, ct->ct_label, assoc_str, ct->ct_assoc);
2186 	if (ct->ct_line_size != 0)
2187 		add_cache_prop(devi, ct->ct_label, line_str,
2188 		    ct->ct_line_size);
2189 	add_cache_prop(devi, ct->ct_label, size_str, ct->ct_size);
2190 	return (0);
2191 }
2192 
2193 static const char fully_assoc[] = "fully-associative?";
2194 
2195 /*
2196  * AMD style cache/tlb description
2197  *
2198  * Extended functions 5 and 6 directly describe properties of
2199  * tlbs and various cache levels.
2200  */
2201 static void
2202 add_amd_assoc(dev_info_t *devi, const char *label, uint_t assoc)
2203 {
2204 	switch (assoc) {
2205 	case 0:	/* reserved; ignore */
2206 		break;
2207 	default:
2208 		add_cache_prop(devi, label, assoc_str, assoc);
2209 		break;
2210 	case 0xff:
2211 		add_cache_prop(devi, label, fully_assoc, 1);
2212 		break;
2213 	}
2214 }
2215 
2216 static void
2217 add_amd_tlb(dev_info_t *devi, const char *label, uint_t assoc, uint_t size)
2218 {
2219 	if (size == 0)
2220 		return;
2221 	add_cache_prop(devi, label, size_str, size);
2222 	add_amd_assoc(devi, label, assoc);
2223 }
2224 
2225 static void
2226 add_amd_cache(dev_info_t *devi, const char *label,
2227     uint_t size, uint_t assoc, uint_t lines_per_tag, uint_t line_size)
2228 {
2229 	if (size == 0 || line_size == 0)
2230 		return;
2231 	add_amd_assoc(devi, label, assoc);
2232 	/*
2233 	 * Most AMD parts have a sectored cache. Multiple cache lines are
2234 	 * associated with each tag. A sector consists of all cache lines
2235 	 * associated with a tag. For example, the AMD K6-III has a sector
2236 	 * size of 2 cache lines per tag.
2237 	 */
2238 	if (lines_per_tag != 0)
2239 		add_cache_prop(devi, label, "lines-per-tag", lines_per_tag);
2240 	add_cache_prop(devi, label, line_str, line_size);
2241 	add_cache_prop(devi, label, size_str, size * 1024);
2242 }
2243 
2244 static void
2245 add_amd_l2_assoc(dev_info_t *devi, const char *label, uint_t assoc)
2246 {
2247 	switch (assoc) {
2248 	case 0:	/* off */
2249 		break;
2250 	case 1:
2251 	case 2:
2252 	case 4:
2253 		add_cache_prop(devi, label, assoc_str, assoc);
2254 		break;
2255 	case 6:
2256 		add_cache_prop(devi, label, assoc_str, 8);
2257 		break;
2258 	case 8:
2259 		add_cache_prop(devi, label, assoc_str, 16);
2260 		break;
2261 	case 0xf:
2262 		add_cache_prop(devi, label, fully_assoc, 1);
2263 		break;
2264 	default: /* reserved; ignore */
2265 		break;
2266 	}
2267 }
2268 
2269 static void
2270 add_amd_l2_tlb(dev_info_t *devi, const char *label, uint_t assoc, uint_t size)
2271 {
2272 	if (size == 0 || assoc == 0)
2273 		return;
2274 	add_amd_l2_assoc(devi, label, assoc);
2275 	add_cache_prop(devi, label, size_str, size);
2276 }
2277 
2278 static void
2279 add_amd_l2_cache(dev_info_t *devi, const char *label,
2280     uint_t size, uint_t assoc, uint_t lines_per_tag, uint_t line_size)
2281 {
2282 	if (size == 0 || assoc == 0 || line_size == 0)
2283 		return;
2284 	add_amd_l2_assoc(devi, label, assoc);
2285 	if (lines_per_tag != 0)
2286 		add_cache_prop(devi, label, "lines-per-tag", lines_per_tag);
2287 	add_cache_prop(devi, label, line_str, line_size);
2288 	add_cache_prop(devi, label, size_str, size * 1024);
2289 }
2290 
2291 static void
2292 amd_cache_info(struct cpuid_info *cpi, dev_info_t *devi)
2293 {
2294 	struct cpuid_regs *cp;
2295 
2296 	if (cpi->cpi_xmaxeax < 0x80000005)
2297 		return;
2298 	cp = &cpi->cpi_extd[5];
2299 
2300 	/*
2301 	 * 4M/2M L1 TLB configuration
2302 	 *
2303 	 * We report the size for 2M pages because AMD uses two
2304 	 * TLB entries for one 4M page.
2305 	 */
2306 	add_amd_tlb(devi, "dtlb-2M",
2307 	    BITX(cp->cp_eax, 31, 24), BITX(cp->cp_eax, 23, 16));
2308 	add_amd_tlb(devi, "itlb-2M",
2309 	    BITX(cp->cp_eax, 15, 8), BITX(cp->cp_eax, 7, 0));
2310 
2311 	/*
2312 	 * 4K L1 TLB configuration
2313 	 */
2314 
2315 	switch (cpi->cpi_vendor) {
2316 		uint_t nentries;
2317 	case X86_VENDOR_TM:
2318 		if (cpi->cpi_family >= 5) {
2319 			/*
2320 			 * Crusoe processors have 256 TLB entries, but
2321 			 * cpuid data format constrains them to only
2322 			 * reporting 255 of them.
2323 			 */
2324 			if ((nentries = BITX(cp->cp_ebx, 23, 16)) == 255)
2325 				nentries = 256;
2326 			/*
2327 			 * Crusoe processors also have a unified TLB
2328 			 */
2329 			add_amd_tlb(devi, "tlb-4K", BITX(cp->cp_ebx, 31, 24),
2330 			    nentries);
2331 			break;
2332 		}
2333 		/*FALLTHROUGH*/
2334 	default:
2335 		add_amd_tlb(devi, itlb4k_str,
2336 		    BITX(cp->cp_ebx, 31, 24), BITX(cp->cp_ebx, 23, 16));
2337 		add_amd_tlb(devi, dtlb4k_str,
2338 		    BITX(cp->cp_ebx, 15, 8), BITX(cp->cp_ebx, 7, 0));
2339 		break;
2340 	}
2341 
2342 	/*
2343 	 * data L1 cache configuration
2344 	 */
2345 
2346 	add_amd_cache(devi, l1_dcache_str,
2347 	    BITX(cp->cp_ecx, 31, 24), BITX(cp->cp_ecx, 23, 16),
2348 	    BITX(cp->cp_ecx, 15, 8), BITX(cp->cp_ecx, 7, 0));
2349 
2350 	/*
2351 	 * code L1 cache configuration
2352 	 */
2353 
2354 	add_amd_cache(devi, l1_icache_str,
2355 	    BITX(cp->cp_edx, 31, 24), BITX(cp->cp_edx, 23, 16),
2356 	    BITX(cp->cp_edx, 15, 8), BITX(cp->cp_edx, 7, 0));
2357 
2358 	if (cpi->cpi_xmaxeax < 0x80000006)
2359 		return;
2360 	cp = &cpi->cpi_extd[6];
2361 
2362 	/* Check for a unified L2 TLB for large pages */
2363 
2364 	if (BITX(cp->cp_eax, 31, 16) == 0)
2365 		add_amd_l2_tlb(devi, "l2-tlb-2M",
2366 		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
2367 	else {
2368 		add_amd_l2_tlb(devi, "l2-dtlb-2M",
2369 		    BITX(cp->cp_eax, 31, 28), BITX(cp->cp_eax, 27, 16));
2370 		add_amd_l2_tlb(devi, "l2-itlb-2M",
2371 		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
2372 	}
2373 
2374 	/* Check for a unified L2 TLB for 4K pages */
2375 
2376 	if (BITX(cp->cp_ebx, 31, 16) == 0) {
2377 		add_amd_l2_tlb(devi, "l2-tlb-4K",
2378 		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
2379 	} else {
2380 		add_amd_l2_tlb(devi, "l2-dtlb-4K",
2381 		    BITX(cp->cp_eax, 31, 28), BITX(cp->cp_eax, 27, 16));
2382 		add_amd_l2_tlb(devi, "l2-itlb-4K",
2383 		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
2384 	}
2385 
2386 	add_amd_l2_cache(devi, l2_cache_str,
2387 	    BITX(cp->cp_ecx, 31, 16), BITX(cp->cp_ecx, 15, 12),
2388 	    BITX(cp->cp_ecx, 11, 8), BITX(cp->cp_ecx, 7, 0));
2389 }
2390 
2391 /*
2392  * There are two basic ways that the x86 world describes it cache
2393  * and tlb architecture - Intel's way and AMD's way.
2394  *
2395  * Return which flavor of cache architecture we should use
2396  */
2397 static int
2398 x86_which_cacheinfo(struct cpuid_info *cpi)
2399 {
2400 	switch (cpi->cpi_vendor) {
2401 	case X86_VENDOR_Intel:
2402 		if (cpi->cpi_maxeax >= 2)
2403 			return (X86_VENDOR_Intel);
2404 		break;
2405 	case X86_VENDOR_AMD:
2406 		/*
2407 		 * The K5 model 1 was the first part from AMD that reported
2408 		 * cache sizes via extended cpuid functions.
2409 		 */
2410 		if (cpi->cpi_family > 5 ||
2411 		    (cpi->cpi_family == 5 && cpi->cpi_model >= 1))
2412 			return (X86_VENDOR_AMD);
2413 		break;
2414 	case X86_VENDOR_TM:
2415 		if (cpi->cpi_family >= 5)
2416 			return (X86_VENDOR_AMD);
2417 		/*FALLTHROUGH*/
2418 	default:
2419 		/*
2420 		 * If they have extended CPU data for 0x80000005
2421 		 * then we assume they have AMD-format cache
2422 		 * information.
2423 		 *
2424 		 * If not, and the vendor happens to be Cyrix,
2425 		 * then try our-Cyrix specific handler.
2426 		 *
2427 		 * If we're not Cyrix, then assume we're using Intel's
2428 		 * table-driven format instead.
2429 		 */
2430 		if (cpi->cpi_xmaxeax >= 0x80000005)
2431 			return (X86_VENDOR_AMD);
2432 		else if (cpi->cpi_vendor == X86_VENDOR_Cyrix)
2433 			return (X86_VENDOR_Cyrix);
2434 		else if (cpi->cpi_maxeax >= 2)
2435 			return (X86_VENDOR_Intel);
2436 		break;
2437 	}
2438 	return (-1);
2439 }
2440 
2441 /*
2442  * create a node for the given cpu under the prom root node.
2443  * Also, create a cpu node in the device tree.
2444  */
2445 static dev_info_t *cpu_nex_devi = NULL;
2446 static kmutex_t cpu_node_lock;
2447 
2448 /*
2449  * Called from post_startup() and mp_startup()
2450  */
2451 void
2452 add_cpunode2devtree(processorid_t cpu_id, struct cpuid_info *cpi)
2453 {
2454 	dev_info_t *cpu_devi;
2455 	int create;
2456 
2457 	mutex_enter(&cpu_node_lock);
2458 
2459 	/*
2460 	 * create a nexus node for all cpus identified as 'cpu_id' under
2461 	 * the root node.
2462 	 */
2463 	if (cpu_nex_devi == NULL) {
2464 		if (ndi_devi_alloc(ddi_root_node(), "cpus",
2465 		    (pnode_t)DEVI_SID_NODEID, &cpu_nex_devi) != NDI_SUCCESS) {
2466 			mutex_exit(&cpu_node_lock);
2467 			return;
2468 		}
2469 		(void) ndi_devi_online(cpu_nex_devi, 0);
2470 	}
2471 
2472 	/*
2473 	 * create a child node for cpu identified as 'cpu_id'
2474 	 */
2475 	cpu_devi = ddi_add_child(cpu_nex_devi, "cpu", DEVI_SID_NODEID,
2476 		cpu_id);
2477 	if (cpu_devi == NULL) {
2478 		mutex_exit(&cpu_node_lock);
2479 		return;
2480 	}
2481 
2482 	/* device_type */
2483 
2484 	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
2485 	    "device_type", "cpu");
2486 
2487 	/* reg */
2488 
2489 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
2490 	    "reg", cpu_id);
2491 
2492 	/* cpu-mhz, and clock-frequency */
2493 
2494 	if (cpu_freq > 0) {
2495 		long long mul;
2496 
2497 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
2498 		    "cpu-mhz", cpu_freq);
2499 
2500 		if ((mul = cpu_freq * 1000000LL) <= INT_MAX)
2501 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
2502 			    "clock-frequency", (int)mul);
2503 	}
2504 
2505 	(void) ndi_devi_online(cpu_devi, 0);
2506 
2507 	if ((x86_feature & X86_CPUID) == 0) {
2508 		mutex_exit(&cpu_node_lock);
2509 		return;
2510 	}
2511 
2512 	/* vendor-id */
2513 
2514 	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
2515 		"vendor-id", cpi->cpi_vendorstr);
2516 
2517 	if (cpi->cpi_maxeax == 0) {
2518 		mutex_exit(&cpu_node_lock);
2519 		return;
2520 	}
2521 
2522 	/*
2523 	 * family, model, and step
2524 	 */
2525 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
2526 		"family", CPI_FAMILY(cpi));
2527 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
2528 		"cpu-model", CPI_MODEL(cpi));
2529 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
2530 		"stepping-id", CPI_STEP(cpi));
2531 
2532 	/* type */
2533 
2534 	switch (cpi->cpi_vendor) {
2535 	case X86_VENDOR_Intel:
2536 		create = 1;
2537 		break;
2538 	default:
2539 		create = 0;
2540 		break;
2541 	}
2542 	if (create)
2543 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
2544 			"type", CPI_TYPE(cpi));
2545 
2546 	/* ext-family */
2547 
2548 	switch (cpi->cpi_vendor) {
2549 	case X86_VENDOR_Intel:
2550 	case X86_VENDOR_AMD:
2551 		create = cpi->cpi_family >= 0xf;
2552 		break;
2553 	default:
2554 		create = 0;
2555 		break;
2556 	}
2557 	if (create)
2558 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
2559 		    "ext-family", CPI_FAMILY_XTD(cpi));
2560 
2561 	/* ext-model */
2562 
2563 	switch (cpi->cpi_vendor) {
2564 	case X86_VENDOR_Intel:
2565 	case X86_VENDOR_AMD:
2566 		create = CPI_FAMILY(cpi) == 0xf;
2567 		break;
2568 	default:
2569 		create = 0;
2570 		break;
2571 	}
2572 	if (create)
2573 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
2574 			"ext-model", CPI_MODEL_XTD(cpi));
2575 
2576 	/* generation */
2577 
2578 	switch (cpi->cpi_vendor) {
2579 	case X86_VENDOR_AMD:
2580 		/*
2581 		 * AMD K5 model 1 was the first part to support this
2582 		 */
2583 		create = cpi->cpi_xmaxeax >= 0x80000001;
2584 		break;
2585 	default:
2586 		create = 0;
2587 		break;
2588 	}
2589 	if (create)
2590 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
2591 		    "generation", BITX((cpi)->cpi_extd[1].cp_eax, 11, 8));
2592 
2593 	/* brand-id */
2594 
2595 	switch (cpi->cpi_vendor) {
2596 	case X86_VENDOR_Intel:
2597 		/*
2598 		 * brand id first appeared on Pentium III Xeon model 8,
2599 		 * and Celeron model 8 processors and Opteron
2600 		 */
2601 		create = cpi->cpi_family > 6 ||
2602 		    (cpi->cpi_family == 6 && cpi->cpi_model >= 8);
2603 		break;
2604 	case X86_VENDOR_AMD:
2605 		create = cpi->cpi_family >= 0xf;
2606 		break;
2607 	default:
2608 		create = 0;
2609 		break;
2610 	}
2611 	if (create && cpi->cpi_brandid != 0) {
2612 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
2613 		    "brand-id", cpi->cpi_brandid);
2614 	}
2615 
2616 	/* chunks, and apic-id */
2617 
2618 	switch (cpi->cpi_vendor) {
2619 	case X86_VENDOR_Intel:
2620 	case X86_VENDOR_AMD:
2621 		/*
2622 		 * first available on Pentium IV and Opteron (K8)
2623 		 */
2624 		create = cpi->cpi_family >= 0xf;
2625 		break;
2626 	default:
2627 		create = 0;
2628 		break;
2629 	}
2630 	if (create) {
2631 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
2632 			"chunks", CPI_CHUNKS(cpi));
2633 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
2634 			"apic-id", CPI_APIC_ID(cpi));
2635 		if (cpi->cpi_chipid >= 0) {
2636 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
2637 			    "chip#", cpi->cpi_chipid);
2638 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
2639 			    "clog#", cpi->cpi_clogid);
2640 		}
2641 	}
2642 
2643 	/* cpuid-features */
2644 
2645 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
2646 	    "cpuid-features", CPI_FEATURES_EDX(cpi));
2647 
2648 
2649 	/* cpuid-features-ecx */
2650 
2651 	switch (cpi->cpi_vendor) {
2652 	case X86_VENDOR_Intel:
2653 		create = cpi->cpi_family >= 0xf;
2654 		break;
2655 	default:
2656 		create = 0;
2657 		break;
2658 	}
2659 	if (create)
2660 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
2661 		    "cpuid-features-ecx", CPI_FEATURES_ECX(cpi));
2662 
2663 	/* ext-cpuid-features */
2664 
2665 	switch (cpi->cpi_vendor) {
2666 	case X86_VENDOR_AMD:
2667 	case X86_VENDOR_Cyrix:
2668 	case X86_VENDOR_TM:
2669 	case X86_VENDOR_Centaur:
2670 		/*
2671 		 * The extended cpuid features are not relevant on
2672 		 * Intel but are available from the AMD K5 model 1
2673 		 * and most Cyrix GXm and later.
2674 		 */
2675 		create = cpi->cpi_xmaxeax >= 0x80000001;
2676 		break;
2677 	default:
2678 		create = 0;
2679 		break;
2680 	}
2681 	if (create)
2682 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
2683 			"ext-cpuid-features", CPI_FEATURES_XTD_EDX(cpi));
2684 
2685 	/*
2686 	 * Brand String first appeared in Intel Pentium IV, AMD K5
2687 	 * model 1, and Cyrix GXm.  On earlier models we try and
2688 	 * simulate something similar .. so this string should always
2689 	 * same -something- about the processor, however lame.
2690 	 */
2691 	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
2692 	    "brand-string", cpi->cpi_brandstr);
2693 
2694 	/*
2695 	 * Finally, cache and tlb information
2696 	 */
2697 	switch (x86_which_cacheinfo(cpi)) {
2698 	case X86_VENDOR_Intel:
2699 		intel_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props);
2700 		break;
2701 	case X86_VENDOR_Cyrix:
2702 		cyrix_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props);
2703 		break;
2704 	case X86_VENDOR_AMD:
2705 		amd_cache_info(cpi, cpu_devi);
2706 		break;
2707 	default:
2708 		break;
2709 	}
2710 
2711 	mutex_exit(&cpu_node_lock);
2712 }
2713 
2714 struct l2info {
2715 	int *l2i_csz;
2716 	int *l2i_lsz;
2717 	int *l2i_assoc;
2718 	int l2i_ret;
2719 };
2720 
2721 /*
2722  * A cacheinfo walker that fetches the size, line-size and associativity
2723  * of the L2 cache
2724  */
2725 static int
2726 intel_l2cinfo(void *arg, const struct cachetab *ct)
2727 {
2728 	struct l2info *l2i = arg;
2729 	int *ip;
2730 
2731 	if (ct->ct_label != l2_cache_str &&
2732 	    ct->ct_label != sl2_cache_str)
2733 		return (0);	/* not an L2 -- keep walking */
2734 
2735 	if ((ip = l2i->l2i_csz) != NULL)
2736 		*ip = ct->ct_size;
2737 	if ((ip = l2i->l2i_lsz) != NULL)
2738 		*ip = ct->ct_line_size;
2739 	if ((ip = l2i->l2i_assoc) != NULL)
2740 		*ip = ct->ct_assoc;
2741 	l2i->l2i_ret = ct->ct_size;
2742 	return (1);		/* was an L2 -- terminate walk */
2743 }
2744 
2745 static void
2746 amd_l2cacheinfo(struct cpuid_info *cpi, struct l2info *l2i)
2747 {
2748 	struct cpuid_regs *cp;
2749 	uint_t size, assoc;
2750 	int *ip;
2751 
2752 	if (cpi->cpi_xmaxeax < 0x80000006)
2753 		return;
2754 	cp = &cpi->cpi_extd[6];
2755 
2756 	if ((assoc = BITX(cp->cp_ecx, 15, 12)) != 0 &&
2757 	    (size = BITX(cp->cp_ecx, 31, 16)) != 0) {
2758 		uint_t cachesz = size * 1024;
2759 
2760 
2761 		if ((ip = l2i->l2i_csz) != NULL)
2762 			*ip = cachesz;
2763 		if ((ip = l2i->l2i_lsz) != NULL)
2764 			*ip = BITX(cp->cp_ecx, 7, 0);
2765 		if ((ip = l2i->l2i_assoc) != NULL)
2766 			*ip = assoc;
2767 		l2i->l2i_ret = cachesz;
2768 	}
2769 }
2770 
2771 int
2772 getl2cacheinfo(cpu_t *cpu, int *csz, int *lsz, int *assoc)
2773 {
2774 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
2775 	struct l2info __l2info, *l2i = &__l2info;
2776 
2777 	l2i->l2i_csz = csz;
2778 	l2i->l2i_lsz = lsz;
2779 	l2i->l2i_assoc = assoc;
2780 	l2i->l2i_ret = -1;
2781 
2782 	switch (x86_which_cacheinfo(cpi)) {
2783 	case X86_VENDOR_Intel:
2784 		intel_walk_cacheinfo(cpi, l2i, intel_l2cinfo);
2785 		break;
2786 	case X86_VENDOR_Cyrix:
2787 		cyrix_walk_cacheinfo(cpi, l2i, intel_l2cinfo);
2788 		break;
2789 	case X86_VENDOR_AMD:
2790 		amd_l2cacheinfo(cpi, l2i);
2791 		break;
2792 	default:
2793 		break;
2794 	}
2795 	return (l2i->l2i_ret);
2796 }
2797