xref: /titanic_41/usr/src/uts/i86pc/os/cpuid.c (revision e5351341b58845eee9d722bd71543d5a7c26b6cc)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2011 by Delphix. All rights reserved.
24  */
25 /*
26  * Copyright (c) 2010, Intel Corporation.
27  * All rights reserved.
28  */
29 /*
30  * Portions Copyright 2009 Advanced Micro Devices, Inc.
31  */
32 /*
33  * Copyright (c) 2011, Joyent, Inc. All rights reserved.
34  */
35 /*
36  * Various routines to handle identification
37  * and classification of x86 processors.
38  */
39 
40 #include <sys/types.h>
41 #include <sys/archsystm.h>
42 #include <sys/x86_archext.h>
43 #include <sys/kmem.h>
44 #include <sys/systm.h>
45 #include <sys/cmn_err.h>
46 #include <sys/sunddi.h>
47 #include <sys/sunndi.h>
48 #include <sys/cpuvar.h>
49 #include <sys/processor.h>
50 #include <sys/sysmacros.h>
51 #include <sys/pg.h>
52 #include <sys/fp.h>
53 #include <sys/controlregs.h>
54 #include <sys/bitmap.h>
55 #include <sys/auxv_386.h>
56 #include <sys/memnode.h>
57 #include <sys/pci_cfgspace.h>
58 
59 #ifdef __xpv
60 #include <sys/hypervisor.h>
61 #else
62 #include <sys/ontrap.h>
63 #endif
64 
65 /*
66  * Pass 0 of cpuid feature analysis happens in locore. It contains special code
67  * to recognize Cyrix processors that are not cpuid-compliant, and to deal with
68  * them accordingly. For most modern processors, feature detection occurs here
69  * in pass 1.
70  *
71  * Pass 1 of cpuid feature analysis happens just at the beginning of mlsetup()
72  * for the boot CPU and does the basic analysis that the early kernel needs.
73  * x86_featureset is filled in by cpuid_pass1() on the boot
74  * CPU.
75  *
76  * Pass 1 includes:
77  *
78  *	o Determining vendor/model/family/stepping and setting x86_type and
79  *	  x86_vendor accordingly.
80  *	o Processing the feature flags returned by the cpuid instruction while
81  *	  applying any workarounds or tricks for the specific processor.
82  *	o Mapping the feature flags into Solaris feature bits (X86_*).
83  *	o Processing extended feature flags if supported by the processor,
84  *	  again while applying specific processor knowledge.
85  *	o Determining the CMT characteristics of the system.
86  *
87  * Pass 1 is done on non-boot CPUs during their initialization and the results
88  * are used only as a meager attempt at ensuring that all processors within the
89  * system support the same features.
90  *
91  * Pass 2 of cpuid feature analysis happens just at the beginning
92  * of startup().  It just copies in and corrects the remainder
93  * of the cpuid data we depend on: standard cpuid functions that we didn't
94  * need for pass1 feature analysis, and extended cpuid functions beyond the
95  * simple feature processing done in pass1.
96  *
97  * Pass 3 of cpuid analysis is invoked after basic kernel services; in
98  * particular kernel memory allocation has been made available. It creates a
99  * readable brand string based on the data collected in the first two passes.
100  *
101  * Pass 4 of cpuid analysis is invoked after post_startup() when all
102  * the support infrastructure for various hardware features has been
103  * initialized. It determines which processor features will be reported
104  * to userland via the aux vector.
105  *
106  * All passes are executed on all CPUs, but only the boot CPU determines what
107  * features the kernel will use.
108  *
109  * Much of the worst junk in this file is for the support of processors
110  * that didn't really implement the cpuid instruction properly.
111  *
112  * NOTE: The accessor functions (cpuid_get*) are aware of, and ASSERT upon,
113  * the pass numbers.  Accordingly, changes to the pass code may require changes
114  * to the accessor code.
115  */
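/*
 * For illustration only, a minimal sketch (not compiled) of the accessor
 * contract described above: an accessor ASSERTs that the pass which
 * produced its data has completed before returning it.  The function name
 * is hypothetical; the real accessors (and struct cpuid_info) appear
 * further down in this file.
 */
#if 0
static uint_t
example_cpuid_getfamily(cpu_t *cpu)
{
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;

	ASSERT(cpi->cpi_pass >= 1);	/* cpi_family is set by pass 1 */
	return (cpi->cpi_family);
}
#endif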
116 
117 uint_t x86_vendor = X86_VENDOR_IntelClone;
118 uint_t x86_type = X86_TYPE_OTHER;
119 uint_t x86_clflush_size = 0;
120 
121 uint_t pentiumpro_bug4046376;
122 uint_t pentiumpro_bug4064495;
123 
124 uchar_t x86_featureset[BT_SIZEOFMAP(NUM_X86_FEATURES)];
125 
126 static char *x86_feature_names[NUM_X86_FEATURES] = {
127 	"lgpg",
128 	"tsc",
129 	"msr",
130 	"mtrr",
131 	"pge",
132 	"de",
133 	"cmov",
134 	"mmx",
135 	"mca",
136 	"pae",
137 	"cv8",
138 	"pat",
139 	"sep",
140 	"sse",
141 	"sse2",
142 	"htt",
143 	"asysc",
144 	"nx",
145 	"sse3",
146 	"cx16",
147 	"cmp",
148 	"tscp",
149 	"mwait",
150 	"sse4a",
151 	"cpuid",
152 	"ssse3",
153 	"sse4_1",
154 	"sse4_2",
155 	"1gpg",
156 	"clfsh",
157 	"64",
158 	"aes",
159 	"pclmulqdq",
160 	"xsave",
161 	"avx",
162 	"vmx",
163 	"svm"
164 };
165 
166 boolean_t
167 is_x86_feature(void *featureset, uint_t feature)
168 {
169 	ASSERT(feature < NUM_X86_FEATURES);
170 	return (BT_TEST((ulong_t *)featureset, feature));
171 }
172 
173 void
174 add_x86_feature(void *featureset, uint_t feature)
175 {
176 	ASSERT(feature < NUM_X86_FEATURES);
177 	BT_SET((ulong_t *)featureset, feature);
178 }
179 
180 void
181 remove_x86_feature(void *featureset, uint_t feature)
182 {
183 	ASSERT(feature < NUM_X86_FEATURES);
184 	BT_CLEAR((ulong_t *)featureset, feature);
185 }
186 
187 boolean_t
188 compare_x86_featureset(void *setA, void *setB)
189 {
190 	/*
191 	 * We assume that the unused bits of the bitmap are always zero.
192 	 */
193 	if (memcmp(setA, setB, BT_SIZEOFMAP(NUM_X86_FEATURES)) == 0) {
194 		return (B_TRUE);
195 	} else {
196 		return (B_FALSE);
197 	}
198 }
199 
200 void
201 print_x86_featureset(void *featureset)
202 {
203 	uint_t i;
204 
205 	for (i = 0; i < NUM_X86_FEATURES; i++) {
206 		if (is_x86_feature(featureset, i)) {
207 			cmn_err(CE_CONT, "?x86_feature: %s\n",
208 			    x86_feature_names[i]);
209 		}
210 	}
211 }
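/*
 * For illustration only, a minimal sketch (not compiled) showing how the
 * bitmap accessors above are typically used together; the local featureset
 * here is hypothetical.
 */
#if 0
static void
example_featureset_usage(void)
{
	uchar_t fset[BT_SIZEOFMAP(NUM_X86_FEATURES)] = { 0 };

	add_x86_feature(fset, X86FSET_SSE2);
	if (is_x86_feature(fset, X86FSET_SSE2)) {
		/* take an SSE2-specific code path */
	}
	remove_x86_feature(fset, X86FSET_SSE2);

	/* unused bits are assumed zero, so a bytewise compare suffices */
	if (!compare_x86_featureset(fset, x86_featureset))
		print_x86_featureset(x86_featureset);
}
#endif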
212 
213 uint_t enable486;
214 
215 static size_t xsave_state_size = 0;
216 uint64_t xsave_bv_all = (XFEATURE_LEGACY_FP | XFEATURE_SSE);
217 boolean_t xsave_force_disable = B_FALSE;
218 
219 /*
220  * This is set to the platform type Solaris is running on.
221  */
222 static int platform_type = -1;
223 
224 #if !defined(__xpv)
225 /*
226  * Variable to patch if hypervisor platform detection needs to be
227  * disabled (e.g. platform_type will always be HW_NATIVE if this is 0).
228  */
229 int enable_platform_detection = 1;
230 #endif
231 
232 /*
233  * monitor/mwait info.
234  *
235  * size_actual and buf_actual are the real address and size allocated to get
236  * proper mwait_buf alignment.  buf_actual and size_actual should be passed
237  * to kmem_free().  Currently kmem_alloc() and mwait happen to both use
238  * processor cache-line alignment, but this is not guaranteed in the future.
239  */
240 struct mwait_info {
241 	size_t		mon_min;	/* min size to avoid missed wakeups */
242 	size_t		mon_max;	/* size to avoid false wakeups */
243 	size_t		size_actual;	/* size actually allocated */
244 	void		*buf_actual;	/* memory actually allocated */
245 	uint32_t	support;	/* processor support of monitor/mwait */
246 };
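/*
 * For illustration only, a minimal sketch (not compiled) of the
 * size_actual/buf_actual contract described above: a consumer frees what
 * was actually allocated, not the aligned sub-range it used for the
 * monitor buffer.  The function name is hypothetical.
 */
#if 0
static void
example_mwait_buf_free(struct mwait_info *mip)
{
	if (mip->buf_actual != NULL) {
		kmem_free(mip->buf_actual, mip->size_actual);
		mip->buf_actual = NULL;
		mip->size_actual = 0;
	}
}
#endif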
247 
248 /*
249  * xsave/xrstor info.
250  *
251  * This structure contains HW feature bits and size of the xsave save area.
252  * Note: the kernel will use the maximum size required for all hardware
253  * features. It is not optimized for potential memory savings if features at
254  * the end of the save area are not enabled.
255  */
256 struct xsave_info {
257 	uint32_t	xsav_hw_features_low;   /* Supported HW features */
258 	uint32_t	xsav_hw_features_high;  /* Supported HW features */
259 	size_t		xsav_max_size;  /* max size save area for HW features */
260 	size_t		ymm_size;	/* AVX: size of ymm save area */
261 	size_t		ymm_offset;	/* AVX: offset for ymm save area */
262 };
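/*
 * For illustration only, a minimal sketch (not compiled) of the sizing
 * policy noted above: a save area is sized to xsav_max_size, covering all
 * supported hardware features rather than trimming trailing disabled ones.
 * The function name is hypothetical.
 */
#if 0
static void *
example_xsave_area_alloc(const struct xsave_info *xi)
{
	return (kmem_zalloc(xi->xsav_max_size, KM_SLEEP));
}
#endif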
263 
264 
265 /*
266  * These constants determine how many of the elements of the
267  * cpuid we cache in the cpuid_info data structure; the
268  * remaining elements are accessible via the cpuid instruction.
269  */
270 
271 #define	NMAX_CPI_STD	6		/* eax = 0 .. 5 */
272 #define	NMAX_CPI_EXTD	0x1c		/* eax = 0x80000000 .. 0x8000001b */
273 
274 /*
275  * Some terminology needs to be explained:
276  *  - Socket: Something that can be plugged into a motherboard.
277  *  - Package: Same as socket.
278  *  - Chip: Same as socket. Note that AMD's documentation uses the term
279  *    "chip" differently: there, a chip is the same as a processor node
280  *    (below).
281  *  - Processor node: Some AMD processors have more than one
282  *    "subprocessor" embedded in a package. These subprocessors (nodes)
283  *    are fully-functional processors themselves with cores, caches,
284  *    memory controllers, and PCI configuration spaces. They are connected
285  *    inside the package with HyperTransport links. On single-node
286  *    processors, the processor node is equivalent to chip/socket/package.
286  */
287 
288 struct cpuid_info {
289 	uint_t cpi_pass;		/* last pass completed */
290 	/*
291 	 * standard function information
292 	 */
293 	uint_t cpi_maxeax;		/* fn 0: %eax */
294 	char cpi_vendorstr[13];		/* fn 0: %ebx:%ecx:%edx */
295 	uint_t cpi_vendor;		/* enum of cpi_vendorstr */
296 
297 	uint_t cpi_family;		/* fn 1: extended family */
298 	uint_t cpi_model;		/* fn 1: extended model */
299 	uint_t cpi_step;		/* fn 1: stepping */
300 	chipid_t cpi_chipid;		/* fn 1: %ebx:  Intel: chip # */
301 					/*		AMD: package/socket # */
302 	uint_t cpi_brandid;		/* fn 1: %ebx: brand ID */
303 	int cpi_clogid;			/* fn 1: %ebx: thread # */
304 	uint_t cpi_ncpu_per_chip;	/* fn 1: %ebx: logical cpu count */
305 	uint8_t cpi_cacheinfo[16];	/* fn 2: intel-style cache desc */
306 	uint_t cpi_ncache;		/* fn 2: number of elements */
307 	uint_t cpi_ncpu_shr_last_cache;	/* fn 4: %eax: ncpus sharing cache */
308 	id_t cpi_last_lvl_cacheid;	/* fn 4: %eax: derived cache id */
309 	uint_t cpi_std_4_size;		/* fn 4: number of fn 4 elements */
310 	struct cpuid_regs **cpi_std_4;	/* fn 4: %ecx == 0 .. fn4_size */
311 	struct cpuid_regs cpi_std[NMAX_CPI_STD];	/* 0 .. 5 */
312 	/*
313 	 * extended function information
314 	 */
315 	uint_t cpi_xmaxeax;		/* fn 0x80000000: %eax */
316 	char cpi_brandstr[49];		/* fn 0x8000000[234] */
317 	uint8_t cpi_pabits;		/* fn 0x80000006: %eax */
318 	uint8_t	cpi_vabits;		/* fn 0x80000006: %eax */
319 	struct	cpuid_regs cpi_extd[NMAX_CPI_EXTD];	/* 0x800000XX */
320 
321 	id_t cpi_coreid;		/* same coreid => strands share core */
322 	int cpi_pkgcoreid;		/* core number within single package */
323 	uint_t cpi_ncore_per_chip;	/* AMD: fn 0x80000008: %ecx[7-0] */
324 					/* Intel: fn 4: %eax[31-26] */
325 	/*
326 	 * supported feature information
327 	 */
328 	uint32_t cpi_support[5];
329 #define	STD_EDX_FEATURES	0
330 #define	AMD_EDX_FEATURES	1
331 #define	TM_EDX_FEATURES		2
332 #define	STD_ECX_FEATURES	3
333 #define	AMD_ECX_FEATURES	4
334 	/*
335 	 * Synthesized information, where known.
336 	 */
337 	uint32_t cpi_chiprev;		/* See X86_CHIPREV_* in x86_archext.h */
338 	const char *cpi_chiprevstr;	/* May be NULL if chiprev unknown */
339 	uint32_t cpi_socket;		/* Chip package/socket type */
340 
341 	struct mwait_info cpi_mwait;	/* fn 5: monitor/mwait info */
342 	uint32_t cpi_apicid;
343 	uint_t cpi_procnodeid;		/* AMD: nodeID on HT, Intel: chipid */
344 	uint_t cpi_procnodes_per_pkg;	/* AMD: # of nodes in the package */
345 					/* Intel: 1 */
346 
347 	struct xsave_info cpi_xsave;	/* fn D: xsave/xrstor info */
348 };
349 
350 
351 static struct cpuid_info cpuid_info0;
352 
353 /*
354  * These bit fields are defined by the Intel Application Note AP-485
355  * "Intel Processor Identification and the CPUID Instruction"
356  */
357 #define	CPI_FAMILY_XTD(cpi)	BITX((cpi)->cpi_std[1].cp_eax, 27, 20)
358 #define	CPI_MODEL_XTD(cpi)	BITX((cpi)->cpi_std[1].cp_eax, 19, 16)
359 #define	CPI_TYPE(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 13, 12)
360 #define	CPI_FAMILY(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 11, 8)
361 #define	CPI_STEP(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 3, 0)
362 #define	CPI_MODEL(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 7, 4)
363 
364 #define	CPI_FEATURES_EDX(cpi)		((cpi)->cpi_std[1].cp_edx)
365 #define	CPI_FEATURES_ECX(cpi)		((cpi)->cpi_std[1].cp_ecx)
366 #define	CPI_FEATURES_XTD_EDX(cpi)	((cpi)->cpi_extd[1].cp_edx)
367 #define	CPI_FEATURES_XTD_ECX(cpi)	((cpi)->cpi_extd[1].cp_ecx)
368 
369 #define	CPI_BRANDID(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 7, 0)
370 #define	CPI_CHUNKS(cpi)		BITX((cpi)->cpi_std[1].cp_ebx, 15, 8)
371 #define	CPI_CPU_COUNT(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 23, 16)
372 #define	CPI_APIC_ID(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 31, 24)
373 
374 #define	CPI_MAXEAX_MAX		0x100		/* sanity control */
375 #define	CPI_XMAXEAX_MAX		0x80000100
376 #define	CPI_FN4_ECX_MAX		0x20		/* sanity: max fn 4 levels */
377 #define	CPI_FNB_ECX_MAX		0x20		/* sanity: max fn B levels */
378 
379 /*
380  * Function 4 (Deterministic Cache Parameters) macros
381  * Defined by Intel Application Note AP-485
382  */
383 #define	CPI_NUM_CORES(regs)		BITX((regs)->cp_eax, 31, 26)
384 #define	CPI_NTHR_SHR_CACHE(regs)	BITX((regs)->cp_eax, 25, 14)
385 #define	CPI_FULL_ASSOC_CACHE(regs)	BITX((regs)->cp_eax, 9, 9)
386 #define	CPI_SELF_INIT_CACHE(regs)	BITX((regs)->cp_eax, 8, 8)
387 #define	CPI_CACHE_LVL(regs)		BITX((regs)->cp_eax, 7, 5)
388 #define	CPI_CACHE_TYPE(regs)		BITX((regs)->cp_eax, 4, 0)
389 #define	CPI_CPU_LEVEL_TYPE(regs)	BITX((regs)->cp_ecx, 15, 8)
390 
391 #define	CPI_CACHE_WAYS(regs)		BITX((regs)->cp_ebx, 31, 22)
392 #define	CPI_CACHE_PARTS(regs)		BITX((regs)->cp_ebx, 21, 12)
393 #define	CPI_CACHE_COH_LN_SZ(regs)	BITX((regs)->cp_ebx, 11, 0)
394 
395 #define	CPI_CACHE_SETS(regs)		BITX((regs)->cp_ecx, 31, 0)
396 
397 #define	CPI_PREFCH_STRIDE(regs)		BITX((regs)->cp_edx, 9, 0)
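/*
 * For illustration only, a minimal sketch (not compiled) of how the
 * function 4 fields above combine into a cache size in bytes, per AP-485:
 * (ways + 1) * (partitions + 1) * (line size + 1) * (sets + 1).
 * The function name is hypothetical.
 */
#if 0
static uint64_t
example_fn4_cache_size(struct cpuid_regs *regs)
{
	return ((uint64_t)(CPI_CACHE_WAYS(regs) + 1) *
	    (CPI_CACHE_PARTS(regs) + 1) *
	    (CPI_CACHE_COH_LN_SZ(regs) + 1) *
	    (CPI_CACHE_SETS(regs) + 1));
}
#endif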
398 
399 
400 /*
401  * A couple of shorthand macros to identify "later" P6-family chips
402  * like the Pentium M and Core.  First, the "older" P6-based stuff
403  * (loosely defined as "pre-Pentium-4"):
404  * P6, PII, Mobile PII, PII Xeon, PIII, Mobile PIII, PIII Xeon
405  */
406 
407 #define	IS_LEGACY_P6(cpi) (			\
408 	cpi->cpi_family == 6 && 		\
409 		(cpi->cpi_model == 1 ||		\
410 		cpi->cpi_model == 3 ||		\
411 		cpi->cpi_model == 5 ||		\
412 		cpi->cpi_model == 6 ||		\
413 		cpi->cpi_model == 7 ||		\
414 		cpi->cpi_model == 8 ||		\
415 		cpi->cpi_model == 0xA ||	\
416 		cpi->cpi_model == 0xB)		\
417 )
418 
419 /* A "new F6" is everything with family 6 that's not the above */
420 #define	IS_NEW_F6(cpi) ((cpi->cpi_family == 6) && !IS_LEGACY_P6(cpi))
421 
422 /* Extended family/model support */
423 #define	IS_EXTENDED_MODEL_INTEL(cpi) (cpi->cpi_family == 0x6 || \
424 	cpi->cpi_family >= 0xf)
425 
426 /*
427  * Info for monitor/mwait idle loop.
428  *
429  * See cpuid section of "Intel 64 and IA-32 Architectures Software Developer's
430  * Manual Volume 2A: Instruction Set Reference, A-M" #25366-022US, November
431  * 2006.
432  * See MONITOR/MWAIT section of "AMD64 Architecture Programmer's Manual
433  * Documentation Updates" #33633, Rev 2.05, December 2006.
434  */
435 #define	MWAIT_SUPPORT		(0x00000001)	/* mwait supported */
436 #define	MWAIT_EXTENSIONS	(0x00000002)	/* extensions supported */
437 #define	MWAIT_ECX_INT_ENABLE	(0x00000004)	/* ecx 1 extension supported */
438 #define	MWAIT_SUPPORTED(cpi)	((cpi)->cpi_std[1].cp_ecx & CPUID_INTC_ECX_MON)
439 #define	MWAIT_INT_ENABLE(cpi)	((cpi)->cpi_std[5].cp_ecx & 0x2)
440 #define	MWAIT_EXTENSION(cpi)	((cpi)->cpi_std[5].cp_ecx & 0x1)
441 #define	MWAIT_SIZE_MIN(cpi)	BITX((cpi)->cpi_std[5].cp_eax, 15, 0)
442 #define	MWAIT_SIZE_MAX(cpi)	BITX((cpi)->cpi_std[5].cp_ebx, 15, 0)
443 /*
444  * Number of sub-cstates for a given c-state.
445  */
446 #define	MWAIT_NUM_SUBC_STATES(cpi, c_state)			\
447 	BITX((cpi)->cpi_std[5].cp_edx, c_state + 3, c_state)
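/*
 * For illustration only (not compiled): the macro above takes a bit
 * position, so C-state n's 4-bit sub-state count lives at bit n * 4.
 * With cp_edx == 0x2220, C1, C2 and C3 would each advertise two
 * sub-states and C0 none.  The function name is hypothetical.
 */
#if 0
static void
example_mwait_substates(struct cpuid_info *cpi, uint_t n_sub[8])
{
	uint_t c;

	for (c = 0; c < 8; c++)
		n_sub[c] = MWAIT_NUM_SUBC_STATES(cpi, c * 4);
}
#endif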
448 
449 /*
450  * XSAVE leaf 0xD enumeration
451  */
452 #define	CPUID_LEAFD_2_YMM_OFFSET	576
453 #define	CPUID_LEAFD_2_YMM_SIZE		256
454 
455 /*
456  * Functions we consume from cpuid_subr.c;  don't publish these in a header
457  * file to try and keep people using the expected cpuid_* interfaces.
458  */
459 extern uint32_t _cpuid_skt(uint_t, uint_t, uint_t, uint_t);
460 extern const char *_cpuid_sktstr(uint_t, uint_t, uint_t, uint_t);
461 extern uint32_t _cpuid_chiprev(uint_t, uint_t, uint_t, uint_t);
462 extern const char *_cpuid_chiprevstr(uint_t, uint_t, uint_t, uint_t);
463 extern uint_t _cpuid_vendorstr_to_vendorcode(char *);
464 
465 /*
466  * Apply various platform-dependent restrictions where the
467  * underlying platform's constraints mean the CPU must be marked
468  * as less capable than its cpuid instruction would imply.
469  */
470 #if defined(__xpv)
471 static void
472 platform_cpuid_mangle(uint_t vendor, uint32_t eax, struct cpuid_regs *cp)
473 {
474 	switch (eax) {
475 	case 1: {
476 		uint32_t mcamask = DOMAIN_IS_INITDOMAIN(xen_info) ?
477 		    0 : CPUID_INTC_EDX_MCA;
478 		cp->cp_edx &=
479 		    ~(mcamask |
480 		    CPUID_INTC_EDX_PSE |
481 		    CPUID_INTC_EDX_VME | CPUID_INTC_EDX_DE |
482 		    CPUID_INTC_EDX_SEP | CPUID_INTC_EDX_MTRR |
483 		    CPUID_INTC_EDX_PGE | CPUID_INTC_EDX_PAT |
484 		    CPUID_AMD_EDX_SYSC | CPUID_INTC_EDX_SEP |
485 		    CPUID_INTC_EDX_PSE36 | CPUID_INTC_EDX_HTT);
486 		break;
487 	}
488 
489 	case 0x80000001:
490 		cp->cp_edx &=
491 		    ~(CPUID_AMD_EDX_PSE |
492 		    CPUID_INTC_EDX_VME | CPUID_INTC_EDX_DE |
493 		    CPUID_AMD_EDX_MTRR | CPUID_AMD_EDX_PGE |
494 		    CPUID_AMD_EDX_PAT | CPUID_AMD_EDX_PSE36 |
495 		    CPUID_AMD_EDX_SYSC | CPUID_INTC_EDX_SEP |
496 		    CPUID_AMD_EDX_TSCP);
497 		cp->cp_ecx &= ~CPUID_AMD_ECX_CMP_LGCY;
498 		break;
499 	default:
500 		break;
501 	}
502 
503 	switch (vendor) {
504 	case X86_VENDOR_Intel:
505 		switch (eax) {
506 		case 4:
507 			/*
508 			 * Zero out the (ncores-per-chip - 1) field
509 			 */
510 			cp->cp_eax &= 0x03ffffff;
511 			break;
512 		default:
513 			break;
514 		}
515 		break;
516 	case X86_VENDOR_AMD:
517 		switch (eax) {
518 
519 		case 0x80000001:
520 			cp->cp_ecx &= ~CPUID_AMD_ECX_CR8D;
521 			break;
522 
523 		case 0x80000008:
524 			/*
525 			 * Zero out the (ncores-per-chip - 1) field
526 			 */
527 			cp->cp_ecx &= 0xffffff00;
528 			break;
529 		default:
530 			break;
531 		}
532 		break;
533 	default:
534 		break;
535 	}
536 }
537 #else
538 #define	platform_cpuid_mangle(vendor, eax, cp)	/* nothing */
539 #endif
540 
541 /*
542  *  Some undocumented ways of patching the results of the cpuid
543  *  instruction to permit running Solaris 10 on future cpus that
544  *  we don't currently support.  Could be set to non-zero values
545  *  via settings in eeprom.
546  */
547 
548 uint32_t cpuid_feature_ecx_include;
549 uint32_t cpuid_feature_ecx_exclude;
550 uint32_t cpuid_feature_edx_include;
551 uint32_t cpuid_feature_edx_exclude;
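/*
 * For illustration only (the value shown is arbitrary): on x86 these can
 * also be set from /etc/system, e.g.
 *
 *	set cpuid_feature_ecx_exclude = 0x100000
 *
 * which masks the corresponding %ecx feature bit at the next boot.
 */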
552 
553 /*
554  * Allocate space for mcpu_cpi in the machcpu structure for all non-boot CPUs.
555  */
556 void
557 cpuid_alloc_space(cpu_t *cpu)
558 {
559 	/*
560 	 * By convention, cpu0 is the boot cpu, which is set up
561 	 * before memory allocation is available.  All other cpus get
562 	 * their cpuid_info struct allocated here.
563 	 */
564 	ASSERT(cpu->cpu_id != 0);
565 	ASSERT(cpu->cpu_m.mcpu_cpi == NULL);
566 	cpu->cpu_m.mcpu_cpi =
567 	    kmem_zalloc(sizeof (*cpu->cpu_m.mcpu_cpi), KM_SLEEP);
568 }
569 
570 void
571 cpuid_free_space(cpu_t *cpu)
572 {
573 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
574 	int i;
575 
576 	ASSERT(cpi != NULL);
577 	ASSERT(cpi != &cpuid_info0);
578 
579 	/*
580 	 * Free up any function 4 related dynamic storage
581 	 */
582 	for (i = 1; i < cpi->cpi_std_4_size; i++)
583 		kmem_free(cpi->cpi_std_4[i], sizeof (struct cpuid_regs));
584 	if (cpi->cpi_std_4_size > 0)
585 		kmem_free(cpi->cpi_std_4,
586 		    cpi->cpi_std_4_size * sizeof (struct cpuid_regs *));
587 
588 	kmem_free(cpi, sizeof (*cpi));
589 	cpu->cpu_m.mcpu_cpi = NULL;
590 }
591 
592 #if !defined(__xpv)
593 
594 /*
595  * Determine the type of the underlying platform. This is used to customize
596  * initialization of various subsystems (e.g. TSC). determine_platform() must
597  * only ever be called once, to prevent two processors from seeing different
598  * values of platform_type; it must be called before cpuid_pass1(), the
599  * earliest consumer to execute.
600  */
601 void
602 determine_platform(void)
603 {
604 	struct cpuid_regs cp;
605 	char *xen_str;
606 	uint32_t xen_signature[4], base;
607 
608 	ASSERT(platform_type == -1);
609 
610 	platform_type = HW_NATIVE;
611 
612 	if (!enable_platform_detection)
613 		return;
614 
615 	/*
616 	 * In a fully virtualized domain, Xen's pseudo-cpuid function
617 	 * returns a string representing the Xen signature in %ebx, %ecx,
618 	 * and %edx. %eax contains the maximum supported cpuid function.
619 	 * We need at least a (base + 2) leaf value to do what we want
620 	 * to do. Try different base values, since the hypervisor might
621 	 * use a different one depending on whether Hyper-V emulation
622 	 * is switched on by default or not.
623 	 */
624 	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
625 		cp.cp_eax = base;
626 		(void) __cpuid_insn(&cp);
627 		xen_signature[0] = cp.cp_ebx;
628 		xen_signature[1] = cp.cp_ecx;
629 		xen_signature[2] = cp.cp_edx;
630 		xen_signature[3] = 0;
631 		xen_str = (char *)xen_signature;
632 		if (strcmp("XenVMMXenVMM", xen_str) == 0 &&
633 		    cp.cp_eax >= (base + 2)) {
634 			platform_type = HW_XEN_HVM;
635 			return;
636 		}
637 	}
638 
639 	if (vmware_platform()) /* running under vmware hypervisor? */
640 		platform_type = HW_VMWARE;
641 }
642 
643 int
644 get_hwenv(void)
645 {
646 	ASSERT(platform_type != -1);
647 	return (platform_type);
648 }
649 
650 int
651 is_controldom(void)
652 {
653 	return (0);
654 }
655 
656 #else
657 
658 int
659 get_hwenv(void)
660 {
661 	return (HW_XEN_PV);
662 }
663 
664 int
665 is_controldom(void)
666 {
667 	return (DOMAIN_IS_INITDOMAIN(xen_info));
668 }
669 
670 #endif	/* __xpv */
671 
672 static void
673 cpuid_intel_getids(cpu_t *cpu, void *feature)
674 {
675 	uint_t i;
676 	uint_t chipid_shift = 0;
677 	uint_t coreid_shift = 0;
678 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
679 
680 	for (i = 1; i < cpi->cpi_ncpu_per_chip; i <<= 1)
681 		chipid_shift++;
682 
683 	cpi->cpi_chipid = cpi->cpi_apicid >> chipid_shift;
684 	cpi->cpi_clogid = cpi->cpi_apicid & ((1 << chipid_shift) - 1);
685 
686 	if (is_x86_feature(feature, X86FSET_CMP)) {
687 		/*
688 		 * Multi-core (and possibly multi-threaded)
689 		 * processors.
690 		 */
691 		uint_t ncpu_per_core;
692 		if (cpi->cpi_ncore_per_chip == 1)
693 			ncpu_per_core = cpi->cpi_ncpu_per_chip;
694 		else if (cpi->cpi_ncore_per_chip > 1)
695 			ncpu_per_core = cpi->cpi_ncpu_per_chip /
696 			    cpi->cpi_ncore_per_chip;
697 		/*
698 		 * 8-bit APIC IDs on dual-core Pentiums
699 		 * look like this:
700 		 *
701 		 * +-----------------------+------+------+
702 		 * | Physical Package ID   |  MC  |  HT  |
703 		 * +-----------------------+------+------+
704 		 * <------- chipid -------->
705 		 * <------- coreid --------------->
706 		 *			   <--- clogid -->
707 		 *			   <------>
708 		 *			   pkgcoreid
709 		 *
710 		 * Where the number of bits necessary to
711 		 * represent the MC and HT fields together equals
712 		 * the minimum number of bits necessary to
713 		 * store the value of cpi->cpi_ncpu_per_chip.
714 		 * Of those bits, the MC part uses the number
715 		 * of bits necessary to store the value of
716 		 * cpi->cpi_ncore_per_chip.
717 		 */
718 		for (i = 1; i < ncpu_per_core; i <<= 1)
719 			coreid_shift++;
720 		cpi->cpi_coreid = cpi->cpi_apicid >> coreid_shift;
721 		cpi->cpi_pkgcoreid = cpi->cpi_clogid >> coreid_shift;
722 	} else if (is_x86_feature(feature, X86FSET_HTT)) {
723 		/*
724 		 * Single-core multi-threaded processors.
725 		 */
726 		cpi->cpi_coreid = cpi->cpi_chipid;
727 		cpi->cpi_pkgcoreid = 0;
728 	}
729 	cpi->cpi_procnodeid = cpi->cpi_chipid;
730 }
731 
732 static void
733 cpuid_amd_getids(cpu_t *cpu)
734 {
735 	int i, first_half, coreidsz;
736 	uint32_t nb_caps_reg;
737 	uint_t node2_1;
738 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
739 
740 	/*
741 	 * AMD CMP chips currently have a single thread per core.
742 	 *
743 	 * Since no two cpus share a core we must assign a distinct coreid
744 	 * per cpu, and we do this by using the cpu_id.  This scheme does not,
745 	 * however, guarantee that sibling cores of a chip will have sequential
746 	 * coreids starting at a multiple of the number of cores per chip -
747 	 * that is usually the case, but if the ACPI MADT table is presented
748 	 * in a different order then we need to perform a few more gymnastics
749 	 * for the pkgcoreid.
750 	 *
751 	 * All processors in the system have the same number of enabled
752 	 * cores. Cores within a processor are always numbered sequentially
753 	 * from 0 regardless of how many or which are disabled, and there
754 	 * is no way for the operating system to discover the real core id when some
755 	 * are disabled.
756 	 */
757 
758 	cpi->cpi_coreid = cpu->cpu_id;
759 
760 	if (cpi->cpi_xmaxeax >= 0x80000008) {
761 
762 		coreidsz = BITX((cpi)->cpi_extd[8].cp_ecx, 15, 12);
763 
764 		/*
765 		 * In AMD parlance, a chip is really a node, while Solaris
766 		 * sees a chip as equivalent to a socket/package.
767 		 */
768 		cpi->cpi_ncore_per_chip =
769 		    BITX((cpi)->cpi_extd[8].cp_ecx, 7, 0) + 1;
770 		if (coreidsz == 0) {
771 			/* Use legacy method */
772 			for (i = 1; i < cpi->cpi_ncore_per_chip; i <<= 1)
773 				coreidsz++;
774 			if (coreidsz == 0)
775 				coreidsz = 1;
776 		}
777 	} else {
778 		/* Assume single-core part */
779 		cpi->cpi_ncore_per_chip = 1;
780 		coreidsz = 1;
781 	}
782 
783 	cpi->cpi_clogid = cpi->cpi_pkgcoreid =
784 	    cpi->cpi_apicid & ((1<<coreidsz) - 1);
785 	cpi->cpi_ncpu_per_chip = cpi->cpi_ncore_per_chip;
786 
787 	/* Get nodeID */
788 	if (cpi->cpi_family == 0xf) {
789 		cpi->cpi_procnodeid = (cpi->cpi_apicid >> coreidsz) & 7;
790 		cpi->cpi_chipid = cpi->cpi_procnodeid;
791 	} else if (cpi->cpi_family == 0x10) {
792 		/*
793 		 * See if we are a multi-node processor.
794 		 * All processors in the system have the same number of nodes
795 		 */
796 		nb_caps_reg =  pci_getl_func(0, 24, 3, 0xe8);
797 		if ((cpi->cpi_model < 8) || BITX(nb_caps_reg, 29, 29) == 0) {
798 			/* Single-node */
799 			cpi->cpi_procnodeid = BITX(cpi->cpi_apicid, 5,
800 			    coreidsz);
801 			cpi->cpi_chipid = cpi->cpi_procnodeid;
802 		} else {
803 
804 			/*
805 			 * Multi-node revision D (2 nodes per package
806 			 * are supported)
807 			 */
808 			cpi->cpi_procnodes_per_pkg = 2;
809 
810 			first_half = (cpi->cpi_pkgcoreid <=
811 			    (cpi->cpi_ncore_per_chip/2 - 1));
812 
813 			if (cpi->cpi_apicid == cpi->cpi_pkgcoreid) {
814 				/* We are BSP */
815 				cpi->cpi_procnodeid = (first_half ? 0 : 1);
816 				cpi->cpi_chipid = cpi->cpi_procnodeid >> 1;
817 			} else {
818 
819 				/* We are AP */
820 				/* NodeId[2:1] bits to use for reading F3xe8 */
821 				node2_1 = BITX(cpi->cpi_apicid, 5, 4) << 1;
822 
823 				nb_caps_reg =
824 				    pci_getl_func(0, 24 + node2_1, 3, 0xe8);
825 
826 				/*
827 				 * Check IntNodeNum bit (31:30, but bit 31 is
828 				 * always 0 on dual-node processors)
829 				 */
830 				if (BITX(nb_caps_reg, 30, 30) == 0)
831 					cpi->cpi_procnodeid = node2_1 +
832 					    !first_half;
833 				else
834 					cpi->cpi_procnodeid = node2_1 +
835 					    first_half;
836 
837 				cpi->cpi_chipid = cpi->cpi_procnodeid >> 1;
838 			}
839 		}
840 	} else if (cpi->cpi_family >= 0x11) {
841 		cpi->cpi_procnodeid = (cpi->cpi_apicid >> coreidsz) & 7;
842 		cpi->cpi_chipid = cpi->cpi_procnodeid;
843 	} else {
844 		cpi->cpi_procnodeid = 0;
845 		cpi->cpi_chipid = cpi->cpi_procnodeid;
846 	}
847 }
848 
849 /*
850  * Setup XFeature_Enabled_Mask register. Required by xsave feature.
851  */
852 void
853 setup_xfem(void)
854 {
855 	uint64_t flags = XFEATURE_LEGACY_FP;
856 
857 	ASSERT(is_x86_feature(x86_featureset, X86FSET_XSAVE));
858 
859 	if (is_x86_feature(x86_featureset, X86FSET_SSE))
860 		flags |= XFEATURE_SSE;
861 
862 	if (is_x86_feature(x86_featureset, X86FSET_AVX))
863 		flags |= XFEATURE_AVX;
864 
865 	set_xcr(XFEATURE_ENABLED_MASK, flags);
866 
867 	xsave_bv_all = flags;
868 }
869 
870 void
871 cpuid_pass1(cpu_t *cpu, uchar_t *featureset)
872 {
873 	uint32_t mask_ecx, mask_edx;
874 	struct cpuid_info *cpi;
875 	struct cpuid_regs *cp;
876 	int xcpuid;
877 #if !defined(__xpv)
878 	extern int idle_cpu_prefer_mwait;
879 #endif
880 
881 	/*
882 	 * Space statically allocated for BSP, ensure pointer is set
883 	 */
884 	if (cpu->cpu_id == 0) {
885 		if (cpu->cpu_m.mcpu_cpi == NULL)
886 			cpu->cpu_m.mcpu_cpi = &cpuid_info0;
887 	}
888 
889 	add_x86_feature(featureset, X86FSET_CPUID);
890 
891 	cpi = cpu->cpu_m.mcpu_cpi;
892 	ASSERT(cpi != NULL);
893 	cp = &cpi->cpi_std[0];
894 	cp->cp_eax = 0;
895 	cpi->cpi_maxeax = __cpuid_insn(cp);
896 	{
897 		uint32_t *iptr = (uint32_t *)cpi->cpi_vendorstr;
898 		*iptr++ = cp->cp_ebx;
899 		*iptr++ = cp->cp_edx;
900 		*iptr++ = cp->cp_ecx;
901 		*(char *)&cpi->cpi_vendorstr[12] = '\0';
902 	}
903 
904 	cpi->cpi_vendor = _cpuid_vendorstr_to_vendorcode(cpi->cpi_vendorstr);
905 	x86_vendor = cpi->cpi_vendor; /* for compatibility */
906 
907 	/*
908 	 * Limit the range in case of weird hardware
909 	 */
910 	if (cpi->cpi_maxeax > CPI_MAXEAX_MAX)
911 		cpi->cpi_maxeax = CPI_MAXEAX_MAX;
912 	if (cpi->cpi_maxeax < 1)
913 		goto pass1_done;
914 
915 	cp = &cpi->cpi_std[1];
916 	cp->cp_eax = 1;
917 	(void) __cpuid_insn(cp);
918 
919 	/*
920 	 * Extract identifying constants for easy access.
921 	 */
922 	cpi->cpi_model = CPI_MODEL(cpi);
923 	cpi->cpi_family = CPI_FAMILY(cpi);
924 
925 	if (cpi->cpi_family == 0xf)
926 		cpi->cpi_family += CPI_FAMILY_XTD(cpi);
927 
928 	/*
929 	 * Beware: AMD uses "extended model" iff base *FAMILY* == 0xf.
930 	 * Intel uses it iff family == 0x6 or family >= 0xf; everyone else,
931 	 * presumably, uses model == 0xf (max value means possible overflow).  Sigh.
932 	 */
933 
934 	switch (cpi->cpi_vendor) {
935 	case X86_VENDOR_Intel:
936 		if (IS_EXTENDED_MODEL_INTEL(cpi))
937 			cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
938 		break;
939 	case X86_VENDOR_AMD:
940 		if (CPI_FAMILY(cpi) == 0xf)
941 			cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
942 		break;
943 	default:
944 		if (cpi->cpi_model == 0xf)
945 			cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
946 		break;
947 	}
948 
949 	cpi->cpi_step = CPI_STEP(cpi);
950 	cpi->cpi_brandid = CPI_BRANDID(cpi);
951 
952 	/*
953 	 * *default* assumptions:
954 	 * - believe %edx feature word
955 	 * - ignore %ecx feature word
956 	 * - 32-bit virtual and physical addressing
957 	 */
958 	mask_edx = 0xffffffff;
959 	mask_ecx = 0;
960 
961 	cpi->cpi_pabits = cpi->cpi_vabits = 32;
962 
963 	switch (cpi->cpi_vendor) {
964 	case X86_VENDOR_Intel:
965 		if (cpi->cpi_family == 5)
966 			x86_type = X86_TYPE_P5;
967 		else if (IS_LEGACY_P6(cpi)) {
968 			x86_type = X86_TYPE_P6;
969 			pentiumpro_bug4046376 = 1;
970 			pentiumpro_bug4064495 = 1;
971 			/*
972 			 * Clear the SEP bit when it was set erroneously
973 			 */
974 			if (cpi->cpi_model < 3 && cpi->cpi_step < 3)
975 				cp->cp_edx &= ~CPUID_INTC_EDX_SEP;
976 		} else if (IS_NEW_F6(cpi) || cpi->cpi_family == 0xf) {
977 			x86_type = X86_TYPE_P4;
978 			/*
979 			 * We don't currently depend on any of the %ecx
980 			 * features until Prescott, so we'll only check
981 			 * this from P4 onwards.  We might want to revisit
982 			 * that idea later.
983 			 */
984 			mask_ecx = 0xffffffff;
985 		} else if (cpi->cpi_family > 0xf)
986 			mask_ecx = 0xffffffff;
987 		/*
988 		 * We don't support MONITOR/MWAIT if leaf 5 is not available
989 		 * to obtain the monitor linesize.
990 		 */
991 		if (cpi->cpi_maxeax < 5)
992 			mask_ecx &= ~CPUID_INTC_ECX_MON;
993 		break;
994 	case X86_VENDOR_IntelClone:
995 	default:
996 		break;
997 	case X86_VENDOR_AMD:
998 #if defined(OPTERON_ERRATUM_108)
999 		if (cpi->cpi_family == 0xf && cpi->cpi_model == 0xe) {
1000 			cp->cp_eax = (0xf0f & cp->cp_eax) | 0xc0;
1001 			cpi->cpi_model = 0xc;
1002 		} else
1003 #endif
1004 		if (cpi->cpi_family == 5) {
1005 			/*
1006 			 * AMD K5 and K6
1007 			 *
1008 			 * These CPUs have an incomplete implementation
1009 			 * of MCA/MCE which we mask away.
1010 			 */
1011 			mask_edx &= ~(CPUID_INTC_EDX_MCE | CPUID_INTC_EDX_MCA);
1012 
1013 			/*
1014 			 * Model 0 uses the wrong (APIC) bit
1015 			 * to indicate PGE.  Fix it here.
1016 			 */
1017 			if (cpi->cpi_model == 0) {
1018 				if (cp->cp_edx & 0x200) {
1019 					cp->cp_edx &= ~0x200;
1020 					cp->cp_edx |= CPUID_INTC_EDX_PGE;
1021 				}
1022 			}
1023 
1024 			/*
1025 			 * Early models had problems w/ MMX; disable.
1026 			 */
1027 			if (cpi->cpi_model < 6)
1028 				mask_edx &= ~CPUID_INTC_EDX_MMX;
1029 		}
1030 
1031 		/*
1032 		 * For newer families, SSE3 and CX16, at least, are valid;
1033 		 * enable all
1034 		 */
1035 		if (cpi->cpi_family >= 0xf)
1036 			mask_ecx = 0xffffffff;
1037 		/*
1038 		 * We don't support MONITOR/MWAIT if leaf 5 is not available
1039 		 * to obtain the monitor linesize.
1040 		 */
1041 		if (cpi->cpi_maxeax < 5)
1042 			mask_ecx &= ~CPUID_INTC_ECX_MON;
1043 
1044 #if !defined(__xpv)
1045 		/*
1046 		 * Do not use MONITOR/MWAIT to halt in the idle loop on any AMD
1047 		 * processors.  AMD does not intend MWAIT to be used in the cpu
1048 		 * idle loop on current and future processors.  10h and future
1049 		 * AMD processors use more power in MWAIT than HLT.
1050 		 * Pre-family-10h Opterons do not have the MWAIT instruction.
1051 		 */
1052 		idle_cpu_prefer_mwait = 0;
1053 #endif
1054 
1055 		break;
1056 	case X86_VENDOR_TM:
1057 		/*
1058 		 * work around the NT workaround in CMS 4.1
1059 		 */
1060 		if (cpi->cpi_family == 5 && cpi->cpi_model == 4 &&
1061 		    (cpi->cpi_step == 2 || cpi->cpi_step == 3))
1062 			cp->cp_edx |= CPUID_INTC_EDX_CX8;
1063 		break;
1064 	case X86_VENDOR_Centaur:
1065 		/*
1066 		 * work around the NT workarounds again
1067 		 */
1068 		if (cpi->cpi_family == 6)
1069 			cp->cp_edx |= CPUID_INTC_EDX_CX8;
1070 		break;
1071 	case X86_VENDOR_Cyrix:
1072 		/*
1073 		 * We rely heavily on the probing in locore
1074 		 * to actually figure out what parts, if any,
1075 		 * of the Cyrix cpuid instruction to believe.
1076 		 */
1077 		switch (x86_type) {
1078 		case X86_TYPE_CYRIX_486:
1079 			mask_edx = 0;
1080 			break;
1081 		case X86_TYPE_CYRIX_6x86:
1082 			mask_edx = 0;
1083 			break;
1084 		case X86_TYPE_CYRIX_6x86L:
1085 			mask_edx =
1086 			    CPUID_INTC_EDX_DE |
1087 			    CPUID_INTC_EDX_CX8;
1088 			break;
1089 		case X86_TYPE_CYRIX_6x86MX:
1090 			mask_edx =
1091 			    CPUID_INTC_EDX_DE |
1092 			    CPUID_INTC_EDX_MSR |
1093 			    CPUID_INTC_EDX_CX8 |
1094 			    CPUID_INTC_EDX_PGE |
1095 			    CPUID_INTC_EDX_CMOV |
1096 			    CPUID_INTC_EDX_MMX;
1097 			break;
1098 		case X86_TYPE_CYRIX_GXm:
1099 			mask_edx =
1100 			    CPUID_INTC_EDX_MSR |
1101 			    CPUID_INTC_EDX_CX8 |
1102 			    CPUID_INTC_EDX_CMOV |
1103 			    CPUID_INTC_EDX_MMX;
1104 			break;
1105 		case X86_TYPE_CYRIX_MediaGX:
1106 			break;
1107 		case X86_TYPE_CYRIX_MII:
1108 		case X86_TYPE_VIA_CYRIX_III:
1109 			mask_edx =
1110 			    CPUID_INTC_EDX_DE |
1111 			    CPUID_INTC_EDX_TSC |
1112 			    CPUID_INTC_EDX_MSR |
1113 			    CPUID_INTC_EDX_CX8 |
1114 			    CPUID_INTC_EDX_PGE |
1115 			    CPUID_INTC_EDX_CMOV |
1116 			    CPUID_INTC_EDX_MMX;
1117 			break;
1118 		default:
1119 			break;
1120 		}
1121 		break;
1122 	}
1123 
1124 #if defined(__xpv)
1125 	/*
1126 	 * Do not support MONITOR/MWAIT under a hypervisor
1127 	 */
1128 	mask_ecx &= ~CPUID_INTC_ECX_MON;
1129 	/*
1130 	 * Do not support XSAVE under a hypervisor for now
1131 	 */
1132 	xsave_force_disable = B_TRUE;
1133 
1134 #endif	/* __xpv */
1135 
1136 	if (xsave_force_disable) {
1137 		mask_ecx &= ~CPUID_INTC_ECX_XSAVE;
1138 		mask_ecx &= ~CPUID_INTC_ECX_AVX;
1139 	}
1140 
1141 	/*
1142 	 * Now we've figured out the masks that determine
1143 	 * which bits we choose to believe, apply the masks
1144 	 * to the feature words, then map those feature words
1145 	 * into the kernel's featureset.
1146 	 */
1147 	cp->cp_edx &= mask_edx;
1148 	cp->cp_ecx &= mask_ecx;
1149 
1150 	/*
1151 	 * apply any platform restrictions (we don't call this
1152 	 * immediately after __cpuid_insn here, because we need the
1153 	 * workarounds applied above first)
1154 	 */
1155 	platform_cpuid_mangle(cpi->cpi_vendor, 1, cp);
1156 
1157 	/*
1158 	 * fold in overrides from the "eeprom" mechanism
1159 	 */
1160 	cp->cp_edx |= cpuid_feature_edx_include;
1161 	cp->cp_edx &= ~cpuid_feature_edx_exclude;
1162 
1163 	cp->cp_ecx |= cpuid_feature_ecx_include;
1164 	cp->cp_ecx &= ~cpuid_feature_ecx_exclude;
1165 
1166 	if (cp->cp_edx & CPUID_INTC_EDX_PSE) {
1167 		add_x86_feature(featureset, X86FSET_LARGEPAGE);
1168 	}
1169 	if (cp->cp_edx & CPUID_INTC_EDX_TSC) {
1170 		add_x86_feature(featureset, X86FSET_TSC);
1171 	}
1172 	if (cp->cp_edx & CPUID_INTC_EDX_MSR) {
1173 		add_x86_feature(featureset, X86FSET_MSR);
1174 	}
1175 	if (cp->cp_edx & CPUID_INTC_EDX_MTRR) {
1176 		add_x86_feature(featureset, X86FSET_MTRR);
1177 	}
1178 	if (cp->cp_edx & CPUID_INTC_EDX_PGE) {
1179 		add_x86_feature(featureset, X86FSET_PGE);
1180 	}
1181 	if (cp->cp_edx & CPUID_INTC_EDX_CMOV) {
1182 		add_x86_feature(featureset, X86FSET_CMOV);
1183 	}
1184 	if (cp->cp_edx & CPUID_INTC_EDX_MMX) {
1185 		add_x86_feature(featureset, X86FSET_MMX);
1186 	}
1187 	if ((cp->cp_edx & CPUID_INTC_EDX_MCE) != 0 &&
1188 	    (cp->cp_edx & CPUID_INTC_EDX_MCA) != 0) {
1189 		add_x86_feature(featureset, X86FSET_MCA);
1190 	}
1191 	if (cp->cp_edx & CPUID_INTC_EDX_PAE) {
1192 		add_x86_feature(featureset, X86FSET_PAE);
1193 	}
1194 	if (cp->cp_edx & CPUID_INTC_EDX_CX8) {
1195 		add_x86_feature(featureset, X86FSET_CX8);
1196 	}
1197 	if (cp->cp_ecx & CPUID_INTC_ECX_CX16) {
1198 		add_x86_feature(featureset, X86FSET_CX16);
1199 	}
1200 	if (cp->cp_edx & CPUID_INTC_EDX_PAT) {
1201 		add_x86_feature(featureset, X86FSET_PAT);
1202 	}
1203 	if (cp->cp_edx & CPUID_INTC_EDX_SEP) {
1204 		add_x86_feature(featureset, X86FSET_SEP);
1205 	}
1206 	if (cp->cp_edx & CPUID_INTC_EDX_FXSR) {
1207 		/*
1208 		 * In our implementation, fxsave/fxrstor
1209 		 * are prerequisites before we'll even
1210 		 * try to do SSE things.
1211 		 */
1212 		if (cp->cp_edx & CPUID_INTC_EDX_SSE) {
1213 			add_x86_feature(featureset, X86FSET_SSE);
1214 		}
1215 		if (cp->cp_edx & CPUID_INTC_EDX_SSE2) {
1216 			add_x86_feature(featureset, X86FSET_SSE2);
1217 		}
1218 		if (cp->cp_ecx & CPUID_INTC_ECX_SSE3) {
1219 			add_x86_feature(featureset, X86FSET_SSE3);
1220 		}
1221 		if (cpi->cpi_vendor == X86_VENDOR_Intel) {
1222 			if (cp->cp_ecx & CPUID_INTC_ECX_SSSE3) {
1223 				add_x86_feature(featureset, X86FSET_SSSE3);
1224 			}
1225 			if (cp->cp_ecx & CPUID_INTC_ECX_SSE4_1) {
1226 				add_x86_feature(featureset, X86FSET_SSE4_1);
1227 			}
1228 			if (cp->cp_ecx & CPUID_INTC_ECX_SSE4_2) {
1229 				add_x86_feature(featureset, X86FSET_SSE4_2);
1230 			}
1231 			if (cp->cp_ecx & CPUID_INTC_ECX_AES) {
1232 				add_x86_feature(featureset, X86FSET_AES);
1233 			}
1234 			if (cp->cp_ecx & CPUID_INTC_ECX_PCLMULQDQ) {
1235 				add_x86_feature(featureset, X86FSET_PCLMULQDQ);
1236 			}
1237 
1238 			if (cp->cp_ecx & CPUID_INTC_ECX_XSAVE) {
1239 				add_x86_feature(featureset, X86FSET_XSAVE);
1240 				/* We only test AVX when there is XSAVE */
1241 				if (cp->cp_ecx & CPUID_INTC_ECX_AVX) {
1242 					add_x86_feature(featureset,
1243 					    X86FSET_AVX);
1244 				}
1245 			}
1246 		}
1247 	}
1248 	if (cp->cp_edx & CPUID_INTC_EDX_DE) {
1249 		add_x86_feature(featureset, X86FSET_DE);
1250 	}
1251 #if !defined(__xpv)
1252 	if (cp->cp_ecx & CPUID_INTC_ECX_MON) {
1253 
1254 		/*
1255 		 * We require the CLFLUSH instruction for the erratum
1256 		 * workaround that uses MONITOR/MWAIT.
1257 		 */
1258 		if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) {
1259 			cpi->cpi_mwait.support |= MWAIT_SUPPORT;
1260 			add_x86_feature(featureset, X86FSET_MWAIT);
1261 		} else {
1262 			extern int idle_cpu_assert_cflush_monitor;
1263 
1264 			/*
1265 			 * All processors we are aware of which have
1266 			 * MONITOR/MWAIT also have CLFLUSH.
1267 			 */
1268 			if (idle_cpu_assert_cflush_monitor) {
1269 				ASSERT((cp->cp_ecx & CPUID_INTC_ECX_MON) &&
1270 				    (cp->cp_edx & CPUID_INTC_EDX_CLFSH));
1271 			}
1272 		}
1273 	}
1274 #endif	/* __xpv */
1275 
1276 	if (cp->cp_ecx & CPUID_INTC_ECX_VMX) {
1277 		add_x86_feature(featureset, X86FSET_VMX);
1278 	}
1279 
1280 	/*
1281 	 * Only needed the first time; the rest of the cpus would follow suit.
1282 	 * We only capture this for the boot cpu.
1283 	 */
1284 	if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) {
1285 		add_x86_feature(featureset, X86FSET_CLFSH);
1286 		x86_clflush_size = (BITX(cp->cp_ebx, 15, 8) * 8);
1287 	}
1288 	if (is_x86_feature(featureset, X86FSET_PAE))
1289 		cpi->cpi_pabits = 36;
1290 
1291 	/*
1292 	 * Hyperthreading configuration is slightly tricky on Intel
1293 	 * and pure clones, and even trickier on AMD.
1294 	 *
1295 	 * (AMD chose to set the HTT bit on their CMP processors,
1296 	 * even though they're not actually hyperthreaded.  Thus it
1297 	 * takes a bit more work to figure out what's really going
1298 	 * on ... see the handling of the CMP_LGCY bit below)
1299 	 */
1300 	if (cp->cp_edx & CPUID_INTC_EDX_HTT) {
1301 		cpi->cpi_ncpu_per_chip = CPI_CPU_COUNT(cpi);
1302 		if (cpi->cpi_ncpu_per_chip > 1)
1303 			add_x86_feature(featureset, X86FSET_HTT);
1304 	} else {
1305 		cpi->cpi_ncpu_per_chip = 1;
1306 	}
1307 
1308 	/*
1309 	 * Work on the "extended" feature information, doing
1310 	 * some basic initialization for cpuid_pass2()
1311 	 */
1312 	xcpuid = 0;
1313 	switch (cpi->cpi_vendor) {
1314 	case X86_VENDOR_Intel:
1315 		if (IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf)
1316 			xcpuid++;
1317 		break;
1318 	case X86_VENDOR_AMD:
1319 		if (cpi->cpi_family > 5 ||
1320 		    (cpi->cpi_family == 5 && cpi->cpi_model >= 1))
1321 			xcpuid++;
1322 		break;
1323 	case X86_VENDOR_Cyrix:
1324 		/*
1325 		 * Only these Cyrix CPUs are -known- to support
1326 		 * extended cpuid operations.
1327 		 */
1328 		if (x86_type == X86_TYPE_VIA_CYRIX_III ||
1329 		    x86_type == X86_TYPE_CYRIX_GXm)
1330 			xcpuid++;
1331 		break;
1332 	case X86_VENDOR_Centaur:
1333 	case X86_VENDOR_TM:
1334 	default:
1335 		xcpuid++;
1336 		break;
1337 	}
1338 
1339 	if (xcpuid) {
1340 		cp = &cpi->cpi_extd[0];
1341 		cp->cp_eax = 0x80000000;
1342 		cpi->cpi_xmaxeax = __cpuid_insn(cp);
1343 	}
1344 
1345 	if (cpi->cpi_xmaxeax & 0x80000000) {
1346 
1347 		if (cpi->cpi_xmaxeax > CPI_XMAXEAX_MAX)
1348 			cpi->cpi_xmaxeax = CPI_XMAXEAX_MAX;
1349 
1350 		switch (cpi->cpi_vendor) {
1351 		case X86_VENDOR_Intel:
1352 		case X86_VENDOR_AMD:
1353 			if (cpi->cpi_xmaxeax < 0x80000001)
1354 				break;
1355 			cp = &cpi->cpi_extd[1];
1356 			cp->cp_eax = 0x80000001;
1357 			(void) __cpuid_insn(cp);
1358 
1359 			if (cpi->cpi_vendor == X86_VENDOR_AMD &&
1360 			    cpi->cpi_family == 5 &&
1361 			    cpi->cpi_model == 6 &&
1362 			    cpi->cpi_step == 6) {
1363 				/*
1364 				 * K6 model 6 uses bit 10 to indicate SYSC.
1365 				 * Later models use bit 11. Fix it here.
1366 				 */
1367 				if (cp->cp_edx & 0x400) {
1368 					cp->cp_edx &= ~0x400;
1369 					cp->cp_edx |= CPUID_AMD_EDX_SYSC;
1370 				}
1371 			}
1372 
1373 			platform_cpuid_mangle(cpi->cpi_vendor, 0x80000001, cp);
1374 
1375 			/*
1376 			 * Compute the additions to the kernel's feature word.
1377 			 */
1378 			if (cp->cp_edx & CPUID_AMD_EDX_NX) {
1379 				add_x86_feature(featureset, X86FSET_NX);
1380 			}
1381 
1382 			/*
1383 			 * Regardless of whether we boot 64-bit,
1384 			 * we should have a way to identify whether
1385 			 * the CPU is capable of running 64-bit.
1386 			 */
1387 			if (cp->cp_edx & CPUID_AMD_EDX_LM) {
1388 				add_x86_feature(featureset, X86FSET_64);
1389 			}
1390 
1391 #if defined(__amd64)
1392 			/* 1 GB large page - enable only for 64 bit kernel */
1393 			if (cp->cp_edx & CPUID_AMD_EDX_1GPG) {
1394 				add_x86_feature(featureset, X86FSET_1GPG);
1395 			}
1396 #endif
1397 
1398 			if ((cpi->cpi_vendor == X86_VENDOR_AMD) &&
1399 			    (cpi->cpi_std[1].cp_edx & CPUID_INTC_EDX_FXSR) &&
1400 			    (cp->cp_ecx & CPUID_AMD_ECX_SSE4A)) {
1401 				add_x86_feature(featureset, X86FSET_SSE4A);
1402 			}
1403 
1404 			/*
1405 			 * If both the HTT and CMP_LGCY bits are set,
1406 			 * then we're not actually HyperThreaded.  Read
1407 			 * "AMD CPUID Specification" for more details.
1408 			 */
1409 			if (cpi->cpi_vendor == X86_VENDOR_AMD &&
1410 			    is_x86_feature(featureset, X86FSET_HTT) &&
1411 			    (cp->cp_ecx & CPUID_AMD_ECX_CMP_LGCY)) {
1412 				remove_x86_feature(featureset, X86FSET_HTT);
1413 				add_x86_feature(featureset, X86FSET_CMP);
1414 			}
1415 #if defined(__amd64)
1416 			/*
1417 			 * It's really tricky to support syscall/sysret in
1418 			 * the i386 kernel; we rely on sysenter/sysexit
1419 			 * instead.  In the amd64 kernel, things are -way-
1420 			 * better.
1421 			 */
1422 			if (cp->cp_edx & CPUID_AMD_EDX_SYSC) {
1423 				add_x86_feature(featureset, X86FSET_ASYSC);
1424 			}
1425 
1426 			/*
1427 			 * While we're thinking about system calls, note
1428 			 * that AMD processors don't support sysenter
1429 			 * in long mode at all, so don't try to program them.
1430 			 */
1431 			if (x86_vendor == X86_VENDOR_AMD) {
1432 				remove_x86_feature(featureset, X86FSET_SEP);
1433 			}
1434 #endif
1435 			if (cp->cp_edx & CPUID_AMD_EDX_TSCP) {
1436 				add_x86_feature(featureset, X86FSET_TSCP);
1437 			}
1438 
1439 			if (cp->cp_ecx & CPUID_AMD_ECX_SVM) {
1440 				add_x86_feature(featureset, X86FSET_SVM);
1441 			}
1442 			break;
1443 		default:
1444 			break;
1445 		}
1446 
1447 		/*
1448 		 * Get CPUID data about processor cores and hyperthreads.
1449 		 */
1450 		switch (cpi->cpi_vendor) {
1451 		case X86_VENDOR_Intel:
1452 			if (cpi->cpi_maxeax >= 4) {
1453 				cp = &cpi->cpi_std[4];
1454 				cp->cp_eax = 4;
1455 				cp->cp_ecx = 0;
1456 				(void) __cpuid_insn(cp);
1457 				platform_cpuid_mangle(cpi->cpi_vendor, 4, cp);
1458 			}
1459 			/*FALLTHROUGH*/
1460 		case X86_VENDOR_AMD:
1461 			if (cpi->cpi_xmaxeax < 0x80000008)
1462 				break;
1463 			cp = &cpi->cpi_extd[8];
1464 			cp->cp_eax = 0x80000008;
1465 			(void) __cpuid_insn(cp);
1466 			platform_cpuid_mangle(cpi->cpi_vendor, 0x80000008, cp);
1467 
1468 			/*
1469 			 * Virtual and physical address limits from
1470 			 * cpuid override previously guessed values.
1471 			 */
1472 			cpi->cpi_pabits = BITX(cp->cp_eax, 7, 0);
1473 			cpi->cpi_vabits = BITX(cp->cp_eax, 15, 8);
1474 			break;
1475 		default:
1476 			break;
1477 		}
1478 
1479 		/*
1480 		 * Derive the number of cores per chip
1481 		 */
1482 		switch (cpi->cpi_vendor) {
1483 		case X86_VENDOR_Intel:
1484 			if (cpi->cpi_maxeax < 4) {
1485 				cpi->cpi_ncore_per_chip = 1;
1486 				break;
1487 			} else {
1488 				cpi->cpi_ncore_per_chip =
1489 				    BITX((cpi)->cpi_std[4].cp_eax, 31, 26) + 1;
1490 			}
1491 			break;
1492 		case X86_VENDOR_AMD:
1493 			if (cpi->cpi_xmaxeax < 0x80000008) {
1494 				cpi->cpi_ncore_per_chip = 1;
1495 				break;
1496 			} else {
1497 				/*
1498 				 * On family 0xf, cpuid fn 0x80000008 %ecx[7:0] "NC" is
1499 				 * 1 less than the number of physical cores on
1500 				 * the chip.  In family 0x10 this value can
1501 				 * be affected by "downcoring" - it reflects
1502 				 * 1 less than the number of cores actually
1503 				 * enabled on this node.
1504 				 */
1505 				cpi->cpi_ncore_per_chip =
1506 				    BITX((cpi)->cpi_extd[8].cp_ecx, 7, 0) + 1;
1507 			}
1508 			break;
1509 		default:
1510 			cpi->cpi_ncore_per_chip = 1;
1511 			break;
1512 		}
1513 
1514 		/*
1515 		 * Get CPUID data about TSC Invariance in Deep C-State.
1516 		 */
1517 		switch (cpi->cpi_vendor) {
1518 		case X86_VENDOR_Intel:
1519 			if (cpi->cpi_xmaxeax >= 0x80000007) {
1520 				cp = &cpi->cpi_extd[7];
1521 				cp->cp_eax = 0x80000007;
1522 				cp->cp_ecx = 0;
1523 				(void) __cpuid_insn(cp);
1524 			}
1525 			break;
1526 		default:
1527 			break;
1528 		}
1529 	} else {
1530 		cpi->cpi_ncore_per_chip = 1;
1531 	}
1532 
1533 	/*
1534 	 * If more than one core, then this processor is CMP.
1535 	 */
1536 	if (cpi->cpi_ncore_per_chip > 1) {
1537 		add_x86_feature(featureset, X86FSET_CMP);
1538 	}
1539 
1540 	/*
1541 	 * If the number of cores is the same as the number
1542 	 * of CPUs, then we cannot have HyperThreading.
1543 	 */
1544 	if (cpi->cpi_ncpu_per_chip == cpi->cpi_ncore_per_chip) {
1545 		remove_x86_feature(featureset, X86FSET_HTT);
1546 	}
1547 
1548 	cpi->cpi_apicid = CPI_APIC_ID(cpi);
1549 	cpi->cpi_procnodes_per_pkg = 1;
1550 	if (is_x86_feature(featureset, X86FSET_HTT) == B_FALSE &&
1551 	    is_x86_feature(featureset, X86FSET_CMP) == B_FALSE) {
1552 		/*
1553 		 * Single-core single-threaded processors.
1554 		 */
1555 		cpi->cpi_chipid = -1;
1556 		cpi->cpi_clogid = 0;
1557 		cpi->cpi_coreid = cpu->cpu_id;
1558 		cpi->cpi_pkgcoreid = 0;
1559 		if (cpi->cpi_vendor == X86_VENDOR_AMD)
1560 			cpi->cpi_procnodeid = BITX(cpi->cpi_apicid, 3, 0);
1561 		else
1562 			cpi->cpi_procnodeid = cpi->cpi_chipid;
1563 	} else if (cpi->cpi_ncpu_per_chip > 1) {
1564 		if (cpi->cpi_vendor == X86_VENDOR_Intel)
1565 			cpuid_intel_getids(cpu, featureset);
1566 		else if (cpi->cpi_vendor == X86_VENDOR_AMD)
1567 			cpuid_amd_getids(cpu);
1568 		else {
1569 			/*
1570 			 * All other processors are currently
1571 			 * assumed to have single cores.
1572 			 */
1573 			cpi->cpi_coreid = cpi->cpi_chipid;
1574 			cpi->cpi_pkgcoreid = 0;
1575 			cpi->cpi_procnodeid = cpi->cpi_chipid;
1576 		}
1577 	}
1578 
1579 	/*
1580 	 * Synthesize chip "revision" and socket type
1581 	 */
1582 	cpi->cpi_chiprev = _cpuid_chiprev(cpi->cpi_vendor, cpi->cpi_family,
1583 	    cpi->cpi_model, cpi->cpi_step);
1584 	cpi->cpi_chiprevstr = _cpuid_chiprevstr(cpi->cpi_vendor,
1585 	    cpi->cpi_family, cpi->cpi_model, cpi->cpi_step);
1586 	cpi->cpi_socket = _cpuid_skt(cpi->cpi_vendor, cpi->cpi_family,
1587 	    cpi->cpi_model, cpi->cpi_step);
1588 
1589 pass1_done:
1590 	cpi->cpi_pass = 1;
1591 }
1592 
1593 /*
1594  * Make copies of the cpuid table entries we depend on, in
1595  * part for ease of parsing now, in part so that we have only
1596  * one place to correct any of it, in part for ease of
1597  * later export to userland, and in part so we can look at
1598  * this stuff in a crash dump.
1599  */
1600 
1601 /*ARGSUSED*/
1602 void
1603 cpuid_pass2(cpu_t *cpu)
1604 {
1605 	uint_t n, nmax;
1606 	int i;
1607 	struct cpuid_regs *cp;
1608 	uint8_t *dp;
1609 	uint32_t *iptr;
1610 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
1611 
1612 	ASSERT(cpi->cpi_pass == 1);
1613 
1614 	if (cpi->cpi_maxeax < 1)
1615 		goto pass2_done;
1616 
1617 	if ((nmax = cpi->cpi_maxeax + 1) > NMAX_CPI_STD)
1618 		nmax = NMAX_CPI_STD;
1619 	/*
1620 	 * (We already handled n == 0 and n == 1 in pass 1)
1621 	 */
1622 	for (n = 2, cp = &cpi->cpi_std[2]; n < nmax; n++, cp++) {
1623 		cp->cp_eax = n;
1624 
1625 		/*
1626 		 * CPUID function 4 expects %ecx to be initialized
1627 		 * with an index which indicates which cache to return
1628 		 * information about. The OS is expected to call function 4
1629 		 * with %ecx set to 0, 1, 2, ... until it returns with
1630 		 * EAX[4:0] set to 0, which indicates there are no more
1631 		 * caches.
1632 		 *
1633 		 * Here, populate cpi_std[4] with the information returned by
1634 		 * function 4 when %ecx == 0, and do the rest in cpuid_pass3()
1635 		 * when dynamic memory allocation becomes available.
1636 		 *
1637 		 * Note: we need to explicitly initialize %ecx here, since
1638 		 * function 4 may have been previously invoked.
1639 		 */
1640 		if (n == 4)
1641 			cp->cp_ecx = 0;
1642 
1643 		(void) __cpuid_insn(cp);
1644 		platform_cpuid_mangle(cpi->cpi_vendor, n, cp);
1645 		switch (n) {
1646 		case 2:
1647 			/*
1648 			 * "the lower 8 bits of the %eax register
1649 			 * contain a value that identifies the number
1650 			 * of times the cpuid [instruction] has to be
1651 			 * executed to obtain a complete image of the
1652 			 * processor's caching systems."
1653 			 *
1654 			 * How *do* they make this stuff up?
1655 			 */
1656 			cpi->cpi_ncache = sizeof (*cp) *
1657 			    BITX(cp->cp_eax, 7, 0);
1658 			if (cpi->cpi_ncache == 0)
1659 				break;
1660 			cpi->cpi_ncache--;	/* skip count byte */
1661 
1662 			/*
1663 			 * Well, for now, rather than attempt to implement
1664 			 * this slightly dubious algorithm, we just look
1665 			 * at the first 15 ..
1666 			 */
1667 			if (cpi->cpi_ncache > (sizeof (*cp) - 1))
1668 				cpi->cpi_ncache = sizeof (*cp) - 1;
1669 
1670 			dp = cpi->cpi_cacheinfo;
1671 			if (BITX(cp->cp_eax, 31, 31) == 0) {
1672 				uint8_t *p = (void *)&cp->cp_eax;
1673 				for (i = 1; i < 4; i++)
1674 					if (p[i] != 0)
1675 						*dp++ = p[i];
1676 			}
1677 			if (BITX(cp->cp_ebx, 31, 31) == 0) {
1678 				uint8_t *p = (void *)&cp->cp_ebx;
1679 				for (i = 0; i < 4; i++)
1680 					if (p[i] != 0)
1681 						*dp++ = p[i];
1682 			}
1683 			if (BITX(cp->cp_ecx, 31, 31) == 0) {
1684 				uint8_t *p = (void *)&cp->cp_ecx;
1685 				for (i = 0; i < 4; i++)
1686 					if (p[i] != 0)
1687 						*dp++ = p[i];
1688 			}
1689 			if (BITX(cp->cp_edx, 31, 31) == 0) {
1690 				uint8_t *p = (void *)&cp->cp_edx;
1691 				for (i = 0; i < 4; i++)
1692 					if (p[i] != 0)
1693 						*dp++ = p[i];
1694 			}
1695 			break;
1696 
1697 		case 3:	/* Processor serial number, if PSN supported */
1698 			break;
1699 
1700 		case 4:	/* Deterministic cache parameters */
1701 			break;
1702 
1703 		case 5:	/* Monitor/Mwait parameters */
1704 		{
1705 			size_t mwait_size;
1706 
1707 			/*
1708 			 * check cpi_mwait.support which was set in cpuid_pass1
1709 			 */
1710 			if (!(cpi->cpi_mwait.support & MWAIT_SUPPORT))
1711 				break;
1712 
1713 			/*
1714 			 * Protect ourselves from an insane mwait line size.
1715 			 * Workaround for incomplete hardware emulator(s).
1716 			 */
1717 			mwait_size = (size_t)MWAIT_SIZE_MAX(cpi);
1718 			if (mwait_size < sizeof (uint32_t) ||
1719 			    !ISP2(mwait_size)) {
1720 #if DEBUG
1721 				cmn_err(CE_NOTE, "Cannot handle cpu %d mwait "
1722 				    "size %ld", cpu->cpu_id, (long)mwait_size);
1723 #endif
1724 				break;
1725 			}
1726 
1727 			cpi->cpi_mwait.mon_min = (size_t)MWAIT_SIZE_MIN(cpi);
1728 			cpi->cpi_mwait.mon_max = mwait_size;
1729 			if (MWAIT_EXTENSION(cpi)) {
1730 				cpi->cpi_mwait.support |= MWAIT_EXTENSIONS;
1731 				if (MWAIT_INT_ENABLE(cpi))
1732 					cpi->cpi_mwait.support |=
1733 					    MWAIT_ECX_INT_ENABLE;
1734 			}
1735 			break;
1736 		}
1737 		default:
1738 			break;
1739 		}
1740 	}
1741 
1742 	if (cpi->cpi_maxeax >= 0xB && cpi->cpi_vendor == X86_VENDOR_Intel) {
1743 		struct cpuid_regs regs;
1744 
1745 		cp = &regs;
1746 		cp->cp_eax = 0xB;
1747 		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
1748 
1749 		(void) __cpuid_insn(cp);
1750 
1751 		/*
1752 		 * Check that CPUID.(EAX=0BH, ECX=0H):EBX is non-zero, which
1753 		 * indicates that the extended topology enumeration leaf is
1754 		 * available.
1755 		 */
1756 		if (cp->cp_ebx) {
1757 			uint32_t x2apic_id;
1758 			uint_t coreid_shift = 0;
1759 			uint_t ncpu_per_core = 1;
1760 			uint_t chipid_shift = 0;
1761 			uint_t ncpu_per_chip = 1;
1762 			uint_t i;
1763 			uint_t level;
1764 
1765 			for (i = 0; i < CPI_FNB_ECX_MAX; i++) {
1766 				cp->cp_eax = 0xB;
1767 				cp->cp_ecx = i;
1768 
1769 				(void) __cpuid_insn(cp);
1770 				level = CPI_CPU_LEVEL_TYPE(cp);
1771 
1772 				if (level == 1) {
1773 					x2apic_id = cp->cp_edx;
1774 					coreid_shift = BITX(cp->cp_eax, 4, 0);
1775 					ncpu_per_core = BITX(cp->cp_ebx, 15, 0);
1776 				} else if (level == 2) {
1777 					x2apic_id = cp->cp_edx;
1778 					chipid_shift = BITX(cp->cp_eax, 4, 0);
1779 					ncpu_per_chip = BITX(cp->cp_ebx, 15, 0);
1780 				}
1781 			}
1782 
1783 			cpi->cpi_apicid = x2apic_id;
1784 			cpi->cpi_ncpu_per_chip = ncpu_per_chip;
1785 			cpi->cpi_ncore_per_chip = ncpu_per_chip /
1786 			    ncpu_per_core;
1787 			cpi->cpi_chipid = x2apic_id >> chipid_shift;
1788 			cpi->cpi_clogid = x2apic_id & ((1 << chipid_shift) - 1);
1789 			cpi->cpi_coreid = x2apic_id >> coreid_shift;
1790 			cpi->cpi_pkgcoreid = cpi->cpi_clogid >> coreid_shift;
1791 		}
1792 
1793 		/* Make cp NULL so that we don't stumble on others */
1794 		cp = NULL;
1795 	}
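	/*
	 * Worked example of the topology math above, with hypothetical
	 * shift values: given coreid_shift == 1 and chipid_shift == 4,
	 * an x2APIC id of 0x1d decomposes as
	 *
	 *	chipid    = 0x1d >> 4   = 1
	 *	clogid    = 0x1d & 0xf  = 0xd
	 *	coreid    = 0x1d >> 1   = 0xe
	 *	pkgcoreid = 0xd >> 1    = 6
	 *
	 * i.e. chip 1, core 6 within the package, second hardware thread
	 * of that core.
	 */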
1796 
1797 	/*
1798 	 * XSAVE enumeration
1799 	 */
1800 	if (cpi->cpi_maxeax >= 0xD && cpi->cpi_vendor == X86_VENDOR_Intel) {
1801 		struct cpuid_regs regs;
1802 		boolean_t cpuid_d_valid = B_TRUE;
1803 
1804 		cp = &regs;
1805 		cp->cp_eax = 0xD;
1806 		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
1807 
1808 		(void) __cpuid_insn(cp);
1809 
1810 		/*
1811 		 * Sanity checks for debug
1812 		 */
1813 		if ((cp->cp_eax & XFEATURE_LEGACY_FP) == 0 ||
1814 		    (cp->cp_eax & XFEATURE_SSE) == 0) {
1815 			cpuid_d_valid = B_FALSE;
1816 		}
1817 
1818 		cpi->cpi_xsave.xsav_hw_features_low = cp->cp_eax;
1819 		cpi->cpi_xsave.xsav_hw_features_high = cp->cp_edx;
1820 		cpi->cpi_xsave.xsav_max_size = cp->cp_ecx;
1821 
1822 		/*
1823 		 * If the hw supports AVX, get the size and offset in the save
1824 		 * area for the ymm state.
1825 		 */
1826 		if (cpi->cpi_xsave.xsav_hw_features_low & XFEATURE_AVX) {
1827 			cp->cp_eax = 0xD;
1828 			cp->cp_ecx = 2;
1829 			cp->cp_edx = cp->cp_ebx = 0;
1830 
1831 			(void) __cpuid_insn(cp);
1832 
1833 			if (cp->cp_ebx != CPUID_LEAFD_2_YMM_OFFSET ||
1834 			    cp->cp_eax != CPUID_LEAFD_2_YMM_SIZE) {
1835 				cpuid_d_valid = B_FALSE;
1836 			}
1837 
1838 			cpi->cpi_xsave.ymm_size = cp->cp_eax;
1839 			cpi->cpi_xsave.ymm_offset = cp->cp_ebx;
1840 		}
1841 
1842 		if (is_x86_feature(x86_featureset, X86FSET_XSAVE)) {
1843 			xsave_state_size = 0;
1844 		} else if (cpuid_d_valid) {
1845 			xsave_state_size = cpi->cpi_xsave.xsav_max_size;
1846 		} else {
1847 			/* Broken CPUID 0xD, probably in HVM */
1848 			cmn_err(CE_WARN, "cpu%d: CPUID.0xD returns invalid "
1849 			    "value: hw_low = %d, hw_high = %d, xsave_size = %d"
1850 			    ", ymm_size = %d, ymm_offset = %d\n",
1851 			    cpu->cpu_id, cpi->cpi_xsave.xsav_hw_features_low,
1852 			    cpi->cpi_xsave.xsav_hw_features_high,
1853 			    (int)cpi->cpi_xsave.xsav_max_size,
1854 			    (int)cpi->cpi_xsave.ymm_size,
1855 			    (int)cpi->cpi_xsave.ymm_offset);
1856 
1857 			if (xsave_state_size != 0) {
1858 				/*
1859 				 * This must be a non-boot CPU. We cannot
1860 				 * continue, because boot cpu has already
1861 				 * enabled XSAVE.
1862 				 */
1863 				ASSERT(cpu->cpu_id != 0);
1864 				cmn_err(CE_PANIC, "cpu%d: we have already "
1865 				    "enabled XSAVE on boot cpu, cannot "
1866 				    "continue.", cpu->cpu_id);
1867 			} else {
1868 				/*
1869 				 * Must be from boot CPU, OK to disable XSAVE.
1870 				 */
1871 				ASSERT(cpu->cpu_id == 0);
1872 				remove_x86_feature(x86_featureset,
1873 				    X86FSET_XSAVE);
1874 				remove_x86_feature(x86_featureset, X86FSET_AVX);
1875 				CPI_FEATURES_ECX(cpi) &= ~CPUID_INTC_ECX_XSAVE;
1876 				CPI_FEATURES_ECX(cpi) &= ~CPUID_INTC_ECX_AVX;
1877 				xsave_force_disable = B_TRUE;
1878 			}
1879 		}
1880 	}
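	/*
	 * For reference, a sketch of how leaf 0xD reads on typical
	 * AVX-capable hardware (values illustrative, not guaranteed):
	 * the main leaf's %eax has XFEATURE_LEGACY_FP, XFEATURE_SSE and
	 * XFEATURE_AVX set, and sub-leaf 2 reports the ymm save area at
	 * offset 576 (just past the 512-byte legacy region and 64-byte
	 * XSAVE header) with size 256 (16 x 16-byte register halves);
	 * those are exactly the values the checks above insist on via
	 * CPUID_LEAFD_2_YMM_OFFSET and CPUID_LEAFD_2_YMM_SIZE.
	 */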
1881 
1882 
1883 	if ((cpi->cpi_xmaxeax & 0x80000000) == 0)
1884 		goto pass2_done;
1885 
1886 	if ((nmax = cpi->cpi_xmaxeax - 0x80000000 + 1) > NMAX_CPI_EXTD)
1887 		nmax = NMAX_CPI_EXTD;
1888 	/*
1889 	 * Copy the extended properties, fixing them as we go.
1890 	 * (We already handled n == 0 and n == 1 in pass 1)
1891 	 */
1892 	iptr = (void *)cpi->cpi_brandstr;
1893 	for (n = 2, cp = &cpi->cpi_extd[2]; n < nmax; cp++, n++) {
1894 		cp->cp_eax = 0x80000000 + n;
1895 		(void) __cpuid_insn(cp);
1896 		platform_cpuid_mangle(cpi->cpi_vendor, 0x80000000 + n, cp);
1897 		switch (n) {
1898 		case 2:
1899 		case 3:
1900 		case 4:
1901 			/*
1902 			 * Extract the brand string
1903 			 */
1904 			*iptr++ = cp->cp_eax;
1905 			*iptr++ = cp->cp_ebx;
1906 			*iptr++ = cp->cp_ecx;
1907 			*iptr++ = cp->cp_edx;
1908 			break;
1909 		case 5:
1910 			switch (cpi->cpi_vendor) {
1911 			case X86_VENDOR_AMD:
1912 				/*
1913 				 * The Athlon and Duron were the first
1914 				 * AMD parts to report the sizes of the
1915 				 * TLB for large pages; on anything
1916 				 * earlier, we don't trust the data.
1917 				 */
1918 				if (cpi->cpi_family < 6 ||
1919 				    (cpi->cpi_family == 6 &&
1920 				    cpi->cpi_model < 1))
1921 					cp->cp_eax = 0;
1922 				break;
1923 			default:
1924 				break;
1925 			}
1926 			break;
1927 		case 6:
1928 			switch (cpi->cpi_vendor) {
1929 			case X86_VENDOR_AMD:
1930 				/*
1931 				 * The Athlon and Duron were the first
1932 				 * AMD parts with L2 TLBs.
1933 				 * Before then, don't trust the data.
1934 				 */
1935 				if (cpi->cpi_family < 6 ||
1936 				    (cpi->cpi_family == 6 &&
1937 				    cpi->cpi_model < 1))
1938 					cp->cp_eax = cp->cp_ebx = 0;
1939 				/*
1940 				 * AMD Duron rev A0 reports L2
1941 				 * cache size incorrectly as 1K
1942 				 * when it is really 64K
1943 				 */
1944 				if (cpi->cpi_family == 6 &&
1945 				    cpi->cpi_model == 3 &&
1946 				    cpi->cpi_step == 0) {
1947 					cp->cp_ecx &= 0xffff;
1948 					cp->cp_ecx |= 0x400000;	/* 64K in ecx[31:16] */
1949 				}
1950 				break;
1951 			case X86_VENDOR_Cyrix:	/* VIA C3 */
1952 				/*
1953 				 * VIA C3 processors are a bit messed
1954 				 * up w.r.t. encoding cache sizes in %ecx
1955 				 */
1956 				if (cpi->cpi_family != 6)
1957 					break;
1958 				/*
1959 				 * model 7 and 8 were incorrectly encoded
1960 				 *
1961 				 * xxx is model 8 really broken?
1962 				 */
1963 				if (cpi->cpi_model == 7 ||
1964 				    cpi->cpi_model == 8)
1965 					cp->cp_ecx =
1966 					    BITX(cp->cp_ecx, 31, 24) << 16 |
1967 					    BITX(cp->cp_ecx, 23, 16) << 12 |
1968 					    BITX(cp->cp_ecx, 15, 8) << 8 |
1969 					    BITX(cp->cp_ecx, 7, 0);
1970 				/*
1971 				 * model 9 stepping 1 has wrong associativity
1972 				 */
1973 				if (cpi->cpi_model == 9 && cpi->cpi_step == 1)
1974 					cp->cp_ecx |= 8 << 12;
1975 				break;
1976 			case X86_VENDOR_Intel:
1977 				/*
1978 				 * Extended L2 Cache features function.
1979 				 * First appeared on Prescott.
1980 				 */
1981 			default:
1982 				break;
1983 			}
1984 			break;
1985 		default:
1986 			break;
1987 		}
1988 	}
1989 
1990 pass2_done:
1991 	cpi->cpi_pass = 2;
1992 }
1993 
1994 static const char *
1995 intel_cpubrand(const struct cpuid_info *cpi)
1996 {
1997 	int i;
1998 
1999 	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
2000 	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5)
2001 		return ("i486");
2002 
2003 	switch (cpi->cpi_family) {
2004 	case 5:
2005 		return ("Intel Pentium(r)");
2006 	case 6:
2007 		switch (cpi->cpi_model) {
2008 			uint_t celeron, xeon;
2009 			const struct cpuid_regs *cp;
2010 		case 0:
2011 		case 1:
2012 		case 2:
2013 			return ("Intel Pentium(r) Pro");
2014 		case 3:
2015 		case 4:
2016 			return ("Intel Pentium(r) II");
2017 		case 6:
2018 			return ("Intel Celeron(r)");
2019 		case 5:
2020 		case 7:
2021 			celeron = xeon = 0;
2022 			cp = &cpi->cpi_std[2];	/* cache info */
2023 
2024 			for (i = 1; i < 4; i++) {
2025 				uint_t tmp;
2026 
2027 				tmp = (cp->cp_eax >> (8 * i)) & 0xff;
2028 				if (tmp == 0x40)
2029 					celeron++;
2030 				else if (tmp >= 0x44 && tmp <= 0x45)
2031 					xeon++;
2032 			}
2033 
2034 			for (i = 0; i < 2; i++) {
2035 				uint_t tmp;
2036 
2037 				tmp = (cp->cp_ebx >> (8 * i)) & 0xff;
2038 				if (tmp == 0x40)
2039 					celeron++;
2040 				else if (tmp >= 0x44 && tmp <= 0x45)
2041 					xeon++;
2042 			}
2043 
2044 			for (i = 0; i < 4; i++) {
2045 				uint_t tmp;
2046 
2047 				tmp = (cp->cp_ecx >> (8 * i)) & 0xff;
2048 				if (tmp == 0x40)
2049 					celeron++;
2050 				else if (tmp >= 0x44 && tmp <= 0x45)
2051 					xeon++;
2052 			}
2053 
2054 			for (i = 0; i < 4; i++) {
2055 				uint_t tmp;
2056 
2057 				tmp = (cp->cp_edx >> (8 * i)) & 0xff;
2058 				if (tmp == 0x40)
2059 					celeron++;
2060 				else if (tmp >= 0x44 && tmp <= 0x45)
2061 					xeon++;
2062 			}
2063 
2064 			if (celeron)
2065 				return ("Intel Celeron(r)");
2066 			if (xeon)
2067 				return (cpi->cpi_model == 5 ?
2068 				    "Intel Pentium(r) II Xeon(tm)" :
2069 				    "Intel Pentium(r) III Xeon(tm)");
2070 			return (cpi->cpi_model == 5 ?
2071 			    "Intel Pentium(r) II or Pentium(r) II Xeon(tm)" :
2072 			    "Intel Pentium(r) III or Pentium(r) III Xeon(tm)");
2073 		default:
2074 			break;
2075 		}
2076 	default:
2077 		break;
2078 	}
2079 
2080 	/* BrandID is present if the field is nonzero */
2081 	if (cpi->cpi_brandid != 0) {
2082 		static const struct {
2083 			uint_t bt_bid;
2084 			const char *bt_str;
2085 		} brand_tbl[] = {
2086 			{ 0x1,	"Intel(r) Celeron(r)" },
2087 			{ 0x2,	"Intel(r) Pentium(r) III" },
2088 			{ 0x3,	"Intel(r) Pentium(r) III Xeon(tm)" },
2089 			{ 0x4,	"Intel(r) Pentium(r) III" },
2090 			{ 0x6,	"Mobile Intel(r) Pentium(r) III" },
2091 			{ 0x7,	"Mobile Intel(r) Celeron(r)" },
2092 			{ 0x8,	"Intel(r) Pentium(r) 4" },
2093 			{ 0x9,	"Intel(r) Pentium(r) 4" },
2094 			{ 0xa,	"Intel(r) Celeron(r)" },
2095 			{ 0xb,	"Intel(r) Xeon(tm)" },
2096 			{ 0xc,	"Intel(r) Xeon(tm) MP" },
2097 			{ 0xe,	"Mobile Intel(r) Pentium(r) 4" },
2098 			{ 0xf,	"Mobile Intel(r) Celeron(r)" },
2099 			{ 0x11, "Mobile Genuine Intel(r)" },
2100 			{ 0x12, "Intel(r) Celeron(r) M" },
2101 			{ 0x13, "Mobile Intel(r) Celeron(r)" },
2102 			{ 0x14, "Intel(r) Celeron(r)" },
2103 			{ 0x15, "Mobile Genuine Intel(r)" },
2104 			{ 0x16,	"Intel(r) Pentium(r) M" },
2105 			{ 0x17, "Mobile Intel(r) Celeron(r)" }
2106 		};
2107 		uint_t btblmax = sizeof (brand_tbl) / sizeof (brand_tbl[0]);
2108 		uint_t sgn;
2109 
2110 		sgn = (cpi->cpi_family << 8) |
2111 		    (cpi->cpi_model << 4) | cpi->cpi_step;
2112 
2113 		for (i = 0; i < btblmax; i++)
2114 			if (brand_tbl[i].bt_bid == cpi->cpi_brandid)
2115 				break;
2116 		if (i < btblmax) {
2117 			if (sgn == 0x6b1 && cpi->cpi_brandid == 3)
2118 				return ("Intel(r) Celeron(r)");
2119 			if (sgn < 0xf13 && cpi->cpi_brandid == 0xb)
2120 				return ("Intel(r) Xeon(tm) MP");
2121 			if (sgn < 0xf13 && cpi->cpi_brandid == 0xe)
2122 				return ("Intel(r) Xeon(tm)");
2123 			return (brand_tbl[i].bt_str);
2124 		}
2125 	}
2126 
2127 	return (NULL);
2128 }
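/*
 * Worked example (hypothetical part) of the BrandID path above: a family 6,
 * model 0xb, stepping 1 processor yields sgn == 0x6b1, so a BrandID of 3
 * reports "Intel(r) Celeron(r)" rather than the table's "Intel(r) Pentium(r)
 * III Xeon(tm)" entry, per the first special case in the lookup.
 */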
2129 
2130 static const char *
2131 amd_cpubrand(const struct cpuid_info *cpi)
2132 {
2133 	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
2134 	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5)
2135 		return ("i486 compatible");
2136 
2137 	switch (cpi->cpi_family) {
2138 	case 5:
2139 		switch (cpi->cpi_model) {
2140 		case 0:
2141 		case 1:
2142 		case 2:
2143 		case 3:
2144 		case 4:
2145 		case 5:
2146 			return ("AMD-K5(r)");
2147 		case 6:
2148 		case 7:
2149 			return ("AMD-K6(r)");
2150 		case 8:
2151 			return ("AMD-K6(r)-2");
2152 		case 9:
2153 			return ("AMD-K6(r)-III");
2154 		default:
2155 			return ("AMD (family 5)");
2156 		}
2157 	case 6:
2158 		switch (cpi->cpi_model) {
2159 		case 1:
2160 			return ("AMD-K7(tm)");
2161 		case 0:
2162 		case 2:
2163 		case 4:
2164 			return ("AMD Athlon(tm)");
2165 		case 3:
2166 		case 7:
2167 			return ("AMD Duron(tm)");
2168 		case 6:
2169 		case 8:
2170 		case 10:
2171 			/*
2172 			 * Use the L2 cache size to distinguish
2173 			 */
2174 			return ((cpi->cpi_extd[6].cp_ecx >> 16) >= 256 ?
2175 			    "AMD Athlon(tm)" : "AMD Duron(tm)");
2176 		default:
2177 			return ("AMD (family 6)");
2178 		}
2179 	default:
2180 		break;
2181 	}
2182 
2183 	if (cpi->cpi_family == 0xf && cpi->cpi_model == 5 &&
2184 	    cpi->cpi_brandid != 0) {
2185 		switch (BITX(cpi->cpi_brandid, 7, 5)) {
2186 		case 3:
2187 			return ("AMD Opteron(tm) UP 1xx");
2188 		case 4:
2189 			return ("AMD Opteron(tm) DP 2xx");
2190 		case 5:
2191 			return ("AMD Opteron(tm) MP 8xx");
2192 		default:
2193 			return ("AMD Opteron(tm)");
2194 		}
2195 	}
2196 
2197 	return (NULL);
2198 }
2199 
2200 static const char *
2201 cyrix_cpubrand(struct cpuid_info *cpi, uint_t type)
2202 {
2203 	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
2204 	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5 ||
2205 	    type == X86_TYPE_CYRIX_486)
2206 		return ("i486 compatible");
2207 
2208 	switch (type) {
2209 	case X86_TYPE_CYRIX_6x86:
2210 		return ("Cyrix 6x86");
2211 	case X86_TYPE_CYRIX_6x86L:
2212 		return ("Cyrix 6x86L");
2213 	case X86_TYPE_CYRIX_6x86MX:
2214 		return ("Cyrix 6x86MX");
2215 	case X86_TYPE_CYRIX_GXm:
2216 		return ("Cyrix GXm");
2217 	case X86_TYPE_CYRIX_MediaGX:
2218 		return ("Cyrix MediaGX");
2219 	case X86_TYPE_CYRIX_MII:
2220 		return ("Cyrix M2");
2221 	case X86_TYPE_VIA_CYRIX_III:
2222 		return ("VIA Cyrix M3");
2223 	default:
2224 		/*
2225 		 * Have another wild guess ..
2226 		 */
2227 		if (cpi->cpi_family == 4 && cpi->cpi_model == 9)
2228 			return ("Cyrix 5x86");
2229 		else if (cpi->cpi_family == 5) {
2230 			switch (cpi->cpi_model) {
2231 			case 2:
2232 				return ("Cyrix 6x86");	/* Cyrix M1 */
2233 			case 4:
2234 				return ("Cyrix MediaGX");
2235 			default:
2236 				break;
2237 			}
2238 		} else if (cpi->cpi_family == 6) {
2239 			switch (cpi->cpi_model) {
2240 			case 0:
2241 				return ("Cyrix 6x86MX"); /* Cyrix M2? */
2242 			case 5:
2243 			case 6:
2244 			case 7:
2245 			case 8:
2246 			case 9:
2247 				return ("VIA C3");
2248 			default:
2249 				break;
2250 			}
2251 		}
2252 		break;
2253 	}
2254 	return (NULL);
2255 }
2256 
2257 /*
2258  * This only gets called in the case that the CPU extended
2259  * feature brand strings (0x80000002, 0x80000003, 0x80000004)
2260  * aren't available, or contain null bytes for some reason.
2261  */
2262 static void
2263 fabricate_brandstr(struct cpuid_info *cpi)
2264 {
2265 	const char *brand = NULL;
2266 
2267 	switch (cpi->cpi_vendor) {
2268 	case X86_VENDOR_Intel:
2269 		brand = intel_cpubrand(cpi);
2270 		break;
2271 	case X86_VENDOR_AMD:
2272 		brand = amd_cpubrand(cpi);
2273 		break;
2274 	case X86_VENDOR_Cyrix:
2275 		brand = cyrix_cpubrand(cpi, x86_type);
2276 		break;
2277 	case X86_VENDOR_NexGen:
2278 		if (cpi->cpi_family == 5 && cpi->cpi_model == 0)
2279 			brand = "NexGen Nx586";
2280 		break;
2281 	case X86_VENDOR_Centaur:
2282 		if (cpi->cpi_family == 5)
2283 			switch (cpi->cpi_model) {
2284 			case 4:
2285 				brand = "Centaur C6";
2286 				break;
2287 			case 8:
2288 				brand = "Centaur C2";
2289 				break;
2290 			case 9:
2291 				brand = "Centaur C3";
2292 				break;
2293 			default:
2294 				break;
2295 			}
2296 		break;
2297 	case X86_VENDOR_Rise:
2298 		if (cpi->cpi_family == 5 &&
2299 		    (cpi->cpi_model == 0 || cpi->cpi_model == 2))
2300 			brand = "Rise mP6";
2301 		break;
2302 	case X86_VENDOR_SiS:
2303 		if (cpi->cpi_family == 5 && cpi->cpi_model == 0)
2304 			brand = "SiS 55x";
2305 		break;
2306 	case X86_VENDOR_TM:
2307 		if (cpi->cpi_family == 5 && cpi->cpi_model == 4)
2308 			brand = "Transmeta Crusoe TM3x00 or TM5x00";
2309 		break;
2310 	case X86_VENDOR_NSC:
2311 	case X86_VENDOR_UMC:
2312 	default:
2313 		break;
2314 	}
2315 	if (brand) {
2316 		(void) strcpy((char *)cpi->cpi_brandstr, brand);
2317 		return;
2318 	}
2319 
2320 	/*
2321 	 * If all else fails ...
2322 	 */
2323 	(void) snprintf(cpi->cpi_brandstr, sizeof (cpi->cpi_brandstr),
2324 	    "%s %d.%d.%d", cpi->cpi_vendorstr, cpi->cpi_family,
2325 	    cpi->cpi_model, cpi->cpi_step);
2326 }
2327 
2328 /*
2329  * This routine is called just after kernel memory allocation
2330  * becomes available on cpu0, and as part of mp_startup() on
2331  * the other cpus.
2332  *
2333  * Fixup the brand string, and collect any information from cpuid
2334  * that requires dynamically allocated storage to represent.
2335  */
2336 /*ARGSUSED*/
2337 void
2338 cpuid_pass3(cpu_t *cpu)
2339 {
2340 	int	i, max, shft, level, size;
2341 	struct cpuid_regs regs;
2342 	struct cpuid_regs *cp;
2343 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
2344 
2345 	ASSERT(cpi->cpi_pass == 2);
2346 
2347 	/*
2348 	 * Function 4: Deterministic cache parameters
2349 	 *
2350 	 * Take this opportunity to detect the number of threads
2351 	 * sharing the last level cache, and construct a corresponding
2352 	 * cache id. The respective cpuid_info members are initialized
2353 	 * to the default case of "no last level cache sharing".
2354 	 */
2355 	cpi->cpi_ncpu_shr_last_cache = 1;
2356 	cpi->cpi_last_lvl_cacheid = cpu->cpu_id;
2357 
2358 	if (cpi->cpi_maxeax >= 4 && cpi->cpi_vendor == X86_VENDOR_Intel) {
2359 
2360 		/*
2361 		 * Find the # of elements (size) returned by fn 4, and along
2362 		 * the way detect last level cache sharing details.
2363 		 */
2364 		bzero(&regs, sizeof (regs));
2365 		cp = &regs;
2366 		for (i = 0, max = 0; i < CPI_FN4_ECX_MAX; i++) {
2367 			cp->cp_eax = 4;
2368 			cp->cp_ecx = i;
2369 
2370 			(void) __cpuid_insn(cp);
2371 
2372 			if (CPI_CACHE_TYPE(cp) == 0)
2373 				break;
2374 			level = CPI_CACHE_LVL(cp);
2375 			if (level > max) {
2376 				max = level;
2377 				cpi->cpi_ncpu_shr_last_cache =
2378 				    CPI_NTHR_SHR_CACHE(cp) + 1;
2379 			}
2380 		}
2381 		cpi->cpi_std_4_size = size = i;
2382 
2383 		/*
2384 		 * Allocate the cpi_std_4 array. The first element
2385 		 * references the regs for fn 4, %ecx == 0, which
2386 		 * cpuid_pass2() stashed in cpi->cpi_std[4].
2387 		 */
2388 		if (size > 0) {
2389 			cpi->cpi_std_4 =
2390 			    kmem_alloc(size * sizeof (cp), KM_SLEEP);
2391 			cpi->cpi_std_4[0] = &cpi->cpi_std[4];
2392 
2393 			/*
2394 			 * Allocate storage to hold the additional regs
2395 			 * for function 4, %ecx == 1 .. cpi_std_4_size.
2396 			 *
2397 			 * The regs for fn 4, %ecx == 0 have already
2398 			 * been set up above via cpi->cpi_std[4].
2399 			 */
2400 			for (i = 1; i < size; i++) {
2401 				cp = cpi->cpi_std_4[i] =
2402 				    kmem_zalloc(sizeof (regs), KM_SLEEP);
2403 				cp->cp_eax = 4;
2404 				cp->cp_ecx = i;
2405 
2406 				(void) __cpuid_insn(cp);
2407 			}
2408 		}
2409 		/*
2410 		 * Determine the number of bits needed to represent
2411 		 * the number of CPUs sharing the last level cache.
2412 		 *
2413 		 * Shift off that number of bits from the APIC id to
2414 		 * derive the cache id.
2415 		 */
2416 		shft = 0;
2417 		for (i = 1; i < cpi->cpi_ncpu_shr_last_cache; i <<= 1)
2418 			shft++;
2419 		cpi->cpi_last_lvl_cacheid = cpi->cpi_apicid >> shft;
2420 	}
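	/*
	 * Worked example (hypothetical values): had fn 4 reported the last
	 * level cache as shared by 8 hardware threads, the loop above would
	 * compute shft == 3, so CPUs with APIC ids 0x10 through 0x17 would
	 * all derive the same cache id of 2 (0x10 >> 3).
	 */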
2421 
2422 	/*
2423 	 * Now fixup the brand string
2424 	 */
2425 	if ((cpi->cpi_xmaxeax & 0x80000000) == 0) {
2426 		fabricate_brandstr(cpi);
2427 	} else {
2428 
2429 		/*
2430 		 * If we successfully extracted a brand string from the cpuid
2431 		 * instruction, clean it up by removing leading spaces and
2432 		 * similar junk.
2433 		 */
2434 		if (cpi->cpi_brandstr[0]) {
2435 			size_t maxlen = sizeof (cpi->cpi_brandstr);
2436 			char *src, *dst;
2437 
2438 			dst = src = (char *)cpi->cpi_brandstr;
2439 			src[maxlen - 1] = '\0';
2440 			/*
2441 			 * strip leading spaces
2442 			 */
2443 			while (*src == ' ')
2444 				src++;
2445 			/*
2446 			 * Remove any "Genuine" or "Authentic" prefixes
2447 			 */
2448 			if (strncmp(src, "Genuine ", 8) == 0)
2449 				src += 8;
2450 			if (strncmp(src, "Authentic ", 10) == 0)
2451 				src += 10;
2452 
2453 			/*
2454 			 * Now do an in-place copy.
2455 			 * Map (R) to (r) and (TM) to (tm).
2456 			 * The era of teletypes is long gone, and there's
2457 			 * -really- no need to shout.
2458 			 */
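			/*
			 * For example (illustrative input), a raw brand
			 * string of "  Genuine Intel(R) Pentium(R) 4 CPU  "
			 * comes out of this block as
			 * "Intel(r) Pentium(r) 4 CPU".
			 */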
2459 			while (*src != '\0') {
2460 				if (src[0] == '(') {
2461 					if (strncmp(src + 1, "R)", 2) == 0) {
2462 						(void) strncpy(dst, "(r)", 3);
2463 						src += 3;
2464 						dst += 3;
2465 						continue;
2466 					}
2467 					if (strncmp(src + 1, "TM)", 3) == 0) {
2468 						(void) strncpy(dst, "(tm)", 4);
2469 						src += 4;
2470 						dst += 4;
2471 						continue;
2472 					}
2473 				}
2474 				*dst++ = *src++;
2475 			}
2476 			*dst = '\0';
2477 
2478 			/*
2479 			 * Finally, remove any trailing spaces
2480 			 */
2481 			while (--dst > cpi->cpi_brandstr)
2482 				if (*dst == ' ')
2483 					*dst = '\0';
2484 				else
2485 					break;
2486 		} else
2487 			fabricate_brandstr(cpi);
2488 	}
2489 	cpi->cpi_pass = 3;
2490 }
2491 
2492 /*
2493  * This routine is called out of bind_hwcap() much later in the life
2494  * of the kernel (post_startup()).  The job of this routine is to resolve
2495  * the hardware feature support and kernel support for those features into
2496  * what we're actually going to tell applications via the aux vector.
2497  */
2498 uint_t
2499 cpuid_pass4(cpu_t *cpu)
2500 {
2501 	struct cpuid_info *cpi;
2502 	uint_t hwcap_flags = 0;
2503 
2504 	if (cpu == NULL)
2505 		cpu = CPU;
2506 	cpi = cpu->cpu_m.mcpu_cpi;
2507 
2508 	ASSERT(cpi->cpi_pass == 3);
2509 
2510 	if (cpi->cpi_maxeax >= 1) {
2511 		uint32_t *edx = &cpi->cpi_support[STD_EDX_FEATURES];
2512 		uint32_t *ecx = &cpi->cpi_support[STD_ECX_FEATURES];
2513 
2514 		*edx = CPI_FEATURES_EDX(cpi);
2515 		*ecx = CPI_FEATURES_ECX(cpi);
2516 
2517 		/*
2518 		 * [these require explicit kernel support]
2519 		 */
2520 		if (!is_x86_feature(x86_featureset, X86FSET_SEP))
2521 			*edx &= ~CPUID_INTC_EDX_SEP;
2522 
2523 		if (!is_x86_feature(x86_featureset, X86FSET_SSE))
2524 			*edx &= ~(CPUID_INTC_EDX_FXSR|CPUID_INTC_EDX_SSE);
2525 		if (!is_x86_feature(x86_featureset, X86FSET_SSE2))
2526 			*edx &= ~CPUID_INTC_EDX_SSE2;
2527 
2528 		if (!is_x86_feature(x86_featureset, X86FSET_HTT))
2529 			*edx &= ~CPUID_INTC_EDX_HTT;
2530 
2531 		if (!is_x86_feature(x86_featureset, X86FSET_SSE3))
2532 			*ecx &= ~CPUID_INTC_ECX_SSE3;
2533 
2534 		if (cpi->cpi_vendor == X86_VENDOR_Intel) {
2535 			if (!is_x86_feature(x86_featureset, X86FSET_SSSE3))
2536 				*ecx &= ~CPUID_INTC_ECX_SSSE3;
2537 			if (!is_x86_feature(x86_featureset, X86FSET_SSE4_1))
2538 				*ecx &= ~CPUID_INTC_ECX_SSE4_1;
2539 			if (!is_x86_feature(x86_featureset, X86FSET_SSE4_2))
2540 				*ecx &= ~CPUID_INTC_ECX_SSE4_2;
2541 			if (!is_x86_feature(x86_featureset, X86FSET_AES))
2542 				*ecx &= ~CPUID_INTC_ECX_AES;
2543 			if (!is_x86_feature(x86_featureset, X86FSET_PCLMULQDQ))
2544 				*ecx &= ~CPUID_INTC_ECX_PCLMULQDQ;
2545 			if (!is_x86_feature(x86_featureset, X86FSET_XSAVE))
2546 				*ecx &= ~(CPUID_INTC_ECX_XSAVE |
2547 				    CPUID_INTC_ECX_OSXSAVE);
2548 			if (!is_x86_feature(x86_featureset, X86FSET_AVX))
2549 				*ecx &= ~CPUID_INTC_ECX_AVX;
2550 		}
2551 
2552 		/*
2553 		 * [no explicit support required beyond x87 fp context]
2554 		 */
2555 		if (!fpu_exists)
2556 			*edx &= ~(CPUID_INTC_EDX_FPU | CPUID_INTC_EDX_MMX);
2557 
2558 		/*
2559 		 * Now map the supported feature vector to things that we
2560 		 * think userland will care about.
2561 		 */
2562 		if (*edx & CPUID_INTC_EDX_SEP)
2563 			hwcap_flags |= AV_386_SEP;
2564 		if (*edx & CPUID_INTC_EDX_SSE)
2565 			hwcap_flags |= AV_386_FXSR | AV_386_SSE;
2566 		if (*edx & CPUID_INTC_EDX_SSE2)
2567 			hwcap_flags |= AV_386_SSE2;
2568 		if (*ecx & CPUID_INTC_ECX_SSE3)
2569 			hwcap_flags |= AV_386_SSE3;
2570 		if (cpi->cpi_vendor == X86_VENDOR_Intel) {
2571 			if (*ecx & CPUID_INTC_ECX_SSSE3)
2572 				hwcap_flags |= AV_386_SSSE3;
2573 			if (*ecx & CPUID_INTC_ECX_SSE4_1)
2574 				hwcap_flags |= AV_386_SSE4_1;
2575 			if (*ecx & CPUID_INTC_ECX_SSE4_2)
2576 				hwcap_flags |= AV_386_SSE4_2;
2577 			if (*ecx & CPUID_INTC_ECX_MOVBE)
2578 				hwcap_flags |= AV_386_MOVBE;
2579 			if (*ecx & CPUID_INTC_ECX_AES)
2580 				hwcap_flags |= AV_386_AES;
2581 			if (*ecx & CPUID_INTC_ECX_PCLMULQDQ)
2582 				hwcap_flags |= AV_386_PCLMULQDQ;
2583 			if ((*ecx & CPUID_INTC_ECX_XSAVE) &&
2584 			    (*ecx & CPUID_INTC_ECX_OSXSAVE))
2585 				hwcap_flags |= AV_386_XSAVE;
2586 		}
2587 		if (*ecx & CPUID_INTC_ECX_VMX)
2588 			hwcap_flags |= AV_386_VMX;
2589 		if (*ecx & CPUID_INTC_ECX_POPCNT)
2590 			hwcap_flags |= AV_386_POPCNT;
2591 		if (*edx & CPUID_INTC_EDX_FPU)
2592 			hwcap_flags |= AV_386_FPU;
2593 		if (*edx & CPUID_INTC_EDX_MMX)
2594 			hwcap_flags |= AV_386_MMX;
2595 
2596 		if (*edx & CPUID_INTC_EDX_TSC)
2597 			hwcap_flags |= AV_386_TSC;
2598 		if (*edx & CPUID_INTC_EDX_CX8)
2599 			hwcap_flags |= AV_386_CX8;
2600 		if (*edx & CPUID_INTC_EDX_CMOV)
2601 			hwcap_flags |= AV_386_CMOV;
2602 		if (*ecx & CPUID_INTC_ECX_CX16)
2603 			hwcap_flags |= AV_386_CX16;
2604 	}
2605 
2606 	if (cpi->cpi_xmaxeax < 0x80000001)
2607 		goto pass4_done;
2608 
2609 	switch (cpi->cpi_vendor) {
2610 		struct cpuid_regs cp;
2611 		uint32_t *edx, *ecx;
2612 
2613 	case X86_VENDOR_Intel:
2614 		/*
2615 		 * Seems like Intel duplicated what was necessary
2616 		 * here to make the initial crop of 64-bit OSes work.
2617 		 * Hopefully, those are the only "extended" bits
2618 		 * they'll add.
2619 		 */
2620 		/*FALLTHROUGH*/
2621 
2622 	case X86_VENDOR_AMD:
2623 		edx = &cpi->cpi_support[AMD_EDX_FEATURES];
2624 		ecx = &cpi->cpi_support[AMD_ECX_FEATURES];
2625 
2626 		*edx = CPI_FEATURES_XTD_EDX(cpi);
2627 		*ecx = CPI_FEATURES_XTD_ECX(cpi);
2628 
2629 		/*
2630 		 * [these features require explicit kernel support]
2631 		 */
2632 		switch (cpi->cpi_vendor) {
2633 		case X86_VENDOR_Intel:
2634 			if (!is_x86_feature(x86_featureset, X86FSET_TSCP))
2635 				*edx &= ~CPUID_AMD_EDX_TSCP;
2636 			break;
2637 
2638 		case X86_VENDOR_AMD:
2639 			if (!is_x86_feature(x86_featureset, X86FSET_TSCP))
2640 				*edx &= ~CPUID_AMD_EDX_TSCP;
2641 			if (!is_x86_feature(x86_featureset, X86FSET_SSE4A))
2642 				*ecx &= ~CPUID_AMD_ECX_SSE4A;
2643 			break;
2644 
2645 		default:
2646 			break;
2647 		}
2648 
2649 		/*
2650 		 * [no explicit support required beyond
2651 		 * x87 fp context and exception handlers]
2652 		 */
2653 		if (!fpu_exists)
2654 			*edx &= ~(CPUID_AMD_EDX_MMXamd |
2655 			    CPUID_AMD_EDX_3DNow | CPUID_AMD_EDX_3DNowx);
2656 
2657 		if (!is_x86_feature(x86_featureset, X86FSET_NX))
2658 			*edx &= ~CPUID_AMD_EDX_NX;
2659 #if !defined(__amd64)
2660 		*edx &= ~CPUID_AMD_EDX_LM;
2661 #endif
2662 		/*
2663 		 * Now map the supported feature vector to
2664 		 * things that we think userland will care about.
2665 		 */
2666 #if defined(__amd64)
2667 		if (*edx & CPUID_AMD_EDX_SYSC)
2668 			hwcap_flags |= AV_386_AMD_SYSC;
2669 #endif
2670 		if (*edx & CPUID_AMD_EDX_MMXamd)
2671 			hwcap_flags |= AV_386_AMD_MMX;
2672 		if (*edx & CPUID_AMD_EDX_3DNow)
2673 			hwcap_flags |= AV_386_AMD_3DNow;
2674 		if (*edx & CPUID_AMD_EDX_3DNowx)
2675 			hwcap_flags |= AV_386_AMD_3DNowx;
2676 		if (*ecx & CPUID_AMD_ECX_SVM)
2677 			hwcap_flags |= AV_386_AMD_SVM;
2678 
2679 		switch (cpi->cpi_vendor) {
2680 		case X86_VENDOR_AMD:
2681 			if (*edx & CPUID_AMD_EDX_TSCP)
2682 				hwcap_flags |= AV_386_TSCP;
2683 			if (*ecx & CPUID_AMD_ECX_AHF64)
2684 				hwcap_flags |= AV_386_AHF;
2685 			if (*ecx & CPUID_AMD_ECX_SSE4A)
2686 				hwcap_flags |= AV_386_AMD_SSE4A;
2687 			if (*ecx & CPUID_AMD_ECX_LZCNT)
2688 				hwcap_flags |= AV_386_AMD_LZCNT;
2689 			break;
2690 
2691 		case X86_VENDOR_Intel:
2692 			if (*edx & CPUID_AMD_EDX_TSCP)
2693 				hwcap_flags |= AV_386_TSCP;
2694 			/*
2695 			 * Aarrgh.
2696 			 * Intel uses a different bit in the same word.
2697 			 */
2698 			if (*ecx & CPUID_INTC_ECX_AHF64)
2699 				hwcap_flags |= AV_386_AHF;
2700 			break;
2701 
2702 		default:
2703 			break;
2704 		}
2705 		break;
2706 
2707 	case X86_VENDOR_TM:
2708 		cp.cp_eax = 0x80860001;
2709 		(void) __cpuid_insn(&cp);
2710 		cpi->cpi_support[TM_EDX_FEATURES] = cp.cp_edx;
2711 		break;
2712 
2713 	default:
2714 		break;
2715 	}
2716 
2717 pass4_done:
2718 	cpi->cpi_pass = 4;
2719 	return (hwcap_flags);
2720 }
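/*
 * A minimal sketch (illustrative, not the actual caller) of how bind_hwcap()
 * consumes the result of cpuid_pass4():
 *
 *	uint_t hwcap = cpuid_pass4(NULL);	(NULL means the current CPU)
 *	if (hwcap & AV_386_SSE2)
 *		... advertise SSE2 in the aux vector ...
 *
 * The returned flags reflect hardware capability filtered by kernel support,
 * which is what ultimately lands in AT_SUN_HWCAP.
 */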
2721 
2722 
2723 /*
2724  * Simulate the cpuid instruction using the data we previously
2725  * captured about this CPU.  We try our best to return the truth
2726  * about the hardware, independently of kernel support.
2727  */
2728 uint32_t
2729 cpuid_insn(cpu_t *cpu, struct cpuid_regs *cp)
2730 {
2731 	struct cpuid_info *cpi;
2732 	struct cpuid_regs *xcp;
2733 
2734 	if (cpu == NULL)
2735 		cpu = CPU;
2736 	cpi = cpu->cpu_m.mcpu_cpi;
2737 
2738 	ASSERT(cpuid_checkpass(cpu, 3));
2739 
2740 	/*
2741 	 * CPUID data is cached in two separate places: cpi_std for standard
2742 	 * CPUID functions, and cpi_extd for extended CPUID functions.
2743 	 */
2744 	if (cp->cp_eax <= cpi->cpi_maxeax && cp->cp_eax < NMAX_CPI_STD)
2745 		xcp = &cpi->cpi_std[cp->cp_eax];
2746 	else if (cp->cp_eax >= 0x80000000 && cp->cp_eax <= cpi->cpi_xmaxeax &&
2747 	    cp->cp_eax < 0x80000000 + NMAX_CPI_EXTD)
2748 		xcp = &cpi->cpi_extd[cp->cp_eax - 0x80000000];
2749 	else
2750 		/*
2751 		 * The caller is asking for data from an input parameter which
2752 		 * the kernel has not cached.  In this case we go fetch from
2753 		 * the hardware and return the data directly to the user.
2754 		 */
2755 		return (__cpuid_insn(cp));
2756 
2757 	cp->cp_eax = xcp->cp_eax;
2758 	cp->cp_ebx = xcp->cp_ebx;
2759 	cp->cp_ecx = xcp->cp_ecx;
2760 	cp->cp_edx = xcp->cp_edx;
2761 	return (cp->cp_eax);
2762 }
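/*
 * Example usage of cpuid_insn(), as a sketch rather than a real caller:
 *
 *	struct cpuid_regs cp;
 *
 *	bzero(&cp, sizeof (cp));
 *	cp.cp_eax = 1;			(standard feature leaf)
 *	(void) cpuid_insn(NULL, &cp);	(NULL means the current CPU)
 *	if (cp.cp_edx & CPUID_INTC_EDX_TSC)
 *		... the cached data says the part has a TSC ...
 *
 * Leaf 1 is cached in cpi_std[], so no cpuid instruction is executed here.
 */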
2763 
2764 int
2765 cpuid_checkpass(cpu_t *cpu, int pass)
2766 {
2767 	return (cpu != NULL && cpu->cpu_m.mcpu_cpi != NULL &&
2768 	    cpu->cpu_m.mcpu_cpi->cpi_pass >= pass);
2769 }
2770 
2771 int
2772 cpuid_getbrandstr(cpu_t *cpu, char *s, size_t n)
2773 {
2774 	ASSERT(cpuid_checkpass(cpu, 3));
2775 
2776 	return (snprintf(s, n, "%s", cpu->cpu_m.mcpu_cpi->cpi_brandstr));
2777 }
2778 
2779 int
2780 cpuid_is_cmt(cpu_t *cpu)
2781 {
2782 	if (cpu == NULL)
2783 		cpu = CPU;
2784 
2785 	ASSERT(cpuid_checkpass(cpu, 1));
2786 
2787 	return (cpu->cpu_m.mcpu_cpi->cpi_chipid >= 0);
2788 }
2789 
2790 /*
2791  * AMD and Intel both implement the 64-bit variant of the syscall
2792  * instruction (syscallq), so if there's -any- support for syscall,
2793  * cpuid currently says "yes, we support this".
2794  *
2795  * However, Intel decided to -not- implement the 32-bit variant of the
2796  * syscall instruction, so we provide a predicate to allow our caller
2797  * to test that subtlety here.
2798  *
2799  * XXPV	Currently, 32-bit syscall instructions don't work via the hypervisor,
2800  *	even in the case where the hardware would in fact support it.
2801  */
2802 /*ARGSUSED*/
2803 int
2804 cpuid_syscall32_insn(cpu_t *cpu)
2805 {
2806 	ASSERT(cpuid_checkpass((cpu == NULL ? CPU : cpu), 1));
2807 
2808 #if !defined(__xpv)
2809 	if (cpu == NULL)
2810 		cpu = CPU;
2811 
2812 	/*CSTYLED*/
2813 	{
2814 		struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
2815 
2816 		if (cpi->cpi_vendor == X86_VENDOR_AMD &&
2817 		    cpi->cpi_xmaxeax >= 0x80000001 &&
2818 		    (CPI_FEATURES_XTD_EDX(cpi) & CPUID_AMD_EDX_SYSC))
2819 			return (1);
2820 	}
2821 #endif
2822 	return (0);
2823 }
2824 
2825 int
2826 cpuid_getidstr(cpu_t *cpu, char *s, size_t n)
2827 {
2828 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
2829 
2830 	static const char fmt[] =
2831 	    "x86 (%s %X family %d model %d step %d clock %d MHz)";
2832 	static const char fmt_ht[] =
2833 	    "x86 (chipid 0x%x %s %X family %d model %d step %d clock %d MHz)";
2834 
2835 	ASSERT(cpuid_checkpass(cpu, 1));
2836 
2837 	if (cpuid_is_cmt(cpu))
2838 		return (snprintf(s, n, fmt_ht, cpi->cpi_chipid,
2839 		    cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax,
2840 		    cpi->cpi_family, cpi->cpi_model,
2841 		    cpi->cpi_step, cpu->cpu_type_info.pi_clock));
2842 	return (snprintf(s, n, fmt,
2843 	    cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax,
2844 	    cpi->cpi_family, cpi->cpi_model,
2845 	    cpi->cpi_step, cpu->cpu_type_info.pi_clock));
2846 }
2847 
2848 const char *
2849 cpuid_getvendorstr(cpu_t *cpu)
2850 {
2851 	ASSERT(cpuid_checkpass(cpu, 1));
2852 	return ((const char *)cpu->cpu_m.mcpu_cpi->cpi_vendorstr);
2853 }
2854 
2855 uint_t
2856 cpuid_getvendor(cpu_t *cpu)
2857 {
2858 	ASSERT(cpuid_checkpass(cpu, 1));
2859 	return (cpu->cpu_m.mcpu_cpi->cpi_vendor);
2860 }
2861 
2862 uint_t
2863 cpuid_getfamily(cpu_t *cpu)
2864 {
2865 	ASSERT(cpuid_checkpass(cpu, 1));
2866 	return (cpu->cpu_m.mcpu_cpi->cpi_family);
2867 }
2868 
2869 uint_t
2870 cpuid_getmodel(cpu_t *cpu)
2871 {
2872 	ASSERT(cpuid_checkpass(cpu, 1));
2873 	return (cpu->cpu_m.mcpu_cpi->cpi_model);
2874 }
2875 
2876 uint_t
2877 cpuid_get_ncpu_per_chip(cpu_t *cpu)
2878 {
2879 	ASSERT(cpuid_checkpass(cpu, 1));
2880 	return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_per_chip);
2881 }
2882 
2883 uint_t
2884 cpuid_get_ncore_per_chip(cpu_t *cpu)
2885 {
2886 	ASSERT(cpuid_checkpass(cpu, 1));
2887 	return (cpu->cpu_m.mcpu_cpi->cpi_ncore_per_chip);
2888 }
2889 
2890 uint_t
2891 cpuid_get_ncpu_sharing_last_cache(cpu_t *cpu)
2892 {
2893 	ASSERT(cpuid_checkpass(cpu, 2));
2894 	return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_shr_last_cache);
2895 }
2896 
2897 id_t
2898 cpuid_get_last_lvl_cacheid(cpu_t *cpu)
2899 {
2900 	ASSERT(cpuid_checkpass(cpu, 2));
2901 	return (cpu->cpu_m.mcpu_cpi->cpi_last_lvl_cacheid);
2902 }
2903 
2904 uint_t
2905 cpuid_getstep(cpu_t *cpu)
2906 {
2907 	ASSERT(cpuid_checkpass(cpu, 1));
2908 	return (cpu->cpu_m.mcpu_cpi->cpi_step);
2909 }
2910 
2911 uint_t
2912 cpuid_getsig(struct cpu *cpu)
2913 {
2914 	ASSERT(cpuid_checkpass(cpu, 1));
2915 	return (cpu->cpu_m.mcpu_cpi->cpi_std[1].cp_eax);
2916 }
2917 
2918 uint32_t
2919 cpuid_getchiprev(struct cpu *cpu)
2920 {
2921 	ASSERT(cpuid_checkpass(cpu, 1));
2922 	return (cpu->cpu_m.mcpu_cpi->cpi_chiprev);
2923 }
2924 
2925 const char *
2926 cpuid_getchiprevstr(struct cpu *cpu)
2927 {
2928 	ASSERT(cpuid_checkpass(cpu, 1));
2929 	return (cpu->cpu_m.mcpu_cpi->cpi_chiprevstr);
2930 }
2931 
2932 uint32_t
2933 cpuid_getsockettype(struct cpu *cpu)
2934 {
2935 	ASSERT(cpuid_checkpass(cpu, 1));
2936 	return (cpu->cpu_m.mcpu_cpi->cpi_socket);
2937 }
2938 
2939 const char *
2940 cpuid_getsocketstr(cpu_t *cpu)
2941 {
2942 	static const char *socketstr = NULL;
2943 	struct cpuid_info *cpi;
2944 
2945 	ASSERT(cpuid_checkpass(cpu, 1));
2946 	cpi = cpu->cpu_m.mcpu_cpi;
2947 
2948 	/* Assume that socket types are the same across the system */
2949 	if (socketstr == NULL)
2950 		socketstr = _cpuid_sktstr(cpi->cpi_vendor, cpi->cpi_family,
2951 		    cpi->cpi_model, cpi->cpi_step);
2952 
2953 
2954 	return (socketstr);
2955 }
2956 
2957 int
2958 cpuid_get_chipid(cpu_t *cpu)
2959 {
2960 	ASSERT(cpuid_checkpass(cpu, 1));
2961 
2962 	if (cpuid_is_cmt(cpu))
2963 		return (cpu->cpu_m.mcpu_cpi->cpi_chipid);
2964 	return (cpu->cpu_id);
2965 }
2966 
2967 id_t
2968 cpuid_get_coreid(cpu_t *cpu)
2969 {
2970 	ASSERT(cpuid_checkpass(cpu, 1));
2971 	return (cpu->cpu_m.mcpu_cpi->cpi_coreid);
2972 }
2973 
2974 int
2975 cpuid_get_pkgcoreid(cpu_t *cpu)
2976 {
2977 	ASSERT(cpuid_checkpass(cpu, 1));
2978 	return (cpu->cpu_m.mcpu_cpi->cpi_pkgcoreid);
2979 }
2980 
2981 int
2982 cpuid_get_clogid(cpu_t *cpu)
2983 {
2984 	ASSERT(cpuid_checkpass(cpu, 1));
2985 	return (cpu->cpu_m.mcpu_cpi->cpi_clogid);
2986 }
2987 
2988 int
2989 cpuid_get_cacheid(cpu_t *cpu)
2990 {
2991 	ASSERT(cpuid_checkpass(cpu, 1));
2992 	return (cpu->cpu_m.mcpu_cpi->cpi_last_lvl_cacheid);
2993 }
2994 
2995 uint_t
2996 cpuid_get_procnodeid(cpu_t *cpu)
2997 {
2998 	ASSERT(cpuid_checkpass(cpu, 1));
2999 	return (cpu->cpu_m.mcpu_cpi->cpi_procnodeid);
3000 }
3001 
3002 uint_t
3003 cpuid_get_procnodes_per_pkg(cpu_t *cpu)
3004 {
3005 	ASSERT(cpuid_checkpass(cpu, 1));
3006 	return (cpu->cpu_m.mcpu_cpi->cpi_procnodes_per_pkg);
3007 }
3008 
3009 /*ARGSUSED*/
3010 int
3011 cpuid_have_cr8access(cpu_t *cpu)
3012 {
3013 #if defined(__amd64)
3014 	return (1);
3015 #else
3016 	struct cpuid_info *cpi;
3017 
3018 	ASSERT(cpu != NULL);
3019 	cpi = cpu->cpu_m.mcpu_cpi;
3020 	if (cpi->cpi_vendor == X86_VENDOR_AMD && cpi->cpi_maxeax >= 1 &&
3021 	    (CPI_FEATURES_XTD_ECX(cpi) & CPUID_AMD_ECX_CR8D) != 0)
3022 		return (1);
3023 	return (0);
3024 #endif
3025 }
3026 
3027 uint32_t
3028 cpuid_get_apicid(cpu_t *cpu)
3029 {
3030 	ASSERT(cpuid_checkpass(cpu, 1));
3031 	if (cpu->cpu_m.mcpu_cpi->cpi_maxeax < 1) {
3032 		return (UINT32_MAX);
3033 	} else {
3034 		return (cpu->cpu_m.mcpu_cpi->cpi_apicid);
3035 	}
3036 }
3037 
3038 void
3039 cpuid_get_addrsize(cpu_t *cpu, uint_t *pabits, uint_t *vabits)
3040 {
3041 	struct cpuid_info *cpi;
3042 
3043 	if (cpu == NULL)
3044 		cpu = CPU;
3045 	cpi = cpu->cpu_m.mcpu_cpi;
3046 
3047 	ASSERT(cpuid_checkpass(cpu, 1));
3048 
3049 	if (pabits)
3050 		*pabits = cpi->cpi_pabits;
3051 	if (vabits)
3052 		*vabits = cpi->cpi_vabits;
3053 }
3054 
3055 /*
3056  * Returns the number of data TLB entries for a corresponding
3057  * pagesize.  If it can't be computed, or isn't known, the
3058  * routine returns zero.  If you ask about an architecturally
3059  * impossible pagesize, the routine will panic (so that the
3060  * hat implementor knows that things are inconsistent.)
3061  */
3062 uint_t
3063 cpuid_get_dtlb_nent(cpu_t *cpu, size_t pagesize)
3064 {
3065 	struct cpuid_info *cpi;
3066 	uint_t dtlb_nent = 0;
3067 
3068 	if (cpu == NULL)
3069 		cpu = CPU;
3070 	cpi = cpu->cpu_m.mcpu_cpi;
3071 
3072 	ASSERT(cpuid_checkpass(cpu, 1));
3073 
3074 	/*
3075 	 * Check the L2 TLB info
3076 	 */
3077 	if (cpi->cpi_xmaxeax >= 0x80000006) {
3078 		struct cpuid_regs *cp = &cpi->cpi_extd[6];
3079 
3080 		switch (pagesize) {
3081 
3082 		case 4 * 1024:
3083 			/*
3084 			 * All zero in the top 16 bits of the register
3085 			 * indicates a unified TLB. Size is in low 16 bits.
3086 			 */
3087 			if ((cp->cp_ebx & 0xffff0000) == 0)
3088 				dtlb_nent = cp->cp_ebx & 0x0000ffff;
3089 			else
3090 				dtlb_nent = BITX(cp->cp_ebx, 27, 16);
3091 			break;
3092 
3093 		case 2 * 1024 * 1024:
3094 			if ((cp->cp_eax & 0xffff0000) == 0)
3095 				dtlb_nent = cp->cp_eax & 0x0000ffff;
3096 			else
3097 				dtlb_nent = BITX(cp->cp_eax, 27, 16);
3098 			break;
3099 
3100 		default:
3101 			panic("unknown L2 pagesize");
3102 			/*NOTREACHED*/
3103 		}
3104 	}
3105 
3106 	if (dtlb_nent != 0)
3107 		return (dtlb_nent);
3108 
3109 	/*
3110 	 * No L2 TLB support for this size, try L1.
3111 	 */
3112 	if (cpi->cpi_xmaxeax >= 0x80000005) {
3113 		struct cpuid_regs *cp = &cpi->cpi_extd[5];
3114 
3115 		switch (pagesize) {
3116 		case 4 * 1024:
3117 			dtlb_nent = BITX(cp->cp_ebx, 23, 16);
3118 			break;
3119 		case 2 * 1024 * 1024:
3120 			dtlb_nent = BITX(cp->cp_eax, 23, 16);
3121 			break;
3122 		default:
3123 			panic("unknown L1 d-TLB pagesize");
3124 			/*NOTREACHED*/
3125 		}
3126 	}
3127 
3128 	return (dtlb_nent);
3129 }
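/*
 * Decoding example for the L2 TLB fields above, using a hypothetical
 * register value: if fn 0x80000006 returned %ebx == 0x00000040, the top
 * 16 bits are zero, so this is a unified TLB with 0x40 == 64 entries for
 * 4K pages; were the top bits non-zero, the d-TLB entry count would
 * instead come from bits 27:16.
 */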
3130 
3131 /*
3132  * Return 0 if the erratum is not present or not applicable, positive
3133  * if it is, and negative if the status of the erratum is unknown.
3134  *
3135  * See "Revision Guide for AMD Athlon(tm) 64 and AMD Opteron(tm)
3136  * Processors" #25759, Rev 3.57, August 2005
3137  */
3138 int
3139 cpuid_opteron_erratum(cpu_t *cpu, uint_t erratum)
3140 {
3141 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
3142 	uint_t eax;
3143 
3144 	/*
3145 	 * Bail out if this CPU isn't an AMD CPU, or if it's
3146 	 * a legacy (32-bit) AMD CPU.
3147 	 */
3148 	if (cpi->cpi_vendor != X86_VENDOR_AMD ||
3149 	    cpi->cpi_family == 4 || cpi->cpi_family == 5 ||
3150 	    cpi->cpi_family == 6)
3152 		return (0);
3153 
3154 	eax = cpi->cpi_std[1].cp_eax;
3155 
3156 #define	SH_B0(eax)	(eax == 0xf40 || eax == 0xf50)
3157 #define	SH_B3(eax) 	(eax == 0xf51)
3158 #define	B(eax)		(SH_B0(eax) || SH_B3(eax))
3159 
3160 #define	SH_C0(eax)	(eax == 0xf48 || eax == 0xf58)
3161 
3162 #define	SH_CG(eax)	(eax == 0xf4a || eax == 0xf5a || eax == 0xf7a)
3163 #define	DH_CG(eax)	(eax == 0xfc0 || eax == 0xfe0 || eax == 0xff0)
3164 #define	CH_CG(eax)	(eax == 0xf82 || eax == 0xfb2)
3165 #define	CG(eax)		(SH_CG(eax) || DH_CG(eax) || CH_CG(eax))
3166 
3167 #define	SH_D0(eax)	(eax == 0x10f40 || eax == 0x10f50 || eax == 0x10f70)
3168 #define	DH_D0(eax)	(eax == 0x10fc0 || eax == 0x10ff0)
3169 #define	CH_D0(eax)	(eax == 0x10f80 || eax == 0x10fb0)
3170 #define	D0(eax)		(SH_D0(eax) || DH_D0(eax) || CH_D0(eax))
3171 
3172 #define	SH_E0(eax)	(eax == 0x20f50 || eax == 0x20f40 || eax == 0x20f70)
3173 #define	JH_E1(eax)	(eax == 0x20f10)	/* JH8_E0 had 0x20f30 */
3174 #define	DH_E3(eax)	(eax == 0x20fc0 || eax == 0x20ff0)
3175 #define	SH_E4(eax)	(eax == 0x20f51 || eax == 0x20f71)
3176 #define	BH_E4(eax)	(eax == 0x20fb1)
3177 #define	SH_E5(eax)	(eax == 0x20f42)
3178 #define	DH_E6(eax)	(eax == 0x20ff2 || eax == 0x20fc2)
3179 #define	JH_E6(eax)	(eax == 0x20f12 || eax == 0x20f32)
3180 #define	EX(eax)		(SH_E0(eax) || JH_E1(eax) || DH_E3(eax) || \
3181 			    SH_E4(eax) || BH_E4(eax) || SH_E5(eax) || \
3182 			    DH_E6(eax) || JH_E6(eax))
3183 
3184 #define	DR_AX(eax)	(eax == 0x100f00 || eax == 0x100f01 || eax == 0x100f02)
3185 #define	DR_B0(eax)	(eax == 0x100f20)
3186 #define	DR_B1(eax)	(eax == 0x100f21)
3187 #define	DR_BA(eax)	(eax == 0x100f2a)
3188 #define	DR_B2(eax)	(eax == 0x100f22)
3189 #define	DR_B3(eax)	(eax == 0x100f23)
3190 #define	RB_C0(eax)	(eax == 0x100f40)
3191 
3192 	switch (erratum) {
3193 	case 1:
3194 		return (cpi->cpi_family < 0x10);
3195 	case 51:	/* what does the asterisk mean? */
3196 		return (B(eax) || SH_C0(eax) || CG(eax));
3197 	case 52:
3198 		return (B(eax));
3199 	case 57:
3200 		return (cpi->cpi_family <= 0x11);
3201 	case 58:
3202 		return (B(eax));
3203 	case 60:
3204 		return (cpi->cpi_family <= 0x11);
3205 	case 61:
3206 	case 62:
3207 	case 63:
3208 	case 64:
3209 	case 65:
3210 	case 66:
3211 	case 68:
3212 	case 69:
3213 	case 70:
3214 	case 71:
3215 		return (B(eax));
3216 	case 72:
3217 		return (SH_B0(eax));
3218 	case 74:
3219 		return (B(eax));
3220 	case 75:
3221 		return (cpi->cpi_family < 0x10);
3222 	case 76:
3223 		return (B(eax));
3224 	case 77:
3225 		return (cpi->cpi_family <= 0x11);
3226 	case 78:
3227 		return (B(eax) || SH_C0(eax));
3228 	case 79:
3229 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
3230 	case 80:
3231 	case 81:
3232 	case 82:
3233 		return (B(eax));
3234 	case 83:
3235 		return (B(eax) || SH_C0(eax) || CG(eax));
3236 	case 85:
3237 		return (cpi->cpi_family < 0x10);
3238 	case 86:
3239 		return (SH_C0(eax) || CG(eax));
3240 	case 88:
3241 #if !defined(__amd64)
3242 		return (0);
3243 #else
3244 		return (B(eax) || SH_C0(eax));
3245 #endif
3246 	case 89:
3247 		return (cpi->cpi_family < 0x10);
3248 	case 90:
3249 		return (B(eax) || SH_C0(eax) || CG(eax));
3250 	case 91:
3251 	case 92:
3252 		return (B(eax) || SH_C0(eax));
3253 	case 93:
3254 		return (SH_C0(eax));
3255 	case 94:
3256 		return (B(eax) || SH_C0(eax) || CG(eax));
3257 	case 95:
3258 #if !defined(__amd64)
3259 		return (0);
3260 #else
3261 		return (B(eax) || SH_C0(eax));
3262 #endif
3263 	case 96:
3264 		return (B(eax) || SH_C0(eax) || CG(eax));
3265 	case 97:
3266 	case 98:
3267 		return (SH_C0(eax) || CG(eax));
3268 	case 99:
3269 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
3270 	case 100:
3271 		return (B(eax) || SH_C0(eax));
3272 	case 101:
3273 	case 103:
3274 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
3275 	case 104:
3276 		return (SH_C0(eax) || CG(eax) || D0(eax));
3277 	case 105:
3278 	case 106:
3279 	case 107:
3280 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
3281 	case 108:
3282 		return (DH_CG(eax));
3283 	case 109:
3284 		return (SH_C0(eax) || CG(eax) || D0(eax));
3285 	case 110:
3286 		return (D0(eax) || EX(eax));
3287 	case 111:
3288 		return (CG(eax));
3289 	case 112:
3290 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
3291 	case 113:
3292 		return (eax == 0x20fc0);
3293 	case 114:
3294 		return (SH_E0(eax) || JH_E1(eax) || DH_E3(eax));
3295 	case 115:
3296 		return (SH_E0(eax) || JH_E1(eax));
3297 	case 116:
3298 		return (SH_E0(eax) || JH_E1(eax) || DH_E3(eax));
3299 	case 117:
3300 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
3301 	case 118:
3302 		return (SH_E0(eax) || JH_E1(eax) || SH_E4(eax) || BH_E4(eax) ||
3303 		    JH_E6(eax));
3304 	case 121:
3305 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
3306 	case 122:
3307 		return (cpi->cpi_family < 0x10 || cpi->cpi_family == 0x11);
3308 	case 123:
3309 		return (JH_E1(eax) || BH_E4(eax) || JH_E6(eax));
3310 	case 131:
3311 		return (cpi->cpi_family < 0x10);
3312 	case 6336786:
3313 		/*
3314 		 * Test for AdvPowerMgmtInfo.TscPStateInvariant
3315 		 * if this is a K8 family or newer processor
3316 		 */
3317 		if (CPI_FAMILY(cpi) == 0xf) {
3318 			struct cpuid_regs regs;
3319 			regs.cp_eax = 0x80000007;
3320 			(void) __cpuid_insn(&regs);
3321 			return (!(regs.cp_edx & 0x100));
3322 		}
3323 		return (0);
3324 	case 6323525:
3325 		return (((((eax >> 12) & 0xff00) + (eax & 0xf00)) |
3326 		    (((eax >> 4) & 0xf) | ((eax >> 12) & 0xf0))) < 0xf40);
3327 
3328 	case 6671130:
3329 		/*
3330 		 * Check for processors (pre-Shanghai) that do not provide
3331 		 * optimal management of 1GB PTEs in their TLB.
3332 		 */
3333 		return (cpi->cpi_family == 0x10 && cpi->cpi_model < 4);
3334 
3335 	case 298:
3336 		return (DR_AX(eax) || DR_B0(eax) || DR_B1(eax) || DR_BA(eax) ||
3337 		    DR_B2(eax) || RB_C0(eax));
3338 
3339 	default:
3340 		return (-1);
3341 
3342 	}
3343 }
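/*
 * Sketch of the calling convention (illustrative; the real callers live in
 * the workaround code elsewhere in the kernel):
 *
 *	if (cpuid_opteron_erratum(cpu, 121) > 0) {
 *		... erratum present on this revision; apply workaround ...
 *	}
 *
 * A negative return means the status of the erratum is unknown for this
 * part, which callers may choose to treat conservatively.
 */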
3344 
3345 /*
3346  * Determine if specified erratum is present via OSVW (OS Visible Workaround).
3347  * Return 1 if erratum is present, 0 if not present and -1 if indeterminate.
3348  */
3349 int
3350 osvw_opteron_erratum(cpu_t *cpu, uint_t erratum)
3351 {
3352 	struct cpuid_info	*cpi;
3353 	uint_t			osvwid;
3354 	static int		osvwfeature = -1;
3355 	uint64_t		osvwlength;
3356 
3357 
3358 	cpi = cpu->cpu_m.mcpu_cpi;
3359 
3360 	/* confirm OSVW supported */
3361 	if (osvwfeature == -1) {
3362 		osvwfeature = cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW;
3363 	} else {
3364 		/* assert that osvw feature setting is consistent on all cpus */
3365 		ASSERT(osvwfeature ==
3366 		    (cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW));
3367 	}
3368 	if (!osvwfeature)
3369 		return (-1);
3370 
3371 	osvwlength = rdmsr(MSR_AMD_OSVW_ID_LEN) & OSVW_ID_LEN_MASK;
3372 
3373 	switch (erratum) {
3374 	case 298:	/* osvwid is 0 */
3375 		osvwid = 0;
3376 		if (osvwlength <= (uint64_t)osvwid) {
3377 			/* osvwid 0 is unknown */
3378 			return (-1);
3379 		}
3380 
3381 		/*
3382 		 * Check the OSVW STATUS MSR to determine the state
3383 		 * of the erratum where:
3384 		 *   0 - fixed by HW
3385 		 *   1 - BIOS has applied the workaround when BIOS
3386 		 *   workaround is available. (Or for other errata,
3387 		 *   OS workaround is required.)
3388 		 * For a value of 1, caller will confirm that the
3389 		 * erratum 298 workaround has indeed been applied by BIOS.
3390 		 *
3391 		 * A 1 may be set in cpus that have a HW fix
3392 		 * in a mixed cpu system. Regarding erratum 298:
3393 		 *   In a multiprocessor platform, the workaround above
3394 		 *   should be applied to all processors regardless of
3395 		 *   silicon revision when an affected processor is
3396 		 *   present.
3397 		 */
3398 
3399 		return (rdmsr(MSR_AMD_OSVW_STATUS +
3400 		    (osvwid / OSVW_ID_CNT_PER_MSR)) &
3401 		    (1ULL << (osvwid % OSVW_ID_CNT_PER_MSR)));
3402 
3403 	default:
3404 		return (-1);
3405 	}
3406 }
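/*
 * Worked example of the MSR indexing above, assuming OSVW status bits are
 * packed 64 per MSR (OSVW_ID_CNT_PER_MSR == 64): osvwid 0 is bit 0 of
 * MSR_AMD_OSVW_STATUS itself, while a hypothetical osvwid of 70 would be
 * bit 6 of MSR_AMD_OSVW_STATUS + 1.
 */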
3407 
3408 static const char assoc_str[] = "associativity";
3409 static const char line_str[] = "line-size";
3410 static const char size_str[] = "size";
3411 
3412 static void
3413 add_cache_prop(dev_info_t *devi, const char *label, const char *type,
3414     uint32_t val)
3415 {
3416 	char buf[128];
3417 
3418 	/*
3419 	 * ndi_prop_update_int() is used because it is desirable for
3420 	 * DDI_PROP_HW_DEF and DDI_PROP_DONTSLEEP to be set.
3421 	 */
3422 	if (snprintf(buf, sizeof (buf), "%s-%s", label, type) < sizeof (buf))
3423 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, devi, buf, val);
3424 }
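/*
 * For example, add_cache_prop(devi, l2_cache_str, size_str, 512*1024)
 * creates an "l2-cache-size" integer property with value 524288 on the
 * devinfo node (visible with prtconf -v, for instance).
 */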
3425 
3426 /*
3427  * Intel-style cache/tlb description
3428  *
3429  * Standard cpuid level 2 gives a randomly ordered
3430  * selection of tags that index into a table that describes
3431  * cache and tlb properties.
3432  */
3433 
3434 static const char l1_icache_str[] = "l1-icache";
3435 static const char l1_dcache_str[] = "l1-dcache";
3436 static const char l2_cache_str[] = "l2-cache";
3437 static const char l3_cache_str[] = "l3-cache";
3438 static const char itlb4k_str[] = "itlb-4K";
3439 static const char dtlb4k_str[] = "dtlb-4K";
3440 static const char itlb2M_str[] = "itlb-2M";
3441 static const char itlb4M_str[] = "itlb-4M";
3442 static const char dtlb4M_str[] = "dtlb-4M";
3443 static const char dtlb24_str[] = "dtlb0-2M-4M";
3444 static const char itlb424_str[] = "itlb-4K-2M-4M";
3445 static const char itlb24_str[] = "itlb-2M-4M";
3446 static const char dtlb44_str[] = "dtlb-4K-4M";
3447 static const char sl1_dcache_str[] = "sectored-l1-dcache";
3448 static const char sl2_cache_str[] = "sectored-l2-cache";
3449 static const char itrace_str[] = "itrace-cache";
3450 static const char sl3_cache_str[] = "sectored-l3-cache";
3451 static const char sh_l2_tlb4k_str[] = "shared-l2-tlb-4k";
3452 
3453 static const struct cachetab {
3454 	uint8_t 	ct_code;
3455 	uint8_t		ct_assoc;
3456 	uint16_t 	ct_line_size;
3457 	size_t		ct_size;
3458 	const char	*ct_label;
3459 } intel_ctab[] = {
3460 	/*
3461 	 * maintain descending order!
3462 	 *
3463 	 * Codes ignored - Reason
3464 	 * ----------------------
3465 	 * 40H - intel_cpuid_4_cache_info() disambiguates l2/l3 cache
3466 	 * f0H/f1H - Currently we do not interpret prefetch size by design
3467 	 */
3468 	{ 0xe4, 16, 64, 8*1024*1024, l3_cache_str},
3469 	{ 0xe3, 16, 64, 4*1024*1024, l3_cache_str},
3470 	{ 0xe2, 16, 64, 2*1024*1024, l3_cache_str},
3471 	{ 0xde, 12, 64, 6*1024*1024, l3_cache_str},
3472 	{ 0xdd, 12, 64, 3*1024*1024, l3_cache_str},
3473 	{ 0xdc, 12, 64, ((1*1024*1024)+(512*1024)), l3_cache_str},
3474 	{ 0xd8, 8, 64, 4*1024*1024, l3_cache_str},
3475 	{ 0xd7, 8, 64, 2*1024*1024, l3_cache_str},
3476 	{ 0xd6, 8, 64, 1*1024*1024, l3_cache_str},
3477 	{ 0xd2, 4, 64, 2*1024*1024, l3_cache_str},
3478 	{ 0xd1, 4, 64, 1*1024*1024, l3_cache_str},
3479 	{ 0xd0, 4, 64, 512*1024, l3_cache_str},
3480 	{ 0xca, 4, 0, 512, sh_l2_tlb4k_str},
3481 	{ 0xc0, 4, 0, 8, dtlb44_str },
3482 	{ 0xba, 4, 0, 64, dtlb4k_str },
3483 	{ 0xb4, 4, 0, 256, dtlb4k_str },
3484 	{ 0xb3, 4, 0, 128, dtlb4k_str },
3485 	{ 0xb2, 4, 0, 64, itlb4k_str },
3486 	{ 0xb0, 4, 0, 128, itlb4k_str },
3487 	{ 0x87, 8, 64, 1024*1024, l2_cache_str},
3488 	{ 0x86, 4, 64, 512*1024, l2_cache_str},
3489 	{ 0x85, 8, 32, 2*1024*1024, l2_cache_str},
3490 	{ 0x84, 8, 32, 1024*1024, l2_cache_str},
3491 	{ 0x83, 8, 32, 512*1024, l2_cache_str},
3492 	{ 0x82, 8, 32, 256*1024, l2_cache_str},
3493 	{ 0x80, 8, 64, 512*1024, l2_cache_str},
3494 	{ 0x7f, 2, 64, 512*1024, l2_cache_str},
3495 	{ 0x7d, 8, 64, 2*1024*1024, sl2_cache_str},
3496 	{ 0x7c, 8, 64, 1024*1024, sl2_cache_str},
3497 	{ 0x7b, 8, 64, 512*1024, sl2_cache_str},
3498 	{ 0x7a, 8, 64, 256*1024, sl2_cache_str},
3499 	{ 0x79, 8, 64, 128*1024, sl2_cache_str},
3500 	{ 0x78, 8, 64, 1024*1024, l2_cache_str},
3501 	{ 0x73, 8, 0, 64*1024, itrace_str},
3502 	{ 0x72, 8, 0, 32*1024, itrace_str},
3503 	{ 0x71, 8, 0, 16*1024, itrace_str},
3504 	{ 0x70, 8, 0, 12*1024, itrace_str},
3505 	{ 0x68, 4, 64, 32*1024, sl1_dcache_str},
3506 	{ 0x67, 4, 64, 16*1024, sl1_dcache_str},
3507 	{ 0x66, 4, 64, 8*1024, sl1_dcache_str},
3508 	{ 0x60, 8, 64, 16*1024, sl1_dcache_str},
3509 	{ 0x5d, 0, 0, 256, dtlb44_str},
3510 	{ 0x5c, 0, 0, 128, dtlb44_str},
3511 	{ 0x5b, 0, 0, 64, dtlb44_str},
3512 	{ 0x5a, 4, 0, 32, dtlb24_str},
3513 	{ 0x59, 0, 0, 16, dtlb4k_str},
3514 	{ 0x57, 4, 0, 16, dtlb4k_str},
3515 	{ 0x56, 4, 0, 16, dtlb4M_str},
3516 	{ 0x55, 0, 0, 7, itlb24_str},
3517 	{ 0x52, 0, 0, 256, itlb424_str},
3518 	{ 0x51, 0, 0, 128, itlb424_str},
3519 	{ 0x50, 0, 0, 64, itlb424_str},
3520 	{ 0x4f, 0, 0, 32, itlb4k_str},
3521 	{ 0x4e, 24, 64, 6*1024*1024, l2_cache_str},
3522 	{ 0x4d, 16, 64, 16*1024*1024, l3_cache_str},
3523 	{ 0x4c, 12, 64, 12*1024*1024, l3_cache_str},
3524 	{ 0x4b, 16, 64, 8*1024*1024, l3_cache_str},
3525 	{ 0x4a, 12, 64, 6*1024*1024, l3_cache_str},
3526 	{ 0x49, 16, 64, 4*1024*1024, l3_cache_str},
3527 	{ 0x48, 12, 64, 3*1024*1024, l2_cache_str},
3528 	{ 0x47, 8, 64, 8*1024*1024, l3_cache_str},
3529 	{ 0x46, 4, 64, 4*1024*1024, l3_cache_str},
3530 	{ 0x45, 4, 32, 2*1024*1024, l2_cache_str},
3531 	{ 0x44, 4, 32, 1024*1024, l2_cache_str},
3532 	{ 0x43, 4, 32, 512*1024, l2_cache_str},
3533 	{ 0x42, 4, 32, 256*1024, l2_cache_str},
3534 	{ 0x41, 4, 32, 128*1024, l2_cache_str},
3535 	{ 0x3e, 4, 64, 512*1024, sl2_cache_str},
3536 	{ 0x3d, 6, 64, 384*1024, sl2_cache_str},
3537 	{ 0x3c, 4, 64, 256*1024, sl2_cache_str},
3538 	{ 0x3b, 2, 64, 128*1024, sl2_cache_str},
3539 	{ 0x3a, 6, 64, 192*1024, sl2_cache_str},
3540 	{ 0x39, 4, 64, 128*1024, sl2_cache_str},
3541 	{ 0x30, 8, 64, 32*1024, l1_icache_str},
3542 	{ 0x2c, 8, 64, 32*1024, l1_dcache_str},
3543 	{ 0x29, 8, 64, 4096*1024, sl3_cache_str},
3544 	{ 0x25, 8, 64, 2048*1024, sl3_cache_str},
3545 	{ 0x23, 8, 64, 1024*1024, sl3_cache_str},
3546 	{ 0x22, 4, 64, 512*1024, sl3_cache_str},
3547 	{ 0x0e, 6, 64, 24*1024, l1_dcache_str},
3548 	{ 0x0d, 4, 32, 16*1024, l1_dcache_str},
3549 	{ 0x0c, 4, 32, 16*1024, l1_dcache_str},
3550 	{ 0x0b, 4, 0, 4, itlb4M_str},
3551 	{ 0x0a, 2, 32, 8*1024, l1_dcache_str},
3552 	{ 0x08, 4, 32, 16*1024, l1_icache_str},
3553 	{ 0x06, 4, 32, 8*1024, l1_icache_str},
3554 	{ 0x05, 4, 0, 32, dtlb4M_str},
3555 	{ 0x04, 4, 0, 8, dtlb4M_str},
3556 	{ 0x03, 4, 0, 64, dtlb4k_str},
3557 	{ 0x02, 4, 0, 2, itlb4M_str},
3558 	{ 0x01, 4, 0, 32, itlb4k_str},
3559 	{ 0 }
3560 };
3561 
3562 static const struct cachetab cyrix_ctab[] = {
3563 	{ 0x70, 4, 0, 32, "tlb-4K" },
3564 	{ 0x80, 4, 16, 16*1024, "l1-cache" },
3565 	{ 0 }
3566 };
3567 
3568 /*
3569  * Search a cache table for a matching entry
3570  */
3571 static const struct cachetab *
3572 find_cacheent(const struct cachetab *ct, uint_t code)
3573 {
3574 	if (code != 0) {
3575 		for (; ct->ct_code != 0; ct++)
3576 			if (ct->ct_code <= code)
3577 				break;
3578 		if (ct->ct_code == code)
3579 			return (ct);
3580 	}
3581 	return (NULL);
3582 }
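/*
 * The descending sort of the tables is what makes this scan correct: the
 * first entry with ct_code <= code is either an exact match or proof that
 * the code is absent. For example, looking up 0x45 stops at the 0x45
 * entry, while looking up an unknown code such as 0x3f stops at the 0x3e
 * entry, fails the equality test and returns NULL.
 */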
3583 
3584 /*
3585  * Populate cachetab entry with L2 or L3 cache-information using
3586  * cpuid function 4. This function is called from intel_walk_cacheinfo()
3587  * when descriptor 0x49 is encountered. It returns 0 if no such cache
3588  * information is found.
3589  */
3590 static int
3591 intel_cpuid_4_cache_info(struct cachetab *ct, struct cpuid_info *cpi)
3592 {
3593 	uint32_t level, i;
3594 	int ret = 0;
3595 
3596 	for (i = 0; i < cpi->cpi_std_4_size; i++) {
3597 		level = CPI_CACHE_LVL(cpi->cpi_std_4[i]);
3598 
3599 		if (level == 2 || level == 3) {
3600 			ct->ct_assoc = CPI_CACHE_WAYS(cpi->cpi_std_4[i]) + 1;
3601 			ct->ct_line_size =
3602 			    CPI_CACHE_COH_LN_SZ(cpi->cpi_std_4[i]) + 1;
3603 			ct->ct_size = ct->ct_assoc *
3604 			    (CPI_CACHE_PARTS(cpi->cpi_std_4[i]) + 1) *
3605 			    ct->ct_line_size *
3606 			    (cpi->cpi_std_4[i]->cp_ecx + 1);
3607 
3608 			if (level == 2) {
3609 				ct->ct_label = l2_cache_str;
3610 			} else if (level == 3) {
3611 				ct->ct_label = l3_cache_str;
3612 			}
3613 			ret = 1;
3614 		}
3615 	}
3616 
3617 	return (ret);
3618 }
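/*
 * Worked example of the size computation above (hypothetical leaf 4
 * values): ways == 8, partitions == 1, line size == 64 and 4096 sets
 * (%ecx == 4095) multiply out to 8 * 1 * 64 * 4096 == 2MB, which is then
 * labeled l2-cache or l3-cache according to the reported level.
 */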
3619 
3620 /*
3621  * Walk the cacheinfo descriptor, applying 'func' to every valid element
3622  * The walk is terminated if the walker returns non-zero.
3623  */
3624 static void
3625 intel_walk_cacheinfo(struct cpuid_info *cpi,
3626     void *arg, int (*func)(void *, const struct cachetab *))
3627 {
3628 	const struct cachetab *ct;
3629 	struct cachetab des_49_ct, des_b1_ct;
3630 	uint8_t *dp;
3631 	int i;
3632 
3633 	if ((dp = cpi->cpi_cacheinfo) == NULL)
3634 		return;
3635 	for (i = 0; i < cpi->cpi_ncache; i++, dp++) {
3636 		/*
3637 		 * For overloaded descriptor 0x49 we use cpuid function 4
3638 		 * if supported by the current processor, to create
3639 		 * cache information.
3640 		 * For overloaded descriptor 0xb1 we use X86_PAE flag
3641 		 * to disambiguate the cache information.
3642 		 */
3643 		if (*dp == 0x49 && cpi->cpi_maxeax >= 0x4 &&
3644 		    intel_cpuid_4_cache_info(&des_49_ct, cpi) == 1) {
3645 			ct = &des_49_ct;
3646 		} else if (*dp == 0xb1) {
3647 			des_b1_ct.ct_code = 0xb1;
3648 			des_b1_ct.ct_assoc = 4;
3649 			des_b1_ct.ct_line_size = 0;
3650 			if (is_x86_feature(x86_featureset, X86FSET_PAE)) {
3651 				des_b1_ct.ct_size = 8;
3652 				des_b1_ct.ct_label = itlb2M_str;
3653 			} else {
3654 				des_b1_ct.ct_size = 4;
3655 				des_b1_ct.ct_label = itlb4M_str;
3656 			}
3657 			ct = &des_b1_ct;
3658 		} else {
3659 			if ((ct = find_cacheent(intel_ctab, *dp)) == NULL) {
3660 				continue;
3661 			}
3662 		}
3663 
3664 		if (func(arg, ct) != 0) {
3665 			break;
3666 		}
3667 	}
3668 }
3669 
3670 /*
3671  * (Like the Intel one, except for Cyrix CPUs)
3672  */
3673 static void
3674 cyrix_walk_cacheinfo(struct cpuid_info *cpi,
3675     void *arg, int (*func)(void *, const struct cachetab *))
3676 {
3677 	const struct cachetab *ct;
3678 	uint8_t *dp;
3679 	int i;
3680 
3681 	if ((dp = cpi->cpi_cacheinfo) == NULL)
3682 		return;
3683 	for (i = 0; i < cpi->cpi_ncache; i++, dp++) {
3684 		/*
3685 		 * Search Cyrix-specific descriptor table first ..
3686 		 */
3687 		if ((ct = find_cacheent(cyrix_ctab, *dp)) != NULL) {
3688 			if (func(arg, ct) != 0)
3689 				break;
3690 			continue;
3691 		}
3692 		/*
3693 		 * .. else fall back to the Intel one
3694 		 */
3695 		if ((ct = find_cacheent(intel_ctab, *dp)) != NULL) {
3696 			if (func(arg, ct) != 0)
3697 				break;
3698 			continue;
3699 		}
3700 	}
3701 }
3702 
3703 /*
3704  * A cacheinfo walker that adds associativity, line-size, and size properties
3705  * to the devinfo node it is passed as an argument.
3706  */
3707 static int
3708 add_cacheent_props(void *arg, const struct cachetab *ct)
3709 {
3710 	dev_info_t *devi = arg;
3711 
3712 	add_cache_prop(devi, ct->ct_label, assoc_str, ct->ct_assoc);
3713 	if (ct->ct_line_size != 0)
3714 		add_cache_prop(devi, ct->ct_label, line_str,
3715 		    ct->ct_line_size);
3716 	add_cache_prop(devi, ct->ct_label, size_str, ct->ct_size);
3717 	return (0);
3718 }
3719 
3720 
3721 static const char fully_assoc[] = "fully-associative?";
3722 
3723 /*
3724  * AMD style cache/tlb description
3725  *
3726  * Extended functions 5 and 6 directly describe properties of
3727  * tlbs and various cache levels.
3728  */
3729 static void
3730 add_amd_assoc(dev_info_t *devi, const char *label, uint_t assoc)
3731 {
3732 	switch (assoc) {
3733 	case 0:	/* reserved; ignore */
3734 		break;
3735 	case 0xff:
3736 		add_cache_prop(devi, label, fully_assoc, 1);
3737 		break;
3738 	default:
3739 		add_cache_prop(devi, label, assoc_str, assoc);
3740 		break;
3741 	}
3742 }
3743 
3744 static void
3745 add_amd_tlb(dev_info_t *devi, const char *label, uint_t assoc, uint_t size)
3746 {
3747 	if (size == 0)
3748 		return;
3749 	add_cache_prop(devi, label, size_str, size);
3750 	add_amd_assoc(devi, label, assoc);
3751 }
3752 
3753 static void
3754 add_amd_cache(dev_info_t *devi, const char *label,
3755     uint_t size, uint_t assoc, uint_t lines_per_tag, uint_t line_size)
3756 {
3757 	if (size == 0 || line_size == 0)
3758 		return;
3759 	add_amd_assoc(devi, label, assoc);
3760 	/*
3761 	 * Most AMD parts have a sectored cache. Multiple cache lines are
3762 	 * associated with each tag. A sector consists of all cache lines
3763 	 * associated with a tag. For example, the AMD K6-III has a sector
3764 	 * size of 2 cache lines per tag.
3765 	 */
3766 	if (lines_per_tag != 0)
3767 		add_cache_prop(devi, label, "lines-per-tag", lines_per_tag);
3768 	add_cache_prop(devi, label, line_str, line_size);
3769 	add_cache_prop(devi, label, size_str, size * 1024);
3770 }
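
/*
 * Worked example (values from the K6-III mentioned above): a reported
 * L1 dcache of size = 32 (KB), assoc = 2, lines_per_tag = 2 and
 * line_size = 32 yields properties lines-per-tag = 2, line-size = 32
 * and size = 32768 bytes.
 */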
3771 
3772 static void
3773 add_amd_l2_assoc(dev_info_t *devi, const char *label, uint_t assoc)
3774 {
3775 	switch (assoc) {
3776 	case 0:	/* off */
3777 		break;
3778 	case 1:
3779 	case 2:
3780 	case 4:
3781 		add_cache_prop(devi, label, assoc_str, assoc);
3782 		break;
3783 	case 6:
3784 		add_cache_prop(devi, label, assoc_str, 8);
3785 		break;
3786 	case 8:
3787 		add_cache_prop(devi, label, assoc_str, 16);
3788 		break;
3789 	case 0xf:
3790 		add_cache_prop(devi, label, fully_assoc, 1);
3791 		break;
3792 	default: /* reserved; ignore */
3793 		break;
3794 	}
3795 }
3796 
3797 static void
3798 add_amd_l2_tlb(dev_info_t *devi, const char *label, uint_t assoc, uint_t size)
3799 {
3800 	if (size == 0 || assoc == 0)
3801 		return;
3802 	add_amd_l2_assoc(devi, label, assoc);
3803 	add_cache_prop(devi, label, size_str, size);
3804 }
3805 
3806 static void
3807 add_amd_l2_cache(dev_info_t *devi, const char *label,
3808     uint_t size, uint_t assoc, uint_t lines_per_tag, uint_t line_size)
3809 {
3810 	if (size == 0 || assoc == 0 || line_size == 0)
3811 		return;
3812 	add_amd_l2_assoc(devi, label, assoc);
3813 	if (lines_per_tag != 0)
3814 		add_cache_prop(devi, label, "lines-per-tag", lines_per_tag);
3815 	add_cache_prop(devi, label, line_str, line_size);
3816 	add_cache_prop(devi, label, size_str, size * 1024);
3817 }
3818 
3819 static void
3820 amd_cache_info(struct cpuid_info *cpi, dev_info_t *devi)
3821 {
3822 	struct cpuid_regs *cp;
3823 
3824 	if (cpi->cpi_xmaxeax < 0x80000005)
3825 		return;
3826 	cp = &cpi->cpi_extd[5];
3827 
3828 	/*
3829 	 * 4M/2M L1 TLB configuration
3830 	 *
3831 	 * We report the size for 2M pages because AMD uses two
3832 	 * TLB entries for one 4M page.
3833 	 */
3834 	add_amd_tlb(devi, "dtlb-2M",
3835 	    BITX(cp->cp_eax, 31, 24), BITX(cp->cp_eax, 23, 16));
3836 	add_amd_tlb(devi, "itlb-2M",
3837 	    BITX(cp->cp_eax, 15, 8), BITX(cp->cp_eax, 7, 0));
3838 
3839 	/*
3840 	 * 4K L1 TLB configuration
3841 	 */
3842 
3843 	switch (cpi->cpi_vendor) {
3844 		uint_t nentries;
3845 	case X86_VENDOR_TM:
3846 		if (cpi->cpi_family >= 5) {
3847 			/*
3848 			 * Crusoe processors have 256 TLB entries, but
3849 			 * cpuid data format constrains them to only
3850 			 * reporting 255 of them.
3851 			 */
3852 			if ((nentries = BITX(cp->cp_ebx, 23, 16)) == 255)
3853 				nentries = 256;
3854 			/*
3855 			 * Crusoe processors also have a unified TLB
3856 			 */
3857 			add_amd_tlb(devi, "tlb-4K", BITX(cp->cp_ebx, 31, 24),
3858 			    nentries);
3859 			break;
3860 		}
3861 		/*FALLTHROUGH*/
3862 	default:
3863 		add_amd_tlb(devi, itlb4k_str,
3864 		    BITX(cp->cp_ebx, 31, 24), BITX(cp->cp_ebx, 23, 16));
3865 		add_amd_tlb(devi, dtlb4k_str,
3866 		    BITX(cp->cp_ebx, 15, 8), BITX(cp->cp_ebx, 7, 0));
3867 		break;
3868 	}
3869 
3870 	/*
3871 	 * data L1 cache configuration
3872 	 */
3873 
3874 	add_amd_cache(devi, l1_dcache_str,
3875 	    BITX(cp->cp_ecx, 31, 24), BITX(cp->cp_ecx, 23, 16),
3876 	    BITX(cp->cp_ecx, 15, 8), BITX(cp->cp_ecx, 7, 0));
3877 
3878 	/*
3879 	 * code L1 cache configuration
3880 	 */
3881 
3882 	add_amd_cache(devi, l1_icache_str,
3883 	    BITX(cp->cp_edx, 31, 24), BITX(cp->cp_edx, 23, 16),
3884 	    BITX(cp->cp_edx, 15, 8), BITX(cp->cp_edx, 7, 0));
3885 
3886 	if (cpi->cpi_xmaxeax < 0x80000006)
3887 		return;
3888 	cp = &cpi->cpi_extd[6];
3889 
3890 	/* Check for a unified L2 TLB for large pages */
3891 
3892 	if (BITX(cp->cp_eax, 31, 16) == 0)
3893 		add_amd_l2_tlb(devi, "l2-tlb-2M",
3894 		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
3895 	else {
3896 		add_amd_l2_tlb(devi, "l2-dtlb-2M",
3897 		    BITX(cp->cp_eax, 31, 28), BITX(cp->cp_eax, 27, 16));
3898 		add_amd_l2_tlb(devi, "l2-itlb-2M",
3899 		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
3900 	}
3901 
3902 	/* Check for a unified L2 TLB for 4K pages */
3903 
3904 	if (BITX(cp->cp_ebx, 31, 16) == 0) {
3905 		add_amd_l2_tlb(devi, "l2-tlb-4K",
3906 		    BITX(cp->cp_ebx, 15, 12), BITX(cp->cp_ebx, 11, 0));
3907 	} else {
3908 		add_amd_l2_tlb(devi, "l2-dtlb-4K",
3909 		    BITX(cp->cp_ebx, 31, 28), BITX(cp->cp_ebx, 27, 16));
3910 		add_amd_l2_tlb(devi, "l2-itlb-4K",
3911 		    BITX(cp->cp_ebx, 15, 12), BITX(cp->cp_ebx, 11, 0));
3912 	}
3913 
3914 	add_amd_l2_cache(devi, l2_cache_str,
3915 	    BITX(cp->cp_ecx, 31, 16), BITX(cp->cp_ecx, 15, 12),
3916 	    BITX(cp->cp_ecx, 11, 8), BITX(cp->cp_ecx, 7, 0));
3917 }
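
/*
 * Field extraction note: BITX(r, hi, lo) yields bits hi..lo of r.  In
 * extended function 5, for example, %ecx describes the L1 dcache as
 * size-in-KB[31:24], associativity[23:16], lines-per-tag[15:8] and
 * line-size[7:0], which is exactly the argument order handed to
 * add_amd_cache() above.
 */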
3918 
3919 /*
3920  * There are two basic ways that the x86 world describes its cache
3921  * and tlb architecture - Intel's way and AMD's way.
3922  *
3923  * Return which flavor of cache architecture we should use
3924  */
3925 static int
3926 x86_which_cacheinfo(struct cpuid_info *cpi)
3927 {
3928 	switch (cpi->cpi_vendor) {
3929 	case X86_VENDOR_Intel:
3930 		if (cpi->cpi_maxeax >= 2)
3931 			return (X86_VENDOR_Intel);
3932 		break;
3933 	case X86_VENDOR_AMD:
3934 		/*
3935 		 * The K5 model 1 was the first part from AMD that reported
3936 		 * cache sizes via extended cpuid functions.
3937 		 */
3938 		if (cpi->cpi_family > 5 ||
3939 		    (cpi->cpi_family == 5 && cpi->cpi_model >= 1))
3940 			return (X86_VENDOR_AMD);
3941 		break;
3942 	case X86_VENDOR_TM:
3943 		if (cpi->cpi_family >= 5)
3944 			return (X86_VENDOR_AMD);
3945 		/*FALLTHROUGH*/
3946 	default:
3947 		/*
3948 		 * If they have extended CPU data for 0x80000005
3949 		 * then we assume they have AMD-format cache
3950 		 * information.
3951 		 *
3952 		 * If not, and the vendor happens to be Cyrix,
3953  * then try our Cyrix-specific handler.
3954 		 *
3955 		 * If we're not Cyrix, then assume we're using Intel's
3956 		 * table-driven format instead.
3957 		 */
3958 		if (cpi->cpi_xmaxeax >= 0x80000005)
3959 			return (X86_VENDOR_AMD);
3960 		else if (cpi->cpi_vendor == X86_VENDOR_Cyrix)
3961 			return (X86_VENDOR_Cyrix);
3962 		else if (cpi->cpi_maxeax >= 2)
3963 			return (X86_VENDOR_Intel);
3964 		break;
3965 	}
3966 	return (-1);
3967 }
3968 
3969 void
3970 cpuid_set_cpu_properties(void *dip, processorid_t cpu_id,
3971     struct cpuid_info *cpi)
3972 {
3973 	dev_info_t *cpu_devi;
3974 	int create;
3975 
3976 	cpu_devi = (dev_info_t *)dip;
3977 
3978 	/* device_type */
3979 	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
3980 	    "device_type", "cpu");
3981 
3982 	/* reg */
3983 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
3984 	    "reg", cpu_id);
3985 
3986 	/* cpu-mhz, and clock-frequency */
3987 	if (cpu_freq > 0) {
3988 		long long mul;
3989 
3990 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
3991 		    "cpu-mhz", cpu_freq);
3992 		if ((mul = cpu_freq * 1000000LL) <= INT_MAX)
3993 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
3994 			    "clock-frequency", (int)mul);
3995 	}
3996 
3997 	if (!is_x86_feature(x86_featureset, X86FSET_CPUID)) {
3998 		return;
3999 	}
4000 
4001 	/* vendor-id */
4002 	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
4003 	    "vendor-id", cpi->cpi_vendorstr);
4004 
4005 	if (cpi->cpi_maxeax == 0) {
4006 		return;
4007 	}
4008 
4009 	/*
4010 	 * family, model, and step
4011 	 */
4012 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4013 	    "family", CPI_FAMILY(cpi));
4014 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4015 	    "cpu-model", CPI_MODEL(cpi));
4016 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4017 	    "stepping-id", CPI_STEP(cpi));
4018 
4019 	/* type */
4020 	switch (cpi->cpi_vendor) {
4021 	case X86_VENDOR_Intel:
4022 		create = 1;
4023 		break;
4024 	default:
4025 		create = 0;
4026 		break;
4027 	}
4028 	if (create)
4029 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4030 		    "type", CPI_TYPE(cpi));
4031 
4032 	/* ext-family */
4033 	switch (cpi->cpi_vendor) {
4034 	case X86_VENDOR_Intel:
4035 	case X86_VENDOR_AMD:
4036 		create = cpi->cpi_family >= 0xf;
4037 		break;
4038 	default:
4039 		create = 0;
4040 		break;
4041 	}
4042 	if (create)
4043 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4044 		    "ext-family", CPI_FAMILY_XTD(cpi));
4045 
4046 	/* ext-model */
4047 	switch (cpi->cpi_vendor) {
4048 	case X86_VENDOR_Intel:
4049 		create = IS_EXTENDED_MODEL_INTEL(cpi);
4050 		break;
4051 	case X86_VENDOR_AMD:
4052 		create = CPI_FAMILY(cpi) == 0xf;
4053 		break;
4054 	default:
4055 		create = 0;
4056 		break;
4057 	}
4058 	if (create)
4059 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4060 		    "ext-model", CPI_MODEL_XTD(cpi));
4061 
4062 	/* generation */
4063 	switch (cpi->cpi_vendor) {
4064 	case X86_VENDOR_AMD:
4065 		/*
4066 		 * AMD K5 model 1 was the first part to support this
4067 		 */
4068 		create = cpi->cpi_xmaxeax >= 0x80000001;
4069 		break;
4070 	default:
4071 		create = 0;
4072 		break;
4073 	}
4074 	if (create)
4075 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4076 		    "generation", BITX((cpi)->cpi_extd[1].cp_eax, 11, 8));
4077 
4078 	/* brand-id */
4079 	switch (cpi->cpi_vendor) {
4080 	case X86_VENDOR_Intel:
4081 		/*
4082 		 * brand id first appeared on Pentium III Xeon model 8
4083 		 * and Celeron model 8 processors; on AMD, with Opteron
4084 		 */
4085 		create = cpi->cpi_family > 6 ||
4086 		    (cpi->cpi_family == 6 && cpi->cpi_model >= 8);
4087 		break;
4088 	case X86_VENDOR_AMD:
4089 		create = cpi->cpi_family >= 0xf;
4090 		break;
4091 	default:
4092 		create = 0;
4093 		break;
4094 	}
4095 	if (create && cpi->cpi_brandid != 0) {
4096 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4097 		    "brand-id", cpi->cpi_brandid);
4098 	}
4099 
4100 	/* chunks, and apic-id */
4101 	switch (cpi->cpi_vendor) {
4102 		/*
4103 		 * first available on Pentium IV and Opteron (K8)
4104 		 */
4105 	case X86_VENDOR_Intel:
4106 		create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf;
4107 		break;
4108 	case X86_VENDOR_AMD:
4109 		create = cpi->cpi_family >= 0xf;
4110 		break;
4111 	default:
4112 		create = 0;
4113 		break;
4114 	}
4115 	if (create) {
4116 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4117 		    "chunks", CPI_CHUNKS(cpi));
4118 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4119 		    "apic-id", cpi->cpi_apicid);
4120 		if (cpi->cpi_chipid >= 0) {
4121 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4122 			    "chip#", cpi->cpi_chipid);
4123 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4124 			    "clog#", cpi->cpi_clogid);
4125 		}
4126 	}
4127 
4128 	/* cpuid-features */
4129 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4130 	    "cpuid-features", CPI_FEATURES_EDX(cpi));
4131 
4133 	/* cpuid-features-ecx */
4134 	switch (cpi->cpi_vendor) {
4135 	case X86_VENDOR_Intel:
4136 		create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf;
4137 		break;
4138 	default:
4139 		create = 0;
4140 		break;
4141 	}
4142 	if (create)
4143 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4144 		    "cpuid-features-ecx", CPI_FEATURES_ECX(cpi));
4145 
4146 	/* ext-cpuid-features */
4147 	switch (cpi->cpi_vendor) {
4148 	case X86_VENDOR_Intel:
4149 	case X86_VENDOR_AMD:
4150 	case X86_VENDOR_Cyrix:
4151 	case X86_VENDOR_TM:
4152 	case X86_VENDOR_Centaur:
4153 		create = cpi->cpi_xmaxeax >= 0x80000001;
4154 		break;
4155 	default:
4156 		create = 0;
4157 		break;
4158 	}
4159 	if (create) {
4160 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4161 		    "ext-cpuid-features", CPI_FEATURES_XTD_EDX(cpi));
4162 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4163 		    "ext-cpuid-features-ecx", CPI_FEATURES_XTD_ECX(cpi));
4164 	}
4165 
4166 	/*
4167 	 * Brand String first appeared in Intel Pentium IV, AMD K5
4168 	 * model 1, and Cyrix GXm.  On earlier models we try and
4169 	 * simulate something similar .. so this string should always
4170 	 * say -something- about the processor, however lame.
4171 	 */
4172 	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
4173 	    "brand-string", cpi->cpi_brandstr);
4174 
4175 	/*
4176 	 * Finally, cache and tlb information
4177 	 */
4178 	switch (x86_which_cacheinfo(cpi)) {
4179 	case X86_VENDOR_Intel:
4180 		intel_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props);
4181 		break;
4182 	case X86_VENDOR_Cyrix:
4183 		cyrix_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props);
4184 		break;
4185 	case X86_VENDOR_AMD:
4186 		amd_cache_info(cpi, cpu_devi);
4187 		break;
4188 	default:
4189 		break;
4190 	}
4191 }
4192 
4193 struct l2info {
4194 	int *l2i_csz;
4195 	int *l2i_lsz;
4196 	int *l2i_assoc;
4197 	int l2i_ret;
4198 };
4199 
4200 /*
4201  * A cacheinfo walker that fetches the size, line-size and associativity
4202  * of the L2 cache
4203  */
4204 static int
4205 intel_l2cinfo(void *arg, const struct cachetab *ct)
4206 {
4207 	struct l2info *l2i = arg;
4208 	int *ip;
4209 
4210 	if (ct->ct_label != l2_cache_str &&
4211 	    ct->ct_label != sl2_cache_str)
4212 		return (0);	/* not an L2 -- keep walking */
4213 
4214 	if ((ip = l2i->l2i_csz) != NULL)
4215 		*ip = ct->ct_size;
4216 	if ((ip = l2i->l2i_lsz) != NULL)
4217 		*ip = ct->ct_line_size;
4218 	if ((ip = l2i->l2i_assoc) != NULL)
4219 		*ip = ct->ct_assoc;
4220 	l2i->l2i_ret = ct->ct_size;
4221 	return (1);		/* was an L2 -- terminate walk */
4222 }
4223 
4224 /*
4225  * AMD L2/L3 Cache and TLB Associativity Field Definition:
4226  *
4227  *	Unlike the associativity for the L1 cache and tlb where the 8 bit
4228  *	value is the associativity, the associativity for the L2 cache and
4229  *	tlb is encoded in the following table. The 4 bit L2 value serves as
4230  *	an index into the amd_afd[] array to determine the associativity.
4231  *	-1 is undefined. 0 is fully associative.
4232  */
4233 
4234 static int amd_afd[] =
4235 	{-1, 1, 2, -1, 4, -1, 8, -1, 16, -1, 32, 48, 64, 96, 128, 0};
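
/*
 * Example decode: an associativity field of 6 indexes amd_afd[6] and
 * yields 8-way set associative, matching the explicit "case 6"
 * handling in add_amd_l2_assoc() above.
 */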
4236 
4237 static void
4238 amd_l2cacheinfo(struct cpuid_info *cpi, struct l2info *l2i)
4239 {
4240 	struct cpuid_regs *cp;
4241 	uint_t size, assoc;
4242 	int i;
4243 	int *ip;
4244 
4245 	if (cpi->cpi_xmaxeax < 0x80000006)
4246 		return;
4247 	cp = &cpi->cpi_extd[6];
4248 
4249 	if ((i = BITX(cp->cp_ecx, 15, 12)) != 0 &&
4250 	    (size = BITX(cp->cp_ecx, 31, 16)) != 0) {
4251 		uint_t cachesz = size * 1024;
4252 		assoc = amd_afd[i];
4253 
4254 		ASSERT(assoc != -1);
4255 
4256 		if ((ip = l2i->l2i_csz) != NULL)
4257 			*ip = cachesz;
4258 		if ((ip = l2i->l2i_lsz) != NULL)
4259 			*ip = BITX(cp->cp_ecx, 7, 0);
4260 		if ((ip = l2i->l2i_assoc) != NULL)
4261 			*ip = assoc;
4262 		l2i->l2i_ret = cachesz;
4263 	}
4264 }
4265 
4266 int
4267 getl2cacheinfo(cpu_t *cpu, int *csz, int *lsz, int *assoc)
4268 {
4269 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
4270 	struct l2info __l2info, *l2i = &__l2info;
4271 
4272 	l2i->l2i_csz = csz;
4273 	l2i->l2i_lsz = lsz;
4274 	l2i->l2i_assoc = assoc;
4275 	l2i->l2i_ret = -1;
4276 
4277 	switch (x86_which_cacheinfo(cpi)) {
4278 	case X86_VENDOR_Intel:
4279 		intel_walk_cacheinfo(cpi, l2i, intel_l2cinfo);
4280 		break;
4281 	case X86_VENDOR_Cyrix:
4282 		cyrix_walk_cacheinfo(cpi, l2i, intel_l2cinfo);
4283 		break;
4284 	case X86_VENDOR_AMD:
4285 		amd_l2cacheinfo(cpi, l2i);
4286 		break;
4287 	default:
4288 		break;
4289 	}
4290 	return (l2i->l2i_ret);
4291 }
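
/*
 * Minimal usage sketch (hypothetical caller): any output pointer the
 * caller does not care about may be passed as NULL, e.g.
 *
 *	int csz, lsz;
 *
 *	if (getl2cacheinfo(CPU, &csz, &lsz, NULL) > 0)
 *		cmn_err(CE_CONT, "L2: %d bytes, %d byte lines\n", csz, lsz);
 *
 * The return value is the L2 size reported by the relevant walker, or
 * -1 if no L2 information was found.
 */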
4292 
4293 #if !defined(__xpv)
4294 
4295 uint32_t *
4296 cpuid_mwait_alloc(cpu_t *cpu)
4297 {
4298 	uint32_t	*ret;
4299 	size_t		mwait_size;
4300 
4301 	ASSERT(cpuid_checkpass(CPU, 2));
4302 
4303 	mwait_size = CPU->cpu_m.mcpu_cpi->cpi_mwait.mon_max;
4304 	if (mwait_size == 0)
4305 		return (NULL);
4306 
4307 	/*
4308 	 * kmem_alloc() returns cache line size aligned data for mwait_size
4309 	 * allocations.  mwait_size is currently cache line sized.  Neither
4310 	 * of these implementation details are guaranteed to be true in the
4311 	 * future.
4312 	 *
4313 	 * First try allocating mwait_size as kmem_alloc() currently returns
4314 	 * correctly aligned memory.  If kmem_alloc() does not return
4315 	 * mwait_size aligned memory, then over-allocate and round up.
4316 	 *
4317 	 * Set cpi_mwait.buf_actual and cpi_mwait.size_actual in case we
4318 	 * decide to free this memory.
4319 	 */
4320 	ret = kmem_zalloc(mwait_size, KM_SLEEP);
4321 	if (ret == (uint32_t *)P2ROUNDUP((uintptr_t)ret, mwait_size)) {
4322 		cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = ret;
4323 		cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = mwait_size;
4324 		*ret = MWAIT_RUNNING;
4325 		return (ret);
4326 	} else {
4327 		kmem_free(ret, mwait_size);
4328 		ret = kmem_zalloc(mwait_size * 2, KM_SLEEP);
4329 		cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = ret;
4330 		cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = mwait_size * 2;
4331 		ret = (uint32_t *)P2ROUNDUP((uintptr_t)ret, mwait_size);
4332 		*ret = MWAIT_RUNNING;
4333 		return (ret);
4334 	}
4335 }
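
/*
 * Alignment sketch: with a mwait_size of 64, a kmem_zalloc() result of
 * 0x...940 is already 64-byte aligned (P2ROUNDUP leaves it unchanged)
 * and is used directly; a result of 0x...948 is not, so the buffer is
 * freed and a 128-byte buffer allocated, within which the first
 * 64-byte boundary is handed back.
 */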
4336 
4337 void
4338 cpuid_mwait_free(cpu_t *cpu)
4339 {
4340 	if (cpu->cpu_m.mcpu_cpi == NULL) {
4341 		return;
4342 	}
4343 
4344 	if (cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual != NULL &&
4345 	    cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual > 0) {
4346 		kmem_free(cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual,
4347 		    cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual);
4348 	}
4349 
4350 	cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = NULL;
4351 	cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = 0;
4352 }
4353 
4354 void
4355 patch_tsc_read(int flag)
4356 {
4357 	size_t cnt;
4358 
4359 	switch (flag) {
4360 	case X86_NO_TSC:
4361 		cnt = &_no_rdtsc_end - &_no_rdtsc_start;
4362 		(void) memcpy((void *)tsc_read, (void *)&_no_rdtsc_start, cnt);
4363 		break;
4364 	case X86_HAVE_TSCP:
4365 		cnt = &_tscp_end - &_tscp_start;
4366 		(void) memcpy((void *)tsc_read, (void *)&_tscp_start, cnt);
4367 		break;
4368 	case X86_TSC_MFENCE:
4369 		cnt = &_tsc_mfence_end - &_tsc_mfence_start;
4370 		(void) memcpy((void *)tsc_read,
4371 		    (void *)&_tsc_mfence_start, cnt);
4372 		break;
4373 	case X86_TSC_LFENCE:
4374 		cnt = &_tsc_lfence_end - &_tsc_lfence_start;
4375 		(void) memcpy((void *)tsc_read,
4376 		    (void *)&_tsc_lfence_start, cnt);
4377 		break;
4378 	default:
4379 		break;
4380 	}
4381 }
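
/*
 * Each case above copies a pre-assembled replacement routine,
 * delimited by _start/_end linker symbols, over tsc_read(), so the
 * rdtscp or fenced rdtsc variant selected at boot runs without any
 * per-call branching.
 */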
4382 
4383 int
4384 cpuid_deep_cstates_supported(void)
4385 {
4386 	struct cpuid_info *cpi;
4387 	struct cpuid_regs regs;
4388 
4389 	ASSERT(cpuid_checkpass(CPU, 1));
4390 
4391 	cpi = CPU->cpu_m.mcpu_cpi;
4392 
4393 	if (!is_x86_feature(x86_featureset, X86FSET_CPUID))
4394 		return (0);
4395 
4396 	switch (cpi->cpi_vendor) {
4397 	case X86_VENDOR_Intel:
4398 		if (cpi->cpi_xmaxeax < 0x80000007)
4399 			return (0);
4400 
4401 		/*
4402 		 * Does the TSC run at a constant rate in all ACPI C-states?
4403 		 */
4404 		regs.cp_eax = 0x80000007;
4405 		(void) __cpuid_insn(&regs);
4406 		return (regs.cp_edx & CPUID_TSC_CSTATE_INVARIANCE);
4407 
4408 	default:
4409 		return (0);
4410 	}
4411 }
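
/*
 * For reference, CPUID_TSC_CSTATE_INVARIANCE corresponds to the
 * invariant-TSC capability, CPUID.80000007H:EDX[8]; when set, the TSC
 * continues to count at a fixed rate across C-state transitions.
 */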
4412 
4413 #endif	/* !__xpv */
4414 
4415 void
4416 post_startup_cpu_fixups(void)
4417 {
4418 #ifndef __xpv
4419 	/*
4420 	 * Some AMD processors support C1E state. Entering this state will
4421 	 * cause the local APIC timer to stop, which we can't deal with at
4422 	 * this time.
4423 	 */
4424 	if (cpuid_getvendor(CPU) == X86_VENDOR_AMD) {
4425 		on_trap_data_t otd;
4426 		uint64_t reg;
4427 
4428 		if (!on_trap(&otd, OT_DATA_ACCESS)) {
4429 			reg = rdmsr(MSR_AMD_INT_PENDING_CMP_HALT);
4430 			/* Disable C1E state if it is enabled by BIOS */
4431 			if ((reg >> AMD_ACTONCMPHALT_SHIFT) &
4432 			    AMD_ACTONCMPHALT_MASK) {
4433 				reg &= ~(AMD_ACTONCMPHALT_MASK <<
4434 				    AMD_ACTONCMPHALT_SHIFT);
4435 				wrmsr(MSR_AMD_INT_PENDING_CMP_HALT, reg);
4436 			}
4437 		}
4438 		no_trap();
4439 	}
4440 #endif	/* !__xpv */
4441 }
4442 
4443 /*
4444  * Setup necessary registers to enable XSAVE feature on this processor.
4445  * This function needs to be called early enough, so that no xsave/xrstor
4446  * ops will execute on the processor before the MSRs are properly set up.
4447  *
4448  * Current implementation has the following assumption:
4449  * - cpuid_pass1() is done, so that X86 features are known.
4450  * - fpu_probe() is done, so that fp_save_mech is chosen.
4451  */
4452 void
4453 xsave_setup_msr(cpu_t *cpu)
4454 {
4455 	ASSERT(fp_save_mech == FP_XSAVE);
4456 	ASSERT(is_x86_feature(x86_featureset, X86FSET_XSAVE));
4457 
4458 	/* Enable OSXSAVE in CR4. */
4459 	setcr4(getcr4() | CR4_OSXSAVE);
4460 	/*
4461 	 * Update SW copy of ECX, so that /dev/cpu/self/cpuid will report
4462 	 * correct value.
4463 	 */
4464 	cpu->cpu_m.mcpu_cpi->cpi_std[1].cp_ecx |= CPUID_INTC_ECX_OSXSAVE;
4465 	setup_xfem();
4466 }
4467 
4468 /*
4469  * Starting with the Westmere processor the local
4470  * APIC timer will continue running in all C-states,
4471  * including the deepest C-states.
4472  */
4473 int
4474 cpuid_arat_supported(void)
4475 {
4476 	struct cpuid_info *cpi;
4477 	struct cpuid_regs regs;
4478 
4479 	ASSERT(cpuid_checkpass(CPU, 1));
4480 	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
4481 
4482 	cpi = CPU->cpu_m.mcpu_cpi;
4483 
4484 	switch (cpi->cpi_vendor) {
4485 	case X86_VENDOR_Intel:
4486 		/*
4487 		 * Always-running Local APIC Timer is
4488 		 * indicated by CPUID.6.EAX[2].
4489 		 */
4490 		if (cpi->cpi_maxeax >= 6) {
4491 			regs.cp_eax = 6;
4492 			(void) cpuid_insn(NULL, &regs);
4493 			return (regs.cp_eax & CPUID_CSTATE_ARAT);
4494 		} else {
4495 			return (0);
4496 		}
4497 	default:
4498 		return (0);
4499 	}
4500 }
4501 
4502 /*
4503  * Check support for Intel ENERGY_PERF_BIAS feature
4504  */
4505 int
4506 cpuid_iepb_supported(struct cpu *cp)
4507 {
4508 	struct cpuid_info *cpi = cp->cpu_m.mcpu_cpi;
4509 	struct cpuid_regs regs;
4510 
4511 	ASSERT(cpuid_checkpass(cp, 1));
4512 
4513 	if (!(is_x86_feature(x86_featureset, X86FSET_CPUID)) ||
4514 	    !(is_x86_feature(x86_featureset, X86FSET_MSR))) {
4515 		return (0);
4516 	}
4517 
4518 	/*
4519 	 * Intel ENERGY_PERF_BIAS MSR is indicated by
4520 	 * capability bit CPUID.6.ECX.3
4521 	 */
4522 	if ((cpi->cpi_vendor != X86_VENDOR_Intel) || (cpi->cpi_maxeax < 6))
4523 		return (0);
4524 
4525 	regs.cp_eax = 0x6;
4526 	(void) cpuid_insn(NULL, &regs);
4527 	return (regs.cp_ecx & CPUID_EPB_SUPPORT);
4528 }
4529 
4530 /*
4531  * Check support for TSC deadline timer
4532  *
4533  * The TSC deadline timer provides a superior software programming
4534  * model over the local APIC timer, one that eliminates "time drift".
4535  * Instead of specifying a relative time, software specifies an
4536  * absolute time as the target at which the processor should
4537  * generate a timer event.
4538  */
4539 int
4540 cpuid_deadline_tsc_supported(void)
4541 {
4542 	struct cpuid_info *cpi = CPU->cpu_m.mcpu_cpi;
4543 	struct cpuid_regs regs;
4544 
4545 	ASSERT(cpuid_checkpass(CPU, 1));
4546 	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
4547 
4548 	switch (cpi->cpi_vendor) {
4549 	case X86_VENDOR_Intel:
4550 		if (cpi->cpi_maxeax >= 1) {
4551 			regs.cp_eax = 1;
4552 			(void) cpuid_insn(NULL, &regs);
4553 			return (regs.cp_ecx & CPUID_DEADLINE_TSC);
4554 		} else {
4555 			return (0);
4556 		}
4557 	default:
4558 		return (0);
4559 	}
4560 }
4561 
4562 #if defined(__amd64) && !defined(__xpv)
4563 /*
4564  * Patch in versions of bcopy for high-performance Intel Nehalem
4565  * (Nhm) processors and later...
4566  */
4567 void
4568 patch_memops(uint_t vendor)
4569 {
4570 	size_t cnt, i;
4571 	caddr_t to, from;
4572 
4573 	if ((vendor == X86_VENDOR_Intel) &&
4574 	    is_x86_feature(x86_featureset, X86FSET_SSE4_2)) {
4575 		cnt = &bcopy_patch_end - &bcopy_patch_start;
4576 		to = &bcopy_ck_size;
4577 		from = &bcopy_patch_start;
4578 		for (i = 0; i < cnt; i++) {
4579 			*to++ = *from++;
4580 		}
4581 	}
4582 }
4583 #endif  /* __amd64 && !__xpv */
4584 
4585 /*
4586  * This function finds the number of bits to represent the number of cores per
4587  * chip and the number of strands per core for the Intel platforms.
4588  * It re-uses the x2APIC cpuid code of cpuid_pass2().
4589  */
4590 void
4591 cpuid_get_ext_topo(uint_t vendor, uint_t *core_nbits, uint_t *strand_nbits)
4592 {
4593 	struct cpuid_regs regs;
4594 	struct cpuid_regs *cp = &regs;
4595 
4596 	if (vendor != X86_VENDOR_Intel) {
4597 		return;
4598 	}
4599 
4600 	/* if the cpuid level is 0xB, extended topo is available. */
4601 	cp->cp_eax = 0;
4602 	if (__cpuid_insn(cp) >= 0xB) {
4603 
4604 		cp->cp_eax = 0xB;
4605 		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
4606 		(void) __cpuid_insn(cp);
4607 
4608 		/*
4609 		 * Check CPUID.EAX=0BH, ECX=0H:EBX is non-zero, which
4610 		 * indicates that the extended topology enumeration leaf is
4611 		 * available.
4612 		 */
4613 		if (cp->cp_ebx) {
4614 			uint_t coreid_shift = 0;
4615 			uint_t chipid_shift = 0;
4616 			uint_t i;
4617 			uint_t level;
4618 
4619 			for (i = 0; i < CPI_FNB_ECX_MAX; i++) {
4620 				cp->cp_eax = 0xB;
4621 				cp->cp_ecx = i;
4622 
4623 				(void) __cpuid_insn(cp);
4624 				level = CPI_CPU_LEVEL_TYPE(cp);
4625 
4626 				if (level == 1) {
4627 					/*
4628 					 * Thread level processor topology:
4629 					 * number of bits to shift the APIC
4630 					 * ID right to get the coreid.
4631 					 */
4632 					coreid_shift = BITX(cp->cp_eax, 4, 0);
4633 				} else if (level == 2) {
4634 					/*
4635 					 * Core level processor topology:
4636 					 * number of bits to shift the APIC
4637 					 * ID right to get the chipid.
4638 					 */
4639 					chipid_shift = BITX(cp->cp_eax, 4, 0);
4640 				}
4641 			}
4642 
4643 			if (coreid_shift > 0 && chipid_shift > coreid_shift) {
4644 				*strand_nbits = coreid_shift;
4645 				*core_nbits = chipid_shift - coreid_shift;
4646 			}
4647 		}
4648 	}
4649 }
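
/*
 * Worked example: on a hypothetical part with 2 strands per core and
 * 4 cores per chip, leaf 0xB typically reports a thread (SMT) level
 * shift of 1 and a core level shift of 3.  The code above then sets
 * *strand_nbits = 1 and *core_nbits = 3 - 1 = 2, so that
 * chipid = apicid >> 3 and coreid = apicid >> 1.
 */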
4650