xref: /titanic_41/usr/src/uts/i86pc/os/cpuid.c (revision cbca9407f17426a6bbd0fba98943cdb2f1cb6921)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2011 by Delphix. All rights reserved.
24  * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
25  * Copyright 2014 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
26  */
27 /*
28  * Copyright (c) 2010, Intel Corporation.
29  * All rights reserved.
30  */
31 /*
32  * Portions Copyright 2009 Advanced Micro Devices, Inc.
33  */
34 /*
35  * Copyright (c) 2012, Joyent, Inc. All rights reserved.
36  */
37 /*
38  * Various routines to handle identification
39  * and classification of x86 processors.
40  */
41 
42 #include <sys/types.h>
43 #include <sys/archsystm.h>
44 #include <sys/x86_archext.h>
45 #include <sys/kmem.h>
46 #include <sys/systm.h>
47 #include <sys/cmn_err.h>
48 #include <sys/sunddi.h>
49 #include <sys/sunndi.h>
50 #include <sys/cpuvar.h>
51 #include <sys/processor.h>
52 #include <sys/sysmacros.h>
53 #include <sys/pg.h>
54 #include <sys/fp.h>
55 #include <sys/controlregs.h>
56 #include <sys/bitmap.h>
57 #include <sys/auxv_386.h>
58 #include <sys/memnode.h>
59 #include <sys/pci_cfgspace.h>
60 
61 #ifdef __xpv
62 #include <sys/hypervisor.h>
63 #else
64 #include <sys/ontrap.h>
65 #endif
66 
67 /*
68  * Pass 0 of cpuid feature analysis happens in locore. It contains special code
69  * to recognize Cyrix processors that are not cpuid-compliant, and to deal with
70  * them accordingly. For most modern processors, feature detection occurs here
71  * in pass 1.
72  *
73  * Pass 1 of cpuid feature analysis happens just at the beginning of mlsetup()
74  * for the boot CPU and does the basic analysis that the early kernel needs.
75  * x86_featureset is set based on the return value of cpuid_pass1() of the boot
76  * CPU.
77  *
78  * Pass 1 includes:
79  *
80  *	o Determining vendor/model/family/stepping and setting x86_type and
81  *	  x86_vendor accordingly.
82  *	o Processing the feature flags returned by the cpuid instruction while
83  *	  applying any workarounds or tricks for the specific processor.
84  *	o Mapping the feature flags into Solaris feature bits (X86_*).
85  *	o Processing extended feature flags if supported by the processor,
86  *	  again while applying specific processor knowledge.
87  *	o Determining the CMT characteristics of the system.
88  *
89  * Pass 1 is done on non-boot CPUs during their initialization and the results
90  * are used only as a meager attempt at ensuring that all processors within the
91  * system support the same features.
92  *
93  * Pass 2 of cpuid feature analysis happens just at the beginning
94  * of startup().  It just copies in and corrects the remainder
95  * of the cpuid data we depend on: standard cpuid functions that we didn't
96  * need for pass1 feature analysis, and extended cpuid functions beyond the
97  * simple feature processing done in pass1.
98  *
99  * Pass 3 of cpuid analysis is invoked once basic kernel services are up; in
100  * particular, kernel memory allocation has been made available. It creates a
101  * readable brand string based on the data collected in the first two passes.
102  *
103  * Pass 4 of cpuid analysis is invoked after post_startup() when all
104  * the support infrastructure for various hardware features has been
105  * initialized. It determines which processor features will be reported
106  * to userland via the aux vector.
107  *
108  * All passes are executed on all CPUs, but only the boot CPU determines what
109  * features the kernel will use.
110  *
111  * Much of the worst junk in this file is for the support of processors
112  * that didn't really implement the cpuid instruction properly.
113  *
114  * NOTE: The accessor functions (cpuid_get*) are aware of, and ASSERT upon,
115  * the pass numbers.  Accordingly, changes to the pass code may require changes
116  * to the accessor code.
117  */
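/*
 * A rough sketch of the order in which the boot CPU sees these passes (the
 * exact call sites live in mlsetup()/startup()/post_startup() and may differ
 * in detail):
 *
 *	cpuid_pass1()	early in mlsetup(), fills in x86_featureset
 *	cpuid_pass2()	at the beginning of startup()
 *	cpuid_pass3()	once kernel memory allocation is available
 *	cpuid_pass4()	after post_startup(), feeds the aux vector
 */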
118 
119 uint_t x86_vendor = X86_VENDOR_IntelClone;
120 uint_t x86_type = X86_TYPE_OTHER;
121 uint_t x86_clflush_size = 0;
122 
123 uint_t pentiumpro_bug4046376;
124 
125 uchar_t x86_featureset[BT_SIZEOFMAP(NUM_X86_FEATURES)];
126 
127 static char *x86_feature_names[NUM_X86_FEATURES] = {
128 	"lgpg",
129 	"tsc",
130 	"msr",
131 	"mtrr",
132 	"pge",
133 	"de",
134 	"cmov",
135 	"mmx",
136 	"mca",
137 	"pae",
138 	"cv8",
139 	"pat",
140 	"sep",
141 	"sse",
142 	"sse2",
143 	"htt",
144 	"asysc",
145 	"nx",
146 	"sse3",
147 	"cx16",
148 	"cmp",
149 	"tscp",
150 	"mwait",
151 	"sse4a",
152 	"cpuid",
153 	"ssse3",
154 	"sse4_1",
155 	"sse4_2",
156 	"1gpg",
157 	"clfsh",
158 	"64",
159 	"aes",
160 	"pclmulqdq",
161 	"xsave",
162 	"avx",
163 	"vmx",
164 	"svm",
165 	"topoext",
166 	"f16c",
167 	"rdrand",
168 	"x2apic",
169 };
170 
171 boolean_t
172 is_x86_feature(void *featureset, uint_t feature)
173 {
174 	ASSERT(feature < NUM_X86_FEATURES);
175 	return (BT_TEST((ulong_t *)featureset, feature));
176 }
177 
178 void
179 add_x86_feature(void *featureset, uint_t feature)
180 {
181 	ASSERT(feature < NUM_X86_FEATURES);
182 	BT_SET((ulong_t *)featureset, feature);
183 }
184 
185 void
186 remove_x86_feature(void *featureset, uint_t feature)
187 {
188 	ASSERT(feature < NUM_X86_FEATURES);
189 	BT_CLEAR((ulong_t *)featureset, feature);
190 }
191 
192 boolean_t
193 compare_x86_featureset(void *setA, void *setB)
194 {
195 	/*
196 	 * We assume that the unused bits of the bitmap are always zero.
197 	 */
198 	if (memcmp(setA, setB, BT_SIZEOFMAP(NUM_X86_FEATURES)) == 0) {
199 		return (B_TRUE);
200 	} else {
201 		return (B_FALSE);
202 	}
203 }
204 
205 void
206 print_x86_featureset(void *featureset)
207 {
208 	uint_t i;
209 
210 	for (i = 0; i < NUM_X86_FEATURES; i++) {
211 		if (is_x86_feature(featureset, i)) {
212 			cmn_err(CE_CONT, "?x86_feature: %s\n",
213 			    x86_feature_names[i]);
214 		}
215 	}
216 }
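/*
 * Typical usage of the helpers above (a sketch): cpuid_pass1() calls
 * add_x86_feature(featureset, X86FSET_SSE2) when it trusts the SSE2 bit,
 * and the rest of the kernel later tests the result with
 * is_x86_feature(x86_featureset, X86FSET_SSE2).
 */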
217 
218 static size_t xsave_state_size = 0;
219 uint64_t xsave_bv_all = (XFEATURE_LEGACY_FP | XFEATURE_SSE);
220 boolean_t xsave_force_disable = B_FALSE;
221 
222 /*
223  * This is set to the platform type we are running on.
224  */
225 static int platform_type = -1;
226 
227 #if !defined(__xpv)
228 /*
229  * Variable to patch if hypervisor platform detection needs to be
230  * disabled (e.g. platform_type will always be HW_NATIVE if this is 0).
231  */
232 int enable_platform_detection = 1;
233 #endif
234 
235 /*
236  * monitor/mwait info.
237  *
238  * size_actual and buf_actual are the real address and size allocated to get
239  * proper mwait_buf alignment.  buf_actual and size_actual should be passed
240  * to kmem_free().  Currently kmem_alloc() and mwait happen to both use
241  * processor cache-line alignment, but this is not guaranteed in the future.
242  */
243 struct mwait_info {
244 	size_t		mon_min;	/* min size to avoid missed wakeups */
245 	size_t		mon_max;	/* size to avoid false wakeups */
246 	size_t		size_actual;	/* size actually allocated */
247 	void		*buf_actual;	/* memory actually allocated */
248 	uint32_t	support;	/* processor support of monitor/mwait */
249 };
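/*
 * As noted above, a consumer tearing one of these down frees the *_actual
 * fields rather than any aligned pointer it derived from them, e.g. (sketch):
 *
 *	kmem_free(cpi->cpi_mwait.buf_actual, cpi->cpi_mwait.size_actual);
 */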
250 
251 /*
252  * xsave/xrestor info.
253  *
254  * This structure contains HW feature bits and size of the xsave save area.
255  * Note: the kernel will use the maximum size required for all hardware
256  * features. It is not optimized for potential memory savings if features at
257  * the end of the save area are not enabled.
258  */
259 struct xsave_info {
260 	uint32_t	xsav_hw_features_low;   /* Supported HW features */
261 	uint32_t	xsav_hw_features_high;  /* Supported HW features */
262 	size_t		xsav_max_size;  /* max size save area for HW features */
263 	size_t		ymm_size;	/* AVX: size of ymm save area */
264 	size_t		ymm_offset;	/* AVX: offset for ymm save area */
265 };
266 
267 
268 /*
269  * These constants determine how many of the elements of the
270  * cpuid data we cache in the cpuid_info data structure; the
271  * remaining elements are accessible via the cpuid instruction.
272  */
273 
274 #define	NMAX_CPI_STD	6		/* eax = 0 .. 5 */
275 #define	NMAX_CPI_EXTD	0x1f		/* eax = 0x80000000 .. 0x8000001e */
276 
277 /*
278  * Some terminology needs to be explained:
279  *  - Socket: Something that can be plugged into a motherboard.
280  *  - Package: Same as socket
281  *  - Chip: Same as socket. Note that AMD's documentation uses the term "chip"
282  *    differently: there, a chip is the same as a processor node (below).
283  *  - Processor node: Some AMD processors have more than one
284  *    "subprocessor" embedded in a package. These subprocessors (nodes)
285  *    are fully-functional processors themselves with cores, caches,
286  *    memory controllers, PCI configuration spaces. They are connected
287  *    inside the package with Hypertransport links. On single-node
288  *    processors, processor node is equivalent to chip/socket/package.
289  *  - Compute Unit: Some AMD processors pair cores in "compute units" that
290  *    share the FPU and the I$ and L2 caches.
291  */
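/*
 * Example (hypothetical): a two-node AMD package with four cores per node,
 * paired into compute units, is one socket/package/chip containing two
 * processor nodes and four compute units of two cores each.
 */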
292 
293 struct cpuid_info {
294 	uint_t cpi_pass;		/* last pass completed */
295 	/*
296 	 * standard function information
297 	 */
298 	uint_t cpi_maxeax;		/* fn 0: %eax */
299 	char cpi_vendorstr[13];		/* fn 0: %ebx:%ecx:%edx */
300 	uint_t cpi_vendor;		/* enum of cpi_vendorstr */
301 
302 	uint_t cpi_family;		/* fn 1: extended family */
303 	uint_t cpi_model;		/* fn 1: extended model */
304 	uint_t cpi_step;		/* fn 1: stepping */
305 	chipid_t cpi_chipid;		/* fn 1: %ebx:  Intel: chip # */
306 					/*		AMD: package/socket # */
307 	uint_t cpi_brandid;		/* fn 1: %ebx: brand ID */
308 	int cpi_clogid;			/* fn 1: %ebx: thread # */
309 	uint_t cpi_ncpu_per_chip;	/* fn 1: %ebx: logical cpu count */
310 	uint8_t cpi_cacheinfo[16];	/* fn 2: intel-style cache desc */
311 	uint_t cpi_ncache;		/* fn 2: number of elements */
312 	uint_t cpi_ncpu_shr_last_cache;	/* fn 4: %eax: ncpus sharing cache */
313 	id_t cpi_last_lvl_cacheid;	/* fn 4: %eax: derived cache id */
314 	uint_t cpi_std_4_size;		/* fn 4: number of fn 4 elements */
315 	struct cpuid_regs **cpi_std_4;	/* fn 4: %ecx == 0 .. fn4_size */
316 	struct cpuid_regs cpi_std[NMAX_CPI_STD];	/* 0 .. 5 */
317 	/*
318 	 * extended function information
319 	 */
320 	uint_t cpi_xmaxeax;		/* fn 0x80000000: %eax */
321 	char cpi_brandstr[49];		/* fn 0x8000000[234] */
322 	uint8_t cpi_pabits;		/* fn 0x80000008: %eax */
323 	uint8_t	cpi_vabits;		/* fn 0x80000008: %eax */
324 	struct	cpuid_regs cpi_extd[NMAX_CPI_EXTD];	/* 0x800000XX */
325 
326 	id_t cpi_coreid;		/* same coreid => strands share core */
327 	int cpi_pkgcoreid;		/* core number within single package */
328 	uint_t cpi_ncore_per_chip;	/* AMD: fn 0x80000008: %ecx[7-0] */
329 					/* Intel: fn 4: %eax[31-26] */
330 	/*
331 	 * supported feature information
332 	 */
333 	uint32_t cpi_support[5];
334 #define	STD_EDX_FEATURES	0
335 #define	AMD_EDX_FEATURES	1
336 #define	TM_EDX_FEATURES		2
337 #define	STD_ECX_FEATURES	3
338 #define	AMD_ECX_FEATURES	4
339 	/*
340 	 * Synthesized information, where known.
341 	 */
342 	uint32_t cpi_chiprev;		/* See X86_CHIPREV_* in x86_archext.h */
343 	const char *cpi_chiprevstr;	/* May be NULL if chiprev unknown */
344 	uint32_t cpi_socket;		/* Chip package/socket type */
345 
346 	struct mwait_info cpi_mwait;	/* fn 5: monitor/mwait info */
347 	uint32_t cpi_apicid;
348 	uint_t cpi_procnodeid;		/* AMD: nodeID on HT, Intel: chipid */
349 	uint_t cpi_procnodes_per_pkg;	/* AMD: # of nodes in the package */
350 					/* Intel: 1 */
351 	uint_t cpi_compunitid;		/* AMD: ComputeUnit ID, Intel: coreid */
352 	uint_t cpi_cores_per_compunit;	/* AMD: # of cores in the ComputeUnit */
353 
354 	struct xsave_info cpi_xsave;	/* fn D: xsave/xrestor info */
355 };
356 
357 
358 static struct cpuid_info cpuid_info0;
359 
360 /*
361  * These bit fields are defined by the Intel Application Note AP-485
362  * "Intel Processor Identification and the CPUID Instruction"
363  */
364 #define	CPI_FAMILY_XTD(cpi)	BITX((cpi)->cpi_std[1].cp_eax, 27, 20)
365 #define	CPI_MODEL_XTD(cpi)	BITX((cpi)->cpi_std[1].cp_eax, 19, 16)
366 #define	CPI_TYPE(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 13, 12)
367 #define	CPI_FAMILY(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 11, 8)
368 #define	CPI_STEP(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 3, 0)
369 #define	CPI_MODEL(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 7, 4)
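/*
 * Worked example: a fn 1 %eax of 0x000206d7 gives CPI_STEP() == 7,
 * CPI_MODEL() == 0xd, CPI_FAMILY() == 6, CPI_MODEL_XTD() == 2 and
 * CPI_FAMILY_XTD() == 0; since the family is 6, cpuid_pass1() folds in the
 * extended model to yield an effective model of 0xd + (2 << 4) == 0x2d.
 */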
370 
371 #define	CPI_FEATURES_EDX(cpi)		((cpi)->cpi_std[1].cp_edx)
372 #define	CPI_FEATURES_ECX(cpi)		((cpi)->cpi_std[1].cp_ecx)
373 #define	CPI_FEATURES_XTD_EDX(cpi)	((cpi)->cpi_extd[1].cp_edx)
374 #define	CPI_FEATURES_XTD_ECX(cpi)	((cpi)->cpi_extd[1].cp_ecx)
375 
376 #define	CPI_BRANDID(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 7, 0)
377 #define	CPI_CHUNKS(cpi)		BITX((cpi)->cpi_std[1].cp_ebx, 15, 8)
378 #define	CPI_CPU_COUNT(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 23, 16)
379 #define	CPI_APIC_ID(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 31, 24)
380 
381 #define	CPI_MAXEAX_MAX		0x100		/* sanity control */
382 #define	CPI_XMAXEAX_MAX		0x80000100
383 #define	CPI_FN4_ECX_MAX		0x20		/* sanity: max fn 4 levels */
384 #define	CPI_FNB_ECX_MAX		0x20		/* sanity: max fn B levels */
385 
386 /*
387  * Function 4 (Deterministic Cache Parameters) macros
388  * Defined by Intel Application Note AP-485
389  */
390 #define	CPI_NUM_CORES(regs)		BITX((regs)->cp_eax, 31, 26)
391 #define	CPI_NTHR_SHR_CACHE(regs)	BITX((regs)->cp_eax, 25, 14)
392 #define	CPI_FULL_ASSOC_CACHE(regs)	BITX((regs)->cp_eax, 9, 9)
393 #define	CPI_SELF_INIT_CACHE(regs)	BITX((regs)->cp_eax, 8, 8)
394 #define	CPI_CACHE_LVL(regs)		BITX((regs)->cp_eax, 7, 5)
395 #define	CPI_CACHE_TYPE(regs)		BITX((regs)->cp_eax, 4, 0)
396 #define	CPI_CPU_LEVEL_TYPE(regs)	BITX((regs)->cp_ecx, 15, 8)
397 
398 #define	CPI_CACHE_WAYS(regs)		BITX((regs)->cp_ebx, 31, 22)
399 #define	CPI_CACHE_PARTS(regs)		BITX((regs)->cp_ebx, 21, 12)
400 #define	CPI_CACHE_COH_LN_SZ(regs)	BITX((regs)->cp_ebx, 11, 0)
401 
402 #define	CPI_CACHE_SETS(regs)		BITX((regs)->cp_ecx, 31, 0)
403 
404 #define	CPI_PREFCH_STRIDE(regs)		BITX((regs)->cp_edx, 9, 0)
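/*
 * Worked example: a 256KB, 8-way cache with 64-byte lines and a single
 * partition is reported with CPI_CACHE_WAYS() == 7, CPI_CACHE_PARTS() == 0,
 * CPI_CACHE_COH_LN_SZ() == 63 and CPI_CACHE_SETS() == 511; each field is one
 * less than the real value, so the size is 8 * 1 * 64 * 512 == 262144 bytes.
 */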
405 
406 
407 /*
408  * A couple of shorthand macros to identify "later" P6-family chips
409  * like the Pentium M and Core.  First, the "older" P6-based stuff
410  * (loosely defined as "pre-Pentium-4"):
411  * P6, PII, Mobile PII, PII Xeon, PIII, Mobile PIII, PIII Xeon
412  */
413 
414 #define	IS_LEGACY_P6(cpi) (			\
415 	cpi->cpi_family == 6 && 		\
416 		(cpi->cpi_model == 1 ||		\
417 		cpi->cpi_model == 3 ||		\
418 		cpi->cpi_model == 5 ||		\
419 		cpi->cpi_model == 6 ||		\
420 		cpi->cpi_model == 7 ||		\
421 		cpi->cpi_model == 8 ||		\
422 		cpi->cpi_model == 0xA ||	\
423 		cpi->cpi_model == 0xB)		\
424 )
425 
426 /* A "new F6" is everything with family 6 that's not the above */
427 #define	IS_NEW_F6(cpi) ((cpi->cpi_family == 6) && !IS_LEGACY_P6(cpi))
428 
429 /* Extended family/model support */
430 #define	IS_EXTENDED_MODEL_INTEL(cpi) (cpi->cpi_family == 0x6 || \
431 	cpi->cpi_family >= 0xf)
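/*
 * For example, a family 6, model 8 part (Pentium III "Coppermine") is
 * IS_LEGACY_P6(), while a family 6, model 0xf part (Core 2) is IS_NEW_F6();
 * both, being family 6, satisfy IS_EXTENDED_MODEL_INTEL().
 */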
432 
433 /*
434  * Info for monitor/mwait idle loop.
435  *
436  * See cpuid section of "Intel 64 and IA-32 Architectures Software Developer's
437  * Manual Volume 2A: Instruction Set Reference, A-M" #25366-022US, November
438  * 2006.
439  * See MONITOR/MWAIT section of "AMD64 Architecture Programmer's Manual
440  * Documentation Updates" #33633, Rev 2.05, December 2006.
441  */
442 #define	MWAIT_SUPPORT		(0x00000001)	/* mwait supported */
443 #define	MWAIT_EXTENSIONS	(0x00000002)	/* extension supported */
444 #define	MWAIT_ECX_INT_ENABLE	(0x00000004)	/* ecx 1 extension supported */
445 #define	MWAIT_SUPPORTED(cpi)	((cpi)->cpi_std[1].cp_ecx & CPUID_INTC_ECX_MON)
446 #define	MWAIT_INT_ENABLE(cpi)	((cpi)->cpi_std[5].cp_ecx & 0x2)
447 #define	MWAIT_EXTENSION(cpi)	((cpi)->cpi_std[5].cp_ecx & 0x1)
448 #define	MWAIT_SIZE_MIN(cpi)	BITX((cpi)->cpi_std[5].cp_eax, 15, 0)
449 #define	MWAIT_SIZE_MAX(cpi)	BITX((cpi)->cpi_std[5].cp_ebx, 15, 0)
450 /*
451  * Number of sub-cstates for a given c-state.
452  */
453 #define	MWAIT_NUM_SUBC_STATES(cpi, c_state)			\
454 	BITX((cpi)->cpi_std[5].cp_edx, c_state + 3, c_state)
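/*
 * For example, a (hypothetical) fn 5 %edx of 0x00002220 reports
 * MWAIT_NUM_SUBC_STATES(cpi, 4) == 2 and MWAIT_NUM_SUBC_STATES(cpi, 8) == 2,
 * i.e. two sub-cstates each for the C-states described by bits 7:4 and 11:8.
 */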
455 
456 /*
457  * XSAVE leaf 0xD enumeration
458  */
459 #define	CPUID_LEAFD_2_YMM_OFFSET	576
460 #define	CPUID_LEAFD_2_YMM_SIZE		256
461 
462 /*
463  * Functions we consume from cpuid_subr.c;  don't publish these in a header
464  * file to try and keep people using the expected cpuid_* interfaces.
465  */
466 extern uint32_t _cpuid_skt(uint_t, uint_t, uint_t, uint_t);
467 extern const char *_cpuid_sktstr(uint_t, uint_t, uint_t, uint_t);
468 extern uint32_t _cpuid_chiprev(uint_t, uint_t, uint_t, uint_t);
469 extern const char *_cpuid_chiprevstr(uint_t, uint_t, uint_t, uint_t);
470 extern uint_t _cpuid_vendorstr_to_vendorcode(char *);
471 
472 /*
473  * Apply various platform-dependent restrictions where the
474  * underlying platform restrictions mean the CPU can be marked
475  * as less capable than its cpuid instruction would imply.
476  */
477 #if defined(__xpv)
478 static void
479 platform_cpuid_mangle(uint_t vendor, uint32_t eax, struct cpuid_regs *cp)
480 {
481 	switch (eax) {
482 	case 1: {
483 		uint32_t mcamask = DOMAIN_IS_INITDOMAIN(xen_info) ?
484 		    0 : CPUID_INTC_EDX_MCA;
485 		cp->cp_edx &=
486 		    ~(mcamask |
487 		    CPUID_INTC_EDX_PSE |
488 		    CPUID_INTC_EDX_VME | CPUID_INTC_EDX_DE |
489 		    CPUID_INTC_EDX_SEP | CPUID_INTC_EDX_MTRR |
490 		    CPUID_INTC_EDX_PGE | CPUID_INTC_EDX_PAT |
491 		    CPUID_AMD_EDX_SYSC | CPUID_INTC_EDX_SEP |
492 		    CPUID_INTC_EDX_PSE36 | CPUID_INTC_EDX_HTT);
493 		break;
494 	}
495 
496 	case 0x80000001:
497 		cp->cp_edx &=
498 		    ~(CPUID_AMD_EDX_PSE |
499 		    CPUID_INTC_EDX_VME | CPUID_INTC_EDX_DE |
500 		    CPUID_AMD_EDX_MTRR | CPUID_AMD_EDX_PGE |
501 		    CPUID_AMD_EDX_PAT | CPUID_AMD_EDX_PSE36 |
502 		    CPUID_AMD_EDX_SYSC | CPUID_INTC_EDX_SEP |
503 		    CPUID_AMD_EDX_TSCP);
504 		cp->cp_ecx &= ~CPUID_AMD_ECX_CMP_LGCY;
505 		break;
506 	default:
507 		break;
508 	}
509 
510 	switch (vendor) {
511 	case X86_VENDOR_Intel:
512 		switch (eax) {
513 		case 4:
514 			/*
515 			 * Zero out the (ncores-per-chip - 1) field
516 			 */
517 			cp->cp_eax &= 0x03ffffff;
518 			break;
519 		default:
520 			break;
521 		}
522 		break;
523 	case X86_VENDOR_AMD:
524 		switch (eax) {
525 
526 		case 0x80000001:
527 			cp->cp_ecx &= ~CPUID_AMD_ECX_CR8D;
528 			break;
529 
530 		case 0x80000008:
531 			/*
532 			 * Zero out the (ncores-per-chip - 1) field
533 			 */
534 			cp->cp_ecx &= 0xffffff00;
535 			break;
536 		default:
537 			break;
538 		}
539 		break;
540 	default:
541 		break;
542 	}
543 }
544 #else
545 #define	platform_cpuid_mangle(vendor, eax, cp)	/* nothing */
546 #endif
547 
548 /*
549  *  Some undocumented ways of patching the results of the cpuid
550  *  instruction to permit running Solaris 10 on future cpus that
551  *  we don't currently support.  Could be set to non-zero values
552  *  via settings in eeprom.
553  */
554 
555 uint32_t cpuid_feature_ecx_include;
556 uint32_t cpuid_feature_ecx_exclude;
557 uint32_t cpuid_feature_edx_include;
558 uint32_t cpuid_feature_edx_exclude;
559 
560 /*
561  * Allocate space for mcpu_cpi in the machcpu structure for all non-boot CPUs.
562  */
563 void
564 cpuid_alloc_space(cpu_t *cpu)
565 {
566 	/*
567 	 * By convention, cpu0 is the boot cpu, which is set up
568 	 * before memory allocation is available.  All other cpus get
569 	 * their cpuid_info struct allocated here.
570 	 */
571 	ASSERT(cpu->cpu_id != 0);
572 	ASSERT(cpu->cpu_m.mcpu_cpi == NULL);
573 	cpu->cpu_m.mcpu_cpi =
574 	    kmem_zalloc(sizeof (*cpu->cpu_m.mcpu_cpi), KM_SLEEP);
575 }
576 
577 void
578 cpuid_free_space(cpu_t *cpu)
579 {
580 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
581 	int i;
582 
583 	ASSERT(cpi != NULL);
584 	ASSERT(cpi != &cpuid_info0);
585 
586 	/*
587 	 * Free up any function 4 related dynamic storage
588 	 */
589 	for (i = 1; i < cpi->cpi_std_4_size; i++)
590 		kmem_free(cpi->cpi_std_4[i], sizeof (struct cpuid_regs));
591 	if (cpi->cpi_std_4_size > 0)
592 		kmem_free(cpi->cpi_std_4,
593 		    cpi->cpi_std_4_size * sizeof (struct cpuid_regs *));
594 
595 	kmem_free(cpi, sizeof (*cpi));
596 	cpu->cpu_m.mcpu_cpi = NULL;
597 }
598 
599 #if !defined(__xpv)
600 /*
601  * Determine the type of the underlying platform. This is used to customize
602  * initialization of various subsystems (e.g. TSC). determine_platform() must
603  * only ever be called once to prevent two processors from seeing different
604  * values of platform_type. Must be called before cpuid_pass1(), the earliest
605  * consumer to execute (uses _cpuid_chiprev --> synth_amd_info --> get_hwenv).
606  */
607 void
608 determine_platform(void)
609 {
610 	struct cpuid_regs cp;
611 	uint32_t base;
612 	uint32_t regs[4];
613 	char *hvstr = (char *)regs;
614 
615 	ASSERT(platform_type == -1);
616 
617 	platform_type = HW_NATIVE;
618 
619 	if (!enable_platform_detection)
620 		return;
621 
622 	/*
623 	 * If the Hypervisor CPUID bit is set, try to determine the hypervisor
624 	 * vendor signature, and set platform type accordingly.
625 	 *
626 	 * References:
627 	 * http://lkml.org/lkml/2008/10/1/246
628 	 * http://kb.vmware.com/kb/1009458
629 	 */
630 	cp.cp_eax = 0x1;
631 	(void) __cpuid_insn(&cp);
632 	if ((cp.cp_ecx & CPUID_INTC_ECX_HV) != 0) {
633 		cp.cp_eax = 0x40000000;
634 		(void) __cpuid_insn(&cp);
635 		regs[0] = cp.cp_ebx;
636 		regs[1] = cp.cp_ecx;
637 		regs[2] = cp.cp_edx;
638 		regs[3] = 0;
639 		if (strcmp(hvstr, HVSIG_XEN_HVM) == 0) {
640 			platform_type = HW_XEN_HVM;
641 			return;
642 		}
643 		if (strcmp(hvstr, HVSIG_VMWARE) == 0) {
644 			platform_type = HW_VMWARE;
645 			return;
646 		}
647 		if (strcmp(hvstr, HVSIG_KVM) == 0) {
648 			platform_type = HW_KVM;
649 			return;
650 		}
651 		if (strcmp(hvstr, HVSIG_MICROSOFT) == 0)
652 			platform_type = HW_MICROSOFT;
653 	} else {
654 		/*
655 		 * Check older VMware hardware versions. The VMware hypervisor is
656 		 * detected by performing an IN operation on the VMware hypervisor
657 		 * port and checking that the value returned in %ebx is the VMware
658 		 * hypervisor magic value.
659 		 *
660 		 * References: http://kb.vmware.com/kb/1009458
661 		 */
662 		vmware_port(VMWARE_HVCMD_GETVERSION, regs);
663 		if (regs[1] == VMWARE_HVMAGIC) {
664 			platform_type = HW_VMWARE;
665 			return;
666 		}
667 	}
668 
669 	/*
670 	 * Check for the Xen hypervisor. In a fully virtualized domain,
671 	 * Xen's pseudo-cpuid function returns a string representing the
672 	 * Xen signature in %ebx, %ecx, and %edx. %eax contains the maximum
673 	 * supported cpuid function. We need at least a (base + 2) leaf value
674 	 * to do what we want to do. Try different base values, since the
675 	 * hypervisor might use a different one depending on whether Hyper-V
676 	 * emulation is switched on by default or not.
677 	 */
678 	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
679 		cp.cp_eax = base;
680 		(void) __cpuid_insn(&cp);
681 		regs[0] = cp.cp_ebx;
682 		regs[1] = cp.cp_ecx;
683 		regs[2] = cp.cp_edx;
684 		regs[3] = 0;
685 		if (strcmp(hvstr, HVSIG_XEN_HVM) == 0 &&
686 		    cp.cp_eax >= (base + 2)) {
687 			platform_type &= ~HW_NATIVE;
688 			platform_type |= HW_XEN_HVM;
689 			return;
690 		}
691 	}
692 }
693 
694 int
695 get_hwenv(void)
696 {
697 	ASSERT(platform_type != -1);
698 	return (platform_type);
699 }
700 
701 int
702 is_controldom(void)
703 {
704 	return (0);
705 }
706 
707 #else
708 
709 int
710 get_hwenv(void)
711 {
712 	return (HW_XEN_PV);
713 }
714 
715 int
716 is_controldom(void)
717 {
718 	return (DOMAIN_IS_INITDOMAIN(xen_info));
719 }
720 
721 #endif	/* __xpv */
722 
723 static void
724 cpuid_intel_getids(cpu_t *cpu, void *feature)
725 {
726 	uint_t i;
727 	uint_t chipid_shift = 0;
728 	uint_t coreid_shift = 0;
729 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
730 
731 	for (i = 1; i < cpi->cpi_ncpu_per_chip; i <<= 1)
732 		chipid_shift++;
733 
734 	cpi->cpi_chipid = cpi->cpi_apicid >> chipid_shift;
735 	cpi->cpi_clogid = cpi->cpi_apicid & ((1 << chipid_shift) - 1);
736 
737 	if (is_x86_feature(feature, X86FSET_CMP)) {
738 		/*
739 		 * Multi-core (and possibly multi-threaded)
740 		 * processors.
741 		 */
742 		uint_t ncpu_per_core;
743 		if (cpi->cpi_ncore_per_chip == 1)
744 			ncpu_per_core = cpi->cpi_ncpu_per_chip;
745 		else if (cpi->cpi_ncore_per_chip > 1)
746 			ncpu_per_core = cpi->cpi_ncpu_per_chip /
747 			    cpi->cpi_ncore_per_chip;
748 		/*
749 		 * 8-bit APIC IDs on dual-core Pentiums
750 		 * look like this:
751 		 *
752 		 * +-----------------------+------+------+
753 		 * | Physical Package ID   |  MC  |  HT  |
754 		 * +-----------------------+------+------+
755 		 * <------- chipid -------->
756 		 * <------- coreid --------------->
757 		 *			   <--- clogid -->
758 		 *			   <------>
759 		 *			   pkgcoreid
760 		 *
761 		 * Where the number of bits necessary to
762 		 * represent MC and HT fields together equals
763 		 * the minimum number of bits necessary to
764 		 * store the value of cpi->cpi_ncpu_per_chip.
765 		 * Of those bits, the MC part uses the number
766 		 * of bits necessary to store the value of
767 		 * cpi->cpi_ncore_per_chip.
768 		 */
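		/*
		 * Worked example (hypothetical): with 4 logical cpus and
		 * 2 cores per chip, chipid_shift == 2 and coreid_shift == 1,
		 * so an APIC ID of 0x7 yields chipid 1, clogid 3, coreid 3
		 * and pkgcoreid 1.
		 */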
769 		for (i = 1; i < ncpu_per_core; i <<= 1)
770 			coreid_shift++;
771 		cpi->cpi_coreid = cpi->cpi_apicid >> coreid_shift;
772 		cpi->cpi_pkgcoreid = cpi->cpi_clogid >> coreid_shift;
773 	} else if (is_x86_feature(feature, X86FSET_HTT)) {
774 		/*
775 		 * Single-core multi-threaded processors.
776 		 */
777 		cpi->cpi_coreid = cpi->cpi_chipid;
778 		cpi->cpi_pkgcoreid = 0;
779 	}
780 	cpi->cpi_procnodeid = cpi->cpi_chipid;
781 	cpi->cpi_compunitid = cpi->cpi_coreid;
782 }
783 
784 static void
785 cpuid_amd_getids(cpu_t *cpu)
786 {
787 	int i, first_half, coreidsz;
788 	uint32_t nb_caps_reg;
789 	uint_t node2_1;
790 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
791 	struct cpuid_regs *cp;
792 
793 	/*
794 	 * AMD CMP chips currently have a single thread per core.
795 	 *
796 	 * Since no two cpus share a core we must assign a distinct coreid
797 	 * per cpu, and we do this by using the cpu_id.  This scheme does not,
798 	 * however, guarantee that sibling cores of a chip will have sequential
799 	 * coreids starting at a multiple of the number of cores per chip -
800 	 * that is usually the case, but if the ACPI MADT table is presented
801 	 * in a different order then we need to perform a few more gymnastics
802 	 * for the pkgcoreid.
803 	 *
804 	 * All processors in the system have the same number of enabled
805 	 * cores. Cores within a processor are always numbered sequentially
806 	 * from 0 regardless of how many or which are disabled, and there
807  * is no way for the operating system to discover the real core id when some
808 	 * are disabled.
809 	 *
810 	 * In family 0x15, the cores come in pairs called compute units. They
811 	 * share I$ and L2 caches and the FPU. Enumeration of this feature is
812 	 * simplified by the new topology extensions CPUID leaf, indicated by
813 	 * the X86 feature X86FSET_TOPOEXT.
814 	 */
815 
816 	cpi->cpi_coreid = cpu->cpu_id;
817 	cpi->cpi_compunitid = cpu->cpu_id;
818 
819 	if (cpi->cpi_xmaxeax >= 0x80000008) {
820 
821 		coreidsz = BITX((cpi)->cpi_extd[8].cp_ecx, 15, 12);
822 
823 		/*
824 		 * In AMD parlance a chip is really a node, while Solaris
825 		 * sees a chip as equivalent to a socket/package.
826 		 */
827 		cpi->cpi_ncore_per_chip =
828 		    BITX((cpi)->cpi_extd[8].cp_ecx, 7, 0) + 1;
829 		if (coreidsz == 0) {
830 			/* Use legacy method */
831 			for (i = 1; i < cpi->cpi_ncore_per_chip; i <<= 1)
832 				coreidsz++;
833 			if (coreidsz == 0)
834 				coreidsz = 1;
835 		}
836 	} else {
837 		/* Assume single-core part */
838 		cpi->cpi_ncore_per_chip = 1;
839 		coreidsz = 1;
840 	}
841 
842 	cpi->cpi_clogid = cpi->cpi_pkgcoreid =
843 	    cpi->cpi_apicid & ((1<<coreidsz) - 1);
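	/*
	 * E.g. (hypothetical) with coreidsz == 4, an APIC ID of 0x12 yields
	 * cpi_clogid == cpi_pkgcoreid == 2.
	 */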
844 	cpi->cpi_ncpu_per_chip = cpi->cpi_ncore_per_chip;
845 
846 	/* Get node ID, compute unit ID */
847 	if (is_x86_feature(x86_featureset, X86FSET_TOPOEXT) &&
848 	    cpi->cpi_xmaxeax >= 0x8000001e) {
849 		cp = &cpi->cpi_extd[0x1e];
850 		cp->cp_eax = 0x8000001e;
851 		(void) __cpuid_insn(cp);
852 
853 		cpi->cpi_procnodes_per_pkg = BITX(cp->cp_ecx, 10, 8) + 1;
854 		cpi->cpi_procnodeid = BITX(cp->cp_ecx, 7, 0);
855 		cpi->cpi_cores_per_compunit = BITX(cp->cp_ebx, 15, 8) + 1;
856 		cpi->cpi_compunitid = BITX(cp->cp_ebx, 7, 0)
857 		    + (cpi->cpi_ncore_per_chip / cpi->cpi_cores_per_compunit)
858 		    * (cpi->cpi_procnodeid / cpi->cpi_procnodes_per_pkg);
859 	} else if (cpi->cpi_family == 0xf || cpi->cpi_family >= 0x11) {
860 		cpi->cpi_procnodeid = (cpi->cpi_apicid >> coreidsz) & 7;
861 	} else if (cpi->cpi_family == 0x10) {
862 		/*
863 		 * See if we are a multi-node processor.
864 		 * All processors in the system have the same number of nodes
865 		 */
866 		nb_caps_reg =  pci_getl_func(0, 24, 3, 0xe8);
867 		if ((cpi->cpi_model < 8) || BITX(nb_caps_reg, 29, 29) == 0) {
868 			/* Single-node */
869 			cpi->cpi_procnodeid = BITX(cpi->cpi_apicid, 5,
870 			    coreidsz);
871 		} else {
872 
873 			/*
874 			 * Multi-node revision D (2 nodes per package
875 			 * are supported)
876 			 */
877 			cpi->cpi_procnodes_per_pkg = 2;
878 
879 			first_half = (cpi->cpi_pkgcoreid <=
880 			    (cpi->cpi_ncore_per_chip/2 - 1));
881 
882 			if (cpi->cpi_apicid == cpi->cpi_pkgcoreid) {
883 				/* We are BSP */
884 				cpi->cpi_procnodeid = (first_half ? 0 : 1);
885 			} else {
886 
887 				/* We are AP */
888 				/* NodeId[2:1] bits to use for reading F3xe8 */
889 				node2_1 = BITX(cpi->cpi_apicid, 5, 4) << 1;
890 
891 				nb_caps_reg =
892 				    pci_getl_func(0, 24 + node2_1, 3, 0xe8);
893 
894 				/*
895 				 * Check IntNodeNum bit (31:30, but bit 31 is
896 				 * always 0 on dual-node processors)
897 				 */
898 				if (BITX(nb_caps_reg, 30, 30) == 0)
899 					cpi->cpi_procnodeid = node2_1 +
900 					    !first_half;
901 				else
902 					cpi->cpi_procnodeid = node2_1 +
903 					    first_half;
904 			}
905 		}
906 	} else {
907 		cpi->cpi_procnodeid = 0;
908 	}
909 
910 	cpi->cpi_chipid =
911 	    cpi->cpi_procnodeid / cpi->cpi_procnodes_per_pkg;
912 }
913 
914 /*
915  * Set up the XFeature_Enabled_Mask register. Required by the xsave feature.
916  */
917 void
918 setup_xfem(void)
919 {
920 	uint64_t flags = XFEATURE_LEGACY_FP;
921 
922 	ASSERT(is_x86_feature(x86_featureset, X86FSET_XSAVE));
923 
924 	if (is_x86_feature(x86_featureset, X86FSET_SSE))
925 		flags |= XFEATURE_SSE;
926 
927 	if (is_x86_feature(x86_featureset, X86FSET_AVX))
928 		flags |= XFEATURE_AVX;
929 
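	/*
	 * On an AVX-capable cpu, flags is now
	 * XFEATURE_LEGACY_FP | XFEATURE_SSE | XFEATURE_AVX.
	 */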
930 	set_xcr(XFEATURE_ENABLED_MASK, flags);
931 
932 	xsave_bv_all = flags;
933 }
934 
935 void
936 cpuid_pass1(cpu_t *cpu, uchar_t *featureset)
937 {
938 	uint32_t mask_ecx, mask_edx;
939 	struct cpuid_info *cpi;
940 	struct cpuid_regs *cp;
941 	int xcpuid;
942 #if !defined(__xpv)
943 	extern int idle_cpu_prefer_mwait;
944 #endif
945 
946 	/*
947 	 * Space is statically allocated for the BSP; ensure the pointer is set
948 	 */
949 	if (cpu->cpu_id == 0) {
950 		if (cpu->cpu_m.mcpu_cpi == NULL)
951 			cpu->cpu_m.mcpu_cpi = &cpuid_info0;
952 	}
953 
954 	add_x86_feature(featureset, X86FSET_CPUID);
955 
956 	cpi = cpu->cpu_m.mcpu_cpi;
957 	ASSERT(cpi != NULL);
958 	cp = &cpi->cpi_std[0];
959 	cp->cp_eax = 0;
960 	cpi->cpi_maxeax = __cpuid_insn(cp);
961 	{
962 		uint32_t *iptr = (uint32_t *)cpi->cpi_vendorstr;
963 		*iptr++ = cp->cp_ebx;
964 		*iptr++ = cp->cp_edx;
965 		*iptr++ = cp->cp_ecx;
966 		*(char *)&cpi->cpi_vendorstr[12] = '\0';
967 	}
968 
969 	cpi->cpi_vendor = _cpuid_vendorstr_to_vendorcode(cpi->cpi_vendorstr);
970 	x86_vendor = cpi->cpi_vendor; /* for compatibility */
971 
972 	/*
973 	 * Limit the range in case of weird hardware
974 	 */
975 	if (cpi->cpi_maxeax > CPI_MAXEAX_MAX)
976 		cpi->cpi_maxeax = CPI_MAXEAX_MAX;
977 	if (cpi->cpi_maxeax < 1)
978 		goto pass1_done;
979 
980 	cp = &cpi->cpi_std[1];
981 	cp->cp_eax = 1;
982 	(void) __cpuid_insn(cp);
983 
984 	/*
985 	 * Extract identifying constants for easy access.
986 	 */
987 	cpi->cpi_model = CPI_MODEL(cpi);
988 	cpi->cpi_family = CPI_FAMILY(cpi);
989 
990 	if (cpi->cpi_family == 0xf)
991 		cpi->cpi_family += CPI_FAMILY_XTD(cpi);
992 
993 	/*
994 	 * Beware: AMD uses "extended model" iff base *FAMILY* == 0xf.
995 	 * Intel, and presumably everyone else, uses model == 0xf, as
996 	 * one would expect (max value means possible overflow).  Sigh.
997 	 */
998 
999 	switch (cpi->cpi_vendor) {
1000 	case X86_VENDOR_Intel:
1001 		if (IS_EXTENDED_MODEL_INTEL(cpi))
1002 			cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
1003 		break;
1004 	case X86_VENDOR_AMD:
1005 		if (CPI_FAMILY(cpi) == 0xf)
1006 			cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
1007 		break;
1008 	default:
1009 		if (cpi->cpi_model == 0xf)
1010 			cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
1011 		break;
1012 	}
1013 
1014 	cpi->cpi_step = CPI_STEP(cpi);
1015 	cpi->cpi_brandid = CPI_BRANDID(cpi);
1016 
1017 	/*
1018 	 * *default* assumptions:
1019 	 * - believe %edx feature word
1020 	 * - ignore %ecx feature word
1021 	 * - 32-bit virtual and physical addressing
1022 	 */
1023 	mask_edx = 0xffffffff;
1024 	mask_ecx = 0;
1025 
1026 	cpi->cpi_pabits = cpi->cpi_vabits = 32;
1027 
1028 	switch (cpi->cpi_vendor) {
1029 	case X86_VENDOR_Intel:
1030 		if (cpi->cpi_family == 5)
1031 			x86_type = X86_TYPE_P5;
1032 		else if (IS_LEGACY_P6(cpi)) {
1033 			x86_type = X86_TYPE_P6;
1034 			pentiumpro_bug4046376 = 1;
1035 			/*
1036 			 * Clear the SEP bit when it was set erroneously
1037 			 */
1038 			if (cpi->cpi_model < 3 && cpi->cpi_step < 3)
1039 				cp->cp_edx &= ~CPUID_INTC_EDX_SEP;
1040 		} else if (IS_NEW_F6(cpi) || cpi->cpi_family == 0xf) {
1041 			x86_type = X86_TYPE_P4;
1042 			/*
1043 			 * We don't currently depend on any of the %ecx
1044 			 * features until Prescott, so we'll only check
1045 			 * this from P4 onwards.  We might want to revisit
1046 			 * that idea later.
1047 			 */
1048 			mask_ecx = 0xffffffff;
1049 		} else if (cpi->cpi_family > 0xf)
1050 			mask_ecx = 0xffffffff;
1051 		/*
1052 		 * We don't support MONITOR/MWAIT if leaf 5 is not available
1053 		 * to obtain the monitor linesize.
1054 		 */
1055 		if (cpi->cpi_maxeax < 5)
1056 			mask_ecx &= ~CPUID_INTC_ECX_MON;
1057 		break;
1058 	case X86_VENDOR_IntelClone:
1059 	default:
1060 		break;
1061 	case X86_VENDOR_AMD:
1062 #if defined(OPTERON_ERRATUM_108)
1063 		if (cpi->cpi_family == 0xf && cpi->cpi_model == 0xe) {
1064 			cp->cp_eax = (0xf0f & cp->cp_eax) | 0xc0;
1065 			cpi->cpi_model = 0xc;
1066 		} else
1067 #endif
1068 		if (cpi->cpi_family == 5) {
1069 			/*
1070 			 * AMD K5 and K6
1071 			 *
1072 			 * These CPUs have an incomplete implementation
1073 			 * of MCA/MCE which we mask away.
1074 			 */
1075 			mask_edx &= ~(CPUID_INTC_EDX_MCE | CPUID_INTC_EDX_MCA);
1076 
1077 			/*
1078 			 * Model 0 uses the wrong (APIC) bit
1079 			 * to indicate PGE.  Fix it here.
1080 			 */
1081 			if (cpi->cpi_model == 0) {
1082 				if (cp->cp_edx & 0x200) {
1083 					cp->cp_edx &= ~0x200;
1084 					cp->cp_edx |= CPUID_INTC_EDX_PGE;
1085 				}
1086 			}
1087 
1088 			/*
1089 			 * Early models had problems w/ MMX; disable.
1090 			 */
1091 			if (cpi->cpi_model < 6)
1092 				mask_edx &= ~CPUID_INTC_EDX_MMX;
1093 		}
1094 
1095 		/*
1096 		 * For newer families, SSE3 and CX16, at least, are valid;
1097 		 * enable all
1098 		 */
1099 		if (cpi->cpi_family >= 0xf)
1100 			mask_ecx = 0xffffffff;
1101 		/*
1102 		 * We don't support MONITOR/MWAIT if leaf 5 is not available
1103 		 * to obtain the monitor linesize.
1104 		 */
1105 		if (cpi->cpi_maxeax < 5)
1106 			mask_ecx &= ~CPUID_INTC_ECX_MON;
1107 
1108 #if !defined(__xpv)
1109 		/*
1110 		 * Do not use MONITOR/MWAIT to halt in the idle loop on any AMD
1111 		 * processors.  AMD does not intend MWAIT to be used in the cpu
1112 		 * idle loop on current and future processors.  10h and future
1113 		 * AMD processors use more power in MWAIT than HLT.
1114 		 * Pre-family-10h Opterons do not have the MWAIT instruction.
1115 		 */
1116 		idle_cpu_prefer_mwait = 0;
1117 #endif
1118 
1119 		break;
1120 	case X86_VENDOR_TM:
1121 		/*
1122 		 * workaround the NT workaround in CMS 4.1
1123 		 */
1124 		if (cpi->cpi_family == 5 && cpi->cpi_model == 4 &&
1125 		    (cpi->cpi_step == 2 || cpi->cpi_step == 3))
1126 			cp->cp_edx |= CPUID_INTC_EDX_CX8;
1127 		break;
1128 	case X86_VENDOR_Centaur:
1129 		/*
1130 		 * workaround the NT workarounds again
1131 		 */
1132 		if (cpi->cpi_family == 6)
1133 			cp->cp_edx |= CPUID_INTC_EDX_CX8;
1134 		break;
1135 	case X86_VENDOR_Cyrix:
1136 		/*
1137 		 * We rely heavily on the probing in locore
1138 		 * to actually figure out what parts, if any,
1139 		 * of the Cyrix cpuid instruction to believe.
1140 		 */
1141 		switch (x86_type) {
1142 		case X86_TYPE_CYRIX_486:
1143 			mask_edx = 0;
1144 			break;
1145 		case X86_TYPE_CYRIX_6x86:
1146 			mask_edx = 0;
1147 			break;
1148 		case X86_TYPE_CYRIX_6x86L:
1149 			mask_edx =
1150 			    CPUID_INTC_EDX_DE |
1151 			    CPUID_INTC_EDX_CX8;
1152 			break;
1153 		case X86_TYPE_CYRIX_6x86MX:
1154 			mask_edx =
1155 			    CPUID_INTC_EDX_DE |
1156 			    CPUID_INTC_EDX_MSR |
1157 			    CPUID_INTC_EDX_CX8 |
1158 			    CPUID_INTC_EDX_PGE |
1159 			    CPUID_INTC_EDX_CMOV |
1160 			    CPUID_INTC_EDX_MMX;
1161 			break;
1162 		case X86_TYPE_CYRIX_GXm:
1163 			mask_edx =
1164 			    CPUID_INTC_EDX_MSR |
1165 			    CPUID_INTC_EDX_CX8 |
1166 			    CPUID_INTC_EDX_CMOV |
1167 			    CPUID_INTC_EDX_MMX;
1168 			break;
1169 		case X86_TYPE_CYRIX_MediaGX:
1170 			break;
1171 		case X86_TYPE_CYRIX_MII:
1172 		case X86_TYPE_VIA_CYRIX_III:
1173 			mask_edx =
1174 			    CPUID_INTC_EDX_DE |
1175 			    CPUID_INTC_EDX_TSC |
1176 			    CPUID_INTC_EDX_MSR |
1177 			    CPUID_INTC_EDX_CX8 |
1178 			    CPUID_INTC_EDX_PGE |
1179 			    CPUID_INTC_EDX_CMOV |
1180 			    CPUID_INTC_EDX_MMX;
1181 			break;
1182 		default:
1183 			break;
1184 		}
1185 		break;
1186 	}
1187 
1188 #if defined(__xpv)
1189 	/*
1190 	 * Do not support MONITOR/MWAIT under a hypervisor
1191 	 */
1192 	mask_ecx &= ~CPUID_INTC_ECX_MON;
1193 	/*
1194 	 * Do not support XSAVE under a hypervisor for now
1195 	 */
1196 	xsave_force_disable = B_TRUE;
1197 
1198 #endif	/* __xpv */
1199 
1200 	if (xsave_force_disable) {
1201 		mask_ecx &= ~CPUID_INTC_ECX_XSAVE;
1202 		mask_ecx &= ~CPUID_INTC_ECX_AVX;
1203 		mask_ecx &= ~CPUID_INTC_ECX_F16C;
1204 	}
1205 
1206 	/*
1207 	 * Now that we've figured out the masks that determine
1208 	 * which bits we choose to believe, apply the masks
1209 	 * to the feature words, then map the kernel's view
1210 	 * of these feature words into its feature word.
1211 	 */
1212 	cp->cp_edx &= mask_edx;
1213 	cp->cp_ecx &= mask_ecx;
1214 
1215 	/*
1216 	 * apply any platform restrictions (we don't call this
1217 	 * immediately after __cpuid_insn here, because we need the
1218 	 * workarounds applied above first)
1219 	 */
1220 	platform_cpuid_mangle(cpi->cpi_vendor, 1, cp);
1221 
1222 	/*
1223 	 * fold in overrides from the "eeprom" mechanism
1224 	 */
1225 	cp->cp_edx |= cpuid_feature_edx_include;
1226 	cp->cp_edx &= ~cpuid_feature_edx_exclude;
1227 
1228 	cp->cp_ecx |= cpuid_feature_ecx_include;
1229 	cp->cp_ecx &= ~cpuid_feature_ecx_exclude;
1230 
1231 	if (cp->cp_edx & CPUID_INTC_EDX_PSE) {
1232 		add_x86_feature(featureset, X86FSET_LARGEPAGE);
1233 	}
1234 	if (cp->cp_edx & CPUID_INTC_EDX_TSC) {
1235 		add_x86_feature(featureset, X86FSET_TSC);
1236 	}
1237 	if (cp->cp_edx & CPUID_INTC_EDX_MSR) {
1238 		add_x86_feature(featureset, X86FSET_MSR);
1239 	}
1240 	if (cp->cp_edx & CPUID_INTC_EDX_MTRR) {
1241 		add_x86_feature(featureset, X86FSET_MTRR);
1242 	}
1243 	if (cp->cp_edx & CPUID_INTC_EDX_PGE) {
1244 		add_x86_feature(featureset, X86FSET_PGE);
1245 	}
1246 	if (cp->cp_edx & CPUID_INTC_EDX_CMOV) {
1247 		add_x86_feature(featureset, X86FSET_CMOV);
1248 	}
1249 	if (cp->cp_edx & CPUID_INTC_EDX_MMX) {
1250 		add_x86_feature(featureset, X86FSET_MMX);
1251 	}
1252 	if ((cp->cp_edx & CPUID_INTC_EDX_MCE) != 0 &&
1253 	    (cp->cp_edx & CPUID_INTC_EDX_MCA) != 0) {
1254 		add_x86_feature(featureset, X86FSET_MCA);
1255 	}
1256 	if (cp->cp_edx & CPUID_INTC_EDX_PAE) {
1257 		add_x86_feature(featureset, X86FSET_PAE);
1258 	}
1259 	if (cp->cp_edx & CPUID_INTC_EDX_CX8) {
1260 		add_x86_feature(featureset, X86FSET_CX8);
1261 	}
1262 	if (cp->cp_ecx & CPUID_INTC_ECX_CX16) {
1263 		add_x86_feature(featureset, X86FSET_CX16);
1264 	}
1265 	if (cp->cp_edx & CPUID_INTC_EDX_PAT) {
1266 		add_x86_feature(featureset, X86FSET_PAT);
1267 	}
1268 	if (cp->cp_edx & CPUID_INTC_EDX_SEP) {
1269 		add_x86_feature(featureset, X86FSET_SEP);
1270 	}
1271 	if (cp->cp_edx & CPUID_INTC_EDX_FXSR) {
1272 		/*
1273 		 * In our implementation, fxsave/fxrstor
1274 		 * are prerequisites before we'll even
1275 		 * try and do SSE things.
1276 		 */
1277 		if (cp->cp_edx & CPUID_INTC_EDX_SSE) {
1278 			add_x86_feature(featureset, X86FSET_SSE);
1279 		}
1280 		if (cp->cp_edx & CPUID_INTC_EDX_SSE2) {
1281 			add_x86_feature(featureset, X86FSET_SSE2);
1282 		}
1283 		if (cp->cp_ecx & CPUID_INTC_ECX_SSE3) {
1284 			add_x86_feature(featureset, X86FSET_SSE3);
1285 		}
1286 		if (cp->cp_ecx & CPUID_INTC_ECX_SSSE3) {
1287 			add_x86_feature(featureset, X86FSET_SSSE3);
1288 		}
1289 		if (cp->cp_ecx & CPUID_INTC_ECX_SSE4_1) {
1290 			add_x86_feature(featureset, X86FSET_SSE4_1);
1291 		}
1292 		if (cp->cp_ecx & CPUID_INTC_ECX_SSE4_2) {
1293 			add_x86_feature(featureset, X86FSET_SSE4_2);
1294 		}
1295 		if (cp->cp_ecx & CPUID_INTC_ECX_AES) {
1296 			add_x86_feature(featureset, X86FSET_AES);
1297 		}
1298 		if (cp->cp_ecx & CPUID_INTC_ECX_PCLMULQDQ) {
1299 			add_x86_feature(featureset, X86FSET_PCLMULQDQ);
1300 		}
1301 
1302 		if (cp->cp_ecx & CPUID_INTC_ECX_XSAVE) {
1303 			add_x86_feature(featureset, X86FSET_XSAVE);
1304 
1305 			/* We only test AVX when there is XSAVE */
1306 			if (cp->cp_ecx & CPUID_INTC_ECX_AVX) {
1307 				add_x86_feature(featureset,
1308 				    X86FSET_AVX);
1309 
1310 				if (cp->cp_ecx & CPUID_INTC_ECX_F16C)
1311 					add_x86_feature(featureset,
1312 					    X86FSET_F16C);
1313 			}
1314 		}
1315 	}
1316 	if (cp->cp_ecx & CPUID_INTC_ECX_X2APIC) {
1317 		add_x86_feature(featureset, X86FSET_X2APIC);
1318 	}
1319 	if (cp->cp_edx & CPUID_INTC_EDX_DE) {
1320 		add_x86_feature(featureset, X86FSET_DE);
1321 	}
1322 #if !defined(__xpv)
1323 	if (cp->cp_ecx & CPUID_INTC_ECX_MON) {
1324 
1325 		/*
1326 		 * We require the CLFLUSH instruction for the erratum workaround
1327 		 * to use MONITOR/MWAIT.
1328 		 */
1329 		if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) {
1330 			cpi->cpi_mwait.support |= MWAIT_SUPPORT;
1331 			add_x86_feature(featureset, X86FSET_MWAIT);
1332 		} else {
1333 			extern int idle_cpu_assert_cflush_monitor;
1334 
1335 			/*
1336 			 * All processors we are aware of which have
1337 			 * MONITOR/MWAIT also have CLFLUSH.
1338 			 */
1339 			if (idle_cpu_assert_cflush_monitor) {
1340 				ASSERT((cp->cp_ecx & CPUID_INTC_ECX_MON) &&
1341 				    (cp->cp_edx & CPUID_INTC_EDX_CLFSH));
1342 			}
1343 		}
1344 	}
1345 #endif	/* __xpv */
1346 
1347 	if (cp->cp_ecx & CPUID_INTC_ECX_VMX) {
1348 		add_x86_feature(featureset, X86FSET_VMX);
1349 	}
1350 
1351 	if (cp->cp_ecx & CPUID_INTC_ECX_RDRAND)
1352 		add_x86_feature(featureset, X86FSET_RDRAND);
1353 
1354 	/*
1355 	 * Only need this the first time; the rest of the cpus will follow suit.
1356 	 * We only capture this for the boot cpu.
1357 	 */
1358 	if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) {
1359 		add_x86_feature(featureset, X86FSET_CLFSH);
1360 		x86_clflush_size = (BITX(cp->cp_ebx, 15, 8) * 8);
1361 	}
1362 	if (is_x86_feature(featureset, X86FSET_PAE))
1363 		cpi->cpi_pabits = 36;
1364 
1365 	/*
1366 	 * Hyperthreading configuration is slightly tricky on Intel
1367 	 * and pure clones, and even trickier on AMD.
1368 	 *
1369 	 * (AMD chose to set the HTT bit on their CMP processors,
1370 	 * even though they're not actually hyperthreaded.  Thus it
1371 	 * takes a bit more work to figure out what's really going
1372 	 * on ... see the handling of the CMP_LGCY bit below)
1373 	 */
1374 	if (cp->cp_edx & CPUID_INTC_EDX_HTT) {
1375 		cpi->cpi_ncpu_per_chip = CPI_CPU_COUNT(cpi);
1376 		if (cpi->cpi_ncpu_per_chip > 1)
1377 			add_x86_feature(featureset, X86FSET_HTT);
1378 	} else {
1379 		cpi->cpi_ncpu_per_chip = 1;
1380 	}
1381 
1382 	/*
1383 	 * Work on the "extended" feature information, doing
1384 	 * some basic initialization for cpuid_pass2()
1385 	 */
1386 	xcpuid = 0;
1387 	switch (cpi->cpi_vendor) {
1388 	case X86_VENDOR_Intel:
1389 		if (IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf)
1390 			xcpuid++;
1391 		break;
1392 	case X86_VENDOR_AMD:
1393 		if (cpi->cpi_family > 5 ||
1394 		    (cpi->cpi_family == 5 && cpi->cpi_model >= 1))
1395 			xcpuid++;
1396 		break;
1397 	case X86_VENDOR_Cyrix:
1398 		/*
1399 		 * Only these Cyrix CPUs are -known- to support
1400 		 * extended cpuid operations.
1401 		 */
1402 		if (x86_type == X86_TYPE_VIA_CYRIX_III ||
1403 		    x86_type == X86_TYPE_CYRIX_GXm)
1404 			xcpuid++;
1405 		break;
1406 	case X86_VENDOR_Centaur:
1407 	case X86_VENDOR_TM:
1408 	default:
1409 		xcpuid++;
1410 		break;
1411 	}
1412 
1413 	if (xcpuid) {
1414 		cp = &cpi->cpi_extd[0];
1415 		cp->cp_eax = 0x80000000;
1416 		cpi->cpi_xmaxeax = __cpuid_insn(cp);
1417 	}
1418 
1419 	if (cpi->cpi_xmaxeax & 0x80000000) {
1420 
1421 		if (cpi->cpi_xmaxeax > CPI_XMAXEAX_MAX)
1422 			cpi->cpi_xmaxeax = CPI_XMAXEAX_MAX;
1423 
1424 		switch (cpi->cpi_vendor) {
1425 		case X86_VENDOR_Intel:
1426 		case X86_VENDOR_AMD:
1427 			if (cpi->cpi_xmaxeax < 0x80000001)
1428 				break;
1429 			cp = &cpi->cpi_extd[1];
1430 			cp->cp_eax = 0x80000001;
1431 			(void) __cpuid_insn(cp);
1432 
1433 			if (cpi->cpi_vendor == X86_VENDOR_AMD &&
1434 			    cpi->cpi_family == 5 &&
1435 			    cpi->cpi_model == 6 &&
1436 			    cpi->cpi_step == 6) {
1437 				/*
1438 				 * K6 model 6 uses bit 10 to indicate SYSC.
1439 				 * Later models use bit 11. Fix it here.
1440 				 */
1441 				if (cp->cp_edx & 0x400) {
1442 					cp->cp_edx &= ~0x400;
1443 					cp->cp_edx |= CPUID_AMD_EDX_SYSC;
1444 				}
1445 			}
1446 
1447 			platform_cpuid_mangle(cpi->cpi_vendor, 0x80000001, cp);
1448 
1449 			/*
1450 			 * Compute the additions to the kernel's feature word.
1451 			 */
1452 			if (cp->cp_edx & CPUID_AMD_EDX_NX) {
1453 				add_x86_feature(featureset, X86FSET_NX);
1454 			}
1455 
1456 			/*
1457 			 * Regardless of whether or not we boot 64-bit,
1458 			 * we should have a way to identify whether
1459 			 * the CPU is capable of running 64-bit.
1460 			 */
1461 			if (cp->cp_edx & CPUID_AMD_EDX_LM) {
1462 				add_x86_feature(featureset, X86FSET_64);
1463 			}
1464 
1465 #if defined(__amd64)
1466 			/* 1 GB large page - enable only for 64 bit kernel */
1467 			if (cp->cp_edx & CPUID_AMD_EDX_1GPG) {
1468 				add_x86_feature(featureset, X86FSET_1GPG);
1469 			}
1470 #endif
1471 
1472 			if ((cpi->cpi_vendor == X86_VENDOR_AMD) &&
1473 			    (cpi->cpi_std[1].cp_edx & CPUID_INTC_EDX_FXSR) &&
1474 			    (cp->cp_ecx & CPUID_AMD_ECX_SSE4A)) {
1475 				add_x86_feature(featureset, X86FSET_SSE4A);
1476 			}
1477 
1478 			/*
1479 			 * If both the HTT and CMP_LGCY bits are set,
1480 			 * then we're not actually HyperThreaded.  Read
1481 			 * "AMD CPUID Specification" for more details.
1482 			 */
1483 			if (cpi->cpi_vendor == X86_VENDOR_AMD &&
1484 			    is_x86_feature(featureset, X86FSET_HTT) &&
1485 			    (cp->cp_ecx & CPUID_AMD_ECX_CMP_LGCY)) {
1486 				remove_x86_feature(featureset, X86FSET_HTT);
1487 				add_x86_feature(featureset, X86FSET_CMP);
1488 			}
1489 #if defined(__amd64)
1490 			/*
1491 			 * It's really tricky to support syscall/sysret in
1492 			 * the i386 kernel; we rely on sysenter/sysexit
1493 			 * instead.  In the amd64 kernel, things are -way-
1494 			 * better.
1495 			 */
1496 			if (cp->cp_edx & CPUID_AMD_EDX_SYSC) {
1497 				add_x86_feature(featureset, X86FSET_ASYSC);
1498 			}
1499 
1500 			/*
1501 			 * While we're thinking about system calls, note
1502 			 * that AMD processors don't support sysenter
1503 			 * in long mode at all, so don't try to program them.
1504 			 */
1505 			if (x86_vendor == X86_VENDOR_AMD) {
1506 				remove_x86_feature(featureset, X86FSET_SEP);
1507 			}
1508 #endif
1509 			if (cp->cp_edx & CPUID_AMD_EDX_TSCP) {
1510 				add_x86_feature(featureset, X86FSET_TSCP);
1511 			}
1512 
1513 			if (cp->cp_ecx & CPUID_AMD_ECX_SVM) {
1514 				add_x86_feature(featureset, X86FSET_SVM);
1515 			}
1516 
1517 			if (cp->cp_ecx & CPUID_AMD_ECX_TOPOEXT) {
1518 				add_x86_feature(featureset, X86FSET_TOPOEXT);
1519 			}
1520 			break;
1521 		default:
1522 			break;
1523 		}
1524 
1525 		/*
1526 		 * Get CPUID data about processor cores and hyperthreads.
1527 		 */
1528 		switch (cpi->cpi_vendor) {
1529 		case X86_VENDOR_Intel:
1530 			if (cpi->cpi_maxeax >= 4) {
1531 				cp = &cpi->cpi_std[4];
1532 				cp->cp_eax = 4;
1533 				cp->cp_ecx = 0;
1534 				(void) __cpuid_insn(cp);
1535 				platform_cpuid_mangle(cpi->cpi_vendor, 4, cp);
1536 			}
1537 			/*FALLTHROUGH*/
1538 		case X86_VENDOR_AMD:
1539 			if (cpi->cpi_xmaxeax < 0x80000008)
1540 				break;
1541 			cp = &cpi->cpi_extd[8];
1542 			cp->cp_eax = 0x80000008;
1543 			(void) __cpuid_insn(cp);
1544 			platform_cpuid_mangle(cpi->cpi_vendor, 0x80000008, cp);
1545 
1546 			/*
1547 			 * Virtual and physical address limits from
1548 			 * cpuid override previously guessed values.
1549 			 */
1550 			cpi->cpi_pabits = BITX(cp->cp_eax, 7, 0);
1551 			cpi->cpi_vabits = BITX(cp->cp_eax, 15, 8);
1552 			break;
1553 		default:
1554 			break;
1555 		}
1556 
1557 		/*
1558 		 * Derive the number of cores per chip
1559 		 */
1560 		switch (cpi->cpi_vendor) {
1561 		case X86_VENDOR_Intel:
1562 			if (cpi->cpi_maxeax < 4) {
1563 				cpi->cpi_ncore_per_chip = 1;
1564 				break;
1565 			} else {
1566 				cpi->cpi_ncore_per_chip =
1567 				    BITX((cpi)->cpi_std[4].cp_eax, 31, 26) + 1;
1568 			}
1569 			break;
1570 		case X86_VENDOR_AMD:
1571 			if (cpi->cpi_xmaxeax < 0x80000008) {
1572 				cpi->cpi_ncore_per_chip = 1;
1573 				break;
1574 			} else {
1575 				/*
1576 				 * On family 0xf, cpuid fn 0x80000008 ECX[7:0] "NC" is
1577 				 * 1 less than the number of physical cores on
1578 				 * the chip.  In family 0x10 this value can
1579 				 * be affected by "downcoring" - it reflects
1580 				 * 1 less than the number of cores actually
1581 				 * enabled on this node.
1582 				 */
1583 				cpi->cpi_ncore_per_chip =
1584 				    BITX((cpi)->cpi_extd[8].cp_ecx, 7, 0) + 1;
1585 			}
1586 			break;
1587 		default:
1588 			cpi->cpi_ncore_per_chip = 1;
1589 			break;
1590 		}
1591 
1592 		/*
1593 		 * Get CPUID data about TSC Invariance in Deep C-State.
1594 		 */
1595 		switch (cpi->cpi_vendor) {
1596 		case X86_VENDOR_Intel:
1597 			if (cpi->cpi_xmaxeax >= 0x80000007) {
1598 				cp = &cpi->cpi_extd[7];
1599 				cp->cp_eax = 0x80000007;
1600 				cp->cp_ecx = 0;
1601 				(void) __cpuid_insn(cp);
1602 			}
1603 			break;
1604 		default:
1605 			break;
1606 		}
1607 	} else {
1608 		cpi->cpi_ncore_per_chip = 1;
1609 	}
1610 
1611 	/*
1612 	 * If more than one core, then this processor is CMP.
1613 	 */
1614 	if (cpi->cpi_ncore_per_chip > 1) {
1615 		add_x86_feature(featureset, X86FSET_CMP);
1616 	}
1617 
1618 	/*
1619 	 * If the number of cores is the same as the number
1620 	 * of CPUs, then we cannot have HyperThreading.
1621 	 */
1622 	if (cpi->cpi_ncpu_per_chip == cpi->cpi_ncore_per_chip) {
1623 		remove_x86_feature(featureset, X86FSET_HTT);
1624 	}
1625 
1626 	cpi->cpi_apicid = CPI_APIC_ID(cpi);
1627 	cpi->cpi_procnodes_per_pkg = 1;
1628 	cpi->cpi_cores_per_compunit = 1;
1629 	if (is_x86_feature(featureset, X86FSET_HTT) == B_FALSE &&
1630 	    is_x86_feature(featureset, X86FSET_CMP) == B_FALSE) {
1631 		/*
1632 		 * Single-core single-threaded processors.
1633 		 */
1634 		cpi->cpi_chipid = -1;
1635 		cpi->cpi_clogid = 0;
1636 		cpi->cpi_coreid = cpu->cpu_id;
1637 		cpi->cpi_pkgcoreid = 0;
1638 		if (cpi->cpi_vendor == X86_VENDOR_AMD)
1639 			cpi->cpi_procnodeid = BITX(cpi->cpi_apicid, 3, 0);
1640 		else
1641 			cpi->cpi_procnodeid = cpi->cpi_chipid;
1642 	} else if (cpi->cpi_ncpu_per_chip > 1) {
1643 		if (cpi->cpi_vendor == X86_VENDOR_Intel)
1644 			cpuid_intel_getids(cpu, featureset);
1645 		else if (cpi->cpi_vendor == X86_VENDOR_AMD)
1646 			cpuid_amd_getids(cpu);
1647 		else {
1648 			/*
1649 			 * All other processors are currently
1650 			 * assumed to have single cores.
1651 			 */
1652 			cpi->cpi_coreid = cpi->cpi_chipid;
1653 			cpi->cpi_pkgcoreid = 0;
1654 			cpi->cpi_procnodeid = cpi->cpi_chipid;
1655 			cpi->cpi_compunitid = cpi->cpi_chipid;
1656 		}
1657 	}
1658 
1659 	/*
1660 	 * Synthesize chip "revision" and socket type
1661 	 */
1662 	cpi->cpi_chiprev = _cpuid_chiprev(cpi->cpi_vendor, cpi->cpi_family,
1663 	    cpi->cpi_model, cpi->cpi_step);
1664 	cpi->cpi_chiprevstr = _cpuid_chiprevstr(cpi->cpi_vendor,
1665 	    cpi->cpi_family, cpi->cpi_model, cpi->cpi_step);
1666 	cpi->cpi_socket = _cpuid_skt(cpi->cpi_vendor, cpi->cpi_family,
1667 	    cpi->cpi_model, cpi->cpi_step);
1668 
1669 pass1_done:
1670 	cpi->cpi_pass = 1;
1671 }
1672 
1673 /*
1674  * Make copies of the cpuid table entries we depend on, in
1675  * part for ease of parsing now, in part so that we have only
1676  * one place to correct any of it, in part for ease of
1677  * later export to userland, and in part so we can look at
1678  * this stuff in a crash dump.
1679  */
1680 
1681 /*ARGSUSED*/
1682 void
1683 cpuid_pass2(cpu_t *cpu)
1684 {
1685 	uint_t n, nmax;
1686 	int i;
1687 	struct cpuid_regs *cp;
1688 	uint8_t *dp;
1689 	uint32_t *iptr;
1690 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
1691 
1692 	ASSERT(cpi->cpi_pass == 1);
1693 
1694 	if (cpi->cpi_maxeax < 1)
1695 		goto pass2_done;
1696 
1697 	if ((nmax = cpi->cpi_maxeax + 1) > NMAX_CPI_STD)
1698 		nmax = NMAX_CPI_STD;
1699 	/*
1700 	 * (We already handled n == 0 and n == 1 in pass 1)
1701 	 */
1702 	for (n = 2, cp = &cpi->cpi_std[2]; n < nmax; n++, cp++) {
1703 		cp->cp_eax = n;
1704 
1705 		/*
1706 		 * CPUID function 4 expects %ecx to be initialized
1707 		 * with an index which indicates which cache to return
1708 		 * information about. The OS is expected to call function 4
1709 		 * with %ecx set to 0, 1, 2, ... until it returns with
1710 		 * EAX[4:0] set to 0, which indicates there are no more
1711 		 * caches.
1712 		 *
1713 		 * Here, populate cpi_std[4] with the information returned by
1714 		 * function 4 when %ecx == 0, and do the rest in cpuid_pass3()
1715 		 * when dynamic memory allocation becomes available.
1716 		 *
1717 		 * Note: we need to explicitly initialize %ecx here, since
1718 		 * function 4 may have been previously invoked.
1719 		 */
1720 		if (n == 4)
1721 			cp->cp_ecx = 0;
1722 
1723 		(void) __cpuid_insn(cp);
1724 		platform_cpuid_mangle(cpi->cpi_vendor, n, cp);
1725 		switch (n) {
1726 		case 2:
1727 			/*
1728 			 * "the lower 8 bits of the %eax register
1729 			 * contain a value that identifies the number
1730 			 * of times the cpuid [instruction] has to be
1731 			 * executed to obtain a complete image of the
1732 			 * processor's caching systems."
1733 			 *
1734 			 * How *do* they make this stuff up?
1735 			 */
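			/*
			 * The non-zero bytes gathered below are Intel cache
			 * descriptors; for example, a (documented) descriptor
			 * byte of 0x2c denotes a 32KB, 8-way L1 data cache
			 * with 64-byte lines.  They are decoded against a
			 * table of known descriptor values in a later pass.
			 */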
1736 			cpi->cpi_ncache = sizeof (*cp) *
1737 			    BITX(cp->cp_eax, 7, 0);
1738 			if (cpi->cpi_ncache == 0)
1739 				break;
1740 			cpi->cpi_ncache--;	/* skip count byte */
1741 
1742 			/*
1743 			 * Well, for now, rather than attempt to implement
1744 			 * this slightly dubious algorithm, we just look
1745 			 * at the first 15 ..
1746 			 */
1747 			if (cpi->cpi_ncache > (sizeof (*cp) - 1))
1748 				cpi->cpi_ncache = sizeof (*cp) - 1;
1749 
1750 			dp = cpi->cpi_cacheinfo;
1751 			if (BITX(cp->cp_eax, 31, 31) == 0) {
1752 				uint8_t *p = (void *)&cp->cp_eax;
1753 				for (i = 1; i < 4; i++)
1754 					if (p[i] != 0)
1755 						*dp++ = p[i];
1756 			}
1757 			if (BITX(cp->cp_ebx, 31, 31) == 0) {
1758 				uint8_t *p = (void *)&cp->cp_ebx;
1759 				for (i = 0; i < 4; i++)
1760 					if (p[i] != 0)
1761 						*dp++ = p[i];
1762 			}
1763 			if (BITX(cp->cp_ecx, 31, 31) == 0) {
1764 				uint8_t *p = (void *)&cp->cp_ecx;
1765 				for (i = 0; i < 4; i++)
1766 					if (p[i] != 0)
1767 						*dp++ = p[i];
1768 			}
1769 			if (BITX(cp->cp_edx, 31, 31) == 0) {
1770 				uint8_t *p = (void *)&cp->cp_edx;
1771 				for (i = 0; i < 4; i++)
1772 					if (p[i] != 0)
1773 						*dp++ = p[i];
1774 			}
1775 			break;
1776 
1777 		case 3:	/* Processor serial number, if PSN supported */
1778 			break;
1779 
1780 		case 4:	/* Deterministic cache parameters */
1781 			break;
1782 
1783 		case 5:	/* Monitor/Mwait parameters */
1784 		{
1785 			size_t mwait_size;
1786 
1787 			/*
1788 			 * check cpi_mwait.support which was set in cpuid_pass1
1789 			 */
1790 			if (!(cpi->cpi_mwait.support & MWAIT_SUPPORT))
1791 				break;
1792 
1793 			/*
1794 			 * Protect ourselves from an insane mwait line size.
1795 			 * Workaround for incomplete hardware emulator(s).
1796 			 */
1797 			mwait_size = (size_t)MWAIT_SIZE_MAX(cpi);
1798 			if (mwait_size < sizeof (uint32_t) ||
1799 			    !ISP2(mwait_size)) {
1800 #if DEBUG
1801 				cmn_err(CE_NOTE, "Cannot handle cpu %d mwait "
1802 				    "size %ld", cpu->cpu_id, (long)mwait_size);
1803 #endif
1804 				break;
1805 			}
1806 
1807 			cpi->cpi_mwait.mon_min = (size_t)MWAIT_SIZE_MIN(cpi);
1808 			cpi->cpi_mwait.mon_max = mwait_size;
1809 			if (MWAIT_EXTENSION(cpi)) {
1810 				cpi->cpi_mwait.support |= MWAIT_EXTENSIONS;
1811 				if (MWAIT_INT_ENABLE(cpi))
1812 					cpi->cpi_mwait.support |=
1813 					    MWAIT_ECX_INT_ENABLE;
1814 			}
1815 			break;
1816 		}
1817 		default:
1818 			break;
1819 		}
1820 	}
1821 
1822 	if (cpi->cpi_maxeax >= 0xB && cpi->cpi_vendor == X86_VENDOR_Intel) {
1823 		struct cpuid_regs regs;
1824 
1825 		cp = &regs;
1826 		cp->cp_eax = 0xB;
1827 		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
1828 
1829 		(void) __cpuid_insn(cp);
1830 
1831 		/*
1832 		 * Check that CPUID.(EAX=0BH, ECX=0H):EBX is non-zero, which
1833 		 * indicates that the extended topology enumeration leaf is
1834 		 * available.
1835 		 */
1836 		if (cp->cp_ebx) {
1837 			uint32_t x2apic_id;
1838 			uint_t coreid_shift = 0;
1839 			uint_t ncpu_per_core = 1;
1840 			uint_t chipid_shift = 0;
1841 			uint_t ncpu_per_chip = 1;
1842 			uint_t i;
1843 			uint_t level;
1844 
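			/*
			 * Walk the topology levels reported by leaf 0xB:
			 * level type 1 (SMT) reports the width of the
			 * thread-id field, level type 2 (core) reports the
			 * combined width of the thread and core id fields.
			 */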
1845 			for (i = 0; i < CPI_FNB_ECX_MAX; i++) {
1846 				cp->cp_eax = 0xB;
1847 				cp->cp_ecx = i;
1848 
1849 				(void) __cpuid_insn(cp);
1850 				level = CPI_CPU_LEVEL_TYPE(cp);
1851 
1852 				if (level == 1) {
1853 					x2apic_id = cp->cp_edx;
1854 					coreid_shift = BITX(cp->cp_eax, 4, 0);
1855 					ncpu_per_core = BITX(cp->cp_ebx, 15, 0);
1856 				} else if (level == 2) {
1857 					x2apic_id = cp->cp_edx;
1858 					chipid_shift = BITX(cp->cp_eax, 4, 0);
1859 					ncpu_per_chip = BITX(cp->cp_ebx, 15, 0);
1860 				}
1861 			}
1862 
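			/*
			 * Derive the identifiers from the x2APIC id: the low
			 * chipid_shift bits identify the logical CPU within
			 * the package, the remaining bits identify the chip;
			 * coreid_shift is used the same way for cores.
			 */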
1863 			cpi->cpi_apicid = x2apic_id;
1864 			cpi->cpi_ncpu_per_chip = ncpu_per_chip;
1865 			cpi->cpi_ncore_per_chip = ncpu_per_chip /
1866 			    ncpu_per_core;
1867 			cpi->cpi_chipid = x2apic_id >> chipid_shift;
1868 			cpi->cpi_clogid = x2apic_id & ((1 << chipid_shift) - 1);
1869 			cpi->cpi_coreid = x2apic_id >> coreid_shift;
1870 			cpi->cpi_pkgcoreid = cpi->cpi_clogid >> coreid_shift;
1871 		}
1872 
1873 		/* Make cp NULL so that we don't stumble on others */
1874 		cp = NULL;
1875 	}
1876 
1877 	/*
1878 	 * XSAVE enumeration
1879 	 */
1880 	if (cpi->cpi_maxeax >= 0xD) {
1881 		struct cpuid_regs regs;
1882 		boolean_t cpuid_d_valid = B_TRUE;
1883 
1884 		cp = &regs;
1885 		cp->cp_eax = 0xD;
1886 		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
1887 
1888 		(void) __cpuid_insn(cp);
1889 
1890 		/*
1891 		 * Sanity checks for debug
1892 		 */
1893 		if ((cp->cp_eax & XFEATURE_LEGACY_FP) == 0 ||
1894 		    (cp->cp_eax & XFEATURE_SSE) == 0) {
1895 			cpuid_d_valid = B_FALSE;
1896 		}
1897 
1898 		cpi->cpi_xsave.xsav_hw_features_low = cp->cp_eax;
1899 		cpi->cpi_xsave.xsav_hw_features_high = cp->cp_edx;
1900 		cpi->cpi_xsave.xsav_max_size = cp->cp_ecx;
1901 
1902 		/*
1903 		 * If the hw supports AVX, get the size and offset in the save
1904 		 * area for the ymm state.
1905 		 */
1906 		if (cpi->cpi_xsave.xsav_hw_features_low & XFEATURE_AVX) {
1907 			cp->cp_eax = 0xD;
1908 			cp->cp_ecx = 2;
1909 			cp->cp_edx = cp->cp_ebx = 0;
1910 
1911 			(void) __cpuid_insn(cp);
1912 
1913 			if (cp->cp_ebx != CPUID_LEAFD_2_YMM_OFFSET ||
1914 			    cp->cp_eax != CPUID_LEAFD_2_YMM_SIZE) {
1915 				cpuid_d_valid = B_FALSE;
1916 			}
1917 
1918 			cpi->cpi_xsave.ymm_size = cp->cp_eax;
1919 			cpi->cpi_xsave.ymm_offset = cp->cp_ebx;
1920 		}
1921 
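		/*
		 * If the kernel isn't using XSAVE, leave the state size at
		 * zero; otherwise take the size advertised by leaf 0xD, or
		 * disable XSAVE entirely (boot CPU only) if the leaf looks
		 * broken.
		 */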
1922 		if (!is_x86_feature(x86_featureset, X86FSET_XSAVE)) {
1923 			xsave_state_size = 0;
1924 		} else if (cpuid_d_valid) {
1925 			xsave_state_size = cpi->cpi_xsave.xsav_max_size;
1926 		} else {
1927 			/* Broken CPUID 0xD, probably in HVM */
1928 			cmn_err(CE_WARN, "cpu%d: CPUID.0xD returns invalid "
1929 			    "value: hw_low = %d, hw_high = %d, xsave_size = %d"
1930 			    ", ymm_size = %d, ymm_offset = %d\n",
1931 			    cpu->cpu_id, cpi->cpi_xsave.xsav_hw_features_low,
1932 			    cpi->cpi_xsave.xsav_hw_features_high,
1933 			    (int)cpi->cpi_xsave.xsav_max_size,
1934 			    (int)cpi->cpi_xsave.ymm_size,
1935 			    (int)cpi->cpi_xsave.ymm_offset);
1936 
1937 			if (xsave_state_size != 0) {
1938 				/*
1939 				 * This must be a non-boot CPU. We cannot
1940 				 * continue, because boot cpu has already
1941 				 * enabled XSAVE.
1942 				 */
1943 				ASSERT(cpu->cpu_id != 0);
1944 				cmn_err(CE_PANIC, "cpu%d: we have already "
1945 				    "enabled XSAVE on boot cpu, cannot "
1946 				    "continue.", cpu->cpu_id);
1947 			} else {
1948 				/*
1949 				 * Must be from boot CPU, OK to disable XSAVE.
1950 				 */
1951 				ASSERT(cpu->cpu_id == 0);
1952 				remove_x86_feature(x86_featureset,
1953 				    X86FSET_XSAVE);
1954 				remove_x86_feature(x86_featureset, X86FSET_AVX);
1955 				CPI_FEATURES_ECX(cpi) &= ~CPUID_INTC_ECX_XSAVE;
1956 				CPI_FEATURES_ECX(cpi) &= ~CPUID_INTC_ECX_AVX;
1957 				CPI_FEATURES_ECX(cpi) &= ~CPUID_INTC_ECX_F16C;
1958 				xsave_force_disable = B_TRUE;
1959 			}
1960 		}
1961 	}
1962 
1963 
1964 	if ((cpi->cpi_xmaxeax & 0x80000000) == 0)
1965 		goto pass2_done;
1966 
1967 	if ((nmax = cpi->cpi_xmaxeax - 0x80000000 + 1) > NMAX_CPI_EXTD)
1968 		nmax = NMAX_CPI_EXTD;
1969 	/*
1970 	 * Copy the extended properties, fixing them as we go.
1971 	 * (We already handled n == 0 and n == 1 in pass 1)
1972 	 */
1973 	iptr = (void *)cpi->cpi_brandstr;
1974 	for (n = 2, cp = &cpi->cpi_extd[2]; n < nmax; cp++, n++) {
1975 		cp->cp_eax = 0x80000000 + n;
1976 		(void) __cpuid_insn(cp);
1977 		platform_cpuid_mangle(cpi->cpi_vendor, 0x80000000 + n, cp);
1978 		switch (n) {
1979 		case 2:
1980 		case 3:
1981 		case 4:
1982 			/*
1983 			 * Extract the brand string
1984 			 */
1985 			*iptr++ = cp->cp_eax;
1986 			*iptr++ = cp->cp_ebx;
1987 			*iptr++ = cp->cp_ecx;
1988 			*iptr++ = cp->cp_edx;
1989 			break;
1990 		case 5:
1991 			switch (cpi->cpi_vendor) {
1992 			case X86_VENDOR_AMD:
1993 				/*
1994 				 * The Athlon and Duron were the first
1995 				 * parts to report the sizes of the
1996 				 * TLB for large pages. Before then,
1997 				 * we don't trust the data.
1998 				 */
1999 				if (cpi->cpi_family < 6 ||
2000 				    (cpi->cpi_family == 6 &&
2001 				    cpi->cpi_model < 1))
2002 					cp->cp_eax = 0;
2003 				break;
2004 			default:
2005 				break;
2006 			}
2007 			break;
2008 		case 6:
2009 			switch (cpi->cpi_vendor) {
2010 			case X86_VENDOR_AMD:
2011 				/*
2012 				 * The Athlon and Duron were the first
2013 				 * AMD parts with L2 TLB's.
2014 				 * Before then, don't trust the data.
2015 				 */
2016 				if (cpi->cpi_family < 6 ||
2017 				    (cpi->cpi_family == 6 &&
2018 				    cpi->cpi_model < 1))
2019 					cp->cp_eax = cp->cp_ebx = 0;
2020 				/*
2021 				 * AMD Duron rev A0 reports L2
2022 				 * cache size incorrectly as 1K
2023 				 * when it is really 64K
2024 				 */
2025 				if (cpi->cpi_family == 6 &&
2026 				    cpi->cpi_model == 3 &&
2027 				    cpi->cpi_step == 0) {
2028 					cp->cp_ecx &= 0xffff;
2029 					cp->cp_ecx |= 0x400000;
2030 				}
2031 				break;
2032 			case X86_VENDOR_Cyrix:	/* VIA C3 */
2033 				/*
2034 				 * VIA C3 processors are a bit messed
2035 				 * up w.r.t. encoding cache sizes in %ecx
2036 				 */
2037 				if (cpi->cpi_family != 6)
2038 					break;
2039 				/*
2040 				 * model 7 and 8 were incorrectly encoded
2041 				 * models 7 and 8 were incorrectly encoded
2042 				 * xxx is model 8 really broken?
2043 				 */
2044 				if (cpi->cpi_model == 7 ||
2045 				    cpi->cpi_model == 8)
2046 					cp->cp_ecx =
2047 					    BITX(cp->cp_ecx, 31, 24) << 16 |
2048 					    BITX(cp->cp_ecx, 23, 16) << 12 |
2049 					    BITX(cp->cp_ecx, 15, 8) << 8 |
2050 					    BITX(cp->cp_ecx, 7, 0);
2051 				/*
2052 				 * model 9 stepping 1 has wrong associativity
2053 				 */
2054 				if (cpi->cpi_model == 9 && cpi->cpi_step == 1)
2055 					cp->cp_ecx |= 8 << 12;
2056 				break;
2057 			case X86_VENDOR_Intel:
2058 				/*
2059 				 * Extended L2 Cache features function.
2060 				 * First appeared on Prescott.
2061 				 */
2062 			default:
2063 				break;
2064 			}
2065 			break;
2066 		default:
2067 			break;
2068 		}
2069 	}
2070 
2071 pass2_done:
2072 	cpi->cpi_pass = 2;
2073 }
2074 
2075 static const char *
2076 intel_cpubrand(const struct cpuid_info *cpi)
2077 {
2078 	int i;
2079 
2080 	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
2081 	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5)
2082 		return ("i486");
2083 
2084 	switch (cpi->cpi_family) {
2085 	case 5:
2086 		return ("Intel Pentium(r)");
2087 	case 6:
2088 		switch (cpi->cpi_model) {
2089 			uint_t celeron, xeon;
2090 			const struct cpuid_regs *cp;
2091 		case 0:
2092 		case 1:
2093 		case 2:
2094 			return ("Intel Pentium(r) Pro");
2095 		case 3:
2096 		case 4:
2097 			return ("Intel Pentium(r) II");
2098 		case 6:
2099 			return ("Intel Celeron(r)");
2100 		case 5:
2101 		case 7:
2102 			celeron = xeon = 0;
2103 			cp = &cpi->cpi_std[2];	/* cache info */
2104 
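			/*
			 * Scan the leaf 2 cache descriptors: 0x40 (no L2
			 * cache) suggests a Celeron, while 0x44/0x45 (large
			 * L2 cache) suggests a Xeon.
			 */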
2105 			for (i = 1; i < 4; i++) {
2106 				uint_t tmp;
2107 
2108 				tmp = (cp->cp_eax >> (8 * i)) & 0xff;
2109 				if (tmp == 0x40)
2110 					celeron++;
2111 				if (tmp >= 0x44 && tmp <= 0x45)
2112 					xeon++;
2113 			}
2114 
2115 			for (i = 0; i < 2; i++) {
2116 				uint_t tmp;
2117 
2118 				tmp = (cp->cp_ebx >> (8 * i)) & 0xff;
2119 				if (tmp == 0x40)
2120 					celeron++;
2121 				else if (tmp >= 0x44 && tmp <= 0x45)
2122 					xeon++;
2123 			}
2124 
2125 			for (i = 0; i < 4; i++) {
2126 				uint_t tmp;
2127 
2128 				tmp = (cp->cp_ecx >> (8 * i)) & 0xff;
2129 				if (tmp == 0x40)
2130 					celeron++;
2131 				else if (tmp >= 0x44 && tmp <= 0x45)
2132 					xeon++;
2133 			}
2134 
2135 			for (i = 0; i < 4; i++) {
2136 				uint_t tmp;
2137 
2138 				tmp = (cp->cp_edx >> (8 * i)) & 0xff;
2139 				if (tmp == 0x40)
2140 					celeron++;
2141 				else if (tmp >= 0x44 && tmp <= 0x45)
2142 					xeon++;
2143 			}
2144 
2145 			if (celeron)
2146 				return ("Intel Celeron(r)");
2147 			if (xeon)
2148 				return (cpi->cpi_model == 5 ?
2149 				    "Intel Pentium(r) II Xeon(tm)" :
2150 				    "Intel Pentium(r) III Xeon(tm)");
2151 			return (cpi->cpi_model == 5 ?
2152 			    "Intel Pentium(r) II or Pentium(r) II Xeon(tm)" :
2153 			    "Intel Pentium(r) III or Pentium(r) III Xeon(tm)");
2154 		default:
2155 			break;
2156 		}
2157 	default:
2158 		break;
2159 	}
2160 
2161 	/* BrandID is present if the field is nonzero */
2162 	if (cpi->cpi_brandid != 0) {
2163 		static const struct {
2164 			uint_t bt_bid;
2165 			const char *bt_str;
2166 		} brand_tbl[] = {
2167 			{ 0x1,	"Intel(r) Celeron(r)" },
2168 			{ 0x2,	"Intel(r) Pentium(r) III" },
2169 			{ 0x3,	"Intel(r) Pentium(r) III Xeon(tm)" },
2170 			{ 0x4,	"Intel(r) Pentium(r) III" },
2171 			{ 0x6,	"Mobile Intel(r) Pentium(r) III" },
2172 			{ 0x7,	"Mobile Intel(r) Celeron(r)" },
2173 			{ 0x8,	"Intel(r) Pentium(r) 4" },
2174 			{ 0x9,	"Intel(r) Pentium(r) 4" },
2175 			{ 0xa,	"Intel(r) Celeron(r)" },
2176 			{ 0xb,	"Intel(r) Xeon(tm)" },
2177 			{ 0xc,	"Intel(r) Xeon(tm) MP" },
2178 			{ 0xe,	"Mobile Intel(r) Pentium(r) 4" },
2179 			{ 0xf,	"Mobile Intel(r) Celeron(r)" },
2180 			{ 0x11, "Mobile Genuine Intel(r)" },
2181 			{ 0x12, "Intel(r) Celeron(r) M" },
2182 			{ 0x13, "Mobile Intel(r) Celeron(r)" },
2183 			{ 0x14, "Intel(r) Celeron(r)" },
2184 			{ 0x15, "Mobile Genuine Intel(r)" },
2185 			{ 0x16,	"Intel(r) Pentium(r) M" },
2186 			{ 0x17, "Mobile Intel(r) Celeron(r)" }
2187 		};
2188 		uint_t btblmax = sizeof (brand_tbl) / sizeof (brand_tbl[0]);
2189 		uint_t sgn;
2190 
2191 		sgn = (cpi->cpi_family << 8) |
2192 		    (cpi->cpi_model << 4) | cpi->cpi_step;
2193 
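		/*
		 * A few brand ids are ambiguous; those are further
		 * qualified against the processor signature below before
		 * the table entry is returned.
		 */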
2194 		for (i = 0; i < btblmax; i++)
2195 			if (brand_tbl[i].bt_bid == cpi->cpi_brandid)
2196 				break;
2197 		if (i < btblmax) {
2198 			if (sgn == 0x6b1 && cpi->cpi_brandid == 3)
2199 				return ("Intel(r) Celeron(r)");
2200 			if (sgn < 0xf13 && cpi->cpi_brandid == 0xb)
2201 				return ("Intel(r) Xeon(tm) MP");
2202 			if (sgn < 0xf13 && cpi->cpi_brandid == 0xe)
2203 				return ("Intel(r) Xeon(tm)");
2204 			return (brand_tbl[i].bt_str);
2205 		}
2206 	}
2207 
2208 	return (NULL);
2209 }
2210 
2211 static const char *
2212 amd_cpubrand(const struct cpuid_info *cpi)
2213 {
2214 	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
2215 	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5)
2216 		return ("i486 compatible");
2217 
2218 	switch (cpi->cpi_family) {
2219 	case 5:
2220 		switch (cpi->cpi_model) {
2221 		case 0:
2222 		case 1:
2223 		case 2:
2224 		case 3:
2225 		case 4:
2226 		case 5:
2227 			return ("AMD-K5(r)");
2228 		case 6:
2229 		case 7:
2230 			return ("AMD-K6(r)");
2231 		case 8:
2232 			return ("AMD-K6(r)-2");
2233 		case 9:
2234 			return ("AMD-K6(r)-III");
2235 		default:
2236 			return ("AMD (family 5)");
2237 		}
2238 	case 6:
2239 		switch (cpi->cpi_model) {
2240 		case 1:
2241 			return ("AMD-K7(tm)");
2242 		case 0:
2243 		case 2:
2244 		case 4:
2245 			return ("AMD Athlon(tm)");
2246 		case 3:
2247 		case 7:
2248 			return ("AMD Duron(tm)");
2249 		case 6:
2250 		case 8:
2251 		case 10:
2252 			/*
2253 			 * Use the L2 cache size to distinguish
2254 			 */
2255 			return ((cpi->cpi_extd[6].cp_ecx >> 16) >= 256 ?
2256 			    "AMD Athlon(tm)" : "AMD Duron(tm)");
2257 		default:
2258 			return ("AMD (family 6)");
2259 		}
2260 	default:
2261 		break;
2262 	}
2263 
2264 	if (cpi->cpi_family == 0xf && cpi->cpi_model == 5 &&
2265 	    cpi->cpi_brandid != 0) {
2266 		switch (BITX(cpi->cpi_brandid, 7, 5)) {
2267 		case 3:
2268 			return ("AMD Opteron(tm) UP 1xx");
2269 		case 4:
2270 			return ("AMD Opteron(tm) DP 2xx");
2271 		case 5:
2272 			return ("AMD Opteron(tm) MP 8xx");
2273 		default:
2274 			return ("AMD Opteron(tm)");
2275 		}
2276 	}
2277 
2278 	return (NULL);
2279 }
2280 
2281 static const char *
2282 cyrix_cpubrand(struct cpuid_info *cpi, uint_t type)
2283 {
2284 	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
2285 	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5 ||
2286 	    type == X86_TYPE_CYRIX_486)
2287 		return ("i486 compatible");
2288 
2289 	switch (type) {
2290 	case X86_TYPE_CYRIX_6x86:
2291 		return ("Cyrix 6x86");
2292 	case X86_TYPE_CYRIX_6x86L:
2293 		return ("Cyrix 6x86L");
2294 	case X86_TYPE_CYRIX_6x86MX:
2295 		return ("Cyrix 6x86MX");
2296 	case X86_TYPE_CYRIX_GXm:
2297 		return ("Cyrix GXm");
2298 	case X86_TYPE_CYRIX_MediaGX:
2299 		return ("Cyrix MediaGX");
2300 	case X86_TYPE_CYRIX_MII:
2301 		return ("Cyrix M2");
2302 	case X86_TYPE_VIA_CYRIX_III:
2303 		return ("VIA Cyrix M3");
2304 	default:
2305 		/*
2306 		 * Have another wild guess ..
2307 		 */
2308 		if (cpi->cpi_family == 4 && cpi->cpi_model == 9)
2309 			return ("Cyrix 5x86");
2310 		else if (cpi->cpi_family == 5) {
2311 			switch (cpi->cpi_model) {
2312 			case 2:
2313 				return ("Cyrix 6x86");	/* Cyrix M1 */
2314 			case 4:
2315 				return ("Cyrix MediaGX");
2316 			default:
2317 				break;
2318 			}
2319 		} else if (cpi->cpi_family == 6) {
2320 			switch (cpi->cpi_model) {
2321 			case 0:
2322 				return ("Cyrix 6x86MX"); /* Cyrix M2? */
2323 			case 5:
2324 			case 6:
2325 			case 7:
2326 			case 8:
2327 			case 9:
2328 				return ("VIA C3");
2329 			default:
2330 				break;
2331 			}
2332 		}
2333 		break;
2334 	}
2335 	return (NULL);
2336 }
2337 
2338 /*
2339  * This only gets called in the case that the CPU extended
2340  * feature brand string leaves (0x80000002, 0x80000003, 0x80000004)
2341  * aren't available, or contain null bytes for some reason.
2342  */
2343 static void
2344 fabricate_brandstr(struct cpuid_info *cpi)
2345 {
2346 	const char *brand = NULL;
2347 
2348 	switch (cpi->cpi_vendor) {
2349 	case X86_VENDOR_Intel:
2350 		brand = intel_cpubrand(cpi);
2351 		break;
2352 	case X86_VENDOR_AMD:
2353 		brand = amd_cpubrand(cpi);
2354 		break;
2355 	case X86_VENDOR_Cyrix:
2356 		brand = cyrix_cpubrand(cpi, x86_type);
2357 		break;
2358 	case X86_VENDOR_NexGen:
2359 		if (cpi->cpi_family == 5 && cpi->cpi_model == 0)
2360 			brand = "NexGen Nx586";
2361 		break;
2362 	case X86_VENDOR_Centaur:
2363 		if (cpi->cpi_family == 5)
2364 			switch (cpi->cpi_model) {
2365 			case 4:
2366 				brand = "Centaur C6";
2367 				break;
2368 			case 8:
2369 				brand = "Centaur C2";
2370 				break;
2371 			case 9:
2372 				brand = "Centaur C3";
2373 				break;
2374 			default:
2375 				break;
2376 			}
2377 		break;
2378 	case X86_VENDOR_Rise:
2379 		if (cpi->cpi_family == 5 &&
2380 		    (cpi->cpi_model == 0 || cpi->cpi_model == 2))
2381 			brand = "Rise mP6";
2382 		break;
2383 	case X86_VENDOR_SiS:
2384 		if (cpi->cpi_family == 5 && cpi->cpi_model == 0)
2385 			brand = "SiS 55x";
2386 		break;
2387 	case X86_VENDOR_TM:
2388 		if (cpi->cpi_family == 5 && cpi->cpi_model == 4)
2389 			brand = "Transmeta Crusoe TM3x00 or TM5x00";
2390 		break;
2391 	case X86_VENDOR_NSC:
2392 	case X86_VENDOR_UMC:
2393 	default:
2394 		break;
2395 	}
2396 	if (brand) {
2397 		(void) strcpy((char *)cpi->cpi_brandstr, brand);
2398 		return;
2399 	}
2400 
2401 	/*
2402 	 * If all else fails ...
2403 	 */
2404 	(void) snprintf(cpi->cpi_brandstr, sizeof (cpi->cpi_brandstr),
2405 	    "%s %d.%d.%d", cpi->cpi_vendorstr, cpi->cpi_family,
2406 	    cpi->cpi_model, cpi->cpi_step);
2407 }
2408 
2409 /*
2410  * This routine is called just after kernel memory allocation
2411  * becomes available on cpu0, and as part of mp_startup() on
2412  * the other cpus.
2413  *
2414  * Fixup the brand string, and collect any information from cpuid
2415  * that requires dynamically allocated storage to represent.
2416  */
2417 /*ARGSUSED*/
2418 void
2419 cpuid_pass3(cpu_t *cpu)
2420 {
2421 	int	i, max, shft, level, size;
2422 	struct cpuid_regs regs;
2423 	struct cpuid_regs *cp;
2424 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
2425 
2426 	ASSERT(cpi->cpi_pass == 2);
2427 
2428 	/*
2429 	 * Function 4: Deterministic cache parameters
2430 	 *
2431 	 * Take this opportunity to detect the number of threads
2432 	 * sharing the last level cache, and construct a corresponding
2433 	 * cache id. The respective cpuid_info members are initialized
2434 	 * to the default case of "no last level cache sharing".
2435 	 */
2436 	cpi->cpi_ncpu_shr_last_cache = 1;
2437 	cpi->cpi_last_lvl_cacheid = cpu->cpu_id;
2438 
2439 	if (cpi->cpi_maxeax >= 4 && cpi->cpi_vendor == X86_VENDOR_Intel) {
2440 
2441 		/*
2442 		 * Find the # of elements (size) returned by fn 4, and along
2443 		 * the way detect last level cache sharing details.
2444 		 */
2445 		bzero(&regs, sizeof (regs));
2446 		cp = &regs;
2447 		for (i = 0, max = 0; i < CPI_FN4_ECX_MAX; i++) {
2448 			cp->cp_eax = 4;
2449 			cp->cp_ecx = i;
2450 
2451 			(void) __cpuid_insn(cp);
2452 
2453 			if (CPI_CACHE_TYPE(cp) == 0)
2454 				break;
2455 			level = CPI_CACHE_LVL(cp);
2456 			if (level > max) {
2457 				max = level;
2458 				cpi->cpi_ncpu_shr_last_cache =
2459 				    CPI_NTHR_SHR_CACHE(cp) + 1;
2460 			}
2461 		}
2462 		cpi->cpi_std_4_size = size = i;
2463 
2464 		/*
2465 		 * Allocate the cpi_std_4 array. The first element
2466 		 * references the regs for fn 4, %ecx == 0, which
2467 		 * cpuid_pass2() stashed in cpi->cpi_std[4].
2468 		 */
2469 		if (size > 0) {
2470 			cpi->cpi_std_4 =
2471 			    kmem_alloc(size * sizeof (cp), KM_SLEEP);
2472 			cpi->cpi_std_4[0] = &cpi->cpi_std[4];
2473 
2474 			/*
2475 			 * Allocate storage to hold the additional regs
2476 			 * for function 4, %ecx == 1 .. cpi_std_4_size.
2477 			 *
2478 			 * The regs for fn 4, %ecx == 0 has already
2479 			 * The regs for fn 4, %ecx == 0 have already
2480 			 * been allocated as indicated above.
2481 			for (i = 1; i < size; i++) {
2482 				cp = cpi->cpi_std_4[i] =
2483 				    kmem_zalloc(sizeof (regs), KM_SLEEP);
2484 				cp->cp_eax = 4;
2485 				cp->cp_ecx = i;
2486 
2487 				(void) __cpuid_insn(cp);
2488 			}
2489 		}
2490 		/*
2491 		 * Determine the number of bits needed to represent
2492 		 * the number of CPUs sharing the last level cache.
2493 		 *
2494 		 * Shift off that number of bits from the APIC id to
2495 		 * derive the cache id.
2496 		 */
2497 		shft = 0;
2498 		for (i = 1; i < cpi->cpi_ncpu_shr_last_cache; i <<= 1)
2499 			shft++;
2500 		cpi->cpi_last_lvl_cacheid = cpi->cpi_apicid >> shft;
2501 	}
2502 
2503 	/*
2504 	 * Now fixup the brand string
2505 	 */
2506 	if ((cpi->cpi_xmaxeax & 0x80000000) == 0) {
2507 		fabricate_brandstr(cpi);
2508 	} else {
2509 
2510 		/*
2511 		 * If we successfully extracted a brand string from the cpuid
2512 		 * instruction, clean it up by removing leading spaces and
2513 		 * similar junk.
2514 		 */
2515 		if (cpi->cpi_brandstr[0]) {
2516 			size_t maxlen = sizeof (cpi->cpi_brandstr);
2517 			char *src, *dst;
2518 
2519 			dst = src = (char *)cpi->cpi_brandstr;
2520 			src[maxlen - 1] = '\0';
2521 			/*
2522 			 * strip leading spaces
2523 			 */
2524 			while (*src == ' ')
2525 				src++;
2526 			/*
2527 			 * Remove any "Genuine" or "Authentic" prefixes
2528 			 */
2529 			if (strncmp(src, "Genuine ", 8) == 0)
2530 				src += 8;
2531 			if (strncmp(src, "Authentic ", 10) == 0)
2532 				src += 10;
2533 
2534 			/*
2535 			 * Now do an in-place copy.
2536 			 * Map (R) to (r) and (TM) to (tm).
2537 			 * The era of teletypes is long gone, and there's
2538 			 * -really- no need to shout.
2539 			 */
2540 			while (*src != '\0') {
2541 				if (src[0] == '(') {
2542 					if (strncmp(src + 1, "R)", 2) == 0) {
2543 						(void) strncpy(dst, "(r)", 3);
2544 						src += 3;
2545 						dst += 3;
2546 						continue;
2547 					}
2548 					if (strncmp(src + 1, "TM)", 3) == 0) {
2549 						(void) strncpy(dst, "(tm)", 4);
2550 						src += 4;
2551 						dst += 4;
2552 						continue;
2553 					}
2554 				}
2555 				*dst++ = *src++;
2556 			}
2557 			*dst = '\0';
2558 
2559 			/*
2560 			 * Finally, remove any trailing spaces
2561 			 */
2562 			while (--dst > cpi->cpi_brandstr)
2563 				if (*dst == ' ')
2564 					*dst = '\0';
2565 				else
2566 					break;
2567 		} else
2568 			fabricate_brandstr(cpi);
2569 	}
2570 	cpi->cpi_pass = 3;
2571 }
2572 
2573 /*
2574  * This routine is called out of bind_hwcap() much later in the life
2575  * of the kernel (post_startup()).  The job of this routine is to resolve
2576  * the hardware feature support and kernel support for those features into
2577  * what we're actually going to tell applications via the aux vector.
2578  */
2579 void
2580 cpuid_pass4(cpu_t *cpu, uint_t *hwcap_out)
2581 {
2582 	struct cpuid_info *cpi;
2583 	uint_t hwcap_flags = 0, hwcap_flags_2 = 0;
2584 
2585 	if (cpu == NULL)
2586 		cpu = CPU;
2587 	cpi = cpu->cpu_m.mcpu_cpi;
2588 
2589 	ASSERT(cpi->cpi_pass == 3);
2590 
2591 	if (cpi->cpi_maxeax >= 1) {
2592 		uint32_t *edx = &cpi->cpi_support[STD_EDX_FEATURES];
2593 		uint32_t *ecx = &cpi->cpi_support[STD_ECX_FEATURES];
2594 
2595 		*edx = CPI_FEATURES_EDX(cpi);
2596 		*ecx = CPI_FEATURES_ECX(cpi);
2597 
2598 		/*
2599 		 * [these require explicit kernel support]
2600 		 */
2601 		if (!is_x86_feature(x86_featureset, X86FSET_SEP))
2602 			*edx &= ~CPUID_INTC_EDX_SEP;
2603 
2604 		if (!is_x86_feature(x86_featureset, X86FSET_SSE))
2605 			*edx &= ~(CPUID_INTC_EDX_FXSR|CPUID_INTC_EDX_SSE);
2606 		if (!is_x86_feature(x86_featureset, X86FSET_SSE2))
2607 			*edx &= ~CPUID_INTC_EDX_SSE2;
2608 
2609 		if (!is_x86_feature(x86_featureset, X86FSET_HTT))
2610 			*edx &= ~CPUID_INTC_EDX_HTT;
2611 
2612 		if (!is_x86_feature(x86_featureset, X86FSET_SSE3))
2613 			*ecx &= ~CPUID_INTC_ECX_SSE3;
2614 
2615 		if (!is_x86_feature(x86_featureset, X86FSET_SSSE3))
2616 			*ecx &= ~CPUID_INTC_ECX_SSSE3;
2617 		if (!is_x86_feature(x86_featureset, X86FSET_SSE4_1))
2618 			*ecx &= ~CPUID_INTC_ECX_SSE4_1;
2619 		if (!is_x86_feature(x86_featureset, X86FSET_SSE4_2))
2620 			*ecx &= ~CPUID_INTC_ECX_SSE4_2;
2621 		if (!is_x86_feature(x86_featureset, X86FSET_AES))
2622 			*ecx &= ~CPUID_INTC_ECX_AES;
2623 		if (!is_x86_feature(x86_featureset, X86FSET_PCLMULQDQ))
2624 			*ecx &= ~CPUID_INTC_ECX_PCLMULQDQ;
2625 		if (!is_x86_feature(x86_featureset, X86FSET_XSAVE))
2626 			*ecx &= ~(CPUID_INTC_ECX_XSAVE |
2627 			    CPUID_INTC_ECX_OSXSAVE);
2628 		if (!is_x86_feature(x86_featureset, X86FSET_AVX))
2629 			*ecx &= ~CPUID_INTC_ECX_AVX;
2630 		if (!is_x86_feature(x86_featureset, X86FSET_F16C))
2631 			*ecx &= ~CPUID_INTC_ECX_F16C;
2632 
2633 		/*
2634 		 * [no explicit support required beyond x87 fp context]
2635 		 */
2636 		if (!fpu_exists)
2637 			*edx &= ~(CPUID_INTC_EDX_FPU | CPUID_INTC_EDX_MMX);
2638 
2639 		/*
2640 		 * Now map the supported feature vector to things that we
2641 		 * think userland will care about.
2642 		 */
2643 		if (*edx & CPUID_INTC_EDX_SEP)
2644 			hwcap_flags |= AV_386_SEP;
2645 		if (*edx & CPUID_INTC_EDX_SSE)
2646 			hwcap_flags |= AV_386_FXSR | AV_386_SSE;
2647 		if (*edx & CPUID_INTC_EDX_SSE2)
2648 			hwcap_flags |= AV_386_SSE2;
2649 		if (*ecx & CPUID_INTC_ECX_SSE3)
2650 			hwcap_flags |= AV_386_SSE3;
2651 		if (*ecx & CPUID_INTC_ECX_SSSE3)
2652 			hwcap_flags |= AV_386_SSSE3;
2653 		if (*ecx & CPUID_INTC_ECX_SSE4_1)
2654 			hwcap_flags |= AV_386_SSE4_1;
2655 		if (*ecx & CPUID_INTC_ECX_SSE4_2)
2656 			hwcap_flags |= AV_386_SSE4_2;
2657 		if (*ecx & CPUID_INTC_ECX_MOVBE)
2658 			hwcap_flags |= AV_386_MOVBE;
2659 		if (*ecx & CPUID_INTC_ECX_AES)
2660 			hwcap_flags |= AV_386_AES;
2661 		if (*ecx & CPUID_INTC_ECX_PCLMULQDQ)
2662 			hwcap_flags |= AV_386_PCLMULQDQ;
2663 		if ((*ecx & CPUID_INTC_ECX_XSAVE) &&
2664 		    (*ecx & CPUID_INTC_ECX_OSXSAVE)) {
2665 			hwcap_flags |= AV_386_XSAVE;
2666 
2667 			if (*ecx & CPUID_INTC_ECX_AVX) {
2668 				hwcap_flags |= AV_386_AVX;
2669 				if (*ecx & CPUID_INTC_ECX_F16C)
2670 					hwcap_flags_2 |= AV_386_2_F16C;
2671 			}
2672 		}
2673 		if (*ecx & CPUID_INTC_ECX_VMX)
2674 			hwcap_flags |= AV_386_VMX;
2675 		if (*ecx & CPUID_INTC_ECX_POPCNT)
2676 			hwcap_flags |= AV_386_POPCNT;
2677 		if (*edx & CPUID_INTC_EDX_FPU)
2678 			hwcap_flags |= AV_386_FPU;
2679 		if (*edx & CPUID_INTC_EDX_MMX)
2680 			hwcap_flags |= AV_386_MMX;
2681 
2682 		if (*edx & CPUID_INTC_EDX_TSC)
2683 			hwcap_flags |= AV_386_TSC;
2684 		if (*edx & CPUID_INTC_EDX_CX8)
2685 			hwcap_flags |= AV_386_CX8;
2686 		if (*edx & CPUID_INTC_EDX_CMOV)
2687 			hwcap_flags |= AV_386_CMOV;
2688 		if (*ecx & CPUID_INTC_ECX_CX16)
2689 			hwcap_flags |= AV_386_CX16;
2690 
2691 		if (*ecx & CPUID_INTC_ECX_RDRAND)
2692 			hwcap_flags_2 |= AV_386_2_RDRAND;
2693 	}
2694 
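	/*
	 * Fold in the extended (0x80000001) feature words, which need
	 * some vendor-specific interpretation.
	 */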
2695 	if (cpi->cpi_xmaxeax < 0x80000001)
2696 		goto pass4_done;
2697 
2698 	switch (cpi->cpi_vendor) {
2699 		struct cpuid_regs cp;
2700 		uint32_t *edx, *ecx;
2701 
2702 	case X86_VENDOR_Intel:
2703 		/*
2704 		 * Seems like Intel duplicated what was necessary
2705 		 * here to make the initial crop of 64-bit OS's work.
2706 		 * Hopefully, those are the only "extended" bits
2707 		 * they'll add.
2708 		 */
2709 		/*FALLTHROUGH*/
2710 
2711 	case X86_VENDOR_AMD:
2712 		edx = &cpi->cpi_support[AMD_EDX_FEATURES];
2713 		ecx = &cpi->cpi_support[AMD_ECX_FEATURES];
2714 
2715 		*edx = CPI_FEATURES_XTD_EDX(cpi);
2716 		*ecx = CPI_FEATURES_XTD_ECX(cpi);
2717 
2718 		/*
2719 		 * [these features require explicit kernel support]
2720 		 */
2721 		switch (cpi->cpi_vendor) {
2722 		case X86_VENDOR_Intel:
2723 			if (!is_x86_feature(x86_featureset, X86FSET_TSCP))
2724 				*edx &= ~CPUID_AMD_EDX_TSCP;
2725 			break;
2726 
2727 		case X86_VENDOR_AMD:
2728 			if (!is_x86_feature(x86_featureset, X86FSET_TSCP))
2729 				*edx &= ~CPUID_AMD_EDX_TSCP;
2730 			if (!is_x86_feature(x86_featureset, X86FSET_SSE4A))
2731 				*ecx &= ~CPUID_AMD_ECX_SSE4A;
2732 			break;
2733 
2734 		default:
2735 			break;
2736 		}
2737 
2738 		/*
2739 		 * [no explicit support required beyond
2740 		 * x87 fp context and exception handlers]
2741 		 */
2742 		if (!fpu_exists)
2743 			*edx &= ~(CPUID_AMD_EDX_MMXamd |
2744 			    CPUID_AMD_EDX_3DNow | CPUID_AMD_EDX_3DNowx);
2745 
2746 		if (!is_x86_feature(x86_featureset, X86FSET_NX))
2747 			*edx &= ~CPUID_AMD_EDX_NX;
2748 #if !defined(__amd64)
2749 		*edx &= ~CPUID_AMD_EDX_LM;
2750 #endif
2751 		/*
2752 		 * Now map the supported feature vector to
2753 		 * things that we think userland will care about.
2754 		 */
2755 #if defined(__amd64)
2756 		if (*edx & CPUID_AMD_EDX_SYSC)
2757 			hwcap_flags |= AV_386_AMD_SYSC;
2758 #endif
2759 		if (*edx & CPUID_AMD_EDX_MMXamd)
2760 			hwcap_flags |= AV_386_AMD_MMX;
2761 		if (*edx & CPUID_AMD_EDX_3DNow)
2762 			hwcap_flags |= AV_386_AMD_3DNow;
2763 		if (*edx & CPUID_AMD_EDX_3DNowx)
2764 			hwcap_flags |= AV_386_AMD_3DNowx;
2765 		if (*ecx & CPUID_AMD_ECX_SVM)
2766 			hwcap_flags |= AV_386_AMD_SVM;
2767 
2768 		switch (cpi->cpi_vendor) {
2769 		case X86_VENDOR_AMD:
2770 			if (*edx & CPUID_AMD_EDX_TSCP)
2771 				hwcap_flags |= AV_386_TSCP;
2772 			if (*ecx & CPUID_AMD_ECX_AHF64)
2773 				hwcap_flags |= AV_386_AHF;
2774 			if (*ecx & CPUID_AMD_ECX_SSE4A)
2775 				hwcap_flags |= AV_386_AMD_SSE4A;
2776 			if (*ecx & CPUID_AMD_ECX_LZCNT)
2777 				hwcap_flags |= AV_386_AMD_LZCNT;
2778 			break;
2779 
2780 		case X86_VENDOR_Intel:
2781 			if (*edx & CPUID_AMD_EDX_TSCP)
2782 				hwcap_flags |= AV_386_TSCP;
2783 			/*
2784 			 * Aarrgh.
2785 			 * Intel uses a different bit in the same word.
2786 			 */
2787 			if (*ecx & CPUID_INTC_ECX_AHF64)
2788 				hwcap_flags |= AV_386_AHF;
2789 			break;
2790 
2791 		default:
2792 			break;
2793 		}
2794 		break;
2795 
2796 	case X86_VENDOR_TM:
2797 		cp.cp_eax = 0x80860001;
2798 		(void) __cpuid_insn(&cp);
2799 		cpi->cpi_support[TM_EDX_FEATURES] = cp.cp_edx;
2800 		break;
2801 
2802 	default:
2803 		break;
2804 	}
2805 
2806 pass4_done:
2807 	cpi->cpi_pass = 4;
2808 	if (hwcap_out != NULL) {
2809 		hwcap_out[0] = hwcap_flags;
2810 		hwcap_out[1] = hwcap_flags_2;
2811 	}
2812 }
2813 
2814 
2815 /*
2816  * Simulate the cpuid instruction using the data we previously
2817  * captured about this CPU.  We try our best to return the truth
2818  * about the hardware, independently of kernel support.
2819  */
2820 uint32_t
2821 cpuid_insn(cpu_t *cpu, struct cpuid_regs *cp)
2822 {
2823 	struct cpuid_info *cpi;
2824 	struct cpuid_regs *xcp;
2825 
2826 	if (cpu == NULL)
2827 		cpu = CPU;
2828 	cpi = cpu->cpu_m.mcpu_cpi;
2829 
2830 	ASSERT(cpuid_checkpass(cpu, 3));
2831 
2832 	/*
2833 	 * CPUID data is cached in two separate places: cpi_std for standard
2834 	 * CPUID functions, and cpi_extd for extended CPUID functions.
2835 	 */
2836 	if (cp->cp_eax <= cpi->cpi_maxeax && cp->cp_eax < NMAX_CPI_STD)
2837 		xcp = &cpi->cpi_std[cp->cp_eax];
2838 	else if (cp->cp_eax >= 0x80000000 && cp->cp_eax <= cpi->cpi_xmaxeax &&
2839 	    cp->cp_eax < 0x80000000 + NMAX_CPI_EXTD)
2840 		xcp = &cpi->cpi_extd[cp->cp_eax - 0x80000000];
2841 	else
2842 		/*
2843 		 * The caller is asking for data from an input parameter which
2844 		 * the kernel has not cached.  In this case we go fetch from
2845 		 * the hardware and return the data directly to the user.
2846 		 */
2847 		return (__cpuid_insn(cp));
2848 
2849 	cp->cp_eax = xcp->cp_eax;
2850 	cp->cp_ebx = xcp->cp_ebx;
2851 	cp->cp_ecx = xcp->cp_ecx;
2852 	cp->cp_edx = xcp->cp_edx;
2853 	return (cp->cp_eax);
2854 }
2855 
2856 int
2857 cpuid_checkpass(cpu_t *cpu, int pass)
2858 {
2859 	return (cpu != NULL && cpu->cpu_m.mcpu_cpi != NULL &&
2860 	    cpu->cpu_m.mcpu_cpi->cpi_pass >= pass);
2861 }
2862 
2863 int
2864 cpuid_getbrandstr(cpu_t *cpu, char *s, size_t n)
2865 {
2866 	ASSERT(cpuid_checkpass(cpu, 3));
2867 
2868 	return (snprintf(s, n, "%s", cpu->cpu_m.mcpu_cpi->cpi_brandstr));
2869 }
2870 
2871 int
2872 cpuid_is_cmt(cpu_t *cpu)
2873 {
2874 	if (cpu == NULL)
2875 		cpu = CPU;
2876 
2877 	ASSERT(cpuid_checkpass(cpu, 1));
2878 
2879 	return (cpu->cpu_m.mcpu_cpi->cpi_chipid >= 0);
2880 }
2881 
2882 /*
2883  * AMD and Intel both implement the 64-bit variant of the syscall
2884  * instruction (syscallq), so if there's -any- support for syscall,
2885  * cpuid currently says "yes, we support this".
2886  *
2887  * However, Intel decided to -not- implement the 32-bit variant of the
2888  * syscall instruction, so we provide a predicate to allow our caller
2889  * to test that subtlety here.
2890  *
2891  * XXPV	Currently, 32-bit syscall instructions don't work via the hypervisor,
2892  *	even in the case where the hardware would in fact support it.
2893  */
2894 /*ARGSUSED*/
2895 int
2896 cpuid_syscall32_insn(cpu_t *cpu)
2897 {
2898 	ASSERT(cpuid_checkpass((cpu == NULL ? CPU : cpu), 1));
2899 
2900 #if !defined(__xpv)
2901 	if (cpu == NULL)
2902 		cpu = CPU;
2903 
2904 	/*CSTYLED*/
2905 	{
2906 		struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
2907 
2908 		if (cpi->cpi_vendor == X86_VENDOR_AMD &&
2909 		    cpi->cpi_xmaxeax >= 0x80000001 &&
2910 		    (CPI_FEATURES_XTD_EDX(cpi) & CPUID_AMD_EDX_SYSC))
2911 			return (1);
2912 	}
2913 #endif
2914 	return (0);
2915 }
2916 
2917 int
2918 cpuid_getidstr(cpu_t *cpu, char *s, size_t n)
2919 {
2920 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
2921 
2922 	static const char fmt[] =
2923 	    "x86 (%s %X family %d model %d step %d clock %d MHz)";
2924 	static const char fmt_ht[] =
2925 	    "x86 (chipid 0x%x %s %X family %d model %d step %d clock %d MHz)";
2926 
2927 	ASSERT(cpuid_checkpass(cpu, 1));
2928 
2929 	if (cpuid_is_cmt(cpu))
2930 		return (snprintf(s, n, fmt_ht, cpi->cpi_chipid,
2931 		    cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax,
2932 		    cpi->cpi_family, cpi->cpi_model,
2933 		    cpi->cpi_step, cpu->cpu_type_info.pi_clock));
2934 	return (snprintf(s, n, fmt,
2935 	    cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax,
2936 	    cpi->cpi_family, cpi->cpi_model,
2937 	    cpi->cpi_step, cpu->cpu_type_info.pi_clock));
2938 }
2939 
2940 const char *
2941 cpuid_getvendorstr(cpu_t *cpu)
2942 {
2943 	ASSERT(cpuid_checkpass(cpu, 1));
2944 	return ((const char *)cpu->cpu_m.mcpu_cpi->cpi_vendorstr);
2945 }
2946 
2947 uint_t
2948 cpuid_getvendor(cpu_t *cpu)
2949 {
2950 	ASSERT(cpuid_checkpass(cpu, 1));
2951 	return (cpu->cpu_m.mcpu_cpi->cpi_vendor);
2952 }
2953 
2954 uint_t
2955 cpuid_getfamily(cpu_t *cpu)
2956 {
2957 	ASSERT(cpuid_checkpass(cpu, 1));
2958 	return (cpu->cpu_m.mcpu_cpi->cpi_family);
2959 }
2960 
2961 uint_t
2962 cpuid_getmodel(cpu_t *cpu)
2963 {
2964 	ASSERT(cpuid_checkpass(cpu, 1));
2965 	return (cpu->cpu_m.mcpu_cpi->cpi_model);
2966 }
2967 
2968 uint_t
2969 cpuid_get_ncpu_per_chip(cpu_t *cpu)
2970 {
2971 	ASSERT(cpuid_checkpass(cpu, 1));
2972 	return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_per_chip);
2973 }
2974 
2975 uint_t
2976 cpuid_get_ncore_per_chip(cpu_t *cpu)
2977 {
2978 	ASSERT(cpuid_checkpass(cpu, 1));
2979 	return (cpu->cpu_m.mcpu_cpi->cpi_ncore_per_chip);
2980 }
2981 
2982 uint_t
2983 cpuid_get_ncpu_sharing_last_cache(cpu_t *cpu)
2984 {
2985 	ASSERT(cpuid_checkpass(cpu, 2));
2986 	return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_shr_last_cache);
2987 }
2988 
2989 id_t
2990 cpuid_get_last_lvl_cacheid(cpu_t *cpu)
2991 {
2992 	ASSERT(cpuid_checkpass(cpu, 2));
2993 	return (cpu->cpu_m.mcpu_cpi->cpi_last_lvl_cacheid);
2994 }
2995 
2996 uint_t
2997 cpuid_getstep(cpu_t *cpu)
2998 {
2999 	ASSERT(cpuid_checkpass(cpu, 1));
3000 	return (cpu->cpu_m.mcpu_cpi->cpi_step);
3001 }
3002 
3003 uint_t
3004 cpuid_getsig(struct cpu *cpu)
3005 {
3006 	ASSERT(cpuid_checkpass(cpu, 1));
3007 	return (cpu->cpu_m.mcpu_cpi->cpi_std[1].cp_eax);
3008 }
3009 
3010 uint32_t
3011 cpuid_getchiprev(struct cpu *cpu)
3012 {
3013 	ASSERT(cpuid_checkpass(cpu, 1));
3014 	return (cpu->cpu_m.mcpu_cpi->cpi_chiprev);
3015 }
3016 
3017 const char *
3018 cpuid_getchiprevstr(struct cpu *cpu)
3019 {
3020 	ASSERT(cpuid_checkpass(cpu, 1));
3021 	return (cpu->cpu_m.mcpu_cpi->cpi_chiprevstr);
3022 }
3023 
3024 uint32_t
3025 cpuid_getsockettype(struct cpu *cpu)
3026 {
3027 	ASSERT(cpuid_checkpass(cpu, 1));
3028 	return (cpu->cpu_m.mcpu_cpi->cpi_socket);
3029 }
3030 
3031 const char *
3032 cpuid_getsocketstr(cpu_t *cpu)
3033 {
3034 	static const char *socketstr = NULL;
3035 	struct cpuid_info *cpi;
3036 
3037 	ASSERT(cpuid_checkpass(cpu, 1));
3038 	cpi = cpu->cpu_m.mcpu_cpi;
3039 
3040 	/* Assume that socket types are the same across the system */
3041 	if (socketstr == NULL)
3042 		socketstr = _cpuid_sktstr(cpi->cpi_vendor, cpi->cpi_family,
3043 		    cpi->cpi_model, cpi->cpi_step);
3044 
3045 
3046 	return (socketstr);
3047 }
3048 
3049 int
3050 cpuid_get_chipid(cpu_t *cpu)
3051 {
3052 	ASSERT(cpuid_checkpass(cpu, 1));
3053 
3054 	if (cpuid_is_cmt(cpu))
3055 		return (cpu->cpu_m.mcpu_cpi->cpi_chipid);
3056 	return (cpu->cpu_id);
3057 }
3058 
3059 id_t
3060 cpuid_get_coreid(cpu_t *cpu)
3061 {
3062 	ASSERT(cpuid_checkpass(cpu, 1));
3063 	return (cpu->cpu_m.mcpu_cpi->cpi_coreid);
3064 }
3065 
3066 int
3067 cpuid_get_pkgcoreid(cpu_t *cpu)
3068 {
3069 	ASSERT(cpuid_checkpass(cpu, 1));
3070 	return (cpu->cpu_m.mcpu_cpi->cpi_pkgcoreid);
3071 }
3072 
3073 int
3074 cpuid_get_clogid(cpu_t *cpu)
3075 {
3076 	ASSERT(cpuid_checkpass(cpu, 1));
3077 	return (cpu->cpu_m.mcpu_cpi->cpi_clogid);
3078 }
3079 
3080 int
3081 cpuid_get_cacheid(cpu_t *cpu)
3082 {
3083 	ASSERT(cpuid_checkpass(cpu, 1));
3084 	return (cpu->cpu_m.mcpu_cpi->cpi_last_lvl_cacheid);
3085 }
3086 
3087 uint_t
3088 cpuid_get_procnodeid(cpu_t *cpu)
3089 {
3090 	ASSERT(cpuid_checkpass(cpu, 1));
3091 	return (cpu->cpu_m.mcpu_cpi->cpi_procnodeid);
3092 }
3093 
3094 uint_t
3095 cpuid_get_procnodes_per_pkg(cpu_t *cpu)
3096 {
3097 	ASSERT(cpuid_checkpass(cpu, 1));
3098 	return (cpu->cpu_m.mcpu_cpi->cpi_procnodes_per_pkg);
3099 }
3100 
3101 uint_t
3102 cpuid_get_compunitid(cpu_t *cpu)
3103 {
3104 	ASSERT(cpuid_checkpass(cpu, 1));
3105 	return (cpu->cpu_m.mcpu_cpi->cpi_compunitid);
3106 }
3107 
3108 uint_t
3109 cpuid_get_cores_per_compunit(cpu_t *cpu)
3110 {
3111 	ASSERT(cpuid_checkpass(cpu, 1));
3112 	return (cpu->cpu_m.mcpu_cpi->cpi_cores_per_compunit);
3113 }
3114 
3115 /*ARGSUSED*/
3116 int
3117 cpuid_have_cr8access(cpu_t *cpu)
3118 {
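	/*
	 * %cr8 is always accessible on 64-bit kernels; 32-bit kernels can
	 * only reach it when AMD's CR8D alternative encoding is advertised
	 * in the extended feature %ecx word.
	 */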
3119 #if defined(__amd64)
3120 	return (1);
3121 #else
3122 	struct cpuid_info *cpi;
3123 
3124 	ASSERT(cpu != NULL);
3125 	cpi = cpu->cpu_m.mcpu_cpi;
3126 	if (cpi->cpi_vendor == X86_VENDOR_AMD && cpi->cpi_maxeax >= 1 &&
3127 	    (CPI_FEATURES_XTD_ECX(cpi) & CPUID_AMD_ECX_CR8D) != 0)
3128 		return (1);
3129 	return (0);
3130 #endif
3131 }
3132 
3133 uint32_t
3134 cpuid_get_apicid(cpu_t *cpu)
3135 {
3136 	ASSERT(cpuid_checkpass(cpu, 1));
3137 	if (cpu->cpu_m.mcpu_cpi->cpi_maxeax < 1) {
3138 		return (UINT32_MAX);
3139 	} else {
3140 		return (cpu->cpu_m.mcpu_cpi->cpi_apicid);
3141 	}
3142 }
3143 
3144 void
3145 cpuid_get_addrsize(cpu_t *cpu, uint_t *pabits, uint_t *vabits)
3146 {
3147 	struct cpuid_info *cpi;
3148 
3149 	if (cpu == NULL)
3150 		cpu = CPU;
3151 	cpi = cpu->cpu_m.mcpu_cpi;
3152 
3153 	ASSERT(cpuid_checkpass(cpu, 1));
3154 
3155 	if (pabits)
3156 		*pabits = cpi->cpi_pabits;
3157 	if (vabits)
3158 		*vabits = cpi->cpi_vabits;
3159 }
3160 
3161 /*
3162  * Returns the number of data TLB entries for a corresponding
3163  * pagesize.  If it can't be computed, or isn't known, the
3164  * routine returns zero.  If you ask about an architecturally
3165  * impossible pagesize, the routine will panic (so that the
3166  * hat implementor knows that things are inconsistent.)
3167  */
3168 uint_t
3169 cpuid_get_dtlb_nent(cpu_t *cpu, size_t pagesize)
3170 {
3171 	struct cpuid_info *cpi;
3172 	uint_t dtlb_nent = 0;
3173 
3174 	if (cpu == NULL)
3175 		cpu = CPU;
3176 	cpi = cpu->cpu_m.mcpu_cpi;
3177 
3178 	ASSERT(cpuid_checkpass(cpu, 1));
3179 
3180 	/*
3181 	 * Check the L2 TLB info
3182 	 */
3183 	if (cpi->cpi_xmaxeax >= 0x80000006) {
3184 		struct cpuid_regs *cp = &cpi->cpi_extd[6];
3185 
3186 		switch (pagesize) {
3187 
3188 		case 4 * 1024:
3189 			/*
3190 			 * All zero in the top 16 bits of the register
3191 			 * indicates a unified TLB. Size is in low 16 bits.
3192 			 */
3193 			if ((cp->cp_ebx & 0xffff0000) == 0)
3194 				dtlb_nent = cp->cp_ebx & 0x0000ffff;
3195 			else
3196 				dtlb_nent = BITX(cp->cp_ebx, 27, 16);
3197 			break;
3198 
3199 		case 2 * 1024 * 1024:
3200 			if ((cp->cp_eax & 0xffff0000) == 0)
3201 				dtlb_nent = cp->cp_eax & 0x0000ffff;
3202 			else
3203 				dtlb_nent = BITX(cp->cp_eax, 27, 16);
3204 			break;
3205 
3206 		default:
3207 			panic("unknown L2 pagesize");
3208 			/*NOTREACHED*/
3209 		}
3210 	}
3211 
3212 	if (dtlb_nent != 0)
3213 		return (dtlb_nent);
3214 
3215 	/*
3216 	 * No L2 TLB support for this size, try L1.
3217 	 */
3218 	if (cpi->cpi_xmaxeax >= 0x80000005) {
3219 		struct cpuid_regs *cp = &cpi->cpi_extd[5];
3220 
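		/*
		 * Leaf 0x80000005 reports the L1 d-TLB entry counts in
		 * bits 23:16 of %ebx (4K pages) and %eax (2M/4M pages).
		 */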
3221 		switch (pagesize) {
3222 		case 4 * 1024:
3223 			dtlb_nent = BITX(cp->cp_ebx, 23, 16);
3224 			break;
3225 		case 2 * 1024 * 1024:
3226 			dtlb_nent = BITX(cp->cp_eax, 23, 16);
3227 			break;
3228 		default:
3229 			panic("unknown L1 d-TLB pagesize");
3230 			/*NOTREACHED*/
3231 		}
3232 	}
3233 
3234 	return (dtlb_nent);
3235 }
3236 
3237 /*
3238  * Return 0 if the erratum is not present or not applicable, positive
3239  * if it is, and negative if the status of the erratum is unknown.
3240  *
3241  * See "Revision Guide for AMD Athlon(tm) 64 and AMD Opteron(tm)
3242  * Processors" #25759, Rev 3.57, August 2005
3243  */
3244 int
3245 cpuid_opteron_erratum(cpu_t *cpu, uint_t erratum)
3246 {
3247 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
3248 	uint_t eax;
3249 
3250 	/*
3251 	 * Bail out if this CPU isn't an AMD CPU, or if it's
3252 	 * a legacy (32-bit) AMD CPU.
3253 	 */
3254 	if (cpi->cpi_vendor != X86_VENDOR_AMD ||
3255 	    cpi->cpi_family == 4 || cpi->cpi_family == 5 ||
3256 	    cpi->cpi_family == 6)
3257 
3258 		return (0);
3259 
3260 	eax = cpi->cpi_std[1].cp_eax;
3261 
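/*
 * The macros below match the raw family/model/stepping signature from
 * CPUID function 1 %eax against the silicon revisions named in the
 * revision guide (SH-B0, DH-CG, JH-E1, and so on).
 */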
3262 #define	SH_B0(eax)	(eax == 0xf40 || eax == 0xf50)
3263 #define	SH_B3(eax) 	(eax == 0xf51)
3264 #define	B(eax)		(SH_B0(eax) || SH_B3(eax))
3265 
3266 #define	SH_C0(eax)	(eax == 0xf48 || eax == 0xf58)
3267 
3268 #define	SH_CG(eax)	(eax == 0xf4a || eax == 0xf5a || eax == 0xf7a)
3269 #define	DH_CG(eax)	(eax == 0xfc0 || eax == 0xfe0 || eax == 0xff0)
3270 #define	CH_CG(eax)	(eax == 0xf82 || eax == 0xfb2)
3271 #define	CG(eax)		(SH_CG(eax) || DH_CG(eax) || CH_CG(eax))
3272 
3273 #define	SH_D0(eax)	(eax == 0x10f40 || eax == 0x10f50 || eax == 0x10f70)
3274 #define	DH_D0(eax)	(eax == 0x10fc0 || eax == 0x10ff0)
3275 #define	CH_D0(eax)	(eax == 0x10f80 || eax == 0x10fb0)
3276 #define	D0(eax)		(SH_D0(eax) || DH_D0(eax) || CH_D0(eax))
3277 
3278 #define	SH_E0(eax)	(eax == 0x20f50 || eax == 0x20f40 || eax == 0x20f70)
3279 #define	JH_E1(eax)	(eax == 0x20f10)	/* JH8_E0 had 0x20f30 */
3280 #define	DH_E3(eax)	(eax == 0x20fc0 || eax == 0x20ff0)
3281 #define	SH_E4(eax)	(eax == 0x20f51 || eax == 0x20f71)
3282 #define	BH_E4(eax)	(eax == 0x20fb1)
3283 #define	SH_E5(eax)	(eax == 0x20f42)
3284 #define	DH_E6(eax)	(eax == 0x20ff2 || eax == 0x20fc2)
3285 #define	JH_E6(eax)	(eax == 0x20f12 || eax == 0x20f32)
3286 #define	EX(eax)		(SH_E0(eax) || JH_E1(eax) || DH_E3(eax) || \
3287 			    SH_E4(eax) || BH_E4(eax) || SH_E5(eax) || \
3288 			    DH_E6(eax) || JH_E6(eax))
3289 
3290 #define	DR_AX(eax)	(eax == 0x100f00 || eax == 0x100f01 || eax == 0x100f02)
3291 #define	DR_B0(eax)	(eax == 0x100f20)
3292 #define	DR_B1(eax)	(eax == 0x100f21)
3293 #define	DR_BA(eax)	(eax == 0x100f2a)
3294 #define	DR_B2(eax)	(eax == 0x100f22)
3295 #define	DR_B3(eax)	(eax == 0x100f23)
3296 #define	RB_C0(eax)	(eax == 0x100f40)
3297 
3298 	switch (erratum) {
3299 	case 1:
3300 		return (cpi->cpi_family < 0x10);
3301 	case 51:	/* what does the asterisk mean? */
3302 		return (B(eax) || SH_C0(eax) || CG(eax));
3303 	case 52:
3304 		return (B(eax));
3305 	case 57:
3306 		return (cpi->cpi_family <= 0x11);
3307 	case 58:
3308 		return (B(eax));
3309 	case 60:
3310 		return (cpi->cpi_family <= 0x11);
3311 	case 61:
3312 	case 62:
3313 	case 63:
3314 	case 64:
3315 	case 65:
3316 	case 66:
3317 	case 68:
3318 	case 69:
3319 	case 70:
3320 	case 71:
3321 		return (B(eax));
3322 	case 72:
3323 		return (SH_B0(eax));
3324 	case 74:
3325 		return (B(eax));
3326 	case 75:
3327 		return (cpi->cpi_family < 0x10);
3328 	case 76:
3329 		return (B(eax));
3330 	case 77:
3331 		return (cpi->cpi_family <= 0x11);
3332 	case 78:
3333 		return (B(eax) || SH_C0(eax));
3334 	case 79:
3335 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
3336 	case 80:
3337 	case 81:
3338 	case 82:
3339 		return (B(eax));
3340 	case 83:
3341 		return (B(eax) || SH_C0(eax) || CG(eax));
3342 	case 85:
3343 		return (cpi->cpi_family < 0x10);
3344 	case 86:
3345 		return (SH_C0(eax) || CG(eax));
3346 	case 88:
3347 #if !defined(__amd64)
3348 		return (0);
3349 #else
3350 		return (B(eax) || SH_C0(eax));
3351 #endif
3352 	case 89:
3353 		return (cpi->cpi_family < 0x10);
3354 	case 90:
3355 		return (B(eax) || SH_C0(eax) || CG(eax));
3356 	case 91:
3357 	case 92:
3358 		return (B(eax) || SH_C0(eax));
3359 	case 93:
3360 		return (SH_C0(eax));
3361 	case 94:
3362 		return (B(eax) || SH_C0(eax) || CG(eax));
3363 	case 95:
3364 #if !defined(__amd64)
3365 		return (0);
3366 #else
3367 		return (B(eax) || SH_C0(eax));
3368 #endif
3369 	case 96:
3370 		return (B(eax) || SH_C0(eax) || CG(eax));
3371 	case 97:
3372 	case 98:
3373 		return (SH_C0(eax) || CG(eax));
3374 	case 99:
3375 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
3376 	case 100:
3377 		return (B(eax) || SH_C0(eax));
3378 	case 101:
3379 	case 103:
3380 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
3381 	case 104:
3382 		return (SH_C0(eax) || CG(eax) || D0(eax));
3383 	case 105:
3384 	case 106:
3385 	case 107:
3386 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
3387 	case 108:
3388 		return (DH_CG(eax));
3389 	case 109:
3390 		return (SH_C0(eax) || CG(eax) || D0(eax));
3391 	case 110:
3392 		return (D0(eax) || EX(eax));
3393 	case 111:
3394 		return (CG(eax));
3395 	case 112:
3396 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
3397 	case 113:
3398 		return (eax == 0x20fc0);
3399 	case 114:
3400 		return (SH_E0(eax) || JH_E1(eax) || DH_E3(eax));
3401 	case 115:
3402 		return (SH_E0(eax) || JH_E1(eax));
3403 	case 116:
3404 		return (SH_E0(eax) || JH_E1(eax) || DH_E3(eax));
3405 	case 117:
3406 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
3407 	case 118:
3408 		return (SH_E0(eax) || JH_E1(eax) || SH_E4(eax) || BH_E4(eax) ||
3409 		    JH_E6(eax));
3410 	case 121:
3411 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
3412 	case 122:
3413 		return (cpi->cpi_family < 0x10 || cpi->cpi_family == 0x11);
3414 	case 123:
3415 		return (JH_E1(eax) || BH_E4(eax) || JH_E6(eax));
3416 	case 131:
3417 		return (cpi->cpi_family < 0x10);
3418 	case 6336786:
3419 		/*
3420 		 * Test for AdvPowerMgmtInfo.TscPStateInvariant
3421 		 * if this is a K8 family or newer processor
3422 		 */
3423 		if (CPI_FAMILY(cpi) == 0xf) {
3424 			struct cpuid_regs regs;
3425 			regs.cp_eax = 0x80000007;
3426 			(void) __cpuid_insn(&regs);
3427 			return (!(regs.cp_edx & 0x100));
3428 		}
3429 		return (0);
3430 	case 6323525:
3431 		return (((((eax >> 12) & 0xff00) + (eax & 0xf00)) |
3432 		    (((eax >> 4) & 0xf) | ((eax >> 12) & 0xf0))) < 0xf40);
3433 
3434 	case 6671130:
3435 		/*
3436 		 * Check for processors (pre-Shanghai) that do not provide
3437 		 * optimal management of 1gb ptes in their tlb.
3438 		 */
3439 		return (cpi->cpi_family == 0x10 && cpi->cpi_model < 4);
3440 
3441 	case 298:
3442 		return (DR_AX(eax) || DR_B0(eax) || DR_B1(eax) || DR_BA(eax) ||
3443 		    DR_B2(eax) || RB_C0(eax));
3444 
3445 	case 721:
3446 #if defined(__amd64)
3447 		return (cpi->cpi_family == 0x10 || cpi->cpi_family == 0x12);
3448 #else
3449 		return (0);
3450 #endif
3451 
3452 	default:
3453 		return (-1);
3454 
3455 	}
3456 }
3457 
3458 /*
3459  * Determine if specified erratum is present via OSVW (OS Visible Workaround).
3460  * Return 1 if erratum is present, 0 if not present and -1 if indeterminate.
3461  */
3462 int
3463 osvw_opteron_erratum(cpu_t *cpu, uint_t erratum)
3464 {
3465 	struct cpuid_info	*cpi;
3466 	uint_t			osvwid;
3467 	static int		osvwfeature = -1;
3468 	uint64_t		osvwlength;
3469 
3470 
3471 	cpi = cpu->cpu_m.mcpu_cpi;
3472 
3473 	/* confirm OSVW supported */
3474 	if (osvwfeature == -1) {
3475 		osvwfeature = cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW;
3476 	} else {
3477 		/* assert that osvw feature setting is consistent on all cpus */
3478 		ASSERT(osvwfeature ==
3479 		    (cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW));
3480 	}
3481 	if (!osvwfeature)
3482 		return (-1);
3483 
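	/* The OSVW length MSR gives the number of valid OSVW status bits. */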
3484 	osvwlength = rdmsr(MSR_AMD_OSVW_ID_LEN) & OSVW_ID_LEN_MASK;
3485 
3486 	switch (erratum) {
3487 	case 298:	/* osvwid is 0 */
3488 		osvwid = 0;
3489 		if (osvwlength <= (uint64_t)osvwid) {
3490 			/* osvwid 0 is unknown */
3491 			return (-1);
3492 		}
3493 
3494 		/*
3495 		 * Check the OSVW STATUS MSR to determine the state
3496 		 * of the erratum where:
3497 		 *   0 - fixed by HW
3498 		 *   1 - BIOS has applied the workaround when BIOS
3499 		 *   workaround is available. (Or for other errata,
3500 		 *   OS workaround is required.)
3501 		 * For a value of 1, caller will confirm that the
3502 		 * erratum 298 workaround has indeed been applied by BIOS.
3503 		 *
3504 		 * A 1 may be set in cpus that have a HW fix
3505 		 * in a mixed cpu system. Regarding erratum 298:
3506 		 *   In a multiprocessor platform, the workaround above
3507 		 *   should be applied to all processors regardless of
3508 		 *   silicon revision when an affected processor is
3509 		 *   present.
3510 		 */
3511 
3512 		return (rdmsr(MSR_AMD_OSVW_STATUS +
3513 		    (osvwid / OSVW_ID_CNT_PER_MSR)) &
3514 		    (1ULL << (osvwid % OSVW_ID_CNT_PER_MSR)));
3515 
3516 	default:
3517 		return (-1);
3518 	}
3519 }
3520 
3521 static const char assoc_str[] = "associativity";
3522 static const char line_str[] = "line-size";
3523 static const char size_str[] = "size";
3524 
3525 static void
3526 add_cache_prop(dev_info_t *devi, const char *label, const char *type,
3527     uint32_t val)
3528 {
3529 	char buf[128];
3530 
3531 	/*
3532 	 * ndi_prop_update_int() is used because it is desirable for
3533 	 * DDI_PROP_HW_DEF and DDI_PROP_DONTSLEEP to be set.
3534 	 */
3535 	if (snprintf(buf, sizeof (buf), "%s-%s", label, type) < sizeof (buf))
3536 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, devi, buf, val);
3537 }
3538 
3539 /*
3540  * Intel-style cache/tlb description
3541  *
3542  * Standard cpuid level 2 gives a randomly ordered
3543  * selection of tags that index into a table that describes
3544  * cache and tlb properties.
3545  */
3546 
3547 static const char l1_icache_str[] = "l1-icache";
3548 static const char l1_dcache_str[] = "l1-dcache";
3549 static const char l2_cache_str[] = "l2-cache";
3550 static const char l3_cache_str[] = "l3-cache";
3551 static const char itlb4k_str[] = "itlb-4K";
3552 static const char dtlb4k_str[] = "dtlb-4K";
3553 static const char itlb2M_str[] = "itlb-2M";
3554 static const char itlb4M_str[] = "itlb-4M";
3555 static const char dtlb4M_str[] = "dtlb-4M";
3556 static const char dtlb24_str[] = "dtlb0-2M-4M";
3557 static const char itlb424_str[] = "itlb-4K-2M-4M";
3558 static const char itlb24_str[] = "itlb-2M-4M";
3559 static const char dtlb44_str[] = "dtlb-4K-4M";
3560 static const char sl1_dcache_str[] = "sectored-l1-dcache";
3561 static const char sl2_cache_str[] = "sectored-l2-cache";
3562 static const char itrace_str[] = "itrace-cache";
3563 static const char sl3_cache_str[] = "sectored-l3-cache";
3564 static const char sh_l2_tlb4k_str[] = "shared-l2-tlb-4k";
3565 
3566 static const struct cachetab {
3567 	uint8_t 	ct_code;
3568 	uint8_t		ct_assoc;
3569 	uint16_t 	ct_line_size;
3570 	size_t		ct_size;
3571 	const char	*ct_label;
3572 } intel_ctab[] = {
3573 	/*
3574 	 * maintain descending order!
3575 	 *
3576 	 * Codes ignored - Reason
3577 	 * ----------------------
3578 	 * 40H - intel_cpuid_4_cache_info() disambiguates l2/l3 cache
3579 	 * f0H/f1H - Currently we do not interpret prefetch size by design
3580 	 */
3581 	{ 0xe4, 16, 64, 8*1024*1024, l3_cache_str},
3582 	{ 0xe3, 16, 64, 4*1024*1024, l3_cache_str},
3583 	{ 0xe2, 16, 64, 2*1024*1024, l3_cache_str},
3584 	{ 0xde, 12, 64, 6*1024*1024, l3_cache_str},
3585 	{ 0xdd, 12, 64, 3*1024*1024, l3_cache_str},
3586 	{ 0xdc, 12, 64, ((1*1024*1024)+(512*1024)), l3_cache_str},
3587 	{ 0xd8, 8, 64, 4*1024*1024, l3_cache_str},
3588 	{ 0xd7, 8, 64, 2*1024*1024, l3_cache_str},
3589 	{ 0xd6, 8, 64, 1*1024*1024, l3_cache_str},
3590 	{ 0xd2, 4, 64, 2*1024*1024, l3_cache_str},
3591 	{ 0xd1, 4, 64, 1*1024*1024, l3_cache_str},
3592 	{ 0xd0, 4, 64, 512*1024, l3_cache_str},
3593 	{ 0xca, 4, 0, 512, sh_l2_tlb4k_str},
3594 	{ 0xc0, 4, 0, 8, dtlb44_str },
3595 	{ 0xba, 4, 0, 64, dtlb4k_str },
3596 	{ 0xb4, 4, 0, 256, dtlb4k_str },
3597 	{ 0xb3, 4, 0, 128, dtlb4k_str },
3598 	{ 0xb2, 4, 0, 64, itlb4k_str },
3599 	{ 0xb0, 4, 0, 128, itlb4k_str },
3600 	{ 0x87, 8, 64, 1024*1024, l2_cache_str},
3601 	{ 0x86, 4, 64, 512*1024, l2_cache_str},
3602 	{ 0x85, 8, 32, 2*1024*1024, l2_cache_str},
3603 	{ 0x84, 8, 32, 1024*1024, l2_cache_str},
3604 	{ 0x83, 8, 32, 512*1024, l2_cache_str},
3605 	{ 0x82, 8, 32, 256*1024, l2_cache_str},
3606 	{ 0x80, 8, 64, 512*1024, l2_cache_str},
3607 	{ 0x7f, 2, 64, 512*1024, l2_cache_str},
3608 	{ 0x7d, 8, 64, 2*1024*1024, sl2_cache_str},
3609 	{ 0x7c, 8, 64, 1024*1024, sl2_cache_str},
3610 	{ 0x7b, 8, 64, 512*1024, sl2_cache_str},
3611 	{ 0x7a, 8, 64, 256*1024, sl2_cache_str},
3612 	{ 0x79, 8, 64, 128*1024, sl2_cache_str},
3613 	{ 0x78, 8, 64, 1024*1024, l2_cache_str},
3614 	{ 0x73, 8, 0, 64*1024, itrace_str},
3615 	{ 0x72, 8, 0, 32*1024, itrace_str},
3616 	{ 0x71, 8, 0, 16*1024, itrace_str},
3617 	{ 0x70, 8, 0, 12*1024, itrace_str},
3618 	{ 0x68, 4, 64, 32*1024, sl1_dcache_str},
3619 	{ 0x67, 4, 64, 16*1024, sl1_dcache_str},
3620 	{ 0x66, 4, 64, 8*1024, sl1_dcache_str},
3621 	{ 0x60, 8, 64, 16*1024, sl1_dcache_str},
3622 	{ 0x5d, 0, 0, 256, dtlb44_str},
3623 	{ 0x5c, 0, 0, 128, dtlb44_str},
3624 	{ 0x5b, 0, 0, 64, dtlb44_str},
3625 	{ 0x5a, 4, 0, 32, dtlb24_str},
3626 	{ 0x59, 0, 0, 16, dtlb4k_str},
3627 	{ 0x57, 4, 0, 16, dtlb4k_str},
3628 	{ 0x56, 4, 0, 16, dtlb4M_str},
3629 	{ 0x55, 0, 0, 7, itlb24_str},
3630 	{ 0x52, 0, 0, 256, itlb424_str},
3631 	{ 0x51, 0, 0, 128, itlb424_str},
3632 	{ 0x50, 0, 0, 64, itlb424_str},
3633 	{ 0x4f, 0, 0, 32, itlb4k_str},
3634 	{ 0x4e, 24, 64, 6*1024*1024, l2_cache_str},
3635 	{ 0x4d, 16, 64, 16*1024*1024, l3_cache_str},
3636 	{ 0x4c, 12, 64, 12*1024*1024, l3_cache_str},
3637 	{ 0x4b, 16, 64, 8*1024*1024, l3_cache_str},
3638 	{ 0x4a, 12, 64, 6*1024*1024, l3_cache_str},
3639 	{ 0x49, 16, 64, 4*1024*1024, l3_cache_str},
3640 	{ 0x48, 12, 64, 3*1024*1024, l2_cache_str},
3641 	{ 0x47, 8, 64, 8*1024*1024, l3_cache_str},
3642 	{ 0x46, 4, 64, 4*1024*1024, l3_cache_str},
3643 	{ 0x45, 4, 32, 2*1024*1024, l2_cache_str},
3644 	{ 0x44, 4, 32, 1024*1024, l2_cache_str},
3645 	{ 0x43, 4, 32, 512*1024, l2_cache_str},
3646 	{ 0x42, 4, 32, 256*1024, l2_cache_str},
3647 	{ 0x41, 4, 32, 128*1024, l2_cache_str},
3648 	{ 0x3e, 4, 64, 512*1024, sl2_cache_str},
3649 	{ 0x3d, 6, 64, 384*1024, sl2_cache_str},
3650 	{ 0x3c, 4, 64, 256*1024, sl2_cache_str},
3651 	{ 0x3b, 2, 64, 128*1024, sl2_cache_str},
3652 	{ 0x3a, 6, 64, 192*1024, sl2_cache_str},
3653 	{ 0x39, 4, 64, 128*1024, sl2_cache_str},
3654 	{ 0x30, 8, 64, 32*1024, l1_icache_str},
3655 	{ 0x2c, 8, 64, 32*1024, l1_dcache_str},
3656 	{ 0x29, 8, 64, 4096*1024, sl3_cache_str},
3657 	{ 0x25, 8, 64, 2048*1024, sl3_cache_str},
3658 	{ 0x23, 8, 64, 1024*1024, sl3_cache_str},
3659 	{ 0x22, 4, 64, 512*1024, sl3_cache_str},
3660 	{ 0x0e, 6, 64, 24*1024, l1_dcache_str},
3661 	{ 0x0d, 4, 32, 16*1024, l1_dcache_str},
3662 	{ 0x0c, 4, 32, 16*1024, l1_dcache_str},
3663 	{ 0x0b, 4, 0, 4, itlb4M_str},
3664 	{ 0x0a, 2, 32, 8*1024, l1_dcache_str},
3665 	{ 0x08, 4, 32, 16*1024, l1_icache_str},
3666 	{ 0x06, 4, 32, 8*1024, l1_icache_str},
3667 	{ 0x05, 4, 0, 32, dtlb4M_str},
3668 	{ 0x04, 4, 0, 8, dtlb4M_str},
3669 	{ 0x03, 4, 0, 64, dtlb4k_str},
3670 	{ 0x02, 4, 0, 2, itlb4M_str},
3671 	{ 0x01, 4, 0, 32, itlb4k_str},
3672 	{ 0 }
3673 };
3674 
3675 static const struct cachetab cyrix_ctab[] = {
3676 	{ 0x80, 4, 16, 16*1024, "l1-cache" },
3677 	{ 0x70, 4, 0, 32, "tlb-4K" },
3678 	{ 0 }
3679 };
3680 
3681 /*
3682  * Search a cache table (sorted descending by code) for a matching entry
3683  */
3684 static const struct cachetab *
3685 find_cacheent(const struct cachetab *ct, uint_t code)
3686 {
3687 	if (code != 0) {
3688 		for (; ct->ct_code != 0; ct++)
3689 			if (ct->ct_code <= code)
3690 				break;
3691 		if (ct->ct_code == code)
3692 			return (ct);
3693 	}
3694 	return (NULL);
3695 }
3696 
3697 /*
3698  * Populate the cachetab entry with L2 or L3 cache information using
3699  * cpuid function 4. This function is called from intel_walk_cacheinfo()
3700  * when descriptor 0x49 is encountered. It returns 0 if no such cache
3701  * information is found.
3702  */
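/*
 * A sketch of the leaf-4 layout assumed by the decode below: each sub-leaf
 * reports the cache level in %eax, ways/partitions/line-size as (field + 1)
 * in %ebx, and the set count as (%ecx + 1); the cache size is the product
 * of ways, partitions, line size and sets.
 */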
3703 static int
3704 intel_cpuid_4_cache_info(struct cachetab *ct, struct cpuid_info *cpi)
3705 {
3706 	uint32_t level, i;
3707 	int ret = 0;
3708 
3709 	for (i = 0; i < cpi->cpi_std_4_size; i++) {
3710 		level = CPI_CACHE_LVL(cpi->cpi_std_4[i]);
3711 
3712 		if (level == 2 || level == 3) {
3713 			ct->ct_assoc = CPI_CACHE_WAYS(cpi->cpi_std_4[i]) + 1;
3714 			ct->ct_line_size =
3715 			    CPI_CACHE_COH_LN_SZ(cpi->cpi_std_4[i]) + 1;
3716 			ct->ct_size = ct->ct_assoc *
3717 			    (CPI_CACHE_PARTS(cpi->cpi_std_4[i]) + 1) *
3718 			    ct->ct_line_size *
3719 			    (cpi->cpi_std_4[i]->cp_ecx + 1);
3720 
3721 			if (level == 2) {
3722 				ct->ct_label = l2_cache_str;
3723 			} else if (level == 3) {
3724 				ct->ct_label = l3_cache_str;
3725 			}
3726 			ret = 1;
3727 		}
3728 	}
3729 
3730 	return (ret);
3731 }
3732 
3733 /*
3734  * Walk the cacheinfo descriptors, applying 'func' to every valid element.
3735  * The walk is terminated if the walker returns non-zero.
3736  */
3737 static void
3738 intel_walk_cacheinfo(struct cpuid_info *cpi,
3739     void *arg, int (*func)(void *, const struct cachetab *))
3740 {
3741 	const struct cachetab *ct;
3742 	struct cachetab des_49_ct, des_b1_ct;
3743 	uint8_t *dp;
3744 	int i;
3745 
3746 	if ((dp = cpi->cpi_cacheinfo) == NULL)
3747 		return;
3748 	for (i = 0; i < cpi->cpi_ncache; i++, dp++) {
3749 		/*
3750 		 * For overloaded descriptor 0x49 we use cpuid function 4,
3751 		 * if supported by the current processor, to create the
3752 		 * cache information.
3753 		 * For overloaded descriptor 0xb1 we use the X86FSET_PAE
3754 		 * feature flag to disambiguate the cache information.
3755 		 */
3756 		if (*dp == 0x49 && cpi->cpi_maxeax >= 0x4 &&
3757 		    intel_cpuid_4_cache_info(&des_49_ct, cpi) == 1) {
3758 			ct = &des_49_ct;
3759 		} else if (*dp == 0xb1) {
3760 			des_b1_ct.ct_code = 0xb1;
3761 			des_b1_ct.ct_assoc = 4;
3762 			des_b1_ct.ct_line_size = 0;
3763 			if (is_x86_feature(x86_featureset, X86FSET_PAE)) {
3764 				des_b1_ct.ct_size = 8;
3765 				des_b1_ct.ct_label = itlb2M_str;
3766 			} else {
3767 				des_b1_ct.ct_size = 4;
3768 				des_b1_ct.ct_label = itlb4M_str;
3769 			}
3770 			ct = &des_b1_ct;
3771 		} else {
3772 			if ((ct = find_cacheent(intel_ctab, *dp)) == NULL) {
3773 				continue;
3774 			}
3775 		}
3776 
3777 		if (func(arg, ct) != 0) {
3778 			break;
3779 		}
3780 	}
3781 }
3782 
3783 /*
3784  * (Like the Intel one, except for Cyrix CPUs)
3785  */
3786 static void
3787 cyrix_walk_cacheinfo(struct cpuid_info *cpi,
3788     void *arg, int (*func)(void *, const struct cachetab *))
3789 {
3790 	const struct cachetab *ct;
3791 	uint8_t *dp;
3792 	int i;
3793 
3794 	if ((dp = cpi->cpi_cacheinfo) == NULL)
3795 		return;
3796 	for (i = 0; i < cpi->cpi_ncache; i++, dp++) {
3797 		/*
3798 		 * Search Cyrix-specific descriptor table first ..
3799 		 */
3800 		if ((ct = find_cacheent(cyrix_ctab, *dp)) != NULL) {
3801 			if (func(arg, ct) != 0)
3802 				break;
3803 			continue;
3804 		}
3805 		/*
3806 		 * .. else fall back to the Intel one
3807 		 */
3808 		if ((ct = find_cacheent(intel_ctab, *dp)) != NULL) {
3809 			if (func(arg, ct) != 0)
3810 				break;
3811 			continue;
3812 		}
3813 	}
3814 }
3815 
3816 /*
3817  * A cacheinfo walker that adds associativity, line-size, and size properties
3818  * to the devinfo node it is passed as an argument.
3819  */
3820 static int
3821 add_cacheent_props(void *arg, const struct cachetab *ct)
3822 {
3823 	dev_info_t *devi = arg;
3824 
3825 	add_cache_prop(devi, ct->ct_label, assoc_str, ct->ct_assoc);
3826 	if (ct->ct_line_size != 0)
3827 		add_cache_prop(devi, ct->ct_label, line_str,
3828 		    ct->ct_line_size);
3829 	add_cache_prop(devi, ct->ct_label, size_str, ct->ct_size);
3830 	return (0);
3831 }
3832 
3833 
3834 static const char fully_assoc[] = "fully-associative?";
3835 
3836 /*
3837  * AMD style cache/tlb description
3838  *
3839  * Extended functions 5 and 6 directly describe properties of
3840  * tlbs and various cache levels.
3841  */
3842 static void
3843 add_amd_assoc(dev_info_t *devi, const char *label, uint_t assoc)
3844 {
3845 	switch (assoc) {
3846 	case 0:	/* reserved; ignore */
3847 		break;
3848 	default:
3849 		add_cache_prop(devi, label, assoc_str, assoc);
3850 		break;
3851 	case 0xff:
3852 		add_cache_prop(devi, label, fully_assoc, 1);
3853 		break;
3854 	}
3855 }
3856 
3857 static void
3858 add_amd_tlb(dev_info_t *devi, const char *label, uint_t assoc, uint_t size)
3859 {
3860 	if (size == 0)
3861 		return;
3862 	add_cache_prop(devi, label, size_str, size);
3863 	add_amd_assoc(devi, label, assoc);
3864 }
3865 
3866 static void
3867 add_amd_cache(dev_info_t *devi, const char *label,
3868     uint_t size, uint_t assoc, uint_t lines_per_tag, uint_t line_size)
3869 {
3870 	if (size == 0 || line_size == 0)
3871 		return;
3872 	add_amd_assoc(devi, label, assoc);
3873 	/*
3874 	 * Most AMD parts have a sectored cache. Multiple cache lines are
3875 	 * associated with each tag. A sector consists of all cache lines
3876 	 * associated with a tag. For example, the AMD K6-III has a sector
3877 	 * size of 2 cache lines per tag.
3878 	 */
3879 	if (lines_per_tag != 0)
3880 		add_cache_prop(devi, label, "lines-per-tag", lines_per_tag);
3881 	add_cache_prop(devi, label, line_str, line_size);
3882 	add_cache_prop(devi, label, size_str, size * 1024);
3883 }
3884 
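/*
 * The L2/L3 associativity fields reported by extended function 6 are a
 * 4-bit encoding rather than a literal way count; decode the subset of
 * values handled here (the full table, amd_afd[], appears further below
 * and is used by amd_l2cacheinfo()).
 */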
3885 static void
3886 add_amd_l2_assoc(dev_info_t *devi, const char *label, uint_t assoc)
3887 {
3888 	switch (assoc) {
3889 	case 0:	/* off */
3890 		break;
3891 	case 1:
3892 	case 2:
3893 	case 4:
3894 		add_cache_prop(devi, label, assoc_str, assoc);
3895 		break;
3896 	case 6:
3897 		add_cache_prop(devi, label, assoc_str, 8);
3898 		break;
3899 	case 8:
3900 		add_cache_prop(devi, label, assoc_str, 16);
3901 		break;
3902 	case 0xf:
3903 		add_cache_prop(devi, label, fully_assoc, 1);
3904 		break;
3905 	default: /* reserved; ignore */
3906 		break;
3907 	}
3908 }
3909 
3910 static void
3911 add_amd_l2_tlb(dev_info_t *devi, const char *label, uint_t assoc, uint_t size)
3912 {
3913 	if (size == 0 || assoc == 0)
3914 		return;
3915 	add_amd_l2_assoc(devi, label, assoc);
3916 	add_cache_prop(devi, label, size_str, size);
3917 }
3918 
3919 static void
3920 add_amd_l2_cache(dev_info_t *devi, const char *label,
3921     uint_t size, uint_t assoc, uint_t lines_per_tag, uint_t line_size)
3922 {
3923 	if (size == 0 || assoc == 0 || line_size == 0)
3924 		return;
3925 	add_amd_l2_assoc(devi, label, assoc);
3926 	if (lines_per_tag != 0)
3927 		add_cache_prop(devi, label, "lines-per-tag", lines_per_tag);
3928 	add_cache_prop(devi, label, line_str, line_size);
3929 	add_cache_prop(devi, label, size_str, size * 1024);
3930 }
3931 
3932 static void
3933 amd_cache_info(struct cpuid_info *cpi, dev_info_t *devi)
3934 {
3935 	struct cpuid_regs *cp;
3936 
3937 	if (cpi->cpi_xmaxeax < 0x80000005)
3938 		return;
3939 	cp = &cpi->cpi_extd[5];
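	/*
	 * Extended function 5 layout assumed below:
	 *	%eax	2M/4M L1 TLB; dtlb assoc/entries in bits 31:16,
	 *		itlb assoc/entries in bits 15:0
	 *	%ebx	4K L1 TLB, packed the same way
	 *	%ecx	L1 data cache; size (KB), assoc, lines/tag, line size
	 *	%edx	L1 instruction cache, packed the same way
	 */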
3940 
3941 	/*
3942 	 * 4M/2M L1 TLB configuration
3943 	 *
3944 	 * We report the size for 2M pages because AMD uses two
3945 	 * TLB entries for one 4M page.
3946 	 */
3947 	add_amd_tlb(devi, "dtlb-2M",
3948 	    BITX(cp->cp_eax, 31, 24), BITX(cp->cp_eax, 23, 16));
3949 	add_amd_tlb(devi, "itlb-2M",
3950 	    BITX(cp->cp_eax, 15, 8), BITX(cp->cp_eax, 7, 0));
3951 
3952 	/*
3953 	 * 4K L1 TLB configuration
3954 	 */
3955 
3956 	switch (cpi->cpi_vendor) {
3957 		uint_t nentries;
3958 	case X86_VENDOR_TM:
3959 		if (cpi->cpi_family >= 5) {
3960 			/*
3961 			 * Crusoe processors have 256 TLB entries, but
3962 			 * cpuid data format constrains them to only
3963 			 * reporting 255 of them.
3964 			 */
3965 			if ((nentries = BITX(cp->cp_ebx, 23, 16)) == 255)
3966 				nentries = 256;
3967 			/*
3968 			 * Crusoe processors also have a unified TLB
3969 			 */
3970 			add_amd_tlb(devi, "tlb-4K", BITX(cp->cp_ebx, 31, 24),
3971 			    nentries);
3972 			break;
3973 		}
3974 		/*FALLTHROUGH*/
3975 	default:
3976 		add_amd_tlb(devi, dtlb4k_str,
3977 		    BITX(cp->cp_ebx, 31, 24), BITX(cp->cp_ebx, 23, 16));
3978 		add_amd_tlb(devi, itlb4k_str,
3979 		    BITX(cp->cp_ebx, 15, 8), BITX(cp->cp_ebx, 7, 0));
3980 		break;
3981 	}
3982 
3983 	/*
3984 	 * data L1 cache configuration
3985 	 */
3986 
3987 	add_amd_cache(devi, l1_dcache_str,
3988 	    BITX(cp->cp_ecx, 31, 24), BITX(cp->cp_ecx, 23, 16),
3989 	    BITX(cp->cp_ecx, 15, 8), BITX(cp->cp_ecx, 7, 0));
3990 
3991 	/*
3992 	 * code L1 cache configuration
3993 	 */
3994 
3995 	add_amd_cache(devi, l1_icache_str,
3996 	    BITX(cp->cp_edx, 31, 24), BITX(cp->cp_edx, 23, 16),
3997 	    BITX(cp->cp_edx, 15, 8), BITX(cp->cp_edx, 7, 0));
3998 
3999 	if (cpi->cpi_xmaxeax < 0x80000006)
4000 		return;
4001 	cp = &cpi->cpi_extd[6];
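	/*
	 * Extended function 6 layout assumed below:
	 *	%eax	2M/4M L2 TLB; dtlb fields in bits 31:16, itlb fields
	 *		in bits 15:0 (a zero dtlb half means a unified TLB)
	 *	%ebx	4K L2 TLB, packed the same way
	 *	%ecx	L2 cache; size (KB), 4-bit assoc, lines/tag, line size
	 */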
4002 
4003 	/* Check for a unified L2 TLB for large pages */
4004 
4005 	if (BITX(cp->cp_eax, 31, 16) == 0)
4006 		add_amd_l2_tlb(devi, "l2-tlb-2M",
4007 		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
4008 	else {
4009 		add_amd_l2_tlb(devi, "l2-dtlb-2M",
4010 		    BITX(cp->cp_eax, 31, 28), BITX(cp->cp_eax, 27, 16));
4011 		add_amd_l2_tlb(devi, "l2-itlb-2M",
4012 		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
4013 	}
4014 
4015 	/* Check for a unified L2 TLB for 4K pages */
4016 
4017 	if (BITX(cp->cp_ebx, 31, 16) == 0) {
4018 		add_amd_l2_tlb(devi, "l2-tlb-4K",
4019 		    BITX(cp->cp_ebx, 15, 12), BITX(cp->cp_ebx, 11, 0));
4020 	} else {
4021 		add_amd_l2_tlb(devi, "l2-dtlb-4K",
4022 		    BITX(cp->cp_ebx, 31, 28), BITX(cp->cp_ebx, 27, 16));
4023 		add_amd_l2_tlb(devi, "l2-itlb-4K",
4024 		    BITX(cp->cp_ebx, 15, 12), BITX(cp->cp_ebx, 11, 0));
4025 	}
4026 
4027 	add_amd_l2_cache(devi, l2_cache_str,
4028 	    BITX(cp->cp_ecx, 31, 16), BITX(cp->cp_ecx, 15, 12),
4029 	    BITX(cp->cp_ecx, 11, 8), BITX(cp->cp_ecx, 7, 0));
4030 }
4031 
4032 /*
4033  * There are two basic ways that the x86 world describes its cache
4034  * and tlb architecture - Intel's way and AMD's way.
4035  *
4036  * Return which flavor of cache architecture we should use
4037  */
4038 static int
4039 x86_which_cacheinfo(struct cpuid_info *cpi)
4040 {
4041 	switch (cpi->cpi_vendor) {
4042 	case X86_VENDOR_Intel:
4043 		if (cpi->cpi_maxeax >= 2)
4044 			return (X86_VENDOR_Intel);
4045 		break;
4046 	case X86_VENDOR_AMD:
4047 		/*
4048 		 * The K5 model 1 was the first part from AMD that reported
4049 		 * cache sizes via extended cpuid functions.
4050 		 */
4051 		if (cpi->cpi_family > 5 ||
4052 		    (cpi->cpi_family == 5 && cpi->cpi_model >= 1))
4053 			return (X86_VENDOR_AMD);
4054 		break;
4055 	case X86_VENDOR_TM:
4056 		if (cpi->cpi_family >= 5)
4057 			return (X86_VENDOR_AMD);
4058 		/*FALLTHROUGH*/
4059 	default:
4060 		/*
4061 		 * If they have extended CPU data for 0x80000005
4062 		 * then we assume they have AMD-format cache
4063 		 * information.
4064 		 *
4065 		 * If not, and the vendor happens to be Cyrix,
4066 		 * then try our Cyrix-specific handler.
4067 		 *
4068 		 * If we're not Cyrix, then assume we're using Intel's
4069 		 * table-driven format instead.
4070 		 */
4071 		if (cpi->cpi_xmaxeax >= 0x80000005)
4072 			return (X86_VENDOR_AMD);
4073 		else if (cpi->cpi_vendor == X86_VENDOR_Cyrix)
4074 			return (X86_VENDOR_Cyrix);
4075 		else if (cpi->cpi_maxeax >= 2)
4076 			return (X86_VENDOR_Intel);
4077 		break;
4078 	}
4079 	return (-1);
4080 }
4081 
4082 void
4083 cpuid_set_cpu_properties(void *dip, processorid_t cpu_id,
4084     struct cpuid_info *cpi)
4085 {
4086 	dev_info_t *cpu_devi;
4087 	int create;
4088 
4089 	cpu_devi = (dev_info_t *)dip;
4090 
4091 	/* device_type */
4092 	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
4093 	    "device_type", "cpu");
4094 
4095 	/* reg */
4096 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4097 	    "reg", cpu_id);
4098 
4099 	/* cpu-mhz, and clock-frequency */
4100 	if (cpu_freq > 0) {
4101 		long long mul;
4102 
4103 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4104 		    "cpu-mhz", cpu_freq);
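		/*
		 * clock-frequency is in Hz but is an int property, so
		 * omit it if the value would not fit.
		 */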
4105 		if ((mul = cpu_freq * 1000000LL) <= INT_MAX)
4106 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4107 			    "clock-frequency", (int)mul);
4108 	}
4109 
4110 	if (!is_x86_feature(x86_featureset, X86FSET_CPUID)) {
4111 		return;
4112 	}
4113 
4114 	/* vendor-id */
4115 	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
4116 	    "vendor-id", cpi->cpi_vendorstr);
4117 
4118 	if (cpi->cpi_maxeax == 0) {
4119 		return;
4120 	}
4121 
4122 	/*
4123 	 * family, model, and step
4124 	 */
4125 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4126 	    "family", CPI_FAMILY(cpi));
4127 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4128 	    "cpu-model", CPI_MODEL(cpi));
4129 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4130 	    "stepping-id", CPI_STEP(cpi));
4131 
4132 	/* type */
4133 	switch (cpi->cpi_vendor) {
4134 	case X86_VENDOR_Intel:
4135 		create = 1;
4136 		break;
4137 	default:
4138 		create = 0;
4139 		break;
4140 	}
4141 	if (create)
4142 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4143 		    "type", CPI_TYPE(cpi));
4144 
4145 	/* ext-family */
4146 	switch (cpi->cpi_vendor) {
4147 	case X86_VENDOR_Intel:
4148 	case X86_VENDOR_AMD:
4149 		create = cpi->cpi_family >= 0xf;
4150 		break;
4151 	default:
4152 		create = 0;
4153 		break;
4154 	}
4155 	if (create)
4156 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4157 		    "ext-family", CPI_FAMILY_XTD(cpi));
4158 
4159 	/* ext-model */
4160 	switch (cpi->cpi_vendor) {
4161 	case X86_VENDOR_Intel:
4162 		create = IS_EXTENDED_MODEL_INTEL(cpi);
4163 		break;
4164 	case X86_VENDOR_AMD:
4165 		create = CPI_FAMILY(cpi) == 0xf;
4166 		break;
4167 	default:
4168 		create = 0;
4169 		break;
4170 	}
4171 	if (create)
4172 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4173 		    "ext-model", CPI_MODEL_XTD(cpi));
4174 
4175 	/* generation */
4176 	switch (cpi->cpi_vendor) {
4177 	case X86_VENDOR_AMD:
4178 		/*
4179 		 * AMD K5 model 1 was the first part to support this
4180 		 */
4181 		create = cpi->cpi_xmaxeax >= 0x80000001;
4182 		break;
4183 	default:
4184 		create = 0;
4185 		break;
4186 	}
4187 	if (create)
4188 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4189 		    "generation", BITX((cpi)->cpi_extd[1].cp_eax, 11, 8));
4190 
4191 	/* brand-id */
4192 	switch (cpi->cpi_vendor) {
4193 	case X86_VENDOR_Intel:
4194 		/*
4195 		 * brand id first appeared on Pentium III Xeon model 8 and
4196 		 * Celeron model 8 processors, and on Opteron
4197 		 */
4198 		create = cpi->cpi_family > 6 ||
4199 		    (cpi->cpi_family == 6 && cpi->cpi_model >= 8);
4200 		break;
4201 	case X86_VENDOR_AMD:
4202 		create = cpi->cpi_family >= 0xf;
4203 		break;
4204 	default:
4205 		create = 0;
4206 		break;
4207 	}
4208 	if (create && cpi->cpi_brandid != 0) {
4209 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4210 		    "brand-id", cpi->cpi_brandid);
4211 	}
4212 
4213 	/* chunks, and apic-id */
4214 	switch (cpi->cpi_vendor) {
4215 		/*
4216 		 * first available on Pentium IV and Opteron (K8)
4217 		 */
4218 	case X86_VENDOR_Intel:
4219 		create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf;
4220 		break;
4221 	case X86_VENDOR_AMD:
4222 		create = cpi->cpi_family >= 0xf;
4223 		break;
4224 	default:
4225 		create = 0;
4226 		break;
4227 	}
4228 	if (create) {
4229 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4230 		    "chunks", CPI_CHUNKS(cpi));
4231 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4232 		    "apic-id", cpi->cpi_apicid);
4233 		if (cpi->cpi_chipid >= 0) {
4234 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4235 			    "chip#", cpi->cpi_chipid);
4236 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4237 			    "clog#", cpi->cpi_clogid);
4238 		}
4239 	}
4240 
4241 	/* cpuid-features */
4242 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4243 	    "cpuid-features", CPI_FEATURES_EDX(cpi));
4244 
4245 
4246 	/* cpuid-features-ecx */
4247 	switch (cpi->cpi_vendor) {
4248 	case X86_VENDOR_Intel:
4249 		create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf;
4250 		break;
4251 	case X86_VENDOR_AMD:
4252 		create = cpi->cpi_family >= 0xf;
4253 		break;
4254 	default:
4255 		create = 0;
4256 		break;
4257 	}
4258 	if (create)
4259 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4260 		    "cpuid-features-ecx", CPI_FEATURES_ECX(cpi));
4261 
4262 	/* ext-cpuid-features */
4263 	switch (cpi->cpi_vendor) {
4264 	case X86_VENDOR_Intel:
4265 	case X86_VENDOR_AMD:
4266 	case X86_VENDOR_Cyrix:
4267 	case X86_VENDOR_TM:
4268 	case X86_VENDOR_Centaur:
4269 		create = cpi->cpi_xmaxeax >= 0x80000001;
4270 		break;
4271 	default:
4272 		create = 0;
4273 		break;
4274 	}
4275 	if (create) {
4276 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4277 		    "ext-cpuid-features", CPI_FEATURES_XTD_EDX(cpi));
4278 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4279 		    "ext-cpuid-features-ecx", CPI_FEATURES_XTD_ECX(cpi));
4280 	}
4281 
4282 	/*
4283 	 * Brand String first appeared in Intel Pentium IV, AMD K5
4284 	 * model 1, and Cyrix GXm.  On earlier models we try to
4285 	 * simulate something similar .. so this string should always
4286 	 * say -something- about the processor, however lame.
4287 	 */
4288 	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
4289 	    "brand-string", cpi->cpi_brandstr);
4290 
4291 	/*
4292 	 * Finally, cache and tlb information
4293 	 */
4294 	switch (x86_which_cacheinfo(cpi)) {
4295 	case X86_VENDOR_Intel:
4296 		intel_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props);
4297 		break;
4298 	case X86_VENDOR_Cyrix:
4299 		cyrix_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props);
4300 		break;
4301 	case X86_VENDOR_AMD:
4302 		amd_cache_info(cpi, cpu_devi);
4303 		break;
4304 	default:
4305 		break;
4306 	}
4307 }
4308 
4309 struct l2info {
4310 	int *l2i_csz;
4311 	int *l2i_lsz;
4312 	int *l2i_assoc;
4313 	int l2i_ret;
4314 };
4315 
4316 /*
4317  * A cacheinfo walker that fetches the size, line-size and associativity
4318  * of the L2 cache
4319  */
4320 static int
4321 intel_l2cinfo(void *arg, const struct cachetab *ct)
4322 {
4323 	struct l2info *l2i = arg;
4324 	int *ip;
4325 
4326 	if (ct->ct_label != l2_cache_str &&
4327 	    ct->ct_label != sl2_cache_str)
4328 		return (0);	/* not an L2 -- keep walking */
4329 
4330 	if ((ip = l2i->l2i_csz) != NULL)
4331 		*ip = ct->ct_size;
4332 	if ((ip = l2i->l2i_lsz) != NULL)
4333 		*ip = ct->ct_line_size;
4334 	if ((ip = l2i->l2i_assoc) != NULL)
4335 		*ip = ct->ct_assoc;
4336 	l2i->l2i_ret = ct->ct_size;
4337 	return (1);		/* was an L2 -- terminate walk */
4338 }
4339 
4340 /*
4341  * AMD L2/L3 Cache and TLB Associativity Field Definition:
4342  *
4343 	 *	Unlike the L1 cache and tlb, where the 8-bit field is the
4344 	 *	associativity itself, the associativity for the L2 cache and
4345 	 *	tlb is encoded in a table. The 4-bit L2 value serves as an
4346 	 *	index into the amd_afd[] array to determine the associativity.
4347  *	-1 is undefined. 0 is fully associative.
4348  */
4349 
4350 static int amd_afd[] =
4351 	{-1, 1, 2, -1, 4, -1, 8, -1, 16, -1, 32, 48, 64, 96, 128, 0};
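/*
 * For example, an encoding of 6 decodes to 8-way, and an encoding of 0xf
 * decodes to 0 (fully associative).
 */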
4352 
4353 static void
4354 amd_l2cacheinfo(struct cpuid_info *cpi, struct l2info *l2i)
4355 {
4356 	struct cpuid_regs *cp;
4357 	uint_t size, assoc;
4358 	int i;
4359 	int *ip;
4360 
4361 	if (cpi->cpi_xmaxeax < 0x80000006)
4362 		return;
4363 	cp = &cpi->cpi_extd[6];
4364 
4365 	if ((i = BITX(cp->cp_ecx, 15, 12)) != 0 &&
4366 	    (size = BITX(cp->cp_ecx, 31, 16)) != 0) {
4367 		uint_t cachesz = size * 1024;
4368 		assoc = amd_afd[i];
4369 
4370 		ASSERT(assoc != -1);
4371 
4372 		if ((ip = l2i->l2i_csz) != NULL)
4373 			*ip = cachesz;
4374 		if ((ip = l2i->l2i_lsz) != NULL)
4375 			*ip = BITX(cp->cp_ecx, 7, 0);
4376 		if ((ip = l2i->l2i_assoc) != NULL)
4377 			*ip = assoc;
4378 		l2i->l2i_ret = cachesz;
4379 	}
4380 }
4381 
4382 int
4383 getl2cacheinfo(cpu_t *cpu, int *csz, int *lsz, int *assoc)
4384 {
4385 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
4386 	struct l2info __l2info, *l2i = &__l2info;
4387 
4388 	l2i->l2i_csz = csz;
4389 	l2i->l2i_lsz = lsz;
4390 	l2i->l2i_assoc = assoc;
4391 	l2i->l2i_ret = -1;
4392 
4393 	switch (x86_which_cacheinfo(cpi)) {
4394 	case X86_VENDOR_Intel:
4395 		intel_walk_cacheinfo(cpi, l2i, intel_l2cinfo);
4396 		break;
4397 	case X86_VENDOR_Cyrix:
4398 		cyrix_walk_cacheinfo(cpi, l2i, intel_l2cinfo);
4399 		break;
4400 	case X86_VENDOR_AMD:
4401 		amd_l2cacheinfo(cpi, l2i);
4402 		break;
4403 	default:
4404 		break;
4405 	}
4406 	return (l2i->l2i_ret);
4407 }
4408 
4409 #if !defined(__xpv)
4410 
4411 uint32_t *
4412 cpuid_mwait_alloc(cpu_t *cpu)
4413 {
4414 	uint32_t	*ret;
4415 	size_t		mwait_size;
4416 
4417 	ASSERT(cpuid_checkpass(CPU, 2));
4418 
4419 	mwait_size = CPU->cpu_m.mcpu_cpi->cpi_mwait.mon_max;
4420 	if (mwait_size == 0)
4421 		return (NULL);
4422 
4423 	/*
4424 	 * kmem_alloc() returns cache line size aligned data for mwait_size
4425 	 * allocations.  mwait_size is currently cache line sized.  Neither
4426 	 * of these implementation details is guaranteed to be true in the
4427 	 * future.
4428 	 *
4429 	 * First try allocating mwait_size, as kmem_alloc() currently returns
4430 	 * correctly aligned memory.  If that buffer is not mwait_size
4431 	 * aligned, allocate twice mwait_size and round up the pointer.
4432 	 *
4433 	 * Set cpi_mwait.buf_actual and cpi_mwait.size_actual in case we
4434 	 * decide to free this memory.
4435 	 */
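	/*
	 * Note that rounding the double-sized allocation up to the next
	 * mwait_size boundary advances the pointer by at most
	 * mwait_size - 1 bytes, so at least mwait_size usable bytes remain.
	 */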
4436 	ret = kmem_zalloc(mwait_size, KM_SLEEP);
4437 	if (ret == (uint32_t *)P2ROUNDUP((uintptr_t)ret, mwait_size)) {
4438 		cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = ret;
4439 		cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = mwait_size;
4440 		*ret = MWAIT_RUNNING;
4441 		return (ret);
4442 	} else {
4443 		kmem_free(ret, mwait_size);
4444 		ret = kmem_zalloc(mwait_size * 2, KM_SLEEP);
4445 		cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = ret;
4446 		cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = mwait_size * 2;
4447 		ret = (uint32_t *)P2ROUNDUP((uintptr_t)ret, mwait_size);
4448 		*ret = MWAIT_RUNNING;
4449 		return (ret);
4450 	}
4451 }
4452 
4453 void
4454 cpuid_mwait_free(cpu_t *cpu)
4455 {
4456 	if (cpu->cpu_m.mcpu_cpi == NULL) {
4457 		return;
4458 	}
4459 
4460 	if (cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual != NULL &&
4461 	    cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual > 0) {
4462 		kmem_free(cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual,
4463 		    cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual);
4464 	}
4465 
4466 	cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = NULL;
4467 	cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = 0;
4468 }
4469 
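/*
 * Replace the default tsc_read() text with the variant matching this
 * processor's TSC capabilities: no TSC at all, RDTSCP, or RDTSC preceded
 * by an MFENCE or LFENCE barrier.  Each variant is bracketed by the
 * corresponding _start/_end labels and is simply copied over tsc_read().
 */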
4470 void
4471 patch_tsc_read(int flag)
4472 {
4473 	size_t cnt;
4474 
4475 	switch (flag) {
4476 	case X86_NO_TSC:
4477 		cnt = &_no_rdtsc_end - &_no_rdtsc_start;
4478 		(void) memcpy((void *)tsc_read, (void *)&_no_rdtsc_start, cnt);
4479 		break;
4480 	case X86_HAVE_TSCP:
4481 		cnt = &_tscp_end - &_tscp_start;
4482 		(void) memcpy((void *)tsc_read, (void *)&_tscp_start, cnt);
4483 		break;
4484 	case X86_TSC_MFENCE:
4485 		cnt = &_tsc_mfence_end - &_tsc_mfence_start;
4486 		(void) memcpy((void *)tsc_read,
4487 		    (void *)&_tsc_mfence_start, cnt);
4488 		break;
4489 	case X86_TSC_LFENCE:
4490 		cnt = &_tsc_lfence_end - &_tsc_lfence_start;
4491 		(void) memcpy((void *)tsc_read,
4492 		    (void *)&_tsc_lfence_start, cnt);
4493 		break;
4494 	default:
4495 		break;
4496 	}
4497 }
4498 
4499 int
4500 cpuid_deep_cstates_supported(void)
4501 {
4502 	struct cpuid_info *cpi;
4503 	struct cpuid_regs regs;
4504 
4505 	ASSERT(cpuid_checkpass(CPU, 1));
4506 
4507 	cpi = CPU->cpu_m.mcpu_cpi;
4508 
4509 	if (!is_x86_feature(x86_featureset, X86FSET_CPUID))
4510 		return (0);
4511 
4512 	switch (cpi->cpi_vendor) {
4513 	case X86_VENDOR_Intel:
4514 		if (cpi->cpi_xmaxeax < 0x80000007)
4515 			return (0);
4516 
4517 		/*
4518 		 * Does the TSC run at a constant rate in all ACPI C-states?
4519 		 */
4520 		regs.cp_eax = 0x80000007;
4521 		(void) __cpuid_insn(&regs);
4522 		return (regs.cp_edx & CPUID_TSC_CSTATE_INVARIANCE);
4523 
4524 	default:
4525 		return (0);
4526 	}
4527 }
4528 
4529 #endif	/* !__xpv */
4530 
4531 void
4532 post_startup_cpu_fixups(void)
4533 {
4534 #ifndef __xpv
4535 	/*
4536 	 * Some AMD processors support C1E state. Entering this state will
4537 	 * cause the local APIC timer to stop, which we can't deal with at
4538 	 * this time.
4539 	 */
4540 	if (cpuid_getvendor(CPU) == X86_VENDOR_AMD) {
4541 		on_trap_data_t otd;
4542 		uint64_t reg;
4543 
4544 		if (!on_trap(&otd, OT_DATA_ACCESS)) {
4545 			reg = rdmsr(MSR_AMD_INT_PENDING_CMP_HALT);
4546 			/* Disable C1E state if it is enabled by BIOS */
4547 			if ((reg >> AMD_ACTONCMPHALT_SHIFT) &
4548 			    AMD_ACTONCMPHALT_MASK) {
4549 				reg &= ~(AMD_ACTONCMPHALT_MASK <<
4550 				    AMD_ACTONCMPHALT_SHIFT);
4551 				wrmsr(MSR_AMD_INT_PENDING_CMP_HALT, reg);
4552 			}
4553 		}
4554 		no_trap();
4555 	}
4556 #endif	/* !__xpv */
4557 }
4558 
4559 /*
4560  * Set up the necessary registers to enable the XSAVE feature on this
4561  * processor.  This function must be called early enough that no
4562  * xsave/xrstor ops execute on the processor before the MSRs are set up.
4563  *
4564  * The current implementation makes the following assumptions:
4565  * - cpuid_pass1() is done, so that X86 features are known.
4566  * - fpu_probe() is done, so that fp_save_mech is chosen.
4567  */
4568 void
4569 xsave_setup_msr(cpu_t *cpu)
4570 {
4571 	ASSERT(fp_save_mech == FP_XSAVE);
4572 	ASSERT(is_x86_feature(x86_featureset, X86FSET_XSAVE));
4573 
4574 	/* Enable OSXSAVE in CR4. */
4575 	setcr4(getcr4() | CR4_OSXSAVE);
4576 	/*
4577 	 * Update SW copy of ECX, so that /dev/cpu/self/cpuid will report
4578 	 * correct value.
4579 	 */
4580 	cpu->cpu_m.mcpu_cpi->cpi_std[1].cp_ecx |= CPUID_INTC_ECX_OSXSAVE;
4581 	setup_xfem();
4582 }
4583 
4584 /*
4585  * Starting with the Westmere processor, the local
4586  * APIC timer continues running in all C-states,
4587  * including the deepest ones.
4588  */
4589 int
4590 cpuid_arat_supported(void)
4591 {
4592 	struct cpuid_info *cpi;
4593 	struct cpuid_regs regs;
4594 
4595 	ASSERT(cpuid_checkpass(CPU, 1));
4596 	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
4597 
4598 	cpi = CPU->cpu_m.mcpu_cpi;
4599 
4600 	switch (cpi->cpi_vendor) {
4601 	case X86_VENDOR_Intel:
4602 		/*
4603 		 * Always-running Local APIC Timer is
4604 		 * indicated by CPUID.6.EAX[2].
4605 		 */
4606 		if (cpi->cpi_maxeax >= 6) {
4607 			regs.cp_eax = 6;
4608 			(void) cpuid_insn(NULL, &regs);
4609 			return (regs.cp_eax & CPUID_CSTATE_ARAT);
4610 		} else {
4611 			return (0);
4612 		}
4613 	default:
4614 		return (0);
4615 	}
4616 }
4617 
4618 /*
4619  * Check support for Intel ENERGY_PERF_BIAS feature
4620  */
4621 int
4622 cpuid_iepb_supported(struct cpu *cp)
4623 {
4624 	struct cpuid_info *cpi = cp->cpu_m.mcpu_cpi;
4625 	struct cpuid_regs regs;
4626 
4627 	ASSERT(cpuid_checkpass(cp, 1));
4628 
4629 	if (!(is_x86_feature(x86_featureset, X86FSET_CPUID)) ||
4630 	    !(is_x86_feature(x86_featureset, X86FSET_MSR))) {
4631 		return (0);
4632 	}
4633 
4634 	/*
4635 	 * Intel ENERGY_PERF_BIAS MSR is indicated by
4636 	 * capability bit CPUID.6.ECX.3
4637 	 */
4638 	if ((cpi->cpi_vendor != X86_VENDOR_Intel) || (cpi->cpi_maxeax < 6))
4639 		return (0);
4640 
4641 	regs.cp_eax = 0x6;
4642 	(void) cpuid_insn(NULL, &regs);
4643 	return (regs.cp_ecx & CPUID_EPB_SUPPORT);
4644 }
4645 
4646 /*
4647  * Check support for TSC deadline timer
4648  *
4649  * The TSC deadline timer provides a superior software programming
4650  * model over the local APIC timer in that it eliminates "time drift":
4651  * instead of specifying a relative time, software specifies an
4652  * absolute time as the target at which the processor should
4653  * generate a timer event.
4654  */
4655 int
4656 cpuid_deadline_tsc_supported(void)
4657 {
4658 	struct cpuid_info *cpi = CPU->cpu_m.mcpu_cpi;
4659 	struct cpuid_regs regs;
4660 
4661 	ASSERT(cpuid_checkpass(CPU, 1));
4662 	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
4663 
4664 	switch (cpi->cpi_vendor) {
4665 	case X86_VENDOR_Intel:
4666 		if (cpi->cpi_maxeax >= 1) {
4667 			regs.cp_eax = 1;
4668 			(void) cpuid_insn(NULL, &regs);
4669 			return (regs.cp_ecx & CPUID_DEADLINE_TSC);
4670 		} else {
4671 			return (0);
4672 		}
4673 	default:
4674 		return (0);
4675 	}
4676 }
4677 
4678 #if defined(__amd64) && !defined(__xpv)
4679 /*
4680  * Patch in versions of bcopy for high-performance Intel Nehalem (Nhm)
4681  * processors and later...
4682  */
4683 void
4684 patch_memops(uint_t vendor)
4685 {
4686 	size_t cnt, i;
4687 	caddr_t to, from;
4688 
4689 	if ((vendor == X86_VENDOR_Intel) &&
4690 	    is_x86_feature(x86_featureset, X86FSET_SSE4_2)) {
4691 		cnt = &bcopy_patch_end - &bcopy_patch_start;
4692 		to = &bcopy_ck_size;
4693 		from = &bcopy_patch_start;
4694 		for (i = 0; i < cnt; i++) {
4695 			*to++ = *from++;
4696 		}
4697 	}
4698 }
4699 #endif  /* __amd64 && !__xpv */
4700 
4701 /*
4702  * This function finds the number of bits needed to represent the number of
4703  * cores per chip and the number of strands per core on Intel platforms.
4704  * It reuses the x2APIC cpuid code of cpuid_pass2().
4705  */
4706 void
4707 cpuid_get_ext_topo(uint_t vendor, uint_t *core_nbits, uint_t *strand_nbits)
4708 {
4709 	struct cpuid_regs regs;
4710 	struct cpuid_regs *cp = &regs;
4711 
4712 	if (vendor != X86_VENDOR_Intel) {
4713 		return;
4714 	}
4715 
4716 	/* Extended topo is available if the max cpuid leaf is >= 0xB. */
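	/*
	 * Leaf 0xB sub-leaf layout assumed below: %eax[4:0] is the number
	 * of bits to shift the x2APIC ID right to reach the next topology
	 * level, and the level type extracted by CPI_CPU_LEVEL_TYPE() is
	 * 1 for the thread (SMT) level and 2 for the core level.
	 */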
4717 	cp->cp_eax = 0;
4718 	if (__cpuid_insn(cp) >= 0xB) {
4719 
4720 		cp->cp_eax = 0xB;
4721 		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
4722 		(void) __cpuid_insn(cp);
4723 
4724 		/*
4725 		 * Check that CPUID.(EAX=0BH, ECX=0H):EBX is non-zero, which
4726 		 * indicates that the extended topology enumeration leaf is
4727 		 * available.
4728 		 */
4729 		if (cp->cp_ebx) {
4730 			uint_t coreid_shift = 0;
4731 			uint_t chipid_shift = 0;
4732 			uint_t i;
4733 			uint_t level;
4734 
4735 			for (i = 0; i < CPI_FNB_ECX_MAX; i++) {
4736 				cp->cp_eax = 0xB;
4737 				cp->cp_ecx = i;
4738 
4739 				(void) __cpuid_insn(cp);
4740 				level = CPI_CPU_LEVEL_TYPE(cp);
4741 
4742 				if (level == 1) {
4743 					/*
4744 					 * Thread level processor topology:
4745 					 * number of bits to shift the APIC
4746 					 * ID right to get the coreid.
4747 					 */
4748 					coreid_shift = BITX(cp->cp_eax, 4, 0);
4749 				} else if (level == 2) {
4750 					/*
4751 					 * Core level processor topology:
4752 					 * number of bits to shift the APIC
4753 					 * ID right to get the chipid.
4754 					 */
4755 					chipid_shift = BITX(cp->cp_eax, 4, 0);
4756 				}
4757 			}
4758 
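			/*
			 * The thread-level shift counts the strand bits;
			 * the core-level shift covers strand plus core
			 * bits, so the difference is the core-bit width.
			 */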
4759 			if (coreid_shift > 0 && chipid_shift > coreid_shift) {
4760 				*strand_nbits = coreid_shift;
4761 				*core_nbits = chipid_shift - coreid_shift;
4762 			}
4763 		}
4764 	}
4765 }
4766