xref: /titanic_44/usr/src/uts/i86pc/os/cpuid.c (revision f971a3462face662ae8ef220a18a98354d625d54)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2011 by Delphix. All rights reserved.
24  */
25 /*
26  * Copyright (c) 2010, Intel Corporation.
27  * All rights reserved.
28  */
29 /*
30  * Portions Copyright 2009 Advanced Micro Devices, Inc.
31  */
32 /*
33  * Copyright (c) 2012, Joyent, Inc. All rights reserved.
34  */
35 /*
36  * Various routines to handle identification
37  * and classification of x86 processors.
38  */
39 
40 #include <sys/types.h>
41 #include <sys/archsystm.h>
42 #include <sys/x86_archext.h>
43 #include <sys/kmem.h>
44 #include <sys/systm.h>
45 #include <sys/cmn_err.h>
46 #include <sys/sunddi.h>
47 #include <sys/sunndi.h>
48 #include <sys/cpuvar.h>
49 #include <sys/processor.h>
50 #include <sys/sysmacros.h>
51 #include <sys/pg.h>
52 #include <sys/fp.h>
53 #include <sys/controlregs.h>
54 #include <sys/bitmap.h>
55 #include <sys/auxv_386.h>
56 #include <sys/memnode.h>
57 #include <sys/pci_cfgspace.h>
58 
59 #ifdef __xpv
60 #include <sys/hypervisor.h>
61 #else
62 #include <sys/ontrap.h>
63 #endif
64 
65 /*
66  * Pass 0 of cpuid feature analysis happens in locore. It contains special code
67  * to recognize Cyrix processors that are not cpuid-compliant, and to deal with
68  * them accordingly. For most modern processors, feature detection occurs here
69  * in pass 1.
70  *
71  * Pass 1 of cpuid feature analysis happens just at the beginning of mlsetup()
72  * for the boot CPU and does the basic analysis that the early kernel needs.
73  * x86_featureset is set based on the return value of cpuid_pass1() of the boot
74  * CPU.
75  *
76  * Pass 1 includes:
77  *
78  *	o Determining vendor/model/family/stepping and setting x86_type and
79  *	  x86_vendor accordingly.
80  *	o Processing the feature flags returned by the cpuid instruction while
81  *	  applying any workarounds or tricks for the specific processor.
82  *	o Mapping the feature flags into Solaris feature bits (X86_*).
83  *	o Processing extended feature flags if supported by the processor,
84  *	  again while applying specific processor knowledge.
85  *	o Determining the CMT characteristics of the system.
86  *
87  * Pass 1 is done on non-boot CPUs during their initialization and the results
88  * are used only as a meager attempt at ensuring that all processors within the
89  * system support the same features.
90  *
91  * Pass 2 of cpuid feature analysis happens just at the beginning
92  * of startup().  It just copies in and corrects the remainder
93  * of the cpuid data we depend on: standard cpuid functions that we didn't
94  * need for pass1 feature analysis, and extended cpuid functions beyond the
95  * simple feature processing done in pass1.
96  *
97  * Pass 3 of cpuid analysis is invoked after basic kernel services; in
98  * particular kernel memory allocation has been made available. It creates a
99  * readable brand string based on the data collected in the first two passes.
100  *
101  * Pass 4 of cpuid analysis is invoked after post_startup() when all
102  * the support infrastructure for various hardware features has been
103  * initialized. It determines which processor features will be reported
104  * to userland via the aux vector.
105  *
106  * All passes are executed on all CPUs, but only the boot CPU determines what
107  * features the kernel will use.
108  *
109  * Much of the worst junk in this file is for the support of processors
110  * that didn't really implement the cpuid instruction properly.
111  *
112  * NOTE: The accessor functions (cpuid_get*) are aware of, and ASSERT upon,
113  * the pass numbers.  Accordingly, changes to the pass code may require changes
114  * to the accessor code.
115  */
116 
117 uint_t x86_vendor = X86_VENDOR_IntelClone;
118 uint_t x86_type = X86_TYPE_OTHER;
119 uint_t x86_clflush_size = 0;
120 
121 uint_t pentiumpro_bug4046376;
122 uint_t pentiumpro_bug4064495;
123 
124 uchar_t x86_featureset[BT_SIZEOFMAP(NUM_X86_FEATURES)];
125 
126 static char *x86_feature_names[NUM_X86_FEATURES] = {
127 	"lgpg",
128 	"tsc",
129 	"msr",
130 	"mtrr",
131 	"pge",
132 	"de",
133 	"cmov",
134 	"mmx",
135 	"mca",
136 	"pae",
137 	"cv8",
138 	"pat",
139 	"sep",
140 	"sse",
141 	"sse2",
142 	"htt",
143 	"asysc",
144 	"nx",
145 	"sse3",
146 	"cx16",
147 	"cmp",
148 	"tscp",
149 	"mwait",
150 	"sse4a",
151 	"cpuid",
152 	"ssse3",
153 	"sse4_1",
154 	"sse4_2",
155 	"1gpg",
156 	"clfsh",
157 	"64",
158 	"aes",
159 	"pclmulqdq",
160 	"xsave",
161 	"avx",
162 	"vmx",
163 	"svm",
164 	"topoext",
165 	"f16c",
166 	"rdrand"
167 };
168 
169 boolean_t
170 is_x86_feature(void *featureset, uint_t feature)
171 {
172 	ASSERT(feature < NUM_X86_FEATURES);
173 	return (BT_TEST((ulong_t *)featureset, feature));
174 }
175 
176 void
177 add_x86_feature(void *featureset, uint_t feature)
178 {
179 	ASSERT(feature < NUM_X86_FEATURES);
180 	BT_SET((ulong_t *)featureset, feature);
181 }
182 
183 void
184 remove_x86_feature(void *featureset, uint_t feature)
185 {
186 	ASSERT(feature < NUM_X86_FEATURES);
187 	BT_CLEAR((ulong_t *)featureset, feature);
188 }
189 
190 boolean_t
191 compare_x86_featureset(void *setA, void *setB)
192 {
193 	/*
194 	 * We assume that the unused bits of the bitmap are always zero.
195 	 */
196 	if (memcmp(setA, setB, BT_SIZEOFMAP(NUM_X86_FEATURES)) == 0) {
197 		return (B_TRUE);
198 	} else {
199 		return (B_FALSE);
200 	}
201 }
202 
203 void
204 print_x86_featureset(void *featureset)
205 {
206 	uint_t i;
207 
208 	for (i = 0; i < NUM_X86_FEATURES; i++) {
209 		if (is_x86_feature(featureset, i)) {
210 			cmn_err(CE_CONT, "?x86_feature: %s\n",
211 			    x86_feature_names[i]);
212 		}
213 	}
214 }
215 
216 uint_t enable486;
217 
218 static size_t xsave_state_size = 0;
219 uint64_t xsave_bv_all = (XFEATURE_LEGACY_FP | XFEATURE_SSE);
220 boolean_t xsave_force_disable = B_FALSE;
221 
222 /*
223  * This is set to platform type Solaris is running on.
224  */
225 static int platform_type = -1;
226 
227 #if !defined(__xpv)
228 /*
229  * Variable to patch if hypervisor platform detection needs to be
230  * disabled (e.g. platform_type will always be HW_NATIVE if this is 0).
231  */
232 int enable_platform_detection = 1;
233 #endif
234 
235 /*
236  * monitor/mwait info.
237  *
238  * size_actual and buf_actual are the real address and size allocated to get
239  * proper mwait_buf alignement.  buf_actual and size_actual should be passed
240  * to kmem_free().  Currently kmem_alloc() and mwait happen to both use
241  * processor cache-line alignment, but this is not guarantied in the furture.
242  */
243 struct mwait_info {
244 	size_t		mon_min;	/* min size to avoid missed wakeups */
245 	size_t		mon_max;	/* size to avoid false wakeups */
246 	size_t		size_actual;	/* size actually allocated */
247 	void		*buf_actual;	/* memory actually allocated */
248 	uint32_t	support;	/* processor support of monitor/mwait */
249 };
250 
251 /*
252  * xsave/xrestor info.
253  *
254  * This structure contains HW feature bits and size of the xsave save area.
255  * Note: the kernel will use the maximum size required for all hardware
256  * features. It is not optimize for potential memory savings if features at
257  * the end of the save area are not enabled.
258  */
259 struct xsave_info {
260 	uint32_t	xsav_hw_features_low;   /* Supported HW features */
261 	uint32_t	xsav_hw_features_high;  /* Supported HW features */
262 	size_t		xsav_max_size;  /* max size save area for HW features */
263 	size_t		ymm_size;	/* AVX: size of ymm save area */
264 	size_t		ymm_offset;	/* AVX: offset for ymm save area */
265 };
266 
267 
268 /*
269  * These constants determine how many of the elements of the
270  * cpuid we cache in the cpuid_info data structure; the
271  * remaining elements are accessible via the cpuid instruction.
272  */
273 
274 #define	NMAX_CPI_STD	6		/* eax = 0 .. 5 */
275 #define	NMAX_CPI_EXTD	0x1f		/* eax = 0x80000000 .. 0x8000001e */
276 
277 /*
278  * Some terminology needs to be explained:
279  *  - Socket: Something that can be plugged into a motherboard.
280  *  - Package: Same as socket
281  *  - Chip: Same as socket. Note that AMD's documentation uses term "chip"
282  *    differently: there, chip is the same as processor node (below)
283  *  - Processor node: Some AMD processors have more than one
284  *    "subprocessor" embedded in a package. These subprocessors (nodes)
285  *    are fully-functional processors themselves with cores, caches,
286  *    memory controllers, PCI configuration spaces. They are connected
287  *    inside the package with Hypertransport links. On single-node
288  *    processors, processor node is equivalent to chip/socket/package.
289  *  - Compute Unit: Some AMD processors pair cores in "compute units" that
290  *    share the FPU and the I$ and L2 caches.
291  */
292 
293 struct cpuid_info {
294 	uint_t cpi_pass;		/* last pass completed */
295 	/*
296 	 * standard function information
297 	 */
298 	uint_t cpi_maxeax;		/* fn 0: %eax */
299 	char cpi_vendorstr[13];		/* fn 0: %ebx:%ecx:%edx */
300 	uint_t cpi_vendor;		/* enum of cpi_vendorstr */
301 
302 	uint_t cpi_family;		/* fn 1: extended family */
303 	uint_t cpi_model;		/* fn 1: extended model */
304 	uint_t cpi_step;		/* fn 1: stepping */
305 	chipid_t cpi_chipid;		/* fn 1: %ebx:  Intel: chip # */
306 					/*		AMD: package/socket # */
307 	uint_t cpi_brandid;		/* fn 1: %ebx: brand ID */
308 	int cpi_clogid;			/* fn 1: %ebx: thread # */
309 	uint_t cpi_ncpu_per_chip;	/* fn 1: %ebx: logical cpu count */
310 	uint8_t cpi_cacheinfo[16];	/* fn 2: intel-style cache desc */
311 	uint_t cpi_ncache;		/* fn 2: number of elements */
312 	uint_t cpi_ncpu_shr_last_cache;	/* fn 4: %eax: ncpus sharing cache */
313 	id_t cpi_last_lvl_cacheid;	/* fn 4: %eax: derived cache id */
314 	uint_t cpi_std_4_size;		/* fn 4: number of fn 4 elements */
315 	struct cpuid_regs **cpi_std_4;	/* fn 4: %ecx == 0 .. fn4_size */
316 	struct cpuid_regs cpi_std[NMAX_CPI_STD];	/* 0 .. 5 */
317 	/*
318 	 * extended function information
319 	 */
320 	uint_t cpi_xmaxeax;		/* fn 0x80000000: %eax */
321 	char cpi_brandstr[49];		/* fn 0x8000000[234] */
322 	uint8_t cpi_pabits;		/* fn 0x80000006: %eax */
323 	uint8_t	cpi_vabits;		/* fn 0x80000006: %eax */
324 	struct	cpuid_regs cpi_extd[NMAX_CPI_EXTD];	/* 0x800000XX */
325 
326 	id_t cpi_coreid;		/* same coreid => strands share core */
327 	int cpi_pkgcoreid;		/* core number within single package */
328 	uint_t cpi_ncore_per_chip;	/* AMD: fn 0x80000008: %ecx[7-0] */
329 					/* Intel: fn 4: %eax[31-26] */
330 	/*
331 	 * supported feature information
332 	 */
333 	uint32_t cpi_support[5];
334 #define	STD_EDX_FEATURES	0
335 #define	AMD_EDX_FEATURES	1
336 #define	TM_EDX_FEATURES		2
337 #define	STD_ECX_FEATURES	3
338 #define	AMD_ECX_FEATURES	4
339 	/*
340 	 * Synthesized information, where known.
341 	 */
342 	uint32_t cpi_chiprev;		/* See X86_CHIPREV_* in x86_archext.h */
343 	const char *cpi_chiprevstr;	/* May be NULL if chiprev unknown */
344 	uint32_t cpi_socket;		/* Chip package/socket type */
345 
346 	struct mwait_info cpi_mwait;	/* fn 5: monitor/mwait info */
347 	uint32_t cpi_apicid;
348 	uint_t cpi_procnodeid;		/* AMD: nodeID on HT, Intel: chipid */
349 	uint_t cpi_procnodes_per_pkg;	/* AMD: # of nodes in the package */
350 					/* Intel: 1 */
351 	uint_t cpi_compunitid;		/* AMD: ComputeUnit ID, Intel: coreid */
352 	uint_t cpi_cores_per_compunit;	/* AMD: # of cores in the ComputeUnit */
353 
354 	struct xsave_info cpi_xsave;	/* fn D: xsave/xrestor info */
355 };
356 
357 
358 static struct cpuid_info cpuid_info0;
359 
360 /*
361  * These bit fields are defined by the Intel Application Note AP-485
362  * "Intel Processor Identification and the CPUID Instruction"
363  */
364 #define	CPI_FAMILY_XTD(cpi)	BITX((cpi)->cpi_std[1].cp_eax, 27, 20)
365 #define	CPI_MODEL_XTD(cpi)	BITX((cpi)->cpi_std[1].cp_eax, 19, 16)
366 #define	CPI_TYPE(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 13, 12)
367 #define	CPI_FAMILY(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 11, 8)
368 #define	CPI_STEP(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 3, 0)
369 #define	CPI_MODEL(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 7, 4)
370 
371 #define	CPI_FEATURES_EDX(cpi)		((cpi)->cpi_std[1].cp_edx)
372 #define	CPI_FEATURES_ECX(cpi)		((cpi)->cpi_std[1].cp_ecx)
373 #define	CPI_FEATURES_XTD_EDX(cpi)	((cpi)->cpi_extd[1].cp_edx)
374 #define	CPI_FEATURES_XTD_ECX(cpi)	((cpi)->cpi_extd[1].cp_ecx)
375 
376 #define	CPI_BRANDID(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 7, 0)
377 #define	CPI_CHUNKS(cpi)		BITX((cpi)->cpi_std[1].cp_ebx, 15, 7)
378 #define	CPI_CPU_COUNT(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 23, 16)
379 #define	CPI_APIC_ID(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 31, 24)
380 
381 #define	CPI_MAXEAX_MAX		0x100		/* sanity control */
382 #define	CPI_XMAXEAX_MAX		0x80000100
383 #define	CPI_FN4_ECX_MAX		0x20		/* sanity: max fn 4 levels */
384 #define	CPI_FNB_ECX_MAX		0x20		/* sanity: max fn B levels */
385 
386 /*
387  * Function 4 (Deterministic Cache Parameters) macros
388  * Defined by Intel Application Note AP-485
389  */
390 #define	CPI_NUM_CORES(regs)		BITX((regs)->cp_eax, 31, 26)
391 #define	CPI_NTHR_SHR_CACHE(regs)	BITX((regs)->cp_eax, 25, 14)
392 #define	CPI_FULL_ASSOC_CACHE(regs)	BITX((regs)->cp_eax, 9, 9)
393 #define	CPI_SELF_INIT_CACHE(regs)	BITX((regs)->cp_eax, 8, 8)
394 #define	CPI_CACHE_LVL(regs)		BITX((regs)->cp_eax, 7, 5)
395 #define	CPI_CACHE_TYPE(regs)		BITX((regs)->cp_eax, 4, 0)
396 #define	CPI_CPU_LEVEL_TYPE(regs)	BITX((regs)->cp_ecx, 15, 8)
397 
398 #define	CPI_CACHE_WAYS(regs)		BITX((regs)->cp_ebx, 31, 22)
399 #define	CPI_CACHE_PARTS(regs)		BITX((regs)->cp_ebx, 21, 12)
400 #define	CPI_CACHE_COH_LN_SZ(regs)	BITX((regs)->cp_ebx, 11, 0)
401 
402 #define	CPI_CACHE_SETS(regs)		BITX((regs)->cp_ecx, 31, 0)
403 
404 #define	CPI_PREFCH_STRIDE(regs)		BITX((regs)->cp_edx, 9, 0)
405 
406 
407 /*
408  * A couple of shorthand macros to identify "later" P6-family chips
409  * like the Pentium M and Core.  First, the "older" P6-based stuff
410  * (loosely defined as "pre-Pentium-4"):
411  * P6, PII, Mobile PII, PII Xeon, PIII, Mobile PIII, PIII Xeon
412  */
413 
414 #define	IS_LEGACY_P6(cpi) (			\
415 	cpi->cpi_family == 6 && 		\
416 		(cpi->cpi_model == 1 ||		\
417 		cpi->cpi_model == 3 ||		\
418 		cpi->cpi_model == 5 ||		\
419 		cpi->cpi_model == 6 ||		\
420 		cpi->cpi_model == 7 ||		\
421 		cpi->cpi_model == 8 ||		\
422 		cpi->cpi_model == 0xA ||	\
423 		cpi->cpi_model == 0xB)		\
424 )
425 
426 /* A "new F6" is everything with family 6 that's not the above */
427 #define	IS_NEW_F6(cpi) ((cpi->cpi_family == 6) && !IS_LEGACY_P6(cpi))
428 
429 /* Extended family/model support */
430 #define	IS_EXTENDED_MODEL_INTEL(cpi) (cpi->cpi_family == 0x6 || \
431 	cpi->cpi_family >= 0xf)
432 
433 /*
434  * Info for monitor/mwait idle loop.
435  *
436  * See cpuid section of "Intel 64 and IA-32 Architectures Software Developer's
437  * Manual Volume 2A: Instruction Set Reference, A-M" #25366-022US, November
438  * 2006.
439  * See MONITOR/MWAIT section of "AMD64 Architecture Programmer's Manual
440  * Documentation Updates" #33633, Rev 2.05, December 2006.
441  */
442 #define	MWAIT_SUPPORT		(0x00000001)	/* mwait supported */
443 #define	MWAIT_EXTENSIONS	(0x00000002)	/* extenstion supported */
444 #define	MWAIT_ECX_INT_ENABLE	(0x00000004)	/* ecx 1 extension supported */
445 #define	MWAIT_SUPPORTED(cpi)	((cpi)->cpi_std[1].cp_ecx & CPUID_INTC_ECX_MON)
446 #define	MWAIT_INT_ENABLE(cpi)	((cpi)->cpi_std[5].cp_ecx & 0x2)
447 #define	MWAIT_EXTENSION(cpi)	((cpi)->cpi_std[5].cp_ecx & 0x1)
448 #define	MWAIT_SIZE_MIN(cpi)	BITX((cpi)->cpi_std[5].cp_eax, 15, 0)
449 #define	MWAIT_SIZE_MAX(cpi)	BITX((cpi)->cpi_std[5].cp_ebx, 15, 0)
450 /*
451  * Number of sub-cstates for a given c-state.
452  */
453 #define	MWAIT_NUM_SUBC_STATES(cpi, c_state)			\
454 	BITX((cpi)->cpi_std[5].cp_edx, c_state + 3, c_state)
455 
456 /*
457  * XSAVE leaf 0xD enumeration
458  */
459 #define	CPUID_LEAFD_2_YMM_OFFSET	576
460 #define	CPUID_LEAFD_2_YMM_SIZE		256
461 
462 /*
463  * Functions we consune from cpuid_subr.c;  don't publish these in a header
464  * file to try and keep people using the expected cpuid_* interfaces.
465  */
466 extern uint32_t _cpuid_skt(uint_t, uint_t, uint_t, uint_t);
467 extern const char *_cpuid_sktstr(uint_t, uint_t, uint_t, uint_t);
468 extern uint32_t _cpuid_chiprev(uint_t, uint_t, uint_t, uint_t);
469 extern const char *_cpuid_chiprevstr(uint_t, uint_t, uint_t, uint_t);
470 extern uint_t _cpuid_vendorstr_to_vendorcode(char *);
471 
472 /*
473  * Apply up various platform-dependent restrictions where the
474  * underlying platform restrictions mean the CPU can be marked
475  * as less capable than its cpuid instruction would imply.
476  */
477 #if defined(__xpv)
478 static void
479 platform_cpuid_mangle(uint_t vendor, uint32_t eax, struct cpuid_regs *cp)
480 {
481 	switch (eax) {
482 	case 1: {
483 		uint32_t mcamask = DOMAIN_IS_INITDOMAIN(xen_info) ?
484 		    0 : CPUID_INTC_EDX_MCA;
485 		cp->cp_edx &=
486 		    ~(mcamask |
487 		    CPUID_INTC_EDX_PSE |
488 		    CPUID_INTC_EDX_VME | CPUID_INTC_EDX_DE |
489 		    CPUID_INTC_EDX_SEP | CPUID_INTC_EDX_MTRR |
490 		    CPUID_INTC_EDX_PGE | CPUID_INTC_EDX_PAT |
491 		    CPUID_AMD_EDX_SYSC | CPUID_INTC_EDX_SEP |
492 		    CPUID_INTC_EDX_PSE36 | CPUID_INTC_EDX_HTT);
493 		break;
494 	}
495 
496 	case 0x80000001:
497 		cp->cp_edx &=
498 		    ~(CPUID_AMD_EDX_PSE |
499 		    CPUID_INTC_EDX_VME | CPUID_INTC_EDX_DE |
500 		    CPUID_AMD_EDX_MTRR | CPUID_AMD_EDX_PGE |
501 		    CPUID_AMD_EDX_PAT | CPUID_AMD_EDX_PSE36 |
502 		    CPUID_AMD_EDX_SYSC | CPUID_INTC_EDX_SEP |
503 		    CPUID_AMD_EDX_TSCP);
504 		cp->cp_ecx &= ~CPUID_AMD_ECX_CMP_LGCY;
505 		break;
506 	default:
507 		break;
508 	}
509 
510 	switch (vendor) {
511 	case X86_VENDOR_Intel:
512 		switch (eax) {
513 		case 4:
514 			/*
515 			 * Zero out the (ncores-per-chip - 1) field
516 			 */
517 			cp->cp_eax &= 0x03fffffff;
518 			break;
519 		default:
520 			break;
521 		}
522 		break;
523 	case X86_VENDOR_AMD:
524 		switch (eax) {
525 
526 		case 0x80000001:
527 			cp->cp_ecx &= ~CPUID_AMD_ECX_CR8D;
528 			break;
529 
530 		case 0x80000008:
531 			/*
532 			 * Zero out the (ncores-per-chip - 1) field
533 			 */
534 			cp->cp_ecx &= 0xffffff00;
535 			break;
536 		default:
537 			break;
538 		}
539 		break;
540 	default:
541 		break;
542 	}
543 }
544 #else
545 #define	platform_cpuid_mangle(vendor, eax, cp)	/* nothing */
546 #endif
547 
548 /*
549  *  Some undocumented ways of patching the results of the cpuid
550  *  instruction to permit running Solaris 10 on future cpus that
551  *  we don't currently support.  Could be set to non-zero values
552  *  via settings in eeprom.
553  */
554 
555 uint32_t cpuid_feature_ecx_include;
556 uint32_t cpuid_feature_ecx_exclude;
557 uint32_t cpuid_feature_edx_include;
558 uint32_t cpuid_feature_edx_exclude;
559 
560 /*
561  * Allocate space for mcpu_cpi in the machcpu structure for all non-boot CPUs.
562  */
563 void
564 cpuid_alloc_space(cpu_t *cpu)
565 {
566 	/*
567 	 * By convention, cpu0 is the boot cpu, which is set up
568 	 * before memory allocation is available.  All other cpus get
569 	 * their cpuid_info struct allocated here.
570 	 */
571 	ASSERT(cpu->cpu_id != 0);
572 	ASSERT(cpu->cpu_m.mcpu_cpi == NULL);
573 	cpu->cpu_m.mcpu_cpi =
574 	    kmem_zalloc(sizeof (*cpu->cpu_m.mcpu_cpi), KM_SLEEP);
575 }
576 
577 void
578 cpuid_free_space(cpu_t *cpu)
579 {
580 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
581 	int i;
582 
583 	ASSERT(cpi != NULL);
584 	ASSERT(cpi != &cpuid_info0);
585 
586 	/*
587 	 * Free up any function 4 related dynamic storage
588 	 */
589 	for (i = 1; i < cpi->cpi_std_4_size; i++)
590 		kmem_free(cpi->cpi_std_4[i], sizeof (struct cpuid_regs));
591 	if (cpi->cpi_std_4_size > 0)
592 		kmem_free(cpi->cpi_std_4,
593 		    cpi->cpi_std_4_size * sizeof (struct cpuid_regs *));
594 
595 	kmem_free(cpi, sizeof (*cpi));
596 	cpu->cpu_m.mcpu_cpi = NULL;
597 }
598 
599 #if !defined(__xpv)
600 
601 /*
602  * Determine the type of the underlying platform. This is used to customize
603  * initialization of various subsystems (e.g. TSC). determine_platform() must
604  * only ever be called once to prevent two processors from seeing different
605  * values of platform_type, it must be called before cpuid_pass1(), the
606  * earliest consumer to execute.
607  */
608 void
609 determine_platform(void)
610 {
611 	struct cpuid_regs cp;
612 	char *xen_str;
613 	uint32_t xen_signature[4], base;
614 
615 	ASSERT(platform_type == -1);
616 
617 	platform_type = HW_NATIVE;
618 
619 	if (!enable_platform_detection)
620 		return;
621 
622 	/*
623 	 * In a fully virtualized domain, Xen's pseudo-cpuid function
624 	 * returns a string representing the Xen signature in %ebx, %ecx,
625 	 * and %edx. %eax contains the maximum supported cpuid function.
626 	 * We need at least a (base + 2) leaf value to do what we want
627 	 * to do. Try different base values, since the hypervisor might
628 	 * use a different one depending on whether hyper-v emulation
629 	 * is switched on by default or not.
630 	 */
631 	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
632 		cp.cp_eax = base;
633 		(void) __cpuid_insn(&cp);
634 		xen_signature[0] = cp.cp_ebx;
635 		xen_signature[1] = cp.cp_ecx;
636 		xen_signature[2] = cp.cp_edx;
637 		xen_signature[3] = 0;
638 		xen_str = (char *)xen_signature;
639 		if (strcmp("XenVMMXenVMM", xen_str) == 0 &&
640 		    cp.cp_eax >= (base + 2)) {
641 			platform_type = HW_XEN_HVM;
642 			return;
643 		}
644 	}
645 
646 	if (vmware_platform()) /* running under vmware hypervisor? */
647 		platform_type = HW_VMWARE;
648 }
649 
650 int
651 get_hwenv(void)
652 {
653 	ASSERT(platform_type != -1);
654 	return (platform_type);
655 }
656 
657 int
658 is_controldom(void)
659 {
660 	return (0);
661 }
662 
663 #else
664 
665 int
666 get_hwenv(void)
667 {
668 	return (HW_XEN_PV);
669 }
670 
671 int
672 is_controldom(void)
673 {
674 	return (DOMAIN_IS_INITDOMAIN(xen_info));
675 }
676 
677 #endif	/* __xpv */
678 
679 static void
680 cpuid_intel_getids(cpu_t *cpu, void *feature)
681 {
682 	uint_t i;
683 	uint_t chipid_shift = 0;
684 	uint_t coreid_shift = 0;
685 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
686 
687 	for (i = 1; i < cpi->cpi_ncpu_per_chip; i <<= 1)
688 		chipid_shift++;
689 
690 	cpi->cpi_chipid = cpi->cpi_apicid >> chipid_shift;
691 	cpi->cpi_clogid = cpi->cpi_apicid & ((1 << chipid_shift) - 1);
692 
693 	if (is_x86_feature(feature, X86FSET_CMP)) {
694 		/*
695 		 * Multi-core (and possibly multi-threaded)
696 		 * processors.
697 		 */
698 		uint_t ncpu_per_core;
699 		if (cpi->cpi_ncore_per_chip == 1)
700 			ncpu_per_core = cpi->cpi_ncpu_per_chip;
701 		else if (cpi->cpi_ncore_per_chip > 1)
702 			ncpu_per_core = cpi->cpi_ncpu_per_chip /
703 			    cpi->cpi_ncore_per_chip;
704 		/*
705 		 * 8bit APIC IDs on dual core Pentiums
706 		 * look like this:
707 		 *
708 		 * +-----------------------+------+------+
709 		 * | Physical Package ID   |  MC  |  HT  |
710 		 * +-----------------------+------+------+
711 		 * <------- chipid -------->
712 		 * <------- coreid --------------->
713 		 *			   <--- clogid -->
714 		 *			   <------>
715 		 *			   pkgcoreid
716 		 *
717 		 * Where the number of bits necessary to
718 		 * represent MC and HT fields together equals
719 		 * to the minimum number of bits necessary to
720 		 * store the value of cpi->cpi_ncpu_per_chip.
721 		 * Of those bits, the MC part uses the number
722 		 * of bits necessary to store the value of
723 		 * cpi->cpi_ncore_per_chip.
724 		 */
725 		for (i = 1; i < ncpu_per_core; i <<= 1)
726 			coreid_shift++;
727 		cpi->cpi_coreid = cpi->cpi_apicid >> coreid_shift;
728 		cpi->cpi_pkgcoreid = cpi->cpi_clogid >> coreid_shift;
729 	} else if (is_x86_feature(feature, X86FSET_HTT)) {
730 		/*
731 		 * Single-core multi-threaded processors.
732 		 */
733 		cpi->cpi_coreid = cpi->cpi_chipid;
734 		cpi->cpi_pkgcoreid = 0;
735 	}
736 	cpi->cpi_procnodeid = cpi->cpi_chipid;
737 	cpi->cpi_compunitid = cpi->cpi_coreid;
738 }
739 
740 static void
741 cpuid_amd_getids(cpu_t *cpu)
742 {
743 	int i, first_half, coreidsz;
744 	uint32_t nb_caps_reg;
745 	uint_t node2_1;
746 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
747 	struct cpuid_regs *cp;
748 
749 	/*
750 	 * AMD CMP chips currently have a single thread per core.
751 	 *
752 	 * Since no two cpus share a core we must assign a distinct coreid
753 	 * per cpu, and we do this by using the cpu_id.  This scheme does not,
754 	 * however, guarantee that sibling cores of a chip will have sequential
755 	 * coreids starting at a multiple of the number of cores per chip -
756 	 * that is usually the case, but if the ACPI MADT table is presented
757 	 * in a different order then we need to perform a few more gymnastics
758 	 * for the pkgcoreid.
759 	 *
760 	 * All processors in the system have the same number of enabled
761 	 * cores. Cores within a processor are always numbered sequentially
762 	 * from 0 regardless of how many or which are disabled, and there
763 	 * is no way for operating system to discover the real core id when some
764 	 * are disabled.
765 	 *
766 	 * In family 0x15, the cores come in pairs called compute units. They
767 	 * share I$ and L2 caches and the FPU. Enumeration of this feature is
768 	 * simplified by the new topology extensions CPUID leaf, indicated by
769 	 * the X86 feature X86FSET_TOPOEXT.
770 	 */
771 
772 	cpi->cpi_coreid = cpu->cpu_id;
773 	cpi->cpi_compunitid = cpu->cpu_id;
774 
775 	if (cpi->cpi_xmaxeax >= 0x80000008) {
776 
777 		coreidsz = BITX((cpi)->cpi_extd[8].cp_ecx, 15, 12);
778 
779 		/*
780 		 * In AMD parlance chip is really a node while Solaris
781 		 * sees chip as equivalent to socket/package.
782 		 */
783 		cpi->cpi_ncore_per_chip =
784 		    BITX((cpi)->cpi_extd[8].cp_ecx, 7, 0) + 1;
785 		if (coreidsz == 0) {
786 			/* Use legacy method */
787 			for (i = 1; i < cpi->cpi_ncore_per_chip; i <<= 1)
788 				coreidsz++;
789 			if (coreidsz == 0)
790 				coreidsz = 1;
791 		}
792 	} else {
793 		/* Assume single-core part */
794 		cpi->cpi_ncore_per_chip = 1;
795 		coreidsz = 1;
796 	}
797 
798 	cpi->cpi_clogid = cpi->cpi_pkgcoreid =
799 	    cpi->cpi_apicid & ((1<<coreidsz) - 1);
800 	cpi->cpi_ncpu_per_chip = cpi->cpi_ncore_per_chip;
801 
802 	/* Get node ID, compute unit ID */
803 	if (is_x86_feature(x86_featureset, X86FSET_TOPOEXT) &&
804 	    cpi->cpi_xmaxeax >= 0x8000001e) {
805 		cp = &cpi->cpi_extd[0x1e];
806 		cp->cp_eax = 0x8000001e;
807 		(void) __cpuid_insn(cp);
808 
809 		cpi->cpi_procnodes_per_pkg = BITX(cp->cp_ecx, 10, 8) + 1;
810 		cpi->cpi_procnodeid = BITX(cp->cp_ecx, 7, 0);
811 		cpi->cpi_cores_per_compunit = BITX(cp->cp_ebx, 15, 8) + 1;
812 		cpi->cpi_compunitid = BITX(cp->cp_ebx, 7, 0)
813 		    + (cpi->cpi_ncore_per_chip / cpi->cpi_cores_per_compunit)
814 		    * (cpi->cpi_procnodeid / cpi->cpi_procnodes_per_pkg);
815 	} else if (cpi->cpi_family == 0xf || cpi->cpi_family >= 0x11) {
816 		cpi->cpi_procnodeid = (cpi->cpi_apicid >> coreidsz) & 7;
817 	} else if (cpi->cpi_family == 0x10) {
818 		/*
819 		 * See if we are a multi-node processor.
820 		 * All processors in the system have the same number of nodes
821 		 */
822 		nb_caps_reg =  pci_getl_func(0, 24, 3, 0xe8);
823 		if ((cpi->cpi_model < 8) || BITX(nb_caps_reg, 29, 29) == 0) {
824 			/* Single-node */
825 			cpi->cpi_procnodeid = BITX(cpi->cpi_apicid, 5,
826 			    coreidsz);
827 		} else {
828 
829 			/*
830 			 * Multi-node revision D (2 nodes per package
831 			 * are supported)
832 			 */
833 			cpi->cpi_procnodes_per_pkg = 2;
834 
835 			first_half = (cpi->cpi_pkgcoreid <=
836 			    (cpi->cpi_ncore_per_chip/2 - 1));
837 
838 			if (cpi->cpi_apicid == cpi->cpi_pkgcoreid) {
839 				/* We are BSP */
840 				cpi->cpi_procnodeid = (first_half ? 0 : 1);
841 			} else {
842 
843 				/* We are AP */
844 				/* NodeId[2:1] bits to use for reading F3xe8 */
845 				node2_1 = BITX(cpi->cpi_apicid, 5, 4) << 1;
846 
847 				nb_caps_reg =
848 				    pci_getl_func(0, 24 + node2_1, 3, 0xe8);
849 
850 				/*
851 				 * Check IntNodeNum bit (31:30, but bit 31 is
852 				 * always 0 on dual-node processors)
853 				 */
854 				if (BITX(nb_caps_reg, 30, 30) == 0)
855 					cpi->cpi_procnodeid = node2_1 +
856 					    !first_half;
857 				else
858 					cpi->cpi_procnodeid = node2_1 +
859 					    first_half;
860 			}
861 		}
862 	} else {
863 		cpi->cpi_procnodeid = 0;
864 	}
865 
866 	cpi->cpi_chipid =
867 	    cpi->cpi_procnodeid / cpi->cpi_procnodes_per_pkg;
868 }
869 
870 /*
871  * Setup XFeature_Enabled_Mask register. Required by xsave feature.
872  */
873 void
874 setup_xfem(void)
875 {
876 	uint64_t flags = XFEATURE_LEGACY_FP;
877 
878 	ASSERT(is_x86_feature(x86_featureset, X86FSET_XSAVE));
879 
880 	if (is_x86_feature(x86_featureset, X86FSET_SSE))
881 		flags |= XFEATURE_SSE;
882 
883 	if (is_x86_feature(x86_featureset, X86FSET_AVX))
884 		flags |= XFEATURE_AVX;
885 
886 	set_xcr(XFEATURE_ENABLED_MASK, flags);
887 
888 	xsave_bv_all = flags;
889 }
890 
891 void
892 cpuid_pass1(cpu_t *cpu, uchar_t *featureset)
893 {
894 	uint32_t mask_ecx, mask_edx;
895 	struct cpuid_info *cpi;
896 	struct cpuid_regs *cp;
897 	int xcpuid;
898 #if !defined(__xpv)
899 	extern int idle_cpu_prefer_mwait;
900 #endif
901 
902 	/*
903 	 * Space statically allocated for BSP, ensure pointer is set
904 	 */
905 	if (cpu->cpu_id == 0) {
906 		if (cpu->cpu_m.mcpu_cpi == NULL)
907 			cpu->cpu_m.mcpu_cpi = &cpuid_info0;
908 	}
909 
910 	add_x86_feature(featureset, X86FSET_CPUID);
911 
912 	cpi = cpu->cpu_m.mcpu_cpi;
913 	ASSERT(cpi != NULL);
914 	cp = &cpi->cpi_std[0];
915 	cp->cp_eax = 0;
916 	cpi->cpi_maxeax = __cpuid_insn(cp);
917 	{
918 		uint32_t *iptr = (uint32_t *)cpi->cpi_vendorstr;
919 		*iptr++ = cp->cp_ebx;
920 		*iptr++ = cp->cp_edx;
921 		*iptr++ = cp->cp_ecx;
922 		*(char *)&cpi->cpi_vendorstr[12] = '\0';
923 	}
924 
925 	cpi->cpi_vendor = _cpuid_vendorstr_to_vendorcode(cpi->cpi_vendorstr);
926 	x86_vendor = cpi->cpi_vendor; /* for compatibility */
927 
928 	/*
929 	 * Limit the range in case of weird hardware
930 	 */
931 	if (cpi->cpi_maxeax > CPI_MAXEAX_MAX)
932 		cpi->cpi_maxeax = CPI_MAXEAX_MAX;
933 	if (cpi->cpi_maxeax < 1)
934 		goto pass1_done;
935 
936 	cp = &cpi->cpi_std[1];
937 	cp->cp_eax = 1;
938 	(void) __cpuid_insn(cp);
939 
940 	/*
941 	 * Extract identifying constants for easy access.
942 	 */
943 	cpi->cpi_model = CPI_MODEL(cpi);
944 	cpi->cpi_family = CPI_FAMILY(cpi);
945 
946 	if (cpi->cpi_family == 0xf)
947 		cpi->cpi_family += CPI_FAMILY_XTD(cpi);
948 
949 	/*
950 	 * Beware: AMD uses "extended model" iff base *FAMILY* == 0xf.
951 	 * Intel, and presumably everyone else, uses model == 0xf, as
952 	 * one would expect (max value means possible overflow).  Sigh.
953 	 */
954 
955 	switch (cpi->cpi_vendor) {
956 	case X86_VENDOR_Intel:
957 		if (IS_EXTENDED_MODEL_INTEL(cpi))
958 			cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
959 		break;
960 	case X86_VENDOR_AMD:
961 		if (CPI_FAMILY(cpi) == 0xf)
962 			cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
963 		break;
964 	default:
965 		if (cpi->cpi_model == 0xf)
966 			cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
967 		break;
968 	}
969 
970 	cpi->cpi_step = CPI_STEP(cpi);
971 	cpi->cpi_brandid = CPI_BRANDID(cpi);
972 
973 	/*
974 	 * *default* assumptions:
975 	 * - believe %edx feature word
976 	 * - ignore %ecx feature word
977 	 * - 32-bit virtual and physical addressing
978 	 */
979 	mask_edx = 0xffffffff;
980 	mask_ecx = 0;
981 
982 	cpi->cpi_pabits = cpi->cpi_vabits = 32;
983 
984 	switch (cpi->cpi_vendor) {
985 	case X86_VENDOR_Intel:
986 		if (cpi->cpi_family == 5)
987 			x86_type = X86_TYPE_P5;
988 		else if (IS_LEGACY_P6(cpi)) {
989 			x86_type = X86_TYPE_P6;
990 			pentiumpro_bug4046376 = 1;
991 			pentiumpro_bug4064495 = 1;
992 			/*
993 			 * Clear the SEP bit when it was set erroneously
994 			 */
995 			if (cpi->cpi_model < 3 && cpi->cpi_step < 3)
996 				cp->cp_edx &= ~CPUID_INTC_EDX_SEP;
997 		} else if (IS_NEW_F6(cpi) || cpi->cpi_family == 0xf) {
998 			x86_type = X86_TYPE_P4;
999 			/*
1000 			 * We don't currently depend on any of the %ecx
1001 			 * features until Prescott, so we'll only check
1002 			 * this from P4 onwards.  We might want to revisit
1003 			 * that idea later.
1004 			 */
1005 			mask_ecx = 0xffffffff;
1006 		} else if (cpi->cpi_family > 0xf)
1007 			mask_ecx = 0xffffffff;
1008 		/*
1009 		 * We don't support MONITOR/MWAIT if leaf 5 is not available
1010 		 * to obtain the monitor linesize.
1011 		 */
1012 		if (cpi->cpi_maxeax < 5)
1013 			mask_ecx &= ~CPUID_INTC_ECX_MON;
1014 		break;
1015 	case X86_VENDOR_IntelClone:
1016 	default:
1017 		break;
1018 	case X86_VENDOR_AMD:
1019 #if defined(OPTERON_ERRATUM_108)
1020 		if (cpi->cpi_family == 0xf && cpi->cpi_model == 0xe) {
1021 			cp->cp_eax = (0xf0f & cp->cp_eax) | 0xc0;
1022 			cpi->cpi_model = 0xc;
1023 		} else
1024 #endif
1025 		if (cpi->cpi_family == 5) {
1026 			/*
1027 			 * AMD K5 and K6
1028 			 *
1029 			 * These CPUs have an incomplete implementation
1030 			 * of MCA/MCE which we mask away.
1031 			 */
1032 			mask_edx &= ~(CPUID_INTC_EDX_MCE | CPUID_INTC_EDX_MCA);
1033 
1034 			/*
1035 			 * Model 0 uses the wrong (APIC) bit
1036 			 * to indicate PGE.  Fix it here.
1037 			 */
1038 			if (cpi->cpi_model == 0) {
1039 				if (cp->cp_edx & 0x200) {
1040 					cp->cp_edx &= ~0x200;
1041 					cp->cp_edx |= CPUID_INTC_EDX_PGE;
1042 				}
1043 			}
1044 
1045 			/*
1046 			 * Early models had problems w/ MMX; disable.
1047 			 */
1048 			if (cpi->cpi_model < 6)
1049 				mask_edx &= ~CPUID_INTC_EDX_MMX;
1050 		}
1051 
1052 		/*
1053 		 * For newer families, SSE3 and CX16, at least, are valid;
1054 		 * enable all
1055 		 */
1056 		if (cpi->cpi_family >= 0xf)
1057 			mask_ecx = 0xffffffff;
1058 		/*
1059 		 * We don't support MONITOR/MWAIT if leaf 5 is not available
1060 		 * to obtain the monitor linesize.
1061 		 */
1062 		if (cpi->cpi_maxeax < 5)
1063 			mask_ecx &= ~CPUID_INTC_ECX_MON;
1064 
1065 #if !defined(__xpv)
1066 		/*
1067 		 * Do not use MONITOR/MWAIT to halt in the idle loop on any AMD
1068 		 * processors.  AMD does not intend MWAIT to be used in the cpu
1069 		 * idle loop on current and future processors.  10h and future
1070 		 * AMD processors use more power in MWAIT than HLT.
1071 		 * Pre-family-10h Opterons do not have the MWAIT instruction.
1072 		 */
1073 		idle_cpu_prefer_mwait = 0;
1074 #endif
1075 
1076 		break;
1077 	case X86_VENDOR_TM:
1078 		/*
1079 		 * workaround the NT workaround in CMS 4.1
1080 		 */
1081 		if (cpi->cpi_family == 5 && cpi->cpi_model == 4 &&
1082 		    (cpi->cpi_step == 2 || cpi->cpi_step == 3))
1083 			cp->cp_edx |= CPUID_INTC_EDX_CX8;
1084 		break;
1085 	case X86_VENDOR_Centaur:
1086 		/*
1087 		 * workaround the NT workarounds again
1088 		 */
1089 		if (cpi->cpi_family == 6)
1090 			cp->cp_edx |= CPUID_INTC_EDX_CX8;
1091 		break;
1092 	case X86_VENDOR_Cyrix:
1093 		/*
1094 		 * We rely heavily on the probing in locore
1095 		 * to actually figure out what parts, if any,
1096 		 * of the Cyrix cpuid instruction to believe.
1097 		 */
1098 		switch (x86_type) {
1099 		case X86_TYPE_CYRIX_486:
1100 			mask_edx = 0;
1101 			break;
1102 		case X86_TYPE_CYRIX_6x86:
1103 			mask_edx = 0;
1104 			break;
1105 		case X86_TYPE_CYRIX_6x86L:
1106 			mask_edx =
1107 			    CPUID_INTC_EDX_DE |
1108 			    CPUID_INTC_EDX_CX8;
1109 			break;
1110 		case X86_TYPE_CYRIX_6x86MX:
1111 			mask_edx =
1112 			    CPUID_INTC_EDX_DE |
1113 			    CPUID_INTC_EDX_MSR |
1114 			    CPUID_INTC_EDX_CX8 |
1115 			    CPUID_INTC_EDX_PGE |
1116 			    CPUID_INTC_EDX_CMOV |
1117 			    CPUID_INTC_EDX_MMX;
1118 			break;
1119 		case X86_TYPE_CYRIX_GXm:
1120 			mask_edx =
1121 			    CPUID_INTC_EDX_MSR |
1122 			    CPUID_INTC_EDX_CX8 |
1123 			    CPUID_INTC_EDX_CMOV |
1124 			    CPUID_INTC_EDX_MMX;
1125 			break;
1126 		case X86_TYPE_CYRIX_MediaGX:
1127 			break;
1128 		case X86_TYPE_CYRIX_MII:
1129 		case X86_TYPE_VIA_CYRIX_III:
1130 			mask_edx =
1131 			    CPUID_INTC_EDX_DE |
1132 			    CPUID_INTC_EDX_TSC |
1133 			    CPUID_INTC_EDX_MSR |
1134 			    CPUID_INTC_EDX_CX8 |
1135 			    CPUID_INTC_EDX_PGE |
1136 			    CPUID_INTC_EDX_CMOV |
1137 			    CPUID_INTC_EDX_MMX;
1138 			break;
1139 		default:
1140 			break;
1141 		}
1142 		break;
1143 	}
1144 
1145 #if defined(__xpv)
1146 	/*
1147 	 * Do not support MONITOR/MWAIT under a hypervisor
1148 	 */
1149 	mask_ecx &= ~CPUID_INTC_ECX_MON;
1150 	/*
1151 	 * Do not support XSAVE under a hypervisor for now
1152 	 */
1153 	xsave_force_disable = B_TRUE;
1154 
1155 #endif	/* __xpv */
1156 
1157 	if (xsave_force_disable) {
1158 		mask_ecx &= ~CPUID_INTC_ECX_XSAVE;
1159 		mask_ecx &= ~CPUID_INTC_ECX_AVX;
1160 		mask_ecx &= ~CPUID_INTC_ECX_F16C;
1161 	}
1162 
1163 	/*
1164 	 * Now we've figured out the masks that determine
1165 	 * which bits we choose to believe, apply the masks
1166 	 * to the feature words, then map the kernel's view
1167 	 * of these feature words into its feature word.
1168 	 */
1169 	cp->cp_edx &= mask_edx;
1170 	cp->cp_ecx &= mask_ecx;
1171 
1172 	/*
1173 	 * apply any platform restrictions (we don't call this
1174 	 * immediately after __cpuid_insn here, because we need the
1175 	 * workarounds applied above first)
1176 	 */
1177 	platform_cpuid_mangle(cpi->cpi_vendor, 1, cp);
1178 
1179 	/*
1180 	 * fold in overrides from the "eeprom" mechanism
1181 	 */
1182 	cp->cp_edx |= cpuid_feature_edx_include;
1183 	cp->cp_edx &= ~cpuid_feature_edx_exclude;
1184 
1185 	cp->cp_ecx |= cpuid_feature_ecx_include;
1186 	cp->cp_ecx &= ~cpuid_feature_ecx_exclude;
1187 
1188 	if (cp->cp_edx & CPUID_INTC_EDX_PSE) {
1189 		add_x86_feature(featureset, X86FSET_LARGEPAGE);
1190 	}
1191 	if (cp->cp_edx & CPUID_INTC_EDX_TSC) {
1192 		add_x86_feature(featureset, X86FSET_TSC);
1193 	}
1194 	if (cp->cp_edx & CPUID_INTC_EDX_MSR) {
1195 		add_x86_feature(featureset, X86FSET_MSR);
1196 	}
1197 	if (cp->cp_edx & CPUID_INTC_EDX_MTRR) {
1198 		add_x86_feature(featureset, X86FSET_MTRR);
1199 	}
1200 	if (cp->cp_edx & CPUID_INTC_EDX_PGE) {
1201 		add_x86_feature(featureset, X86FSET_PGE);
1202 	}
1203 	if (cp->cp_edx & CPUID_INTC_EDX_CMOV) {
1204 		add_x86_feature(featureset, X86FSET_CMOV);
1205 	}
1206 	if (cp->cp_edx & CPUID_INTC_EDX_MMX) {
1207 		add_x86_feature(featureset, X86FSET_MMX);
1208 	}
1209 	if ((cp->cp_edx & CPUID_INTC_EDX_MCE) != 0 &&
1210 	    (cp->cp_edx & CPUID_INTC_EDX_MCA) != 0) {
1211 		add_x86_feature(featureset, X86FSET_MCA);
1212 	}
1213 	if (cp->cp_edx & CPUID_INTC_EDX_PAE) {
1214 		add_x86_feature(featureset, X86FSET_PAE);
1215 	}
1216 	if (cp->cp_edx & CPUID_INTC_EDX_CX8) {
1217 		add_x86_feature(featureset, X86FSET_CX8);
1218 	}
1219 	if (cp->cp_ecx & CPUID_INTC_ECX_CX16) {
1220 		add_x86_feature(featureset, X86FSET_CX16);
1221 	}
1222 	if (cp->cp_edx & CPUID_INTC_EDX_PAT) {
1223 		add_x86_feature(featureset, X86FSET_PAT);
1224 	}
1225 	if (cp->cp_edx & CPUID_INTC_EDX_SEP) {
1226 		add_x86_feature(featureset, X86FSET_SEP);
1227 	}
1228 	if (cp->cp_edx & CPUID_INTC_EDX_FXSR) {
1229 		/*
1230 		 * In our implementation, fxsave/fxrstor
1231 		 * are prerequisites before we'll even
1232 		 * try and do SSE things.
1233 		 */
1234 		if (cp->cp_edx & CPUID_INTC_EDX_SSE) {
1235 			add_x86_feature(featureset, X86FSET_SSE);
1236 		}
1237 		if (cp->cp_edx & CPUID_INTC_EDX_SSE2) {
1238 			add_x86_feature(featureset, X86FSET_SSE2);
1239 		}
1240 		if (cp->cp_ecx & CPUID_INTC_ECX_SSE3) {
1241 			add_x86_feature(featureset, X86FSET_SSE3);
1242 		}
1243 		if (cp->cp_ecx & CPUID_INTC_ECX_SSSE3) {
1244 			add_x86_feature(featureset, X86FSET_SSSE3);
1245 		}
1246 		if (cp->cp_ecx & CPUID_INTC_ECX_SSE4_1) {
1247 			add_x86_feature(featureset, X86FSET_SSE4_1);
1248 		}
1249 		if (cp->cp_ecx & CPUID_INTC_ECX_SSE4_2) {
1250 			add_x86_feature(featureset, X86FSET_SSE4_2);
1251 		}
1252 		if (cp->cp_ecx & CPUID_INTC_ECX_AES) {
1253 			add_x86_feature(featureset, X86FSET_AES);
1254 		}
1255 		if (cp->cp_ecx & CPUID_INTC_ECX_PCLMULQDQ) {
1256 			add_x86_feature(featureset, X86FSET_PCLMULQDQ);
1257 		}
1258 
1259 		if (cp->cp_ecx & CPUID_INTC_ECX_XSAVE) {
1260 			add_x86_feature(featureset, X86FSET_XSAVE);
1261 
1262 			/* We only test AVX when there is XSAVE */
1263 			if (cp->cp_ecx & CPUID_INTC_ECX_AVX) {
1264 				add_x86_feature(featureset,
1265 				    X86FSET_AVX);
1266 
1267 				if (cp->cp_ecx & CPUID_INTC_ECX_F16C)
1268 					add_x86_feature(featureset,
1269 					    X86FSET_F16C);
1270 			}
1271 		}
1272 	}
1273 	if (cp->cp_edx & CPUID_INTC_EDX_DE) {
1274 		add_x86_feature(featureset, X86FSET_DE);
1275 	}
1276 #if !defined(__xpv)
1277 	if (cp->cp_ecx & CPUID_INTC_ECX_MON) {
1278 
1279 		/*
1280 		 * We require the CLFLUSH instruction for erratum workaround
1281 		 * to use MONITOR/MWAIT.
1282 		 */
1283 		if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) {
1284 			cpi->cpi_mwait.support |= MWAIT_SUPPORT;
1285 			add_x86_feature(featureset, X86FSET_MWAIT);
1286 		} else {
1287 			extern int idle_cpu_assert_cflush_monitor;
1288 
1289 			/*
1290 			 * All processors we are aware of which have
1291 			 * MONITOR/MWAIT also have CLFLUSH.
1292 			 */
1293 			if (idle_cpu_assert_cflush_monitor) {
1294 				ASSERT((cp->cp_ecx & CPUID_INTC_ECX_MON) &&
1295 				    (cp->cp_edx & CPUID_INTC_EDX_CLFSH));
1296 			}
1297 		}
1298 	}
1299 #endif	/* __xpv */
1300 
1301 	if (cp->cp_ecx & CPUID_INTC_ECX_VMX) {
1302 		add_x86_feature(featureset, X86FSET_VMX);
1303 	}
1304 
1305 	if (cp->cp_ecx & CPUID_INTC_ECX_RDRAND)
1306 		add_x86_feature(featureset, X86FSET_RDRAND);
1307 
1308 	/*
1309 	 * Only need it first time, rest of the cpus would follow suit.
1310 	 * we only capture this for the bootcpu.
1311 	 */
1312 	if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) {
1313 		add_x86_feature(featureset, X86FSET_CLFSH);
1314 		x86_clflush_size = (BITX(cp->cp_ebx, 15, 8) * 8);
1315 	}
1316 	if (is_x86_feature(featureset, X86FSET_PAE))
1317 		cpi->cpi_pabits = 36;
1318 
1319 	/*
1320 	 * Hyperthreading configuration is slightly tricky on Intel
1321 	 * and pure clones, and even trickier on AMD.
1322 	 *
1323 	 * (AMD chose to set the HTT bit on their CMP processors,
1324 	 * even though they're not actually hyperthreaded.  Thus it
1325 	 * takes a bit more work to figure out what's really going
1326 	 * on ... see the handling of the CMP_LGCY bit below)
1327 	 */
1328 	if (cp->cp_edx & CPUID_INTC_EDX_HTT) {
1329 		cpi->cpi_ncpu_per_chip = CPI_CPU_COUNT(cpi);
1330 		if (cpi->cpi_ncpu_per_chip > 1)
1331 			add_x86_feature(featureset, X86FSET_HTT);
1332 	} else {
1333 		cpi->cpi_ncpu_per_chip = 1;
1334 	}
1335 
1336 	/*
1337 	 * Work on the "extended" feature information, doing
1338 	 * some basic initialization for cpuid_pass2()
1339 	 */
1340 	xcpuid = 0;
1341 	switch (cpi->cpi_vendor) {
1342 	case X86_VENDOR_Intel:
1343 		if (IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf)
1344 			xcpuid++;
1345 		break;
1346 	case X86_VENDOR_AMD:
1347 		if (cpi->cpi_family > 5 ||
1348 		    (cpi->cpi_family == 5 && cpi->cpi_model >= 1))
1349 			xcpuid++;
1350 		break;
1351 	case X86_VENDOR_Cyrix:
1352 		/*
1353 		 * Only these Cyrix CPUs are -known- to support
1354 		 * extended cpuid operations.
1355 		 */
1356 		if (x86_type == X86_TYPE_VIA_CYRIX_III ||
1357 		    x86_type == X86_TYPE_CYRIX_GXm)
1358 			xcpuid++;
1359 		break;
1360 	case X86_VENDOR_Centaur:
1361 	case X86_VENDOR_TM:
1362 	default:
1363 		xcpuid++;
1364 		break;
1365 	}
1366 
1367 	if (xcpuid) {
1368 		cp = &cpi->cpi_extd[0];
1369 		cp->cp_eax = 0x80000000;
1370 		cpi->cpi_xmaxeax = __cpuid_insn(cp);
1371 	}
1372 
1373 	if (cpi->cpi_xmaxeax & 0x80000000) {
1374 
1375 		if (cpi->cpi_xmaxeax > CPI_XMAXEAX_MAX)
1376 			cpi->cpi_xmaxeax = CPI_XMAXEAX_MAX;
1377 
1378 		switch (cpi->cpi_vendor) {
1379 		case X86_VENDOR_Intel:
1380 		case X86_VENDOR_AMD:
1381 			if (cpi->cpi_xmaxeax < 0x80000001)
1382 				break;
1383 			cp = &cpi->cpi_extd[1];
1384 			cp->cp_eax = 0x80000001;
1385 			(void) __cpuid_insn(cp);
1386 
1387 			if (cpi->cpi_vendor == X86_VENDOR_AMD &&
1388 			    cpi->cpi_family == 5 &&
1389 			    cpi->cpi_model == 6 &&
1390 			    cpi->cpi_step == 6) {
1391 				/*
1392 				 * K6 model 6 uses bit 10 to indicate SYSC
1393 				 * Later models use bit 11. Fix it here.
1394 				 */
1395 				if (cp->cp_edx & 0x400) {
1396 					cp->cp_edx &= ~0x400;
1397 					cp->cp_edx |= CPUID_AMD_EDX_SYSC;
1398 				}
1399 			}
1400 
1401 			platform_cpuid_mangle(cpi->cpi_vendor, 0x80000001, cp);
1402 
1403 			/*
1404 			 * Compute the additions to the kernel's feature word.
1405 			 */
1406 			if (cp->cp_edx & CPUID_AMD_EDX_NX) {
1407 				add_x86_feature(featureset, X86FSET_NX);
1408 			}
1409 
1410 			/*
1411 			 * Regardless whether or not we boot 64-bit,
1412 			 * we should have a way to identify whether
1413 			 * the CPU is capable of running 64-bit.
1414 			 */
1415 			if (cp->cp_edx & CPUID_AMD_EDX_LM) {
1416 				add_x86_feature(featureset, X86FSET_64);
1417 			}
1418 
1419 #if defined(__amd64)
1420 			/* 1 GB large page - enable only for 64 bit kernel */
1421 			if (cp->cp_edx & CPUID_AMD_EDX_1GPG) {
1422 				add_x86_feature(featureset, X86FSET_1GPG);
1423 			}
1424 #endif
1425 
1426 			if ((cpi->cpi_vendor == X86_VENDOR_AMD) &&
1427 			    (cpi->cpi_std[1].cp_edx & CPUID_INTC_EDX_FXSR) &&
1428 			    (cp->cp_ecx & CPUID_AMD_ECX_SSE4A)) {
1429 				add_x86_feature(featureset, X86FSET_SSE4A);
1430 			}
1431 
1432 			/*
1433 			 * If both the HTT and CMP_LGCY bits are set,
1434 			 * then we're not actually HyperThreaded.  Read
1435 			 * "AMD CPUID Specification" for more details.
1436 			 */
1437 			if (cpi->cpi_vendor == X86_VENDOR_AMD &&
1438 			    is_x86_feature(featureset, X86FSET_HTT) &&
1439 			    (cp->cp_ecx & CPUID_AMD_ECX_CMP_LGCY)) {
1440 				remove_x86_feature(featureset, X86FSET_HTT);
1441 				add_x86_feature(featureset, X86FSET_CMP);
1442 			}
1443 #if defined(__amd64)
1444 			/*
1445 			 * It's really tricky to support syscall/sysret in
1446 			 * the i386 kernel; we rely on sysenter/sysexit
1447 			 * instead.  In the amd64 kernel, things are -way-
1448 			 * better.
1449 			 */
1450 			if (cp->cp_edx & CPUID_AMD_EDX_SYSC) {
1451 				add_x86_feature(featureset, X86FSET_ASYSC);
1452 			}
1453 
1454 			/*
1455 			 * While we're thinking about system calls, note
1456 			 * that AMD processors don't support sysenter
1457 			 * in long mode at all, so don't try to program them.
1458 			 */
1459 			if (x86_vendor == X86_VENDOR_AMD) {
1460 				remove_x86_feature(featureset, X86FSET_SEP);
1461 			}
1462 #endif
1463 			if (cp->cp_edx & CPUID_AMD_EDX_TSCP) {
1464 				add_x86_feature(featureset, X86FSET_TSCP);
1465 			}
1466 
1467 			if (cp->cp_ecx & CPUID_AMD_ECX_SVM) {
1468 				add_x86_feature(featureset, X86FSET_SVM);
1469 			}
1470 
1471 			if (cp->cp_ecx & CPUID_AMD_ECX_TOPOEXT) {
1472 				add_x86_feature(featureset, X86FSET_TOPOEXT);
1473 			}
1474 			break;
1475 		default:
1476 			break;
1477 		}
1478 
1479 		/*
1480 		 * Get CPUID data about processor cores and hyperthreads.
1481 		 */
1482 		switch (cpi->cpi_vendor) {
1483 		case X86_VENDOR_Intel:
1484 			if (cpi->cpi_maxeax >= 4) {
1485 				cp = &cpi->cpi_std[4];
1486 				cp->cp_eax = 4;
1487 				cp->cp_ecx = 0;
1488 				(void) __cpuid_insn(cp);
1489 				platform_cpuid_mangle(cpi->cpi_vendor, 4, cp);
1490 			}
1491 			/*FALLTHROUGH*/
1492 		case X86_VENDOR_AMD:
1493 			if (cpi->cpi_xmaxeax < 0x80000008)
1494 				break;
1495 			cp = &cpi->cpi_extd[8];
1496 			cp->cp_eax = 0x80000008;
1497 			(void) __cpuid_insn(cp);
1498 			platform_cpuid_mangle(cpi->cpi_vendor, 0x80000008, cp);
1499 
1500 			/*
1501 			 * Virtual and physical address limits from
1502 			 * cpuid override previously guessed values.
1503 			 */
1504 			cpi->cpi_pabits = BITX(cp->cp_eax, 7, 0);
1505 			cpi->cpi_vabits = BITX(cp->cp_eax, 15, 8);
1506 			break;
1507 		default:
1508 			break;
1509 		}
1510 
1511 		/*
1512 		 * Derive the number of cores per chip
1513 		 */
1514 		switch (cpi->cpi_vendor) {
1515 		case X86_VENDOR_Intel:
1516 			if (cpi->cpi_maxeax < 4) {
1517 				cpi->cpi_ncore_per_chip = 1;
1518 				break;
1519 			} else {
1520 				cpi->cpi_ncore_per_chip =
1521 				    BITX((cpi)->cpi_std[4].cp_eax, 31, 26) + 1;
1522 			}
1523 			break;
1524 		case X86_VENDOR_AMD:
1525 			if (cpi->cpi_xmaxeax < 0x80000008) {
1526 				cpi->cpi_ncore_per_chip = 1;
1527 				break;
1528 			} else {
1529 				/*
1530 				 * On family 0xf cpuid fn 2 ECX[7:0] "NC" is
1531 				 * 1 less than the number of physical cores on
1532 				 * the chip.  In family 0x10 this value can
1533 				 * be affected by "downcoring" - it reflects
1534 				 * 1 less than the number of cores actually
1535 				 * enabled on this node.
1536 				 */
1537 				cpi->cpi_ncore_per_chip =
1538 				    BITX((cpi)->cpi_extd[8].cp_ecx, 7, 0) + 1;
1539 			}
1540 			break;
1541 		default:
1542 			cpi->cpi_ncore_per_chip = 1;
1543 			break;
1544 		}
1545 
1546 		/*
1547 		 * Get CPUID data about TSC Invariance in Deep C-State.
1548 		 */
1549 		switch (cpi->cpi_vendor) {
1550 		case X86_VENDOR_Intel:
1551 			if (cpi->cpi_maxeax >= 7) {
1552 				cp = &cpi->cpi_extd[7];
1553 				cp->cp_eax = 0x80000007;
1554 				cp->cp_ecx = 0;
1555 				(void) __cpuid_insn(cp);
1556 			}
1557 			break;
1558 		default:
1559 			break;
1560 		}
1561 	} else {
1562 		cpi->cpi_ncore_per_chip = 1;
1563 	}
1564 
1565 	/*
1566 	 * If more than one core, then this processor is CMP.
1567 	 */
1568 	if (cpi->cpi_ncore_per_chip > 1) {
1569 		add_x86_feature(featureset, X86FSET_CMP);
1570 	}
1571 
1572 	/*
1573 	 * If the number of cores is the same as the number
1574 	 * of CPUs, then we cannot have HyperThreading.
1575 	 */
1576 	if (cpi->cpi_ncpu_per_chip == cpi->cpi_ncore_per_chip) {
1577 		remove_x86_feature(featureset, X86FSET_HTT);
1578 	}
1579 
1580 	cpi->cpi_apicid = CPI_APIC_ID(cpi);
1581 	cpi->cpi_procnodes_per_pkg = 1;
1582 	cpi->cpi_cores_per_compunit = 1;
1583 	if (is_x86_feature(featureset, X86FSET_HTT) == B_FALSE &&
1584 	    is_x86_feature(featureset, X86FSET_CMP) == B_FALSE) {
1585 		/*
1586 		 * Single-core single-threaded processors.
1587 		 */
1588 		cpi->cpi_chipid = -1;
1589 		cpi->cpi_clogid = 0;
1590 		cpi->cpi_coreid = cpu->cpu_id;
1591 		cpi->cpi_pkgcoreid = 0;
1592 		if (cpi->cpi_vendor == X86_VENDOR_AMD)
1593 			cpi->cpi_procnodeid = BITX(cpi->cpi_apicid, 3, 0);
1594 		else
1595 			cpi->cpi_procnodeid = cpi->cpi_chipid;
1596 	} else if (cpi->cpi_ncpu_per_chip > 1) {
1597 		if (cpi->cpi_vendor == X86_VENDOR_Intel)
1598 			cpuid_intel_getids(cpu, featureset);
1599 		else if (cpi->cpi_vendor == X86_VENDOR_AMD)
1600 			cpuid_amd_getids(cpu);
1601 		else {
1602 			/*
1603 			 * All other processors are currently
1604 			 * assumed to have single cores.
1605 			 */
1606 			cpi->cpi_coreid = cpi->cpi_chipid;
1607 			cpi->cpi_pkgcoreid = 0;
1608 			cpi->cpi_procnodeid = cpi->cpi_chipid;
1609 			cpi->cpi_compunitid = cpi->cpi_chipid;
1610 		}
1611 	}
1612 
1613 	/*
1614 	 * Synthesize chip "revision" and socket type
1615 	 */
1616 	cpi->cpi_chiprev = _cpuid_chiprev(cpi->cpi_vendor, cpi->cpi_family,
1617 	    cpi->cpi_model, cpi->cpi_step);
1618 	cpi->cpi_chiprevstr = _cpuid_chiprevstr(cpi->cpi_vendor,
1619 	    cpi->cpi_family, cpi->cpi_model, cpi->cpi_step);
1620 	cpi->cpi_socket = _cpuid_skt(cpi->cpi_vendor, cpi->cpi_family,
1621 	    cpi->cpi_model, cpi->cpi_step);
1622 
1623 pass1_done:
1624 	cpi->cpi_pass = 1;
1625 }
1626 
1627 /*
1628  * Make copies of the cpuid table entries we depend on, in
1629  * part for ease of parsing now, in part so that we have only
1630  * one place to correct any of it, in part for ease of
1631  * later export to userland, and in part so we can look at
1632  * this stuff in a crash dump.
1633  */
1634 
1635 /*ARGSUSED*/
1636 void
1637 cpuid_pass2(cpu_t *cpu)
1638 {
1639 	uint_t n, nmax;
1640 	int i;
1641 	struct cpuid_regs *cp;
1642 	uint8_t *dp;
1643 	uint32_t *iptr;
1644 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
1645 
1646 	ASSERT(cpi->cpi_pass == 1);
1647 
1648 	if (cpi->cpi_maxeax < 1)
1649 		goto pass2_done;
1650 
1651 	if ((nmax = cpi->cpi_maxeax + 1) > NMAX_CPI_STD)
1652 		nmax = NMAX_CPI_STD;
1653 	/*
1654 	 * (We already handled n == 0 and n == 1 in pass 1)
1655 	 */
1656 	for (n = 2, cp = &cpi->cpi_std[2]; n < nmax; n++, cp++) {
1657 		cp->cp_eax = n;
1658 
1659 		/*
1660 		 * CPUID function 4 expects %ecx to be initialized
1661 		 * with an index which indicates which cache to return
1662 		 * information about. The OS is expected to call function 4
1663 		 * with %ecx set to 0, 1, 2, ... until it returns with
1664 		 * EAX[4:0] set to 0, which indicates there are no more
1665 		 * caches.
1666 		 *
1667 		 * Here, populate cpi_std[4] with the information returned by
1668 		 * function 4 when %ecx == 0, and do the rest in cpuid_pass3()
1669 		 * when dynamic memory allocation becomes available.
1670 		 *
1671 		 * Note: we need to explicitly initialize %ecx here, since
1672 		 * function 4 may have been previously invoked.
1673 		 */
1674 		if (n == 4)
1675 			cp->cp_ecx = 0;
1676 
1677 		(void) __cpuid_insn(cp);
1678 		platform_cpuid_mangle(cpi->cpi_vendor, n, cp);
1679 		switch (n) {
1680 		case 2:
1681 			/*
1682 			 * "the lower 8 bits of the %eax register
1683 			 * contain a value that identifies the number
1684 			 * of times the cpuid [instruction] has to be
1685 			 * executed to obtain a complete image of the
1686 			 * processor's caching systems."
1687 			 *
1688 			 * How *do* they make this stuff up?
1689 			 */
1690 			cpi->cpi_ncache = sizeof (*cp) *
1691 			    BITX(cp->cp_eax, 7, 0);
1692 			if (cpi->cpi_ncache == 0)
1693 				break;
1694 			cpi->cpi_ncache--;	/* skip count byte */
1695 
1696 			/*
1697 			 * Well, for now, rather than attempt to implement
1698 			 * this slightly dubious algorithm, we just look
1699 			 * at the first 15 ..
1700 			 */
1701 			if (cpi->cpi_ncache > (sizeof (*cp) - 1))
1702 				cpi->cpi_ncache = sizeof (*cp) - 1;
1703 
1704 			dp = cpi->cpi_cacheinfo;
1705 			if (BITX(cp->cp_eax, 31, 31) == 0) {
1706 				uint8_t *p = (void *)&cp->cp_eax;
1707 				for (i = 1; i < 4; i++)
1708 					if (p[i] != 0)
1709 						*dp++ = p[i];
1710 			}
1711 			if (BITX(cp->cp_ebx, 31, 31) == 0) {
1712 				uint8_t *p = (void *)&cp->cp_ebx;
1713 				for (i = 0; i < 4; i++)
1714 					if (p[i] != 0)
1715 						*dp++ = p[i];
1716 			}
1717 			if (BITX(cp->cp_ecx, 31, 31) == 0) {
1718 				uint8_t *p = (void *)&cp->cp_ecx;
1719 				for (i = 0; i < 4; i++)
1720 					if (p[i] != 0)
1721 						*dp++ = p[i];
1722 			}
1723 			if (BITX(cp->cp_edx, 31, 31) == 0) {
1724 				uint8_t *p = (void *)&cp->cp_edx;
1725 				for (i = 0; i < 4; i++)
1726 					if (p[i] != 0)
1727 						*dp++ = p[i];
1728 			}
1729 			break;
1730 
1731 		case 3:	/* Processor serial number, if PSN supported */
1732 			break;
1733 
1734 		case 4:	/* Deterministic cache parameters */
1735 			break;
1736 
1737 		case 5:	/* Monitor/Mwait parameters */
1738 		{
1739 			size_t mwait_size;
1740 
1741 			/*
1742 			 * check cpi_mwait.support which was set in cpuid_pass1
1743 			 */
1744 			if (!(cpi->cpi_mwait.support & MWAIT_SUPPORT))
1745 				break;
1746 
1747 			/*
1748 			 * Protect ourself from insane mwait line size.
1749 			 * Workaround for incomplete hardware emulator(s).
1750 			 */
1751 			mwait_size = (size_t)MWAIT_SIZE_MAX(cpi);
1752 			if (mwait_size < sizeof (uint32_t) ||
1753 			    !ISP2(mwait_size)) {
1754 #if DEBUG
1755 				cmn_err(CE_NOTE, "Cannot handle cpu %d mwait "
1756 				    "size %ld", cpu->cpu_id, (long)mwait_size);
1757 #endif
1758 				break;
1759 			}
1760 
1761 			cpi->cpi_mwait.mon_min = (size_t)MWAIT_SIZE_MIN(cpi);
1762 			cpi->cpi_mwait.mon_max = mwait_size;
1763 			if (MWAIT_EXTENSION(cpi)) {
1764 				cpi->cpi_mwait.support |= MWAIT_EXTENSIONS;
1765 				if (MWAIT_INT_ENABLE(cpi))
1766 					cpi->cpi_mwait.support |=
1767 					    MWAIT_ECX_INT_ENABLE;
1768 			}
1769 			break;
1770 		}
1771 		default:
1772 			break;
1773 		}
1774 	}
1775 
1776 	if (cpi->cpi_maxeax >= 0xB && cpi->cpi_vendor == X86_VENDOR_Intel) {
1777 		struct cpuid_regs regs;
1778 
1779 		cp = &regs;
1780 		cp->cp_eax = 0xB;
1781 		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
1782 
1783 		(void) __cpuid_insn(cp);
1784 
1785 		/*
1786 		 * Check CPUID.EAX=0BH, ECX=0H:EBX is non-zero, which
1787 		 * indicates that the extended topology enumeration leaf is
1788 		 * available.
1789 		 */
1790 		if (cp->cp_ebx) {
1791 			uint32_t x2apic_id;
1792 			uint_t coreid_shift = 0;
1793 			uint_t ncpu_per_core = 1;
1794 			uint_t chipid_shift = 0;
1795 			uint_t ncpu_per_chip = 1;
1796 			uint_t i;
1797 			uint_t level;
1798 
1799 			for (i = 0; i < CPI_FNB_ECX_MAX; i++) {
1800 				cp->cp_eax = 0xB;
1801 				cp->cp_ecx = i;
1802 
1803 				(void) __cpuid_insn(cp);
1804 				level = CPI_CPU_LEVEL_TYPE(cp);
1805 
1806 				if (level == 1) {
1807 					x2apic_id = cp->cp_edx;
1808 					coreid_shift = BITX(cp->cp_eax, 4, 0);
1809 					ncpu_per_core = BITX(cp->cp_ebx, 15, 0);
1810 				} else if (level == 2) {
1811 					x2apic_id = cp->cp_edx;
1812 					chipid_shift = BITX(cp->cp_eax, 4, 0);
1813 					ncpu_per_chip = BITX(cp->cp_ebx, 15, 0);
1814 				}
1815 			}
1816 
1817 			cpi->cpi_apicid = x2apic_id;
1818 			cpi->cpi_ncpu_per_chip = ncpu_per_chip;
1819 			cpi->cpi_ncore_per_chip = ncpu_per_chip /
1820 			    ncpu_per_core;
1821 			cpi->cpi_chipid = x2apic_id >> chipid_shift;
1822 			cpi->cpi_clogid = x2apic_id & ((1 << chipid_shift) - 1);
1823 			cpi->cpi_coreid = x2apic_id >> coreid_shift;
1824 			cpi->cpi_pkgcoreid = cpi->cpi_clogid >> coreid_shift;
1825 		}
1826 
1827 		/* Make cp NULL so that we don't stumble on others */
1828 		cp = NULL;
1829 	}
1830 
1831 	/*
1832 	 * XSAVE enumeration
1833 	 */
1834 	if (cpi->cpi_maxeax >= 0xD) {
1835 		struct cpuid_regs regs;
1836 		boolean_t cpuid_d_valid = B_TRUE;
1837 
1838 		cp = &regs;
1839 		cp->cp_eax = 0xD;
1840 		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
1841 
1842 		(void) __cpuid_insn(cp);
1843 
1844 		/*
1845 		 * Sanity checks for debug
1846 		 */
1847 		if ((cp->cp_eax & XFEATURE_LEGACY_FP) == 0 ||
1848 		    (cp->cp_eax & XFEATURE_SSE) == 0) {
1849 			cpuid_d_valid = B_FALSE;
1850 		}
1851 
1852 		cpi->cpi_xsave.xsav_hw_features_low = cp->cp_eax;
1853 		cpi->cpi_xsave.xsav_hw_features_high = cp->cp_edx;
1854 		cpi->cpi_xsave.xsav_max_size = cp->cp_ecx;
1855 
1856 		/*
1857 		 * If the hw supports AVX, get the size and offset in the save
1858 		 * area for the ymm state.
1859 		 */
1860 		if (cpi->cpi_xsave.xsav_hw_features_low & XFEATURE_AVX) {
1861 			cp->cp_eax = 0xD;
1862 			cp->cp_ecx = 2;
1863 			cp->cp_edx = cp->cp_ebx = 0;
1864 
1865 			(void) __cpuid_insn(cp);
1866 
1867 			if (cp->cp_ebx != CPUID_LEAFD_2_YMM_OFFSET ||
1868 			    cp->cp_eax != CPUID_LEAFD_2_YMM_SIZE) {
1869 				cpuid_d_valid = B_FALSE;
1870 			}
1871 
1872 			cpi->cpi_xsave.ymm_size = cp->cp_eax;
1873 			cpi->cpi_xsave.ymm_offset = cp->cp_ebx;
1874 		}
1875 
1876 		if (is_x86_feature(x86_featureset, X86FSET_XSAVE)) {
1877 			xsave_state_size = 0;
1878 		} else if (cpuid_d_valid) {
1879 			xsave_state_size = cpi->cpi_xsave.xsav_max_size;
1880 		} else {
1881 			/* Broken CPUID 0xD, probably in HVM */
1882 			cmn_err(CE_WARN, "cpu%d: CPUID.0xD returns invalid "
1883 			    "value: hw_low = %d, hw_high = %d, xsave_size = %d"
1884 			    ", ymm_size = %d, ymm_offset = %d\n",
1885 			    cpu->cpu_id, cpi->cpi_xsave.xsav_hw_features_low,
1886 			    cpi->cpi_xsave.xsav_hw_features_high,
1887 			    (int)cpi->cpi_xsave.xsav_max_size,
1888 			    (int)cpi->cpi_xsave.ymm_size,
1889 			    (int)cpi->cpi_xsave.ymm_offset);
1890 
1891 			if (xsave_state_size != 0) {
1892 				/*
1893 				 * This must be a non-boot CPU. We cannot
1894 				 * continue, because boot cpu has already
1895 				 * enabled XSAVE.
1896 				 */
1897 				ASSERT(cpu->cpu_id != 0);
1898 				cmn_err(CE_PANIC, "cpu%d: we have already "
1899 				    "enabled XSAVE on boot cpu, cannot "
1900 				    "continue.", cpu->cpu_id);
1901 			} else {
1902 				/*
1903 				 * Must be from boot CPU, OK to disable XSAVE.
1904 				 */
1905 				ASSERT(cpu->cpu_id == 0);
1906 				remove_x86_feature(x86_featureset,
1907 				    X86FSET_XSAVE);
1908 				remove_x86_feature(x86_featureset, X86FSET_AVX);
1909 				CPI_FEATURES_ECX(cpi) &= ~CPUID_INTC_ECX_XSAVE;
1910 				CPI_FEATURES_ECX(cpi) &= ~CPUID_INTC_ECX_AVX;
1911 				CPI_FEATURES_ECX(cpi) &= ~CPUID_INTC_ECX_F16C;
1912 				xsave_force_disable = B_TRUE;
1913 			}
1914 		}
1915 	}
1916 
1917 
1918 	if ((cpi->cpi_xmaxeax & 0x80000000) == 0)
1919 		goto pass2_done;
1920 
1921 	if ((nmax = cpi->cpi_xmaxeax - 0x80000000 + 1) > NMAX_CPI_EXTD)
1922 		nmax = NMAX_CPI_EXTD;
1923 	/*
1924 	 * Copy the extended properties, fixing them as we go.
1925 	 * (We already handled n == 0 and n == 1 in pass 1)
1926 	 */
1927 	iptr = (void *)cpi->cpi_brandstr;
1928 	for (n = 2, cp = &cpi->cpi_extd[2]; n < nmax; cp++, n++) {
1929 		cp->cp_eax = 0x80000000 + n;
1930 		(void) __cpuid_insn(cp);
1931 		platform_cpuid_mangle(cpi->cpi_vendor, 0x80000000 + n, cp);
1932 		switch (n) {
1933 		case 2:
1934 		case 3:
1935 		case 4:
1936 			/*
1937 			 * Extract the brand string
1938 			 */
1939 			*iptr++ = cp->cp_eax;
1940 			*iptr++ = cp->cp_ebx;
1941 			*iptr++ = cp->cp_ecx;
1942 			*iptr++ = cp->cp_edx;
1943 			break;
1944 		case 5:
1945 			switch (cpi->cpi_vendor) {
1946 			case X86_VENDOR_AMD:
1947 				/*
1948 				 * The Athlon and Duron were the first
1949 				 * parts to report the sizes of the
1950 				 * TLB for large pages. Before then,
1951 				 * we don't trust the data.
1952 				 */
1953 				if (cpi->cpi_family < 6 ||
1954 				    (cpi->cpi_family == 6 &&
1955 				    cpi->cpi_model < 1))
1956 					cp->cp_eax = 0;
1957 				break;
1958 			default:
1959 				break;
1960 			}
1961 			break;
1962 		case 6:
1963 			switch (cpi->cpi_vendor) {
1964 			case X86_VENDOR_AMD:
1965 				/*
1966 				 * The Athlon and Duron were the first
1967 				 * AMD parts with L2 TLB's.
1968 				 * Before then, don't trust the data.
1969 				 */
1970 				if (cpi->cpi_family < 6 ||
1971 				    cpi->cpi_family == 6 &&
1972 				    cpi->cpi_model < 1)
1973 					cp->cp_eax = cp->cp_ebx = 0;
1974 				/*
1975 				 * AMD Duron rev A0 reports L2
1976 				 * cache size incorrectly as 1K
1977 				 * when it is really 64K
1978 				 */
1979 				if (cpi->cpi_family == 6 &&
1980 				    cpi->cpi_model == 3 &&
1981 				    cpi->cpi_step == 0) {
1982 					cp->cp_ecx &= 0xffff;
1983 					cp->cp_ecx |= 0x400000;
1984 				}
1985 				break;
1986 			case X86_VENDOR_Cyrix:	/* VIA C3 */
1987 				/*
1988 				 * VIA C3 processors are a bit messed
1989 				 * up w.r.t. encoding cache sizes in %ecx
1990 				 */
1991 				if (cpi->cpi_family != 6)
1992 					break;
1993 				/*
1994 				 * model 7 and 8 were incorrectly encoded
1995 				 *
1996 				 * xxx is model 8 really broken?
1997 				 */
1998 				if (cpi->cpi_model == 7 ||
1999 				    cpi->cpi_model == 8)
2000 					cp->cp_ecx =
2001 					    BITX(cp->cp_ecx, 31, 24) << 16 |
2002 					    BITX(cp->cp_ecx, 23, 16) << 12 |
2003 					    BITX(cp->cp_ecx, 15, 8) << 8 |
2004 					    BITX(cp->cp_ecx, 7, 0);
2005 				/*
2006 				 * model 9 stepping 1 has wrong associativity
2007 				 */
2008 				if (cpi->cpi_model == 9 && cpi->cpi_step == 1)
2009 					cp->cp_ecx |= 8 << 12;
2010 				break;
2011 			case X86_VENDOR_Intel:
2012 				/*
2013 				 * Extended L2 Cache features function.
2014 				 * First appeared on Prescott.
2015 				 */
2016 			default:
2017 				break;
2018 			}
2019 			break;
2020 		default:
2021 			break;
2022 		}
2023 	}
2024 
2025 pass2_done:
2026 	cpi->cpi_pass = 2;
2027 }
2028 
2029 static const char *
2030 intel_cpubrand(const struct cpuid_info *cpi)
2031 {
2032 	int i;
2033 
2034 	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
2035 	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5)
2036 		return ("i486");
2037 
2038 	switch (cpi->cpi_family) {
2039 	case 5:
2040 		return ("Intel Pentium(r)");
2041 	case 6:
2042 		switch (cpi->cpi_model) {
2043 			uint_t celeron, xeon;
2044 			const struct cpuid_regs *cp;
2045 		case 0:
2046 		case 1:
2047 		case 2:
2048 			return ("Intel Pentium(r) Pro");
2049 		case 3:
2050 		case 4:
2051 			return ("Intel Pentium(r) II");
2052 		case 6:
2053 			return ("Intel Celeron(r)");
2054 		case 5:
2055 		case 7:
2056 			celeron = xeon = 0;
2057 			cp = &cpi->cpi_std[2];	/* cache info */
2058 
2059 			for (i = 1; i < 4; i++) {
2060 				uint_t tmp;
2061 
2062 				tmp = (cp->cp_eax >> (8 * i)) & 0xff;
2063 				if (tmp == 0x40)
2064 					celeron++;
2065 				if (tmp >= 0x44 && tmp <= 0x45)
2066 					xeon++;
2067 			}
2068 
2069 			for (i = 0; i < 2; i++) {
2070 				uint_t tmp;
2071 
2072 				tmp = (cp->cp_ebx >> (8 * i)) & 0xff;
2073 				if (tmp == 0x40)
2074 					celeron++;
2075 				else if (tmp >= 0x44 && tmp <= 0x45)
2076 					xeon++;
2077 			}
2078 
2079 			for (i = 0; i < 4; i++) {
2080 				uint_t tmp;
2081 
2082 				tmp = (cp->cp_ecx >> (8 * i)) & 0xff;
2083 				if (tmp == 0x40)
2084 					celeron++;
2085 				else if (tmp >= 0x44 && tmp <= 0x45)
2086 					xeon++;
2087 			}
2088 
2089 			for (i = 0; i < 4; i++) {
2090 				uint_t tmp;
2091 
2092 				tmp = (cp->cp_edx >> (8 * i)) & 0xff;
2093 				if (tmp == 0x40)
2094 					celeron++;
2095 				else if (tmp >= 0x44 && tmp <= 0x45)
2096 					xeon++;
2097 			}
2098 
2099 			if (celeron)
2100 				return ("Intel Celeron(r)");
2101 			if (xeon)
2102 				return (cpi->cpi_model == 5 ?
2103 				    "Intel Pentium(r) II Xeon(tm)" :
2104 				    "Intel Pentium(r) III Xeon(tm)");
2105 			return (cpi->cpi_model == 5 ?
2106 			    "Intel Pentium(r) II or Pentium(r) II Xeon(tm)" :
2107 			    "Intel Pentium(r) III or Pentium(r) III Xeon(tm)");
2108 		default:
2109 			break;
2110 		}
2111 	default:
2112 		break;
2113 	}
2114 
2115 	/* BrandID is present if the field is nonzero */
2116 	if (cpi->cpi_brandid != 0) {
2117 		static const struct {
2118 			uint_t bt_bid;
2119 			const char *bt_str;
2120 		} brand_tbl[] = {
2121 			{ 0x1,	"Intel(r) Celeron(r)" },
2122 			{ 0x2,	"Intel(r) Pentium(r) III" },
2123 			{ 0x3,	"Intel(r) Pentium(r) III Xeon(tm)" },
2124 			{ 0x4,	"Intel(r) Pentium(r) III" },
2125 			{ 0x6,	"Mobile Intel(r) Pentium(r) III" },
2126 			{ 0x7,	"Mobile Intel(r) Celeron(r)" },
2127 			{ 0x8,	"Intel(r) Pentium(r) 4" },
2128 			{ 0x9,	"Intel(r) Pentium(r) 4" },
2129 			{ 0xa,	"Intel(r) Celeron(r)" },
2130 			{ 0xb,	"Intel(r) Xeon(tm)" },
2131 			{ 0xc,	"Intel(r) Xeon(tm) MP" },
2132 			{ 0xe,	"Mobile Intel(r) Pentium(r) 4" },
2133 			{ 0xf,	"Mobile Intel(r) Celeron(r)" },
2134 			{ 0x11, "Mobile Genuine Intel(r)" },
2135 			{ 0x12, "Intel(r) Celeron(r) M" },
2136 			{ 0x13, "Mobile Intel(r) Celeron(r)" },
2137 			{ 0x14, "Intel(r) Celeron(r)" },
2138 			{ 0x15, "Mobile Genuine Intel(r)" },
2139 			{ 0x16,	"Intel(r) Pentium(r) M" },
2140 			{ 0x17, "Mobile Intel(r) Celeron(r)" }
2141 		};
2142 		uint_t btblmax = sizeof (brand_tbl) / sizeof (brand_tbl[0]);
2143 		uint_t sgn;
2144 
2145 		sgn = (cpi->cpi_family << 8) |
2146 		    (cpi->cpi_model << 4) | cpi->cpi_step;
2147 
2148 		for (i = 0; i < btblmax; i++)
2149 			if (brand_tbl[i].bt_bid == cpi->cpi_brandid)
2150 				break;
2151 		if (i < btblmax) {
2152 			if (sgn == 0x6b1 && cpi->cpi_brandid == 3)
2153 				return ("Intel(r) Celeron(r)");
2154 			if (sgn < 0xf13 && cpi->cpi_brandid == 0xb)
2155 				return ("Intel(r) Xeon(tm) MP");
2156 			if (sgn < 0xf13 && cpi->cpi_brandid == 0xe)
2157 				return ("Intel(r) Xeon(tm)");
2158 			return (brand_tbl[i].bt_str);
2159 		}
2160 	}
2161 
2162 	return (NULL);
2163 }
2164 
2165 static const char *
2166 amd_cpubrand(const struct cpuid_info *cpi)
2167 {
2168 	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
2169 	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5)
2170 		return ("i486 compatible");
2171 
2172 	switch (cpi->cpi_family) {
2173 	case 5:
2174 		switch (cpi->cpi_model) {
2175 		case 0:
2176 		case 1:
2177 		case 2:
2178 		case 3:
2179 		case 4:
2180 		case 5:
2181 			return ("AMD-K5(r)");
2182 		case 6:
2183 		case 7:
2184 			return ("AMD-K6(r)");
2185 		case 8:
2186 			return ("AMD-K6(r)-2");
2187 		case 9:
2188 			return ("AMD-K6(r)-III");
2189 		default:
2190 			return ("AMD (family 5)");
2191 		}
2192 	case 6:
2193 		switch (cpi->cpi_model) {
2194 		case 1:
2195 			return ("AMD-K7(tm)");
2196 		case 0:
2197 		case 2:
2198 		case 4:
2199 			return ("AMD Athlon(tm)");
2200 		case 3:
2201 		case 7:
2202 			return ("AMD Duron(tm)");
2203 		case 6:
2204 		case 8:
2205 		case 10:
2206 			/*
2207 			 * Use the L2 cache size to distinguish
2208 			 */
2209 			return ((cpi->cpi_extd[6].cp_ecx >> 16) >= 256 ?
2210 			    "AMD Athlon(tm)" : "AMD Duron(tm)");
2211 		default:
2212 			return ("AMD (family 6)");
2213 		}
2214 	default:
2215 		break;
2216 	}
2217 
2218 	if (cpi->cpi_family == 0xf && cpi->cpi_model == 5 &&
2219 	    cpi->cpi_brandid != 0) {
2220 		switch (BITX(cpi->cpi_brandid, 7, 5)) {
2221 		case 3:
2222 			return ("AMD Opteron(tm) UP 1xx");
2223 		case 4:
2224 			return ("AMD Opteron(tm) DP 2xx");
2225 		case 5:
2226 			return ("AMD Opteron(tm) MP 8xx");
2227 		default:
2228 			return ("AMD Opteron(tm)");
2229 		}
2230 	}
2231 
2232 	return (NULL);
2233 }
2234 
2235 static const char *
2236 cyrix_cpubrand(struct cpuid_info *cpi, uint_t type)
2237 {
2238 	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
2239 	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5 ||
2240 	    type == X86_TYPE_CYRIX_486)
2241 		return ("i486 compatible");
2242 
2243 	switch (type) {
2244 	case X86_TYPE_CYRIX_6x86:
2245 		return ("Cyrix 6x86");
2246 	case X86_TYPE_CYRIX_6x86L:
2247 		return ("Cyrix 6x86L");
2248 	case X86_TYPE_CYRIX_6x86MX:
2249 		return ("Cyrix 6x86MX");
2250 	case X86_TYPE_CYRIX_GXm:
2251 		return ("Cyrix GXm");
2252 	case X86_TYPE_CYRIX_MediaGX:
2253 		return ("Cyrix MediaGX");
2254 	case X86_TYPE_CYRIX_MII:
2255 		return ("Cyrix M2");
2256 	case X86_TYPE_VIA_CYRIX_III:
2257 		return ("VIA Cyrix M3");
2258 	default:
2259 		/*
2260 		 * Have another wild guess ..
2261 		 */
2262 		if (cpi->cpi_family == 4 && cpi->cpi_model == 9)
2263 			return ("Cyrix 5x86");
2264 		else if (cpi->cpi_family == 5) {
2265 			switch (cpi->cpi_model) {
2266 			case 2:
2267 				return ("Cyrix 6x86");	/* Cyrix M1 */
2268 			case 4:
2269 				return ("Cyrix MediaGX");
2270 			default:
2271 				break;
2272 			}
2273 		} else if (cpi->cpi_family == 6) {
2274 			switch (cpi->cpi_model) {
2275 			case 0:
2276 				return ("Cyrix 6x86MX"); /* Cyrix M2? */
2277 			case 5:
2278 			case 6:
2279 			case 7:
2280 			case 8:
2281 			case 9:
2282 				return ("VIA C3");
2283 			default:
2284 				break;
2285 			}
2286 		}
2287 		break;
2288 	}
2289 	return (NULL);
2290 }
2291 
2292 /*
2293  * This only gets called in the case that the CPU extended
2294  * feature brand string (0x80000002, 0x80000003, 0x80000004)
2295  * aren't available, or contain null bytes for some reason.
2296  */
2297 static void
2298 fabricate_brandstr(struct cpuid_info *cpi)
2299 {
2300 	const char *brand = NULL;
2301 
2302 	switch (cpi->cpi_vendor) {
2303 	case X86_VENDOR_Intel:
2304 		brand = intel_cpubrand(cpi);
2305 		break;
2306 	case X86_VENDOR_AMD:
2307 		brand = amd_cpubrand(cpi);
2308 		break;
2309 	case X86_VENDOR_Cyrix:
2310 		brand = cyrix_cpubrand(cpi, x86_type);
2311 		break;
2312 	case X86_VENDOR_NexGen:
2313 		if (cpi->cpi_family == 5 && cpi->cpi_model == 0)
2314 			brand = "NexGen Nx586";
2315 		break;
2316 	case X86_VENDOR_Centaur:
2317 		if (cpi->cpi_family == 5)
2318 			switch (cpi->cpi_model) {
2319 			case 4:
2320 				brand = "Centaur C6";
2321 				break;
2322 			case 8:
2323 				brand = "Centaur C2";
2324 				break;
2325 			case 9:
2326 				brand = "Centaur C3";
2327 				break;
2328 			default:
2329 				break;
2330 			}
2331 		break;
2332 	case X86_VENDOR_Rise:
2333 		if (cpi->cpi_family == 5 &&
2334 		    (cpi->cpi_model == 0 || cpi->cpi_model == 2))
2335 			brand = "Rise mP6";
2336 		break;
2337 	case X86_VENDOR_SiS:
2338 		if (cpi->cpi_family == 5 && cpi->cpi_model == 0)
2339 			brand = "SiS 55x";
2340 		break;
2341 	case X86_VENDOR_TM:
2342 		if (cpi->cpi_family == 5 && cpi->cpi_model == 4)
2343 			brand = "Transmeta Crusoe TM3x00 or TM5x00";
2344 		break;
2345 	case X86_VENDOR_NSC:
2346 	case X86_VENDOR_UMC:
2347 	default:
2348 		break;
2349 	}
2350 	if (brand) {
2351 		(void) strcpy((char *)cpi->cpi_brandstr, brand);
2352 		return;
2353 	}
2354 
2355 	/*
2356 	 * If all else fails ...
2357 	 */
2358 	(void) snprintf(cpi->cpi_brandstr, sizeof (cpi->cpi_brandstr),
2359 	    "%s %d.%d.%d", cpi->cpi_vendorstr, cpi->cpi_family,
2360 	    cpi->cpi_model, cpi->cpi_step);
2361 }
2362 
2363 /*
2364  * This routine is called just after kernel memory allocation
2365  * becomes available on cpu0, and as part of mp_startup() on
2366  * the other cpus.
2367  *
2368  * Fixup the brand string, and collect any information from cpuid
2369  * that requires dynamicically allocated storage to represent.
2370  */
2371 /*ARGSUSED*/
2372 void
2373 cpuid_pass3(cpu_t *cpu)
2374 {
2375 	int	i, max, shft, level, size;
2376 	struct cpuid_regs regs;
2377 	struct cpuid_regs *cp;
2378 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
2379 
2380 	ASSERT(cpi->cpi_pass == 2);
2381 
2382 	/*
2383 	 * Function 4: Deterministic cache parameters
2384 	 *
2385 	 * Take this opportunity to detect the number of threads
2386 	 * sharing the last level cache, and construct a corresponding
2387 	 * cache id. The respective cpuid_info members are initialized
2388 	 * to the default case of "no last level cache sharing".
2389 	 */
2390 	cpi->cpi_ncpu_shr_last_cache = 1;
2391 	cpi->cpi_last_lvl_cacheid = cpu->cpu_id;
2392 
2393 	if (cpi->cpi_maxeax >= 4 && cpi->cpi_vendor == X86_VENDOR_Intel) {
2394 
2395 		/*
2396 		 * Find the # of elements (size) returned by fn 4, and along
2397 		 * the way detect last level cache sharing details.
2398 		 */
2399 		bzero(&regs, sizeof (regs));
2400 		cp = &regs;
2401 		for (i = 0, max = 0; i < CPI_FN4_ECX_MAX; i++) {
2402 			cp->cp_eax = 4;
2403 			cp->cp_ecx = i;
2404 
2405 			(void) __cpuid_insn(cp);
2406 
2407 			if (CPI_CACHE_TYPE(cp) == 0)
2408 				break;
2409 			level = CPI_CACHE_LVL(cp);
2410 			if (level > max) {
2411 				max = level;
2412 				cpi->cpi_ncpu_shr_last_cache =
2413 				    CPI_NTHR_SHR_CACHE(cp) + 1;
2414 			}
2415 		}
2416 		cpi->cpi_std_4_size = size = i;
2417 
2418 		/*
2419 		 * Allocate the cpi_std_4 array. The first element
2420 		 * references the regs for fn 4, %ecx == 0, which
2421 		 * cpuid_pass2() stashed in cpi->cpi_std[4].
2422 		 */
2423 		if (size > 0) {
2424 			cpi->cpi_std_4 =
2425 			    kmem_alloc(size * sizeof (cp), KM_SLEEP);
2426 			cpi->cpi_std_4[0] = &cpi->cpi_std[4];
2427 
2428 			/*
2429 			 * Allocate storage to hold the additional regs
2430 			 * for function 4, %ecx == 1 .. cpi_std_4_size.
2431 			 *
2432 			 * The regs for fn 4, %ecx == 0 has already
2433 			 * been allocated as indicated above.
2434 			 */
2435 			for (i = 1; i < size; i++) {
2436 				cp = cpi->cpi_std_4[i] =
2437 				    kmem_zalloc(sizeof (regs), KM_SLEEP);
2438 				cp->cp_eax = 4;
2439 				cp->cp_ecx = i;
2440 
2441 				(void) __cpuid_insn(cp);
2442 			}
2443 		}
2444 		/*
2445 		 * Determine the number of bits needed to represent
2446 		 * the number of CPUs sharing the last level cache.
2447 		 *
2448 		 * Shift off that number of bits from the APIC id to
2449 		 * derive the cache id.
2450 		 */
2451 		shft = 0;
2452 		for (i = 1; i < cpi->cpi_ncpu_shr_last_cache; i <<= 1)
2453 			shft++;
2454 		cpi->cpi_last_lvl_cacheid = cpi->cpi_apicid >> shft;
2455 	}
2456 
2457 	/*
2458 	 * Now fixup the brand string
2459 	 */
2460 	if ((cpi->cpi_xmaxeax & 0x80000000) == 0) {
2461 		fabricate_brandstr(cpi);
2462 	} else {
2463 
2464 		/*
2465 		 * If we successfully extracted a brand string from the cpuid
2466 		 * instruction, clean it up by removing leading spaces and
2467 		 * similar junk.
2468 		 */
2469 		if (cpi->cpi_brandstr[0]) {
2470 			size_t maxlen = sizeof (cpi->cpi_brandstr);
2471 			char *src, *dst;
2472 
2473 			dst = src = (char *)cpi->cpi_brandstr;
2474 			src[maxlen - 1] = '\0';
2475 			/*
2476 			 * strip leading spaces
2477 			 */
2478 			while (*src == ' ')
2479 				src++;
2480 			/*
2481 			 * Remove any 'Genuine' or "Authentic" prefixes
2482 			 */
2483 			if (strncmp(src, "Genuine ", 8) == 0)
2484 				src += 8;
2485 			if (strncmp(src, "Authentic ", 10) == 0)
2486 				src += 10;
2487 
2488 			/*
2489 			 * Now do an in-place copy.
2490 			 * Map (R) to (r) and (TM) to (tm).
2491 			 * The era of teletypes is long gone, and there's
2492 			 * -really- no need to shout.
2493 			 */
2494 			while (*src != '\0') {
2495 				if (src[0] == '(') {
2496 					if (strncmp(src + 1, "R)", 2) == 0) {
2497 						(void) strncpy(dst, "(r)", 3);
2498 						src += 3;
2499 						dst += 3;
2500 						continue;
2501 					}
2502 					if (strncmp(src + 1, "TM)", 3) == 0) {
2503 						(void) strncpy(dst, "(tm)", 4);
2504 						src += 4;
2505 						dst += 4;
2506 						continue;
2507 					}
2508 				}
2509 				*dst++ = *src++;
2510 			}
2511 			*dst = '\0';
2512 
2513 			/*
2514 			 * Finally, remove any trailing spaces
2515 			 */
2516 			while (--dst > cpi->cpi_brandstr)
2517 				if (*dst == ' ')
2518 					*dst = '\0';
2519 				else
2520 					break;
2521 		} else
2522 			fabricate_brandstr(cpi);
2523 	}
2524 	cpi->cpi_pass = 3;
2525 }
2526 
2527 /*
2528  * This routine is called out of bind_hwcap() much later in the life
2529  * of the kernel (post_startup()).  The job of this routine is to resolve
2530  * the hardware feature support and kernel support for those features into
2531  * what we're actually going to tell applications via the aux vector.
2532  */
2533 void
2534 cpuid_pass4(cpu_t *cpu, uint_t *hwcap_out)
2535 {
2536 	struct cpuid_info *cpi;
2537 	uint_t hwcap_flags = 0, hwcap_flags_2 = 0;
2538 
2539 	if (cpu == NULL)
2540 		cpu = CPU;
2541 	cpi = cpu->cpu_m.mcpu_cpi;
2542 
2543 	ASSERT(cpi->cpi_pass == 3);
2544 
2545 	if (cpi->cpi_maxeax >= 1) {
2546 		uint32_t *edx = &cpi->cpi_support[STD_EDX_FEATURES];
2547 		uint32_t *ecx = &cpi->cpi_support[STD_ECX_FEATURES];
2548 
2549 		*edx = CPI_FEATURES_EDX(cpi);
2550 		*ecx = CPI_FEATURES_ECX(cpi);
2551 
2552 		/*
2553 		 * [these require explicit kernel support]
2554 		 */
2555 		if (!is_x86_feature(x86_featureset, X86FSET_SEP))
2556 			*edx &= ~CPUID_INTC_EDX_SEP;
2557 
2558 		if (!is_x86_feature(x86_featureset, X86FSET_SSE))
2559 			*edx &= ~(CPUID_INTC_EDX_FXSR|CPUID_INTC_EDX_SSE);
2560 		if (!is_x86_feature(x86_featureset, X86FSET_SSE2))
2561 			*edx &= ~CPUID_INTC_EDX_SSE2;
2562 
2563 		if (!is_x86_feature(x86_featureset, X86FSET_HTT))
2564 			*edx &= ~CPUID_INTC_EDX_HTT;
2565 
2566 		if (!is_x86_feature(x86_featureset, X86FSET_SSE3))
2567 			*ecx &= ~CPUID_INTC_ECX_SSE3;
2568 
2569 		if (!is_x86_feature(x86_featureset, X86FSET_SSSE3))
2570 			*ecx &= ~CPUID_INTC_ECX_SSSE3;
2571 		if (!is_x86_feature(x86_featureset, X86FSET_SSE4_1))
2572 			*ecx &= ~CPUID_INTC_ECX_SSE4_1;
2573 		if (!is_x86_feature(x86_featureset, X86FSET_SSE4_2))
2574 			*ecx &= ~CPUID_INTC_ECX_SSE4_2;
2575 		if (!is_x86_feature(x86_featureset, X86FSET_AES))
2576 			*ecx &= ~CPUID_INTC_ECX_AES;
2577 		if (!is_x86_feature(x86_featureset, X86FSET_PCLMULQDQ))
2578 			*ecx &= ~CPUID_INTC_ECX_PCLMULQDQ;
2579 		if (!is_x86_feature(x86_featureset, X86FSET_XSAVE))
2580 			*ecx &= ~(CPUID_INTC_ECX_XSAVE |
2581 			    CPUID_INTC_ECX_OSXSAVE);
2582 		if (!is_x86_feature(x86_featureset, X86FSET_AVX))
2583 			*ecx &= ~CPUID_INTC_ECX_AVX;
2584 		if (!is_x86_feature(x86_featureset, X86FSET_F16C))
2585 			*ecx &= ~CPUID_INTC_ECX_F16C;
2586 
2587 		/*
2588 		 * [no explicit support required beyond x87 fp context]
2589 		 */
2590 		if (!fpu_exists)
2591 			*edx &= ~(CPUID_INTC_EDX_FPU | CPUID_INTC_EDX_MMX);
2592 
2593 		/*
2594 		 * Now map the supported feature vector to things that we
2595 		 * think userland will care about.
2596 		 */
2597 		if (*edx & CPUID_INTC_EDX_SEP)
2598 			hwcap_flags |= AV_386_SEP;
2599 		if (*edx & CPUID_INTC_EDX_SSE)
2600 			hwcap_flags |= AV_386_FXSR | AV_386_SSE;
2601 		if (*edx & CPUID_INTC_EDX_SSE2)
2602 			hwcap_flags |= AV_386_SSE2;
2603 		if (*ecx & CPUID_INTC_ECX_SSE3)
2604 			hwcap_flags |= AV_386_SSE3;
2605 		if (*ecx & CPUID_INTC_ECX_SSSE3)
2606 			hwcap_flags |= AV_386_SSSE3;
2607 		if (*ecx & CPUID_INTC_ECX_SSE4_1)
2608 			hwcap_flags |= AV_386_SSE4_1;
2609 		if (*ecx & CPUID_INTC_ECX_SSE4_2)
2610 			hwcap_flags |= AV_386_SSE4_2;
2611 		if (*ecx & CPUID_INTC_ECX_MOVBE)
2612 			hwcap_flags |= AV_386_MOVBE;
2613 		if (*ecx & CPUID_INTC_ECX_AES)
2614 			hwcap_flags |= AV_386_AES;
2615 		if (*ecx & CPUID_INTC_ECX_PCLMULQDQ)
2616 			hwcap_flags |= AV_386_PCLMULQDQ;
2617 		if ((*ecx & CPUID_INTC_ECX_XSAVE) &&
2618 		    (*ecx & CPUID_INTC_ECX_OSXSAVE)) {
2619 			hwcap_flags |= AV_386_XSAVE;
2620 
2621 			if (*ecx & CPUID_INTC_ECX_AVX) {
2622 				hwcap_flags |= AV_386_AVX;
2623 				if (*ecx & CPUID_INTC_ECX_F16C)
2624 					hwcap_flags_2 |= AV_386_2_F16C;
2625 			}
2626 		}
2627 		if (*ecx & CPUID_INTC_ECX_VMX)
2628 			hwcap_flags |= AV_386_VMX;
2629 		if (*ecx & CPUID_INTC_ECX_POPCNT)
2630 			hwcap_flags |= AV_386_POPCNT;
2631 		if (*edx & CPUID_INTC_EDX_FPU)
2632 			hwcap_flags |= AV_386_FPU;
2633 		if (*edx & CPUID_INTC_EDX_MMX)
2634 			hwcap_flags |= AV_386_MMX;
2635 
2636 		if (*edx & CPUID_INTC_EDX_TSC)
2637 			hwcap_flags |= AV_386_TSC;
2638 		if (*edx & CPUID_INTC_EDX_CX8)
2639 			hwcap_flags |= AV_386_CX8;
2640 		if (*edx & CPUID_INTC_EDX_CMOV)
2641 			hwcap_flags |= AV_386_CMOV;
2642 		if (*ecx & CPUID_INTC_ECX_CX16)
2643 			hwcap_flags |= AV_386_CX16;
2644 
2645 		if (*ecx & CPUID_INTC_ECX_RDRAND)
2646 			hwcap_flags_2 |= AV_386_2_RDRAND;
2647 	}
2648 
2649 	if (cpi->cpi_xmaxeax < 0x80000001)
2650 		goto pass4_done;
2651 
2652 	switch (cpi->cpi_vendor) {
2653 		struct cpuid_regs cp;
2654 		uint32_t *edx, *ecx;
2655 
2656 	case X86_VENDOR_Intel:
2657 		/*
2658 		 * Seems like Intel duplicated what we necessary
2659 		 * here to make the initial crop of 64-bit OS's work.
2660 		 * Hopefully, those are the only "extended" bits
2661 		 * they'll add.
2662 		 */
2663 		/*FALLTHROUGH*/
2664 
2665 	case X86_VENDOR_AMD:
2666 		edx = &cpi->cpi_support[AMD_EDX_FEATURES];
2667 		ecx = &cpi->cpi_support[AMD_ECX_FEATURES];
2668 
2669 		*edx = CPI_FEATURES_XTD_EDX(cpi);
2670 		*ecx = CPI_FEATURES_XTD_ECX(cpi);
2671 
2672 		/*
2673 		 * [these features require explicit kernel support]
2674 		 */
2675 		switch (cpi->cpi_vendor) {
2676 		case X86_VENDOR_Intel:
2677 			if (!is_x86_feature(x86_featureset, X86FSET_TSCP))
2678 				*edx &= ~CPUID_AMD_EDX_TSCP;
2679 			break;
2680 
2681 		case X86_VENDOR_AMD:
2682 			if (!is_x86_feature(x86_featureset, X86FSET_TSCP))
2683 				*edx &= ~CPUID_AMD_EDX_TSCP;
2684 			if (!is_x86_feature(x86_featureset, X86FSET_SSE4A))
2685 				*ecx &= ~CPUID_AMD_ECX_SSE4A;
2686 			break;
2687 
2688 		default:
2689 			break;
2690 		}
2691 
2692 		/*
2693 		 * [no explicit support required beyond
2694 		 * x87 fp context and exception handlers]
2695 		 */
2696 		if (!fpu_exists)
2697 			*edx &= ~(CPUID_AMD_EDX_MMXamd |
2698 			    CPUID_AMD_EDX_3DNow | CPUID_AMD_EDX_3DNowx);
2699 
2700 		if (!is_x86_feature(x86_featureset, X86FSET_NX))
2701 			*edx &= ~CPUID_AMD_EDX_NX;
2702 #if !defined(__amd64)
2703 		*edx &= ~CPUID_AMD_EDX_LM;
2704 #endif
2705 		/*
2706 		 * Now map the supported feature vector to
2707 		 * things that we think userland will care about.
2708 		 */
2709 #if defined(__amd64)
2710 		if (*edx & CPUID_AMD_EDX_SYSC)
2711 			hwcap_flags |= AV_386_AMD_SYSC;
2712 #endif
2713 		if (*edx & CPUID_AMD_EDX_MMXamd)
2714 			hwcap_flags |= AV_386_AMD_MMX;
2715 		if (*edx & CPUID_AMD_EDX_3DNow)
2716 			hwcap_flags |= AV_386_AMD_3DNow;
2717 		if (*edx & CPUID_AMD_EDX_3DNowx)
2718 			hwcap_flags |= AV_386_AMD_3DNowx;
2719 		if (*ecx & CPUID_AMD_ECX_SVM)
2720 			hwcap_flags |= AV_386_AMD_SVM;
2721 
2722 		switch (cpi->cpi_vendor) {
2723 		case X86_VENDOR_AMD:
2724 			if (*edx & CPUID_AMD_EDX_TSCP)
2725 				hwcap_flags |= AV_386_TSCP;
2726 			if (*ecx & CPUID_AMD_ECX_AHF64)
2727 				hwcap_flags |= AV_386_AHF;
2728 			if (*ecx & CPUID_AMD_ECX_SSE4A)
2729 				hwcap_flags |= AV_386_AMD_SSE4A;
2730 			if (*ecx & CPUID_AMD_ECX_LZCNT)
2731 				hwcap_flags |= AV_386_AMD_LZCNT;
2732 			break;
2733 
2734 		case X86_VENDOR_Intel:
2735 			if (*edx & CPUID_AMD_EDX_TSCP)
2736 				hwcap_flags |= AV_386_TSCP;
2737 			/*
2738 			 * Aarrgh.
2739 			 * Intel uses a different bit in the same word.
2740 			 */
2741 			if (*ecx & CPUID_INTC_ECX_AHF64)
2742 				hwcap_flags |= AV_386_AHF;
2743 			break;
2744 
2745 		default:
2746 			break;
2747 		}
2748 		break;
2749 
2750 	case X86_VENDOR_TM:
2751 		cp.cp_eax = 0x80860001;
2752 		(void) __cpuid_insn(&cp);
2753 		cpi->cpi_support[TM_EDX_FEATURES] = cp.cp_edx;
2754 		break;
2755 
2756 	default:
2757 		break;
2758 	}
2759 
2760 pass4_done:
2761 	cpi->cpi_pass = 4;
2762 	if (hwcap_out != NULL) {
2763 		hwcap_out[0] = hwcap_flags;
2764 		hwcap_out[1] = hwcap_flags_2;
2765 	}
2766 }
2767 
2768 
2769 /*
2770  * Simulate the cpuid instruction using the data we previously
2771  * captured about this CPU.  We try our best to return the truth
2772  * about the hardware, independently of kernel support.
2773  */
2774 uint32_t
2775 cpuid_insn(cpu_t *cpu, struct cpuid_regs *cp)
2776 {
2777 	struct cpuid_info *cpi;
2778 	struct cpuid_regs *xcp;
2779 
2780 	if (cpu == NULL)
2781 		cpu = CPU;
2782 	cpi = cpu->cpu_m.mcpu_cpi;
2783 
2784 	ASSERT(cpuid_checkpass(cpu, 3));
2785 
2786 	/*
2787 	 * CPUID data is cached in two separate places: cpi_std for standard
2788 	 * CPUID functions, and cpi_extd for extended CPUID functions.
2789 	 */
2790 	if (cp->cp_eax <= cpi->cpi_maxeax && cp->cp_eax < NMAX_CPI_STD)
2791 		xcp = &cpi->cpi_std[cp->cp_eax];
2792 	else if (cp->cp_eax >= 0x80000000 && cp->cp_eax <= cpi->cpi_xmaxeax &&
2793 	    cp->cp_eax < 0x80000000 + NMAX_CPI_EXTD)
2794 		xcp = &cpi->cpi_extd[cp->cp_eax - 0x80000000];
2795 	else
2796 		/*
2797 		 * The caller is asking for data from an input parameter which
2798 		 * the kernel has not cached.  In this case we go fetch from
2799 		 * the hardware and return the data directly to the user.
2800 		 */
2801 		return (__cpuid_insn(cp));
2802 
2803 	cp->cp_eax = xcp->cp_eax;
2804 	cp->cp_ebx = xcp->cp_ebx;
2805 	cp->cp_ecx = xcp->cp_ecx;
2806 	cp->cp_edx = xcp->cp_edx;
2807 	return (cp->cp_eax);
2808 }
2809 
2810 int
2811 cpuid_checkpass(cpu_t *cpu, int pass)
2812 {
2813 	return (cpu != NULL && cpu->cpu_m.mcpu_cpi != NULL &&
2814 	    cpu->cpu_m.mcpu_cpi->cpi_pass >= pass);
2815 }
2816 
2817 int
2818 cpuid_getbrandstr(cpu_t *cpu, char *s, size_t n)
2819 {
2820 	ASSERT(cpuid_checkpass(cpu, 3));
2821 
2822 	return (snprintf(s, n, "%s", cpu->cpu_m.mcpu_cpi->cpi_brandstr));
2823 }
2824 
2825 int
2826 cpuid_is_cmt(cpu_t *cpu)
2827 {
2828 	if (cpu == NULL)
2829 		cpu = CPU;
2830 
2831 	ASSERT(cpuid_checkpass(cpu, 1));
2832 
2833 	return (cpu->cpu_m.mcpu_cpi->cpi_chipid >= 0);
2834 }
2835 
2836 /*
2837  * AMD and Intel both implement the 64-bit variant of the syscall
2838  * instruction (syscallq), so if there's -any- support for syscall,
2839  * cpuid currently says "yes, we support this".
2840  *
2841  * However, Intel decided to -not- implement the 32-bit variant of the
2842  * syscall instruction, so we provide a predicate to allow our caller
2843  * to test that subtlety here.
2844  *
2845  * XXPV	Currently, 32-bit syscall instructions don't work via the hypervisor,
2846  *	even in the case where the hardware would in fact support it.
2847  */
2848 /*ARGSUSED*/
2849 int
2850 cpuid_syscall32_insn(cpu_t *cpu)
2851 {
2852 	ASSERT(cpuid_checkpass((cpu == NULL ? CPU : cpu), 1));
2853 
2854 #if !defined(__xpv)
2855 	if (cpu == NULL)
2856 		cpu = CPU;
2857 
2858 	/*CSTYLED*/
2859 	{
2860 		struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
2861 
2862 		if (cpi->cpi_vendor == X86_VENDOR_AMD &&
2863 		    cpi->cpi_xmaxeax >= 0x80000001 &&
2864 		    (CPI_FEATURES_XTD_EDX(cpi) & CPUID_AMD_EDX_SYSC))
2865 			return (1);
2866 	}
2867 #endif
2868 	return (0);
2869 }
2870 
2871 int
2872 cpuid_getidstr(cpu_t *cpu, char *s, size_t n)
2873 {
2874 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
2875 
2876 	static const char fmt[] =
2877 	    "x86 (%s %X family %d model %d step %d clock %d MHz)";
2878 	static const char fmt_ht[] =
2879 	    "x86 (chipid 0x%x %s %X family %d model %d step %d clock %d MHz)";
2880 
2881 	ASSERT(cpuid_checkpass(cpu, 1));
2882 
2883 	if (cpuid_is_cmt(cpu))
2884 		return (snprintf(s, n, fmt_ht, cpi->cpi_chipid,
2885 		    cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax,
2886 		    cpi->cpi_family, cpi->cpi_model,
2887 		    cpi->cpi_step, cpu->cpu_type_info.pi_clock));
2888 	return (snprintf(s, n, fmt,
2889 	    cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax,
2890 	    cpi->cpi_family, cpi->cpi_model,
2891 	    cpi->cpi_step, cpu->cpu_type_info.pi_clock));
2892 }
2893 
2894 const char *
2895 cpuid_getvendorstr(cpu_t *cpu)
2896 {
2897 	ASSERT(cpuid_checkpass(cpu, 1));
2898 	return ((const char *)cpu->cpu_m.mcpu_cpi->cpi_vendorstr);
2899 }
2900 
2901 uint_t
2902 cpuid_getvendor(cpu_t *cpu)
2903 {
2904 	ASSERT(cpuid_checkpass(cpu, 1));
2905 	return (cpu->cpu_m.mcpu_cpi->cpi_vendor);
2906 }
2907 
2908 uint_t
2909 cpuid_getfamily(cpu_t *cpu)
2910 {
2911 	ASSERT(cpuid_checkpass(cpu, 1));
2912 	return (cpu->cpu_m.mcpu_cpi->cpi_family);
2913 }
2914 
2915 uint_t
2916 cpuid_getmodel(cpu_t *cpu)
2917 {
2918 	ASSERT(cpuid_checkpass(cpu, 1));
2919 	return (cpu->cpu_m.mcpu_cpi->cpi_model);
2920 }
2921 
2922 uint_t
2923 cpuid_get_ncpu_per_chip(cpu_t *cpu)
2924 {
2925 	ASSERT(cpuid_checkpass(cpu, 1));
2926 	return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_per_chip);
2927 }
2928 
2929 uint_t
2930 cpuid_get_ncore_per_chip(cpu_t *cpu)
2931 {
2932 	ASSERT(cpuid_checkpass(cpu, 1));
2933 	return (cpu->cpu_m.mcpu_cpi->cpi_ncore_per_chip);
2934 }
2935 
2936 uint_t
2937 cpuid_get_ncpu_sharing_last_cache(cpu_t *cpu)
2938 {
2939 	ASSERT(cpuid_checkpass(cpu, 2));
2940 	return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_shr_last_cache);
2941 }
2942 
2943 id_t
2944 cpuid_get_last_lvl_cacheid(cpu_t *cpu)
2945 {
2946 	ASSERT(cpuid_checkpass(cpu, 2));
2947 	return (cpu->cpu_m.mcpu_cpi->cpi_last_lvl_cacheid);
2948 }
2949 
2950 uint_t
2951 cpuid_getstep(cpu_t *cpu)
2952 {
2953 	ASSERT(cpuid_checkpass(cpu, 1));
2954 	return (cpu->cpu_m.mcpu_cpi->cpi_step);
2955 }
2956 
2957 uint_t
2958 cpuid_getsig(struct cpu *cpu)
2959 {
2960 	ASSERT(cpuid_checkpass(cpu, 1));
2961 	return (cpu->cpu_m.mcpu_cpi->cpi_std[1].cp_eax);
2962 }
2963 
2964 uint32_t
2965 cpuid_getchiprev(struct cpu *cpu)
2966 {
2967 	ASSERT(cpuid_checkpass(cpu, 1));
2968 	return (cpu->cpu_m.mcpu_cpi->cpi_chiprev);
2969 }
2970 
2971 const char *
2972 cpuid_getchiprevstr(struct cpu *cpu)
2973 {
2974 	ASSERT(cpuid_checkpass(cpu, 1));
2975 	return (cpu->cpu_m.mcpu_cpi->cpi_chiprevstr);
2976 }
2977 
2978 uint32_t
2979 cpuid_getsockettype(struct cpu *cpu)
2980 {
2981 	ASSERT(cpuid_checkpass(cpu, 1));
2982 	return (cpu->cpu_m.mcpu_cpi->cpi_socket);
2983 }
2984 
2985 const char *
2986 cpuid_getsocketstr(cpu_t *cpu)
2987 {
2988 	static const char *socketstr = NULL;
2989 	struct cpuid_info *cpi;
2990 
2991 	ASSERT(cpuid_checkpass(cpu, 1));
2992 	cpi = cpu->cpu_m.mcpu_cpi;
2993 
2994 	/* Assume that socket types are the same across the system */
2995 	if (socketstr == NULL)
2996 		socketstr = _cpuid_sktstr(cpi->cpi_vendor, cpi->cpi_family,
2997 		    cpi->cpi_model, cpi->cpi_step);
2998 
2999 
3000 	return (socketstr);
3001 }
3002 
3003 int
3004 cpuid_get_chipid(cpu_t *cpu)
3005 {
3006 	ASSERT(cpuid_checkpass(cpu, 1));
3007 
3008 	if (cpuid_is_cmt(cpu))
3009 		return (cpu->cpu_m.mcpu_cpi->cpi_chipid);
3010 	return (cpu->cpu_id);
3011 }
3012 
3013 id_t
3014 cpuid_get_coreid(cpu_t *cpu)
3015 {
3016 	ASSERT(cpuid_checkpass(cpu, 1));
3017 	return (cpu->cpu_m.mcpu_cpi->cpi_coreid);
3018 }
3019 
3020 int
3021 cpuid_get_pkgcoreid(cpu_t *cpu)
3022 {
3023 	ASSERT(cpuid_checkpass(cpu, 1));
3024 	return (cpu->cpu_m.mcpu_cpi->cpi_pkgcoreid);
3025 }
3026 
3027 int
3028 cpuid_get_clogid(cpu_t *cpu)
3029 {
3030 	ASSERT(cpuid_checkpass(cpu, 1));
3031 	return (cpu->cpu_m.mcpu_cpi->cpi_clogid);
3032 }
3033 
3034 int
3035 cpuid_get_cacheid(cpu_t *cpu)
3036 {
3037 	ASSERT(cpuid_checkpass(cpu, 1));
3038 	return (cpu->cpu_m.mcpu_cpi->cpi_last_lvl_cacheid);
3039 }
3040 
3041 uint_t
3042 cpuid_get_procnodeid(cpu_t *cpu)
3043 {
3044 	ASSERT(cpuid_checkpass(cpu, 1));
3045 	return (cpu->cpu_m.mcpu_cpi->cpi_procnodeid);
3046 }
3047 
3048 uint_t
3049 cpuid_get_procnodes_per_pkg(cpu_t *cpu)
3050 {
3051 	ASSERT(cpuid_checkpass(cpu, 1));
3052 	return (cpu->cpu_m.mcpu_cpi->cpi_procnodes_per_pkg);
3053 }
3054 
3055 uint_t
3056 cpuid_get_compunitid(cpu_t *cpu)
3057 {
3058 	ASSERT(cpuid_checkpass(cpu, 1));
3059 	return (cpu->cpu_m.mcpu_cpi->cpi_compunitid);
3060 }
3061 
3062 uint_t
3063 cpuid_get_cores_per_compunit(cpu_t *cpu)
3064 {
3065 	ASSERT(cpuid_checkpass(cpu, 1));
3066 	return (cpu->cpu_m.mcpu_cpi->cpi_cores_per_compunit);
3067 }
3068 
3069 /*ARGSUSED*/
3070 int
3071 cpuid_have_cr8access(cpu_t *cpu)
3072 {
3073 #if defined(__amd64)
3074 	return (1);
3075 #else
3076 	struct cpuid_info *cpi;
3077 
3078 	ASSERT(cpu != NULL);
3079 	cpi = cpu->cpu_m.mcpu_cpi;
3080 	if (cpi->cpi_vendor == X86_VENDOR_AMD && cpi->cpi_maxeax >= 1 &&
3081 	    (CPI_FEATURES_XTD_ECX(cpi) & CPUID_AMD_ECX_CR8D) != 0)
3082 		return (1);
3083 	return (0);
3084 #endif
3085 }
3086 
3087 uint32_t
3088 cpuid_get_apicid(cpu_t *cpu)
3089 {
3090 	ASSERT(cpuid_checkpass(cpu, 1));
3091 	if (cpu->cpu_m.mcpu_cpi->cpi_maxeax < 1) {
3092 		return (UINT32_MAX);
3093 	} else {
3094 		return (cpu->cpu_m.mcpu_cpi->cpi_apicid);
3095 	}
3096 }
3097 
3098 void
3099 cpuid_get_addrsize(cpu_t *cpu, uint_t *pabits, uint_t *vabits)
3100 {
3101 	struct cpuid_info *cpi;
3102 
3103 	if (cpu == NULL)
3104 		cpu = CPU;
3105 	cpi = cpu->cpu_m.mcpu_cpi;
3106 
3107 	ASSERT(cpuid_checkpass(cpu, 1));
3108 
3109 	if (pabits)
3110 		*pabits = cpi->cpi_pabits;
3111 	if (vabits)
3112 		*vabits = cpi->cpi_vabits;
3113 }
3114 
3115 /*
3116  * Returns the number of data TLB entries for a corresponding
3117  * pagesize.  If it can't be computed, or isn't known, the
3118  * routine returns zero.  If you ask about an architecturally
3119  * impossible pagesize, the routine will panic (so that the
3120  * hat implementor knows that things are inconsistent.)
3121  */
3122 uint_t
3123 cpuid_get_dtlb_nent(cpu_t *cpu, size_t pagesize)
3124 {
3125 	struct cpuid_info *cpi;
3126 	uint_t dtlb_nent = 0;
3127 
3128 	if (cpu == NULL)
3129 		cpu = CPU;
3130 	cpi = cpu->cpu_m.mcpu_cpi;
3131 
3132 	ASSERT(cpuid_checkpass(cpu, 1));
3133 
3134 	/*
3135 	 * Check the L2 TLB info
3136 	 */
3137 	if (cpi->cpi_xmaxeax >= 0x80000006) {
3138 		struct cpuid_regs *cp = &cpi->cpi_extd[6];
3139 
3140 		switch (pagesize) {
3141 
3142 		case 4 * 1024:
3143 			/*
3144 			 * All zero in the top 16 bits of the register
3145 			 * indicates a unified TLB. Size is in low 16 bits.
3146 			 */
3147 			if ((cp->cp_ebx & 0xffff0000) == 0)
3148 				dtlb_nent = cp->cp_ebx & 0x0000ffff;
3149 			else
3150 				dtlb_nent = BITX(cp->cp_ebx, 27, 16);
3151 			break;
3152 
3153 		case 2 * 1024 * 1024:
3154 			if ((cp->cp_eax & 0xffff0000) == 0)
3155 				dtlb_nent = cp->cp_eax & 0x0000ffff;
3156 			else
3157 				dtlb_nent = BITX(cp->cp_eax, 27, 16);
3158 			break;
3159 
3160 		default:
3161 			panic("unknown L2 pagesize");
3162 			/*NOTREACHED*/
3163 		}
3164 	}
3165 
3166 	if (dtlb_nent != 0)
3167 		return (dtlb_nent);
3168 
3169 	/*
3170 	 * No L2 TLB support for this size, try L1.
3171 	 */
3172 	if (cpi->cpi_xmaxeax >= 0x80000005) {
3173 		struct cpuid_regs *cp = &cpi->cpi_extd[5];
3174 
3175 		switch (pagesize) {
3176 		case 4 * 1024:
3177 			dtlb_nent = BITX(cp->cp_ebx, 23, 16);
3178 			break;
3179 		case 2 * 1024 * 1024:
3180 			dtlb_nent = BITX(cp->cp_eax, 23, 16);
3181 			break;
3182 		default:
3183 			panic("unknown L1 d-TLB pagesize");
3184 			/*NOTREACHED*/
3185 		}
3186 	}
3187 
3188 	return (dtlb_nent);
3189 }
3190 
3191 /*
3192  * Return 0 if the erratum is not present or not applicable, positive
3193  * if it is, and negative if the status of the erratum is unknown.
3194  *
3195  * See "Revision Guide for AMD Athlon(tm) 64 and AMD Opteron(tm)
3196  * Processors" #25759, Rev 3.57, August 2005
3197  */
3198 int
3199 cpuid_opteron_erratum(cpu_t *cpu, uint_t erratum)
3200 {
3201 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
3202 	uint_t eax;
3203 
3204 	/*
3205 	 * Bail out if this CPU isn't an AMD CPU, or if it's
3206 	 * a legacy (32-bit) AMD CPU.
3207 	 */
3208 	if (cpi->cpi_vendor != X86_VENDOR_AMD ||
3209 	    cpi->cpi_family == 4 || cpi->cpi_family == 5 ||
3210 	    cpi->cpi_family == 6)
3211 
3212 		return (0);
3213 
3214 	eax = cpi->cpi_std[1].cp_eax;
3215 
3216 #define	SH_B0(eax)	(eax == 0xf40 || eax == 0xf50)
3217 #define	SH_B3(eax) 	(eax == 0xf51)
3218 #define	B(eax)		(SH_B0(eax) || SH_B3(eax))
3219 
3220 #define	SH_C0(eax)	(eax == 0xf48 || eax == 0xf58)
3221 
3222 #define	SH_CG(eax)	(eax == 0xf4a || eax == 0xf5a || eax == 0xf7a)
3223 #define	DH_CG(eax)	(eax == 0xfc0 || eax == 0xfe0 || eax == 0xff0)
3224 #define	CH_CG(eax)	(eax == 0xf82 || eax == 0xfb2)
3225 #define	CG(eax)		(SH_CG(eax) || DH_CG(eax) || CH_CG(eax))
3226 
3227 #define	SH_D0(eax)	(eax == 0x10f40 || eax == 0x10f50 || eax == 0x10f70)
3228 #define	DH_D0(eax)	(eax == 0x10fc0 || eax == 0x10ff0)
3229 #define	CH_D0(eax)	(eax == 0x10f80 || eax == 0x10fb0)
3230 #define	D0(eax)		(SH_D0(eax) || DH_D0(eax) || CH_D0(eax))
3231 
3232 #define	SH_E0(eax)	(eax == 0x20f50 || eax == 0x20f40 || eax == 0x20f70)
3233 #define	JH_E1(eax)	(eax == 0x20f10)	/* JH8_E0 had 0x20f30 */
3234 #define	DH_E3(eax)	(eax == 0x20fc0 || eax == 0x20ff0)
3235 #define	SH_E4(eax)	(eax == 0x20f51 || eax == 0x20f71)
3236 #define	BH_E4(eax)	(eax == 0x20fb1)
3237 #define	SH_E5(eax)	(eax == 0x20f42)
3238 #define	DH_E6(eax)	(eax == 0x20ff2 || eax == 0x20fc2)
3239 #define	JH_E6(eax)	(eax == 0x20f12 || eax == 0x20f32)
3240 #define	EX(eax)		(SH_E0(eax) || JH_E1(eax) || DH_E3(eax) || \
3241 			    SH_E4(eax) || BH_E4(eax) || SH_E5(eax) || \
3242 			    DH_E6(eax) || JH_E6(eax))
3243 
3244 #define	DR_AX(eax)	(eax == 0x100f00 || eax == 0x100f01 || eax == 0x100f02)
3245 #define	DR_B0(eax)	(eax == 0x100f20)
3246 #define	DR_B1(eax)	(eax == 0x100f21)
3247 #define	DR_BA(eax)	(eax == 0x100f2a)
3248 #define	DR_B2(eax)	(eax == 0x100f22)
3249 #define	DR_B3(eax)	(eax == 0x100f23)
3250 #define	RB_C0(eax)	(eax == 0x100f40)
3251 
3252 	switch (erratum) {
3253 	case 1:
3254 		return (cpi->cpi_family < 0x10);
3255 	case 51:	/* what does the asterisk mean? */
3256 		return (B(eax) || SH_C0(eax) || CG(eax));
3257 	case 52:
3258 		return (B(eax));
3259 	case 57:
3260 		return (cpi->cpi_family <= 0x11);
3261 	case 58:
3262 		return (B(eax));
3263 	case 60:
3264 		return (cpi->cpi_family <= 0x11);
3265 	case 61:
3266 	case 62:
3267 	case 63:
3268 	case 64:
3269 	case 65:
3270 	case 66:
3271 	case 68:
3272 	case 69:
3273 	case 70:
3274 	case 71:
3275 		return (B(eax));
3276 	case 72:
3277 		return (SH_B0(eax));
3278 	case 74:
3279 		return (B(eax));
3280 	case 75:
3281 		return (cpi->cpi_family < 0x10);
3282 	case 76:
3283 		return (B(eax));
3284 	case 77:
3285 		return (cpi->cpi_family <= 0x11);
3286 	case 78:
3287 		return (B(eax) || SH_C0(eax));
3288 	case 79:
3289 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
3290 	case 80:
3291 	case 81:
3292 	case 82:
3293 		return (B(eax));
3294 	case 83:
3295 		return (B(eax) || SH_C0(eax) || CG(eax));
3296 	case 85:
3297 		return (cpi->cpi_family < 0x10);
3298 	case 86:
3299 		return (SH_C0(eax) || CG(eax));
3300 	case 88:
3301 #if !defined(__amd64)
3302 		return (0);
3303 #else
3304 		return (B(eax) || SH_C0(eax));
3305 #endif
3306 	case 89:
3307 		return (cpi->cpi_family < 0x10);
3308 	case 90:
3309 		return (B(eax) || SH_C0(eax) || CG(eax));
3310 	case 91:
3311 	case 92:
3312 		return (B(eax) || SH_C0(eax));
3313 	case 93:
3314 		return (SH_C0(eax));
3315 	case 94:
3316 		return (B(eax) || SH_C0(eax) || CG(eax));
3317 	case 95:
3318 #if !defined(__amd64)
3319 		return (0);
3320 #else
3321 		return (B(eax) || SH_C0(eax));
3322 #endif
3323 	case 96:
3324 		return (B(eax) || SH_C0(eax) || CG(eax));
3325 	case 97:
3326 	case 98:
3327 		return (SH_C0(eax) || CG(eax));
3328 	case 99:
3329 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
3330 	case 100:
3331 		return (B(eax) || SH_C0(eax));
3332 	case 101:
3333 	case 103:
3334 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
3335 	case 104:
3336 		return (SH_C0(eax) || CG(eax) || D0(eax));
3337 	case 105:
3338 	case 106:
3339 	case 107:
3340 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
3341 	case 108:
3342 		return (DH_CG(eax));
3343 	case 109:
3344 		return (SH_C0(eax) || CG(eax) || D0(eax));
3345 	case 110:
3346 		return (D0(eax) || EX(eax));
3347 	case 111:
3348 		return (CG(eax));
3349 	case 112:
3350 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
3351 	case 113:
3352 		return (eax == 0x20fc0);
3353 	case 114:
3354 		return (SH_E0(eax) || JH_E1(eax) || DH_E3(eax));
3355 	case 115:
3356 		return (SH_E0(eax) || JH_E1(eax));
3357 	case 116:
3358 		return (SH_E0(eax) || JH_E1(eax) || DH_E3(eax));
3359 	case 117:
3360 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
3361 	case 118:
3362 		return (SH_E0(eax) || JH_E1(eax) || SH_E4(eax) || BH_E4(eax) ||
3363 		    JH_E6(eax));
3364 	case 121:
3365 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
3366 	case 122:
3367 		return (cpi->cpi_family < 0x10 || cpi->cpi_family == 0x11);
3368 	case 123:
3369 		return (JH_E1(eax) || BH_E4(eax) || JH_E6(eax));
3370 	case 131:
3371 		return (cpi->cpi_family < 0x10);
3372 	case 6336786:
3373 		/*
3374 		 * Test for AdvPowerMgmtInfo.TscPStateInvariant
3375 		 * if this is a K8 family or newer processor
3376 		 */
3377 		if (CPI_FAMILY(cpi) == 0xf) {
3378 			struct cpuid_regs regs;
3379 			regs.cp_eax = 0x80000007;
3380 			(void) __cpuid_insn(&regs);
3381 			return (!(regs.cp_edx & 0x100));
3382 		}
3383 		return (0);
3384 	case 6323525:
3385 		return (((((eax >> 12) & 0xff00) + (eax & 0xf00)) |
3386 		    (((eax >> 4) & 0xf) | ((eax >> 12) & 0xf0))) < 0xf40);
3387 
3388 	case 6671130:
3389 		/*
3390 		 * check for processors (pre-Shanghai) that do not provide
3391 		 * optimal management of 1gb ptes in its tlb.
3392 		 */
3393 		return (cpi->cpi_family == 0x10 && cpi->cpi_model < 4);
3394 
3395 	case 298:
3396 		return (DR_AX(eax) || DR_B0(eax) || DR_B1(eax) || DR_BA(eax) ||
3397 		    DR_B2(eax) || RB_C0(eax));
3398 
3399 	case 721:
3400 #if defined(__amd64)
3401 		return (cpi->cpi_family == 0x10 || cpi->cpi_family == 0x12);
3402 #else
3403 		return (0);
3404 #endif
3405 
3406 	default:
3407 		return (-1);
3408 
3409 	}
3410 }
3411 
3412 /*
3413  * Determine if specified erratum is present via OSVW (OS Visible Workaround).
3414  * Return 1 if erratum is present, 0 if not present and -1 if indeterminate.
3415  */
3416 int
3417 osvw_opteron_erratum(cpu_t *cpu, uint_t erratum)
3418 {
3419 	struct cpuid_info	*cpi;
3420 	uint_t			osvwid;
3421 	static int		osvwfeature = -1;
3422 	uint64_t		osvwlength;
3423 
3424 
3425 	cpi = cpu->cpu_m.mcpu_cpi;
3426 
3427 	/* confirm OSVW supported */
3428 	if (osvwfeature == -1) {
3429 		osvwfeature = cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW;
3430 	} else {
3431 		/* assert that osvw feature setting is consistent on all cpus */
3432 		ASSERT(osvwfeature ==
3433 		    (cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW));
3434 	}
3435 	if (!osvwfeature)
3436 		return (-1);
3437 
3438 	osvwlength = rdmsr(MSR_AMD_OSVW_ID_LEN) & OSVW_ID_LEN_MASK;
3439 
3440 	switch (erratum) {
3441 	case 298:	/* osvwid is 0 */
3442 		osvwid = 0;
3443 		if (osvwlength <= (uint64_t)osvwid) {
3444 			/* osvwid 0 is unknown */
3445 			return (-1);
3446 		}
3447 
3448 		/*
3449 		 * Check the OSVW STATUS MSR to determine the state
3450 		 * of the erratum where:
3451 		 *   0 - fixed by HW
3452 		 *   1 - BIOS has applied the workaround when BIOS
3453 		 *   workaround is available. (Or for other errata,
3454 		 *   OS workaround is required.)
3455 		 * For a value of 1, caller will confirm that the
3456 		 * erratum 298 workaround has indeed been applied by BIOS.
3457 		 *
3458 		 * A 1 may be set in cpus that have a HW fix
3459 		 * in a mixed cpu system. Regarding erratum 298:
3460 		 *   In a multiprocessor platform, the workaround above
3461 		 *   should be applied to all processors regardless of
3462 		 *   silicon revision when an affected processor is
3463 		 *   present.
3464 		 */
3465 
3466 		return (rdmsr(MSR_AMD_OSVW_STATUS +
3467 		    (osvwid / OSVW_ID_CNT_PER_MSR)) &
3468 		    (1ULL << (osvwid % OSVW_ID_CNT_PER_MSR)));
3469 
3470 	default:
3471 		return (-1);
3472 	}
3473 }
3474 
3475 static const char assoc_str[] = "associativity";
3476 static const char line_str[] = "line-size";
3477 static const char size_str[] = "size";
3478 
3479 static void
3480 add_cache_prop(dev_info_t *devi, const char *label, const char *type,
3481     uint32_t val)
3482 {
3483 	char buf[128];
3484 
3485 	/*
3486 	 * ndi_prop_update_int() is used because it is desirable for
3487 	 * DDI_PROP_HW_DEF and DDI_PROP_DONTSLEEP to be set.
3488 	 */
3489 	if (snprintf(buf, sizeof (buf), "%s-%s", label, type) < sizeof (buf))
3490 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, devi, buf, val);
3491 }
3492 
3493 /*
3494  * Intel-style cache/tlb description
3495  *
3496  * Standard cpuid level 2 gives a randomly ordered
3497  * selection of tags that index into a table that describes
3498  * cache and tlb properties.
3499  */
3500 
3501 static const char l1_icache_str[] = "l1-icache";
3502 static const char l1_dcache_str[] = "l1-dcache";
3503 static const char l2_cache_str[] = "l2-cache";
3504 static const char l3_cache_str[] = "l3-cache";
3505 static const char itlb4k_str[] = "itlb-4K";
3506 static const char dtlb4k_str[] = "dtlb-4K";
3507 static const char itlb2M_str[] = "itlb-2M";
3508 static const char itlb4M_str[] = "itlb-4M";
3509 static const char dtlb4M_str[] = "dtlb-4M";
3510 static const char dtlb24_str[] = "dtlb0-2M-4M";
3511 static const char itlb424_str[] = "itlb-4K-2M-4M";
3512 static const char itlb24_str[] = "itlb-2M-4M";
3513 static const char dtlb44_str[] = "dtlb-4K-4M";
3514 static const char sl1_dcache_str[] = "sectored-l1-dcache";
3515 static const char sl2_cache_str[] = "sectored-l2-cache";
3516 static const char itrace_str[] = "itrace-cache";
3517 static const char sl3_cache_str[] = "sectored-l3-cache";
3518 static const char sh_l2_tlb4k_str[] = "shared-l2-tlb-4k";
3519 
3520 static const struct cachetab {
3521 	uint8_t 	ct_code;
3522 	uint8_t		ct_assoc;
3523 	uint16_t 	ct_line_size;
3524 	size_t		ct_size;
3525 	const char	*ct_label;
3526 } intel_ctab[] = {
3527 	/*
3528 	 * maintain descending order!
3529 	 *
3530 	 * Codes ignored - Reason
3531 	 * ----------------------
3532 	 * 40H - intel_cpuid_4_cache_info() disambiguates l2/l3 cache
3533 	 * f0H/f1H - Currently we do not interpret prefetch size by design
3534 	 */
3535 	{ 0xe4, 16, 64, 8*1024*1024, l3_cache_str},
3536 	{ 0xe3, 16, 64, 4*1024*1024, l3_cache_str},
3537 	{ 0xe2, 16, 64, 2*1024*1024, l3_cache_str},
3538 	{ 0xde, 12, 64, 6*1024*1024, l3_cache_str},
3539 	{ 0xdd, 12, 64, 3*1024*1024, l3_cache_str},
3540 	{ 0xdc, 12, 64, ((1*1024*1024)+(512*1024)), l3_cache_str},
3541 	{ 0xd8, 8, 64, 4*1024*1024, l3_cache_str},
3542 	{ 0xd7, 8, 64, 2*1024*1024, l3_cache_str},
3543 	{ 0xd6, 8, 64, 1*1024*1024, l3_cache_str},
3544 	{ 0xd2, 4, 64, 2*1024*1024, l3_cache_str},
3545 	{ 0xd1, 4, 64, 1*1024*1024, l3_cache_str},
3546 	{ 0xd0, 4, 64, 512*1024, l3_cache_str},
3547 	{ 0xca, 4, 0, 512, sh_l2_tlb4k_str},
3548 	{ 0xc0, 4, 0, 8, dtlb44_str },
3549 	{ 0xba, 4, 0, 64, dtlb4k_str },
3550 	{ 0xb4, 4, 0, 256, dtlb4k_str },
3551 	{ 0xb3, 4, 0, 128, dtlb4k_str },
3552 	{ 0xb2, 4, 0, 64, itlb4k_str },
3553 	{ 0xb0, 4, 0, 128, itlb4k_str },
3554 	{ 0x87, 8, 64, 1024*1024, l2_cache_str},
3555 	{ 0x86, 4, 64, 512*1024, l2_cache_str},
3556 	{ 0x85, 8, 32, 2*1024*1024, l2_cache_str},
3557 	{ 0x84, 8, 32, 1024*1024, l2_cache_str},
3558 	{ 0x83, 8, 32, 512*1024, l2_cache_str},
3559 	{ 0x82, 8, 32, 256*1024, l2_cache_str},
3560 	{ 0x80, 8, 64, 512*1024, l2_cache_str},
3561 	{ 0x7f, 2, 64, 512*1024, l2_cache_str},
3562 	{ 0x7d, 8, 64, 2*1024*1024, sl2_cache_str},
3563 	{ 0x7c, 8, 64, 1024*1024, sl2_cache_str},
3564 	{ 0x7b, 8, 64, 512*1024, sl2_cache_str},
3565 	{ 0x7a, 8, 64, 256*1024, sl2_cache_str},
3566 	{ 0x79, 8, 64, 128*1024, sl2_cache_str},
3567 	{ 0x78, 8, 64, 1024*1024, l2_cache_str},
3568 	{ 0x73, 8, 0, 64*1024, itrace_str},
3569 	{ 0x72, 8, 0, 32*1024, itrace_str},
3570 	{ 0x71, 8, 0, 16*1024, itrace_str},
3571 	{ 0x70, 8, 0, 12*1024, itrace_str},
3572 	{ 0x68, 4, 64, 32*1024, sl1_dcache_str},
3573 	{ 0x67, 4, 64, 16*1024, sl1_dcache_str},
3574 	{ 0x66, 4, 64, 8*1024, sl1_dcache_str},
3575 	{ 0x60, 8, 64, 16*1024, sl1_dcache_str},
3576 	{ 0x5d, 0, 0, 256, dtlb44_str},
3577 	{ 0x5c, 0, 0, 128, dtlb44_str},
3578 	{ 0x5b, 0, 0, 64, dtlb44_str},
3579 	{ 0x5a, 4, 0, 32, dtlb24_str},
3580 	{ 0x59, 0, 0, 16, dtlb4k_str},
3581 	{ 0x57, 4, 0, 16, dtlb4k_str},
3582 	{ 0x56, 4, 0, 16, dtlb4M_str},
3583 	{ 0x55, 0, 0, 7, itlb24_str},
3584 	{ 0x52, 0, 0, 256, itlb424_str},
3585 	{ 0x51, 0, 0, 128, itlb424_str},
3586 	{ 0x50, 0, 0, 64, itlb424_str},
3587 	{ 0x4f, 0, 0, 32, itlb4k_str},
3588 	{ 0x4e, 24, 64, 6*1024*1024, l2_cache_str},
3589 	{ 0x4d, 16, 64, 16*1024*1024, l3_cache_str},
3590 	{ 0x4c, 12, 64, 12*1024*1024, l3_cache_str},
3591 	{ 0x4b, 16, 64, 8*1024*1024, l3_cache_str},
3592 	{ 0x4a, 12, 64, 6*1024*1024, l3_cache_str},
3593 	{ 0x49, 16, 64, 4*1024*1024, l3_cache_str},
3594 	{ 0x48, 12, 64, 3*1024*1024, l2_cache_str},
3595 	{ 0x47, 8, 64, 8*1024*1024, l3_cache_str},
3596 	{ 0x46, 4, 64, 4*1024*1024, l3_cache_str},
3597 	{ 0x45, 4, 32, 2*1024*1024, l2_cache_str},
3598 	{ 0x44, 4, 32, 1024*1024, l2_cache_str},
3599 	{ 0x43, 4, 32, 512*1024, l2_cache_str},
3600 	{ 0x42, 4, 32, 256*1024, l2_cache_str},
3601 	{ 0x41, 4, 32, 128*1024, l2_cache_str},
3602 	{ 0x3e, 4, 64, 512*1024, sl2_cache_str},
3603 	{ 0x3d, 6, 64, 384*1024, sl2_cache_str},
3604 	{ 0x3c, 4, 64, 256*1024, sl2_cache_str},
3605 	{ 0x3b, 2, 64, 128*1024, sl2_cache_str},
3606 	{ 0x3a, 6, 64, 192*1024, sl2_cache_str},
3607 	{ 0x39, 4, 64, 128*1024, sl2_cache_str},
3608 	{ 0x30, 8, 64, 32*1024, l1_icache_str},
3609 	{ 0x2c, 8, 64, 32*1024, l1_dcache_str},
3610 	{ 0x29, 8, 64, 4096*1024, sl3_cache_str},
3611 	{ 0x25, 8, 64, 2048*1024, sl3_cache_str},
3612 	{ 0x23, 8, 64, 1024*1024, sl3_cache_str},
3613 	{ 0x22, 4, 64, 512*1024, sl3_cache_str},
3614 	{ 0x0e, 6, 64, 24*1024, l1_dcache_str},
3615 	{ 0x0d, 4, 32, 16*1024, l1_dcache_str},
3616 	{ 0x0c, 4, 32, 16*1024, l1_dcache_str},
3617 	{ 0x0b, 4, 0, 4, itlb4M_str},
3618 	{ 0x0a, 2, 32, 8*1024, l1_dcache_str},
3619 	{ 0x08, 4, 32, 16*1024, l1_icache_str},
3620 	{ 0x06, 4, 32, 8*1024, l1_icache_str},
3621 	{ 0x05, 4, 0, 32, dtlb4M_str},
3622 	{ 0x04, 4, 0, 8, dtlb4M_str},
3623 	{ 0x03, 4, 0, 64, dtlb4k_str},
3624 	{ 0x02, 4, 0, 2, itlb4M_str},
3625 	{ 0x01, 4, 0, 32, itlb4k_str},
3626 	{ 0 }
3627 };
3628 
3629 static const struct cachetab cyrix_ctab[] = {
3630 	{ 0x70, 4, 0, 32, "tlb-4K" },
3631 	{ 0x80, 4, 16, 16*1024, "l1-cache" },
3632 	{ 0 }
3633 };
3634 
3635 /*
3636  * Search a cache table for a matching entry
3637  */
3638 static const struct cachetab *
3639 find_cacheent(const struct cachetab *ct, uint_t code)
3640 {
3641 	if (code != 0) {
3642 		for (; ct->ct_code != 0; ct++)
3643 			if (ct->ct_code <= code)
3644 				break;
3645 		if (ct->ct_code == code)
3646 			return (ct);
3647 	}
3648 	return (NULL);
3649 }
3650 
3651 /*
3652  * Populate cachetab entry with L2 or L3 cache-information using
3653  * cpuid function 4. This function is called from intel_walk_cacheinfo()
3654  * when descriptor 0x49 is encountered. It returns 0 if no such cache
3655  * information is found.
3656  */
3657 static int
3658 intel_cpuid_4_cache_info(struct cachetab *ct, struct cpuid_info *cpi)
3659 {
3660 	uint32_t level, i;
3661 	int ret = 0;
3662 
3663 	for (i = 0; i < cpi->cpi_std_4_size; i++) {
3664 		level = CPI_CACHE_LVL(cpi->cpi_std_4[i]);
3665 
3666 		if (level == 2 || level == 3) {
3667 			ct->ct_assoc = CPI_CACHE_WAYS(cpi->cpi_std_4[i]) + 1;
3668 			ct->ct_line_size =
3669 			    CPI_CACHE_COH_LN_SZ(cpi->cpi_std_4[i]) + 1;
3670 			ct->ct_size = ct->ct_assoc *
3671 			    (CPI_CACHE_PARTS(cpi->cpi_std_4[i]) + 1) *
3672 			    ct->ct_line_size *
3673 			    (cpi->cpi_std_4[i]->cp_ecx + 1);
3674 
3675 			if (level == 2) {
3676 				ct->ct_label = l2_cache_str;
3677 			} else if (level == 3) {
3678 				ct->ct_label = l3_cache_str;
3679 			}
3680 			ret = 1;
3681 		}
3682 	}
3683 
3684 	return (ret);
3685 }
3686 
3687 /*
3688  * Walk the cacheinfo descriptor, applying 'func' to every valid element
3689  * The walk is terminated if the walker returns non-zero.
3690  */
3691 static void
3692 intel_walk_cacheinfo(struct cpuid_info *cpi,
3693     void *arg, int (*func)(void *, const struct cachetab *))
3694 {
3695 	const struct cachetab *ct;
3696 	struct cachetab des_49_ct, des_b1_ct;
3697 	uint8_t *dp;
3698 	int i;
3699 
3700 	if ((dp = cpi->cpi_cacheinfo) == NULL)
3701 		return;
3702 	for (i = 0; i < cpi->cpi_ncache; i++, dp++) {
3703 		/*
3704 		 * For overloaded descriptor 0x49 we use cpuid function 4
3705 		 * if supported by the current processor, to create
3706 		 * cache information.
3707 		 * For overloaded descriptor 0xb1 we use X86_PAE flag
3708 		 * to disambiguate the cache information.
3709 		 */
3710 		if (*dp == 0x49 && cpi->cpi_maxeax >= 0x4 &&
3711 		    intel_cpuid_4_cache_info(&des_49_ct, cpi) == 1) {
3712 				ct = &des_49_ct;
3713 		} else if (*dp == 0xb1) {
3714 			des_b1_ct.ct_code = 0xb1;
3715 			des_b1_ct.ct_assoc = 4;
3716 			des_b1_ct.ct_line_size = 0;
3717 			if (is_x86_feature(x86_featureset, X86FSET_PAE)) {
3718 				des_b1_ct.ct_size = 8;
3719 				des_b1_ct.ct_label = itlb2M_str;
3720 			} else {
3721 				des_b1_ct.ct_size = 4;
3722 				des_b1_ct.ct_label = itlb4M_str;
3723 			}
3724 			ct = &des_b1_ct;
3725 		} else {
3726 			if ((ct = find_cacheent(intel_ctab, *dp)) == NULL) {
3727 				continue;
3728 			}
3729 		}
3730 
3731 		if (func(arg, ct) != 0) {
3732 			break;
3733 		}
3734 	}
3735 }
3736 
3737 /*
3738  * (Like the Intel one, except for Cyrix CPUs)
3739  */
3740 static void
3741 cyrix_walk_cacheinfo(struct cpuid_info *cpi,
3742     void *arg, int (*func)(void *, const struct cachetab *))
3743 {
3744 	const struct cachetab *ct;
3745 	uint8_t *dp;
3746 	int i;
3747 
3748 	if ((dp = cpi->cpi_cacheinfo) == NULL)
3749 		return;
3750 	for (i = 0; i < cpi->cpi_ncache; i++, dp++) {
3751 		/*
3752 		 * Search Cyrix-specific descriptor table first ..
3753 		 */
3754 		if ((ct = find_cacheent(cyrix_ctab, *dp)) != NULL) {
3755 			if (func(arg, ct) != 0)
3756 				break;
3757 			continue;
3758 		}
3759 		/*
3760 		 * .. else fall back to the Intel one
3761 		 */
3762 		if ((ct = find_cacheent(intel_ctab, *dp)) != NULL) {
3763 			if (func(arg, ct) != 0)
3764 				break;
3765 			continue;
3766 		}
3767 	}
3768 }
3769 
3770 /*
3771  * A cacheinfo walker that adds associativity, line-size, and size properties
3772  * to the devinfo node it is passed as an argument.
3773  */
3774 static int
3775 add_cacheent_props(void *arg, const struct cachetab *ct)
3776 {
3777 	dev_info_t *devi = arg;
3778 
3779 	add_cache_prop(devi, ct->ct_label, assoc_str, ct->ct_assoc);
3780 	if (ct->ct_line_size != 0)
3781 		add_cache_prop(devi, ct->ct_label, line_str,
3782 		    ct->ct_line_size);
3783 	add_cache_prop(devi, ct->ct_label, size_str, ct->ct_size);
3784 	return (0);
3785 }
3786 
3787 
3788 static const char fully_assoc[] = "fully-associative?";
3789 
3790 /*
3791  * AMD style cache/tlb description
3792  *
3793  * Extended functions 5 and 6 directly describe properties of
3794  * tlbs and various cache levels.
3795  */
3796 static void
3797 add_amd_assoc(dev_info_t *devi, const char *label, uint_t assoc)
3798 {
3799 	switch (assoc) {
3800 	case 0:	/* reserved; ignore */
3801 		break;
3802 	default:
3803 		add_cache_prop(devi, label, assoc_str, assoc);
3804 		break;
3805 	case 0xff:
3806 		add_cache_prop(devi, label, fully_assoc, 1);
3807 		break;
3808 	}
3809 }
3810 
3811 static void
3812 add_amd_tlb(dev_info_t *devi, const char *label, uint_t assoc, uint_t size)
3813 {
3814 	if (size == 0)
3815 		return;
3816 	add_cache_prop(devi, label, size_str, size);
3817 	add_amd_assoc(devi, label, assoc);
3818 }
3819 
3820 static void
3821 add_amd_cache(dev_info_t *devi, const char *label,
3822     uint_t size, uint_t assoc, uint_t lines_per_tag, uint_t line_size)
3823 {
3824 	if (size == 0 || line_size == 0)
3825 		return;
3826 	add_amd_assoc(devi, label, assoc);
3827 	/*
3828 	 * Most AMD parts have a sectored cache. Multiple cache lines are
3829 	 * associated with each tag. A sector consists of all cache lines
3830 	 * associated with a tag. For example, the AMD K6-III has a sector
3831 	 * size of 2 cache lines per tag.
3832 	 */
3833 	if (lines_per_tag != 0)
3834 		add_cache_prop(devi, label, "lines-per-tag", lines_per_tag);
3835 	add_cache_prop(devi, label, line_str, line_size);
3836 	add_cache_prop(devi, label, size_str, size * 1024);
3837 }
3838 
3839 static void
3840 add_amd_l2_assoc(dev_info_t *devi, const char *label, uint_t assoc)
3841 {
3842 	switch (assoc) {
3843 	case 0:	/* off */
3844 		break;
3845 	case 1:
3846 	case 2:
3847 	case 4:
3848 		add_cache_prop(devi, label, assoc_str, assoc);
3849 		break;
3850 	case 6:
3851 		add_cache_prop(devi, label, assoc_str, 8);
3852 		break;
3853 	case 8:
3854 		add_cache_prop(devi, label, assoc_str, 16);
3855 		break;
3856 	case 0xf:
3857 		add_cache_prop(devi, label, fully_assoc, 1);
3858 		break;
3859 	default: /* reserved; ignore */
3860 		break;
3861 	}
3862 }
3863 
3864 static void
3865 add_amd_l2_tlb(dev_info_t *devi, const char *label, uint_t assoc, uint_t size)
3866 {
3867 	if (size == 0 || assoc == 0)
3868 		return;
3869 	add_amd_l2_assoc(devi, label, assoc);
3870 	add_cache_prop(devi, label, size_str, size);
3871 }
3872 
3873 static void
3874 add_amd_l2_cache(dev_info_t *devi, const char *label,
3875     uint_t size, uint_t assoc, uint_t lines_per_tag, uint_t line_size)
3876 {
3877 	if (size == 0 || assoc == 0 || line_size == 0)
3878 		return;
3879 	add_amd_l2_assoc(devi, label, assoc);
3880 	if (lines_per_tag != 0)
3881 		add_cache_prop(devi, label, "lines-per-tag", lines_per_tag);
3882 	add_cache_prop(devi, label, line_str, line_size);
3883 	add_cache_prop(devi, label, size_str, size * 1024);
3884 }
3885 
3886 static void
3887 amd_cache_info(struct cpuid_info *cpi, dev_info_t *devi)
3888 {
3889 	struct cpuid_regs *cp;
3890 
3891 	if (cpi->cpi_xmaxeax < 0x80000005)
3892 		return;
3893 	cp = &cpi->cpi_extd[5];
3894 
3895 	/*
3896 	 * 4M/2M L1 TLB configuration
3897 	 *
3898 	 * We report the size for 2M pages because AMD uses two
3899 	 * TLB entries for one 4M page.
3900 	 */
3901 	add_amd_tlb(devi, "dtlb-2M",
3902 	    BITX(cp->cp_eax, 31, 24), BITX(cp->cp_eax, 23, 16));
3903 	add_amd_tlb(devi, "itlb-2M",
3904 	    BITX(cp->cp_eax, 15, 8), BITX(cp->cp_eax, 7, 0));
3905 
3906 	/*
3907 	 * 4K L1 TLB configuration
3908 	 */
3909 
3910 	switch (cpi->cpi_vendor) {
3911 		uint_t nentries;
3912 	case X86_VENDOR_TM:
3913 		if (cpi->cpi_family >= 5) {
3914 			/*
3915 			 * Crusoe processors have 256 TLB entries, but
3916 			 * cpuid data format constrains them to only
3917 			 * reporting 255 of them.
3918 			 */
3919 			if ((nentries = BITX(cp->cp_ebx, 23, 16)) == 255)
3920 				nentries = 256;
3921 			/*
3922 			 * Crusoe processors also have a unified TLB
3923 			 */
3924 			add_amd_tlb(devi, "tlb-4K", BITX(cp->cp_ebx, 31, 24),
3925 			    nentries);
3926 			break;
3927 		}
3928 		/*FALLTHROUGH*/
3929 	default:
3930 		add_amd_tlb(devi, itlb4k_str,
3931 		    BITX(cp->cp_ebx, 31, 24), BITX(cp->cp_ebx, 23, 16));
3932 		add_amd_tlb(devi, dtlb4k_str,
3933 		    BITX(cp->cp_ebx, 15, 8), BITX(cp->cp_ebx, 7, 0));
3934 		break;
3935 	}
3936 
3937 	/*
3938 	 * data L1 cache configuration
3939 	 */
3940 
3941 	add_amd_cache(devi, l1_dcache_str,
3942 	    BITX(cp->cp_ecx, 31, 24), BITX(cp->cp_ecx, 23, 16),
3943 	    BITX(cp->cp_ecx, 15, 8), BITX(cp->cp_ecx, 7, 0));
3944 
3945 	/*
3946 	 * code L1 cache configuration
3947 	 */
3948 
3949 	add_amd_cache(devi, l1_icache_str,
3950 	    BITX(cp->cp_edx, 31, 24), BITX(cp->cp_edx, 23, 16),
3951 	    BITX(cp->cp_edx, 15, 8), BITX(cp->cp_edx, 7, 0));
3952 
3953 	if (cpi->cpi_xmaxeax < 0x80000006)
3954 		return;
3955 	cp = &cpi->cpi_extd[6];
3956 
3957 	/* Check for a unified L2 TLB for large pages */
3958 
3959 	if (BITX(cp->cp_eax, 31, 16) == 0)
3960 		add_amd_l2_tlb(devi, "l2-tlb-2M",
3961 		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
3962 	else {
3963 		add_amd_l2_tlb(devi, "l2-dtlb-2M",
3964 		    BITX(cp->cp_eax, 31, 28), BITX(cp->cp_eax, 27, 16));
3965 		add_amd_l2_tlb(devi, "l2-itlb-2M",
3966 		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
3967 	}
3968 
3969 	/* Check for a unified L2 TLB for 4K pages */
3970 
3971 	if (BITX(cp->cp_ebx, 31, 16) == 0) {
3972 		add_amd_l2_tlb(devi, "l2-tlb-4K",
3973 		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
3974 	} else {
3975 		add_amd_l2_tlb(devi, "l2-dtlb-4K",
3976 		    BITX(cp->cp_eax, 31, 28), BITX(cp->cp_eax, 27, 16));
3977 		add_amd_l2_tlb(devi, "l2-itlb-4K",
3978 		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
3979 	}
3980 
3981 	add_amd_l2_cache(devi, l2_cache_str,
3982 	    BITX(cp->cp_ecx, 31, 16), BITX(cp->cp_ecx, 15, 12),
3983 	    BITX(cp->cp_ecx, 11, 8), BITX(cp->cp_ecx, 7, 0));
3984 }
3985 
3986 /*
3987  * There are two basic ways that the x86 world describes it cache
3988  * and tlb architecture - Intel's way and AMD's way.
3989  *
3990  * Return which flavor of cache architecture we should use
3991  */
3992 static int
3993 x86_which_cacheinfo(struct cpuid_info *cpi)
3994 {
3995 	switch (cpi->cpi_vendor) {
3996 	case X86_VENDOR_Intel:
3997 		if (cpi->cpi_maxeax >= 2)
3998 			return (X86_VENDOR_Intel);
3999 		break;
4000 	case X86_VENDOR_AMD:
4001 		/*
4002 		 * The K5 model 1 was the first part from AMD that reported
4003 		 * cache sizes via extended cpuid functions.
4004 		 */
4005 		if (cpi->cpi_family > 5 ||
4006 		    (cpi->cpi_family == 5 && cpi->cpi_model >= 1))
4007 			return (X86_VENDOR_AMD);
4008 		break;
4009 	case X86_VENDOR_TM:
4010 		if (cpi->cpi_family >= 5)
4011 			return (X86_VENDOR_AMD);
4012 		/*FALLTHROUGH*/
4013 	default:
4014 		/*
4015 		 * If they have extended CPU data for 0x80000005
4016 		 * then we assume they have AMD-format cache
4017 		 * information.
4018 		 *
4019 		 * If not, and the vendor happens to be Cyrix,
4020 		 * then try our-Cyrix specific handler.
4021 		 *
4022 		 * If we're not Cyrix, then assume we're using Intel's
4023 		 * table-driven format instead.
4024 		 */
4025 		if (cpi->cpi_xmaxeax >= 0x80000005)
4026 			return (X86_VENDOR_AMD);
4027 		else if (cpi->cpi_vendor == X86_VENDOR_Cyrix)
4028 			return (X86_VENDOR_Cyrix);
4029 		else if (cpi->cpi_maxeax >= 2)
4030 			return (X86_VENDOR_Intel);
4031 		break;
4032 	}
4033 	return (-1);
4034 }
4035 
4036 void
4037 cpuid_set_cpu_properties(void *dip, processorid_t cpu_id,
4038     struct cpuid_info *cpi)
4039 {
4040 	dev_info_t *cpu_devi;
4041 	int create;
4042 
4043 	cpu_devi = (dev_info_t *)dip;
4044 
4045 	/* device_type */
4046 	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
4047 	    "device_type", "cpu");
4048 
4049 	/* reg */
4050 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4051 	    "reg", cpu_id);
4052 
4053 	/* cpu-mhz, and clock-frequency */
4054 	if (cpu_freq > 0) {
4055 		long long mul;
4056 
4057 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4058 		    "cpu-mhz", cpu_freq);
4059 		if ((mul = cpu_freq * 1000000LL) <= INT_MAX)
4060 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4061 			    "clock-frequency", (int)mul);
4062 	}
4063 
4064 	if (!is_x86_feature(x86_featureset, X86FSET_CPUID)) {
4065 		return;
4066 	}
4067 
4068 	/* vendor-id */
4069 	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
4070 	    "vendor-id", cpi->cpi_vendorstr);
4071 
4072 	if (cpi->cpi_maxeax == 0) {
4073 		return;
4074 	}
4075 
4076 	/*
4077 	 * family, model, and step
4078 	 */
4079 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4080 	    "family", CPI_FAMILY(cpi));
4081 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4082 	    "cpu-model", CPI_MODEL(cpi));
4083 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4084 	    "stepping-id", CPI_STEP(cpi));
4085 
4086 	/* type */
4087 	switch (cpi->cpi_vendor) {
4088 	case X86_VENDOR_Intel:
4089 		create = 1;
4090 		break;
4091 	default:
4092 		create = 0;
4093 		break;
4094 	}
4095 	if (create)
4096 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4097 		    "type", CPI_TYPE(cpi));
4098 
4099 	/* ext-family */
4100 	switch (cpi->cpi_vendor) {
4101 	case X86_VENDOR_Intel:
4102 	case X86_VENDOR_AMD:
4103 		create = cpi->cpi_family >= 0xf;
4104 		break;
4105 	default:
4106 		create = 0;
4107 		break;
4108 	}
4109 	if (create)
4110 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4111 		    "ext-family", CPI_FAMILY_XTD(cpi));
4112 
4113 	/* ext-model */
4114 	switch (cpi->cpi_vendor) {
4115 	case X86_VENDOR_Intel:
4116 		create = IS_EXTENDED_MODEL_INTEL(cpi);
4117 		break;
4118 	case X86_VENDOR_AMD:
4119 		create = CPI_FAMILY(cpi) == 0xf;
4120 		break;
4121 	default:
4122 		create = 0;
4123 		break;
4124 	}
4125 	if (create)
4126 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4127 		    "ext-model", CPI_MODEL_XTD(cpi));
4128 
4129 	/* generation */
4130 	switch (cpi->cpi_vendor) {
4131 	case X86_VENDOR_AMD:
4132 		/*
4133 		 * AMD K5 model 1 was the first part to support this
4134 		 */
4135 		create = cpi->cpi_xmaxeax >= 0x80000001;
4136 		break;
4137 	default:
4138 		create = 0;
4139 		break;
4140 	}
4141 	if (create)
4142 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4143 		    "generation", BITX((cpi)->cpi_extd[1].cp_eax, 11, 8));
4144 
4145 	/* brand-id */
4146 	switch (cpi->cpi_vendor) {
4147 	case X86_VENDOR_Intel:
4148 		/*
4149 		 * brand id first appeared on Pentium III Xeon model 8,
4150 		 * and Celeron model 8 processors and Opteron
4151 		 */
4152 		create = cpi->cpi_family > 6 ||
4153 		    (cpi->cpi_family == 6 && cpi->cpi_model >= 8);
4154 		break;
4155 	case X86_VENDOR_AMD:
4156 		create = cpi->cpi_family >= 0xf;
4157 		break;
4158 	default:
4159 		create = 0;
4160 		break;
4161 	}
4162 	if (create && cpi->cpi_brandid != 0) {
4163 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4164 		    "brand-id", cpi->cpi_brandid);
4165 	}
4166 
4167 	/* chunks, and apic-id */
4168 	switch (cpi->cpi_vendor) {
4169 		/*
4170 		 * first available on Pentium IV and Opteron (K8)
4171 		 */
4172 	case X86_VENDOR_Intel:
4173 		create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf;
4174 		break;
4175 	case X86_VENDOR_AMD:
4176 		create = cpi->cpi_family >= 0xf;
4177 		break;
4178 	default:
4179 		create = 0;
4180 		break;
4181 	}
4182 	if (create) {
4183 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4184 		    "chunks", CPI_CHUNKS(cpi));
4185 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4186 		    "apic-id", cpi->cpi_apicid);
4187 		if (cpi->cpi_chipid >= 0) {
4188 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4189 			    "chip#", cpi->cpi_chipid);
4190 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4191 			    "clog#", cpi->cpi_clogid);
4192 		}
4193 	}
4194 
4195 	/* cpuid-features */
4196 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4197 	    "cpuid-features", CPI_FEATURES_EDX(cpi));
4198 
4199 
4200 	/* cpuid-features-ecx */
4201 	switch (cpi->cpi_vendor) {
4202 	case X86_VENDOR_Intel:
4203 		create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf;
4204 		break;
4205 	case X86_VENDOR_AMD:
4206 		create = cpi->cpi_family >= 0xf;
4207 		break;
4208 	default:
4209 		create = 0;
4210 		break;
4211 	}
4212 	if (create)
4213 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4214 		    "cpuid-features-ecx", CPI_FEATURES_ECX(cpi));
4215 
4216 	/* ext-cpuid-features */
4217 	switch (cpi->cpi_vendor) {
4218 	case X86_VENDOR_Intel:
4219 	case X86_VENDOR_AMD:
4220 	case X86_VENDOR_Cyrix:
4221 	case X86_VENDOR_TM:
4222 	case X86_VENDOR_Centaur:
4223 		create = cpi->cpi_xmaxeax >= 0x80000001;
4224 		break;
4225 	default:
4226 		create = 0;
4227 		break;
4228 	}
4229 	if (create) {
4230 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4231 		    "ext-cpuid-features", CPI_FEATURES_XTD_EDX(cpi));
4232 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4233 		    "ext-cpuid-features-ecx", CPI_FEATURES_XTD_ECX(cpi));
4234 	}
4235 
4236 	/*
4237 	 * Brand String first appeared in Intel Pentium IV, AMD K5
4238 	 * model 1, and Cyrix GXm.  On earlier models we try and
4239 	 * simulate something similar .. so this string should always
4240 	 * same -something- about the processor, however lame.
4241 	 */
4242 	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
4243 	    "brand-string", cpi->cpi_brandstr);
4244 
4245 	/*
4246 	 * Finally, cache and tlb information
4247 	 */
4248 	switch (x86_which_cacheinfo(cpi)) {
4249 	case X86_VENDOR_Intel:
4250 		intel_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props);
4251 		break;
4252 	case X86_VENDOR_Cyrix:
4253 		cyrix_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props);
4254 		break;
4255 	case X86_VENDOR_AMD:
4256 		amd_cache_info(cpi, cpu_devi);
4257 		break;
4258 	default:
4259 		break;
4260 	}
4261 }
4262 
4263 struct l2info {
4264 	int *l2i_csz;
4265 	int *l2i_lsz;
4266 	int *l2i_assoc;
4267 	int l2i_ret;
4268 };
4269 
4270 /*
4271  * A cacheinfo walker that fetches the size, line-size and associativity
4272  * of the L2 cache
4273  */
4274 static int
4275 intel_l2cinfo(void *arg, const struct cachetab *ct)
4276 {
4277 	struct l2info *l2i = arg;
4278 	int *ip;
4279 
4280 	if (ct->ct_label != l2_cache_str &&
4281 	    ct->ct_label != sl2_cache_str)
4282 		return (0);	/* not an L2 -- keep walking */
4283 
4284 	if ((ip = l2i->l2i_csz) != NULL)
4285 		*ip = ct->ct_size;
4286 	if ((ip = l2i->l2i_lsz) != NULL)
4287 		*ip = ct->ct_line_size;
4288 	if ((ip = l2i->l2i_assoc) != NULL)
4289 		*ip = ct->ct_assoc;
4290 	l2i->l2i_ret = ct->ct_size;
4291 	return (1);		/* was an L2 -- terminate walk */
4292 }
4293 
4294 /*
4295  * AMD L2/L3 Cache and TLB Associativity Field Definition:
4296  *
4297  *	Unlike the associativity for the L1 cache and tlb where the 8 bit
4298  *	value is the associativity, the associativity for the L2 cache and
4299  *	tlb is encoded in the following table. The 4 bit L2 value serves as
4300  *	an index into the amd_afd[] array to determine the associativity.
4301  *	-1 is undefined. 0 is fully associative.
4302  */
4303 
4304 static int amd_afd[] =
4305 	{-1, 1, 2, -1, 4, -1, 8, -1, 16, -1, 32, 48, 64, 96, 128, 0};
4306 
4307 static void
4308 amd_l2cacheinfo(struct cpuid_info *cpi, struct l2info *l2i)
4309 {
4310 	struct cpuid_regs *cp;
4311 	uint_t size, assoc;
4312 	int i;
4313 	int *ip;
4314 
4315 	if (cpi->cpi_xmaxeax < 0x80000006)
4316 		return;
4317 	cp = &cpi->cpi_extd[6];
4318 
4319 	if ((i = BITX(cp->cp_ecx, 15, 12)) != 0 &&
4320 	    (size = BITX(cp->cp_ecx, 31, 16)) != 0) {
4321 		uint_t cachesz = size * 1024;
4322 		assoc = amd_afd[i];
4323 
4324 		ASSERT(assoc != -1);
4325 
4326 		if ((ip = l2i->l2i_csz) != NULL)
4327 			*ip = cachesz;
4328 		if ((ip = l2i->l2i_lsz) != NULL)
4329 			*ip = BITX(cp->cp_ecx, 7, 0);
4330 		if ((ip = l2i->l2i_assoc) != NULL)
4331 			*ip = assoc;
4332 		l2i->l2i_ret = cachesz;
4333 	}
4334 }
4335 
4336 int
4337 getl2cacheinfo(cpu_t *cpu, int *csz, int *lsz, int *assoc)
4338 {
4339 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
4340 	struct l2info __l2info, *l2i = &__l2info;
4341 
4342 	l2i->l2i_csz = csz;
4343 	l2i->l2i_lsz = lsz;
4344 	l2i->l2i_assoc = assoc;
4345 	l2i->l2i_ret = -1;
4346 
4347 	switch (x86_which_cacheinfo(cpi)) {
4348 	case X86_VENDOR_Intel:
4349 		intel_walk_cacheinfo(cpi, l2i, intel_l2cinfo);
4350 		break;
4351 	case X86_VENDOR_Cyrix:
4352 		cyrix_walk_cacheinfo(cpi, l2i, intel_l2cinfo);
4353 		break;
4354 	case X86_VENDOR_AMD:
4355 		amd_l2cacheinfo(cpi, l2i);
4356 		break;
4357 	default:
4358 		break;
4359 	}
4360 	return (l2i->l2i_ret);
4361 }
4362 
4363 #if !defined(__xpv)
4364 
4365 uint32_t *
4366 cpuid_mwait_alloc(cpu_t *cpu)
4367 {
4368 	uint32_t	*ret;
4369 	size_t		mwait_size;
4370 
4371 	ASSERT(cpuid_checkpass(CPU, 2));
4372 
4373 	mwait_size = CPU->cpu_m.mcpu_cpi->cpi_mwait.mon_max;
4374 	if (mwait_size == 0)
4375 		return (NULL);
4376 
4377 	/*
4378 	 * kmem_alloc() returns cache line size aligned data for mwait_size
4379 	 * allocations.  mwait_size is currently cache line sized.  Neither
4380 	 * of these implementation details are guarantied to be true in the
4381 	 * future.
4382 	 *
4383 	 * First try allocating mwait_size as kmem_alloc() currently returns
4384 	 * correctly aligned memory.  If kmem_alloc() does not return
4385 	 * mwait_size aligned memory, then use mwait_size ROUNDUP.
4386 	 *
4387 	 * Set cpi_mwait.buf_actual and cpi_mwait.size_actual in case we
4388 	 * decide to free this memory.
4389 	 */
4390 	ret = kmem_zalloc(mwait_size, KM_SLEEP);
4391 	if (ret == (uint32_t *)P2ROUNDUP((uintptr_t)ret, mwait_size)) {
4392 		cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = ret;
4393 		cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = mwait_size;
4394 		*ret = MWAIT_RUNNING;
4395 		return (ret);
4396 	} else {
4397 		kmem_free(ret, mwait_size);
4398 		ret = kmem_zalloc(mwait_size * 2, KM_SLEEP);
4399 		cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = ret;
4400 		cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = mwait_size * 2;
4401 		ret = (uint32_t *)P2ROUNDUP((uintptr_t)ret, mwait_size);
4402 		*ret = MWAIT_RUNNING;
4403 		return (ret);
4404 	}
4405 }
4406 
4407 void
4408 cpuid_mwait_free(cpu_t *cpu)
4409 {
4410 	if (cpu->cpu_m.mcpu_cpi == NULL) {
4411 		return;
4412 	}
4413 
4414 	if (cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual != NULL &&
4415 	    cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual > 0) {
4416 		kmem_free(cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual,
4417 		    cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual);
4418 	}
4419 
4420 	cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = NULL;
4421 	cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = 0;
4422 }
4423 
4424 void
4425 patch_tsc_read(int flag)
4426 {
4427 	size_t cnt;
4428 
4429 	switch (flag) {
4430 	case X86_NO_TSC:
4431 		cnt = &_no_rdtsc_end - &_no_rdtsc_start;
4432 		(void) memcpy((void *)tsc_read, (void *)&_no_rdtsc_start, cnt);
4433 		break;
4434 	case X86_HAVE_TSCP:
4435 		cnt = &_tscp_end - &_tscp_start;
4436 		(void) memcpy((void *)tsc_read, (void *)&_tscp_start, cnt);
4437 		break;
4438 	case X86_TSC_MFENCE:
4439 		cnt = &_tsc_mfence_end - &_tsc_mfence_start;
4440 		(void) memcpy((void *)tsc_read,
4441 		    (void *)&_tsc_mfence_start, cnt);
4442 		break;
4443 	case X86_TSC_LFENCE:
4444 		cnt = &_tsc_lfence_end - &_tsc_lfence_start;
4445 		(void) memcpy((void *)tsc_read,
4446 		    (void *)&_tsc_lfence_start, cnt);
4447 		break;
4448 	default:
4449 		break;
4450 	}
4451 }
4452 
4453 int
4454 cpuid_deep_cstates_supported(void)
4455 {
4456 	struct cpuid_info *cpi;
4457 	struct cpuid_regs regs;
4458 
4459 	ASSERT(cpuid_checkpass(CPU, 1));
4460 
4461 	cpi = CPU->cpu_m.mcpu_cpi;
4462 
4463 	if (!is_x86_feature(x86_featureset, X86FSET_CPUID))
4464 		return (0);
4465 
4466 	switch (cpi->cpi_vendor) {
4467 	case X86_VENDOR_Intel:
4468 		if (cpi->cpi_xmaxeax < 0x80000007)
4469 			return (0);
4470 
4471 		/*
4472 		 * TSC run at a constant rate in all ACPI C-states?
4473 		 */
4474 		regs.cp_eax = 0x80000007;
4475 		(void) __cpuid_insn(&regs);
4476 		return (regs.cp_edx & CPUID_TSC_CSTATE_INVARIANCE);
4477 
4478 	default:
4479 		return (0);
4480 	}
4481 }
4482 
4483 #endif	/* !__xpv */
4484 
4485 void
4486 post_startup_cpu_fixups(void)
4487 {
4488 #ifndef __xpv
4489 	/*
4490 	 * Some AMD processors support C1E state. Entering this state will
4491 	 * cause the local APIC timer to stop, which we can't deal with at
4492 	 * this time.
4493 	 */
4494 	if (cpuid_getvendor(CPU) == X86_VENDOR_AMD) {
4495 		on_trap_data_t otd;
4496 		uint64_t reg;
4497 
4498 		if (!on_trap(&otd, OT_DATA_ACCESS)) {
4499 			reg = rdmsr(MSR_AMD_INT_PENDING_CMP_HALT);
4500 			/* Disable C1E state if it is enabled by BIOS */
4501 			if ((reg >> AMD_ACTONCMPHALT_SHIFT) &
4502 			    AMD_ACTONCMPHALT_MASK) {
4503 				reg &= ~(AMD_ACTONCMPHALT_MASK <<
4504 				    AMD_ACTONCMPHALT_SHIFT);
4505 				wrmsr(MSR_AMD_INT_PENDING_CMP_HALT, reg);
4506 			}
4507 		}
4508 		no_trap();
4509 	}
4510 #endif	/* !__xpv */
4511 }
4512 
4513 /*
4514  * Setup necessary registers to enable XSAVE feature on this processor.
4515  * This function needs to be called early enough, so that no xsave/xrstor
4516  * ops will execute on the processor before the MSRs are properly set up.
4517  *
4518  * Current implementation has the following assumption:
4519  * - cpuid_pass1() is done, so that X86 features are known.
4520  * - fpu_probe() is done, so that fp_save_mech is chosen.
4521  */
4522 void
4523 xsave_setup_msr(cpu_t *cpu)
4524 {
4525 	ASSERT(fp_save_mech == FP_XSAVE);
4526 	ASSERT(is_x86_feature(x86_featureset, X86FSET_XSAVE));
4527 
4528 	/* Enable OSXSAVE in CR4. */
4529 	setcr4(getcr4() | CR4_OSXSAVE);
4530 	/*
4531 	 * Update SW copy of ECX, so that /dev/cpu/self/cpuid will report
4532 	 * correct value.
4533 	 */
4534 	cpu->cpu_m.mcpu_cpi->cpi_std[1].cp_ecx |= CPUID_INTC_ECX_OSXSAVE;
4535 	setup_xfem();
4536 }
4537 
4538 /*
4539  * Starting with the Westmere processor the local
4540  * APIC timer will continue running in all C-states,
4541  * including the deepest C-states.
4542  */
4543 int
4544 cpuid_arat_supported(void)
4545 {
4546 	struct cpuid_info *cpi;
4547 	struct cpuid_regs regs;
4548 
4549 	ASSERT(cpuid_checkpass(CPU, 1));
4550 	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
4551 
4552 	cpi = CPU->cpu_m.mcpu_cpi;
4553 
4554 	switch (cpi->cpi_vendor) {
4555 	case X86_VENDOR_Intel:
4556 		/*
4557 		 * Always-running Local APIC Timer is
4558 		 * indicated by CPUID.6.EAX[2].
4559 		 */
4560 		if (cpi->cpi_maxeax >= 6) {
4561 			regs.cp_eax = 6;
4562 			(void) cpuid_insn(NULL, &regs);
4563 			return (regs.cp_eax & CPUID_CSTATE_ARAT);
4564 		} else {
4565 			return (0);
4566 		}
4567 	default:
4568 		return (0);
4569 	}
4570 }
4571 
4572 /*
4573  * Check support for Intel ENERGY_PERF_BIAS feature
4574  */
4575 int
4576 cpuid_iepb_supported(struct cpu *cp)
4577 {
4578 	struct cpuid_info *cpi = cp->cpu_m.mcpu_cpi;
4579 	struct cpuid_regs regs;
4580 
4581 	ASSERT(cpuid_checkpass(cp, 1));
4582 
4583 	if (!(is_x86_feature(x86_featureset, X86FSET_CPUID)) ||
4584 	    !(is_x86_feature(x86_featureset, X86FSET_MSR))) {
4585 		return (0);
4586 	}
4587 
4588 	/*
4589 	 * Intel ENERGY_PERF_BIAS MSR is indicated by
4590 	 * capability bit CPUID.6.ECX.3
4591 	 */
4592 	if ((cpi->cpi_vendor != X86_VENDOR_Intel) || (cpi->cpi_maxeax < 6))
4593 		return (0);
4594 
4595 	regs.cp_eax = 0x6;
4596 	(void) cpuid_insn(NULL, &regs);
4597 	return (regs.cp_ecx & CPUID_EPB_SUPPORT);
4598 }
4599 
4600 /*
4601  * Check support for TSC deadline timer
4602  *
4603  * TSC deadline timer provides a superior software programming
4604  * model over local APIC timer that eliminates "time drifts".
4605  * Instead of specifying a relative time, software specifies an
4606  * absolute time as the target at which the processor should
4607  * generate a timer event.
4608  */
4609 int
4610 cpuid_deadline_tsc_supported(void)
4611 {
4612 	struct cpuid_info *cpi = CPU->cpu_m.mcpu_cpi;
4613 	struct cpuid_regs regs;
4614 
4615 	ASSERT(cpuid_checkpass(CPU, 1));
4616 	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
4617 
4618 	switch (cpi->cpi_vendor) {
4619 	case X86_VENDOR_Intel:
4620 		if (cpi->cpi_maxeax >= 1) {
4621 			regs.cp_eax = 1;
4622 			(void) cpuid_insn(NULL, &regs);
4623 			return (regs.cp_ecx & CPUID_DEADLINE_TSC);
4624 		} else {
4625 			return (0);
4626 		}
4627 	default:
4628 		return (0);
4629 	}
4630 }
4631 
4632 #if defined(__amd64) && !defined(__xpv)
4633 /*
4634  * Patch in versions of bcopy for high performance Intel Nhm processors
4635  * and later...
4636  */
4637 void
4638 patch_memops(uint_t vendor)
4639 {
4640 	size_t cnt, i;
4641 	caddr_t to, from;
4642 
4643 	if ((vendor == X86_VENDOR_Intel) &&
4644 	    is_x86_feature(x86_featureset, X86FSET_SSE4_2)) {
4645 		cnt = &bcopy_patch_end - &bcopy_patch_start;
4646 		to = &bcopy_ck_size;
4647 		from = &bcopy_patch_start;
4648 		for (i = 0; i < cnt; i++) {
4649 			*to++ = *from++;
4650 		}
4651 	}
4652 }
4653 #endif  /* __amd64 && !__xpv */
4654 
4655 /*
4656  * This function finds the number of bits to represent the number of cores per
4657  * chip and the number of strands per core for the Intel platforms.
4658  * It re-uses the x2APIC cpuid code of the cpuid_pass2().
4659  */
4660 void
4661 cpuid_get_ext_topo(uint_t vendor, uint_t *core_nbits, uint_t *strand_nbits)
4662 {
4663 	struct cpuid_regs regs;
4664 	struct cpuid_regs *cp = &regs;
4665 
4666 	if (vendor != X86_VENDOR_Intel) {
4667 		return;
4668 	}
4669 
4670 	/* if the cpuid level is 0xB, extended topo is available. */
4671 	cp->cp_eax = 0;
4672 	if (__cpuid_insn(cp) >= 0xB) {
4673 
4674 		cp->cp_eax = 0xB;
4675 		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
4676 		(void) __cpuid_insn(cp);
4677 
4678 		/*
4679 		 * Check CPUID.EAX=0BH, ECX=0H:EBX is non-zero, which
4680 		 * indicates that the extended topology enumeration leaf is
4681 		 * available.
4682 		 */
4683 		if (cp->cp_ebx) {
4684 			uint_t coreid_shift = 0;
4685 			uint_t chipid_shift = 0;
4686 			uint_t i;
4687 			uint_t level;
4688 
4689 			for (i = 0; i < CPI_FNB_ECX_MAX; i++) {
4690 				cp->cp_eax = 0xB;
4691 				cp->cp_ecx = i;
4692 
4693 				(void) __cpuid_insn(cp);
4694 				level = CPI_CPU_LEVEL_TYPE(cp);
4695 
4696 				if (level == 1) {
4697 					/*
4698 					 * Thread level processor topology
4699 					 * Number of bits shift right APIC ID
4700 					 * to get the coreid.
4701 					 */
4702 					coreid_shift = BITX(cp->cp_eax, 4, 0);
4703 				} else if (level == 2) {
4704 					/*
4705 					 * Core level processor topology
4706 					 * Number of bits shift right APIC ID
4707 					 * to get the chipid.
4708 					 */
4709 					chipid_shift = BITX(cp->cp_eax, 4, 0);
4710 				}
4711 			}
4712 
4713 			if (coreid_shift > 0 && chipid_shift > coreid_shift) {
4714 				*strand_nbits = coreid_shift;
4715 				*core_nbits = chipid_shift - coreid_shift;
4716 			}
4717 		}
4718 	}
4719 }
4720