xref: /titanic_50/usr/src/uts/i86pc/os/cpuid.c (revision 38f4bddda7216cf3550c325e8cabe56d08a2bce9)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2011 by Delphix. All rights reserved.
24  * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
25  */
26 /*
27  * Copyright (c) 2010, Intel Corporation.
28  * All rights reserved.
29  */
30 /*
31  * Portions Copyright 2009 Advanced Micro Devices, Inc.
32  */
33 /*
34  * Copyright (c) 2012, Joyent, Inc. All rights reserved.
35  */
36 /*
37  * Various routines to handle identification
38  * and classification of x86 processors.
39  */
40 
41 #include <sys/types.h>
42 #include <sys/archsystm.h>
43 #include <sys/x86_archext.h>
44 #include <sys/kmem.h>
45 #include <sys/systm.h>
46 #include <sys/cmn_err.h>
47 #include <sys/sunddi.h>
48 #include <sys/sunndi.h>
49 #include <sys/cpuvar.h>
50 #include <sys/processor.h>
51 #include <sys/sysmacros.h>
52 #include <sys/pg.h>
53 #include <sys/fp.h>
54 #include <sys/controlregs.h>
55 #include <sys/bitmap.h>
56 #include <sys/auxv_386.h>
57 #include <sys/memnode.h>
58 #include <sys/pci_cfgspace.h>
59 
60 #ifdef __xpv
61 #include <sys/hypervisor.h>
62 #else
63 #include <sys/ontrap.h>
64 #endif
65 
66 /*
67  * Pass 0 of cpuid feature analysis happens in locore. It contains special code
68  * to recognize Cyrix processors that are not cpuid-compliant, and to deal with
69  * them accordingly. For most modern processors, feature detection occurs here
70  * in pass 1.
71  *
72  * Pass 1 of cpuid feature analysis happens just at the beginning of mlsetup()
73  * for the boot CPU and does the basic analysis that the early kernel needs.
74  * x86_featureset is set based on the return value of cpuid_pass1() of the boot
75  * CPU.
76  *
77  * Pass 1 includes:
78  *
79  *	o Determining vendor/model/family/stepping and setting x86_type and
80  *	  x86_vendor accordingly.
81  *	o Processing the feature flags returned by the cpuid instruction while
82  *	  applying any workarounds or tricks for the specific processor.
83  *	o Mapping the feature flags into Solaris feature bits (X86_*).
84  *	o Processing extended feature flags if supported by the processor,
85  *	  again while applying specific processor knowledge.
86  *	o Determining the CMT characteristics of the system.
87  *
88  * Pass 1 is done on non-boot CPUs during their initialization and the results
89  * are used only as a meager attempt at ensuring that all processors within the
90  * system support the same features.
91  *
92  * Pass 2 of cpuid feature analysis happens just at the beginning
93  * of startup().  It just copies in and corrects the remainder
94  * of the cpuid data we depend on: standard cpuid functions that we didn't
95  * need for pass1 feature analysis, and extended cpuid functions beyond the
96  * simple feature processing done in pass1.
97  *
98  * Pass 3 of cpuid analysis is invoked after basic kernel services; in
99  * particular kernel memory allocation has been made available. It creates a
100  * readable brand string based on the data collected in the first two passes.
101  *
102  * Pass 4 of cpuid analysis is invoked after post_startup() when all
103  * the support infrastructure for various hardware features has been
104  * initialized. It determines which processor features will be reported
105  * to userland via the aux vector.
106  *
107  * All passes are executed on all CPUs, but only the boot CPU determines what
108  * features the kernel will use.
109  *
110  * Much of the worst junk in this file is for the support of processors
111  * that didn't really implement the cpuid instruction properly.
112  *
113  * NOTE: The accessor functions (cpuid_get*) are aware of, and ASSERT upon,
114  * the pass numbers.  Accordingly, changes to the pass code may require changes
115  * to the accessor code.
116  */
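/*
 * A rough sketch of the boot-time sequence described above; the
 * authoritative callers are mlsetup(), startup() and post_startup(),
 * and the pass3/pass4 prototypes are assumed from their callers
 * elsewhere in the kernel rather than shown in this excerpt:
 *
 *	cpuid_pass1(CPU, x86_featureset);	(mlsetup: basic features)
 *	cpuid_pass2(CPU);			(startup: remaining cpuid data)
 *	cpuid_pass3(CPU);			(after kmem init: brand string)
 *	(void) cpuid_pass4(CPU);		(post_startup: aux vector bits)
 */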
117 
118 uint_t x86_vendor = X86_VENDOR_IntelClone;
119 uint_t x86_type = X86_TYPE_OTHER;
120 uint_t x86_clflush_size = 0;
121 
122 uint_t pentiumpro_bug4046376;
123 uint_t pentiumpro_bug4064495;
124 
125 uchar_t x86_featureset[BT_SIZEOFMAP(NUM_X86_FEATURES)];
126 
127 static char *x86_feature_names[NUM_X86_FEATURES] = {
128 	"lgpg",
129 	"tsc",
130 	"msr",
131 	"mtrr",
132 	"pge",
133 	"de",
134 	"cmov",
135 	"mmx",
136 	"mca",
137 	"pae",
138 	"cv8",
139 	"pat",
140 	"sep",
141 	"sse",
142 	"sse2",
143 	"htt",
144 	"asysc",
145 	"nx",
146 	"sse3",
147 	"cx16",
148 	"cmp",
149 	"tscp",
150 	"mwait",
151 	"sse4a",
152 	"cpuid",
153 	"ssse3",
154 	"sse4_1",
155 	"sse4_2",
156 	"1gpg",
157 	"clfsh",
158 	"64",
159 	"aes",
160 	"pclmulqdq",
161 	"xsave",
162 	"avx",
163 	"vmx",
164 	"svm",
165 	"topoext",
166 	"f16c",
167 	"rdrand"
168 };
169 
170 boolean_t
171 is_x86_feature(void *featureset, uint_t feature)
172 {
173 	ASSERT(feature < NUM_X86_FEATURES);
174 	return (BT_TEST((ulong_t *)featureset, feature));
175 }
176 
177 void
178 add_x86_feature(void *featureset, uint_t feature)
179 {
180 	ASSERT(feature < NUM_X86_FEATURES);
181 	BT_SET((ulong_t *)featureset, feature);
182 }
183 
184 void
185 remove_x86_feature(void *featureset, uint_t feature)
186 {
187 	ASSERT(feature < NUM_X86_FEATURES);
188 	BT_CLEAR((ulong_t *)featureset, feature);
189 }
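/*
 * Example usage (an illustrative sketch, not a call site in this file):
 * a consumer deciding whether SSE2 may be used would test the feature
 * set populated by cpuid_pass1() on the boot CPU:
 *
 *	if (is_x86_feature(x86_featureset, X86FSET_SSE2))
 *		(take the SSE2-capable code path)
 */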
190 
191 boolean_t
192 compare_x86_featureset(void *setA, void *setB)
193 {
194 	/*
195 	 * We assume that the unused bits of the bitmap are always zero.
196 	 */
197 	if (memcmp(setA, setB, BT_SIZEOFMAP(NUM_X86_FEATURES)) == 0) {
198 		return (B_TRUE);
199 	} else {
200 		return (B_FALSE);
201 	}
202 }
203 
204 void
205 print_x86_featureset(void *featureset)
206 {
207 	uint_t i;
208 
209 	for (i = 0; i < NUM_X86_FEATURES; i++) {
210 		if (is_x86_feature(featureset, i)) {
211 			cmn_err(CE_CONT, "?x86_feature: %s\n",
212 			    x86_feature_names[i]);
213 		}
214 	}
215 }
216 
217 uint_t enable486;
218 
219 static size_t xsave_state_size = 0;
220 uint64_t xsave_bv_all = (XFEATURE_LEGACY_FP | XFEATURE_SSE);
221 boolean_t xsave_force_disable = B_FALSE;
222 
223 /*
224  * This is set to platform type we are running on.
225  */
226 static int platform_type = -1;
227 
228 #if !defined(__xpv)
229 /*
230  * Variable to patch if hypervisor platform detection needs to be
231  * disabled (e.g. platform_type will always be HW_NATIVE if this is 0).
232  */
233 int enable_platform_detection = 1;
234 #endif
235 
236 /*
237  * monitor/mwait info.
238  *
239  * size_actual and buf_actual are the real address and size allocated to get
240  * proper mwait_buf alignement.  buf_actual and size_actual should be passed
241  * proper mwait_buf alignment.  buf_actual and size_actual should be passed
242  * to kmem_free().  Currently kmem_alloc() and mwait happen to both use
243  * processor cache-line alignment, but this is not guaranteed in the future.
244 struct mwait_info {
245 	size_t		mon_min;	/* min size to avoid missed wakeups */
246 	size_t		mon_max;	/* size to avoid false wakeups */
247 	size_t		size_actual;	/* size actually allocated */
248 	void		*buf_actual;	/* memory actually allocated */
249 	uint32_t	support;	/* processor support of monitor/mwait */
250 };
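/*
 * Per the note above, a consumer releasing an mwait buffer frees the
 * actual allocation, e.g. (illustrative only):
 *
 *	kmem_free(cpi->cpi_mwait.buf_actual, cpi->cpi_mwait.size_actual);
 */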
251 
252 /*
253  * xsave/xrstor info.
254  *
255  * This structure contains HW feature bits and size of the xsave save area.
256  * Note: the kernel will use the maximum size required for all hardware
257  * features. It is not optimized for potential memory savings if features at
258  * the end of the save area are not enabled.
259  */
260 struct xsave_info {
261 	uint32_t	xsav_hw_features_low;   /* Supported HW features */
262 	uint32_t	xsav_hw_features_high;  /* Supported HW features */
263 	size_t		xsav_max_size;  /* max size save area for HW features */
264 	size_t		ymm_size;	/* AVX: size of ymm save area */
265 	size_t		ymm_offset;	/* AVX: offset for ymm save area */
266 };
267 
268 
269 /*
270  * These constants determine how many of the elements of the
271  * cpuid we cache in the cpuid_info data structure; the
272  * remaining elements are accessible via the cpuid instruction.
273  */
274 
275 #define	NMAX_CPI_STD	6		/* eax = 0 .. 5 */
276 #define	NMAX_CPI_EXTD	0x1f		/* eax = 0x80000000 .. 0x8000001e */
277 
278 /*
279  * Some terminology needs to be explained:
280  *  - Socket: Something that can be plugged into a motherboard.
281  *  - Package: Same as socket
282  *  - Chip: Same as socket. Note that AMD's documentation uses term "chip"
283  *    differently: there, chip is the same as processor node (below)
284  *  - Processor node: Some AMD processors have more than one
285  *    "subprocessor" embedded in a package. These subprocessors (nodes)
286  *    are fully-functional processors themselves with cores, caches,
287  *    memory controllers, PCI configuration spaces. They are connected
288  *    inside the package with Hypertransport links. On single-node
289  *    processors, processor node is equivalent to chip/socket/package.
290  *  - Compute Unit: Some AMD processors pair cores in "compute units" that
291  *    share the FPU and the I$ and L2 caches.
292  */
293 
294 struct cpuid_info {
295 	uint_t cpi_pass;		/* last pass completed */
296 	/*
297 	 * standard function information
298 	 */
299 	uint_t cpi_maxeax;		/* fn 0: %eax */
300 	char cpi_vendorstr[13];		/* fn 0: %ebx:%ecx:%edx */
301 	uint_t cpi_vendor;		/* enum of cpi_vendorstr */
302 
303 	uint_t cpi_family;		/* fn 1: extended family */
304 	uint_t cpi_model;		/* fn 1: extended model */
305 	uint_t cpi_step;		/* fn 1: stepping */
306 	chipid_t cpi_chipid;		/* fn 1: %ebx:  Intel: chip # */
307 					/*		AMD: package/socket # */
308 	uint_t cpi_brandid;		/* fn 1: %ebx: brand ID */
309 	int cpi_clogid;			/* fn 1: %ebx: thread # */
310 	uint_t cpi_ncpu_per_chip;	/* fn 1: %ebx: logical cpu count */
311 	uint8_t cpi_cacheinfo[16];	/* fn 2: intel-style cache desc */
312 	uint_t cpi_ncache;		/* fn 2: number of elements */
313 	uint_t cpi_ncpu_shr_last_cache;	/* fn 4: %eax: ncpus sharing cache */
314 	id_t cpi_last_lvl_cacheid;	/* fn 4: %eax: derived cache id */
315 	uint_t cpi_std_4_size;		/* fn 4: number of fn 4 elements */
316 	struct cpuid_regs **cpi_std_4;	/* fn 4: %ecx == 0 .. fn4_size */
317 	struct cpuid_regs cpi_std[NMAX_CPI_STD];	/* 0 .. 5 */
318 	/*
319 	 * extended function information
320 	 */
321 	uint_t cpi_xmaxeax;		/* fn 0x80000000: %eax */
322 	char cpi_brandstr[49];		/* fn 0x8000000[234] */
323 	uint8_t cpi_pabits;		/* fn 0x80000008: %eax */
324 	uint8_t	cpi_vabits;		/* fn 0x80000008: %eax */
325 	struct	cpuid_regs cpi_extd[NMAX_CPI_EXTD];	/* 0x800000XX */
326 
327 	id_t cpi_coreid;		/* same coreid => strands share core */
328 	int cpi_pkgcoreid;		/* core number within single package */
329 	uint_t cpi_ncore_per_chip;	/* AMD: fn 0x80000008: %ecx[7-0] */
330 					/* Intel: fn 4: %eax[31-26] */
331 	/*
332 	 * supported feature information
333 	 */
334 	uint32_t cpi_support[5];
335 #define	STD_EDX_FEATURES	0
336 #define	AMD_EDX_FEATURES	1
337 #define	TM_EDX_FEATURES		2
338 #define	STD_ECX_FEATURES	3
339 #define	AMD_ECX_FEATURES	4
340 	/*
341 	 * Synthesized information, where known.
342 	 */
343 	uint32_t cpi_chiprev;		/* See X86_CHIPREV_* in x86_archext.h */
344 	const char *cpi_chiprevstr;	/* May be NULL if chiprev unknown */
345 	uint32_t cpi_socket;		/* Chip package/socket type */
346 
347 	struct mwait_info cpi_mwait;	/* fn 5: monitor/mwait info */
348 	uint32_t cpi_apicid;
349 	uint_t cpi_procnodeid;		/* AMD: nodeID on HT, Intel: chipid */
350 	uint_t cpi_procnodes_per_pkg;	/* AMD: # of nodes in the package */
351 					/* Intel: 1 */
352 	uint_t cpi_compunitid;		/* AMD: ComputeUnit ID, Intel: coreid */
353 	uint_t cpi_cores_per_compunit;	/* AMD: # of cores in the ComputeUnit */
354 
355 	struct xsave_info cpi_xsave;	/* fn D: xsave/xrstor info */
356 };
357 
358 
359 static struct cpuid_info cpuid_info0;
360 
361 /*
362  * These bit fields are defined by the Intel Application Note AP-485
363  * "Intel Processor Identification and the CPUID Instruction"
364  */
365 #define	CPI_FAMILY_XTD(cpi)	BITX((cpi)->cpi_std[1].cp_eax, 27, 20)
366 #define	CPI_MODEL_XTD(cpi)	BITX((cpi)->cpi_std[1].cp_eax, 19, 16)
367 #define	CPI_TYPE(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 13, 12)
368 #define	CPI_FAMILY(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 11, 8)
369 #define	CPI_STEP(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 3, 0)
370 #define	CPI_MODEL(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 7, 4)
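/*
 * Worked example (illustrative): a leaf-1 %eax value of 0x000206a7
 * decodes as CPI_FAMILY 0x6, CPI_MODEL 0xa, CPI_MODEL_XTD 0x2 and
 * CPI_STEP 0x7; after the extended-model adjustment in cpuid_pass1()
 * the effective model is 0x2a.
 */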
371 
372 #define	CPI_FEATURES_EDX(cpi)		((cpi)->cpi_std[1].cp_edx)
373 #define	CPI_FEATURES_ECX(cpi)		((cpi)->cpi_std[1].cp_ecx)
374 #define	CPI_FEATURES_XTD_EDX(cpi)	((cpi)->cpi_extd[1].cp_edx)
375 #define	CPI_FEATURES_XTD_ECX(cpi)	((cpi)->cpi_extd[1].cp_ecx)
376 
377 #define	CPI_BRANDID(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 7, 0)
378 #define	CPI_CHUNKS(cpi)		BITX((cpi)->cpi_std[1].cp_ebx, 15, 8)
379 #define	CPI_CPU_COUNT(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 23, 16)
380 #define	CPI_APIC_ID(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 31, 24)
381 
382 #define	CPI_MAXEAX_MAX		0x100		/* sanity control */
383 #define	CPI_XMAXEAX_MAX		0x80000100
384 #define	CPI_FN4_ECX_MAX		0x20		/* sanity: max fn 4 levels */
385 #define	CPI_FNB_ECX_MAX		0x20		/* sanity: max fn B levels */
386 
387 /*
388  * Function 4 (Deterministic Cache Parameters) macros
389  * Defined by Intel Application Note AP-485
390  */
391 #define	CPI_NUM_CORES(regs)		BITX((regs)->cp_eax, 31, 26)
392 #define	CPI_NTHR_SHR_CACHE(regs)	BITX((regs)->cp_eax, 25, 14)
393 #define	CPI_FULL_ASSOC_CACHE(regs)	BITX((regs)->cp_eax, 9, 9)
394 #define	CPI_SELF_INIT_CACHE(regs)	BITX((regs)->cp_eax, 8, 8)
395 #define	CPI_CACHE_LVL(regs)		BITX((regs)->cp_eax, 7, 5)
396 #define	CPI_CACHE_TYPE(regs)		BITX((regs)->cp_eax, 4, 0)
397 #define	CPI_CPU_LEVEL_TYPE(regs)	BITX((regs)->cp_ecx, 15, 8)
398 
399 #define	CPI_CACHE_WAYS(regs)		BITX((regs)->cp_ebx, 31, 22)
400 #define	CPI_CACHE_PARTS(regs)		BITX((regs)->cp_ebx, 21, 12)
401 #define	CPI_CACHE_COH_LN_SZ(regs)	BITX((regs)->cp_ebx, 11, 0)
402 
403 #define	CPI_CACHE_SETS(regs)		BITX((regs)->cp_ecx, 31, 0)
404 
405 #define	CPI_PREFCH_STRIDE(regs)		BITX((regs)->cp_edx, 9, 0)
406 
407 
408 /*
409  * A couple of shorthand macros to identify "later" P6-family chips
410  * like the Pentium M and Core.  First, the "older" P6-based stuff
411  * (loosely defined as "pre-Pentium-4"):
412  * P6, PII, Mobile PII, PII Xeon, PIII, Mobile PIII, PIII Xeon
413  */
414 
415 #define	IS_LEGACY_P6(cpi) (			\
416 	cpi->cpi_family == 6 && 		\
417 		(cpi->cpi_model == 1 ||		\
418 		cpi->cpi_model == 3 ||		\
419 		cpi->cpi_model == 5 ||		\
420 		cpi->cpi_model == 6 ||		\
421 		cpi->cpi_model == 7 ||		\
422 		cpi->cpi_model == 8 ||		\
423 		cpi->cpi_model == 0xA ||	\
424 		cpi->cpi_model == 0xB)		\
425 )
426 
427 /* A "new F6" is everything with family 6 that's not the above */
428 #define	IS_NEW_F6(cpi) ((cpi->cpi_family == 6) && !IS_LEGACY_P6(cpi))
429 
430 /* Extended family/model support */
431 #define	IS_EXTENDED_MODEL_INTEL(cpi) (cpi->cpi_family == 0x6 || \
432 	cpi->cpi_family >= 0xf)
433 
434 /*
435  * Info for monitor/mwait idle loop.
436  *
437  * See cpuid section of "Intel 64 and IA-32 Architectures Software Developer's
438  * Manual Volume 2A: Instruction Set Reference, A-M" #25366-022US, November
439  * 2006.
440  * See MONITOR/MWAIT section of "AMD64 Architecture Programmer's Manual
441  * Documentation Updates" #33633, Rev 2.05, December 2006.
442  */
443 #define	MWAIT_SUPPORT		(0x00000001)	/* mwait supported */
444 #define	MWAIT_EXTENSIONS	(0x00000002)	/* extension supported */
445 #define	MWAIT_ECX_INT_ENABLE	(0x00000004)	/* ecx 1 extension supported */
446 #define	MWAIT_SUPPORTED(cpi)	((cpi)->cpi_std[1].cp_ecx & CPUID_INTC_ECX_MON)
447 #define	MWAIT_INT_ENABLE(cpi)	((cpi)->cpi_std[5].cp_ecx & 0x2)
448 #define	MWAIT_EXTENSION(cpi)	((cpi)->cpi_std[5].cp_ecx & 0x1)
449 #define	MWAIT_SIZE_MIN(cpi)	BITX((cpi)->cpi_std[5].cp_eax, 15, 0)
450 #define	MWAIT_SIZE_MAX(cpi)	BITX((cpi)->cpi_std[5].cp_ebx, 15, 0)
451 /*
452  * Number of sub-cstates for a given c-state.
453  */
454 #define	MWAIT_NUM_SUBC_STATES(cpi, c_state)			\
455 	BITX((cpi)->cpi_std[5].cp_edx, c_state + 3, c_state)
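/*
 * For example, MWAIT_NUM_SUBC_STATES(cpi, 4) extracts %edx[7:4] of
 * leaf 5, the number of C1 sub C-states; the c_state argument is the
 * bit offset into %edx, i.e. 4 * the C-state number.
 */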
456 
457 /*
458  * XSAVE leaf 0xD enumeration
459  */
460 #define	CPUID_LEAFD_2_YMM_OFFSET	576
461 #define	CPUID_LEAFD_2_YMM_SIZE		256
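/*
 * That is, with the 512-byte legacy fxsave region and the 64-byte
 * xsave header ahead of it, the ymm state occupies bytes 576..831 of
 * the AVX-era save area.
 */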
462 
463 /*
464  * Functions we consume from cpuid_subr.c; don't publish these in a header
465  * file to try to keep people using the expected cpuid_* interfaces.
466  */
467 extern uint32_t _cpuid_skt(uint_t, uint_t, uint_t, uint_t);
468 extern const char *_cpuid_sktstr(uint_t, uint_t, uint_t, uint_t);
469 extern uint32_t _cpuid_chiprev(uint_t, uint_t, uint_t, uint_t);
470 extern const char *_cpuid_chiprevstr(uint_t, uint_t, uint_t, uint_t);
471 extern uint_t _cpuid_vendorstr_to_vendorcode(char *);
472 
473 /*
474  * Apply various platform-dependent restrictions where the
475  * underlying platform restrictions mean the CPU can be marked
476  * as less capable than its cpuid instruction would imply.
477  */
478 #if defined(__xpv)
479 static void
480 platform_cpuid_mangle(uint_t vendor, uint32_t eax, struct cpuid_regs *cp)
481 {
482 	switch (eax) {
483 	case 1: {
484 		uint32_t mcamask = DOMAIN_IS_INITDOMAIN(xen_info) ?
485 		    0 : CPUID_INTC_EDX_MCA;
486 		cp->cp_edx &=
487 		    ~(mcamask |
488 		    CPUID_INTC_EDX_PSE |
489 		    CPUID_INTC_EDX_VME | CPUID_INTC_EDX_DE |
490 		    CPUID_INTC_EDX_SEP | CPUID_INTC_EDX_MTRR |
491 		    CPUID_INTC_EDX_PGE | CPUID_INTC_EDX_PAT |
492 		    CPUID_AMD_EDX_SYSC | CPUID_INTC_EDX_SEP |
493 		    CPUID_INTC_EDX_PSE36 | CPUID_INTC_EDX_HTT);
494 		break;
495 	}
496 
497 	case 0x80000001:
498 		cp->cp_edx &=
499 		    ~(CPUID_AMD_EDX_PSE |
500 		    CPUID_INTC_EDX_VME | CPUID_INTC_EDX_DE |
501 		    CPUID_AMD_EDX_MTRR | CPUID_AMD_EDX_PGE |
502 		    CPUID_AMD_EDX_PAT | CPUID_AMD_EDX_PSE36 |
503 		    CPUID_AMD_EDX_SYSC | CPUID_INTC_EDX_SEP |
504 		    CPUID_AMD_EDX_TSCP);
505 		cp->cp_ecx &= ~CPUID_AMD_ECX_CMP_LGCY;
506 		break;
507 	default:
508 		break;
509 	}
510 
511 	switch (vendor) {
512 	case X86_VENDOR_Intel:
513 		switch (eax) {
514 		case 4:
515 			/*
516 			 * Zero out the (ncores-per-chip - 1) field
517 			 */
518 			cp->cp_eax &= 0x03ffffff;
519 			break;
520 		default:
521 			break;
522 		}
523 		break;
524 	case X86_VENDOR_AMD:
525 		switch (eax) {
526 
527 		case 0x80000001:
528 			cp->cp_ecx &= ~CPUID_AMD_ECX_CR8D;
529 			break;
530 
531 		case 0x80000008:
532 			/*
533 			 * Zero out the (ncores-per-chip - 1) field
534 			 */
535 			cp->cp_ecx &= 0xffffff00;
536 			break;
537 		default:
538 			break;
539 		}
540 		break;
541 	default:
542 		break;
543 	}
544 }
545 #else
546 #define	platform_cpuid_mangle(vendor, eax, cp)	/* nothing */
547 #endif
548 
549 /*
550  *  Some undocumented ways of patching the results of the cpuid
551  *  instruction to permit running Solaris 10 on future cpus that
552  *  we don't currently support.  Could be set to non-zero values
553  *  via settings in eeprom.
554  */
555 
556 uint32_t cpuid_feature_ecx_include;
557 uint32_t cpuid_feature_ecx_exclude;
558 uint32_t cpuid_feature_edx_include;
559 uint32_t cpuid_feature_edx_exclude;
560 
561 /*
562  * Allocate space for mcpu_cpi in the machcpu structure for all non-boot CPUs.
563  */
564 void
565 cpuid_alloc_space(cpu_t *cpu)
566 {
567 	/*
568 	 * By convention, cpu0 is the boot cpu, which is set up
569 	 * before memory allocation is available.  All other cpus get
570 	 * their cpuid_info struct allocated here.
571 	 */
572 	ASSERT(cpu->cpu_id != 0);
573 	ASSERT(cpu->cpu_m.mcpu_cpi == NULL);
574 	cpu->cpu_m.mcpu_cpi =
575 	    kmem_zalloc(sizeof (*cpu->cpu_m.mcpu_cpi), KM_SLEEP);
576 }
577 
578 void
579 cpuid_free_space(cpu_t *cpu)
580 {
581 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
582 	int i;
583 
584 	ASSERT(cpi != NULL);
585 	ASSERT(cpi != &cpuid_info0);
586 
587 	/*
588 	 * Free up any function 4 related dynamic storage
589 	 */
590 	for (i = 1; i < cpi->cpi_std_4_size; i++)
591 		kmem_free(cpi->cpi_std_4[i], sizeof (struct cpuid_regs));
592 	if (cpi->cpi_std_4_size > 0)
593 		kmem_free(cpi->cpi_std_4,
594 		    cpi->cpi_std_4_size * sizeof (struct cpuid_regs *));
595 
596 	kmem_free(cpi, sizeof (*cpi));
597 	cpu->cpu_m.mcpu_cpi = NULL;
598 }
599 
600 #if !defined(__xpv)
601 /*
602  * Determine the type of the underlying platform. This is used to customize
603  * initialization of various subsystems (e.g. TSC). determine_platform() must
604  * only ever be called once to prevent two processors from seeing different
605  * values of platform_type. Must be called before cpuid_pass1(), the earliest
606  * consumer to execute (uses _cpuid_chiprev --> synth_amd_info --> get_hwenv).
607  */
608 void
609 determine_platform(void)
610 {
611 	struct cpuid_regs cp;
612 	uint32_t base;
613 	uint32_t regs[4];
614 	char *hvstr = (char *)regs;
615 
616 	ASSERT(platform_type == -1);
617 
618 	platform_type = HW_NATIVE;
619 
620 	if (!enable_platform_detection)
621 		return;
622 
623 	/*
624 	 * If the Hypervisor CPUID bit is set, try to determine the hypervisor
625 	 * vendor signature, and set the platform type accordingly.
626 	 *
627 	 * References:
628 	 * http://lkml.org/lkml/2008/10/1/246
629 	 * http://kb.vmware.com/kb/1009458
630 	 */
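	/*
	 * The 12-byte signature assembled from %ebx:%ecx:%edx below is,
	 * for example, "XenVMMXenVMM" for Xen HVM, "VMwareVMware" for
	 * VMware, "KVMKVMKVM" for KVM and "Microsoft Hv" for Hyper-V.
	 */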
631 	cp.cp_eax = 0x1;
632 	(void) __cpuid_insn(&cp);
633 	if ((cp.cp_ecx & CPUID_INTC_ECX_HV) != 0) {
634 		cp.cp_eax = 0x40000000;
635 		(void) __cpuid_insn(&cp);
636 		regs[0] = cp.cp_ebx;
637 		regs[1] = cp.cp_ecx;
638 		regs[2] = cp.cp_edx;
639 		regs[3] = 0;
640 		if (strcmp(hvstr, HVSIG_XEN_HVM) == 0) {
641 			platform_type = HW_XEN_HVM;
642 			return;
643 		}
644 		if (strcmp(hvstr, HVSIG_VMWARE) == 0) {
645 			platform_type = HW_VMWARE;
646 			return;
647 		}
648 		if (strcmp(hvstr, HVSIG_KVM) == 0) {
649 			platform_type = HW_KVM;
650 			return;
651 		}
652 		if (strcmp(hvstr, HVSIG_MICROSOFT) == 0)
653 			platform_type = HW_MICROSOFT;
654 	} else {
655 		/*
656 		 * Check older VMware hardware versions. The VMware hypervisor is
657 		 * detected by performing an IN operation on the VMware hypervisor
658 		 * port and checking that the value returned in %ebx is the VMware
659 		 * hypervisor magic value.
660 		 *
661 		 * References: http://kb.vmware.com/kb/1009458
662 		 */
663 		vmware_port(VMWARE_HVCMD_GETVERSION, regs);
664 		if (regs[1] == VMWARE_HVMAGIC) {
665 			platform_type = HW_VMWARE;
666 			return;
667 		}
668 	}
669 
670 	/*
671 	 * Check Xen hypervisor. In a fully virtualized domain,
672 	 * Xen's pseudo-cpuid function returns a string representing the
673 	 * Xen signature in %ebx, %ecx, and %edx. %eax contains the maximum
674 	 * supported cpuid function. We need at least a (base + 2) leaf value
675 	 * to do what we want to do. Try different base values, since the
676 	 * hypervisor might use a different one depending on whether Hyper-V
677 	 * emulation is switched on by default or not.
678 	 */
679 	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
680 		cp.cp_eax = base;
681 		(void) __cpuid_insn(&cp);
682 		regs[0] = cp.cp_ebx;
683 		regs[1] = cp.cp_ecx;
684 		regs[2] = cp.cp_edx;
685 		regs[3] = 0;
686 		if (strcmp(hvstr, HVSIG_XEN_HVM) == 0 &&
687 		    cp.cp_eax >= (base + 2)) {
688 			platform_type &= ~HW_NATIVE;
689 			platform_type |= HW_XEN_HVM;
690 			return;
691 		}
692 	}
693 }
694 
695 int
696 get_hwenv(void)
697 {
698 	ASSERT(platform_type != -1);
699 	return (platform_type);
700 }
701 
702 int
703 is_controldom(void)
704 {
705 	return (0);
706 }
707 
708 #else
709 
710 int
711 get_hwenv(void)
712 {
713 	return (HW_XEN_PV);
714 }
715 
716 int
717 is_controldom(void)
718 {
719 	return (DOMAIN_IS_INITDOMAIN(xen_info));
720 }
721 
722 #endif	/* __xpv */
723 
724 static void
725 cpuid_intel_getids(cpu_t *cpu, void *feature)
726 {
727 	uint_t i;
728 	uint_t chipid_shift = 0;
729 	uint_t coreid_shift = 0;
730 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
731 
732 	for (i = 1; i < cpi->cpi_ncpu_per_chip; i <<= 1)
733 		chipid_shift++;
734 
735 	cpi->cpi_chipid = cpi->cpi_apicid >> chipid_shift;
736 	cpi->cpi_clogid = cpi->cpi_apicid & ((1 << chipid_shift) - 1);
737 
738 	if (is_x86_feature(feature, X86FSET_CMP)) {
739 		/*
740 		 * Multi-core (and possibly multi-threaded)
741 		 * processors.
742 		 */
743 		uint_t ncpu_per_core;
744 		if (cpi->cpi_ncore_per_chip == 1)
745 			ncpu_per_core = cpi->cpi_ncpu_per_chip;
746 		else if (cpi->cpi_ncore_per_chip > 1)
747 			ncpu_per_core = cpi->cpi_ncpu_per_chip /
748 			    cpi->cpi_ncore_per_chip;
749 		/*
750 		 * 8bit APIC IDs on dual core Pentiums
751 		 * look like this:
752 		 *
753 		 * +-----------------------+------+------+
754 		 * | Physical Package ID   |  MC  |  HT  |
755 		 * +-----------------------+------+------+
756 		 * <------- chipid -------->
757 		 * <------- coreid --------------->
758 		 *			   <--- clogid -->
759 		 *			   <------>
760 		 *			   pkgcoreid
761 		 *
762 		 * Where the number of bits necessary to
763 		 * represent the MC and HT fields together equals
764 		 * the minimum number of bits necessary to
765 		 * store the value of cpi->cpi_ncpu_per_chip.
766 		 * Of those bits, the MC part uses the number
767 		 * of bits necessary to store the value of
768 		 * cpi->cpi_ncore_per_chip.
769 		 */
770 		for (i = 1; i < ncpu_per_core; i <<= 1)
771 			coreid_shift++;
772 		cpi->cpi_coreid = cpi->cpi_apicid >> coreid_shift;
773 		cpi->cpi_pkgcoreid = cpi->cpi_clogid >> coreid_shift;
774 	} else if (is_x86_feature(feature, X86FSET_HTT)) {
775 		/*
776 		 * Single-core multi-threaded processors.
777 		 */
778 		cpi->cpi_coreid = cpi->cpi_chipid;
779 		cpi->cpi_pkgcoreid = 0;
780 	}
781 	cpi->cpi_procnodeid = cpi->cpi_chipid;
782 	cpi->cpi_compunitid = cpi->cpi_coreid;
783 }
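/*
 * Worked example (illustrative): on a dual-core, two-thread-per-core
 * package, cpi_ncpu_per_chip is 4 and cpi_ncore_per_chip is 2, giving
 * chipid_shift == 2 and coreid_shift == 1; an APIC ID of 0x5 then
 * yields chipid 1, clogid 1, coreid 2 and pkgcoreid 0.
 */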
784 
785 static void
786 cpuid_amd_getids(cpu_t *cpu)
787 {
788 	int i, first_half, coreidsz;
789 	uint32_t nb_caps_reg;
790 	uint_t node2_1;
791 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
792 	struct cpuid_regs *cp;
793 
794 	/*
795 	 * AMD CMP chips currently have a single thread per core.
796 	 *
797 	 * Since no two cpus share a core we must assign a distinct coreid
798 	 * per cpu, and we do this by using the cpu_id.  This scheme does not,
799 	 * however, guarantee that sibling cores of a chip will have sequential
800 	 * coreids starting at a multiple of the number of cores per chip -
801 	 * that is usually the case, but if the ACPI MADT table is presented
802 	 * in a different order then we need to perform a few more gymnastics
803 	 * for the pkgcoreid.
804 	 *
805 	 * All processors in the system have the same number of enabled
806 	 * cores. Cores within a processor are always numbered sequentially
807 	 * from 0 regardless of how many or which are disabled, and there
808 	 * is no way for the operating system to discover the real core id
809 	 * when some are disabled.
810 	 *
811 	 * In family 0x15, the cores come in pairs called compute units. They
812 	 * share I$ and L2 caches and the FPU. Enumeration of this feature is
813 	 * simplified by the new topology extensions CPUID leaf, indicated by
814 	 * the X86 feature X86FSET_TOPOEXT.
815 	 */
816 
817 	cpi->cpi_coreid = cpu->cpu_id;
818 	cpi->cpi_compunitid = cpu->cpu_id;
819 
820 	if (cpi->cpi_xmaxeax >= 0x80000008) {
821 
822 		coreidsz = BITX((cpi)->cpi_extd[8].cp_ecx, 15, 12);
823 
824 		/*
825 		 * In AMD parlance a chip is really a node, while Solaris
826 		 * sees a chip as equivalent to a socket/package.
827 		 */
828 		cpi->cpi_ncore_per_chip =
829 		    BITX((cpi)->cpi_extd[8].cp_ecx, 7, 0) + 1;
830 		if (coreidsz == 0) {
831 			/* Use legacy method */
832 			for (i = 1; i < cpi->cpi_ncore_per_chip; i <<= 1)
833 				coreidsz++;
834 			if (coreidsz == 0)
835 				coreidsz = 1;
836 		}
837 	} else {
838 		/* Assume single-core part */
839 		cpi->cpi_ncore_per_chip = 1;
840 		coreidsz = 1;
841 	}
842 
843 	cpi->cpi_clogid = cpi->cpi_pkgcoreid =
844 	    cpi->cpi_apicid & ((1<<coreidsz) - 1);
845 	cpi->cpi_ncpu_per_chip = cpi->cpi_ncore_per_chip;
846 
847 	/* Get node ID, compute unit ID */
848 	if (is_x86_feature(x86_featureset, X86FSET_TOPOEXT) &&
849 	    cpi->cpi_xmaxeax >= 0x8000001e) {
850 		cp = &cpi->cpi_extd[0x1e];
851 		cp->cp_eax = 0x8000001e;
852 		(void) __cpuid_insn(cp);
853 
854 		cpi->cpi_procnodes_per_pkg = BITX(cp->cp_ecx, 10, 8) + 1;
855 		cpi->cpi_procnodeid = BITX(cp->cp_ecx, 7, 0);
856 		cpi->cpi_cores_per_compunit = BITX(cp->cp_ebx, 15, 8) + 1;
857 		cpi->cpi_compunitid = BITX(cp->cp_ebx, 7, 0)
858 		    + (cpi->cpi_ncore_per_chip / cpi->cpi_cores_per_compunit)
859 		    * (cpi->cpi_procnodeid / cpi->cpi_procnodes_per_pkg);
860 	} else if (cpi->cpi_family == 0xf || cpi->cpi_family >= 0x11) {
861 		cpi->cpi_procnodeid = (cpi->cpi_apicid >> coreidsz) & 7;
862 	} else if (cpi->cpi_family == 0x10) {
863 		/*
864 		 * See if we are a multi-node processor.
865 		 * All processors in the system have the same number of nodes
866 		 */
867 		nb_caps_reg =  pci_getl_func(0, 24, 3, 0xe8);
868 		if ((cpi->cpi_model < 8) || BITX(nb_caps_reg, 29, 29) == 0) {
869 			/* Single-node */
870 			cpi->cpi_procnodeid = BITX(cpi->cpi_apicid, 5,
871 			    coreidsz);
872 		} else {
873 
874 			/*
875 			 * Multi-node revision D (2 nodes per package
876 			 * are supported)
877 			 */
878 			cpi->cpi_procnodes_per_pkg = 2;
879 
880 			first_half = (cpi->cpi_pkgcoreid <=
881 			    (cpi->cpi_ncore_per_chip/2 - 1));
882 
883 			if (cpi->cpi_apicid == cpi->cpi_pkgcoreid) {
884 				/* We are BSP */
885 				cpi->cpi_procnodeid = (first_half ? 0 : 1);
886 			} else {
887 
888 				/* We are AP */
889 				/* NodeId[2:1] bits to use for reading F3xe8 */
890 				node2_1 = BITX(cpi->cpi_apicid, 5, 4) << 1;
891 
892 				nb_caps_reg =
893 				    pci_getl_func(0, 24 + node2_1, 3, 0xe8);
894 
895 				/*
896 				 * Check IntNodeNum bit (31:30, but bit 31 is
897 				 * always 0 on dual-node processors)
898 				 */
899 				if (BITX(nb_caps_reg, 30, 30) == 0)
900 					cpi->cpi_procnodeid = node2_1 +
901 					    !first_half;
902 				else
903 					cpi->cpi_procnodeid = node2_1 +
904 					    first_half;
905 			}
906 		}
907 	} else {
908 		cpi->cpi_procnodeid = 0;
909 	}
910 
911 	cpi->cpi_chipid =
912 	    cpi->cpi_procnodeid / cpi->cpi_procnodes_per_pkg;
913 }
914 
915 /*
916  * Set up the XFeature_Enabled_Mask register. Required by the xsave feature.
917  */
918 void
919 setup_xfem(void)
920 {
921 	uint64_t flags = XFEATURE_LEGACY_FP;
922 
923 	ASSERT(is_x86_feature(x86_featureset, X86FSET_XSAVE));
924 
925 	if (is_x86_feature(x86_featureset, X86FSET_SSE))
926 		flags |= XFEATURE_SSE;
927 
928 	if (is_x86_feature(x86_featureset, X86FSET_AVX))
929 		flags |= XFEATURE_AVX;
930 
931 	set_xcr(XFEATURE_ENABLED_MASK, flags);
932 
933 	xsave_bv_all = flags;
934 }
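/*
 * On an AVX-capable processor this sets %xcr0 (and xsave_bv_all) to
 * XFEATURE_LEGACY_FP | XFEATURE_SSE | XFEATURE_AVX, i.e. 0x7.
 */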
935 
936 void
937 cpuid_pass1(cpu_t *cpu, uchar_t *featureset)
938 {
939 	uint32_t mask_ecx, mask_edx;
940 	struct cpuid_info *cpi;
941 	struct cpuid_regs *cp;
942 	int xcpuid;
943 #if !defined(__xpv)
944 	extern int idle_cpu_prefer_mwait;
945 #endif
946 
947 	/*
948 	 * Space statically allocated for BSP, ensure pointer is set
949 	 */
950 	if (cpu->cpu_id == 0) {
951 		if (cpu->cpu_m.mcpu_cpi == NULL)
952 			cpu->cpu_m.mcpu_cpi = &cpuid_info0;
953 	}
954 
955 	add_x86_feature(featureset, X86FSET_CPUID);
956 
957 	cpi = cpu->cpu_m.mcpu_cpi;
958 	ASSERT(cpi != NULL);
959 	cp = &cpi->cpi_std[0];
960 	cp->cp_eax = 0;
961 	cpi->cpi_maxeax = __cpuid_insn(cp);
962 	{
963 		uint32_t *iptr = (uint32_t *)cpi->cpi_vendorstr;
964 		*iptr++ = cp->cp_ebx;
965 		*iptr++ = cp->cp_edx;
966 		*iptr++ = cp->cp_ecx;
967 		*(char *)&cpi->cpi_vendorstr[12] = '\0';
968 	}
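	/*
	 * Note the %ebx:%edx:%ecx ordering above: an Intel part, for
	 * example, returns "Genu", "ineI" and "ntel" in those registers,
	 * which reassembles into the "GenuineIntel" vendor string.
	 */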
969 
970 	cpi->cpi_vendor = _cpuid_vendorstr_to_vendorcode(cpi->cpi_vendorstr);
971 	x86_vendor = cpi->cpi_vendor; /* for compatibility */
972 
973 	/*
974 	 * Limit the range in case of weird hardware
975 	 */
976 	if (cpi->cpi_maxeax > CPI_MAXEAX_MAX)
977 		cpi->cpi_maxeax = CPI_MAXEAX_MAX;
978 	if (cpi->cpi_maxeax < 1)
979 		goto pass1_done;
980 
981 	cp = &cpi->cpi_std[1];
982 	cp->cp_eax = 1;
983 	(void) __cpuid_insn(cp);
984 
985 	/*
986 	 * Extract identifying constants for easy access.
987 	 */
988 	cpi->cpi_model = CPI_MODEL(cpi);
989 	cpi->cpi_family = CPI_FAMILY(cpi);
990 
991 	if (cpi->cpi_family == 0xf)
992 		cpi->cpi_family += CPI_FAMILY_XTD(cpi);
993 
994 	/*
995 	 * Beware: AMD uses "extended model" iff base *FAMILY* == 0xf;
996 	 * Intel uses it for family 0x6 or 0xf (IS_EXTENDED_MODEL_INTEL).
997 	 * Everyone else is assumed to use it when model == 0xf.  Sigh.
998 	 */
999 
1000 	switch (cpi->cpi_vendor) {
1001 	case X86_VENDOR_Intel:
1002 		if (IS_EXTENDED_MODEL_INTEL(cpi))
1003 			cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
1004 		break;
1005 	case X86_VENDOR_AMD:
1006 		if (CPI_FAMILY(cpi) == 0xf)
1007 			cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
1008 		break;
1009 	default:
1010 		if (cpi->cpi_model == 0xf)
1011 			cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
1012 		break;
1013 	}
1014 
1015 	cpi->cpi_step = CPI_STEP(cpi);
1016 	cpi->cpi_brandid = CPI_BRANDID(cpi);
1017 
1018 	/*
1019 	 * *default* assumptions:
1020 	 * - believe %edx feature word
1021 	 * - ignore %ecx feature word
1022 	 * - 32-bit virtual and physical addressing
1023 	 */
1024 	mask_edx = 0xffffffff;
1025 	mask_ecx = 0;
1026 
1027 	cpi->cpi_pabits = cpi->cpi_vabits = 32;
1028 
1029 	switch (cpi->cpi_vendor) {
1030 	case X86_VENDOR_Intel:
1031 		if (cpi->cpi_family == 5)
1032 			x86_type = X86_TYPE_P5;
1033 		else if (IS_LEGACY_P6(cpi)) {
1034 			x86_type = X86_TYPE_P6;
1035 			pentiumpro_bug4046376 = 1;
1036 			pentiumpro_bug4064495 = 1;
1037 			/*
1038 			 * Clear the SEP bit when it was set erroneously
1039 			 */
1040 			if (cpi->cpi_model < 3 && cpi->cpi_step < 3)
1041 				cp->cp_edx &= ~CPUID_INTC_EDX_SEP;
1042 		} else if (IS_NEW_F6(cpi) || cpi->cpi_family == 0xf) {
1043 			x86_type = X86_TYPE_P4;
1044 			/*
1045 			 * We don't currently depend on any of the %ecx
1046 			 * features until Prescott, so we'll only check
1047 			 * this from P4 onwards.  We might want to revisit
1048 			 * that idea later.
1049 			 */
1050 			mask_ecx = 0xffffffff;
1051 		} else if (cpi->cpi_family > 0xf)
1052 			mask_ecx = 0xffffffff;
1053 		/*
1054 		 * We don't support MONITOR/MWAIT if leaf 5 is not available
1055 		 * to obtain the monitor linesize.
1056 		 */
1057 		if (cpi->cpi_maxeax < 5)
1058 			mask_ecx &= ~CPUID_INTC_ECX_MON;
1059 		break;
1060 	case X86_VENDOR_IntelClone:
1061 	default:
1062 		break;
1063 	case X86_VENDOR_AMD:
1064 #if defined(OPTERON_ERRATUM_108)
1065 		if (cpi->cpi_family == 0xf && cpi->cpi_model == 0xe) {
1066 			cp->cp_eax = (0xf0f & cp->cp_eax) | 0xc0;
1067 			cpi->cpi_model = 0xc;
1068 		} else
1069 #endif
1070 		if (cpi->cpi_family == 5) {
1071 			/*
1072 			 * AMD K5 and K6
1073 			 *
1074 			 * These CPUs have an incomplete implementation
1075 			 * of MCA/MCE which we mask away.
1076 			 */
1077 			mask_edx &= ~(CPUID_INTC_EDX_MCE | CPUID_INTC_EDX_MCA);
1078 
1079 			/*
1080 			 * Model 0 uses the wrong (APIC) bit
1081 			 * to indicate PGE.  Fix it here.
1082 			 */
1083 			if (cpi->cpi_model == 0) {
1084 				if (cp->cp_edx & 0x200) {
1085 					cp->cp_edx &= ~0x200;
1086 					cp->cp_edx |= CPUID_INTC_EDX_PGE;
1087 				}
1088 			}
1089 
1090 			/*
1091 			 * Early models had problems w/ MMX; disable.
1092 			 */
1093 			if (cpi->cpi_model < 6)
1094 				mask_edx &= ~CPUID_INTC_EDX_MMX;
1095 		}
1096 
1097 		/*
1098 		 * For newer families, SSE3 and CX16, at least, are valid;
1099 		 * enable all
1100 		 */
1101 		if (cpi->cpi_family >= 0xf)
1102 			mask_ecx = 0xffffffff;
1103 		/*
1104 		 * We don't support MONITOR/MWAIT if leaf 5 is not available
1105 		 * to obtain the monitor linesize.
1106 		 */
1107 		if (cpi->cpi_maxeax < 5)
1108 			mask_ecx &= ~CPUID_INTC_ECX_MON;
1109 
1110 #if !defined(__xpv)
1111 		/*
1112 		 * Do not use MONITOR/MWAIT to halt in the idle loop on any AMD
1113 		 * processors.  AMD does not intend MWAIT to be used in the cpu
1114 		 * idle loop on current and future processors.  10h and future
1115 		 * AMD processors use more power in MWAIT than HLT.
1116 		 * Pre-family-10h Opterons do not have the MWAIT instruction.
1117 		 */
1118 		idle_cpu_prefer_mwait = 0;
1119 #endif
1120 
1121 		break;
1122 	case X86_VENDOR_TM:
1123 		/*
1124 		 * workaround the NT workaround in CMS 4.1
1125 		 */
1126 		if (cpi->cpi_family == 5 && cpi->cpi_model == 4 &&
1127 		    (cpi->cpi_step == 2 || cpi->cpi_step == 3))
1128 			cp->cp_edx |= CPUID_INTC_EDX_CX8;
1129 		break;
1130 	case X86_VENDOR_Centaur:
1131 		/*
1132 		 * workaround the NT workarounds again
1133 		 */
1134 		if (cpi->cpi_family == 6)
1135 			cp->cp_edx |= CPUID_INTC_EDX_CX8;
1136 		break;
1137 	case X86_VENDOR_Cyrix:
1138 		/*
1139 		 * We rely heavily on the probing in locore
1140 		 * to actually figure out what parts, if any,
1141 		 * of the Cyrix cpuid instruction to believe.
1142 		 */
1143 		switch (x86_type) {
1144 		case X86_TYPE_CYRIX_486:
1145 			mask_edx = 0;
1146 			break;
1147 		case X86_TYPE_CYRIX_6x86:
1148 			mask_edx = 0;
1149 			break;
1150 		case X86_TYPE_CYRIX_6x86L:
1151 			mask_edx =
1152 			    CPUID_INTC_EDX_DE |
1153 			    CPUID_INTC_EDX_CX8;
1154 			break;
1155 		case X86_TYPE_CYRIX_6x86MX:
1156 			mask_edx =
1157 			    CPUID_INTC_EDX_DE |
1158 			    CPUID_INTC_EDX_MSR |
1159 			    CPUID_INTC_EDX_CX8 |
1160 			    CPUID_INTC_EDX_PGE |
1161 			    CPUID_INTC_EDX_CMOV |
1162 			    CPUID_INTC_EDX_MMX;
1163 			break;
1164 		case X86_TYPE_CYRIX_GXm:
1165 			mask_edx =
1166 			    CPUID_INTC_EDX_MSR |
1167 			    CPUID_INTC_EDX_CX8 |
1168 			    CPUID_INTC_EDX_CMOV |
1169 			    CPUID_INTC_EDX_MMX;
1170 			break;
1171 		case X86_TYPE_CYRIX_MediaGX:
1172 			break;
1173 		case X86_TYPE_CYRIX_MII:
1174 		case X86_TYPE_VIA_CYRIX_III:
1175 			mask_edx =
1176 			    CPUID_INTC_EDX_DE |
1177 			    CPUID_INTC_EDX_TSC |
1178 			    CPUID_INTC_EDX_MSR |
1179 			    CPUID_INTC_EDX_CX8 |
1180 			    CPUID_INTC_EDX_PGE |
1181 			    CPUID_INTC_EDX_CMOV |
1182 			    CPUID_INTC_EDX_MMX;
1183 			break;
1184 		default:
1185 			break;
1186 		}
1187 		break;
1188 	}
1189 
1190 #if defined(__xpv)
1191 	/*
1192 	 * Do not support MONITOR/MWAIT under a hypervisor
1193 	 */
1194 	mask_ecx &= ~CPUID_INTC_ECX_MON;
1195 	/*
1196 	 * Do not support XSAVE under a hypervisor for now
1197 	 */
1198 	xsave_force_disable = B_TRUE;
1199 
1200 #endif	/* __xpv */
1201 
1202 	if (xsave_force_disable) {
1203 		mask_ecx &= ~CPUID_INTC_ECX_XSAVE;
1204 		mask_ecx &= ~CPUID_INTC_ECX_AVX;
1205 		mask_ecx &= ~CPUID_INTC_ECX_F16C;
1206 	}
1207 
1208 	/*
1209 	 * Now that we've figured out the masks that determine
1210 	 * which bits we choose to believe, apply the masks
1211 	 * to the feature words, then map the kernel's view
1212 	 * of these feature words into its feature set.
1213 	 */
1214 	cp->cp_edx &= mask_edx;
1215 	cp->cp_ecx &= mask_ecx;
1216 
1217 	/*
1218 	 * apply any platform restrictions (we don't call this
1219 	 * immediately after __cpuid_insn here, because we need the
1220 	 * workarounds applied above first)
1221 	 */
1222 	platform_cpuid_mangle(cpi->cpi_vendor, 1, cp);
1223 
1224 	/*
1225 	 * fold in overrides from the "eeprom" mechanism
1226 	 */
1227 	cp->cp_edx |= cpuid_feature_edx_include;
1228 	cp->cp_edx &= ~cpuid_feature_edx_exclude;
1229 
1230 	cp->cp_ecx |= cpuid_feature_ecx_include;
1231 	cp->cp_ecx &= ~cpuid_feature_ecx_exclude;
1232 
1233 	if (cp->cp_edx & CPUID_INTC_EDX_PSE) {
1234 		add_x86_feature(featureset, X86FSET_LARGEPAGE);
1235 	}
1236 	if (cp->cp_edx & CPUID_INTC_EDX_TSC) {
1237 		add_x86_feature(featureset, X86FSET_TSC);
1238 	}
1239 	if (cp->cp_edx & CPUID_INTC_EDX_MSR) {
1240 		add_x86_feature(featureset, X86FSET_MSR);
1241 	}
1242 	if (cp->cp_edx & CPUID_INTC_EDX_MTRR) {
1243 		add_x86_feature(featureset, X86FSET_MTRR);
1244 	}
1245 	if (cp->cp_edx & CPUID_INTC_EDX_PGE) {
1246 		add_x86_feature(featureset, X86FSET_PGE);
1247 	}
1248 	if (cp->cp_edx & CPUID_INTC_EDX_CMOV) {
1249 		add_x86_feature(featureset, X86FSET_CMOV);
1250 	}
1251 	if (cp->cp_edx & CPUID_INTC_EDX_MMX) {
1252 		add_x86_feature(featureset, X86FSET_MMX);
1253 	}
1254 	if ((cp->cp_edx & CPUID_INTC_EDX_MCE) != 0 &&
1255 	    (cp->cp_edx & CPUID_INTC_EDX_MCA) != 0) {
1256 		add_x86_feature(featureset, X86FSET_MCA);
1257 	}
1258 	if (cp->cp_edx & CPUID_INTC_EDX_PAE) {
1259 		add_x86_feature(featureset, X86FSET_PAE);
1260 	}
1261 	if (cp->cp_edx & CPUID_INTC_EDX_CX8) {
1262 		add_x86_feature(featureset, X86FSET_CX8);
1263 	}
1264 	if (cp->cp_ecx & CPUID_INTC_ECX_CX16) {
1265 		add_x86_feature(featureset, X86FSET_CX16);
1266 	}
1267 	if (cp->cp_edx & CPUID_INTC_EDX_PAT) {
1268 		add_x86_feature(featureset, X86FSET_PAT);
1269 	}
1270 	if (cp->cp_edx & CPUID_INTC_EDX_SEP) {
1271 		add_x86_feature(featureset, X86FSET_SEP);
1272 	}
1273 	if (cp->cp_edx & CPUID_INTC_EDX_FXSR) {
1274 		/*
1275 		 * In our implementation, fxsave/fxrstor
1276 		 * are prerequisites before we'll even
1277 		 * try to do SSE things.
1278 		 */
1279 		if (cp->cp_edx & CPUID_INTC_EDX_SSE) {
1280 			add_x86_feature(featureset, X86FSET_SSE);
1281 		}
1282 		if (cp->cp_edx & CPUID_INTC_EDX_SSE2) {
1283 			add_x86_feature(featureset, X86FSET_SSE2);
1284 		}
1285 		if (cp->cp_ecx & CPUID_INTC_ECX_SSE3) {
1286 			add_x86_feature(featureset, X86FSET_SSE3);
1287 		}
1288 		if (cp->cp_ecx & CPUID_INTC_ECX_SSSE3) {
1289 			add_x86_feature(featureset, X86FSET_SSSE3);
1290 		}
1291 		if (cp->cp_ecx & CPUID_INTC_ECX_SSE4_1) {
1292 			add_x86_feature(featureset, X86FSET_SSE4_1);
1293 		}
1294 		if (cp->cp_ecx & CPUID_INTC_ECX_SSE4_2) {
1295 			add_x86_feature(featureset, X86FSET_SSE4_2);
1296 		}
1297 		if (cp->cp_ecx & CPUID_INTC_ECX_AES) {
1298 			add_x86_feature(featureset, X86FSET_AES);
1299 		}
1300 		if (cp->cp_ecx & CPUID_INTC_ECX_PCLMULQDQ) {
1301 			add_x86_feature(featureset, X86FSET_PCLMULQDQ);
1302 		}
1303 
1304 		if (cp->cp_ecx & CPUID_INTC_ECX_XSAVE) {
1305 			add_x86_feature(featureset, X86FSET_XSAVE);
1306 
1307 			/* We only test AVX when there is XSAVE */
1308 			if (cp->cp_ecx & CPUID_INTC_ECX_AVX) {
1309 				add_x86_feature(featureset,
1310 				    X86FSET_AVX);
1311 
1312 				if (cp->cp_ecx & CPUID_INTC_ECX_F16C)
1313 					add_x86_feature(featureset,
1314 					    X86FSET_F16C);
1315 			}
1316 		}
1317 	}
1318 	if (cp->cp_edx & CPUID_INTC_EDX_DE) {
1319 		add_x86_feature(featureset, X86FSET_DE);
1320 	}
1321 #if !defined(__xpv)
1322 	if (cp->cp_ecx & CPUID_INTC_ECX_MON) {
1323 
1324 		/*
1325 		 * We require the CLFLUSH instruction for the erratum workaround
1326 		 * to use MONITOR/MWAIT.
1327 		 */
1328 		if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) {
1329 			cpi->cpi_mwait.support |= MWAIT_SUPPORT;
1330 			add_x86_feature(featureset, X86FSET_MWAIT);
1331 		} else {
1332 			extern int idle_cpu_assert_cflush_monitor;
1333 
1334 			/*
1335 			 * All processors we are aware of which have
1336 			 * MONITOR/MWAIT also have CLFLUSH.
1337 			 */
1338 			if (idle_cpu_assert_cflush_monitor) {
1339 				ASSERT((cp->cp_ecx & CPUID_INTC_ECX_MON) &&
1340 				    (cp->cp_edx & CPUID_INTC_EDX_CLFSH));
1341 			}
1342 		}
1343 	}
1344 #endif	/* __xpv */
1345 
1346 	if (cp->cp_ecx & CPUID_INTC_ECX_VMX) {
1347 		add_x86_feature(featureset, X86FSET_VMX);
1348 	}
1349 
1350 	if (cp->cp_ecx & CPUID_INTC_ECX_RDRAND)
1351 		add_x86_feature(featureset, X86FSET_RDRAND);
1352 
1353 	/*
1354 	 * Only needed the first time; the rest of the cpus follow suit.
1355 	 * We only capture this for the boot cpu.
1356 	 */
1357 	if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) {
1358 		add_x86_feature(featureset, X86FSET_CLFSH);
1359 		x86_clflush_size = (BITX(cp->cp_ebx, 15, 8) * 8);
1360 	}
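	/*
	 * For example, a CLFLUSH line-size field of 8 (the field is in
	 * 8-byte chunks) yields the common x86_clflush_size of 64 bytes.
	 */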
1361 	if (is_x86_feature(featureset, X86FSET_PAE))
1362 		cpi->cpi_pabits = 36;
1363 
1364 	/*
1365 	 * Hyperthreading configuration is slightly tricky on Intel
1366 	 * and pure clones, and even trickier on AMD.
1367 	 *
1368 	 * (AMD chose to set the HTT bit on their CMP processors,
1369 	 * even though they're not actually hyperthreaded.  Thus it
1370 	 * takes a bit more work to figure out what's really going
1371 	 * on ... see the handling of the CMP_LGCY bit below)
1372 	 */
1373 	if (cp->cp_edx & CPUID_INTC_EDX_HTT) {
1374 		cpi->cpi_ncpu_per_chip = CPI_CPU_COUNT(cpi);
1375 		if (cpi->cpi_ncpu_per_chip > 1)
1376 			add_x86_feature(featureset, X86FSET_HTT);
1377 	} else {
1378 		cpi->cpi_ncpu_per_chip = 1;
1379 	}
1380 
1381 	/*
1382 	 * Work on the "extended" feature information, doing
1383 	 * some basic initialization for cpuid_pass2()
1384 	 */
1385 	xcpuid = 0;
1386 	switch (cpi->cpi_vendor) {
1387 	case X86_VENDOR_Intel:
1388 		if (IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf)
1389 			xcpuid++;
1390 		break;
1391 	case X86_VENDOR_AMD:
1392 		if (cpi->cpi_family > 5 ||
1393 		    (cpi->cpi_family == 5 && cpi->cpi_model >= 1))
1394 			xcpuid++;
1395 		break;
1396 	case X86_VENDOR_Cyrix:
1397 		/*
1398 		 * Only these Cyrix CPUs are -known- to support
1399 		 * extended cpuid operations.
1400 		 */
1401 		if (x86_type == X86_TYPE_VIA_CYRIX_III ||
1402 		    x86_type == X86_TYPE_CYRIX_GXm)
1403 			xcpuid++;
1404 		break;
1405 	case X86_VENDOR_Centaur:
1406 	case X86_VENDOR_TM:
1407 	default:
1408 		xcpuid++;
1409 		break;
1410 	}
1411 
1412 	if (xcpuid) {
1413 		cp = &cpi->cpi_extd[0];
1414 		cp->cp_eax = 0x80000000;
1415 		cpi->cpi_xmaxeax = __cpuid_insn(cp);
1416 	}
1417 
1418 	if (cpi->cpi_xmaxeax & 0x80000000) {
1419 
1420 		if (cpi->cpi_xmaxeax > CPI_XMAXEAX_MAX)
1421 			cpi->cpi_xmaxeax = CPI_XMAXEAX_MAX;
1422 
1423 		switch (cpi->cpi_vendor) {
1424 		case X86_VENDOR_Intel:
1425 		case X86_VENDOR_AMD:
1426 			if (cpi->cpi_xmaxeax < 0x80000001)
1427 				break;
1428 			cp = &cpi->cpi_extd[1];
1429 			cp->cp_eax = 0x80000001;
1430 			(void) __cpuid_insn(cp);
1431 
1432 			if (cpi->cpi_vendor == X86_VENDOR_AMD &&
1433 			    cpi->cpi_family == 5 &&
1434 			    cpi->cpi_model == 6 &&
1435 			    cpi->cpi_step == 6) {
1436 				/*
1437 				 * K6 model 6 uses bit 10 to indicate SYSC.
1438 				 * Later models use bit 11. Fix it here.
1439 				 */
1440 				if (cp->cp_edx & 0x400) {
1441 					cp->cp_edx &= ~0x400;
1442 					cp->cp_edx |= CPUID_AMD_EDX_SYSC;
1443 				}
1444 			}
1445 
1446 			platform_cpuid_mangle(cpi->cpi_vendor, 0x80000001, cp);
1447 
1448 			/*
1449 			 * Compute the additions to the kernel's feature word.
1450 			 */
1451 			if (cp->cp_edx & CPUID_AMD_EDX_NX) {
1452 				add_x86_feature(featureset, X86FSET_NX);
1453 			}
1454 
1455 			/*
1456 			 * Regardless of whether we boot 64-bit,
1457 			 * we should have a way to identify whether
1458 			 * the CPU is capable of running 64-bit.
1459 			 */
1460 			if (cp->cp_edx & CPUID_AMD_EDX_LM) {
1461 				add_x86_feature(featureset, X86FSET_64);
1462 			}
1463 
1464 #if defined(__amd64)
1465 			/* 1 GB large page - enable only for 64 bit kernel */
1466 			if (cp->cp_edx & CPUID_AMD_EDX_1GPG) {
1467 				add_x86_feature(featureset, X86FSET_1GPG);
1468 			}
1469 #endif
1470 
1471 			if ((cpi->cpi_vendor == X86_VENDOR_AMD) &&
1472 			    (cpi->cpi_std[1].cp_edx & CPUID_INTC_EDX_FXSR) &&
1473 			    (cp->cp_ecx & CPUID_AMD_ECX_SSE4A)) {
1474 				add_x86_feature(featureset, X86FSET_SSE4A);
1475 			}
1476 
1477 			/*
1478 			 * If both the HTT and CMP_LGCY bits are set,
1479 			 * then we're not actually HyperThreaded.  Read
1480 			 * "AMD CPUID Specification" for more details.
1481 			 */
1482 			if (cpi->cpi_vendor == X86_VENDOR_AMD &&
1483 			    is_x86_feature(featureset, X86FSET_HTT) &&
1484 			    (cp->cp_ecx & CPUID_AMD_ECX_CMP_LGCY)) {
1485 				remove_x86_feature(featureset, X86FSET_HTT);
1486 				add_x86_feature(featureset, X86FSET_CMP);
1487 			}
1488 #if defined(__amd64)
1489 			/*
1490 			 * It's really tricky to support syscall/sysret in
1491 			 * the i386 kernel; we rely on sysenter/sysexit
1492 			 * instead.  In the amd64 kernel, things are -way-
1493 			 * better.
1494 			 */
1495 			if (cp->cp_edx & CPUID_AMD_EDX_SYSC) {
1496 				add_x86_feature(featureset, X86FSET_ASYSC);
1497 			}
1498 
1499 			/*
1500 			 * While we're thinking about system calls, note
1501 			 * that AMD processors don't support sysenter
1502 			 * in long mode at all, so don't try to program them.
1503 			 */
1504 			if (x86_vendor == X86_VENDOR_AMD) {
1505 				remove_x86_feature(featureset, X86FSET_SEP);
1506 			}
1507 #endif
1508 			if (cp->cp_edx & CPUID_AMD_EDX_TSCP) {
1509 				add_x86_feature(featureset, X86FSET_TSCP);
1510 			}
1511 
1512 			if (cp->cp_ecx & CPUID_AMD_ECX_SVM) {
1513 				add_x86_feature(featureset, X86FSET_SVM);
1514 			}
1515 
1516 			if (cp->cp_ecx & CPUID_AMD_ECX_TOPOEXT) {
1517 				add_x86_feature(featureset, X86FSET_TOPOEXT);
1518 			}
1519 			break;
1520 		default:
1521 			break;
1522 		}
1523 
1524 		/*
1525 		 * Get CPUID data about processor cores and hyperthreads.
1526 		 */
1527 		switch (cpi->cpi_vendor) {
1528 		case X86_VENDOR_Intel:
1529 			if (cpi->cpi_maxeax >= 4) {
1530 				cp = &cpi->cpi_std[4];
1531 				cp->cp_eax = 4;
1532 				cp->cp_ecx = 0;
1533 				(void) __cpuid_insn(cp);
1534 				platform_cpuid_mangle(cpi->cpi_vendor, 4, cp);
1535 			}
1536 			/*FALLTHROUGH*/
1537 		case X86_VENDOR_AMD:
1538 			if (cpi->cpi_xmaxeax < 0x80000008)
1539 				break;
1540 			cp = &cpi->cpi_extd[8];
1541 			cp->cp_eax = 0x80000008;
1542 			(void) __cpuid_insn(cp);
1543 			platform_cpuid_mangle(cpi->cpi_vendor, 0x80000008, cp);
1544 
1545 			/*
1546 			 * Virtual and physical address limits from
1547 			 * cpuid override previously guessed values.
1548 			 */
1549 			cpi->cpi_pabits = BITX(cp->cp_eax, 7, 0);
1550 			cpi->cpi_vabits = BITX(cp->cp_eax, 15, 8);
1551 			break;
1552 		default:
1553 			break;
1554 		}
1555 
1556 		/*
1557 		 * Derive the number of cores per chip
1558 		 */
1559 		switch (cpi->cpi_vendor) {
1560 		case X86_VENDOR_Intel:
1561 			if (cpi->cpi_maxeax < 4) {
1562 				cpi->cpi_ncore_per_chip = 1;
1563 				break;
1564 			} else {
1565 				cpi->cpi_ncore_per_chip =
1566 				    BITX((cpi)->cpi_std[4].cp_eax, 31, 26) + 1;
1567 			}
1568 			break;
1569 		case X86_VENDOR_AMD:
1570 			if (cpi->cpi_xmaxeax < 0x80000008) {
1571 				cpi->cpi_ncore_per_chip = 1;
1572 				break;
1573 			} else {
1574 				/*
1575 				 * On family 0xf, cpuid fn 0x80000008 ECX[7:0]
1576 				 * "NC" is 1 less than the number of physical
1577 				 * cores on the chip.  In family 0x10 this value can
1578 				 * be affected by "downcoring" - it reflects
1579 				 * 1 less than the number of cores actually
1580 				 * enabled on this node.
1581 				 */
1582 				cpi->cpi_ncore_per_chip =
1583 				    BITX((cpi)->cpi_extd[8].cp_ecx, 7, 0) + 1;
1584 			}
1585 			break;
1586 		default:
1587 			cpi->cpi_ncore_per_chip = 1;
1588 			break;
1589 		}
1590 
1591 		/*
1592 		 * Get CPUID data about TSC Invariance in Deep C-State.
1593 		 */
1594 		switch (cpi->cpi_vendor) {
1595 		case X86_VENDOR_Intel:
1596 			if (cpi->cpi_xmaxeax >= 0x80000007) {
1597 				cp = &cpi->cpi_extd[7];
1598 				cp->cp_eax = 0x80000007;
1599 				cp->cp_ecx = 0;
1600 				(void) __cpuid_insn(cp);
1601 			}
1602 			break;
1603 		default:
1604 			break;
1605 		}
1606 	} else {
1607 		cpi->cpi_ncore_per_chip = 1;
1608 	}
1609 
1610 	/*
1611 	 * If more than one core, then this processor is CMP.
1612 	 */
1613 	if (cpi->cpi_ncore_per_chip > 1) {
1614 		add_x86_feature(featureset, X86FSET_CMP);
1615 	}
1616 
1617 	/*
1618 	 * If the number of cores is the same as the number
1619 	 * of CPUs, then we cannot have HyperThreading.
1620 	 */
1621 	if (cpi->cpi_ncpu_per_chip == cpi->cpi_ncore_per_chip) {
1622 		remove_x86_feature(featureset, X86FSET_HTT);
1623 	}
1624 
1625 	cpi->cpi_apicid = CPI_APIC_ID(cpi);
1626 	cpi->cpi_procnodes_per_pkg = 1;
1627 	cpi->cpi_cores_per_compunit = 1;
1628 	if (is_x86_feature(featureset, X86FSET_HTT) == B_FALSE &&
1629 	    is_x86_feature(featureset, X86FSET_CMP) == B_FALSE) {
1630 		/*
1631 		 * Single-core single-threaded processors.
1632 		 */
1633 		cpi->cpi_chipid = -1;
1634 		cpi->cpi_clogid = 0;
1635 		cpi->cpi_coreid = cpu->cpu_id;
1636 		cpi->cpi_pkgcoreid = 0;
1637 		if (cpi->cpi_vendor == X86_VENDOR_AMD)
1638 			cpi->cpi_procnodeid = BITX(cpi->cpi_apicid, 3, 0);
1639 		else
1640 			cpi->cpi_procnodeid = cpi->cpi_chipid;
1641 	} else if (cpi->cpi_ncpu_per_chip > 1) {
1642 		if (cpi->cpi_vendor == X86_VENDOR_Intel)
1643 			cpuid_intel_getids(cpu, featureset);
1644 		else if (cpi->cpi_vendor == X86_VENDOR_AMD)
1645 			cpuid_amd_getids(cpu);
1646 		else {
1647 			/*
1648 			 * All other processors are currently
1649 			 * assumed to have single cores.
1650 			 */
1651 			cpi->cpi_coreid = cpi->cpi_chipid;
1652 			cpi->cpi_pkgcoreid = 0;
1653 			cpi->cpi_procnodeid = cpi->cpi_chipid;
1654 			cpi->cpi_compunitid = cpi->cpi_chipid;
1655 		}
1656 	}
1657 
1658 	/*
1659 	 * Synthesize chip "revision" and socket type
1660 	 */
1661 	cpi->cpi_chiprev = _cpuid_chiprev(cpi->cpi_vendor, cpi->cpi_family,
1662 	    cpi->cpi_model, cpi->cpi_step);
1663 	cpi->cpi_chiprevstr = _cpuid_chiprevstr(cpi->cpi_vendor,
1664 	    cpi->cpi_family, cpi->cpi_model, cpi->cpi_step);
1665 	cpi->cpi_socket = _cpuid_skt(cpi->cpi_vendor, cpi->cpi_family,
1666 	    cpi->cpi_model, cpi->cpi_step);
1667 
1668 pass1_done:
1669 	cpi->cpi_pass = 1;
1670 }
1671 
1672 /*
1673  * Make copies of the cpuid table entries we depend on, in
1674  * part for ease of parsing now, in part so that we have only
1675  * one place to correct any of it, in part for ease of
1676  * later export to userland, and in part so we can look at
1677  * this stuff in a crash dump.
1678  */
1679 
1680 /*ARGSUSED*/
1681 void
1682 cpuid_pass2(cpu_t *cpu)
1683 {
1684 	uint_t n, nmax;
1685 	int i;
1686 	struct cpuid_regs *cp;
1687 	uint8_t *dp;
1688 	uint32_t *iptr;
1689 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
1690 
1691 	ASSERT(cpi->cpi_pass == 1);
1692 
1693 	if (cpi->cpi_maxeax < 1)
1694 		goto pass2_done;
1695 
1696 	if ((nmax = cpi->cpi_maxeax + 1) > NMAX_CPI_STD)
1697 		nmax = NMAX_CPI_STD;
1698 	/*
1699 	 * (We already handled n == 0 and n == 1 in pass 1)
1700 	 */
1701 	for (n = 2, cp = &cpi->cpi_std[2]; n < nmax; n++, cp++) {
1702 		cp->cp_eax = n;
1703 
1704 		/*
1705 		 * CPUID function 4 expects %ecx to be initialized
1706 		 * with an index which indicates which cache to return
1707 		 * information about. The OS is expected to call function 4
1708 		 * with %ecx set to 0, 1, 2, ... until it returns with
1709 		 * EAX[4:0] set to 0, which indicates there are no more
1710 		 * caches.
1711 		 *
1712 		 * Here, populate cpi_std[4] with the information returned by
1713 		 * function 4 when %ecx == 0, and do the rest in cpuid_pass3()
1714 		 * when dynamic memory allocation becomes available.
1715 		 *
1716 		 * Note: we need to explicitly initialize %ecx here, since
1717 		 * function 4 may have been previously invoked.
1718 		 */
1719 		if (n == 4)
1720 			cp->cp_ecx = 0;
1721 
1722 		(void) __cpuid_insn(cp);
1723 		platform_cpuid_mangle(cpi->cpi_vendor, n, cp);
1724 		switch (n) {
1725 		case 2:
1726 			/*
1727 			 * "the lower 8 bits of the %eax register
1728 			 * contain a value that identifies the number
1729 			 * of times the cpuid [instruction] has to be
1730 			 * executed to obtain a complete image of the
1731 			 * processor's caching systems."
1732 			 *
1733 			 * How *do* they make this stuff up?
1734 			 */
1735 			cpi->cpi_ncache = sizeof (*cp) *
1736 			    BITX(cp->cp_eax, 7, 0);
1737 			if (cpi->cpi_ncache == 0)
1738 				break;
1739 			cpi->cpi_ncache--;	/* skip count byte */
1740 
1741 			/*
1742 			 * Well, for now, rather than attempt to implement
1743 			 * this slightly dubious algorithm, we just look
1744 			 * at the first 15 ..
1745 			 */
1746 			if (cpi->cpi_ncache > (sizeof (*cp) - 1))
1747 				cpi->cpi_ncache = sizeof (*cp) - 1;
1748 
1749 			dp = cpi->cpi_cacheinfo;
1750 			if (BITX(cp->cp_eax, 31, 31) == 0) {
1751 				uint8_t *p = (void *)&cp->cp_eax;
1752 				for (i = 1; i < 4; i++)
1753 					if (p[i] != 0)
1754 						*dp++ = p[i];
1755 			}
1756 			if (BITX(cp->cp_ebx, 31, 31) == 0) {
1757 				uint8_t *p = (void *)&cp->cp_ebx;
1758 				for (i = 0; i < 4; i++)
1759 					if (p[i] != 0)
1760 						*dp++ = p[i];
1761 			}
1762 			if (BITX(cp->cp_ecx, 31, 31) == 0) {
1763 				uint8_t *p = (void *)&cp->cp_ecx;
1764 				for (i = 0; i < 4; i++)
1765 					if (p[i] != 0)
1766 						*dp++ = p[i];
1767 			}
1768 			if (BITX(cp->cp_edx, 31, 31) == 0) {
1769 				uint8_t *p = (void *)&cp->cp_edx;
1770 				for (i = 0; i < 4; i++)
1771 					if (p[i] != 0)
1772 						*dp++ = p[i];
1773 			}
1774 			break;
1775 
1776 		case 3:	/* Processor serial number, if PSN supported */
1777 			break;
1778 
1779 		case 4:	/* Deterministic cache parameters */
1780 			break;
1781 
1782 		case 5:	/* Monitor/Mwait parameters */
1783 		{
1784 			size_t mwait_size;
1785 
1786 			/*
1787 			 * check cpi_mwait.support which was set in cpuid_pass1
1788 			 */
1789 			if (!(cpi->cpi_mwait.support & MWAIT_SUPPORT))
1790 				break;
1791 
1792 			/*
1793 			 * Protect ourselves from an insane mwait line size,
1794 			 * which works around incomplete hardware emulator(s).
1795 			 */
1796 			mwait_size = (size_t)MWAIT_SIZE_MAX(cpi);
1797 			if (mwait_size < sizeof (uint32_t) ||
1798 			    !ISP2(mwait_size)) {
1799 #if DEBUG
1800 				cmn_err(CE_NOTE, "Cannot handle cpu %d mwait "
1801 				    "size %ld", cpu->cpu_id, (long)mwait_size);
1802 #endif
1803 				break;
1804 			}
1805 
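			/*
			 * Record the smallest and largest MONITOR/MWAIT line
			 * sizes, and note whether the MWAIT extensions
			 * (including treating interrupts as break events even
			 * when masked) are advertised.
			 */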
1806 			cpi->cpi_mwait.mon_min = (size_t)MWAIT_SIZE_MIN(cpi);
1807 			cpi->cpi_mwait.mon_max = mwait_size;
1808 			if (MWAIT_EXTENSION(cpi)) {
1809 				cpi->cpi_mwait.support |= MWAIT_EXTENSIONS;
1810 				if (MWAIT_INT_ENABLE(cpi))
1811 					cpi->cpi_mwait.support |=
1812 					    MWAIT_ECX_INT_ENABLE;
1813 			}
1814 			break;
1815 		}
1816 		default:
1817 			break;
1818 		}
1819 	}
1820 
1821 	if (cpi->cpi_maxeax >= 0xB && cpi->cpi_vendor == X86_VENDOR_Intel) {
1822 		struct cpuid_regs regs;
1823 
1824 		cp = &regs;
1825 		cp->cp_eax = 0xB;
1826 		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
1827 
1828 		(void) __cpuid_insn(cp);
1829 
1830 		/*
1831 		 * Check that CPUID.EAX=0BH, ECX=0H returns a non-zero value
1832 		 * in EBX, which indicates that the extended topology
1833 		 * enumeration leaf is available.
1834 		 */
1835 		if (cp->cp_ebx) {
1836 			uint32_t x2apic_id;
1837 			uint_t coreid_shift = 0;
1838 			uint_t ncpu_per_core = 1;
1839 			uint_t chipid_shift = 0;
1840 			uint_t ncpu_per_chip = 1;
1841 			uint_t i;
1842 			uint_t level;
1843 
1844 			for (i = 0; i < CPI_FNB_ECX_MAX; i++) {
1845 				cp->cp_eax = 0xB;
1846 				cp->cp_ecx = i;
1847 
1848 				(void) __cpuid_insn(cp);
1849 				level = CPI_CPU_LEVEL_TYPE(cp);
1850 
1851 				if (level == 1) {
1852 					x2apic_id = cp->cp_edx;
1853 					coreid_shift = BITX(cp->cp_eax, 4, 0);
1854 					ncpu_per_core = BITX(cp->cp_ebx, 15, 0);
1855 				} else if (level == 2) {
1856 					x2apic_id = cp->cp_edx;
1857 					chipid_shift = BITX(cp->cp_eax, 4, 0);
1858 					ncpu_per_chip = BITX(cp->cp_ebx, 15, 0);
1859 				}
1860 			}
1861 
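			/*
			 * Derive the topology ids from the x2APIC id: the
			 * level-1 (SMT) shift strips thread bits to give the
			 * core id, the level-2 (core) shift strips all
			 * sub-package bits to give the chip id, and the bits
			 * below the level-2 shift form the package-local
			 * logical id.
			 */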
1862 			cpi->cpi_apicid = x2apic_id;
1863 			cpi->cpi_ncpu_per_chip = ncpu_per_chip;
1864 			cpi->cpi_ncore_per_chip = ncpu_per_chip /
1865 			    ncpu_per_core;
1866 			cpi->cpi_chipid = x2apic_id >> chipid_shift;
1867 			cpi->cpi_clogid = x2apic_id & ((1 << chipid_shift) - 1);
1868 			cpi->cpi_coreid = x2apic_id >> coreid_shift;
1869 			cpi->cpi_pkgcoreid = cpi->cpi_clogid >> coreid_shift;
1870 		}
1871 
1872 		/* Make cp NULL so that we don't stumble on others */
1873 		cp = NULL;
1874 	}
1875 
1876 	/*
1877 	 * XSAVE enumeration
1878 	 */
1879 	if (cpi->cpi_maxeax >= 0xD) {
1880 		struct cpuid_regs regs;
1881 		boolean_t cpuid_d_valid = B_TRUE;
1882 
1883 		cp = &regs;
1884 		cp->cp_eax = 0xD;
1885 		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
1886 
1887 		(void) __cpuid_insn(cp);
1888 
1889 		/*
1890 		 * Sanity checks for debug
1891 		 */
1892 		if ((cp->cp_eax & XFEATURE_LEGACY_FP) == 0 ||
1893 		    (cp->cp_eax & XFEATURE_SSE) == 0) {
1894 			cpuid_d_valid = B_FALSE;
1895 		}
1896 
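		/*
		 * Leaf 0xD, subleaf 0: %eax/%edx enumerate the XSAVE
		 * feature bits supported by the hardware, and %ecx gives
		 * the maximum save area size needed for all of them.
		 */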
1897 		cpi->cpi_xsave.xsav_hw_features_low = cp->cp_eax;
1898 		cpi->cpi_xsave.xsav_hw_features_high = cp->cp_edx;
1899 		cpi->cpi_xsave.xsav_max_size = cp->cp_ecx;
1900 
1901 		/*
1902 		 * If the hw supports AVX, get the size and offset in the save
1903 		 * area for the ymm state.
1904 		 */
1905 		if (cpi->cpi_xsave.xsav_hw_features_low & XFEATURE_AVX) {
1906 			cp->cp_eax = 0xD;
1907 			cp->cp_ecx = 2;
1908 			cp->cp_edx = cp->cp_ebx = 0;
1909 
1910 			(void) __cpuid_insn(cp);
1911 
1912 			if (cp->cp_ebx != CPUID_LEAFD_2_YMM_OFFSET ||
1913 			    cp->cp_eax != CPUID_LEAFD_2_YMM_SIZE) {
1914 				cpuid_d_valid = B_FALSE;
1915 			}
1916 
1917 			cpi->cpi_xsave.ymm_size = cp->cp_eax;
1918 			cpi->cpi_xsave.ymm_offset = cp->cp_ebx;
1919 		}
1920 
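		/*
		 * If the kernel isn't using XSAVE, leave xsave_state_size
		 * as zero.  Otherwise size the save area from the (sane)
		 * CPUID data, or, if the leaf looks bogus, disable XSAVE
		 * altogether -- which is only possible on the boot CPU.
		 */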
1921 		if (!is_x86_feature(x86_featureset, X86FSET_XSAVE)) {
1922 			xsave_state_size = 0;
1923 		} else if (cpuid_d_valid) {
1924 			xsave_state_size = cpi->cpi_xsave.xsav_max_size;
1925 		} else {
1926 			/* Broken CPUID 0xD, probably in HVM */
1927 			cmn_err(CE_WARN, "cpu%d: CPUID.0xD returns invalid "
1928 			    "value: hw_low = %d, hw_high = %d, xsave_size = %d"
1929 			    ", ymm_size = %d, ymm_offset = %d\n",
1930 			    cpu->cpu_id, cpi->cpi_xsave.xsav_hw_features_low,
1931 			    cpi->cpi_xsave.xsav_hw_features_high,
1932 			    (int)cpi->cpi_xsave.xsav_max_size,
1933 			    (int)cpi->cpi_xsave.ymm_size,
1934 			    (int)cpi->cpi_xsave.ymm_offset);
1935 
1936 			if (xsave_state_size != 0) {
1937 				/*
1938 				 * This must be a non-boot CPU. We cannot
1939 				 * continue, because boot cpu has already
1940 				 * enabled XSAVE.
1941 				 */
1942 				ASSERT(cpu->cpu_id != 0);
1943 				cmn_err(CE_PANIC, "cpu%d: we have already "
1944 				    "enabled XSAVE on boot cpu, cannot "
1945 				    "continue.", cpu->cpu_id);
1946 			} else {
1947 				/*
1948 				 * Must be from boot CPU, OK to disable XSAVE.
1949 				 */
1950 				ASSERT(cpu->cpu_id == 0);
1951 				remove_x86_feature(x86_featureset,
1952 				    X86FSET_XSAVE);
1953 				remove_x86_feature(x86_featureset, X86FSET_AVX);
1954 				CPI_FEATURES_ECX(cpi) &= ~CPUID_INTC_ECX_XSAVE;
1955 				CPI_FEATURES_ECX(cpi) &= ~CPUID_INTC_ECX_AVX;
1956 				CPI_FEATURES_ECX(cpi) &= ~CPUID_INTC_ECX_F16C;
1957 				xsave_force_disable = B_TRUE;
1958 			}
1959 		}
1960 	}
1961 
1962 
1963 	if ((cpi->cpi_xmaxeax & 0x80000000) == 0)
1964 		goto pass2_done;
1965 
1966 	if ((nmax = cpi->cpi_xmaxeax - 0x80000000 + 1) > NMAX_CPI_EXTD)
1967 		nmax = NMAX_CPI_EXTD;
1968 	/*
1969 	 * Copy the extended properties, fixing them as we go.
1970 	 * (We already handled n == 0 and n == 1 in pass 1)
1971 	 */
1972 	iptr = (void *)cpi->cpi_brandstr;
1973 	for (n = 2, cp = &cpi->cpi_extd[2]; n < nmax; cp++, n++) {
1974 		cp->cp_eax = 0x80000000 + n;
1975 		(void) __cpuid_insn(cp);
1976 		platform_cpuid_mangle(cpi->cpi_vendor, 0x80000000 + n, cp);
1977 		switch (n) {
1978 		case 2:
1979 		case 3:
1980 		case 4:
1981 			/*
1982 			 * Extract the brand string
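			 * (leaves 0x80000002-4 each return 16 bytes of the
			 * 48-byte ASCII brand string in %eax..%edx)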
1983 			 */
1984 			*iptr++ = cp->cp_eax;
1985 			*iptr++ = cp->cp_ebx;
1986 			*iptr++ = cp->cp_ecx;
1987 			*iptr++ = cp->cp_edx;
1988 			break;
1989 		case 5:
1990 			switch (cpi->cpi_vendor) {
1991 			case X86_VENDOR_AMD:
1992 				/*
1993 				 * The Athlon and Duron were the first
1994 				 * parts to report the sizes of the
1995 				 * TLB for large pages. For anything
1996 				 * earlier, we don't trust the data.
1997 				 */
1998 				if (cpi->cpi_family < 6 ||
1999 				    (cpi->cpi_family == 6 &&
2000 				    cpi->cpi_model < 1))
2001 					cp->cp_eax = 0;
2002 				break;
2003 			default:
2004 				break;
2005 			}
2006 			break;
2007 		case 6:
2008 			switch (cpi->cpi_vendor) {
2009 			case X86_VENDOR_AMD:
2010 				/*
2011 				 * The Athlon and Duron were the first
2012 				 * AMD parts with L2 TLBs. For anything
2013 				 * earlier, don't trust the data.
2014 				 */
2015 				if (cpi->cpi_family < 6 ||
2016 				    (cpi->cpi_family == 6 &&
2017 				    cpi->cpi_model < 1))
2018 					cp->cp_eax = cp->cp_ebx = 0;
2019 				/*
2020 				 * AMD Duron rev A0 reports L2
2021 				 * cache size incorrectly as 1K
2022 				 * when it is really 64K
2023 				 */
2024 				if (cpi->cpi_family == 6 &&
2025 				    cpi->cpi_model == 3 &&
2026 				    cpi->cpi_step == 0) {
2027 					cp->cp_ecx &= 0xffff;
2028 					cp->cp_ecx |= 0x400000;
2029 				}
2030 				break;
2031 			case X86_VENDOR_Cyrix:	/* VIA C3 */
2032 				/*
2033 				 * VIA C3 processors are a bit messed
2034 				 * up w.r.t. encoding cache sizes in %ecx
2035 				 */
2036 				if (cpi->cpi_family != 6)
2037 					break;
2038 				/*
2039 				 * models 7 and 8 were incorrectly encoded
2040 				 *
2041 				 * xxx is model 8 really broken?
2042 				 */
2043 				if (cpi->cpi_model == 7 ||
2044 				    cpi->cpi_model == 8)
2045 					cp->cp_ecx =
2046 					    BITX(cp->cp_ecx, 31, 24) << 16 |
2047 					    BITX(cp->cp_ecx, 23, 16) << 12 |
2048 					    BITX(cp->cp_ecx, 15, 8) << 8 |
2049 					    BITX(cp->cp_ecx, 7, 0);
2050 				/*
2051 				 * model 9 stepping 1 has wrong associativity
2052 				 */
2053 				if (cpi->cpi_model == 9 && cpi->cpi_step == 1)
2054 					cp->cp_ecx |= 8 << 12;
2055 				break;
2056 			case X86_VENDOR_Intel:
2057 				/*
2058 				 * Extended L2 Cache features function.
2059 				 * First appeared on Prescott.
2060 				 */
2061 			default:
2062 				break;
2063 			}
2064 			break;
2065 		default:
2066 			break;
2067 		}
2068 	}
2069 
2070 pass2_done:
2071 	cpi->cpi_pass = 2;
2072 }
2073 
2074 static const char *
2075 intel_cpubrand(const struct cpuid_info *cpi)
2076 {
2077 	int i;
2078 
2079 	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
2080 	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5)
2081 		return ("i486");
2082 
2083 	switch (cpi->cpi_family) {
2084 	case 5:
2085 		return ("Intel Pentium(r)");
2086 	case 6:
2087 		switch (cpi->cpi_model) {
2088 			uint_t celeron, xeon;
2089 			const struct cpuid_regs *cp;
2090 		case 0:
2091 		case 1:
2092 		case 2:
2093 			return ("Intel Pentium(r) Pro");
2094 		case 3:
2095 		case 4:
2096 			return ("Intel Pentium(r) II");
2097 		case 6:
2098 			return ("Intel Celeron(r)");
2099 		case 5:
2100 		case 7:
2101 			celeron = xeon = 0;
2102 			cp = &cpi->cpi_std[2];	/* cache info */
2103 
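			/*
			 * Scan the leaf-2 descriptor bytes: descriptor 0x40
			 * (no L2/L3 cache) counts toward Celeron, while
			 * 0x44/0x45 (1MB/2MB L2) count toward Xeon.
			 */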
2104 			for (i = 1; i < 4; i++) {
2105 				uint_t tmp;
2106 
2107 				tmp = (cp->cp_eax >> (8 * i)) & 0xff;
2108 				if (tmp == 0x40)
2109 					celeron++;
2110 				if (tmp >= 0x44 && tmp <= 0x45)
2111 					xeon++;
2112 			}
2113 
2114 			for (i = 0; i < 2; i++) {
2115 				uint_t tmp;
2116 
2117 				tmp = (cp->cp_ebx >> (8 * i)) & 0xff;
2118 				if (tmp == 0x40)
2119 					celeron++;
2120 				else if (tmp >= 0x44 && tmp <= 0x45)
2121 					xeon++;
2122 			}
2123 
2124 			for (i = 0; i < 4; i++) {
2125 				uint_t tmp;
2126 
2127 				tmp = (cp->cp_ecx >> (8 * i)) & 0xff;
2128 				if (tmp == 0x40)
2129 					celeron++;
2130 				else if (tmp >= 0x44 && tmp <= 0x45)
2131 					xeon++;
2132 			}
2133 
2134 			for (i = 0; i < 4; i++) {
2135 				uint_t tmp;
2136 
2137 				tmp = (cp->cp_edx >> (8 * i)) & 0xff;
2138 				if (tmp == 0x40)
2139 					celeron++;
2140 				else if (tmp >= 0x44 && tmp <= 0x45)
2141 					xeon++;
2142 			}
2143 
2144 			if (celeron)
2145 				return ("Intel Celeron(r)");
2146 			if (xeon)
2147 				return (cpi->cpi_model == 5 ?
2148 				    "Intel Pentium(r) II Xeon(tm)" :
2149 				    "Intel Pentium(r) III Xeon(tm)");
2150 			return (cpi->cpi_model == 5 ?
2151 			    "Intel Pentium(r) II or Pentium(r) II Xeon(tm)" :
2152 			    "Intel Pentium(r) III or Pentium(r) III Xeon(tm)");
2153 		default:
2154 			break;
2155 		}
2156 	default:
2157 		break;
2158 	}
2159 
2160 	/* BrandID is present if the field is nonzero */
2161 	if (cpi->cpi_brandid != 0) {
2162 		static const struct {
2163 			uint_t bt_bid;
2164 			const char *bt_str;
2165 		} brand_tbl[] = {
2166 			{ 0x1,	"Intel(r) Celeron(r)" },
2167 			{ 0x2,	"Intel(r) Pentium(r) III" },
2168 			{ 0x3,	"Intel(r) Pentium(r) III Xeon(tm)" },
2169 			{ 0x4,	"Intel(r) Pentium(r) III" },
2170 			{ 0x6,	"Mobile Intel(r) Pentium(r) III" },
2171 			{ 0x7,	"Mobile Intel(r) Celeron(r)" },
2172 			{ 0x8,	"Intel(r) Pentium(r) 4" },
2173 			{ 0x9,	"Intel(r) Pentium(r) 4" },
2174 			{ 0xa,	"Intel(r) Celeron(r)" },
2175 			{ 0xb,	"Intel(r) Xeon(tm)" },
2176 			{ 0xc,	"Intel(r) Xeon(tm) MP" },
2177 			{ 0xe,	"Mobile Intel(r) Pentium(r) 4" },
2178 			{ 0xf,	"Mobile Intel(r) Celeron(r)" },
2179 			{ 0x11, "Mobile Genuine Intel(r)" },
2180 			{ 0x12, "Intel(r) Celeron(r) M" },
2181 			{ 0x13, "Mobile Intel(r) Celeron(r)" },
2182 			{ 0x14, "Intel(r) Celeron(r)" },
2183 			{ 0x15, "Mobile Genuine Intel(r)" },
2184 			{ 0x16,	"Intel(r) Pentium(r) M" },
2185 			{ 0x17, "Mobile Intel(r) Celeron(r)" }
2186 		};
2187 		uint_t btblmax = sizeof (brand_tbl) / sizeof (brand_tbl[0]);
2188 		uint_t sgn;
2189 
2190 		sgn = (cpi->cpi_family << 8) |
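		/*
		 * Pack family/model/stepping into a single signature
		 * (family in bits 11:8, model in 7:4, stepping in 3:0)
		 * for the special-case checks below.
		 */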
2191 		    (cpi->cpi_model << 4) | cpi->cpi_step;
2192 
2193 		for (i = 0; i < btblmax; i++)
2194 			if (brand_tbl[i].bt_bid == cpi->cpi_brandid)
2195 				break;
2196 		if (i < btblmax) {
2197 			if (sgn == 0x6b1 && cpi->cpi_brandid == 3)
2198 				return ("Intel(r) Celeron(r)");
2199 			if (sgn < 0xf13 && cpi->cpi_brandid == 0xb)
2200 				return ("Intel(r) Xeon(tm) MP");
2201 			if (sgn < 0xf13 && cpi->cpi_brandid == 0xe)
2202 				return ("Intel(r) Xeon(tm)");
2203 			return (brand_tbl[i].bt_str);
2204 		}
2205 	}
2206 
2207 	return (NULL);
2208 }
2209 
2210 static const char *
2211 amd_cpubrand(const struct cpuid_info *cpi)
2212 {
2213 	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
2214 	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5)
2215 		return ("i486 compatible");
2216 
2217 	switch (cpi->cpi_family) {
2218 	case 5:
2219 		switch (cpi->cpi_model) {
2220 		case 0:
2221 		case 1:
2222 		case 2:
2223 		case 3:
2224 		case 4:
2225 		case 5:
2226 			return ("AMD-K5(r)");
2227 		case 6:
2228 		case 7:
2229 			return ("AMD-K6(r)");
2230 		case 8:
2231 			return ("AMD-K6(r)-2");
2232 		case 9:
2233 			return ("AMD-K6(r)-III");
2234 		default:
2235 			return ("AMD (family 5)");
2236 		}
2237 	case 6:
2238 		switch (cpi->cpi_model) {
2239 		case 1:
2240 			return ("AMD-K7(tm)");
2241 		case 0:
2242 		case 2:
2243 		case 4:
2244 			return ("AMD Athlon(tm)");
2245 		case 3:
2246 		case 7:
2247 			return ("AMD Duron(tm)");
2248 		case 6:
2249 		case 8:
2250 		case 10:
2251 			/*
2252 			 * Use the L2 cache size to distinguish
2253 			 */
2254 			return ((cpi->cpi_extd[6].cp_ecx >> 16) >= 256 ?
2255 			    "AMD Athlon(tm)" : "AMD Duron(tm)");
2256 		default:
2257 			return ("AMD (family 6)");
2258 		}
2259 	default:
2260 		break;
2261 	}
2262 
2263 	if (cpi->cpi_family == 0xf && cpi->cpi_model == 5 &&
2264 	    cpi->cpi_brandid != 0) {
2265 		switch (BITX(cpi->cpi_brandid, 7, 5)) {
2266 		case 3:
2267 			return ("AMD Opteron(tm) UP 1xx");
2268 		case 4:
2269 			return ("AMD Opteron(tm) DP 2xx");
2270 		case 5:
2271 			return ("AMD Opteron(tm) MP 8xx");
2272 		default:
2273 			return ("AMD Opteron(tm)");
2274 		}
2275 	}
2276 
2277 	return (NULL);
2278 }
2279 
2280 static const char *
2281 cyrix_cpubrand(struct cpuid_info *cpi, uint_t type)
2282 {
2283 	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
2284 	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5 ||
2285 	    type == X86_TYPE_CYRIX_486)
2286 		return ("i486 compatible");
2287 
2288 	switch (type) {
2289 	case X86_TYPE_CYRIX_6x86:
2290 		return ("Cyrix 6x86");
2291 	case X86_TYPE_CYRIX_6x86L:
2292 		return ("Cyrix 6x86L");
2293 	case X86_TYPE_CYRIX_6x86MX:
2294 		return ("Cyrix 6x86MX");
2295 	case X86_TYPE_CYRIX_GXm:
2296 		return ("Cyrix GXm");
2297 	case X86_TYPE_CYRIX_MediaGX:
2298 		return ("Cyrix MediaGX");
2299 	case X86_TYPE_CYRIX_MII:
2300 		return ("Cyrix M2");
2301 	case X86_TYPE_VIA_CYRIX_III:
2302 		return ("VIA Cyrix M3");
2303 	default:
2304 		/*
2305 		 * Have another wild guess ..
2306 		 */
2307 		if (cpi->cpi_family == 4 && cpi->cpi_model == 9)
2308 			return ("Cyrix 5x86");
2309 		else if (cpi->cpi_family == 5) {
2310 			switch (cpi->cpi_model) {
2311 			case 2:
2312 				return ("Cyrix 6x86");	/* Cyrix M1 */
2313 			case 4:
2314 				return ("Cyrix MediaGX");
2315 			default:
2316 				break;
2317 			}
2318 		} else if (cpi->cpi_family == 6) {
2319 			switch (cpi->cpi_model) {
2320 			case 0:
2321 				return ("Cyrix 6x86MX"); /* Cyrix M2? */
2322 			case 5:
2323 			case 6:
2324 			case 7:
2325 			case 8:
2326 			case 9:
2327 				return ("VIA C3");
2328 			default:
2329 				break;
2330 			}
2331 		}
2332 		break;
2333 	}
2334 	return (NULL);
2335 }
2336 
2337 /*
2338  * This only gets called when the CPU brand string leaves
2339  * (0x80000002, 0x80000003, 0x80000004) aren't available,
2340  * or contain null bytes for some reason.
2341  */
2342 static void
2343 fabricate_brandstr(struct cpuid_info *cpi)
2344 {
2345 	const char *brand = NULL;
2346 
2347 	switch (cpi->cpi_vendor) {
2348 	case X86_VENDOR_Intel:
2349 		brand = intel_cpubrand(cpi);
2350 		break;
2351 	case X86_VENDOR_AMD:
2352 		brand = amd_cpubrand(cpi);
2353 		break;
2354 	case X86_VENDOR_Cyrix:
2355 		brand = cyrix_cpubrand(cpi, x86_type);
2356 		break;
2357 	case X86_VENDOR_NexGen:
2358 		if (cpi->cpi_family == 5 && cpi->cpi_model == 0)
2359 			brand = "NexGen Nx586";
2360 		break;
2361 	case X86_VENDOR_Centaur:
2362 		if (cpi->cpi_family == 5)
2363 			switch (cpi->cpi_model) {
2364 			case 4:
2365 				brand = "Centaur C6";
2366 				break;
2367 			case 8:
2368 				brand = "Centaur C2";
2369 				break;
2370 			case 9:
2371 				brand = "Centaur C3";
2372 				break;
2373 			default:
2374 				break;
2375 			}
2376 		break;
2377 	case X86_VENDOR_Rise:
2378 		if (cpi->cpi_family == 5 &&
2379 		    (cpi->cpi_model == 0 || cpi->cpi_model == 2))
2380 			brand = "Rise mP6";
2381 		break;
2382 	case X86_VENDOR_SiS:
2383 		if (cpi->cpi_family == 5 && cpi->cpi_model == 0)
2384 			brand = "SiS 55x";
2385 		break;
2386 	case X86_VENDOR_TM:
2387 		if (cpi->cpi_family == 5 && cpi->cpi_model == 4)
2388 			brand = "Transmeta Crusoe TM3x00 or TM5x00";
2389 		break;
2390 	case X86_VENDOR_NSC:
2391 	case X86_VENDOR_UMC:
2392 	default:
2393 		break;
2394 	}
2395 	if (brand) {
2396 		(void) strcpy((char *)cpi->cpi_brandstr, brand);
2397 		return;
2398 	}
2399 
2400 	/*
2401 	 * If all else fails ...
2402 	 */
2403 	(void) snprintf(cpi->cpi_brandstr, sizeof (cpi->cpi_brandstr),
2404 	    "%s %d.%d.%d", cpi->cpi_vendorstr, cpi->cpi_family,
2405 	    cpi->cpi_model, cpi->cpi_step);
2406 }
2407 
2408 /*
2409  * This routine is called just after kernel memory allocation
2410  * becomes available on cpu0, and as part of mp_startup() on
2411  * the other cpus.
2412  *
2413  * Fixup the brand string, and collect any information from cpuid
2414  * that requires dynamically allocated storage to represent.
2415  */
2416 /*ARGSUSED*/
2417 void
2418 cpuid_pass3(cpu_t *cpu)
2419 {
2420 	int	i, max, shft, level, size;
2421 	struct cpuid_regs regs;
2422 	struct cpuid_regs *cp;
2423 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
2424 
2425 	ASSERT(cpi->cpi_pass == 2);
2426 
2427 	/*
2428 	 * Function 4: Deterministic cache parameters
2429 	 *
2430 	 * Take this opportunity to detect the number of threads
2431 	 * sharing the last level cache, and construct a corresponding
2432 	 * cache id. The respective cpuid_info members are initialized
2433 	 * to the default case of "no last level cache sharing".
2434 	 */
2435 	cpi->cpi_ncpu_shr_last_cache = 1;
2436 	cpi->cpi_last_lvl_cacheid = cpu->cpu_id;
2437 
2438 	if (cpi->cpi_maxeax >= 4 && cpi->cpi_vendor == X86_VENDOR_Intel) {
2439 
2440 		/*
2441 		 * Find the # of elements (size) returned by fn 4, and along
2442 		 * the way detect last level cache sharing details.
2443 		 */
2444 		bzero(&regs, sizeof (regs));
2445 		cp = &regs;
2446 		for (i = 0, max = 0; i < CPI_FN4_ECX_MAX; i++) {
2447 			cp->cp_eax = 4;
2448 			cp->cp_ecx = i;
2449 
2450 			(void) __cpuid_insn(cp);
2451 
2452 			if (CPI_CACHE_TYPE(cp) == 0)
2453 				break;
2454 			level = CPI_CACHE_LVL(cp);
2455 			if (level > max) {
2456 				max = level;
2457 				cpi->cpi_ncpu_shr_last_cache =
2458 				    CPI_NTHR_SHR_CACHE(cp) + 1;
2459 			}
2460 		}
2461 		cpi->cpi_std_4_size = size = i;
2462 
2463 		/*
2464 		 * Allocate the cpi_std_4 array. The first element
2465 		 * references the regs for fn 4, %ecx == 0, which
2466 		 * cpuid_pass2() stashed in cpi->cpi_std[4].
2467 		 */
2468 		if (size > 0) {
2469 			cpi->cpi_std_4 =
2470 			    kmem_alloc(size * sizeof (cp), KM_SLEEP);
2471 			cpi->cpi_std_4[0] = &cpi->cpi_std[4];
2472 
2473 			/*
2474 			 * Allocate storage to hold the additional regs
2475 			 * for function 4, %ecx == 1 .. cpi_std_4_size.
2476 			 *
2477 			 * The regs for fn 4, %ecx == 0 has already
2478 			 * been allocated as indicated above.
2479 			 */
2480 			for (i = 1; i < size; i++) {
2481 				cp = cpi->cpi_std_4[i] =
2482 				    kmem_zalloc(sizeof (regs), KM_SLEEP);
2483 				cp->cp_eax = 4;
2484 				cp->cp_ecx = i;
2485 
2486 				(void) __cpuid_insn(cp);
2487 			}
2488 		}
2489 		/*
2490 		 * Determine the number of bits needed to represent
2491 		 * the number of CPUs sharing the last level cache.
2492 		 *
2493 		 * Shift off that number of bits from the APIC id to
2494 		 * derive the cache id.
2495 		 */
2496 		shft = 0;
2497 		for (i = 1; i < cpi->cpi_ncpu_shr_last_cache; i <<= 1)
2498 			shft++;
2499 		cpi->cpi_last_lvl_cacheid = cpi->cpi_apicid >> shft;
2500 	}
2501 
2502 	/*
2503 	 * Now fixup the brand string
2504 	 */
2505 	if ((cpi->cpi_xmaxeax & 0x80000000) == 0) {
2506 		fabricate_brandstr(cpi);
2507 	} else {
2508 
2509 		/*
2510 		 * If we successfully extracted a brand string from the cpuid
2511 		 * instruction, clean it up by removing leading spaces and
2512 		 * similar junk.
2513 		 */
2514 		if (cpi->cpi_brandstr[0]) {
2515 			size_t maxlen = sizeof (cpi->cpi_brandstr);
2516 			char *src, *dst;
2517 
2518 			dst = src = (char *)cpi->cpi_brandstr;
2519 			src[maxlen - 1] = '\0';
2520 			/*
2521 			 * strip leading spaces
2522 			 */
2523 			while (*src == ' ')
2524 				src++;
2525 			/*
2526 			 * Remove any "Genuine" or "Authentic" prefixes
2527 			 */
2528 			if (strncmp(src, "Genuine ", 8) == 0)
2529 				src += 8;
2530 			if (strncmp(src, "Authentic ", 10) == 0)
2531 				src += 10;
2532 
2533 			/*
2534 			 * Now do an in-place copy.
2535 			 * Map (R) to (r) and (TM) to (tm).
2536 			 * The era of teletypes is long gone, and there's
2537 			 * -really- no need to shout.
2538 			 */
2539 			while (*src != '\0') {
2540 				if (src[0] == '(') {
2541 					if (strncmp(src + 1, "R)", 2) == 0) {
2542 						(void) strncpy(dst, "(r)", 3);
2543 						src += 3;
2544 						dst += 3;
2545 						continue;
2546 					}
2547 					if (strncmp(src + 1, "TM)", 3) == 0) {
2548 						(void) strncpy(dst, "(tm)", 4);
2549 						src += 4;
2550 						dst += 4;
2551 						continue;
2552 					}
2553 				}
2554 				*dst++ = *src++;
2555 			}
2556 			*dst = '\0';
2557 
2558 			/*
2559 			 * Finally, remove any trailing spaces
2560 			 */
2561 			while (--dst > cpi->cpi_brandstr)
2562 				if (*dst == ' ')
2563 					*dst = '\0';
2564 				else
2565 					break;
2566 		} else
2567 			fabricate_brandstr(cpi);
2568 	}
2569 	cpi->cpi_pass = 3;
2570 }
2571 
2572 /*
2573  * This routine is called out of bind_hwcap() much later in the life
2574  * of the kernel (post_startup()).  The job of this routine is to resolve
2575  * the hardware feature support and kernel support for those features into
2576  * what we're actually going to tell applications via the aux vector.
2577  */
2578 void
2579 cpuid_pass4(cpu_t *cpu, uint_t *hwcap_out)
2580 {
2581 	struct cpuid_info *cpi;
2582 	uint_t hwcap_flags = 0, hwcap_flags_2 = 0;
2583 
2584 	if (cpu == NULL)
2585 		cpu = CPU;
2586 	cpi = cpu->cpu_m.mcpu_cpi;
2587 
2588 	ASSERT(cpi->cpi_pass == 3);
2589 
2590 	if (cpi->cpi_maxeax >= 1) {
2591 		uint32_t *edx = &cpi->cpi_support[STD_EDX_FEATURES];
2592 		uint32_t *ecx = &cpi->cpi_support[STD_ECX_FEATURES];
2593 
2594 		*edx = CPI_FEATURES_EDX(cpi);
2595 		*ecx = CPI_FEATURES_ECX(cpi);
2596 
2597 		/*
2598 		 * [these require explicit kernel support]
2599 		 */
2600 		if (!is_x86_feature(x86_featureset, X86FSET_SEP))
2601 			*edx &= ~CPUID_INTC_EDX_SEP;
2602 
2603 		if (!is_x86_feature(x86_featureset, X86FSET_SSE))
2604 			*edx &= ~(CPUID_INTC_EDX_FXSR|CPUID_INTC_EDX_SSE);
2605 		if (!is_x86_feature(x86_featureset, X86FSET_SSE2))
2606 			*edx &= ~CPUID_INTC_EDX_SSE2;
2607 
2608 		if (!is_x86_feature(x86_featureset, X86FSET_HTT))
2609 			*edx &= ~CPUID_INTC_EDX_HTT;
2610 
2611 		if (!is_x86_feature(x86_featureset, X86FSET_SSE3))
2612 			*ecx &= ~CPUID_INTC_ECX_SSE3;
2613 
2614 		if (!is_x86_feature(x86_featureset, X86FSET_SSSE3))
2615 			*ecx &= ~CPUID_INTC_ECX_SSSE3;
2616 		if (!is_x86_feature(x86_featureset, X86FSET_SSE4_1))
2617 			*ecx &= ~CPUID_INTC_ECX_SSE4_1;
2618 		if (!is_x86_feature(x86_featureset, X86FSET_SSE4_2))
2619 			*ecx &= ~CPUID_INTC_ECX_SSE4_2;
2620 		if (!is_x86_feature(x86_featureset, X86FSET_AES))
2621 			*ecx &= ~CPUID_INTC_ECX_AES;
2622 		if (!is_x86_feature(x86_featureset, X86FSET_PCLMULQDQ))
2623 			*ecx &= ~CPUID_INTC_ECX_PCLMULQDQ;
2624 		if (!is_x86_feature(x86_featureset, X86FSET_XSAVE))
2625 			*ecx &= ~(CPUID_INTC_ECX_XSAVE |
2626 			    CPUID_INTC_ECX_OSXSAVE);
2627 		if (!is_x86_feature(x86_featureset, X86FSET_AVX))
2628 			*ecx &= ~CPUID_INTC_ECX_AVX;
2629 		if (!is_x86_feature(x86_featureset, X86FSET_F16C))
2630 			*ecx &= ~CPUID_INTC_ECX_F16C;
2631 
2632 		/*
2633 		 * [no explicit support required beyond x87 fp context]
2634 		 */
2635 		if (!fpu_exists)
2636 			*edx &= ~(CPUID_INTC_EDX_FPU | CPUID_INTC_EDX_MMX);
2637 
2638 		/*
2639 		 * Now map the supported feature vector to things that we
2640 		 * think userland will care about.
2641 		 */
2642 		if (*edx & CPUID_INTC_EDX_SEP)
2643 			hwcap_flags |= AV_386_SEP;
2644 		if (*edx & CPUID_INTC_EDX_SSE)
2645 			hwcap_flags |= AV_386_FXSR | AV_386_SSE;
2646 		if (*edx & CPUID_INTC_EDX_SSE2)
2647 			hwcap_flags |= AV_386_SSE2;
2648 		if (*ecx & CPUID_INTC_ECX_SSE3)
2649 			hwcap_flags |= AV_386_SSE3;
2650 		if (*ecx & CPUID_INTC_ECX_SSSE3)
2651 			hwcap_flags |= AV_386_SSSE3;
2652 		if (*ecx & CPUID_INTC_ECX_SSE4_1)
2653 			hwcap_flags |= AV_386_SSE4_1;
2654 		if (*ecx & CPUID_INTC_ECX_SSE4_2)
2655 			hwcap_flags |= AV_386_SSE4_2;
2656 		if (*ecx & CPUID_INTC_ECX_MOVBE)
2657 			hwcap_flags |= AV_386_MOVBE;
2658 		if (*ecx & CPUID_INTC_ECX_AES)
2659 			hwcap_flags |= AV_386_AES;
2660 		if (*ecx & CPUID_INTC_ECX_PCLMULQDQ)
2661 			hwcap_flags |= AV_386_PCLMULQDQ;
2662 		if ((*ecx & CPUID_INTC_ECX_XSAVE) &&
2663 		    (*ecx & CPUID_INTC_ECX_OSXSAVE)) {
2664 			hwcap_flags |= AV_386_XSAVE;
2665 
2666 			if (*ecx & CPUID_INTC_ECX_AVX) {
2667 				hwcap_flags |= AV_386_AVX;
2668 				if (*ecx & CPUID_INTC_ECX_F16C)
2669 					hwcap_flags_2 |= AV_386_2_F16C;
2670 			}
2671 		}
2672 		if (*ecx & CPUID_INTC_ECX_VMX)
2673 			hwcap_flags |= AV_386_VMX;
2674 		if (*ecx & CPUID_INTC_ECX_POPCNT)
2675 			hwcap_flags |= AV_386_POPCNT;
2676 		if (*edx & CPUID_INTC_EDX_FPU)
2677 			hwcap_flags |= AV_386_FPU;
2678 		if (*edx & CPUID_INTC_EDX_MMX)
2679 			hwcap_flags |= AV_386_MMX;
2680 
2681 		if (*edx & CPUID_INTC_EDX_TSC)
2682 			hwcap_flags |= AV_386_TSC;
2683 		if (*edx & CPUID_INTC_EDX_CX8)
2684 			hwcap_flags |= AV_386_CX8;
2685 		if (*edx & CPUID_INTC_EDX_CMOV)
2686 			hwcap_flags |= AV_386_CMOV;
2687 		if (*ecx & CPUID_INTC_ECX_CX16)
2688 			hwcap_flags |= AV_386_CX16;
2689 
2690 		if (*ecx & CPUID_INTC_ECX_RDRAND)
2691 			hwcap_flags_2 |= AV_386_2_RDRAND;
2692 	}
2693 
2694 	if (cpi->cpi_xmaxeax < 0x80000001)
2695 		goto pass4_done;
2696 
2697 	switch (cpi->cpi_vendor) {
2698 		struct cpuid_regs cp;
2699 		uint32_t *edx, *ecx;
2700 
2701 	case X86_VENDOR_Intel:
2702 		/*
2703 		 * Seems like Intel duplicated what was necessary
2704 		 * here to make the initial crop of 64-bit OSes work.
2705 		 * Hopefully, those are the only "extended" bits
2706 		 * they'll add.
2707 		 */
2708 		/*FALLTHROUGH*/
2709 
2710 	case X86_VENDOR_AMD:
2711 		edx = &cpi->cpi_support[AMD_EDX_FEATURES];
2712 		ecx = &cpi->cpi_support[AMD_ECX_FEATURES];
2713 
2714 		*edx = CPI_FEATURES_XTD_EDX(cpi);
2715 		*ecx = CPI_FEATURES_XTD_ECX(cpi);
2716 
2717 		/*
2718 		 * [these features require explicit kernel support]
2719 		 */
2720 		switch (cpi->cpi_vendor) {
2721 		case X86_VENDOR_Intel:
2722 			if (!is_x86_feature(x86_featureset, X86FSET_TSCP))
2723 				*edx &= ~CPUID_AMD_EDX_TSCP;
2724 			break;
2725 
2726 		case X86_VENDOR_AMD:
2727 			if (!is_x86_feature(x86_featureset, X86FSET_TSCP))
2728 				*edx &= ~CPUID_AMD_EDX_TSCP;
2729 			if (!is_x86_feature(x86_featureset, X86FSET_SSE4A))
2730 				*ecx &= ~CPUID_AMD_ECX_SSE4A;
2731 			break;
2732 
2733 		default:
2734 			break;
2735 		}
2736 
2737 		/*
2738 		 * [no explicit support required beyond
2739 		 * x87 fp context and exception handlers]
2740 		 */
2741 		if (!fpu_exists)
2742 			*edx &= ~(CPUID_AMD_EDX_MMXamd |
2743 			    CPUID_AMD_EDX_3DNow | CPUID_AMD_EDX_3DNowx);
2744 
2745 		if (!is_x86_feature(x86_featureset, X86FSET_NX))
2746 			*edx &= ~CPUID_AMD_EDX_NX;
2747 #if !defined(__amd64)
2748 		*edx &= ~CPUID_AMD_EDX_LM;
2749 #endif
2750 		/*
2751 		 * Now map the supported feature vector to
2752 		 * things that we think userland will care about.
2753 		 */
2754 #if defined(__amd64)
2755 		if (*edx & CPUID_AMD_EDX_SYSC)
2756 			hwcap_flags |= AV_386_AMD_SYSC;
2757 #endif
2758 		if (*edx & CPUID_AMD_EDX_MMXamd)
2759 			hwcap_flags |= AV_386_AMD_MMX;
2760 		if (*edx & CPUID_AMD_EDX_3DNow)
2761 			hwcap_flags |= AV_386_AMD_3DNow;
2762 		if (*edx & CPUID_AMD_EDX_3DNowx)
2763 			hwcap_flags |= AV_386_AMD_3DNowx;
2764 		if (*ecx & CPUID_AMD_ECX_SVM)
2765 			hwcap_flags |= AV_386_AMD_SVM;
2766 
2767 		switch (cpi->cpi_vendor) {
2768 		case X86_VENDOR_AMD:
2769 			if (*edx & CPUID_AMD_EDX_TSCP)
2770 				hwcap_flags |= AV_386_TSCP;
2771 			if (*ecx & CPUID_AMD_ECX_AHF64)
2772 				hwcap_flags |= AV_386_AHF;
2773 			if (*ecx & CPUID_AMD_ECX_SSE4A)
2774 				hwcap_flags |= AV_386_AMD_SSE4A;
2775 			if (*ecx & CPUID_AMD_ECX_LZCNT)
2776 				hwcap_flags |= AV_386_AMD_LZCNT;
2777 			break;
2778 
2779 		case X86_VENDOR_Intel:
2780 			if (*edx & CPUID_AMD_EDX_TSCP)
2781 				hwcap_flags |= AV_386_TSCP;
2782 			/*
2783 			 * Aarrgh.
2784 			 * Intel uses a different bit in the same word.
2785 			 */
2786 			if (*ecx & CPUID_INTC_ECX_AHF64)
2787 				hwcap_flags |= AV_386_AHF;
2788 			break;
2789 
2790 		default:
2791 			break;
2792 		}
2793 		break;
2794 
2795 	case X86_VENDOR_TM:
2796 		cp.cp_eax = 0x80860001;
2797 		(void) __cpuid_insn(&cp);
2798 		cpi->cpi_support[TM_EDX_FEATURES] = cp.cp_edx;
2799 		break;
2800 
2801 	default:
2802 		break;
2803 	}
2804 
2805 pass4_done:
2806 	cpi->cpi_pass = 4;
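	/*
	 * Hand the first and second hardware capability words back to
	 * the caller, if requested.
	 */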
2807 	if (hwcap_out != NULL) {
2808 		hwcap_out[0] = hwcap_flags;
2809 		hwcap_out[1] = hwcap_flags_2;
2810 	}
2811 }
2812 
2813 
2814 /*
2815  * Simulate the cpuid instruction using the data we previously
2816  * captured about this CPU.  We try our best to return the truth
2817  * about the hardware, independently of kernel support.
2818  */
2819 uint32_t
2820 cpuid_insn(cpu_t *cpu, struct cpuid_regs *cp)
2821 {
2822 	struct cpuid_info *cpi;
2823 	struct cpuid_regs *xcp;
2824 
2825 	if (cpu == NULL)
2826 		cpu = CPU;
2827 	cpi = cpu->cpu_m.mcpu_cpi;
2828 
2829 	ASSERT(cpuid_checkpass(cpu, 3));
2830 
2831 	/*
2832 	 * CPUID data is cached in two separate places: cpi_std for standard
2833 	 * CPUID functions, and cpi_extd for extended CPUID functions.
2834 	 */
2835 	if (cp->cp_eax <= cpi->cpi_maxeax && cp->cp_eax < NMAX_CPI_STD)
2836 		xcp = &cpi->cpi_std[cp->cp_eax];
2837 	else if (cp->cp_eax >= 0x80000000 && cp->cp_eax <= cpi->cpi_xmaxeax &&
2838 	    cp->cp_eax < 0x80000000 + NMAX_CPI_EXTD)
2839 		xcp = &cpi->cpi_extd[cp->cp_eax - 0x80000000];
2840 	else
2841 		/*
2842 		 * The caller is asking for data from an input parameter which
2843 		 * the kernel has not cached.  In this case we go fetch from
2844 		 * the hardware and return the data directly to the user.
2845 		 */
2846 		return (__cpuid_insn(cp));
2847 
2848 	cp->cp_eax = xcp->cp_eax;
2849 	cp->cp_ebx = xcp->cp_ebx;
2850 	cp->cp_ecx = xcp->cp_ecx;
2851 	cp->cp_edx = xcp->cp_edx;
2852 	return (cp->cp_eax);
2853 }
2854 
2855 int
2856 cpuid_checkpass(cpu_t *cpu, int pass)
2857 {
2858 	return (cpu != NULL && cpu->cpu_m.mcpu_cpi != NULL &&
2859 	    cpu->cpu_m.mcpu_cpi->cpi_pass >= pass);
2860 }
2861 
2862 int
2863 cpuid_getbrandstr(cpu_t *cpu, char *s, size_t n)
2864 {
2865 	ASSERT(cpuid_checkpass(cpu, 3));
2866 
2867 	return (snprintf(s, n, "%s", cpu->cpu_m.mcpu_cpi->cpi_brandstr));
2868 }
2869 
2870 int
2871 cpuid_is_cmt(cpu_t *cpu)
2872 {
2873 	if (cpu == NULL)
2874 		cpu = CPU;
2875 
2876 	ASSERT(cpuid_checkpass(cpu, 1));
2877 
2878 	return (cpu->cpu_m.mcpu_cpi->cpi_chipid >= 0);
2879 }
2880 
2881 /*
2882  * AMD and Intel both implement the 64-bit variant of the syscall
2883  * instruction (syscallq), so if there's -any- support for syscall,
2884  * cpuid currently says "yes, we support this".
2885  *
2886  * However, Intel decided to -not- implement the 32-bit variant of the
2887  * syscall instruction, so we provide a predicate to allow our caller
2888  * to test that subtlety here.
2889  *
2890  * XXPV	Currently, 32-bit syscall instructions don't work via the hypervisor,
2891  *	even in the case where the hardware would in fact support them.
2892  */
2893 /*ARGSUSED*/
2894 int
2895 cpuid_syscall32_insn(cpu_t *cpu)
2896 {
2897 	ASSERT(cpuid_checkpass((cpu == NULL ? CPU : cpu), 1));
2898 
2899 #if !defined(__xpv)
2900 	if (cpu == NULL)
2901 		cpu = CPU;
2902 
2903 	/*CSTYLED*/
2904 	{
2905 		struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
2906 
2907 		if (cpi->cpi_vendor == X86_VENDOR_AMD &&
2908 		    cpi->cpi_xmaxeax >= 0x80000001 &&
2909 		    (CPI_FEATURES_XTD_EDX(cpi) & CPUID_AMD_EDX_SYSC))
2910 			return (1);
2911 	}
2912 #endif
2913 	return (0);
2914 }
2915 
2916 int
2917 cpuid_getidstr(cpu_t *cpu, char *s, size_t n)
2918 {
2919 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
2920 
2921 	static const char fmt[] =
2922 	    "x86 (%s %X family %d model %d step %d clock %d MHz)";
2923 	static const char fmt_ht[] =
2924 	    "x86 (chipid 0x%x %s %X family %d model %d step %d clock %d MHz)";
2925 
2926 	ASSERT(cpuid_checkpass(cpu, 1));
2927 
2928 	if (cpuid_is_cmt(cpu))
2929 		return (snprintf(s, n, fmt_ht, cpi->cpi_chipid,
2930 		    cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax,
2931 		    cpi->cpi_family, cpi->cpi_model,
2932 		    cpi->cpi_step, cpu->cpu_type_info.pi_clock));
2933 	return (snprintf(s, n, fmt,
2934 	    cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax,
2935 	    cpi->cpi_family, cpi->cpi_model,
2936 	    cpi->cpi_step, cpu->cpu_type_info.pi_clock));
2937 }
2938 
2939 const char *
2940 cpuid_getvendorstr(cpu_t *cpu)
2941 {
2942 	ASSERT(cpuid_checkpass(cpu, 1));
2943 	return ((const char *)cpu->cpu_m.mcpu_cpi->cpi_vendorstr);
2944 }
2945 
2946 uint_t
2947 cpuid_getvendor(cpu_t *cpu)
2948 {
2949 	ASSERT(cpuid_checkpass(cpu, 1));
2950 	return (cpu->cpu_m.mcpu_cpi->cpi_vendor);
2951 }
2952 
2953 uint_t
2954 cpuid_getfamily(cpu_t *cpu)
2955 {
2956 	ASSERT(cpuid_checkpass(cpu, 1));
2957 	return (cpu->cpu_m.mcpu_cpi->cpi_family);
2958 }
2959 
2960 uint_t
2961 cpuid_getmodel(cpu_t *cpu)
2962 {
2963 	ASSERT(cpuid_checkpass(cpu, 1));
2964 	return (cpu->cpu_m.mcpu_cpi->cpi_model);
2965 }
2966 
2967 uint_t
2968 cpuid_get_ncpu_per_chip(cpu_t *cpu)
2969 {
2970 	ASSERT(cpuid_checkpass(cpu, 1));
2971 	return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_per_chip);
2972 }
2973 
2974 uint_t
2975 cpuid_get_ncore_per_chip(cpu_t *cpu)
2976 {
2977 	ASSERT(cpuid_checkpass(cpu, 1));
2978 	return (cpu->cpu_m.mcpu_cpi->cpi_ncore_per_chip);
2979 }
2980 
2981 uint_t
2982 cpuid_get_ncpu_sharing_last_cache(cpu_t *cpu)
2983 {
2984 	ASSERT(cpuid_checkpass(cpu, 2));
2985 	return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_shr_last_cache);
2986 }
2987 
2988 id_t
2989 cpuid_get_last_lvl_cacheid(cpu_t *cpu)
2990 {
2991 	ASSERT(cpuid_checkpass(cpu, 2));
2992 	return (cpu->cpu_m.mcpu_cpi->cpi_last_lvl_cacheid);
2993 }
2994 
2995 uint_t
2996 cpuid_getstep(cpu_t *cpu)
2997 {
2998 	ASSERT(cpuid_checkpass(cpu, 1));
2999 	return (cpu->cpu_m.mcpu_cpi->cpi_step);
3000 }
3001 
3002 uint_t
3003 cpuid_getsig(struct cpu *cpu)
3004 {
3005 	ASSERT(cpuid_checkpass(cpu, 1));
3006 	return (cpu->cpu_m.mcpu_cpi->cpi_std[1].cp_eax);
3007 }
3008 
3009 uint32_t
3010 cpuid_getchiprev(struct cpu *cpu)
3011 {
3012 	ASSERT(cpuid_checkpass(cpu, 1));
3013 	return (cpu->cpu_m.mcpu_cpi->cpi_chiprev);
3014 }
3015 
3016 const char *
3017 cpuid_getchiprevstr(struct cpu *cpu)
3018 {
3019 	ASSERT(cpuid_checkpass(cpu, 1));
3020 	return (cpu->cpu_m.mcpu_cpi->cpi_chiprevstr);
3021 }
3022 
3023 uint32_t
3024 cpuid_getsockettype(struct cpu *cpu)
3025 {
3026 	ASSERT(cpuid_checkpass(cpu, 1));
3027 	return (cpu->cpu_m.mcpu_cpi->cpi_socket);
3028 }
3029 
3030 const char *
3031 cpuid_getsocketstr(cpu_t *cpu)
3032 {
3033 	static const char *socketstr = NULL;
3034 	struct cpuid_info *cpi;
3035 
3036 	ASSERT(cpuid_checkpass(cpu, 1));
3037 	cpi = cpu->cpu_m.mcpu_cpi;
3038 
3039 	/* Assume that socket types are the same across the system */
3040 	if (socketstr == NULL)
3041 		socketstr = _cpuid_sktstr(cpi->cpi_vendor, cpi->cpi_family,
3042 		    cpi->cpi_model, cpi->cpi_step);
3043 
3044 
3045 	return (socketstr);
3046 }
3047 
3048 int
3049 cpuid_get_chipid(cpu_t *cpu)
3050 {
3051 	ASSERT(cpuid_checkpass(cpu, 1));
3052 
3053 	if (cpuid_is_cmt(cpu))
3054 		return (cpu->cpu_m.mcpu_cpi->cpi_chipid);
3055 	return (cpu->cpu_id);
3056 }
3057 
3058 id_t
3059 cpuid_get_coreid(cpu_t *cpu)
3060 {
3061 	ASSERT(cpuid_checkpass(cpu, 1));
3062 	return (cpu->cpu_m.mcpu_cpi->cpi_coreid);
3063 }
3064 
3065 int
3066 cpuid_get_pkgcoreid(cpu_t *cpu)
3067 {
3068 	ASSERT(cpuid_checkpass(cpu, 1));
3069 	return (cpu->cpu_m.mcpu_cpi->cpi_pkgcoreid);
3070 }
3071 
3072 int
3073 cpuid_get_clogid(cpu_t *cpu)
3074 {
3075 	ASSERT(cpuid_checkpass(cpu, 1));
3076 	return (cpu->cpu_m.mcpu_cpi->cpi_clogid);
3077 }
3078 
3079 int
3080 cpuid_get_cacheid(cpu_t *cpu)
3081 {
3082 	ASSERT(cpuid_checkpass(cpu, 1));
3083 	return (cpu->cpu_m.mcpu_cpi->cpi_last_lvl_cacheid);
3084 }
3085 
3086 uint_t
3087 cpuid_get_procnodeid(cpu_t *cpu)
3088 {
3089 	ASSERT(cpuid_checkpass(cpu, 1));
3090 	return (cpu->cpu_m.mcpu_cpi->cpi_procnodeid);
3091 }
3092 
3093 uint_t
3094 cpuid_get_procnodes_per_pkg(cpu_t *cpu)
3095 {
3096 	ASSERT(cpuid_checkpass(cpu, 1));
3097 	return (cpu->cpu_m.mcpu_cpi->cpi_procnodes_per_pkg);
3098 }
3099 
3100 uint_t
3101 cpuid_get_compunitid(cpu_t *cpu)
3102 {
3103 	ASSERT(cpuid_checkpass(cpu, 1));
3104 	return (cpu->cpu_m.mcpu_cpi->cpi_compunitid);
3105 }
3106 
3107 uint_t
3108 cpuid_get_cores_per_compunit(cpu_t *cpu)
3109 {
3110 	ASSERT(cpuid_checkpass(cpu, 1));
3111 	return (cpu->cpu_m.mcpu_cpi->cpi_cores_per_compunit);
3112 }
3113 
3114 /*ARGSUSED*/
3115 int
3116 cpuid_have_cr8access(cpu_t *cpu)
3117 {
3118 #if defined(__amd64)
3119 	return (1);
3120 #else
3121 	struct cpuid_info *cpi;
3122 
3123 	ASSERT(cpu != NULL);
3124 	cpi = cpu->cpu_m.mcpu_cpi;
3125 	if (cpi->cpi_vendor == X86_VENDOR_AMD && cpi->cpi_maxeax >= 1 &&
3126 	    (CPI_FEATURES_XTD_ECX(cpi) & CPUID_AMD_ECX_CR8D) != 0)
3127 		return (1);
3128 	return (0);
3129 #endif
3130 }
3131 
3132 uint32_t
3133 cpuid_get_apicid(cpu_t *cpu)
3134 {
3135 	ASSERT(cpuid_checkpass(cpu, 1));
3136 	if (cpu->cpu_m.mcpu_cpi->cpi_maxeax < 1) {
3137 		return (UINT32_MAX);
3138 	} else {
3139 		return (cpu->cpu_m.mcpu_cpi->cpi_apicid);
3140 	}
3141 }
3142 
3143 void
3144 cpuid_get_addrsize(cpu_t *cpu, uint_t *pabits, uint_t *vabits)
3145 {
3146 	struct cpuid_info *cpi;
3147 
3148 	if (cpu == NULL)
3149 		cpu = CPU;
3150 	cpi = cpu->cpu_m.mcpu_cpi;
3151 
3152 	ASSERT(cpuid_checkpass(cpu, 1));
3153 
3154 	if (pabits)
3155 		*pabits = cpi->cpi_pabits;
3156 	if (vabits)
3157 		*vabits = cpi->cpi_vabits;
3158 }
3159 
3160 /*
3161  * Returns the number of data TLB entries for a corresponding
3162  * pagesize.  If it can't be computed, or isn't known, the
3163  * routine returns zero.  If you ask about an architecturally
3164  * impossible pagesize, the routine will panic (so that the
3165  * hat implementor knows that things are inconsistent).
3166  */
3167 uint_t
3168 cpuid_get_dtlb_nent(cpu_t *cpu, size_t pagesize)
3169 {
3170 	struct cpuid_info *cpi;
3171 	uint_t dtlb_nent = 0;
3172 
3173 	if (cpu == NULL)
3174 		cpu = CPU;
3175 	cpi = cpu->cpu_m.mcpu_cpi;
3176 
3177 	ASSERT(cpuid_checkpass(cpu, 1));
3178 
3179 	/*
3180 	 * Check the L2 TLB info
3181 	 */
3182 	if (cpi->cpi_xmaxeax >= 0x80000006) {
3183 		struct cpuid_regs *cp = &cpi->cpi_extd[6];
3184 
3185 		switch (pagesize) {
3186 
3187 		case 4 * 1024:
3188 			/*
3189 			 * All zero in the top 16 bits of the register
3190 			 * indicates a unified TLB. Size is in low 16 bits.
3191 			 */
3192 			if ((cp->cp_ebx & 0xffff0000) == 0)
3193 				dtlb_nent = cp->cp_ebx & 0x0000ffff;
3194 			else
3195 				dtlb_nent = BITX(cp->cp_ebx, 27, 16);
3196 			break;
3197 
3198 		case 2 * 1024 * 1024:
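			/*
			 * Same encoding as the 4K case: a zero upper half
			 * means a unified TLB sized by the low 16 bits,
			 * otherwise the d-TLB count is in bits 27:16.
			 */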
3199 			if ((cp->cp_eax & 0xffff0000) == 0)
3200 				dtlb_nent = cp->cp_eax & 0x0000ffff;
3201 			else
3202 				dtlb_nent = BITX(cp->cp_eax, 27, 16);
3203 			break;
3204 
3205 		default:
3206 			panic("unknown L2 pagesize");
3207 			/*NOTREACHED*/
3208 		}
3209 	}
3210 
3211 	if (dtlb_nent != 0)
3212 		return (dtlb_nent);
3213 
3214 	/*
3215 	 * No L2 TLB support for this size, try L1.
3216 	 */
3217 	if (cpi->cpi_xmaxeax >= 0x80000005) {
3218 		struct cpuid_regs *cp = &cpi->cpi_extd[5];
3219 
3220 		switch (pagesize) {
3221 		case 4 * 1024:
3222 			dtlb_nent = BITX(cp->cp_ebx, 23, 16);
3223 			break;
3224 		case 2 * 1024 * 1024:
3225 			dtlb_nent = BITX(cp->cp_eax, 23, 16);
3226 			break;
3227 		default:
3228 			panic("unknown L1 d-TLB pagesize");
3229 			/*NOTREACHED*/
3230 		}
3231 	}
3232 
3233 	return (dtlb_nent);
3234 }
3235 
3236 /*
3237  * Return 0 if the erratum is not present or not applicable, positive
3238  * if it is, and negative if the status of the erratum is unknown.
3239  *
3240  * See "Revision Guide for AMD Athlon(tm) 64 and AMD Opteron(tm)
3241  * Processors" #25759, Rev 3.57, August 2005
3242  */
3243 int
3244 cpuid_opteron_erratum(cpu_t *cpu, uint_t erratum)
3245 {
3246 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
3247 	uint_t eax;
3248 
3249 	/*
3250 	 * Bail out if this CPU isn't an AMD CPU, or if it's
3251 	 * a legacy (32-bit) AMD CPU.
3252 	 */
3253 	if (cpi->cpi_vendor != X86_VENDOR_AMD ||
3254 	    cpi->cpi_family == 4 || cpi->cpi_family == 5 ||
3255 	    cpi->cpi_family == 6)
3256 
3257 		return (0);
3258 
3259 	eax = cpi->cpi_std[1].cp_eax;
3260 
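/*
 * The macros below match specific family 0xf and 0x10 silicon revisions
 * by their full CPUID signature (the %eax value from function 1, with
 * extended family/model included), grouped by revision.
 */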
3261 #define	SH_B0(eax)	(eax == 0xf40 || eax == 0xf50)
3262 #define	SH_B3(eax) 	(eax == 0xf51)
3263 #define	B(eax)		(SH_B0(eax) || SH_B3(eax))
3264 
3265 #define	SH_C0(eax)	(eax == 0xf48 || eax == 0xf58)
3266 
3267 #define	SH_CG(eax)	(eax == 0xf4a || eax == 0xf5a || eax == 0xf7a)
3268 #define	DH_CG(eax)	(eax == 0xfc0 || eax == 0xfe0 || eax == 0xff0)
3269 #define	CH_CG(eax)	(eax == 0xf82 || eax == 0xfb2)
3270 #define	CG(eax)		(SH_CG(eax) || DH_CG(eax) || CH_CG(eax))
3271 
3272 #define	SH_D0(eax)	(eax == 0x10f40 || eax == 0x10f50 || eax == 0x10f70)
3273 #define	DH_D0(eax)	(eax == 0x10fc0 || eax == 0x10ff0)
3274 #define	CH_D0(eax)	(eax == 0x10f80 || eax == 0x10fb0)
3275 #define	D0(eax)		(SH_D0(eax) || DH_D0(eax) || CH_D0(eax))
3276 
3277 #define	SH_E0(eax)	(eax == 0x20f50 || eax == 0x20f40 || eax == 0x20f70)
3278 #define	JH_E1(eax)	(eax == 0x20f10)	/* JH8_E0 had 0x20f30 */
3279 #define	DH_E3(eax)	(eax == 0x20fc0 || eax == 0x20ff0)
3280 #define	SH_E4(eax)	(eax == 0x20f51 || eax == 0x20f71)
3281 #define	BH_E4(eax)	(eax == 0x20fb1)
3282 #define	SH_E5(eax)	(eax == 0x20f42)
3283 #define	DH_E6(eax)	(eax == 0x20ff2 || eax == 0x20fc2)
3284 #define	JH_E6(eax)	(eax == 0x20f12 || eax == 0x20f32)
3285 #define	EX(eax)		(SH_E0(eax) || JH_E1(eax) || DH_E3(eax) || \
3286 			    SH_E4(eax) || BH_E4(eax) || SH_E5(eax) || \
3287 			    DH_E6(eax) || JH_E6(eax))
3288 
3289 #define	DR_AX(eax)	(eax == 0x100f00 || eax == 0x100f01 || eax == 0x100f02)
3290 #define	DR_B0(eax)	(eax == 0x100f20)
3291 #define	DR_B1(eax)	(eax == 0x100f21)
3292 #define	DR_BA(eax)	(eax == 0x100f2a)
3293 #define	DR_B2(eax)	(eax == 0x100f22)
3294 #define	DR_B3(eax)	(eax == 0x100f23)
3295 #define	RB_C0(eax)	(eax == 0x100f40)
3296 
3297 	switch (erratum) {
3298 	case 1:
3299 		return (cpi->cpi_family < 0x10);
3300 	case 51:	/* what does the asterisk mean? */
3301 		return (B(eax) || SH_C0(eax) || CG(eax));
3302 	case 52:
3303 		return (B(eax));
3304 	case 57:
3305 		return (cpi->cpi_family <= 0x11);
3306 	case 58:
3307 		return (B(eax));
3308 	case 60:
3309 		return (cpi->cpi_family <= 0x11);
3310 	case 61:
3311 	case 62:
3312 	case 63:
3313 	case 64:
3314 	case 65:
3315 	case 66:
3316 	case 68:
3317 	case 69:
3318 	case 70:
3319 	case 71:
3320 		return (B(eax));
3321 	case 72:
3322 		return (SH_B0(eax));
3323 	case 74:
3324 		return (B(eax));
3325 	case 75:
3326 		return (cpi->cpi_family < 0x10);
3327 	case 76:
3328 		return (B(eax));
3329 	case 77:
3330 		return (cpi->cpi_family <= 0x11);
3331 	case 78:
3332 		return (B(eax) || SH_C0(eax));
3333 	case 79:
3334 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
3335 	case 80:
3336 	case 81:
3337 	case 82:
3338 		return (B(eax));
3339 	case 83:
3340 		return (B(eax) || SH_C0(eax) || CG(eax));
3341 	case 85:
3342 		return (cpi->cpi_family < 0x10);
3343 	case 86:
3344 		return (SH_C0(eax) || CG(eax));
3345 	case 88:
3346 #if !defined(__amd64)
3347 		return (0);
3348 #else
3349 		return (B(eax) || SH_C0(eax));
3350 #endif
3351 	case 89:
3352 		return (cpi->cpi_family < 0x10);
3353 	case 90:
3354 		return (B(eax) || SH_C0(eax) || CG(eax));
3355 	case 91:
3356 	case 92:
3357 		return (B(eax) || SH_C0(eax));
3358 	case 93:
3359 		return (SH_C0(eax));
3360 	case 94:
3361 		return (B(eax) || SH_C0(eax) || CG(eax));
3362 	case 95:
3363 #if !defined(__amd64)
3364 		return (0);
3365 #else
3366 		return (B(eax) || SH_C0(eax));
3367 #endif
3368 	case 96:
3369 		return (B(eax) || SH_C0(eax) || CG(eax));
3370 	case 97:
3371 	case 98:
3372 		return (SH_C0(eax) || CG(eax));
3373 	case 99:
3374 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
3375 	case 100:
3376 		return (B(eax) || SH_C0(eax));
3377 	case 101:
3378 	case 103:
3379 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
3380 	case 104:
3381 		return (SH_C0(eax) || CG(eax) || D0(eax));
3382 	case 105:
3383 	case 106:
3384 	case 107:
3385 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
3386 	case 108:
3387 		return (DH_CG(eax));
3388 	case 109:
3389 		return (SH_C0(eax) || CG(eax) || D0(eax));
3390 	case 110:
3391 		return (D0(eax) || EX(eax));
3392 	case 111:
3393 		return (CG(eax));
3394 	case 112:
3395 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
3396 	case 113:
3397 		return (eax == 0x20fc0);
3398 	case 114:
3399 		return (SH_E0(eax) || JH_E1(eax) || DH_E3(eax));
3400 	case 115:
3401 		return (SH_E0(eax) || JH_E1(eax));
3402 	case 116:
3403 		return (SH_E0(eax) || JH_E1(eax) || DH_E3(eax));
3404 	case 117:
3405 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
3406 	case 118:
3407 		return (SH_E0(eax) || JH_E1(eax) || SH_E4(eax) || BH_E4(eax) ||
3408 		    JH_E6(eax));
3409 	case 121:
3410 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
3411 	case 122:
3412 		return (cpi->cpi_family < 0x10 || cpi->cpi_family == 0x11);
3413 	case 123:
3414 		return (JH_E1(eax) || BH_E4(eax) || JH_E6(eax));
3415 	case 131:
3416 		return (cpi->cpi_family < 0x10);
3417 	case 6336786:
3418 		/*
3419 		 * Test for AdvPowerMgmtInfo.TscPStateInvariant
3420 		 * if this is a K8 family or newer processor
3421 		 */
3422 		if (CPI_FAMILY(cpi) == 0xf) {
3423 			struct cpuid_regs regs;
3424 			regs.cp_eax = 0x80000007;
3425 			(void) __cpuid_insn(&regs);
3426 			return (!(regs.cp_edx & 0x100));
3427 		}
3428 		return (0);
3429 	case 6323525:
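		/*
		 * The expression below packs the combined (extended + base)
		 * family into the high byte and the combined model into the
		 * low byte, then checks for parts older than family 0xf,
		 * model 0x40.
		 */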
3430 		return (((((eax >> 12) & 0xff00) + (eax & 0xf00)) |
3431 		    (((eax >> 4) & 0xf) | ((eax >> 12) & 0xf0))) < 0xf40);
3432 
3433 	case 6671130:
3434 		/*
3435 		 * Check for processors (pre-Shanghai) that do not provide
3436 		 * optimal management of 1GB PTEs in their TLB.
3437 		 */
3438 		return (cpi->cpi_family == 0x10 && cpi->cpi_model < 4);
3439 
3440 	case 298:
3441 		return (DR_AX(eax) || DR_B0(eax) || DR_B1(eax) || DR_BA(eax) ||
3442 		    DR_B2(eax) || RB_C0(eax));
3443 
3444 	case 721:
3445 #if defined(__amd64)
3446 		return (cpi->cpi_family == 0x10 || cpi->cpi_family == 0x12);
3447 #else
3448 		return (0);
3449 #endif
3450 
3451 	default:
3452 		return (-1);
3453 
3454 	}
3455 }
3456 
3457 /*
3458  * Determine if specified erratum is present via OSVW (OS Visible Workaround).
3459  * Return 1 if erratum is present, 0 if not present and -1 if indeterminate.
3460  */
3461 int
3462 osvw_opteron_erratum(cpu_t *cpu, uint_t erratum)
3463 {
3464 	struct cpuid_info	*cpi;
3465 	uint_t			osvwid;
3466 	static int		osvwfeature = -1;
3467 	uint64_t		osvwlength;
3468 
3469 
3470 	cpi = cpu->cpu_m.mcpu_cpi;
3471 
3472 	/* confirm OSVW supported */
3473 	if (osvwfeature == -1) {
3474 		osvwfeature = cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW;
3475 	} else {
3476 		/* assert that osvw feature setting is consistent on all cpus */
3477 		ASSERT(osvwfeature ==
3478 		    (cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW));
3479 	}
3480 	if (!osvwfeature)
3481 		return (-1);
3482 
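	/*
	 * The OSVW ID length MSR reports how many erratum status bits are
	 * implemented; ids at or beyond that length have unknown status.
	 */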
3483 	osvwlength = rdmsr(MSR_AMD_OSVW_ID_LEN) & OSVW_ID_LEN_MASK;
3484 
3485 	switch (erratum) {
3486 	case 298:	/* osvwid is 0 */
3487 		osvwid = 0;
3488 		if (osvwlength <= (uint64_t)osvwid) {
3489 			/* osvwid 0 is unknown */
3490 			return (-1);
3491 		}
3492 
3493 		/*
3494 		 * Check the OSVW STATUS MSR to determine the state
3495 		 * of the erratum where:
3496 		 *   0 - fixed by HW
3497 		 *   1 - BIOS has applied the workaround when BIOS
3498 		 *   workaround is available. (Or for other errata,
3499 		 *   OS workaround is required.)
3500 		 * For a value of 1, caller will confirm that the
3501 		 * erratum 298 workaround has indeed been applied by BIOS.
3502 		 *
3503 		 * A 1 may be set in cpus that have a HW fix
3504 		 * in a mixed cpu system. Regarding erratum 298:
3505 		 *   In a multiprocessor platform, the workaround above
3506 		 *   should be applied to all processors regardless of
3507 		 *   silicon revision when an affected processor is
3508 		 *   present.
3509 		 */
3510 
3511 		return (rdmsr(MSR_AMD_OSVW_STATUS +
3512 		    (osvwid / OSVW_ID_CNT_PER_MSR)) &
3513 		    (1ULL << (osvwid % OSVW_ID_CNT_PER_MSR)));
3514 
3515 	default:
3516 		return (-1);
3517 	}
3518 }
3519 
3520 static const char assoc_str[] = "associativity";
3521 static const char line_str[] = "line-size";
3522 static const char size_str[] = "size";
3523 
3524 static void
3525 add_cache_prop(dev_info_t *devi, const char *label, const char *type,
3526     uint32_t val)
3527 {
3528 	char buf[128];
3529 
3530 	/*
3531 	 * ndi_prop_update_int() is used because it is desirable for
3532 	 * DDI_PROP_HW_DEF and DDI_PROP_DONTSLEEP to be set.
3533 	 */
3534 	if (snprintf(buf, sizeof (buf), "%s-%s", label, type) < sizeof (buf))
3535 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, devi, buf, val);
3536 }
3537 
3538 /*
3539  * Intel-style cache/tlb description
3540  *
3541  * Standard cpuid level 2 gives a randomly ordered
3542  * selection of tags that index into a table that describes
3543  * cache and tlb properties.
3544  */
3545 
3546 static const char l1_icache_str[] = "l1-icache";
3547 static const char l1_dcache_str[] = "l1-dcache";
3548 static const char l2_cache_str[] = "l2-cache";
3549 static const char l3_cache_str[] = "l3-cache";
3550 static const char itlb4k_str[] = "itlb-4K";
3551 static const char dtlb4k_str[] = "dtlb-4K";
3552 static const char itlb2M_str[] = "itlb-2M";
3553 static const char itlb4M_str[] = "itlb-4M";
3554 static const char dtlb4M_str[] = "dtlb-4M";
3555 static const char dtlb24_str[] = "dtlb0-2M-4M";
3556 static const char itlb424_str[] = "itlb-4K-2M-4M";
3557 static const char itlb24_str[] = "itlb-2M-4M";
3558 static const char dtlb44_str[] = "dtlb-4K-4M";
3559 static const char sl1_dcache_str[] = "sectored-l1-dcache";
3560 static const char sl2_cache_str[] = "sectored-l2-cache";
3561 static const char itrace_str[] = "itrace-cache";
3562 static const char sl3_cache_str[] = "sectored-l3-cache";
3563 static const char sh_l2_tlb4k_str[] = "shared-l2-tlb-4k";
3564 
3565 static const struct cachetab {
3566 	uint8_t 	ct_code;
3567 	uint8_t		ct_assoc;
3568 	uint16_t 	ct_line_size;
3569 	size_t		ct_size;
3570 	const char	*ct_label;
3571 } intel_ctab[] = {
3572 	/*
3573 	 * maintain descending order!
3574 	 *
3575 	 * Codes ignored - Reason
3576 	 * ----------------------
3577 	 * 40H - intel_cpuid_4_cache_info() disambiguates l2/l3 cache
3578 	 * f0H/f1H - Currently we do not interpret prefetch size by design
3579 	 */
3580 	{ 0xe4, 16, 64, 8*1024*1024, l3_cache_str},
3581 	{ 0xe3, 16, 64, 4*1024*1024, l3_cache_str},
3582 	{ 0xe2, 16, 64, 2*1024*1024, l3_cache_str},
3583 	{ 0xde, 12, 64, 6*1024*1024, l3_cache_str},
3584 	{ 0xdd, 12, 64, 3*1024*1024, l3_cache_str},
3585 	{ 0xdc, 12, 64, ((1*1024*1024)+(512*1024)), l3_cache_str},
3586 	{ 0xd8, 8, 64, 4*1024*1024, l3_cache_str},
3587 	{ 0xd7, 8, 64, 2*1024*1024, l3_cache_str},
3588 	{ 0xd6, 8, 64, 1*1024*1024, l3_cache_str},
3589 	{ 0xd2, 4, 64, 2*1024*1024, l3_cache_str},
3590 	{ 0xd1, 4, 64, 1*1024*1024, l3_cache_str},
3591 	{ 0xd0, 4, 64, 512*1024, l3_cache_str},
3592 	{ 0xca, 4, 0, 512, sh_l2_tlb4k_str},
3593 	{ 0xc0, 4, 0, 8, dtlb44_str },
3594 	{ 0xba, 4, 0, 64, dtlb4k_str },
3595 	{ 0xb4, 4, 0, 256, dtlb4k_str },
3596 	{ 0xb3, 4, 0, 128, dtlb4k_str },
3597 	{ 0xb2, 4, 0, 64, itlb4k_str },
3598 	{ 0xb0, 4, 0, 128, itlb4k_str },
3599 	{ 0x87, 8, 64, 1024*1024, l2_cache_str},
3600 	{ 0x86, 4, 64, 512*1024, l2_cache_str},
3601 	{ 0x85, 8, 32, 2*1024*1024, l2_cache_str},
3602 	{ 0x84, 8, 32, 1024*1024, l2_cache_str},
3603 	{ 0x83, 8, 32, 512*1024, l2_cache_str},
3604 	{ 0x82, 8, 32, 256*1024, l2_cache_str},
3605 	{ 0x80, 8, 64, 512*1024, l2_cache_str},
3606 	{ 0x7f, 2, 64, 512*1024, l2_cache_str},
3607 	{ 0x7d, 8, 64, 2*1024*1024, sl2_cache_str},
3608 	{ 0x7c, 8, 64, 1024*1024, sl2_cache_str},
3609 	{ 0x7b, 8, 64, 512*1024, sl2_cache_str},
3610 	{ 0x7a, 8, 64, 256*1024, sl2_cache_str},
3611 	{ 0x79, 8, 64, 128*1024, sl2_cache_str},
3612 	{ 0x78, 8, 64, 1024*1024, l2_cache_str},
3613 	{ 0x73, 8, 0, 64*1024, itrace_str},
3614 	{ 0x72, 8, 0, 32*1024, itrace_str},
3615 	{ 0x71, 8, 0, 16*1024, itrace_str},
3616 	{ 0x70, 8, 0, 12*1024, itrace_str},
3617 	{ 0x68, 4, 64, 32*1024, sl1_dcache_str},
3618 	{ 0x67, 4, 64, 16*1024, sl1_dcache_str},
3619 	{ 0x66, 4, 64, 8*1024, sl1_dcache_str},
3620 	{ 0x60, 8, 64, 16*1024, sl1_dcache_str},
3621 	{ 0x5d, 0, 0, 256, dtlb44_str},
3622 	{ 0x5c, 0, 0, 128, dtlb44_str},
3623 	{ 0x5b, 0, 0, 64, dtlb44_str},
3624 	{ 0x5a, 4, 0, 32, dtlb24_str},
3625 	{ 0x59, 0, 0, 16, dtlb4k_str},
3626 	{ 0x57, 4, 0, 16, dtlb4k_str},
3627 	{ 0x56, 4, 0, 16, dtlb4M_str},
3628 	{ 0x55, 0, 0, 7, itlb24_str},
3629 	{ 0x52, 0, 0, 256, itlb424_str},
3630 	{ 0x51, 0, 0, 128, itlb424_str},
3631 	{ 0x50, 0, 0, 64, itlb424_str},
3632 	{ 0x4f, 0, 0, 32, itlb4k_str},
3633 	{ 0x4e, 24, 64, 6*1024*1024, l2_cache_str},
3634 	{ 0x4d, 16, 64, 16*1024*1024, l3_cache_str},
3635 	{ 0x4c, 12, 64, 12*1024*1024, l3_cache_str},
3636 	{ 0x4b, 16, 64, 8*1024*1024, l3_cache_str},
3637 	{ 0x4a, 12, 64, 6*1024*1024, l3_cache_str},
3638 	{ 0x49, 16, 64, 4*1024*1024, l3_cache_str},
3639 	{ 0x48, 12, 64, 3*1024*1024, l2_cache_str},
3640 	{ 0x47, 8, 64, 8*1024*1024, l3_cache_str},
3641 	{ 0x46, 4, 64, 4*1024*1024, l3_cache_str},
3642 	{ 0x45, 4, 32, 2*1024*1024, l2_cache_str},
3643 	{ 0x44, 4, 32, 1024*1024, l2_cache_str},
3644 	{ 0x43, 4, 32, 512*1024, l2_cache_str},
3645 	{ 0x42, 4, 32, 256*1024, l2_cache_str},
3646 	{ 0x41, 4, 32, 128*1024, l2_cache_str},
3647 	{ 0x3e, 4, 64, 512*1024, sl2_cache_str},
3648 	{ 0x3d, 6, 64, 384*1024, sl2_cache_str},
3649 	{ 0x3c, 4, 64, 256*1024, sl2_cache_str},
3650 	{ 0x3b, 2, 64, 128*1024, sl2_cache_str},
3651 	{ 0x3a, 6, 64, 192*1024, sl2_cache_str},
3652 	{ 0x39, 4, 64, 128*1024, sl2_cache_str},
3653 	{ 0x30, 8, 64, 32*1024, l1_icache_str},
3654 	{ 0x2c, 8, 64, 32*1024, l1_dcache_str},
3655 	{ 0x29, 8, 64, 4096*1024, sl3_cache_str},
3656 	{ 0x25, 8, 64, 2048*1024, sl3_cache_str},
3657 	{ 0x23, 8, 64, 1024*1024, sl3_cache_str},
3658 	{ 0x22, 4, 64, 512*1024, sl3_cache_str},
3659 	{ 0x0e, 6, 64, 24*1024, l1_dcache_str},
3660 	{ 0x0d, 4, 32, 16*1024, l1_dcache_str},
3661 	{ 0x0c, 4, 32, 16*1024, l1_dcache_str},
3662 	{ 0x0b, 4, 0, 4, itlb4M_str},
3663 	{ 0x0a, 2, 32, 8*1024, l1_dcache_str},
3664 	{ 0x08, 4, 32, 16*1024, l1_icache_str},
3665 	{ 0x06, 4, 32, 8*1024, l1_icache_str},
3666 	{ 0x05, 4, 0, 32, dtlb4M_str},
3667 	{ 0x04, 4, 0, 8, dtlb4M_str},
3668 	{ 0x03, 4, 0, 64, dtlb4k_str},
3669 	{ 0x02, 4, 0, 2, itlb4M_str},
3670 	{ 0x01, 4, 0, 32, itlb4k_str},
3671 	{ 0 }
3672 };
3673 
3674 static const struct cachetab cyrix_ctab[] = {
3675 	{ 0x70, 4, 0, 32, "tlb-4K" },
3676 	{ 0x80, 4, 16, 16*1024, "l1-cache" },
3677 	{ 0 }
3678 };
3679 
3680 /*
3681  * Search a cache table for a matching entry
3682  */
3683 static const struct cachetab *
3684 find_cacheent(const struct cachetab *ct, uint_t code)
3685 {
3686 	if (code != 0) {
3687 		for (; ct->ct_code != 0; ct++)
3688 			if (ct->ct_code <= code)
3689 				break;
3690 		if (ct->ct_code == code)
3691 			return (ct);
3692 	}
3693 	return (NULL);
3694 }
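
/*
 * Illustrative sketch (editorial addition, compiled out): looking up a
 * single descriptor byte.  find_cacheent() relies on intel_ctab being
 * sorted by descending ct_code; it stops at the first entry whose code
 * is <= the requested one and then matches only on equality.  The
 * descriptor 0x2c used here is just an example value.
 */
#if 0
static void
cacheent_lookup_example(void)
{
	const struct cachetab *ct;

	if ((ct = find_cacheent(intel_ctab, 0x2c)) != NULL) {
		/* 0x2c decodes as a 32K, 8-way, 64 byte/line l1-dcache */
		cmn_err(CE_CONT, "%s: %u bytes, %u-way, %u byte lines\n",
		    ct->ct_label, (uint_t)ct->ct_size, (uint_t)ct->ct_assoc,
		    (uint_t)ct->ct_line_size);
	}
}
#endif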
3695 
3696 /*
3697  * Populate cachetab entry with L2 or L3 cache-information using
3698  * cpuid function 4. This function is called from intel_walk_cacheinfo()
3699  * when descriptor 0x49 is encountered. It returns 0 if no such cache
3700  * information is found.
3701  */
3702 static int
3703 intel_cpuid_4_cache_info(struct cachetab *ct, struct cpuid_info *cpi)
3704 {
3705 	uint32_t level, i;
3706 	int ret = 0;
3707 
3708 	for (i = 0; i < cpi->cpi_std_4_size; i++) {
3709 		level = CPI_CACHE_LVL(cpi->cpi_std_4[i]);
3710 
3711 		if (level == 2 || level == 3) {
3712 			ct->ct_assoc = CPI_CACHE_WAYS(cpi->cpi_std_4[i]) + 1;
3713 			ct->ct_line_size =
3714 			    CPI_CACHE_COH_LN_SZ(cpi->cpi_std_4[i]) + 1;
3715 			ct->ct_size = ct->ct_assoc *
3716 			    (CPI_CACHE_PARTS(cpi->cpi_std_4[i]) + 1) *
3717 			    ct->ct_line_size *
3718 			    (cpi->cpi_std_4[i]->cp_ecx + 1);
3719 
3720 			if (level == 2) {
3721 				ct->ct_label = l2_cache_str;
3722 			} else if (level == 3) {
3723 				ct->ct_label = l3_cache_str;
3724 			}
3725 			ret = 1;
3726 		}
3727 	}
3728 
3729 	return (ret);
3730 }
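
/*
 * Worked example (editorial addition, compiled out): the size computed
 * above is ways * partitions * line size * sets, where each cpuid
 * function 4 field holds "value - 1".  The field values below are
 * hypothetical and simply illustrate the arithmetic for a 4MB, 16-way
 * cache with 64 byte lines.
 */
#if 0
static void
cpuid_4_size_example(void)
{
	uint_t ways = 15 + 1;		/* EBX[31:22] holds ways - 1 */
	uint_t parts = 0 + 1;		/* EBX[21:12] holds partitions - 1 */
	uint_t linesz = 63 + 1;		/* EBX[11:0] holds line size - 1 */
	uint_t sets = 4095 + 1;		/* ECX holds sets - 1 */

	/* 16 * 1 * 64 * 4096 = 4MB */
	cmn_err(CE_CONT, "cache size: %u bytes\n",
	    ways * parts * linesz * sets);
}
#endif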
3731 
3732 /*
3733  * Walk the cacheinfo descriptors, applying 'func' to every valid element.
3734  * The walk is terminated if the walker returns non-zero.
3735  */
3736 static void
3737 intel_walk_cacheinfo(struct cpuid_info *cpi,
3738     void *arg, int (*func)(void *, const struct cachetab *))
3739 {
3740 	const struct cachetab *ct;
3741 	struct cachetab des_49_ct, des_b1_ct;
3742 	uint8_t *dp;
3743 	int i;
3744 
3745 	if ((dp = cpi->cpi_cacheinfo) == NULL)
3746 		return;
3747 	for (i = 0; i < cpi->cpi_ncache; i++, dp++) {
3748 		/*
3749 		 * For overloaded descriptor 0x49 we use cpuid function 4,
3750 		 * if supported by the current processor, to create
3751 		 * cache information.
3752 		 * For overloaded descriptor 0xb1 we use X86_PAE flag
3753 		 * to disambiguate the cache information.
3754 		 */
3755 		if (*dp == 0x49 && cpi->cpi_maxeax >= 0x4 &&
3756 		    intel_cpuid_4_cache_info(&des_49_ct, cpi) == 1) {
3757 			ct = &des_49_ct;
3758 		} else if (*dp == 0xb1) {
3759 			des_b1_ct.ct_code = 0xb1;
3760 			des_b1_ct.ct_assoc = 4;
3761 			des_b1_ct.ct_line_size = 0;
3762 			if (is_x86_feature(x86_featureset, X86FSET_PAE)) {
3763 				des_b1_ct.ct_size = 8;
3764 				des_b1_ct.ct_label = itlb2M_str;
3765 			} else {
3766 				des_b1_ct.ct_size = 4;
3767 				des_b1_ct.ct_label = itlb4M_str;
3768 			}
3769 			ct = &des_b1_ct;
3770 		} else {
3771 			if ((ct = find_cacheent(intel_ctab, *dp)) == NULL) {
3772 				continue;
3773 			}
3774 		}
3775 
3776 		if (func(arg, ct) != 0) {
3777 			break;
3778 		}
3779 	}
3780 }
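
/*
 * Illustrative sketch (editorial addition, compiled out): the walker
 * contract used above.  A callback receives the opaque argument plus a
 * decoded cachetab entry and returns non-zero to terminate the walk.
 * This hypothetical callback simply counts the descriptors it is shown,
 * e.g.  uint_t n = 0; intel_walk_cacheinfo(cpi, &n, count_cacheents);
 */
#if 0
static int
count_cacheents(void *arg, const struct cachetab *ct)
{
	uint_t *countp = arg;

	if (ct->ct_label != NULL)
		(*countp)++;
	return (0);		/* keep walking */
}
#endif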
3781 
3782 /*
3783  * (Like the Intel one, except for Cyrix CPUs)
3784  */
3785 static void
3786 cyrix_walk_cacheinfo(struct cpuid_info *cpi,
3787     void *arg, int (*func)(void *, const struct cachetab *))
3788 {
3789 	const struct cachetab *ct;
3790 	uint8_t *dp;
3791 	int i;
3792 
3793 	if ((dp = cpi->cpi_cacheinfo) == NULL)
3794 		return;
3795 	for (i = 0; i < cpi->cpi_ncache; i++, dp++) {
3796 		/*
3797 		 * Search Cyrix-specific descriptor table first ..
3798 		 */
3799 		if ((ct = find_cacheent(cyrix_ctab, *dp)) != NULL) {
3800 			if (func(arg, ct) != 0)
3801 				break;
3802 			continue;
3803 		}
3804 		/*
3805 		 * .. else fall back to the Intel one
3806 		 */
3807 		if ((ct = find_cacheent(intel_ctab, *dp)) != NULL) {
3808 			if (func(arg, ct) != 0)
3809 				break;
3810 			continue;
3811 		}
3812 	}
3813 }
3814 
3815 /*
3816  * A cacheinfo walker that adds associativity, line-size, and size properties
3817  * to the devinfo node it is passed as an argument.
3818  */
3819 static int
3820 add_cacheent_props(void *arg, const struct cachetab *ct)
3821 {
3822 	dev_info_t *devi = arg;
3823 
3824 	add_cache_prop(devi, ct->ct_label, assoc_str, ct->ct_assoc);
3825 	if (ct->ct_line_size != 0)
3826 		add_cache_prop(devi, ct->ct_label, line_str,
3827 		    ct->ct_line_size);
3828 	add_cache_prop(devi, ct->ct_label, size_str, ct->ct_size);
3829 	return (0);
3830 }
3831 
3832 
3833 static const char fully_assoc[] = "fully-associative?";
3834 
3835 /*
3836  * AMD style cache/tlb description
3837  *
3838  * Extended functions 5 and 6 directly describe properties of
3839  * tlbs and various cache levels.
3840  */
3841 static void
3842 add_amd_assoc(dev_info_t *devi, const char *label, uint_t assoc)
3843 {
3844 	switch (assoc) {
3845 	case 0:	/* reserved; ignore */
3846 		break;
3847 	default:
3848 		add_cache_prop(devi, label, assoc_str, assoc);
3849 		break;
3850 	case 0xff:
3851 		add_cache_prop(devi, label, fully_assoc, 1);
3852 		break;
3853 	}
3854 }
3855 
3856 static void
3857 add_amd_tlb(dev_info_t *devi, const char *label, uint_t assoc, uint_t size)
3858 {
3859 	if (size == 0)
3860 		return;
3861 	add_cache_prop(devi, label, size_str, size);
3862 	add_amd_assoc(devi, label, assoc);
3863 }
3864 
3865 static void
3866 add_amd_cache(dev_info_t *devi, const char *label,
3867     uint_t size, uint_t assoc, uint_t lines_per_tag, uint_t line_size)
3868 {
3869 	if (size == 0 || line_size == 0)
3870 		return;
3871 	add_amd_assoc(devi, label, assoc);
3872 	/*
3873 	 * Most AMD parts have a sectored cache. Multiple cache lines are
3874 	 * associated with each tag. A sector consists of all cache lines
3875 	 * associated with a tag. For example, the AMD K6-III has a sector
3876 	 * size of 2 cache lines per tag.
3877 	 */
3878 	if (lines_per_tag != 0)
3879 		add_cache_prop(devi, label, "lines-per-tag", lines_per_tag);
3880 	add_cache_prop(devi, label, line_str, line_size);
3881 	add_cache_prop(devi, label, size_str, size * 1024);
3882 }
3883 
3884 static void
3885 add_amd_l2_assoc(dev_info_t *devi, const char *label, uint_t assoc)
3886 {
3887 	switch (assoc) {
3888 	case 0:	/* off */
3889 		break;
3890 	case 1:
3891 	case 2:
3892 	case 4:
3893 		add_cache_prop(devi, label, assoc_str, assoc);
3894 		break;
3895 	case 6:
3896 		add_cache_prop(devi, label, assoc_str, 8);
3897 		break;
3898 	case 8:
3899 		add_cache_prop(devi, label, assoc_str, 16);
3900 		break;
3901 	case 0xf:
3902 		add_cache_prop(devi, label, fully_assoc, 1);
3903 		break;
3904 	default: /* reserved; ignore */
3905 		break;
3906 	}
3907 }
3908 
3909 static void
3910 add_amd_l2_tlb(dev_info_t *devi, const char *label, uint_t assoc, uint_t size)
3911 {
3912 	if (size == 0 || assoc == 0)
3913 		return;
3914 	add_amd_l2_assoc(devi, label, assoc);
3915 	add_cache_prop(devi, label, size_str, size);
3916 }
3917 
3918 static void
3919 add_amd_l2_cache(dev_info_t *devi, const char *label,
3920     uint_t size, uint_t assoc, uint_t lines_per_tag, uint_t line_size)
3921 {
3922 	if (size == 0 || assoc == 0 || line_size == 0)
3923 		return;
3924 	add_amd_l2_assoc(devi, label, assoc);
3925 	if (lines_per_tag != 0)
3926 		add_cache_prop(devi, label, "lines-per-tag", lines_per_tag);
3927 	add_cache_prop(devi, label, line_str, line_size);
3928 	add_cache_prop(devi, label, size_str, size * 1024);
3929 }
3930 
3931 static void
3932 amd_cache_info(struct cpuid_info *cpi, dev_info_t *devi)
3933 {
3934 	struct cpuid_regs *cp;
3935 
3936 	if (cpi->cpi_xmaxeax < 0x80000005)
3937 		return;
3938 	cp = &cpi->cpi_extd[5];
3939 
3940 	/*
3941 	 * 4M/2M L1 TLB configuration
3942 	 *
3943 	 * We report the size for 2M pages because AMD uses two
3944 	 * TLB entries for one 4M page.
3945 	 */
3946 	add_amd_tlb(devi, "dtlb-2M",
3947 	    BITX(cp->cp_eax, 31, 24), BITX(cp->cp_eax, 23, 16));
3948 	add_amd_tlb(devi, "itlb-2M",
3949 	    BITX(cp->cp_eax, 15, 8), BITX(cp->cp_eax, 7, 0));
3950 
3951 	/*
3952 	 * 4K L1 TLB configuration
3953 	 */
3954 
3955 	switch (cpi->cpi_vendor) {
3956 		uint_t nentries;
3957 	case X86_VENDOR_TM:
3958 		if (cpi->cpi_family >= 5) {
3959 			/*
3960 			 * Crusoe processors have 256 TLB entries, but
3961 			 * cpuid data format constrains them to only
3962 			 * reporting 255 of them.
3963 			 */
3964 			if ((nentries = BITX(cp->cp_ebx, 23, 16)) == 255)
3965 				nentries = 256;
3966 			/*
3967 			 * Crusoe processors also have a unified TLB
3968 			 */
3969 			add_amd_tlb(devi, "tlb-4K", BITX(cp->cp_ebx, 31, 24),
3970 			    nentries);
3971 			break;
3972 		}
3973 		/*FALLTHROUGH*/
3974 	default:
3975 		add_amd_tlb(devi, itlb4k_str,
3976 		    BITX(cp->cp_ebx, 31, 24), BITX(cp->cp_ebx, 23, 16));
3977 		add_amd_tlb(devi, dtlb4k_str,
3978 		    BITX(cp->cp_ebx, 15, 8), BITX(cp->cp_ebx, 7, 0));
3979 		break;
3980 	}
3981 
3982 	/*
3983 	 * data L1 cache configuration
3984 	 */
3985 
3986 	add_amd_cache(devi, l1_dcache_str,
3987 	    BITX(cp->cp_ecx, 31, 24), BITX(cp->cp_ecx, 23, 16),
3988 	    BITX(cp->cp_ecx, 15, 8), BITX(cp->cp_ecx, 7, 0));
3989 
3990 	/*
3991 	 * code L1 cache configuration
3992 	 */
3993 
3994 	add_amd_cache(devi, l1_icache_str,
3995 	    BITX(cp->cp_edx, 31, 24), BITX(cp->cp_edx, 23, 16),
3996 	    BITX(cp->cp_edx, 15, 8), BITX(cp->cp_edx, 7, 0));
3997 
3998 	if (cpi->cpi_xmaxeax < 0x80000006)
3999 		return;
4000 	cp = &cpi->cpi_extd[6];
4001 
4002 	/* Check for a unified L2 TLB for large pages */
4003 
4004 	if (BITX(cp->cp_eax, 31, 16) == 0)
4005 		add_amd_l2_tlb(devi, "l2-tlb-2M",
4006 		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
4007 	else {
4008 		add_amd_l2_tlb(devi, "l2-dtlb-2M",
4009 		    BITX(cp->cp_eax, 31, 28), BITX(cp->cp_eax, 27, 16));
4010 		add_amd_l2_tlb(devi, "l2-itlb-2M",
4011 		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
4012 	}
4013 
4014 	/* Check for a unified L2 TLB for 4K pages */
4015 
4016 	if (BITX(cp->cp_ebx, 31, 16) == 0) {
4017 		add_amd_l2_tlb(devi, "l2-tlb-4K",
4018 		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
4019 	} else {
4020 		add_amd_l2_tlb(devi, "l2-dtlb-4K",
4021 		    BITX(cp->cp_eax, 31, 28), BITX(cp->cp_eax, 27, 16));
4022 		add_amd_l2_tlb(devi, "l2-itlb-4K",
4023 		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
4024 	}
4025 
4026 	add_amd_l2_cache(devi, l2_cache_str,
4027 	    BITX(cp->cp_ecx, 31, 16), BITX(cp->cp_ecx, 15, 12),
4028 	    BITX(cp->cp_ecx, 11, 8), BITX(cp->cp_ecx, 7, 0));
4029 }
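
/*
 * Worked example (editorial addition, compiled out): how the BITX()
 * calls in amd_cache_info() slice a single register.  For cpuid
 * function 0x80000005, %ecx describes the L1 data cache as
 * size-KB[31:24] : assoc[23:16] : lines-per-tag[15:8] : line-size[7:0].
 * The value below is hypothetical: a 64K, 2-way cache with 1 line per
 * tag and 64 byte lines (add_amd_cache() scales the 64K up to bytes).
 */
#if 0
static void
amd_l1d_decode_example(void)
{
	uint32_t ecx = 0x40020140;

	cmn_err(CE_CONT, "size %uK assoc %u lines/tag %u line %u bytes\n",
	    (uint_t)BITX(ecx, 31, 24), (uint_t)BITX(ecx, 23, 16),
	    (uint_t)BITX(ecx, 15, 8), (uint_t)BITX(ecx, 7, 0));
}
#endif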
4030 
4031 /*
4032  * There are two basic ways that the x86 world describes its cache
4033  * and tlb architecture - Intel's way and AMD's way.
4034  *
4035  * Return which flavor of cache architecture we should use
4036  */
4037 static int
4038 x86_which_cacheinfo(struct cpuid_info *cpi)
4039 {
4040 	switch (cpi->cpi_vendor) {
4041 	case X86_VENDOR_Intel:
4042 		if (cpi->cpi_maxeax >= 2)
4043 			return (X86_VENDOR_Intel);
4044 		break;
4045 	case X86_VENDOR_AMD:
4046 		/*
4047 		 * The K5 model 1 was the first part from AMD that reported
4048 		 * cache sizes via extended cpuid functions.
4049 		 */
4050 		if (cpi->cpi_family > 5 ||
4051 		    (cpi->cpi_family == 5 && cpi->cpi_model >= 1))
4052 			return (X86_VENDOR_AMD);
4053 		break;
4054 	case X86_VENDOR_TM:
4055 		if (cpi->cpi_family >= 5)
4056 			return (X86_VENDOR_AMD);
4057 		/*FALLTHROUGH*/
4058 	default:
4059 		/*
4060 		 * If they have extended CPU data for 0x80000005
4061 		 * then we assume they have AMD-format cache
4062 		 * information.
4063 		 *
4064 		 * If not, and the vendor happens to be Cyrix,
4065 		 * then try our Cyrix-specific handler.
4066 		 *
4067 		 * If we're not Cyrix, then assume we're using Intel's
4068 		 * table-driven format instead.
4069 		 */
4070 		if (cpi->cpi_xmaxeax >= 0x80000005)
4071 			return (X86_VENDOR_AMD);
4072 		else if (cpi->cpi_vendor == X86_VENDOR_Cyrix)
4073 			return (X86_VENDOR_Cyrix);
4074 		else if (cpi->cpi_maxeax >= 2)
4075 			return (X86_VENDOR_Intel);
4076 		break;
4077 	}
4078 	return (-1);
4079 }
4080 
4081 void
4082 cpuid_set_cpu_properties(void *dip, processorid_t cpu_id,
4083     struct cpuid_info *cpi)
4084 {
4085 	dev_info_t *cpu_devi;
4086 	int create;
4087 
4088 	cpu_devi = (dev_info_t *)dip;
4089 
4090 	/* device_type */
4091 	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
4092 	    "device_type", "cpu");
4093 
4094 	/* reg */
4095 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4096 	    "reg", cpu_id);
4097 
4098 	/* cpu-mhz, and clock-frequency */
4099 	if (cpu_freq > 0) {
4100 		long long mul;
4101 
4102 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4103 		    "cpu-mhz", cpu_freq);
4104 		if ((mul = cpu_freq * 1000000LL) <= INT_MAX)
4105 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4106 			    "clock-frequency", (int)mul);
4107 	}
4108 
4109 	if (!is_x86_feature(x86_featureset, X86FSET_CPUID)) {
4110 		return;
4111 	}
4112 
4113 	/* vendor-id */
4114 	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
4115 	    "vendor-id", cpi->cpi_vendorstr);
4116 
4117 	if (cpi->cpi_maxeax == 0) {
4118 		return;
4119 	}
4120 
4121 	/*
4122 	 * family, model, and step
4123 	 */
4124 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4125 	    "family", CPI_FAMILY(cpi));
4126 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4127 	    "cpu-model", CPI_MODEL(cpi));
4128 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4129 	    "stepping-id", CPI_STEP(cpi));
4130 
4131 	/* type */
4132 	switch (cpi->cpi_vendor) {
4133 	case X86_VENDOR_Intel:
4134 		create = 1;
4135 		break;
4136 	default:
4137 		create = 0;
4138 		break;
4139 	}
4140 	if (create)
4141 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4142 		    "type", CPI_TYPE(cpi));
4143 
4144 	/* ext-family */
4145 	switch (cpi->cpi_vendor) {
4146 	case X86_VENDOR_Intel:
4147 	case X86_VENDOR_AMD:
4148 		create = cpi->cpi_family >= 0xf;
4149 		break;
4150 	default:
4151 		create = 0;
4152 		break;
4153 	}
4154 	if (create)
4155 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4156 		    "ext-family", CPI_FAMILY_XTD(cpi));
4157 
4158 	/* ext-model */
4159 	switch (cpi->cpi_vendor) {
4160 	case X86_VENDOR_Intel:
4161 		create = IS_EXTENDED_MODEL_INTEL(cpi);
4162 		break;
4163 	case X86_VENDOR_AMD:
4164 		create = CPI_FAMILY(cpi) == 0xf;
4165 		break;
4166 	default:
4167 		create = 0;
4168 		break;
4169 	}
4170 	if (create)
4171 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4172 		    "ext-model", CPI_MODEL_XTD(cpi));
4173 
4174 	/* generation */
4175 	switch (cpi->cpi_vendor) {
4176 	case X86_VENDOR_AMD:
4177 		/*
4178 		 * AMD K5 model 1 was the first part to support this
4179 		 */
4180 		create = cpi->cpi_xmaxeax >= 0x80000001;
4181 		break;
4182 	default:
4183 		create = 0;
4184 		break;
4185 	}
4186 	if (create)
4187 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4188 		    "generation", BITX((cpi)->cpi_extd[1].cp_eax, 11, 8));
4189 
4190 	/* brand-id */
4191 	switch (cpi->cpi_vendor) {
4192 	case X86_VENDOR_Intel:
4193 		/*
4194 		 * brand id first appeared on Pentium III Xeon model 8
4195 		 * and Celeron model 8 processors, and on Opteron
4196 		 */
4197 		create = cpi->cpi_family > 6 ||
4198 		    (cpi->cpi_family == 6 && cpi->cpi_model >= 8);
4199 		break;
4200 	case X86_VENDOR_AMD:
4201 		create = cpi->cpi_family >= 0xf;
4202 		break;
4203 	default:
4204 		create = 0;
4205 		break;
4206 	}
4207 	if (create && cpi->cpi_brandid != 0) {
4208 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4209 		    "brand-id", cpi->cpi_brandid);
4210 	}
4211 
4212 	/* chunks, and apic-id */
4213 	switch (cpi->cpi_vendor) {
4214 		/*
4215 		 * first available on Pentium IV and Opteron (K8)
4216 		 */
4217 	case X86_VENDOR_Intel:
4218 		create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf;
4219 		break;
4220 	case X86_VENDOR_AMD:
4221 		create = cpi->cpi_family >= 0xf;
4222 		break;
4223 	default:
4224 		create = 0;
4225 		break;
4226 	}
4227 	if (create) {
4228 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4229 		    "chunks", CPI_CHUNKS(cpi));
4230 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4231 		    "apic-id", cpi->cpi_apicid);
4232 		if (cpi->cpi_chipid >= 0) {
4233 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4234 			    "chip#", cpi->cpi_chipid);
4235 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4236 			    "clog#", cpi->cpi_clogid);
4237 		}
4238 	}
4239 
4240 	/* cpuid-features */
4241 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4242 	    "cpuid-features", CPI_FEATURES_EDX(cpi));
4243 
4244 
4245 	/* cpuid-features-ecx */
4246 	switch (cpi->cpi_vendor) {
4247 	case X86_VENDOR_Intel:
4248 		create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf;
4249 		break;
4250 	case X86_VENDOR_AMD:
4251 		create = cpi->cpi_family >= 0xf;
4252 		break;
4253 	default:
4254 		create = 0;
4255 		break;
4256 	}
4257 	if (create)
4258 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4259 		    "cpuid-features-ecx", CPI_FEATURES_ECX(cpi));
4260 
4261 	/* ext-cpuid-features */
4262 	switch (cpi->cpi_vendor) {
4263 	case X86_VENDOR_Intel:
4264 	case X86_VENDOR_AMD:
4265 	case X86_VENDOR_Cyrix:
4266 	case X86_VENDOR_TM:
4267 	case X86_VENDOR_Centaur:
4268 		create = cpi->cpi_xmaxeax >= 0x80000001;
4269 		break;
4270 	default:
4271 		create = 0;
4272 		break;
4273 	}
4274 	if (create) {
4275 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4276 		    "ext-cpuid-features", CPI_FEATURES_XTD_EDX(cpi));
4277 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4278 		    "ext-cpuid-features-ecx", CPI_FEATURES_XTD_ECX(cpi));
4279 	}
4280 
4281 	/*
4282 	 * Brand String first appeared in Intel Pentium IV, AMD K5
4283 	 * model 1, and Cyrix GXm.  On earlier models we try to
4284 	 * simulate something similar .. so this string should always
4285 	 * say -something- about the processor, however lame.
4286 	 */
4287 	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
4288 	    "brand-string", cpi->cpi_brandstr);
4289 
4290 	/*
4291 	 * Finally, cache and tlb information
4292 	 */
4293 	switch (x86_which_cacheinfo(cpi)) {
4294 	case X86_VENDOR_Intel:
4295 		intel_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props);
4296 		break;
4297 	case X86_VENDOR_Cyrix:
4298 		cyrix_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props);
4299 		break;
4300 	case X86_VENDOR_AMD:
4301 		amd_cache_info(cpi, cpu_devi);
4302 		break;
4303 	default:
4304 		break;
4305 	}
4306 }
4307 
4308 struct l2info {
4309 	int *l2i_csz;
4310 	int *l2i_lsz;
4311 	int *l2i_assoc;
4312 	int l2i_ret;
4313 };
4314 
4315 /*
4316  * A cacheinfo walker that fetches the size, line-size and associativity
4317  * of the L2 cache
4318  */
4319 static int
4320 intel_l2cinfo(void *arg, const struct cachetab *ct)
4321 {
4322 	struct l2info *l2i = arg;
4323 	int *ip;
4324 
4325 	if (ct->ct_label != l2_cache_str &&
4326 	    ct->ct_label != sl2_cache_str)
4327 		return (0);	/* not an L2 -- keep walking */
4328 
4329 	if ((ip = l2i->l2i_csz) != NULL)
4330 		*ip = ct->ct_size;
4331 	if ((ip = l2i->l2i_lsz) != NULL)
4332 		*ip = ct->ct_line_size;
4333 	if ((ip = l2i->l2i_assoc) != NULL)
4334 		*ip = ct->ct_assoc;
4335 	l2i->l2i_ret = ct->ct_size;
4336 	return (1);		/* was an L2 -- terminate walk */
4337 }
4338 
4339 /*
4340  * AMD L2/L3 Cache and TLB Associativity Field Definition:
4341  *
4342  *	Unlike the associativity for the L1 cache and tlb where the 8 bit
4343  *	value is the associativity, the associativity for the L2 cache and
4344  *	tlb is encoded in the following table. The 4 bit L2 value serves as
4345  *	an index into the amd_afd[] array to determine the associativity.
4346  *	-1 is undefined. 0 is fully associative.
4347  */
4348 
4349 static int amd_afd[] =
4350 	{-1, 1, 2, -1, 4, -1, 8, -1, 16, -1, 32, 48, 64, 96, 128, 0};
4351 
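
/*
 * Illustrative sketch (editorial addition, compiled out): decoding the
 * 4 bit L2/L3 associativity field through amd_afd[].  An encoded value
 * of 6, for instance, means 8-way; 0xf maps to table value 0, meaning
 * fully associative.  The %ecx value below is hypothetical.
 */
#if 0
static void
amd_l2_assoc_example(void)
{
	uint32_t ecx = 0x02006140;	/* 512K, encoded assoc 6, 64b lines */
	int assoc = amd_afd[BITX(ecx, 15, 12)];

	if (assoc == 0)
		cmn_err(CE_CONT, "fully associative\n");
	else if (assoc > 0)
		cmn_err(CE_CONT, "%d-way\n", assoc);	/* 6 -> 8-way */
}
#endif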
4352 static void
4353 amd_l2cacheinfo(struct cpuid_info *cpi, struct l2info *l2i)
4354 {
4355 	struct cpuid_regs *cp;
4356 	uint_t size, assoc;
4357 	int i;
4358 	int *ip;
4359 
4360 	if (cpi->cpi_xmaxeax < 0x80000006)
4361 		return;
4362 	cp = &cpi->cpi_extd[6];
4363 
4364 	if ((i = BITX(cp->cp_ecx, 15, 12)) != 0 &&
4365 	    (size = BITX(cp->cp_ecx, 31, 16)) != 0) {
4366 		uint_t cachesz = size * 1024;
4367 		assoc = amd_afd[i];
4368 
4369 		ASSERT(assoc != -1);
4370 
4371 		if ((ip = l2i->l2i_csz) != NULL)
4372 			*ip = cachesz;
4373 		if ((ip = l2i->l2i_lsz) != NULL)
4374 			*ip = BITX(cp->cp_ecx, 7, 0);
4375 		if ((ip = l2i->l2i_assoc) != NULL)
4376 			*ip = assoc;
4377 		l2i->l2i_ret = cachesz;
4378 	}
4379 }
4380 
4381 int
4382 getl2cacheinfo(cpu_t *cpu, int *csz, int *lsz, int *assoc)
4383 {
4384 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
4385 	struct l2info __l2info, *l2i = &__l2info;
4386 
4387 	l2i->l2i_csz = csz;
4388 	l2i->l2i_lsz = lsz;
4389 	l2i->l2i_assoc = assoc;
4390 	l2i->l2i_ret = -1;
4391 
4392 	switch (x86_which_cacheinfo(cpi)) {
4393 	case X86_VENDOR_Intel:
4394 		intel_walk_cacheinfo(cpi, l2i, intel_l2cinfo);
4395 		break;
4396 	case X86_VENDOR_Cyrix:
4397 		cyrix_walk_cacheinfo(cpi, l2i, intel_l2cinfo);
4398 		break;
4399 	case X86_VENDOR_AMD:
4400 		amd_l2cacheinfo(cpi, l2i);
4401 		break;
4402 	default:
4403 		break;
4404 	}
4405 	return (l2i->l2i_ret);
4406 }
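
/*
 * Usage sketch (editorial addition, compiled out): callers interested
 * in only some of the L2 parameters may pass NULL for the others; the
 * return value is the L2 size in bytes, or -1 if no L2 was found.
 */
#if 0
static void
l2info_example(cpu_t *cpu)
{
	int csz, lsz, assoc;

	if (getl2cacheinfo(cpu, &csz, &lsz, &assoc) > 0)
		cmn_err(CE_CONT, "L2: %d bytes, %d byte lines, %d-way\n",
		    csz, lsz, assoc);
}
#endif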
4407 
4408 #if !defined(__xpv)
4409 
4410 uint32_t *
4411 cpuid_mwait_alloc(cpu_t *cpu)
4412 {
4413 	uint32_t	*ret;
4414 	size_t		mwait_size;
4415 
4416 	ASSERT(cpuid_checkpass(CPU, 2));
4417 
4418 	mwait_size = CPU->cpu_m.mcpu_cpi->cpi_mwait.mon_max;
4419 	if (mwait_size == 0)
4420 		return (NULL);
4421 
4422 	/*
4423 	 * kmem_alloc() returns cache line size aligned data for mwait_size
4424 	 * allocations.  mwait_size is currently cache line sized.  Neither
4425 	 * allocations.  mwait_size is currently cache line sized.  Neither
4426 	 * of these implementation details is guaranteed to be true in the
4427 	 *
4428 	 * First try allocating mwait_size as kmem_alloc() currently returns
4429 	 * correctly aligned memory.  If kmem_alloc() does not return
4430 	 * mwait_size aligned memory, then allocate 2 * mwait_size and round up.
4431 	 *
4432 	 * Set cpi_mwait.buf_actual and cpi_mwait.size_actual in case we
4433 	 * decide to free this memory.
4434 	 */
4435 	ret = kmem_zalloc(mwait_size, KM_SLEEP);
4436 	if (ret == (uint32_t *)P2ROUNDUP((uintptr_t)ret, mwait_size)) {
4437 		cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = ret;
4438 		cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = mwait_size;
4439 		*ret = MWAIT_RUNNING;
4440 		return (ret);
4441 	} else {
4442 		kmem_free(ret, mwait_size);
4443 		ret = kmem_zalloc(mwait_size * 2, KM_SLEEP);
4444 		cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = ret;
4445 		cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = mwait_size * 2;
4446 		ret = (uint32_t *)P2ROUNDUP((uintptr_t)ret, mwait_size);
4447 		*ret = MWAIT_RUNNING;
4448 		return (ret);
4449 	}
4450 }
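
/*
 * Illustrative sketch (editorial addition, compiled out): the alignment
 * trick used by the fallback path above.  Allocating 2 * mwait_size
 * guarantees that an mwait_size-aligned address lies wholly within the
 * buffer, and P2ROUNDUP() finds it.  This assumes mwait_size is a power
 * of two, as the monitor line size reported by cpuid is.  A real caller
 * must also remember the original pointer and length (as buf_actual and
 * size_actual do above) in order to free the buffer later.
 */
#if 0
static uint32_t *
mwait_align_example(size_t mwait_size)
{
	uint32_t *buf = kmem_zalloc(mwait_size * 2, KM_SLEEP);

	/* e.g. buf == 0xff001048, mwait_size == 0x40 -> 0xff001080 */
	return ((uint32_t *)P2ROUNDUP((uintptr_t)buf, mwait_size));
}
#endif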
4451 
4452 void
4453 cpuid_mwait_free(cpu_t *cpu)
4454 {
4455 	if (cpu->cpu_m.mcpu_cpi == NULL) {
4456 		return;
4457 	}
4458 
4459 	if (cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual != NULL &&
4460 	    cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual > 0) {
4461 		kmem_free(cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual,
4462 		    cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual);
4463 	}
4464 
4465 	cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = NULL;
4466 	cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = 0;
4467 }
4468 
4469 void
4470 patch_tsc_read(int flag)
4471 {
4472 	size_t cnt;
4473 
4474 	switch (flag) {
4475 	case X86_NO_TSC:
4476 		cnt = &_no_rdtsc_end - &_no_rdtsc_start;
4477 		(void) memcpy((void *)tsc_read, (void *)&_no_rdtsc_start, cnt);
4478 		break;
4479 	case X86_HAVE_TSCP:
4480 		cnt = &_tscp_end - &_tscp_start;
4481 		(void) memcpy((void *)tsc_read, (void *)&_tscp_start, cnt);
4482 		break;
4483 	case X86_TSC_MFENCE:
4484 		cnt = &_tsc_mfence_end - &_tsc_mfence_start;
4485 		(void) memcpy((void *)tsc_read,
4486 		    (void *)&_tsc_mfence_start, cnt);
4487 		break;
4488 	case X86_TSC_LFENCE:
4489 		cnt = &_tsc_lfence_end - &_tsc_lfence_start;
4490 		(void) memcpy((void *)tsc_read,
4491 		    (void *)&_tsc_lfence_start, cnt);
4492 		break;
4493 	default:
4494 		break;
4495 	}
4496 }
4497 
4498 int
4499 cpuid_deep_cstates_supported(void)
4500 {
4501 	struct cpuid_info *cpi;
4502 	struct cpuid_regs regs;
4503 
4504 	ASSERT(cpuid_checkpass(CPU, 1));
4505 
4506 	cpi = CPU->cpu_m.mcpu_cpi;
4507 
4508 	if (!is_x86_feature(x86_featureset, X86FSET_CPUID))
4509 		return (0);
4510 
4511 	switch (cpi->cpi_vendor) {
4512 	case X86_VENDOR_Intel:
4513 		if (cpi->cpi_xmaxeax < 0x80000007)
4514 			return (0);
4515 
4516 		/*
4517 		 * Does the TSC run at a constant rate in all ACPI C-states?
4518 		 */
4519 		regs.cp_eax = 0x80000007;
4520 		(void) __cpuid_insn(&regs);
4521 		return (regs.cp_edx & CPUID_TSC_CSTATE_INVARIANCE);
4522 
4523 	default:
4524 		return (0);
4525 	}
4526 }
4527 
4528 #endif	/* !__xpv */
4529 
4530 void
4531 post_startup_cpu_fixups(void)
4532 {
4533 #ifndef __xpv
4534 	/*
4535 	 * Some AMD processors support C1E state. Entering this state will
4536 	 * cause the local APIC timer to stop, which we can't deal with at
4537 	 * this time.
4538 	 */
4539 	if (cpuid_getvendor(CPU) == X86_VENDOR_AMD) {
4540 		on_trap_data_t otd;
4541 		uint64_t reg;
4542 
4543 		if (!on_trap(&otd, OT_DATA_ACCESS)) {
4544 			reg = rdmsr(MSR_AMD_INT_PENDING_CMP_HALT);
4545 			/* Disable C1E state if it is enabled by BIOS */
4546 			if ((reg >> AMD_ACTONCMPHALT_SHIFT) &
4547 			    AMD_ACTONCMPHALT_MASK) {
4548 				reg &= ~(AMD_ACTONCMPHALT_MASK <<
4549 				    AMD_ACTONCMPHALT_SHIFT);
4550 				wrmsr(MSR_AMD_INT_PENDING_CMP_HALT, reg);
4551 			}
4552 		}
4553 		no_trap();
4554 	}
4555 #endif	/* !__xpv */
4556 }
4557 
4558 /*
4559  * Setup necessary registers to enable XSAVE feature on this processor.
4560  * This function needs to be called early enough, so that no xsave/xrstor
4561  * ops will execute on the processor before the MSRs are properly set up.
4562  *
4563  * Current implementation has the following assumption:
4564  * - cpuid_pass1() is done, so that X86 features are known.
4565  * - fpu_probe() is done, so that fp_save_mech is chosen.
4566  */
4567 void
4568 xsave_setup_msr(cpu_t *cpu)
4569 {
4570 	ASSERT(fp_save_mech == FP_XSAVE);
4571 	ASSERT(is_x86_feature(x86_featureset, X86FSET_XSAVE));
4572 
4573 	/* Enable OSXSAVE in CR4. */
4574 	setcr4(getcr4() | CR4_OSXSAVE);
4575 	/*
4576 	 * Update SW copy of ECX, so that /dev/cpu/self/cpuid will report
4577 	 * correct value.
4578 	 */
4579 	cpu->cpu_m.mcpu_cpi->cpi_std[1].cp_ecx |= CPUID_INTC_ECX_OSXSAVE;
4580 	setup_xfem();
4581 }
4582 
4583 /*
4584  * Starting with the Westmere processor the local
4585  * APIC timer will continue running in all C-states,
4586  * including the deepest C-states.
4587  */
4588 int
4589 cpuid_arat_supported(void)
4590 {
4591 	struct cpuid_info *cpi;
4592 	struct cpuid_regs regs;
4593 
4594 	ASSERT(cpuid_checkpass(CPU, 1));
4595 	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
4596 
4597 	cpi = CPU->cpu_m.mcpu_cpi;
4598 
4599 	switch (cpi->cpi_vendor) {
4600 	case X86_VENDOR_Intel:
4601 		/*
4602 		 * Always-running Local APIC Timer is
4603 		 * indicated by CPUID.6.EAX[2].
4604 		 */
4605 		if (cpi->cpi_maxeax >= 6) {
4606 			regs.cp_eax = 6;
4607 			(void) cpuid_insn(NULL, &regs);
4608 			return (regs.cp_eax & CPUID_CSTATE_ARAT);
4609 		} else {
4610 			return (0);
4611 		}
4612 	default:
4613 		return (0);
4614 	}
4615 }
4616 
4617 /*
4618  * Check support for Intel ENERGY_PERF_BIAS feature
4619  */
4620 int
4621 cpuid_iepb_supported(struct cpu *cp)
4622 {
4623 	struct cpuid_info *cpi = cp->cpu_m.mcpu_cpi;
4624 	struct cpuid_regs regs;
4625 
4626 	ASSERT(cpuid_checkpass(cp, 1));
4627 
4628 	if (!(is_x86_feature(x86_featureset, X86FSET_CPUID)) ||
4629 	    !(is_x86_feature(x86_featureset, X86FSET_MSR))) {
4630 		return (0);
4631 	}
4632 
4633 	/*
4634 	 * Intel ENERGY_PERF_BIAS MSR is indicated by
4635 	 * capability bit CPUID.6.ECX.3
4636 	 */
4637 	if ((cpi->cpi_vendor != X86_VENDOR_Intel) || (cpi->cpi_maxeax < 6))
4638 		return (0);
4639 
4640 	regs.cp_eax = 0x6;
4641 	(void) cpuid_insn(NULL, &regs);
4642 	return (regs.cp_ecx & CPUID_EPB_SUPPORT);
4643 }
4644 
4645 /*
4646  * Check support for TSC deadline timer
4647  *
4648  * The TSC deadline timer provides a software programming model
4649  * superior to the local APIC timer's, one that eliminates "time drifts".
4650  * Instead of specifying a relative time, software specifies an
4651  * absolute time as the target at which the processor should
4652  * generate a timer event.
4653  */
4654 int
4655 cpuid_deadline_tsc_supported(void)
4656 {
4657 	struct cpuid_info *cpi = CPU->cpu_m.mcpu_cpi;
4658 	struct cpuid_regs regs;
4659 
4660 	ASSERT(cpuid_checkpass(CPU, 1));
4661 	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
4662 
4663 	switch (cpi->cpi_vendor) {
4664 	case X86_VENDOR_Intel:
4665 		if (cpi->cpi_maxeax >= 1) {
4666 			regs.cp_eax = 1;
4667 			(void) cpuid_insn(NULL, &regs);
4668 			return (regs.cp_ecx & CPUID_DEADLINE_TSC);
4669 		} else {
4670 			return (0);
4671 		}
4672 	default:
4673 		return (0);
4674 	}
4675 }
4676 
4677 #if defined(__amd64) && !defined(__xpv)
4678 /*
4679  * Patch in versions of bcopy for high performance Intel Nhm processors
4680  * and later...
4681  */
4682 void
4683 patch_memops(uint_t vendor)
4684 {
4685 	size_t cnt, i;
4686 	caddr_t to, from;
4687 
4688 	if ((vendor == X86_VENDOR_Intel) &&
4689 	    is_x86_feature(x86_featureset, X86FSET_SSE4_2)) {
4690 		cnt = &bcopy_patch_end - &bcopy_patch_start;
4691 		to = &bcopy_ck_size;
4692 		from = &bcopy_patch_start;
4693 		for (i = 0; i < cnt; i++) {
4694 			*to++ = *from++;
4695 		}
4696 	}
4697 }
4698 #endif  /* __amd64 && !__xpv */
4699 
4700 /*
4701  * This function finds the number of bits to represent the number of cores per
4702  * chip and the number of strands per core for the Intel platforms.
4703  * It re-uses the x2APIC cpuid code of the cpuid_pass2().
4704  */
4705 void
4706 cpuid_get_ext_topo(uint_t vendor, uint_t *core_nbits, uint_t *strand_nbits)
4707 {
4708 	struct cpuid_regs regs;
4709 	struct cpuid_regs *cp = &regs;
4710 
4711 	if (vendor != X86_VENDOR_Intel) {
4712 		return;
4713 	}
4714 
4715 	/* if the cpuid level is at least 0xB, extended topo is available. */
4716 	cp->cp_eax = 0;
4717 	if (__cpuid_insn(cp) >= 0xB) {
4718 
4719 		cp->cp_eax = 0xB;
4720 		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
4721 		(void) __cpuid_insn(cp);
4722 
4723 		/*
4724 		 * Check CPUID.EAX=0BH, ECX=0H:EBX is non-zero, which
4725 		 * indicates that the extended topology enumeration leaf is
4726 		 * available.
4727 		 */
4728 		if (cp->cp_ebx) {
4729 			uint_t coreid_shift = 0;
4730 			uint_t chipid_shift = 0;
4731 			uint_t i;
4732 			uint_t level;
4733 
4734 			for (i = 0; i < CPI_FNB_ECX_MAX; i++) {
4735 				cp->cp_eax = 0xB;
4736 				cp->cp_ecx = i;
4737 
4738 				(void) __cpuid_insn(cp);
4739 				level = CPI_CPU_LEVEL_TYPE(cp);
4740 
4741 				if (level == 1) {
4742 					/*
4743 					 * Thread level processor topology
4744 					 * Number of bits to shift the APIC ID
4745 					 * right to get the coreid.
4746 					 */
4747 					coreid_shift = BITX(cp->cp_eax, 4, 0);
4748 				} else if (level == 2) {
4749 					/*
4750 					 * Core level processor topology
4751 					 * Number of bits to shift the APIC ID
4752 					 * right to get the chipid.
4753 					 */
4754 					chipid_shift = BITX(cp->cp_eax, 4, 0);
4755 				}
4756 			}
4757 
4758 			if (coreid_shift > 0 && chipid_shift > coreid_shift) {
4759 				*strand_nbits = coreid_shift;
4760 				*core_nbits = chipid_shift - coreid_shift;
4761 			}
4762 		}
4763 	}
4764 }
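
/*
 * Illustrative sketch (editorial addition, compiled out): how the two
 * widths computed above decompose an APIC ID.  The low strand_nbits
 * bits select the strand, the next core_nbits bits select the core
 * within the chip, and the remaining high bits identify the chip.  The
 * values below are hypothetical (2 strands per core, 8 cores per chip).
 */
#if 0
static void
ext_topo_decode_example(void)
{
	uint_t strand_nbits = 1, core_nbits = 3;
	uint_t apicid = 0x1d;
	uint_t strand, core, chip;

	strand = apicid & ((1 << strand_nbits) - 1);
	core = (apicid >> strand_nbits) & ((1 << core_nbits) - 1);
	chip = apicid >> (strand_nbits + core_nbits);

	/* apicid 0x1d -> chip 1, core 6, strand 1 */
	cmn_err(CE_CONT, "chip %u core %u strand %u\n", chip, core, strand);
}
#endif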
4765