xref: /titanic_44/usr/src/uts/i86pc/os/cpuid.c (revision a05fd0c9b9aa46cf66ddea7617e56facdf1f4aaf)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2011 by Delphix. All rights reserved.
24  * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
25  * Copyright 2014 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
26  */
27 /*
28  * Copyright (c) 2010, Intel Corporation.
29  * All rights reserved.
30  */
31 /*
32  * Portions Copyright 2009 Advanced Micro Devices, Inc.
33  */
34 /*
35  * Copyright (c) 2014, Joyent, Inc. All rights reserved.
36  */
37 /*
38  * Various routines to handle identification
39  * and classification of x86 processors.
40  */
41 
42 #include <sys/types.h>
43 #include <sys/archsystm.h>
44 #include <sys/x86_archext.h>
45 #include <sys/kmem.h>
46 #include <sys/systm.h>
47 #include <sys/cmn_err.h>
48 #include <sys/sunddi.h>
49 #include <sys/sunndi.h>
50 #include <sys/cpuvar.h>
51 #include <sys/processor.h>
52 #include <sys/sysmacros.h>
53 #include <sys/pg.h>
54 #include <sys/fp.h>
55 #include <sys/controlregs.h>
56 #include <sys/bitmap.h>
57 #include <sys/auxv_386.h>
58 #include <sys/memnode.h>
59 #include <sys/pci_cfgspace.h>
60 
61 #ifdef __xpv
62 #include <sys/hypervisor.h>
63 #else
64 #include <sys/ontrap.h>
65 #endif
66 
67 /*
68  * Pass 0 of cpuid feature analysis happens in locore. It contains special code
69  * to recognize Cyrix processors that are not cpuid-compliant, and to deal with
70  * them accordingly. For most modern processors, feature detection occurs here
71  * in pass 1.
72  *
73  * Pass 1 of cpuid feature analysis happens just at the beginning of mlsetup()
74  * for the boot CPU and does the basic analysis that the early kernel needs.
75  * x86_featureset is populated by cpuid_pass1() on the boot CPU, which
76  * fills in the featureset bitmap it is passed.
77  *
78  * Pass 1 includes:
79  *
80  *	o Determining vendor/model/family/stepping and setting x86_type and
81  *	  x86_vendor accordingly.
82  *	o Processing the feature flags returned by the cpuid instruction while
83  *	  applying any workarounds or tricks for the specific processor.
84  *	o Mapping the feature flags into Solaris feature bits (X86_*).
85  *	o Processing extended feature flags if supported by the processor,
86  *	  again while applying specific processor knowledge.
87  *	o Determining the CMT characteristics of the system.
88  *
89  * Pass 1 is done on non-boot CPUs during their initialization and the results
90  * are used only as a meager attempt at ensuring that all processors within the
91  * system support the same features.
92  *
93  * Pass 2 of cpuid feature analysis happens just at the beginning
94  * of startup().  It just copies in and corrects the remainder
95  * of the cpuid data we depend on: standard cpuid functions that we didn't
96  * need for pass1 feature analysis, and extended cpuid functions beyond the
97  * simple feature processing done in pass1.
98  *
99  * Pass 3 of cpuid analysis is invoked after basic kernel services; in
100  * particular kernel memory allocation has been made available. It creates a
101  * readable brand string based on the data collected in the first two passes.
102  *
103  * Pass 4 of cpuid analysis is invoked after post_startup() when all
104  * the support infrastructure for various hardware features has been
105  * initialized. It determines which processor features will be reported
106  * to userland via the aux vector.
107  *
108  * All passes are executed on all CPUs, but only the boot CPU determines what
109  * features the kernel will use.
110  *
111  * Much of the worst junk in this file is for the support of processors
112  * that didn't really implement the cpuid instruction properly.
113  *
114  * NOTE: The accessor functions (cpuid_get*) are aware of, and ASSERT upon,
115  * the pass numbers.  Accordingly, changes to the pass code may require changes
116  * to the accessor code.
117  */
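
/*
 * Illustrative sketch (not taken verbatim from the code below): a
 * pass-aware accessor typically looks something like
 *
 *	ASSERT(cpu->cpu_m.mcpu_cpi->cpi_pass >= 3);
 *	return ((const char *)cpu->cpu_m.mcpu_cpi->cpi_brandstr);
 *
 * so that a caller asking for data before the pass that fills it in has
 * run trips the assertion instead of reading uninitialized fields.
 */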
118 
119 uint_t x86_vendor = X86_VENDOR_IntelClone;
120 uint_t x86_type = X86_TYPE_OTHER;
121 uint_t x86_clflush_size = 0;
122 
123 uint_t pentiumpro_bug4046376;
124 
125 uchar_t x86_featureset[BT_SIZEOFMAP(NUM_X86_FEATURES)];
126 
127 static char *x86_feature_names[NUM_X86_FEATURES] = {
128 	"lgpg",
129 	"tsc",
130 	"msr",
131 	"mtrr",
132 	"pge",
133 	"de",
134 	"cmov",
135 	"mmx",
136 	"mca",
137 	"pae",
138 	"cv8",
139 	"pat",
140 	"sep",
141 	"sse",
142 	"sse2",
143 	"htt",
144 	"asysc",
145 	"nx",
146 	"sse3",
147 	"cx16",
148 	"cmp",
149 	"tscp",
150 	"mwait",
151 	"sse4a",
152 	"cpuid",
153 	"ssse3",
154 	"sse4_1",
155 	"sse4_2",
156 	"1gpg",
157 	"clfsh",
158 	"64",
159 	"aes",
160 	"pclmulqdq",
161 	"xsave",
162 	"avx",
163 	"vmx",
164 	"svm",
165 	"topoext",
166 	"f16c",
167 	"rdrand",
168 	"x2apic",
169 };
170 
171 boolean_t
172 is_x86_feature(void *featureset, uint_t feature)
173 {
174 	ASSERT(feature < NUM_X86_FEATURES);
175 	return (BT_TEST((ulong_t *)featureset, feature));
176 }
177 
178 void
179 add_x86_feature(void *featureset, uint_t feature)
180 {
181 	ASSERT(feature < NUM_X86_FEATURES);
182 	BT_SET((ulong_t *)featureset, feature);
183 }
184 
185 void
186 remove_x86_feature(void *featureset, uint_t feature)
187 {
188 	ASSERT(feature < NUM_X86_FEATURES);
189 	BT_CLEAR((ulong_t *)featureset, feature);
190 }
191 
192 boolean_t
193 compare_x86_featureset(void *setA, void *setB)
194 {
195 	/*
196 	 * We assume that the unused bits of the bitmap are always zero.
197 	 */
198 	if (memcmp(setA, setB, BT_SIZEOFMAP(NUM_X86_FEATURES)) == 0) {
199 		return (B_TRUE);
200 	} else {
201 		return (B_FALSE);
202 	}
203 }
204 
205 void
206 print_x86_featureset(void *featureset)
207 {
208 	uint_t i;
209 
210 	for (i = 0; i < NUM_X86_FEATURES; i++) {
211 		if (is_x86_feature(featureset, i)) {
212 			cmn_err(CE_CONT, "?x86_feature: %s\n",
213 			    x86_feature_names[i]);
214 		}
215 	}
216 }
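
/*
 * Illustrative usage sketch (not part of the implementation above): a
 * consumer that wants to gate a code path on, say, SSE2 support would
 * typically test the featureset built by cpuid_pass1():
 *
 *	if (is_x86_feature(x86_featureset, X86FSET_SSE2))
 *		do_sse2_thing();
 *
 * where do_sse2_thing() is a hypothetical caller-side routine.  The
 * add_x86_feature() and remove_x86_feature() helpers are expected to be
 * called only from the cpuid passes themselves.
 */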
217 
218 static size_t xsave_state_size = 0;
219 uint64_t xsave_bv_all = (XFEATURE_LEGACY_FP | XFEATURE_SSE);
220 boolean_t xsave_force_disable = B_FALSE;
221 
222 /*
223  * This is set to the platform type we are running on.
224  */
225 static int platform_type = -1;
226 
227 #if !defined(__xpv)
228 /*
229  * Variable to patch if hypervisor platform detection needs to be
230  * disabled (i.e. platform_type will always be HW_NATIVE if this is 0).
231  */
232 int enable_platform_detection = 1;
233 #endif
234 
235 /*
236  * monitor/mwait info.
237  *
238  * size_actual and buf_actual are the real address and size allocated to get
239  * proper mwait_buf alignment.  buf_actual and size_actual should be passed
240  * to kmem_free().  Currently kmem_alloc() and mwait happen to both use
241  * processor cache-line alignment, but this is not guaranteed in the future.
242  */
243 struct mwait_info {
244 	size_t		mon_min;	/* min size to avoid missed wakeups */
245 	size_t		mon_max;	/* size to avoid false wakeups */
246 	size_t		size_actual;	/* size actually allocated */
247 	void		*buf_actual;	/* memory actually allocated */
248 	uint32_t	support;	/* processor support of monitor/mwait */
249 };
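
/*
 * Illustrative sketch of the rule stated above: code tearing down a
 * monitor/mwait buffer should free the actual allocation, not the
 * aligned pointer/size it handed out, e.g.
 *
 *	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
 *	kmem_free(cpi->cpi_mwait.buf_actual, cpi->cpi_mwait.size_actual);
 */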
250 
251 /*
252  * xsave/xrstor info.
253  *
254  * This structure contains HW feature bits and size of the xsave save area.
255  * Note: the kernel will use the maximum size required for all hardware
256  * features. It is not optimized for potential memory savings if features at
257  * the end of the save area are not enabled.
258  */
259 struct xsave_info {
260 	uint32_t	xsav_hw_features_low;   /* Supported HW features */
261 	uint32_t	xsav_hw_features_high;  /* Supported HW features */
262 	size_t		xsav_max_size;  /* max size save area for HW features */
263 	size_t		ymm_size;	/* AVX: size of ymm save area */
264 	size_t		ymm_offset;	/* AVX: offset for ymm save area */
265 };
266 
267 
268 /*
269  * These constants determine how many of the elements of the
270  * cpuid we cache in the cpuid_info data structure; the
271  * remaining elements are accessible via the cpuid instruction.
272  */
273 
274 #define	NMAX_CPI_STD	6		/* eax = 0 .. 5 */
275 #define	NMAX_CPI_EXTD	0x1f		/* eax = 0x80000000 .. 0x8000001e */
276 
277 /*
278  * Some terminology needs to be explained:
279  *  - Socket: Something that can be plugged into a motherboard.
280  *  - Package: Same as socket
281  *  - Chip: Same as socket. Note that AMD's documentation uses the term "chip"
282  *    differently: there, chip is the same as processor node (below)
283  *  - Processor node: Some AMD processors have more than one
284  *    "subprocessor" embedded in a package. These subprocessors (nodes)
285  *    are fully-functional processors themselves with cores, caches,
286  *    memory controllers, and PCI configuration spaces. They are connected
287  *    inside the package with Hypertransport links. On single-node
288  *    processors, processor node is equivalent to chip/socket/package.
289  *  - Compute Unit: Some AMD processors pair cores in "compute units" that
290  *    share the FPU and the I$ and L2 caches.
291  */
292 
293 struct cpuid_info {
294 	uint_t cpi_pass;		/* last pass completed */
295 	/*
296 	 * standard function information
297 	 */
298 	uint_t cpi_maxeax;		/* fn 0: %eax */
299 	char cpi_vendorstr[13];		/* fn 0: %ebx:%ecx:%edx */
300 	uint_t cpi_vendor;		/* enum of cpi_vendorstr */
301 
302 	uint_t cpi_family;		/* fn 1: extended family */
303 	uint_t cpi_model;		/* fn 1: extended model */
304 	uint_t cpi_step;		/* fn 1: stepping */
305 	chipid_t cpi_chipid;		/* fn 1: %ebx:  Intel: chip # */
306 					/*		AMD: package/socket # */
307 	uint_t cpi_brandid;		/* fn 1: %ebx: brand ID */
308 	int cpi_clogid;			/* fn 1: %ebx: thread # */
309 	uint_t cpi_ncpu_per_chip;	/* fn 1: %ebx: logical cpu count */
310 	uint8_t cpi_cacheinfo[16];	/* fn 2: intel-style cache desc */
311 	uint_t cpi_ncache;		/* fn 2: number of elements */
312 	uint_t cpi_ncpu_shr_last_cache;	/* fn 4: %eax: ncpus sharing cache */
313 	id_t cpi_last_lvl_cacheid;	/* fn 4: %eax: derived cache id */
314 	uint_t cpi_std_4_size;		/* fn 4: number of fn 4 elements */
315 	struct cpuid_regs **cpi_std_4;	/* fn 4: %ecx == 0 .. fn4_size */
316 	struct cpuid_regs cpi_std[NMAX_CPI_STD];	/* 0 .. 5 */
317 	/*
318 	 * extended function information
319 	 */
320 	uint_t cpi_xmaxeax;		/* fn 0x80000000: %eax */
321 	char cpi_brandstr[49];		/* fn 0x8000000[234] */
322 	uint8_t cpi_pabits;		/* fn 0x80000006: %eax */
323 	uint8_t	cpi_vabits;		/* fn 0x80000006: %eax */
324 	struct	cpuid_regs cpi_extd[NMAX_CPI_EXTD];	/* 0x800000XX */
325 
326 	id_t cpi_coreid;		/* same coreid => strands share core */
327 	int cpi_pkgcoreid;		/* core number within single package */
328 	uint_t cpi_ncore_per_chip;	/* AMD: fn 0x80000008: %ecx[7-0] */
329 					/* Intel: fn 4: %eax[31-26] */
330 	/*
331 	 * supported feature information
332 	 */
333 	uint32_t cpi_support[5];
334 #define	STD_EDX_FEATURES	0
335 #define	AMD_EDX_FEATURES	1
336 #define	TM_EDX_FEATURES		2
337 #define	STD_ECX_FEATURES	3
338 #define	AMD_ECX_FEATURES	4
339 	/*
340 	 * Synthesized information, where known.
341 	 */
342 	uint32_t cpi_chiprev;		/* See X86_CHIPREV_* in x86_archext.h */
343 	const char *cpi_chiprevstr;	/* May be NULL if chiprev unknown */
344 	uint32_t cpi_socket;		/* Chip package/socket type */
345 
346 	struct mwait_info cpi_mwait;	/* fn 5: monitor/mwait info */
347 	uint32_t cpi_apicid;
348 	uint_t cpi_procnodeid;		/* AMD: nodeID on HT, Intel: chipid */
349 	uint_t cpi_procnodes_per_pkg;	/* AMD: # of nodes in the package */
350 					/* Intel: 1 */
351 	uint_t cpi_compunitid;		/* AMD: ComputeUnit ID, Intel: coreid */
352 	uint_t cpi_cores_per_compunit;	/* AMD: # of cores in the ComputeUnit */
353 
354 	struct xsave_info cpi_xsave;	/* fn D: xsave/xrstor info */
355 };
356 
357 
358 static struct cpuid_info cpuid_info0;
359 
360 /*
361  * These bit fields are defined by the Intel Application Note AP-485
362  * "Intel Processor Identification and the CPUID Instruction"
363  */
364 #define	CPI_FAMILY_XTD(cpi)	BITX((cpi)->cpi_std[1].cp_eax, 27, 20)
365 #define	CPI_MODEL_XTD(cpi)	BITX((cpi)->cpi_std[1].cp_eax, 19, 16)
366 #define	CPI_TYPE(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 13, 12)
367 #define	CPI_FAMILY(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 11, 8)
368 #define	CPI_STEP(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 3, 0)
369 #define	CPI_MODEL(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 7, 4)
370 
371 #define	CPI_FEATURES_EDX(cpi)		((cpi)->cpi_std[1].cp_edx)
372 #define	CPI_FEATURES_ECX(cpi)		((cpi)->cpi_std[1].cp_ecx)
373 #define	CPI_FEATURES_XTD_EDX(cpi)	((cpi)->cpi_extd[1].cp_edx)
374 #define	CPI_FEATURES_XTD_ECX(cpi)	((cpi)->cpi_extd[1].cp_ecx)
375 
376 #define	CPI_BRANDID(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 7, 0)
377 #define	CPI_CHUNKS(cpi)		BITX((cpi)->cpi_std[1].cp_ebx, 15, 8)
378 #define	CPI_CPU_COUNT(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 23, 16)
379 #define	CPI_APIC_ID(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 31, 24)
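
/*
 * Worked example (hypothetical values): a fn 1 %eax of 0x000206a7
 * decodes as CPI_FAMILY() == 0x6, CPI_MODEL() == 0xa,
 * CPI_MODEL_XTD() == 0x2 and CPI_STEP() == 0x7; for an Intel part,
 * cpuid_pass1() below folds these into cpi_family == 0x6 and
 * cpi_model == 0x2a.
 */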
380 
381 #define	CPI_MAXEAX_MAX		0x100		/* sanity control */
382 #define	CPI_XMAXEAX_MAX		0x80000100
383 #define	CPI_FN4_ECX_MAX		0x20		/* sanity: max fn 4 levels */
384 #define	CPI_FNB_ECX_MAX		0x20		/* sanity: max fn B levels */
385 
386 /*
387  * Function 4 (Deterministic Cache Parameters) macros
388  * Defined by Intel Application Note AP-485
389  */
390 #define	CPI_NUM_CORES(regs)		BITX((regs)->cp_eax, 31, 26)
391 #define	CPI_NTHR_SHR_CACHE(regs)	BITX((regs)->cp_eax, 25, 14)
392 #define	CPI_FULL_ASSOC_CACHE(regs)	BITX((regs)->cp_eax, 9, 9)
393 #define	CPI_SELF_INIT_CACHE(regs)	BITX((regs)->cp_eax, 8, 8)
394 #define	CPI_CACHE_LVL(regs)		BITX((regs)->cp_eax, 7, 5)
395 #define	CPI_CACHE_TYPE(regs)		BITX((regs)->cp_eax, 4, 0)
396 #define	CPI_CPU_LEVEL_TYPE(regs)	BITX((regs)->cp_ecx, 15, 8)
397 
398 #define	CPI_CACHE_WAYS(regs)		BITX((regs)->cp_ebx, 31, 22)
399 #define	CPI_CACHE_PARTS(regs)		BITX((regs)->cp_ebx, 21, 12)
400 #define	CPI_CACHE_COH_LN_SZ(regs)	BITX((regs)->cp_ebx, 11, 0)
401 
402 #define	CPI_CACHE_SETS(regs)		BITX((regs)->cp_ecx, 31, 0)
403 
404 #define	CPI_PREFCH_STRIDE(regs)		BITX((regs)->cp_edx, 9, 0)
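
/*
 * Illustrative note: these fields combine in the usual way,
 * cache size = ways * partitions * line size * sets, with each field
 * reported as (value - 1).  For example, a hypothetical leaf returning
 * CPI_CACHE_WAYS() == 7, CPI_CACHE_PARTS() == 0,
 * CPI_CACHE_COH_LN_SZ() == 63 and CPI_CACHE_SETS() == 63 describes an
 * 8-way, 64-byte-line, 64-set cache of 8 * 1 * 64 * 64 = 32K bytes.
 */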
405 
406 
407 /*
408  * A couple of shorthand macros to identify "later" P6-family chips
409  * like the Pentium M and Core.  First, the "older" P6-based stuff
410  * (loosely defined as "pre-Pentium-4"):
411  * P6, PII, Mobile PII, PII Xeon, PIII, Mobile PIII, PIII Xeon
412  */
413 
414 #define	IS_LEGACY_P6(cpi) (			\
415 	cpi->cpi_family == 6 && 		\
416 		(cpi->cpi_model == 1 ||		\
417 		cpi->cpi_model == 3 ||		\
418 		cpi->cpi_model == 5 ||		\
419 		cpi->cpi_model == 6 ||		\
420 		cpi->cpi_model == 7 ||		\
421 		cpi->cpi_model == 8 ||		\
422 		cpi->cpi_model == 0xA ||	\
423 		cpi->cpi_model == 0xB)		\
424 )
425 
426 /* A "new F6" is everything with family 6 that's not the above */
427 #define	IS_NEW_F6(cpi) ((cpi->cpi_family == 6) && !IS_LEGACY_P6(cpi))
428 
429 /* Extended family/model support */
430 #define	IS_EXTENDED_MODEL_INTEL(cpi) (cpi->cpi_family == 0x6 || \
431 	cpi->cpi_family >= 0xf)
432 
433 /*
434  * Info for monitor/mwait idle loop.
435  *
436  * See cpuid section of "Intel 64 and IA-32 Architectures Software Developer's
437  * Manual Volume 2A: Instruction Set Reference, A-M" #25366-022US, November
438  * 2006.
439  * See MONITOR/MWAIT section of "AMD64 Architecture Programmer's Manual
440  * Documentation Updates" #33633, Rev 2.05, December 2006.
441  */
442 #define	MWAIT_SUPPORT		(0x00000001)	/* mwait supported */
443 #define	MWAIT_EXTENSIONS	(0x00000002)	/* extension supported */
444 #define	MWAIT_ECX_INT_ENABLE	(0x00000004)	/* ecx 1 extension supported */
445 #define	MWAIT_SUPPORTED(cpi)	((cpi)->cpi_std[1].cp_ecx & CPUID_INTC_ECX_MON)
446 #define	MWAIT_INT_ENABLE(cpi)	((cpi)->cpi_std[5].cp_ecx & 0x2)
447 #define	MWAIT_EXTENSION(cpi)	((cpi)->cpi_std[5].cp_ecx & 0x1)
448 #define	MWAIT_SIZE_MIN(cpi)	BITX((cpi)->cpi_std[5].cp_eax, 15, 0)
449 #define	MWAIT_SIZE_MAX(cpi)	BITX((cpi)->cpi_std[5].cp_ebx, 15, 0)
450 /*
451  * Number of sub-cstates for a given c-state.
452  */
453 #define	MWAIT_NUM_SUBC_STATES(cpi, c_state)			\
454 	BITX((cpi)->cpi_std[5].cp_edx, c_state + 3, c_state)
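
/*
 * For example (hypothetical value): with a fn 5 %edx of 0x00002220,
 * MWAIT_NUM_SUBC_STATES(cpi, 4) == 2, i.e. two C1 sub-C-states; each
 * C-state owns one 4-bit nibble of %edx and the caller passes the bit
 * offset of the nibble it wants.
 */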
455 
456 /*
457  * XSAVE leaf 0xD enumeration
458  */
459 #define	CPUID_LEAFD_2_YMM_OFFSET	576
460 #define	CPUID_LEAFD_2_YMM_SIZE		256
461 
462 /*
463  * Functions we consume from cpuid_subr.c; don't publish these in a header
464  * file to try and keep people using the expected cpuid_* interfaces.
465  */
466 extern uint32_t _cpuid_skt(uint_t, uint_t, uint_t, uint_t);
467 extern const char *_cpuid_sktstr(uint_t, uint_t, uint_t, uint_t);
468 extern uint32_t _cpuid_chiprev(uint_t, uint_t, uint_t, uint_t);
469 extern const char *_cpuid_chiprevstr(uint_t, uint_t, uint_t, uint_t);
470 extern uint_t _cpuid_vendorstr_to_vendorcode(char *);
471 
472 /*
473  * Apply various platform-dependent restrictions where the
474  * underlying platform restrictions mean the CPU can be marked
475  * as less capable than its cpuid instruction would imply.
476  */
477 #if defined(__xpv)
478 static void
479 platform_cpuid_mangle(uint_t vendor, uint32_t eax, struct cpuid_regs *cp)
480 {
481 	switch (eax) {
482 	case 1: {
483 		uint32_t mcamask = DOMAIN_IS_INITDOMAIN(xen_info) ?
484 		    0 : CPUID_INTC_EDX_MCA;
485 		cp->cp_edx &=
486 		    ~(mcamask |
487 		    CPUID_INTC_EDX_PSE |
488 		    CPUID_INTC_EDX_VME | CPUID_INTC_EDX_DE |
489 		    CPUID_INTC_EDX_SEP | CPUID_INTC_EDX_MTRR |
490 		    CPUID_INTC_EDX_PGE | CPUID_INTC_EDX_PAT |
491 		    CPUID_AMD_EDX_SYSC | CPUID_INTC_EDX_SEP |
492 		    CPUID_INTC_EDX_PSE36 | CPUID_INTC_EDX_HTT);
493 		break;
494 	}
495 
496 	case 0x80000001:
497 		cp->cp_edx &=
498 		    ~(CPUID_AMD_EDX_PSE |
499 		    CPUID_INTC_EDX_VME | CPUID_INTC_EDX_DE |
500 		    CPUID_AMD_EDX_MTRR | CPUID_AMD_EDX_PGE |
501 		    CPUID_AMD_EDX_PAT | CPUID_AMD_EDX_PSE36 |
502 		    CPUID_AMD_EDX_SYSC | CPUID_INTC_EDX_SEP |
503 		    CPUID_AMD_EDX_TSCP);
504 		cp->cp_ecx &= ~CPUID_AMD_ECX_CMP_LGCY;
505 		break;
506 	default:
507 		break;
508 	}
509 
510 	switch (vendor) {
511 	case X86_VENDOR_Intel:
512 		switch (eax) {
513 		case 4:
514 			/*
515 			 * Zero out the (ncores-per-chip - 1) field
516 			 */
517 			cp->cp_eax &= 0x03ffffff;
518 			break;
519 		default:
520 			break;
521 		}
522 		break;
523 	case X86_VENDOR_AMD:
524 		switch (eax) {
525 
526 		case 0x80000001:
527 			cp->cp_ecx &= ~CPUID_AMD_ECX_CR8D;
528 			break;
529 
530 		case 0x80000008:
531 			/*
532 			 * Zero out the (ncores-per-chip - 1) field
533 			 */
534 			cp->cp_ecx &= 0xffffff00;
535 			break;
536 		default:
537 			break;
538 		}
539 		break;
540 	default:
541 		break;
542 	}
543 }
544 #else
545 #define	platform_cpuid_mangle(vendor, eax, cp)	/* nothing */
546 #endif
547 
548 /*
549  *  Some undocumented ways of patching the results of the cpuid
550  *  instruction to permit running Solaris 10 on future cpus that
551  *  we don't currently support.  Could be set to non-zero values
552  *  via settings in eeprom.
553  */
554 
555 uint32_t cpuid_feature_ecx_include;
556 uint32_t cpuid_feature_ecx_exclude;
557 uint32_t cpuid_feature_edx_include;
558 uint32_t cpuid_feature_edx_exclude;
559 
560 /*
561  * Allocate space for mcpu_cpi in the machcpu structure for all non-boot CPUs.
562  */
563 void
564 cpuid_alloc_space(cpu_t *cpu)
565 {
566 	/*
567 	 * By convention, cpu0 is the boot cpu, which is set up
568 	 * before memory allocation is available.  All other cpus get
569 	 * their cpuid_info struct allocated here.
570 	 */
571 	ASSERT(cpu->cpu_id != 0);
572 	ASSERT(cpu->cpu_m.mcpu_cpi == NULL);
573 	cpu->cpu_m.mcpu_cpi =
574 	    kmem_zalloc(sizeof (*cpu->cpu_m.mcpu_cpi), KM_SLEEP);
575 }
576 
577 void
578 cpuid_free_space(cpu_t *cpu)
579 {
580 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
581 	int i;
582 
583 	ASSERT(cpi != NULL);
584 	ASSERT(cpi != &cpuid_info0);
585 
586 	/*
587 	 * Free up any function 4 related dynamic storage
588 	 */
589 	for (i = 1; i < cpi->cpi_std_4_size; i++)
590 		kmem_free(cpi->cpi_std_4[i], sizeof (struct cpuid_regs));
591 	if (cpi->cpi_std_4_size > 0)
592 		kmem_free(cpi->cpi_std_4,
593 		    cpi->cpi_std_4_size * sizeof (struct cpuid_regs *));
594 
595 	kmem_free(cpi, sizeof (*cpi));
596 	cpu->cpu_m.mcpu_cpi = NULL;
597 }
598 
599 #if !defined(__xpv)
600 /*
601  * Determine the type of the underlying platform. This is used to customize
602  * initialization of various subsystems (e.g. TSC). determine_platform() must
603  * only ever be called once to prevent two processors from seeing different
604  * values of platform_type. Must be called before cpuid_pass1(), the earliest
605  * consumer to execute (uses _cpuid_chiprev --> synth_amd_info --> get_hwenv).
606  */
607 void
608 determine_platform(void)
609 {
610 	struct cpuid_regs cp;
611 	uint32_t base;
612 	uint32_t regs[4];
613 	char *hvstr = (char *)regs;
614 
615 	ASSERT(platform_type == -1);
616 
617 	platform_type = HW_NATIVE;
618 
619 	if (!enable_platform_detection)
620 		return;
621 
622 	/*
623 	 * If Hypervisor CPUID bit is set, try to determine hypervisor
624 	 * vendor signature, and set platform type accordingly.
625 	 *
626 	 * References:
627 	 * http://lkml.org/lkml/2008/10/1/246
628 	 * http://kb.vmware.com/kb/1009458
629 	 */
630 	cp.cp_eax = 0x1;
631 	(void) __cpuid_insn(&cp);
632 	if ((cp.cp_ecx & CPUID_INTC_ECX_HV) != 0) {
633 		cp.cp_eax = 0x40000000;
634 		(void) __cpuid_insn(&cp);
635 		regs[0] = cp.cp_ebx;
636 		regs[1] = cp.cp_ecx;
637 		regs[2] = cp.cp_edx;
638 		regs[3] = 0;
639 		if (strcmp(hvstr, HVSIG_XEN_HVM) == 0) {
640 			platform_type = HW_XEN_HVM;
641 			return;
642 		}
643 		if (strcmp(hvstr, HVSIG_VMWARE) == 0) {
644 			platform_type = HW_VMWARE;
645 			return;
646 		}
647 		if (strcmp(hvstr, HVSIG_KVM) == 0) {
648 			platform_type = HW_KVM;
649 			return;
650 		}
651 		if (strcmp(hvstr, HVSIG_MICROSOFT) == 0)
652 			platform_type = HW_MICROSOFT;
653 	} else {
654 		/*
655 		 * Check older VMware hardware versions. The VMware hypervisor is
656 		 * detected by performing an IN operation to the VMware hypervisor
657 		 * port and checking that the value returned in %ebx is the VMware
658 		 * hypervisor magic value.
659 		 *
660 		 * References: http://kb.vmware.com/kb/1009458
661 		 */
662 		vmware_port(VMWARE_HVCMD_GETVERSION, regs);
663 		if (regs[1] == VMWARE_HVMAGIC) {
664 			platform_type = HW_VMWARE;
665 			return;
666 		}
667 	}
668 
669 	/*
670 	 * Check Xen hypervisor. In a fully virtualized domain,
671 	 * Xen's pseudo-cpuid function returns a string representing the
672 	 * Xen signature in %ebx, %ecx, and %edx. %eax contains the maximum
673 	 * supported cpuid function. We need at least a (base + 2) leaf value
674 	 * to do what we want to do. Try different base values, since the
675 	 * hypervisor might use a different one depending on whether Hyper-V
676 	 * emulation is switched on by default or not.
677 	 */
678 	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
679 		cp.cp_eax = base;
680 		(void) __cpuid_insn(&cp);
681 		regs[0] = cp.cp_ebx;
682 		regs[1] = cp.cp_ecx;
683 		regs[2] = cp.cp_edx;
684 		regs[3] = 0;
685 		if (strcmp(hvstr, HVSIG_XEN_HVM) == 0 &&
686 		    cp.cp_eax >= (base + 2)) {
687 			platform_type &= ~HW_NATIVE;
688 			platform_type |= HW_XEN_HVM;
689 			return;
690 		}
691 	}
692 }
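
/*
 * Illustrative note on the signature checks above: the 12-byte vendor
 * string is simply %ebx, %ecx and %edx of the hypervisor leaf laid down
 * in memory in that order.  Under Xen HVM, for instance, the registers
 * spell "XenV", "MMXe" and "nVMM", so hvstr compares equal to
 * HVSIG_XEN_HVM ("XenVMMXenVMM").
 */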
693 
694 int
695 get_hwenv(void)
696 {
697 	ASSERT(platform_type != -1);
698 	return (platform_type);
699 }
700 
701 int
702 is_controldom(void)
703 {
704 	return (0);
705 }
706 
707 #else
708 
709 int
710 get_hwenv(void)
711 {
712 	return (HW_XEN_PV);
713 }
714 
715 int
716 is_controldom(void)
717 {
718 	return (DOMAIN_IS_INITDOMAIN(xen_info));
719 }
720 
721 #endif	/* __xpv */
722 
723 static void
724 cpuid_intel_getids(cpu_t *cpu, void *feature)
725 {
726 	uint_t i;
727 	uint_t chipid_shift = 0;
728 	uint_t coreid_shift = 0;
729 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
730 
731 	for (i = 1; i < cpi->cpi_ncpu_per_chip; i <<= 1)
732 		chipid_shift++;
733 
734 	cpi->cpi_chipid = cpi->cpi_apicid >> chipid_shift;
735 	cpi->cpi_clogid = cpi->cpi_apicid & ((1 << chipid_shift) - 1);
736 
737 	if (is_x86_feature(feature, X86FSET_CMP)) {
738 		/*
739 		 * Multi-core (and possibly multi-threaded)
740 		 * processors.
741 		 */
742 		uint_t ncpu_per_core;
743 		if (cpi->cpi_ncore_per_chip == 1)
744 			ncpu_per_core = cpi->cpi_ncpu_per_chip;
745 		else if (cpi->cpi_ncore_per_chip > 1)
746 			ncpu_per_core = cpi->cpi_ncpu_per_chip /
747 			    cpi->cpi_ncore_per_chip;
748 		/*
749 		 * 8bit APIC IDs on dual core Pentiums
750 		 * look like this:
751 		 *
752 		 * +-----------------------+------+------+
753 		 * | Physical Package ID   |  MC  |  HT  |
754 		 * +-----------------------+------+------+
755 		 * <------- chipid -------->
756 		 * <------- coreid --------------->
757 		 *			   <--- clogid -->
758 		 *			   <------>
759 		 *			   pkgcoreid
760 		 *
761 		 * Where the number of bits necessary to
762 		 * represent the MC and HT fields together equals
763 		 * the minimum number of bits necessary to
764 		 * store the value of cpi->cpi_ncpu_per_chip.
765 		 * Of those bits, the MC part uses the number
766 		 * of bits necessary to store the value of
767 		 * cpi->cpi_ncore_per_chip.
768 		 */
769 		for (i = 1; i < ncpu_per_core; i <<= 1)
770 			coreid_shift++;
771 		cpi->cpi_coreid = cpi->cpi_apicid >> coreid_shift;
772 		cpi->cpi_pkgcoreid = cpi->cpi_clogid >> coreid_shift;
773 	} else if (is_x86_feature(feature, X86FSET_HTT)) {
774 		/*
775 		 * Single-core multi-threaded processors.
776 		 */
777 		cpi->cpi_coreid = cpi->cpi_chipid;
778 		cpi->cpi_pkgcoreid = 0;
779 	}
780 	cpi->cpi_procnodeid = cpi->cpi_chipid;
781 	cpi->cpi_compunitid = cpi->cpi_coreid;
782 }
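
/*
 * Worked example for the function above (hypothetical part): with
 * cpi_ncpu_per_chip == 4 and cpi_ncore_per_chip == 2, chipid_shift ends
 * up as 2 and coreid_shift as 1.  An APIC ID of 0x5 then yields
 * cpi_chipid == 1, cpi_clogid == 1, cpi_coreid == 2 and
 * cpi_pkgcoreid == 0, i.e. thread 1 of core 0 in package 1.
 */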
783 
784 static void
785 cpuid_amd_getids(cpu_t *cpu)
786 {
787 	int i, first_half, coreidsz;
788 	uint32_t nb_caps_reg;
789 	uint_t node2_1;
790 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
791 	struct cpuid_regs *cp;
792 
793 	/*
794 	 * AMD CMP chips currently have a single thread per core.
795 	 *
796 	 * Since no two cpus share a core we must assign a distinct coreid
797 	 * per cpu, and we do this by using the cpu_id.  This scheme does not,
798 	 * however, guarantee that sibling cores of a chip will have sequential
799 	 * coreids starting at a multiple of the number of cores per chip -
800 	 * that is usually the case, but if the ACPI MADT table is presented
801 	 * in a different order then we need to perform a few more gymnastics
802 	 * for the pkgcoreid.
803 	 *
804 	 * All processors in the system have the same number of enabled
805 	 * cores. Cores within a processor are always numbered sequentially
806 	 * from 0 regardless of how many or which are disabled, and there
807  * is no way for the operating system to discover the real core id when some
808 	 * are disabled.
809 	 *
810 	 * In family 0x15, the cores come in pairs called compute units. They
811 	 * share I$ and L2 caches and the FPU. Enumeration of this feature is
812 	 * simplified by the new topology extensions CPUID leaf, indicated by
813 	 * the X86 feature X86FSET_TOPOEXT.
814 	 */
815 
816 	cpi->cpi_coreid = cpu->cpu_id;
817 	cpi->cpi_compunitid = cpu->cpu_id;
818 
819 	if (cpi->cpi_xmaxeax >= 0x80000008) {
820 
821 		coreidsz = BITX((cpi)->cpi_extd[8].cp_ecx, 15, 12);
822 
823 		/*
824 		 * In AMD parlance a chip is really a node, while Solaris
825 		 * sees a chip as equivalent to a socket/package.
826 		 */
827 		cpi->cpi_ncore_per_chip =
828 		    BITX((cpi)->cpi_extd[8].cp_ecx, 7, 0) + 1;
829 		if (coreidsz == 0) {
830 			/* Use legacy method */
831 			for (i = 1; i < cpi->cpi_ncore_per_chip; i <<= 1)
832 				coreidsz++;
833 			if (coreidsz == 0)
834 				coreidsz = 1;
835 		}
836 	} else {
837 		/* Assume single-core part */
838 		cpi->cpi_ncore_per_chip = 1;
839 		coreidsz = 1;
840 	}
841 
842 	cpi->cpi_clogid = cpi->cpi_pkgcoreid =
843 	    cpi->cpi_apicid & ((1<<coreidsz) - 1);
844 	cpi->cpi_ncpu_per_chip = cpi->cpi_ncore_per_chip;
845 
846 	/* Get node ID, compute unit ID */
847 	if (is_x86_feature(x86_featureset, X86FSET_TOPOEXT) &&
848 	    cpi->cpi_xmaxeax >= 0x8000001e) {
849 		cp = &cpi->cpi_extd[0x1e];
850 		cp->cp_eax = 0x8000001e;
851 		(void) __cpuid_insn(cp);
852 
853 		cpi->cpi_procnodes_per_pkg = BITX(cp->cp_ecx, 10, 8) + 1;
854 		cpi->cpi_procnodeid = BITX(cp->cp_ecx, 7, 0);
855 		cpi->cpi_cores_per_compunit = BITX(cp->cp_ebx, 15, 8) + 1;
856 		cpi->cpi_compunitid = BITX(cp->cp_ebx, 7, 0)
857 		    + (cpi->cpi_ncore_per_chip / cpi->cpi_cores_per_compunit)
858 		    * (cpi->cpi_procnodeid / cpi->cpi_procnodes_per_pkg);
859 	} else if (cpi->cpi_family == 0xf || cpi->cpi_family >= 0x11) {
860 		cpi->cpi_procnodeid = (cpi->cpi_apicid >> coreidsz) & 7;
861 	} else if (cpi->cpi_family == 0x10) {
862 		/*
863 		 * See if we are a multi-node processor.
864 		 * All processors in the system have the same number of nodes
865 		 */
866 		nb_caps_reg =  pci_getl_func(0, 24, 3, 0xe8);
867 		if ((cpi->cpi_model < 8) || BITX(nb_caps_reg, 29, 29) == 0) {
868 			/* Single-node */
869 			cpi->cpi_procnodeid = BITX(cpi->cpi_apicid, 5,
870 			    coreidsz);
871 		} else {
872 
873 			/*
874 			 * Multi-node revision D (2 nodes per package
875 			 * are supported)
876 			 */
877 			cpi->cpi_procnodes_per_pkg = 2;
878 
879 			first_half = (cpi->cpi_pkgcoreid <=
880 			    (cpi->cpi_ncore_per_chip/2 - 1));
881 
882 			if (cpi->cpi_apicid == cpi->cpi_pkgcoreid) {
883 				/* We are BSP */
884 				cpi->cpi_procnodeid = (first_half ? 0 : 1);
885 			} else {
886 
887 				/* We are AP */
888 				/* NodeId[2:1] bits to use for reading F3xe8 */
889 				node2_1 = BITX(cpi->cpi_apicid, 5, 4) << 1;
890 
891 				nb_caps_reg =
892 				    pci_getl_func(0, 24 + node2_1, 3, 0xe8);
893 
894 				/*
895 				 * Check IntNodeNum bit (31:30, but bit 31 is
896 				 * always 0 on dual-node processors)
897 				 */
898 				if (BITX(nb_caps_reg, 30, 30) == 0)
899 					cpi->cpi_procnodeid = node2_1 +
900 					    !first_half;
901 				else
902 					cpi->cpi_procnodeid = node2_1 +
903 					    first_half;
904 			}
905 		}
906 	} else {
907 		cpi->cpi_procnodeid = 0;
908 	}
909 
910 	cpi->cpi_chipid =
911 	    cpi->cpi_procnodeid / cpi->cpi_procnodes_per_pkg;
912 }
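
/*
 * Illustrative note: the final division above folds nodes back into
 * packages.  On a hypothetical system with two nodes per package,
 * procnodeids 0 and 1 both map to cpi_chipid 0, while procnodeids 2 and
 * 3 map to cpi_chipid 1.
 */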
913 
914 /*
915  * Set up the XFeature_Enabled_Mask register.  Required by the xsave feature.
916  */
917 void
918 setup_xfem(void)
919 {
920 	uint64_t flags = XFEATURE_LEGACY_FP;
921 
922 	ASSERT(is_x86_feature(x86_featureset, X86FSET_XSAVE));
923 
924 	if (is_x86_feature(x86_featureset, X86FSET_SSE))
925 		flags |= XFEATURE_SSE;
926 
927 	if (is_x86_feature(x86_featureset, X86FSET_AVX))
928 		flags |= XFEATURE_AVX;
929 
930 	set_xcr(XFEATURE_ENABLED_MASK, flags);
931 
932 	xsave_bv_all = flags;
933 }
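
/*
 * For example (illustrative only): on a cpu with XSAVE, SSE and AVX all
 * present, the function above writes XFEATURE_LEGACY_FP | XFEATURE_SSE |
 * XFEATURE_AVX into XFEATURE_ENABLED_MASK (XCR0), enabling x87, XMM and
 * YMM state management, and records the same mask in xsave_bv_all.
 */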
934 
935 void
936 cpuid_pass1(cpu_t *cpu, uchar_t *featureset)
937 {
938 	uint32_t mask_ecx, mask_edx;
939 	struct cpuid_info *cpi;
940 	struct cpuid_regs *cp;
941 	int xcpuid;
942 #if !defined(__xpv)
943 	extern int idle_cpu_prefer_mwait;
944 #endif
945 
946 	/*
947 	 * Space is statically allocated for the BSP; ensure the pointer is set
948 	 */
949 	if (cpu->cpu_id == 0) {
950 		if (cpu->cpu_m.mcpu_cpi == NULL)
951 			cpu->cpu_m.mcpu_cpi = &cpuid_info0;
952 	}
953 
954 	add_x86_feature(featureset, X86FSET_CPUID);
955 
956 	cpi = cpu->cpu_m.mcpu_cpi;
957 	ASSERT(cpi != NULL);
958 	cp = &cpi->cpi_std[0];
959 	cp->cp_eax = 0;
960 	cpi->cpi_maxeax = __cpuid_insn(cp);
961 	{
962 		uint32_t *iptr = (uint32_t *)cpi->cpi_vendorstr;
963 		*iptr++ = cp->cp_ebx;
964 		*iptr++ = cp->cp_edx;
965 		*iptr++ = cp->cp_ecx;
966 		*(char *)&cpi->cpi_vendorstr[12] = '\0';
967 	}
968 
969 	cpi->cpi_vendor = _cpuid_vendorstr_to_vendorcode(cpi->cpi_vendorstr);
970 	x86_vendor = cpi->cpi_vendor; /* for compatibility */
971 
972 	/*
973 	 * Limit the range in case of weird hardware
974 	 */
975 	if (cpi->cpi_maxeax > CPI_MAXEAX_MAX)
976 		cpi->cpi_maxeax = CPI_MAXEAX_MAX;
977 	if (cpi->cpi_maxeax < 1)
978 		goto pass1_done;
979 
980 	cp = &cpi->cpi_std[1];
981 	cp->cp_eax = 1;
982 	(void) __cpuid_insn(cp);
983 
984 	/*
985 	 * Extract identifying constants for easy access.
986 	 */
987 	cpi->cpi_model = CPI_MODEL(cpi);
988 	cpi->cpi_family = CPI_FAMILY(cpi);
989 
990 	if (cpi->cpi_family == 0xf)
991 		cpi->cpi_family += CPI_FAMILY_XTD(cpi);
992 
993 	/*
994 	 * Beware: AMD uses "extended model" iff base *FAMILY* == 0xf.
995 	 * Intel, and presumably everyone else, uses model == 0xf, as
996 	 * one would expect (max value means possible overflow).  Sigh.
997 	 */
998 
999 	switch (cpi->cpi_vendor) {
1000 	case X86_VENDOR_Intel:
1001 		if (IS_EXTENDED_MODEL_INTEL(cpi))
1002 			cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
1003 		break;
1004 	case X86_VENDOR_AMD:
1005 		if (CPI_FAMILY(cpi) == 0xf)
1006 			cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
1007 		break;
1008 	default:
1009 		if (cpi->cpi_model == 0xf)
1010 			cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
1011 		break;
1012 	}
1013 
1014 	cpi->cpi_step = CPI_STEP(cpi);
1015 	cpi->cpi_brandid = CPI_BRANDID(cpi);
1016 
1017 	/*
1018 	 * *default* assumptions:
1019 	 * - believe %edx feature word
1020 	 * - ignore %ecx feature word
1021 	 * - 32-bit virtual and physical addressing
1022 	 */
1023 	mask_edx = 0xffffffff;
1024 	mask_ecx = 0;
1025 
1026 	cpi->cpi_pabits = cpi->cpi_vabits = 32;
1027 
1028 	switch (cpi->cpi_vendor) {
1029 	case X86_VENDOR_Intel:
1030 		if (cpi->cpi_family == 5)
1031 			x86_type = X86_TYPE_P5;
1032 		else if (IS_LEGACY_P6(cpi)) {
1033 			x86_type = X86_TYPE_P6;
1034 			pentiumpro_bug4046376 = 1;
1035 			/*
1036 			 * Clear the SEP bit when it was set erroneously
1037 			 */
1038 			if (cpi->cpi_model < 3 && cpi->cpi_step < 3)
1039 				cp->cp_edx &= ~CPUID_INTC_EDX_SEP;
1040 		} else if (IS_NEW_F6(cpi) || cpi->cpi_family == 0xf) {
1041 			x86_type = X86_TYPE_P4;
1042 			/*
1043 			 * We don't currently depend on any of the %ecx
1044 			 * features until Prescott, so we'll only check
1045 			 * this from P4 onwards.  We might want to revisit
1046 			 * that idea later.
1047 			 */
1048 			mask_ecx = 0xffffffff;
1049 		} else if (cpi->cpi_family > 0xf)
1050 			mask_ecx = 0xffffffff;
1051 		/*
1052 		 * We don't support MONITOR/MWAIT if leaf 5 is not available
1053 		 * to obtain the monitor linesize.
1054 		 */
1055 		if (cpi->cpi_maxeax < 5)
1056 			mask_ecx &= ~CPUID_INTC_ECX_MON;
1057 		break;
1058 	case X86_VENDOR_IntelClone:
1059 	default:
1060 		break;
1061 	case X86_VENDOR_AMD:
1062 #if defined(OPTERON_ERRATUM_108)
1063 		if (cpi->cpi_family == 0xf && cpi->cpi_model == 0xe) {
1064 			cp->cp_eax = (0xf0f & cp->cp_eax) | 0xc0;
1065 			cpi->cpi_model = 0xc;
1066 		} else
1067 #endif
1068 		if (cpi->cpi_family == 5) {
1069 			/*
1070 			 * AMD K5 and K6
1071 			 *
1072 			 * These CPUs have an incomplete implementation
1073 			 * of MCA/MCE which we mask away.
1074 			 */
1075 			mask_edx &= ~(CPUID_INTC_EDX_MCE | CPUID_INTC_EDX_MCA);
1076 
1077 			/*
1078 			 * Model 0 uses the wrong (APIC) bit
1079 			 * to indicate PGE.  Fix it here.
1080 			 */
1081 			if (cpi->cpi_model == 0) {
1082 				if (cp->cp_edx & 0x200) {
1083 					cp->cp_edx &= ~0x200;
1084 					cp->cp_edx |= CPUID_INTC_EDX_PGE;
1085 				}
1086 			}
1087 
1088 			/*
1089 			 * Early models had problems w/ MMX; disable.
1090 			 */
1091 			if (cpi->cpi_model < 6)
1092 				mask_edx &= ~CPUID_INTC_EDX_MMX;
1093 		}
1094 
1095 		/*
1096 		 * For newer families, SSE3 and CX16, at least, are valid;
1097 		 * enable all
1098 		 */
1099 		if (cpi->cpi_family >= 0xf)
1100 			mask_ecx = 0xffffffff;
1101 		/*
1102 		 * We don't support MONITOR/MWAIT if leaf 5 is not available
1103 		 * to obtain the monitor linesize.
1104 		 */
1105 		if (cpi->cpi_maxeax < 5)
1106 			mask_ecx &= ~CPUID_INTC_ECX_MON;
1107 
1108 #if !defined(__xpv)
1109 		/*
1110 		 * Do not use MONITOR/MWAIT to halt in the idle loop on any AMD
1111 		 * processors.  AMD does not intend MWAIT to be used in the cpu
1112 		 * idle loop on current and future processors.  10h and future
1113 		 * AMD processors use more power in MWAIT than HLT.
1114 		 * Pre-family-10h Opterons do not have the MWAIT instruction.
1115 		 */
1116 		idle_cpu_prefer_mwait = 0;
1117 #endif
1118 
1119 		break;
1120 	case X86_VENDOR_TM:
1121 		/*
1122 		 * workaround the NT workaround in CMS 4.1
1123 		 */
1124 		if (cpi->cpi_family == 5 && cpi->cpi_model == 4 &&
1125 		    (cpi->cpi_step == 2 || cpi->cpi_step == 3))
1126 			cp->cp_edx |= CPUID_INTC_EDX_CX8;
1127 		break;
1128 	case X86_VENDOR_Centaur:
1129 		/*
1130 		 * workaround the NT workarounds again
1131 		 */
1132 		if (cpi->cpi_family == 6)
1133 			cp->cp_edx |= CPUID_INTC_EDX_CX8;
1134 		break;
1135 	case X86_VENDOR_Cyrix:
1136 		/*
1137 		 * We rely heavily on the probing in locore
1138 		 * to actually figure out what parts, if any,
1139 		 * of the Cyrix cpuid instruction to believe.
1140 		 */
1141 		switch (x86_type) {
1142 		case X86_TYPE_CYRIX_486:
1143 			mask_edx = 0;
1144 			break;
1145 		case X86_TYPE_CYRIX_6x86:
1146 			mask_edx = 0;
1147 			break;
1148 		case X86_TYPE_CYRIX_6x86L:
1149 			mask_edx =
1150 			    CPUID_INTC_EDX_DE |
1151 			    CPUID_INTC_EDX_CX8;
1152 			break;
1153 		case X86_TYPE_CYRIX_6x86MX:
1154 			mask_edx =
1155 			    CPUID_INTC_EDX_DE |
1156 			    CPUID_INTC_EDX_MSR |
1157 			    CPUID_INTC_EDX_CX8 |
1158 			    CPUID_INTC_EDX_PGE |
1159 			    CPUID_INTC_EDX_CMOV |
1160 			    CPUID_INTC_EDX_MMX;
1161 			break;
1162 		case X86_TYPE_CYRIX_GXm:
1163 			mask_edx =
1164 			    CPUID_INTC_EDX_MSR |
1165 			    CPUID_INTC_EDX_CX8 |
1166 			    CPUID_INTC_EDX_CMOV |
1167 			    CPUID_INTC_EDX_MMX;
1168 			break;
1169 		case X86_TYPE_CYRIX_MediaGX:
1170 			break;
1171 		case X86_TYPE_CYRIX_MII:
1172 		case X86_TYPE_VIA_CYRIX_III:
1173 			mask_edx =
1174 			    CPUID_INTC_EDX_DE |
1175 			    CPUID_INTC_EDX_TSC |
1176 			    CPUID_INTC_EDX_MSR |
1177 			    CPUID_INTC_EDX_CX8 |
1178 			    CPUID_INTC_EDX_PGE |
1179 			    CPUID_INTC_EDX_CMOV |
1180 			    CPUID_INTC_EDX_MMX;
1181 			break;
1182 		default:
1183 			break;
1184 		}
1185 		break;
1186 	}
1187 
1188 #if defined(__xpv)
1189 	/*
1190 	 * Do not support MONITOR/MWAIT under a hypervisor
1191 	 */
1192 	mask_ecx &= ~CPUID_INTC_ECX_MON;
1193 	/*
1194 	 * Do not support XSAVE under a hypervisor for now
1195 	 */
1196 	xsave_force_disable = B_TRUE;
1197 
1198 #endif	/* __xpv */
1199 
1200 	if (xsave_force_disable) {
1201 		mask_ecx &= ~CPUID_INTC_ECX_XSAVE;
1202 		mask_ecx &= ~CPUID_INTC_ECX_AVX;
1203 		mask_ecx &= ~CPUID_INTC_ECX_F16C;
1204 	}
1205 
1206 	/*
1207 	 * Now we've figured out the masks that determine
1208 	 * which bits we choose to believe, apply the masks
1209 	 * to the feature words, then map the kernel's view
1210 	 * of these feature words into its feature word.
1211 	 */
1212 	cp->cp_edx &= mask_edx;
1213 	cp->cp_ecx &= mask_ecx;
1214 
1215 	/*
1216 	 * apply any platform restrictions (we don't call this
1217 	 * immediately after __cpuid_insn here, because we need the
1218 	 * workarounds applied above first)
1219 	 */
1220 	platform_cpuid_mangle(cpi->cpi_vendor, 1, cp);
1221 
1222 	/*
1223 	 * fold in overrides from the "eeprom" mechanism
1224 	 */
1225 	cp->cp_edx |= cpuid_feature_edx_include;
1226 	cp->cp_edx &= ~cpuid_feature_edx_exclude;
1227 
1228 	cp->cp_ecx |= cpuid_feature_ecx_include;
1229 	cp->cp_ecx &= ~cpuid_feature_ecx_exclude;
1230 
1231 	if (cp->cp_edx & CPUID_INTC_EDX_PSE) {
1232 		add_x86_feature(featureset, X86FSET_LARGEPAGE);
1233 	}
1234 	if (cp->cp_edx & CPUID_INTC_EDX_TSC) {
1235 		add_x86_feature(featureset, X86FSET_TSC);
1236 	}
1237 	if (cp->cp_edx & CPUID_INTC_EDX_MSR) {
1238 		add_x86_feature(featureset, X86FSET_MSR);
1239 	}
1240 	if (cp->cp_edx & CPUID_INTC_EDX_MTRR) {
1241 		add_x86_feature(featureset, X86FSET_MTRR);
1242 	}
1243 	if (cp->cp_edx & CPUID_INTC_EDX_PGE) {
1244 		add_x86_feature(featureset, X86FSET_PGE);
1245 	}
1246 	if (cp->cp_edx & CPUID_INTC_EDX_CMOV) {
1247 		add_x86_feature(featureset, X86FSET_CMOV);
1248 	}
1249 	if (cp->cp_edx & CPUID_INTC_EDX_MMX) {
1250 		add_x86_feature(featureset, X86FSET_MMX);
1251 	}
1252 	if ((cp->cp_edx & CPUID_INTC_EDX_MCE) != 0 &&
1253 	    (cp->cp_edx & CPUID_INTC_EDX_MCA) != 0) {
1254 		add_x86_feature(featureset, X86FSET_MCA);
1255 	}
1256 	if (cp->cp_edx & CPUID_INTC_EDX_PAE) {
1257 		add_x86_feature(featureset, X86FSET_PAE);
1258 	}
1259 	if (cp->cp_edx & CPUID_INTC_EDX_CX8) {
1260 		add_x86_feature(featureset, X86FSET_CX8);
1261 	}
1262 	if (cp->cp_ecx & CPUID_INTC_ECX_CX16) {
1263 		add_x86_feature(featureset, X86FSET_CX16);
1264 	}
1265 	if (cp->cp_edx & CPUID_INTC_EDX_PAT) {
1266 		add_x86_feature(featureset, X86FSET_PAT);
1267 	}
1268 	if (cp->cp_edx & CPUID_INTC_EDX_SEP) {
1269 		add_x86_feature(featureset, X86FSET_SEP);
1270 	}
1271 	if (cp->cp_edx & CPUID_INTC_EDX_FXSR) {
1272 		/*
1273 		 * In our implementation, fxsave/fxrstor
1274 		 * are prerequisites before we'll even
1275 		 * try and do SSE things.
1276 		 */
1277 		if (cp->cp_edx & CPUID_INTC_EDX_SSE) {
1278 			add_x86_feature(featureset, X86FSET_SSE);
1279 		}
1280 		if (cp->cp_edx & CPUID_INTC_EDX_SSE2) {
1281 			add_x86_feature(featureset, X86FSET_SSE2);
1282 		}
1283 		if (cp->cp_ecx & CPUID_INTC_ECX_SSE3) {
1284 			add_x86_feature(featureset, X86FSET_SSE3);
1285 		}
1286 		if (cp->cp_ecx & CPUID_INTC_ECX_SSSE3) {
1287 			add_x86_feature(featureset, X86FSET_SSSE3);
1288 		}
1289 		if (cp->cp_ecx & CPUID_INTC_ECX_SSE4_1) {
1290 			add_x86_feature(featureset, X86FSET_SSE4_1);
1291 		}
1292 		if (cp->cp_ecx & CPUID_INTC_ECX_SSE4_2) {
1293 			add_x86_feature(featureset, X86FSET_SSE4_2);
1294 		}
1295 		if (cp->cp_ecx & CPUID_INTC_ECX_AES) {
1296 			add_x86_feature(featureset, X86FSET_AES);
1297 		}
1298 		if (cp->cp_ecx & CPUID_INTC_ECX_PCLMULQDQ) {
1299 			add_x86_feature(featureset, X86FSET_PCLMULQDQ);
1300 		}
1301 
1302 		if (cp->cp_ecx & CPUID_INTC_ECX_XSAVE) {
1303 			add_x86_feature(featureset, X86FSET_XSAVE);
1304 
1305 			/* We only test AVX when there is XSAVE */
1306 			if (cp->cp_ecx & CPUID_INTC_ECX_AVX) {
1307 				add_x86_feature(featureset,
1308 				    X86FSET_AVX);
1309 
1310 				if (cp->cp_ecx & CPUID_INTC_ECX_F16C)
1311 					add_x86_feature(featureset,
1312 					    X86FSET_F16C);
1313 			}
1314 		}
1315 	}
1316 	if (cp->cp_ecx & CPUID_INTC_ECX_X2APIC) {
1317 		add_x86_feature(featureset, X86FSET_X2APIC);
1318 	}
1319 	if (cp->cp_edx & CPUID_INTC_EDX_DE) {
1320 		add_x86_feature(featureset, X86FSET_DE);
1321 	}
1322 #if !defined(__xpv)
1323 	if (cp->cp_ecx & CPUID_INTC_ECX_MON) {
1324 
1325 		/*
1326 		 * We require the CLFLUSH instruction for the erratum
1327 		 * workaround needed when using MONITOR/MWAIT.
1328 		 */
1329 		if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) {
1330 			cpi->cpi_mwait.support |= MWAIT_SUPPORT;
1331 			add_x86_feature(featureset, X86FSET_MWAIT);
1332 		} else {
1333 			extern int idle_cpu_assert_cflush_monitor;
1334 
1335 			/*
1336 			 * All processors we are aware of which have
1337 			 * MONITOR/MWAIT also have CLFLUSH.
1338 			 */
1339 			if (idle_cpu_assert_cflush_monitor) {
1340 				ASSERT((cp->cp_ecx & CPUID_INTC_ECX_MON) &&
1341 				    (cp->cp_edx & CPUID_INTC_EDX_CLFSH));
1342 			}
1343 		}
1344 	}
1345 #endif	/* __xpv */
1346 
1347 	if (cp->cp_ecx & CPUID_INTC_ECX_VMX) {
1348 		add_x86_feature(featureset, X86FSET_VMX);
1349 	}
1350 
1351 	if (cp->cp_ecx & CPUID_INTC_ECX_RDRAND)
1352 		add_x86_feature(featureset, X86FSET_RDRAND);
1353 
1354 	/*
1355 	 * We only need this the first time; the rest of the cpus follow suit.
1356 	 * We only capture this for the boot cpu.
1357 	 */
1358 	if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) {
1359 		add_x86_feature(featureset, X86FSET_CLFSH);
1360 		x86_clflush_size = (BITX(cp->cp_ebx, 15, 8) * 8);
1361 	}
1362 	if (is_x86_feature(featureset, X86FSET_PAE))
1363 		cpi->cpi_pabits = 36;
1364 
1365 	/*
1366 	 * Hyperthreading configuration is slightly tricky on Intel
1367 	 * and pure clones, and even trickier on AMD.
1368 	 *
1369 	 * (AMD chose to set the HTT bit on their CMP processors,
1370 	 * even though they're not actually hyperthreaded.  Thus it
1371 	 * takes a bit more work to figure out what's really going
1372 	 * on ... see the handling of the CMP_LGCY bit below)
1373 	 */
1374 	if (cp->cp_edx & CPUID_INTC_EDX_HTT) {
1375 		cpi->cpi_ncpu_per_chip = CPI_CPU_COUNT(cpi);
1376 		if (cpi->cpi_ncpu_per_chip > 1)
1377 			add_x86_feature(featureset, X86FSET_HTT);
1378 	} else {
1379 		cpi->cpi_ncpu_per_chip = 1;
1380 	}
1381 
1382 	/*
1383 	 * Work on the "extended" feature information, doing
1384 	 * some basic initialization for cpuid_pass2()
1385 	 */
1386 	xcpuid = 0;
1387 	switch (cpi->cpi_vendor) {
1388 	case X86_VENDOR_Intel:
1389 		if (IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf)
1390 			xcpuid++;
1391 		break;
1392 	case X86_VENDOR_AMD:
1393 		if (cpi->cpi_family > 5 ||
1394 		    (cpi->cpi_family == 5 && cpi->cpi_model >= 1))
1395 			xcpuid++;
1396 		break;
1397 	case X86_VENDOR_Cyrix:
1398 		/*
1399 		 * Only these Cyrix CPUs are -known- to support
1400 		 * extended cpuid operations.
1401 		 */
1402 		if (x86_type == X86_TYPE_VIA_CYRIX_III ||
1403 		    x86_type == X86_TYPE_CYRIX_GXm)
1404 			xcpuid++;
1405 		break;
1406 	case X86_VENDOR_Centaur:
1407 	case X86_VENDOR_TM:
1408 	default:
1409 		xcpuid++;
1410 		break;
1411 	}
1412 
1413 	if (xcpuid) {
1414 		cp = &cpi->cpi_extd[0];
1415 		cp->cp_eax = 0x80000000;
1416 		cpi->cpi_xmaxeax = __cpuid_insn(cp);
1417 	}
1418 
1419 	if (cpi->cpi_xmaxeax & 0x80000000) {
1420 
1421 		if (cpi->cpi_xmaxeax > CPI_XMAXEAX_MAX)
1422 			cpi->cpi_xmaxeax = CPI_XMAXEAX_MAX;
1423 
1424 		switch (cpi->cpi_vendor) {
1425 		case X86_VENDOR_Intel:
1426 		case X86_VENDOR_AMD:
1427 			if (cpi->cpi_xmaxeax < 0x80000001)
1428 				break;
1429 			cp = &cpi->cpi_extd[1];
1430 			cp->cp_eax = 0x80000001;
1431 			(void) __cpuid_insn(cp);
1432 
1433 			if (cpi->cpi_vendor == X86_VENDOR_AMD &&
1434 			    cpi->cpi_family == 5 &&
1435 			    cpi->cpi_model == 6 &&
1436 			    cpi->cpi_step == 6) {
1437 				/*
1438 				 * K6 model 6 uses bit 10 to indicate SYSC.
1439 				 * Later models use bit 11.  Fix it here.
1440 				 */
1441 				if (cp->cp_edx & 0x400) {
1442 					cp->cp_edx &= ~0x400;
1443 					cp->cp_edx |= CPUID_AMD_EDX_SYSC;
1444 				}
1445 			}
1446 
1447 			platform_cpuid_mangle(cpi->cpi_vendor, 0x80000001, cp);
1448 
1449 			/*
1450 			 * Compute the additions to the kernel's feature word.
1451 			 */
1452 			if (cp->cp_edx & CPUID_AMD_EDX_NX) {
1453 				add_x86_feature(featureset, X86FSET_NX);
1454 			}
1455 
1456 			/*
1457 			 * Regardless of whether or not we boot 64-bit,
1458 			 * we should have a way to identify whether
1459 			 * the CPU is capable of running 64-bit.
1460 			 */
1461 			if (cp->cp_edx & CPUID_AMD_EDX_LM) {
1462 				add_x86_feature(featureset, X86FSET_64);
1463 			}
1464 
1465 #if defined(__amd64)
1466 			/* 1 GB large page - enable only for 64 bit kernel */
1467 			if (cp->cp_edx & CPUID_AMD_EDX_1GPG) {
1468 				add_x86_feature(featureset, X86FSET_1GPG);
1469 			}
1470 #endif
1471 
1472 			if ((cpi->cpi_vendor == X86_VENDOR_AMD) &&
1473 			    (cpi->cpi_std[1].cp_edx & CPUID_INTC_EDX_FXSR) &&
1474 			    (cp->cp_ecx & CPUID_AMD_ECX_SSE4A)) {
1475 				add_x86_feature(featureset, X86FSET_SSE4A);
1476 			}
1477 
1478 			/*
1479 			 * If both the HTT and CMP_LGCY bits are set,
1480 			 * then we're not actually HyperThreaded.  Read
1481 			 * "AMD CPUID Specification" for more details.
1482 			 */
1483 			if (cpi->cpi_vendor == X86_VENDOR_AMD &&
1484 			    is_x86_feature(featureset, X86FSET_HTT) &&
1485 			    (cp->cp_ecx & CPUID_AMD_ECX_CMP_LGCY)) {
1486 				remove_x86_feature(featureset, X86FSET_HTT);
1487 				add_x86_feature(featureset, X86FSET_CMP);
1488 			}
1489 #if defined(__amd64)
1490 			/*
1491 			 * It's really tricky to support syscall/sysret in
1492 			 * the i386 kernel; we rely on sysenter/sysexit
1493 			 * instead.  In the amd64 kernel, things are -way-
1494 			 * better.
1495 			 */
1496 			if (cp->cp_edx & CPUID_AMD_EDX_SYSC) {
1497 				add_x86_feature(featureset, X86FSET_ASYSC);
1498 			}
1499 
1500 			/*
1501 			 * While we're thinking about system calls, note
1502 			 * that AMD processors don't support sysenter
1503 			 * in long mode at all, so don't try to program them.
1504 			 */
1505 			if (x86_vendor == X86_VENDOR_AMD) {
1506 				remove_x86_feature(featureset, X86FSET_SEP);
1507 			}
1508 #endif
1509 			if (cp->cp_edx & CPUID_AMD_EDX_TSCP) {
1510 				add_x86_feature(featureset, X86FSET_TSCP);
1511 			}
1512 
1513 			if (cp->cp_ecx & CPUID_AMD_ECX_SVM) {
1514 				add_x86_feature(featureset, X86FSET_SVM);
1515 			}
1516 
1517 			if (cp->cp_ecx & CPUID_AMD_ECX_TOPOEXT) {
1518 				add_x86_feature(featureset, X86FSET_TOPOEXT);
1519 			}
1520 			break;
1521 		default:
1522 			break;
1523 		}
1524 
1525 		/*
1526 		 * Get CPUID data about processor cores and hyperthreads.
1527 		 */
1528 		switch (cpi->cpi_vendor) {
1529 		case X86_VENDOR_Intel:
1530 			if (cpi->cpi_maxeax >= 4) {
1531 				cp = &cpi->cpi_std[4];
1532 				cp->cp_eax = 4;
1533 				cp->cp_ecx = 0;
1534 				(void) __cpuid_insn(cp);
1535 				platform_cpuid_mangle(cpi->cpi_vendor, 4, cp);
1536 			}
1537 			/*FALLTHROUGH*/
1538 		case X86_VENDOR_AMD:
1539 			if (cpi->cpi_xmaxeax < 0x80000008)
1540 				break;
1541 			cp = &cpi->cpi_extd[8];
1542 			cp->cp_eax = 0x80000008;
1543 			(void) __cpuid_insn(cp);
1544 			platform_cpuid_mangle(cpi->cpi_vendor, 0x80000008, cp);
1545 
1546 			/*
1547 			 * Virtual and physical address limits from
1548 			 * cpuid override previously guessed values.
1549 			 */
1550 			cpi->cpi_pabits = BITX(cp->cp_eax, 7, 0);
1551 			cpi->cpi_vabits = BITX(cp->cp_eax, 15, 8);
1552 			break;
1553 		default:
1554 			break;
1555 		}
1556 
1557 		/*
1558 		 * Derive the number of cores per chip
1559 		 */
1560 		switch (cpi->cpi_vendor) {
1561 		case X86_VENDOR_Intel:
1562 			if (cpi->cpi_maxeax < 4) {
1563 				cpi->cpi_ncore_per_chip = 1;
1564 				break;
1565 			} else {
1566 				cpi->cpi_ncore_per_chip =
1567 				    BITX((cpi)->cpi_std[4].cp_eax, 31, 26) + 1;
1568 			}
1569 			break;
1570 		case X86_VENDOR_AMD:
1571 			if (cpi->cpi_xmaxeax < 0x80000008) {
1572 				cpi->cpi_ncore_per_chip = 1;
1573 				break;
1574 			} else {
1575 				/*
1576 				 * On family 0xf cpuid fn 0x80000008 ECX[7:0] "NC" is
1577 				 * 1 less than the number of physical cores on
1578 				 * the chip.  In family 0x10 this value can
1579 				 * be affected by "downcoring" - it reflects
1580 				 * 1 less than the number of cores actually
1581 				 * enabled on this node.
1582 				 */
1583 				cpi->cpi_ncore_per_chip =
1584 				    BITX((cpi)->cpi_extd[8].cp_ecx, 7, 0) + 1;
1585 			}
1586 			break;
1587 		default:
1588 			cpi->cpi_ncore_per_chip = 1;
1589 			break;
1590 		}
1591 
1592 		/*
1593 		 * Get CPUID data about TSC Invariance in Deep C-State.
1594 		 */
1595 		switch (cpi->cpi_vendor) {
1596 		case X86_VENDOR_Intel:
1597 			if (cpi->cpi_xmaxeax >= 0x80000007) {
1598 				cp = &cpi->cpi_extd[7];
1599 				cp->cp_eax = 0x80000007;
1600 				cp->cp_ecx = 0;
1601 				(void) __cpuid_insn(cp);
1602 			}
1603 			break;
1604 		default:
1605 			break;
1606 		}
1607 	} else {
1608 		cpi->cpi_ncore_per_chip = 1;
1609 	}
1610 
1611 	/*
1612 	 * If more than one core, then this processor is CMP.
1613 	 */
1614 	if (cpi->cpi_ncore_per_chip > 1) {
1615 		add_x86_feature(featureset, X86FSET_CMP);
1616 	}
1617 
1618 	/*
1619 	 * If the number of cores is the same as the number
1620 	 * of CPUs, then we cannot have HyperThreading.
1621 	 */
1622 	if (cpi->cpi_ncpu_per_chip == cpi->cpi_ncore_per_chip) {
1623 		remove_x86_feature(featureset, X86FSET_HTT);
1624 	}
1625 
1626 	cpi->cpi_apicid = CPI_APIC_ID(cpi);
1627 	cpi->cpi_procnodes_per_pkg = 1;
1628 	cpi->cpi_cores_per_compunit = 1;
1629 	if (is_x86_feature(featureset, X86FSET_HTT) == B_FALSE &&
1630 	    is_x86_feature(featureset, X86FSET_CMP) == B_FALSE) {
1631 		/*
1632 		 * Single-core single-threaded processors.
1633 		 */
1634 		cpi->cpi_chipid = -1;
1635 		cpi->cpi_clogid = 0;
1636 		cpi->cpi_coreid = cpu->cpu_id;
1637 		cpi->cpi_pkgcoreid = 0;
1638 		if (cpi->cpi_vendor == X86_VENDOR_AMD)
1639 			cpi->cpi_procnodeid = BITX(cpi->cpi_apicid, 3, 0);
1640 		else
1641 			cpi->cpi_procnodeid = cpi->cpi_chipid;
1642 	} else if (cpi->cpi_ncpu_per_chip > 1) {
1643 		if (cpi->cpi_vendor == X86_VENDOR_Intel)
1644 			cpuid_intel_getids(cpu, featureset);
1645 		else if (cpi->cpi_vendor == X86_VENDOR_AMD)
1646 			cpuid_amd_getids(cpu);
1647 		else {
1648 			/*
1649 			 * All other processors are currently
1650 			 * assumed to have single cores.
1651 			 */
1652 			cpi->cpi_coreid = cpi->cpi_chipid;
1653 			cpi->cpi_pkgcoreid = 0;
1654 			cpi->cpi_procnodeid = cpi->cpi_chipid;
1655 			cpi->cpi_compunitid = cpi->cpi_chipid;
1656 		}
1657 	}
1658 
1659 	/*
1660 	 * Synthesize chip "revision" and socket type
1661 	 */
1662 	cpi->cpi_chiprev = _cpuid_chiprev(cpi->cpi_vendor, cpi->cpi_family,
1663 	    cpi->cpi_model, cpi->cpi_step);
1664 	cpi->cpi_chiprevstr = _cpuid_chiprevstr(cpi->cpi_vendor,
1665 	    cpi->cpi_family, cpi->cpi_model, cpi->cpi_step);
1666 	cpi->cpi_socket = _cpuid_skt(cpi->cpi_vendor, cpi->cpi_family,
1667 	    cpi->cpi_model, cpi->cpi_step);
1668 
1669 pass1_done:
1670 	cpi->cpi_pass = 1;
1671 }
1672 
1673 /*
1674  * Make copies of the cpuid table entries we depend on, in
1675  * part for ease of parsing now, in part so that we have only
1676  * one place to correct any of it, in part for ease of
1677  * later export to userland, and in part so we can look at
1678  * this stuff in a crash dump.
1679  */
1680 
1681 /*ARGSUSED*/
1682 void
1683 cpuid_pass2(cpu_t *cpu)
1684 {
1685 	uint_t n, nmax;
1686 	int i;
1687 	struct cpuid_regs *cp;
1688 	uint8_t *dp;
1689 	uint32_t *iptr;
1690 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
1691 
1692 	ASSERT(cpi->cpi_pass == 1);
1693 
1694 	if (cpi->cpi_maxeax < 1)
1695 		goto pass2_done;
1696 
1697 	if ((nmax = cpi->cpi_maxeax + 1) > NMAX_CPI_STD)
1698 		nmax = NMAX_CPI_STD;
1699 	/*
1700 	 * (We already handled n == 0 and n == 1 in pass 1)
1701 	 */
1702 	for (n = 2, cp = &cpi->cpi_std[2]; n < nmax; n++, cp++) {
1703 		cp->cp_eax = n;
1704 
1705 		/*
1706 		 * CPUID function 4 expects %ecx to be initialized
1707 		 * with an index which indicates which cache to return
1708 		 * information about. The OS is expected to call function 4
1709 		 * with %ecx set to 0, 1, 2, ... until it returns with
1710 		 * EAX[4:0] set to 0, which indicates there are no more
1711 		 * caches.
1712 		 *
1713 		 * Here, populate cpi_std[4] with the information returned by
1714 		 * function 4 when %ecx == 0, and do the rest in cpuid_pass3()
1715 		 * when dynamic memory allocation becomes available.
1716 		 *
1717 		 * Note: we need to explicitly initialize %ecx here, since
1718 		 * function 4 may have been previously invoked.
1719 		 */
1720 		if (n == 4)
1721 			cp->cp_ecx = 0;
1722 
1723 		(void) __cpuid_insn(cp);
1724 		platform_cpuid_mangle(cpi->cpi_vendor, n, cp);
1725 		switch (n) {
1726 		case 2:
1727 			/*
1728 			 * "the lower 8 bits of the %eax register
1729 			 * contain a value that identifies the number
1730 			 * of times the cpuid [instruction] has to be
1731 			 * executed to obtain a complete image of the
1732 			 * processor's caching systems."
1733 			 *
1734 			 * How *do* they make this stuff up?
1735 			 */
1736 			cpi->cpi_ncache = sizeof (*cp) *
1737 			    BITX(cp->cp_eax, 7, 0);
1738 			if (cpi->cpi_ncache == 0)
1739 				break;
1740 			cpi->cpi_ncache--;	/* skip count byte */
1741 
1742 			/*
1743 			 * Well, for now, rather than attempt to implement
1744 			 * this slightly dubious algorithm, we just look
1745 			 * at the first 15 ..
1746 			 */
1747 			if (cpi->cpi_ncache > (sizeof (*cp) - 1))
1748 				cpi->cpi_ncache = sizeof (*cp) - 1;
1749 
1750 			dp = cpi->cpi_cacheinfo;
1751 			if (BITX(cp->cp_eax, 31, 31) == 0) {
1752 				uint8_t *p = (void *)&cp->cp_eax;
1753 				for (i = 1; i < 4; i++)
1754 					if (p[i] != 0)
1755 						*dp++ = p[i];
1756 			}
1757 			if (BITX(cp->cp_ebx, 31, 31) == 0) {
1758 				uint8_t *p = (void *)&cp->cp_ebx;
1759 				for (i = 0; i < 4; i++)
1760 					if (p[i] != 0)
1761 						*dp++ = p[i];
1762 			}
1763 			if (BITX(cp->cp_ecx, 31, 31) == 0) {
1764 				uint8_t *p = (void *)&cp->cp_ecx;
1765 				for (i = 0; i < 4; i++)
1766 					if (p[i] != 0)
1767 						*dp++ = p[i];
1768 			}
1769 			if (BITX(cp->cp_edx, 31, 31) == 0) {
1770 				uint8_t *p = (void *)&cp->cp_edx;
1771 				for (i = 0; i < 4; i++)
1772 					if (p[i] != 0)
1773 						*dp++ = p[i];
1774 			}
1775 			break;
1776 
1777 		case 3:	/* Processor serial number, if PSN supported */
1778 			break;
1779 
1780 		case 4:	/* Deterministic cache parameters */
1781 			break;
1782 
1783 		case 5:	/* Monitor/Mwait parameters */
1784 		{
1785 			size_t mwait_size;
1786 
1787 			/*
1788 			 * Check cpi_mwait.support, which was set in cpuid_pass1.
1789 			 */
1790 			if (!(cpi->cpi_mwait.support & MWAIT_SUPPORT))
1791 				break;
1792 
1793 			/*
1794 			 * Protect ourselves from an insane mwait line size.
1795 			 * Workaround for incomplete hardware emulator(s).
1796 			 */
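			/*
			 * MWAIT_SIZE_MIN/MAX decode CPUID function 5, which
			 * reports the smallest and largest monitor-line
			 * sizes in %eax and %ebx respectively.
			 */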
1797 			mwait_size = (size_t)MWAIT_SIZE_MAX(cpi);
1798 			if (mwait_size < sizeof (uint32_t) ||
1799 			    !ISP2(mwait_size)) {
1800 #if DEBUG
1801 				cmn_err(CE_NOTE, "Cannot handle cpu %d mwait "
1802 				    "size %ld", cpu->cpu_id, (long)mwait_size);
1803 #endif
1804 				break;
1805 			}
1806 
1807 			cpi->cpi_mwait.mon_min = (size_t)MWAIT_SIZE_MIN(cpi);
1808 			cpi->cpi_mwait.mon_max = mwait_size;
1809 			if (MWAIT_EXTENSION(cpi)) {
1810 				cpi->cpi_mwait.support |= MWAIT_EXTENSIONS;
1811 				if (MWAIT_INT_ENABLE(cpi))
1812 					cpi->cpi_mwait.support |=
1813 					    MWAIT_ECX_INT_ENABLE;
1814 			}
1815 			break;
1816 		}
1817 		default:
1818 			break;
1819 		}
1820 	}
1821 
1822 	if (cpi->cpi_maxeax >= 0xB && cpi->cpi_vendor == X86_VENDOR_Intel) {
1823 		struct cpuid_regs regs;
1824 
1825 		cp = &regs;
1826 		cp->cp_eax = 0xB;
1827 		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
1828 
1829 		(void) __cpuid_insn(cp);
1830 
1831 		/*
1832 		 * Check that CPUID.(EAX=0BH, ECX=0H):EBX is non-zero, which
1833 		 * indicates that the extended topology enumeration leaf is
1834 		 * available.
1835 		 */
1836 		if (cp->cp_ebx) {
1837 			uint32_t x2apic_id;
1838 			uint_t coreid_shift = 0;
1839 			uint_t ncpu_per_core = 1;
1840 			uint_t chipid_shift = 0;
1841 			uint_t ncpu_per_chip = 1;
1842 			uint_t i;
1843 			uint_t level;
1844 
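			/*
			 * Level type 1 is the SMT (thread) level and level
			 * type 2 is the core level; %eax[4:0] of each level
			 * gives the number of x2APIC id bits to shift out
			 * to reach the next level up.
			 */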
1845 			for (i = 0; i < CPI_FNB_ECX_MAX; i++) {
1846 				cp->cp_eax = 0xB;
1847 				cp->cp_ecx = i;
1848 
1849 				(void) __cpuid_insn(cp);
1850 				level = CPI_CPU_LEVEL_TYPE(cp);
1851 
1852 				if (level == 1) {
1853 					x2apic_id = cp->cp_edx;
1854 					coreid_shift = BITX(cp->cp_eax, 4, 0);
1855 					ncpu_per_core = BITX(cp->cp_ebx, 15, 0);
1856 				} else if (level == 2) {
1857 					x2apic_id = cp->cp_edx;
1858 					chipid_shift = BITX(cp->cp_eax, 4, 0);
1859 					ncpu_per_chip = BITX(cp->cp_ebx, 15, 0);
1860 				}
1861 			}
1862 
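			/*
			 * Purely illustrative example: with coreid_shift = 1
			 * and chipid_shift = 4, an x2APIC id of 0x12 yields
			 * chipid 1, clogid 2, coreid 9 and pkgcoreid 1.
			 */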
1863 			cpi->cpi_apicid = x2apic_id;
1864 			cpi->cpi_ncpu_per_chip = ncpu_per_chip;
1865 			cpi->cpi_ncore_per_chip = ncpu_per_chip /
1866 			    ncpu_per_core;
1867 			cpi->cpi_chipid = x2apic_id >> chipid_shift;
1868 			cpi->cpi_clogid = x2apic_id & ((1 << chipid_shift) - 1);
1869 			cpi->cpi_coreid = x2apic_id >> coreid_shift;
1870 			cpi->cpi_pkgcoreid = cpi->cpi_clogid >> coreid_shift;
1871 		}
1872 
1873 		/* Make cp NULL so that we don't stumble on others */
1874 		cp = NULL;
1875 	}
1876 
1877 	/*
1878 	 * XSAVE enumeration
1879 	 */
1880 	if (cpi->cpi_maxeax >= 0xD) {
1881 		struct cpuid_regs regs;
1882 		boolean_t cpuid_d_valid = B_TRUE;
1883 
1884 		cp = &regs;
1885 		cp->cp_eax = 0xD;
1886 		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
1887 
1888 		(void) __cpuid_insn(cp);
1889 
1890 		/*
1891 		 * Sanity checks for debug
1892 		 */
1893 		if ((cp->cp_eax & XFEATURE_LEGACY_FP) == 0 ||
1894 		    (cp->cp_eax & XFEATURE_SSE) == 0) {
1895 			cpuid_d_valid = B_FALSE;
1896 		}
1897 
1898 		cpi->cpi_xsave.xsav_hw_features_low = cp->cp_eax;
1899 		cpi->cpi_xsave.xsav_hw_features_high = cp->cp_edx;
1900 		cpi->cpi_xsave.xsav_max_size = cp->cp_ecx;
1901 
1902 		/*
1903 		 * If the hw supports AVX, get the size and offset in the save
1904 		 * area for the ymm state.
1905 		 */
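		/*
		 * (Sub-leaf 2 of leaf 0xD describes the AVX/ymm state
		 * component, i.e. bit 2 of the XFEATURE mask tested above.)
		 */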
1906 		if (cpi->cpi_xsave.xsav_hw_features_low & XFEATURE_AVX) {
1907 			cp->cp_eax = 0xD;
1908 			cp->cp_ecx = 2;
1909 			cp->cp_edx = cp->cp_ebx = 0;
1910 
1911 			(void) __cpuid_insn(cp);
1912 
1913 			if (cp->cp_ebx != CPUID_LEAFD_2_YMM_OFFSET ||
1914 			    cp->cp_eax != CPUID_LEAFD_2_YMM_SIZE) {
1915 				cpuid_d_valid = B_FALSE;
1916 			}
1917 
1918 			cpi->cpi_xsave.ymm_size = cp->cp_eax;
1919 			cpi->cpi_xsave.ymm_offset = cp->cp_ebx;
1920 		}
1921 
1922 		if (!is_x86_feature(x86_featureset, X86FSET_XSAVE)) {
1923 			xsave_state_size = 0;
1924 		} else if (cpuid_d_valid) {
1925 			xsave_state_size = cpi->cpi_xsave.xsav_max_size;
1926 		} else {
1927 			/* Broken CPUID 0xD, probably in HVM */
1928 			cmn_err(CE_WARN, "cpu%d: CPUID.0xD returns invalid "
1929 			    "value: hw_low = %d, hw_high = %d, xsave_size = %d"
1930 			    ", ymm_size = %d, ymm_offset = %d\n",
1931 			    cpu->cpu_id, cpi->cpi_xsave.xsav_hw_features_low,
1932 			    cpi->cpi_xsave.xsav_hw_features_high,
1933 			    (int)cpi->cpi_xsave.xsav_max_size,
1934 			    (int)cpi->cpi_xsave.ymm_size,
1935 			    (int)cpi->cpi_xsave.ymm_offset);
1936 
1937 			if (xsave_state_size != 0) {
1938 				/*
1939 				 * This must be a non-boot CPU. We cannot
1940 				 * continue, because boot cpu has already
1941 				 * enabled XSAVE.
1942 				 */
1943 				ASSERT(cpu->cpu_id != 0);
1944 				cmn_err(CE_PANIC, "cpu%d: we have already "
1945 				    "enabled XSAVE on boot cpu, cannot "
1946 				    "continue.", cpu->cpu_id);
1947 			} else {
1948 				/*
1949 				 * If we reached here on the boot CPU, it's also
1950 				 * almost certain that we'll reach here on the
1951 				 * non-boot CPUs. When we're here on a boot CPU
1952 				 * we should disable the feature, on a non-boot
1953 				 * CPU we need to confirm that we have.
1954 				 */
1955 				if (cpu->cpu_id == 0) {
1956 					remove_x86_feature(x86_featureset,
1957 					    X86FSET_XSAVE);
1958 					remove_x86_feature(x86_featureset,
1959 					    X86FSET_AVX);
1960 					CPI_FEATURES_ECX(cpi) &=
1961 					    ~CPUID_INTC_ECX_XSAVE;
1962 					CPI_FEATURES_ECX(cpi) &=
1963 					    ~CPUID_INTC_ECX_AVX;
1964 					CPI_FEATURES_ECX(cpi) &=
1965 					    ~CPUID_INTC_ECX_F16C;
1966 					xsave_force_disable = B_TRUE;
1967 				} else {
1968 					VERIFY(is_x86_feature(x86_featureset,
1969 					    X86FSET_XSAVE) == B_FALSE);
1970 				}
1971 			}
1972 		}
1973 	}
1974 
1975 
1976 	if ((cpi->cpi_xmaxeax & 0x80000000) == 0)
1977 		goto pass2_done;
1978 
1979 	if ((nmax = cpi->cpi_xmaxeax - 0x80000000 + 1) > NMAX_CPI_EXTD)
1980 		nmax = NMAX_CPI_EXTD;
1981 	/*
1982 	 * Copy the extended properties, fixing them as we go.
1983 	 * (We already handled n == 0 and n == 1 in pass 1)
1984 	 */
1985 	iptr = (void *)cpi->cpi_brandstr;
1986 	for (n = 2, cp = &cpi->cpi_extd[2]; n < nmax; cp++, n++) {
1987 		cp->cp_eax = 0x80000000 + n;
1988 		(void) __cpuid_insn(cp);
1989 		platform_cpuid_mangle(cpi->cpi_vendor, 0x80000000 + n, cp);
1990 		switch (n) {
1991 		case 2:
1992 		case 3:
1993 		case 4:
1994 			/*
1995 			 * Extract the brand string
1996 			 */
1997 			*iptr++ = cp->cp_eax;
1998 			*iptr++ = cp->cp_ebx;
1999 			*iptr++ = cp->cp_ecx;
2000 			*iptr++ = cp->cp_edx;
2001 			break;
2002 		case 5:
2003 			switch (cpi->cpi_vendor) {
2004 			case X86_VENDOR_AMD:
2005 				/*
2006 				 * The Athlon and Duron were the first
2007 				 * parts to report the sizes of the
2008 				 * TLB for large pages. Before then,
2009 				 * we don't trust the data.
2010 				 */
2011 				if (cpi->cpi_family < 6 ||
2012 				    (cpi->cpi_family == 6 &&
2013 				    cpi->cpi_model < 1))
2014 					cp->cp_eax = 0;
2015 				break;
2016 			default:
2017 				break;
2018 			}
2019 			break;
2020 		case 6:
2021 			switch (cpi->cpi_vendor) {
2022 			case X86_VENDOR_AMD:
2023 				/*
2024 				 * The Athlon and Duron were the first
2025 				 * AMD parts with L2 TLB's.
2026 				 * Before then, don't trust the data.
2027 				 */
2028 				if (cpi->cpi_family < 6 ||
2029 				    (cpi->cpi_family == 6 &&
2030 				    cpi->cpi_model < 1))
2031 					cp->cp_eax = cp->cp_ebx = 0;
2032 				/*
2033 				 * AMD Duron rev A0 reports L2
2034 				 * cache size incorrectly as 1K
2035 				 * when it is really 64K
2036 				 */
2037 				if (cpi->cpi_family == 6 &&
2038 				    cpi->cpi_model == 3 &&
2039 				    cpi->cpi_step == 0) {
2040 					cp->cp_ecx &= 0xffff;
2041 					cp->cp_ecx |= 0x400000;
2042 				}
2043 				break;
2044 			case X86_VENDOR_Cyrix:	/* VIA C3 */
2045 				/*
2046 				 * VIA C3 processors are a bit messed
2047 				 * up w.r.t. encoding cache sizes in %ecx
2048 				 */
2049 				if (cpi->cpi_family != 6)
2050 					break;
2051 				/*
2052 				 * model 7 and 8 were incorrectly encoded
2053 				 *
2054 				 * xxx is model 8 really broken?
2055 				 */
2056 				if (cpi->cpi_model == 7 ||
2057 				    cpi->cpi_model == 8)
2058 					cp->cp_ecx =
2059 					    BITX(cp->cp_ecx, 31, 24) << 16 |
2060 					    BITX(cp->cp_ecx, 23, 16) << 12 |
2061 					    BITX(cp->cp_ecx, 15, 8) << 8 |
2062 					    BITX(cp->cp_ecx, 7, 0);
2063 				/*
2064 				 * model 9 stepping 1 has wrong associativity
2065 				 */
2066 				if (cpi->cpi_model == 9 && cpi->cpi_step == 1)
2067 					cp->cp_ecx |= 8 << 12;
2068 				break;
2069 			case X86_VENDOR_Intel:
2070 				/*
2071 				 * Extended L2 Cache features function.
2072 				 * First appeared on Prescott.
2073 				 */
2074 			default:
2075 				break;
2076 			}
2077 			break;
2078 		default:
2079 			break;
2080 		}
2081 	}
2082 
2083 pass2_done:
2084 	cpi->cpi_pass = 2;
2085 }
2086 
2087 static const char *
2088 intel_cpubrand(const struct cpuid_info *cpi)
2089 {
2090 	int i;
2091 
2092 	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
2093 	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5)
2094 		return ("i486");
2095 
2096 	switch (cpi->cpi_family) {
2097 	case 5:
2098 		return ("Intel Pentium(r)");
2099 	case 6:
2100 		switch (cpi->cpi_model) {
2101 			uint_t celeron, xeon;
2102 			const struct cpuid_regs *cp;
2103 		case 0:
2104 		case 1:
2105 		case 2:
2106 			return ("Intel Pentium(r) Pro");
2107 		case 3:
2108 		case 4:
2109 			return ("Intel Pentium(r) II");
2110 		case 6:
2111 			return ("Intel Celeron(r)");
2112 		case 5:
2113 		case 7:
2114 			celeron = xeon = 0;
2115 			cp = &cpi->cpi_std[2];	/* cache info */
2116 
2117 			for (i = 1; i < 4; i++) {
2118 				uint_t tmp;
2119 
2120 				tmp = (cp->cp_eax >> (8 * i)) & 0xff;
2121 				if (tmp == 0x40)
2122 					celeron++;
2123 				if (tmp >= 0x44 && tmp <= 0x45)
2124 					xeon++;
2125 			}
2126 
2127 			for (i = 0; i < 2; i++) {
2128 				uint_t tmp;
2129 
2130 				tmp = (cp->cp_ebx >> (8 * i)) & 0xff;
2131 				if (tmp == 0x40)
2132 					celeron++;
2133 				else if (tmp >= 0x44 && tmp <= 0x45)
2134 					xeon++;
2135 			}
2136 
2137 			for (i = 0; i < 4; i++) {
2138 				uint_t tmp;
2139 
2140 				tmp = (cp->cp_ecx >> (8 * i)) & 0xff;
2141 				if (tmp == 0x40)
2142 					celeron++;
2143 				else if (tmp >= 0x44 && tmp <= 0x45)
2144 					xeon++;
2145 			}
2146 
2147 			for (i = 0; i < 4; i++) {
2148 				uint_t tmp;
2149 
2150 				tmp = (cp->cp_edx >> (8 * i)) & 0xff;
2151 				if (tmp == 0x40)
2152 					celeron++;
2153 				else if (tmp >= 0x44 && tmp <= 0x45)
2154 					xeon++;
2155 			}
2156 
2157 			if (celeron)
2158 				return ("Intel Celeron(r)");
2159 			if (xeon)
2160 				return (cpi->cpi_model == 5 ?
2161 				    "Intel Pentium(r) II Xeon(tm)" :
2162 				    "Intel Pentium(r) III Xeon(tm)");
2163 			return (cpi->cpi_model == 5 ?
2164 			    "Intel Pentium(r) II or Pentium(r) II Xeon(tm)" :
2165 			    "Intel Pentium(r) III or Pentium(r) III Xeon(tm)");
2166 		default:
2167 			break;
2168 		}
2169 	default:
2170 		break;
2171 	}
2172 
2173 	/* BrandID is present if the field is nonzero */
2174 	if (cpi->cpi_brandid != 0) {
2175 		static const struct {
2176 			uint_t bt_bid;
2177 			const char *bt_str;
2178 		} brand_tbl[] = {
2179 			{ 0x1,	"Intel(r) Celeron(r)" },
2180 			{ 0x2,	"Intel(r) Pentium(r) III" },
2181 			{ 0x3,	"Intel(r) Pentium(r) III Xeon(tm)" },
2182 			{ 0x4,	"Intel(r) Pentium(r) III" },
2183 			{ 0x6,	"Mobile Intel(r) Pentium(r) III" },
2184 			{ 0x7,	"Mobile Intel(r) Celeron(r)" },
2185 			{ 0x8,	"Intel(r) Pentium(r) 4" },
2186 			{ 0x9,	"Intel(r) Pentium(r) 4" },
2187 			{ 0xa,	"Intel(r) Celeron(r)" },
2188 			{ 0xb,	"Intel(r) Xeon(tm)" },
2189 			{ 0xc,	"Intel(r) Xeon(tm) MP" },
2190 			{ 0xe,	"Mobile Intel(r) Pentium(r) 4" },
2191 			{ 0xf,	"Mobile Intel(r) Celeron(r)" },
2192 			{ 0x11, "Mobile Genuine Intel(r)" },
2193 			{ 0x12, "Intel(r) Celeron(r) M" },
2194 			{ 0x13, "Mobile Intel(r) Celeron(r)" },
2195 			{ 0x14, "Intel(r) Celeron(r)" },
2196 			{ 0x15, "Mobile Genuine Intel(r)" },
2197 			{ 0x16,	"Intel(r) Pentium(r) M" },
2198 			{ 0x17, "Mobile Intel(r) Celeron(r)" }
2199 		};
2200 		uint_t btblmax = sizeof (brand_tbl) / sizeof (brand_tbl[0]);
2201 		uint_t sgn;
2202 
2203 		sgn = (cpi->cpi_family << 8) |
2204 		    (cpi->cpi_model << 4) | cpi->cpi_step;
2205 
2206 		for (i = 0; i < btblmax; i++)
2207 			if (brand_tbl[i].bt_bid == cpi->cpi_brandid)
2208 				break;
2209 		if (i < btblmax) {
2210 			if (sgn == 0x6b1 && cpi->cpi_brandid == 3)
2211 				return ("Intel(r) Celeron(r)");
2212 			if (sgn < 0xf13 && cpi->cpi_brandid == 0xb)
2213 				return ("Intel(r) Xeon(tm) MP");
2214 			if (sgn < 0xf13 && cpi->cpi_brandid == 0xe)
2215 				return ("Intel(r) Xeon(tm)");
2216 			return (brand_tbl[i].bt_str);
2217 		}
2218 	}
2219 
2220 	return (NULL);
2221 }
2222 
2223 static const char *
2224 amd_cpubrand(const struct cpuid_info *cpi)
2225 {
2226 	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
2227 	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5)
2228 		return ("i486 compatible");
2229 
2230 	switch (cpi->cpi_family) {
2231 	case 5:
2232 		switch (cpi->cpi_model) {
2233 		case 0:
2234 		case 1:
2235 		case 2:
2236 		case 3:
2237 		case 4:
2238 		case 5:
2239 			return ("AMD-K5(r)");
2240 		case 6:
2241 		case 7:
2242 			return ("AMD-K6(r)");
2243 		case 8:
2244 			return ("AMD-K6(r)-2");
2245 		case 9:
2246 			return ("AMD-K6(r)-III");
2247 		default:
2248 			return ("AMD (family 5)");
2249 		}
2250 	case 6:
2251 		switch (cpi->cpi_model) {
2252 		case 1:
2253 			return ("AMD-K7(tm)");
2254 		case 0:
2255 		case 2:
2256 		case 4:
2257 			return ("AMD Athlon(tm)");
2258 		case 3:
2259 		case 7:
2260 			return ("AMD Duron(tm)");
2261 		case 6:
2262 		case 8:
2263 		case 10:
2264 			/*
2265 			 * Use the L2 cache size to distinguish
2266 			 */
2267 			return ((cpi->cpi_extd[6].cp_ecx >> 16) >= 256 ?
2268 			    "AMD Athlon(tm)" : "AMD Duron(tm)");
2269 		default:
2270 			return ("AMD (family 6)");
2271 		}
2272 	default:
2273 		break;
2274 	}
2275 
2276 	if (cpi->cpi_family == 0xf && cpi->cpi_model == 5 &&
2277 	    cpi->cpi_brandid != 0) {
2278 		switch (BITX(cpi->cpi_brandid, 7, 5)) {
2279 		case 3:
2280 			return ("AMD Opteron(tm) UP 1xx");
2281 		case 4:
2282 			return ("AMD Opteron(tm) DP 2xx");
2283 		case 5:
2284 			return ("AMD Opteron(tm) MP 8xx");
2285 		default:
2286 			return ("AMD Opteron(tm)");
2287 		}
2288 	}
2289 
2290 	return (NULL);
2291 }
2292 
2293 static const char *
2294 cyrix_cpubrand(struct cpuid_info *cpi, uint_t type)
2295 {
2296 	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
2297 	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5 ||
2298 	    type == X86_TYPE_CYRIX_486)
2299 		return ("i486 compatible");
2300 
2301 	switch (type) {
2302 	case X86_TYPE_CYRIX_6x86:
2303 		return ("Cyrix 6x86");
2304 	case X86_TYPE_CYRIX_6x86L:
2305 		return ("Cyrix 6x86L");
2306 	case X86_TYPE_CYRIX_6x86MX:
2307 		return ("Cyrix 6x86MX");
2308 	case X86_TYPE_CYRIX_GXm:
2309 		return ("Cyrix GXm");
2310 	case X86_TYPE_CYRIX_MediaGX:
2311 		return ("Cyrix MediaGX");
2312 	case X86_TYPE_CYRIX_MII:
2313 		return ("Cyrix M2");
2314 	case X86_TYPE_VIA_CYRIX_III:
2315 		return ("VIA Cyrix M3");
2316 	default:
2317 		/*
2318 		 * Have another wild guess ..
2319 		 */
2320 		if (cpi->cpi_family == 4 && cpi->cpi_model == 9)
2321 			return ("Cyrix 5x86");
2322 		else if (cpi->cpi_family == 5) {
2323 			switch (cpi->cpi_model) {
2324 			case 2:
2325 				return ("Cyrix 6x86");	/* Cyrix M1 */
2326 			case 4:
2327 				return ("Cyrix MediaGX");
2328 			default:
2329 				break;
2330 			}
2331 		} else if (cpi->cpi_family == 6) {
2332 			switch (cpi->cpi_model) {
2333 			case 0:
2334 				return ("Cyrix 6x86MX"); /* Cyrix M2? */
2335 			case 5:
2336 			case 6:
2337 			case 7:
2338 			case 8:
2339 			case 9:
2340 				return ("VIA C3");
2341 			default:
2342 				break;
2343 			}
2344 		}
2345 		break;
2346 	}
2347 	return (NULL);
2348 }
2349 
2350 /*
2351  * This only gets called when the CPU extended brand string leaves
2352  * (0x80000002, 0x80000003, 0x80000004) aren't available, or contain
2353  * null bytes for some reason.
2354  */
2355 static void
2356 fabricate_brandstr(struct cpuid_info *cpi)
2357 {
2358 	const char *brand = NULL;
2359 
2360 	switch (cpi->cpi_vendor) {
2361 	case X86_VENDOR_Intel:
2362 		brand = intel_cpubrand(cpi);
2363 		break;
2364 	case X86_VENDOR_AMD:
2365 		brand = amd_cpubrand(cpi);
2366 		break;
2367 	case X86_VENDOR_Cyrix:
2368 		brand = cyrix_cpubrand(cpi, x86_type);
2369 		break;
2370 	case X86_VENDOR_NexGen:
2371 		if (cpi->cpi_family == 5 && cpi->cpi_model == 0)
2372 			brand = "NexGen Nx586";
2373 		break;
2374 	case X86_VENDOR_Centaur:
2375 		if (cpi->cpi_family == 5)
2376 			switch (cpi->cpi_model) {
2377 			case 4:
2378 				brand = "Centaur C6";
2379 				break;
2380 			case 8:
2381 				brand = "Centaur C2";
2382 				break;
2383 			case 9:
2384 				brand = "Centaur C3";
2385 				break;
2386 			default:
2387 				break;
2388 			}
2389 		break;
2390 	case X86_VENDOR_Rise:
2391 		if (cpi->cpi_family == 5 &&
2392 		    (cpi->cpi_model == 0 || cpi->cpi_model == 2))
2393 			brand = "Rise mP6";
2394 		break;
2395 	case X86_VENDOR_SiS:
2396 		if (cpi->cpi_family == 5 && cpi->cpi_model == 0)
2397 			brand = "SiS 55x";
2398 		break;
2399 	case X86_VENDOR_TM:
2400 		if (cpi->cpi_family == 5 && cpi->cpi_model == 4)
2401 			brand = "Transmeta Crusoe TM3x00 or TM5x00";
2402 		break;
2403 	case X86_VENDOR_NSC:
2404 	case X86_VENDOR_UMC:
2405 	default:
2406 		break;
2407 	}
2408 	if (brand) {
2409 		(void) strcpy((char *)cpi->cpi_brandstr, brand);
2410 		return;
2411 	}
2412 
2413 	/*
2414 	 * If all else fails ...
2415 	 */
2416 	(void) snprintf(cpi->cpi_brandstr, sizeof (cpi->cpi_brandstr),
2417 	    "%s %d.%d.%d", cpi->cpi_vendorstr, cpi->cpi_family,
2418 	    cpi->cpi_model, cpi->cpi_step);
2419 }
2420 
2421 /*
2422  * This routine is called just after kernel memory allocation
2423  * becomes available on cpu0, and as part of mp_startup() on
2424  * the other cpus.
2425  *
2426  * Fixup the brand string, and collect any information from cpuid
2427  * that requires dynamically allocated storage to represent.
2428  */
2429 /*ARGSUSED*/
2430 void
2431 cpuid_pass3(cpu_t *cpu)
2432 {
2433 	int	i, max, shft, level, size;
2434 	struct cpuid_regs regs;
2435 	struct cpuid_regs *cp;
2436 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
2437 
2438 	ASSERT(cpi->cpi_pass == 2);
2439 
2440 	/*
2441 	 * Function 4: Deterministic cache parameters
2442 	 *
2443 	 * Take this opportunity to detect the number of threads
2444 	 * sharing the last level cache, and construct a corresponding
2445 	 * cache id. The respective cpuid_info members are initialized
2446 	 * to the default case of "no last level cache sharing".
2447 	 */
2448 	cpi->cpi_ncpu_shr_last_cache = 1;
2449 	cpi->cpi_last_lvl_cacheid = cpu->cpu_id;
2450 
2451 	if (cpi->cpi_maxeax >= 4 && cpi->cpi_vendor == X86_VENDOR_Intel) {
2452 
2453 		/*
2454 		 * Find the # of elements (size) returned by fn 4, and along
2455 		 * the way detect last level cache sharing details.
2456 		 */
2457 		bzero(&regs, sizeof (regs));
2458 		cp = &regs;
2459 		for (i = 0, max = 0; i < CPI_FN4_ECX_MAX; i++) {
2460 			cp->cp_eax = 4;
2461 			cp->cp_ecx = i;
2462 
2463 			(void) __cpuid_insn(cp);
2464 
2465 			if (CPI_CACHE_TYPE(cp) == 0)
2466 				break;
2467 			level = CPI_CACHE_LVL(cp);
2468 			if (level > max) {
2469 				max = level;
2470 				cpi->cpi_ncpu_shr_last_cache =
2471 				    CPI_NTHR_SHR_CACHE(cp) + 1;
2472 			}
2473 		}
2474 		cpi->cpi_std_4_size = size = i;
2475 
2476 		/*
2477 		 * Allocate the cpi_std_4 array. The first element
2478 		 * references the regs for fn 4, %ecx == 0, which
2479 		 * cpuid_pass2() stashed in cpi->cpi_std[4].
2480 		 */
2481 		if (size > 0) {
2482 			cpi->cpi_std_4 =
2483 			    kmem_alloc(size * sizeof (cp), KM_SLEEP);
2484 			cpi->cpi_std_4[0] = &cpi->cpi_std[4];
2485 
2486 			/*
2487 			 * Allocate storage to hold the additional regs
2488 			 * for function 4, %ecx == 1 .. cpi_std_4_size.
2489 			 *
2490 			 * The regs for fn 4, %ecx == 0 have already
2491 			 * been allocated as indicated above.
2492 			 */
2493 			for (i = 1; i < size; i++) {
2494 				cp = cpi->cpi_std_4[i] =
2495 				    kmem_zalloc(sizeof (regs), KM_SLEEP);
2496 				cp->cp_eax = 4;
2497 				cp->cp_ecx = i;
2498 
2499 				(void) __cpuid_insn(cp);
2500 			}
2501 		}
2502 		/*
2503 		 * Determine the number of bits needed to represent
2504 		 * the number of CPUs sharing the last level cache.
2505 		 *
2506 		 * Shift off that number of bits from the APIC id to
2507 		 * derive the cache id.
2508 		 */
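		/*
		 * For example, with 8 CPUs sharing the last level cache,
		 * shft becomes 3 and APIC ids 0 through 7 all map to
		 * cache id 0.
		 */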
2509 		shft = 0;
2510 		for (i = 1; i < cpi->cpi_ncpu_shr_last_cache; i <<= 1)
2511 			shft++;
2512 		cpi->cpi_last_lvl_cacheid = cpi->cpi_apicid >> shft;
2513 	}
2514 
2515 	/*
2516 	 * Now fixup the brand string
2517 	 */
2518 	if ((cpi->cpi_xmaxeax & 0x80000000) == 0) {
2519 		fabricate_brandstr(cpi);
2520 	} else {
2521 
2522 		/*
2523 		 * If we successfully extracted a brand string from the cpuid
2524 		 * instruction, clean it up by removing leading spaces and
2525 		 * similar junk.
2526 		 */
2527 		if (cpi->cpi_brandstr[0]) {
2528 			size_t maxlen = sizeof (cpi->cpi_brandstr);
2529 			char *src, *dst;
2530 
2531 			dst = src = (char *)cpi->cpi_brandstr;
2532 			src[maxlen - 1] = '\0';
2533 			/*
2534 			 * strip leading spaces
2535 			 */
2536 			while (*src == ' ')
2537 				src++;
2538 			/*
2539 			 * Remove any "Genuine" or "Authentic" prefixes.
2540 			 */
2541 			if (strncmp(src, "Genuine ", 8) == 0)
2542 				src += 8;
2543 			if (strncmp(src, "Authentic ", 10) == 0)
2544 				src += 10;
2545 
2546 			/*
2547 			 * Now do an in-place copy.
2548 			 * Map (R) to (r) and (TM) to (tm).
2549 			 * The era of teletypes is long gone, and there's
2550 			 * -really- no need to shout.
2551 			 */
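			/*
			 * Note that strncpy() copies no terminating NUL
			 * here; that is fine because dst is advanced past
			 * the copied text and terminated once, after the
			 * loop.
			 */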
2552 			while (*src != '\0') {
2553 				if (src[0] == '(') {
2554 					if (strncmp(src + 1, "R)", 2) == 0) {
2555 						(void) strncpy(dst, "(r)", 3);
2556 						src += 3;
2557 						dst += 3;
2558 						continue;
2559 					}
2560 					if (strncmp(src + 1, "TM)", 3) == 0) {
2561 						(void) strncpy(dst, "(tm)", 4);
2562 						src += 4;
2563 						dst += 4;
2564 						continue;
2565 					}
2566 				}
2567 				*dst++ = *src++;
2568 			}
2569 			*dst = '\0';
2570 
2571 			/*
2572 			 * Finally, remove any trailing spaces
2573 			 */
2574 			while (--dst > cpi->cpi_brandstr)
2575 				if (*dst == ' ')
2576 					*dst = '\0';
2577 				else
2578 					break;
2579 		} else
2580 			fabricate_brandstr(cpi);
2581 	}
2582 	cpi->cpi_pass = 3;
2583 }
2584 
2585 /*
2586  * This routine is called out of bind_hwcap() much later in the life
2587  * of the kernel (post_startup()).  The job of this routine is to resolve
2588  * the hardware feature support and kernel support for those features into
2589  * what we're actually going to tell applications via the aux vector.
2590  */
2591 void
2592 cpuid_pass4(cpu_t *cpu, uint_t *hwcap_out)
2593 {
2594 	struct cpuid_info *cpi;
2595 	uint_t hwcap_flags = 0, hwcap_flags_2 = 0;
2596 
2597 	if (cpu == NULL)
2598 		cpu = CPU;
2599 	cpi = cpu->cpu_m.mcpu_cpi;
2600 
2601 	ASSERT(cpi->cpi_pass == 3);
2602 
2603 	if (cpi->cpi_maxeax >= 1) {
2604 		uint32_t *edx = &cpi->cpi_support[STD_EDX_FEATURES];
2605 		uint32_t *ecx = &cpi->cpi_support[STD_ECX_FEATURES];
2606 
2607 		*edx = CPI_FEATURES_EDX(cpi);
2608 		*ecx = CPI_FEATURES_ECX(cpi);
2609 
2610 		/*
2611 		 * [these require explicit kernel support]
2612 		 */
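		/*
		 * (That is, a CPUID bit is cleared here, and therefore
		 * hidden from userland, whenever the corresponding
		 * X86FSET_* flag is absent from x86_featureset.)
		 */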
2613 		if (!is_x86_feature(x86_featureset, X86FSET_SEP))
2614 			*edx &= ~CPUID_INTC_EDX_SEP;
2615 
2616 		if (!is_x86_feature(x86_featureset, X86FSET_SSE))
2617 			*edx &= ~(CPUID_INTC_EDX_FXSR|CPUID_INTC_EDX_SSE);
2618 		if (!is_x86_feature(x86_featureset, X86FSET_SSE2))
2619 			*edx &= ~CPUID_INTC_EDX_SSE2;
2620 
2621 		if (!is_x86_feature(x86_featureset, X86FSET_HTT))
2622 			*edx &= ~CPUID_INTC_EDX_HTT;
2623 
2624 		if (!is_x86_feature(x86_featureset, X86FSET_SSE3))
2625 			*ecx &= ~CPUID_INTC_ECX_SSE3;
2626 
2627 		if (!is_x86_feature(x86_featureset, X86FSET_SSSE3))
2628 			*ecx &= ~CPUID_INTC_ECX_SSSE3;
2629 		if (!is_x86_feature(x86_featureset, X86FSET_SSE4_1))
2630 			*ecx &= ~CPUID_INTC_ECX_SSE4_1;
2631 		if (!is_x86_feature(x86_featureset, X86FSET_SSE4_2))
2632 			*ecx &= ~CPUID_INTC_ECX_SSE4_2;
2633 		if (!is_x86_feature(x86_featureset, X86FSET_AES))
2634 			*ecx &= ~CPUID_INTC_ECX_AES;
2635 		if (!is_x86_feature(x86_featureset, X86FSET_PCLMULQDQ))
2636 			*ecx &= ~CPUID_INTC_ECX_PCLMULQDQ;
2637 		if (!is_x86_feature(x86_featureset, X86FSET_XSAVE))
2638 			*ecx &= ~(CPUID_INTC_ECX_XSAVE |
2639 			    CPUID_INTC_ECX_OSXSAVE);
2640 		if (!is_x86_feature(x86_featureset, X86FSET_AVX))
2641 			*ecx &= ~CPUID_INTC_ECX_AVX;
2642 		if (!is_x86_feature(x86_featureset, X86FSET_F16C))
2643 			*ecx &= ~CPUID_INTC_ECX_F16C;
2644 
2645 		/*
2646 		 * [no explicit support required beyond x87 fp context]
2647 		 */
2648 		if (!fpu_exists)
2649 			*edx &= ~(CPUID_INTC_EDX_FPU | CPUID_INTC_EDX_MMX);
2650 
2651 		/*
2652 		 * Now map the supported feature vector to things that we
2653 		 * think userland will care about.
2654 		 */
2655 		if (*edx & CPUID_INTC_EDX_SEP)
2656 			hwcap_flags |= AV_386_SEP;
2657 		if (*edx & CPUID_INTC_EDX_SSE)
2658 			hwcap_flags |= AV_386_FXSR | AV_386_SSE;
2659 		if (*edx & CPUID_INTC_EDX_SSE2)
2660 			hwcap_flags |= AV_386_SSE2;
2661 		if (*ecx & CPUID_INTC_ECX_SSE3)
2662 			hwcap_flags |= AV_386_SSE3;
2663 		if (*ecx & CPUID_INTC_ECX_SSSE3)
2664 			hwcap_flags |= AV_386_SSSE3;
2665 		if (*ecx & CPUID_INTC_ECX_SSE4_1)
2666 			hwcap_flags |= AV_386_SSE4_1;
2667 		if (*ecx & CPUID_INTC_ECX_SSE4_2)
2668 			hwcap_flags |= AV_386_SSE4_2;
2669 		if (*ecx & CPUID_INTC_ECX_MOVBE)
2670 			hwcap_flags |= AV_386_MOVBE;
2671 		if (*ecx & CPUID_INTC_ECX_AES)
2672 			hwcap_flags |= AV_386_AES;
2673 		if (*ecx & CPUID_INTC_ECX_PCLMULQDQ)
2674 			hwcap_flags |= AV_386_PCLMULQDQ;
2675 		if ((*ecx & CPUID_INTC_ECX_XSAVE) &&
2676 		    (*ecx & CPUID_INTC_ECX_OSXSAVE)) {
2677 			hwcap_flags |= AV_386_XSAVE;
2678 
2679 			if (*ecx & CPUID_INTC_ECX_AVX) {
2680 				hwcap_flags |= AV_386_AVX;
2681 				if (*ecx & CPUID_INTC_ECX_F16C)
2682 					hwcap_flags_2 |= AV_386_2_F16C;
2683 			}
2684 		}
2685 		if (*ecx & CPUID_INTC_ECX_VMX)
2686 			hwcap_flags |= AV_386_VMX;
2687 		if (*ecx & CPUID_INTC_ECX_POPCNT)
2688 			hwcap_flags |= AV_386_POPCNT;
2689 		if (*edx & CPUID_INTC_EDX_FPU)
2690 			hwcap_flags |= AV_386_FPU;
2691 		if (*edx & CPUID_INTC_EDX_MMX)
2692 			hwcap_flags |= AV_386_MMX;
2693 
2694 		if (*edx & CPUID_INTC_EDX_TSC)
2695 			hwcap_flags |= AV_386_TSC;
2696 		if (*edx & CPUID_INTC_EDX_CX8)
2697 			hwcap_flags |= AV_386_CX8;
2698 		if (*edx & CPUID_INTC_EDX_CMOV)
2699 			hwcap_flags |= AV_386_CMOV;
2700 		if (*ecx & CPUID_INTC_ECX_CX16)
2701 			hwcap_flags |= AV_386_CX16;
2702 
2703 		if (*ecx & CPUID_INTC_ECX_RDRAND)
2704 			hwcap_flags_2 |= AV_386_2_RDRAND;
2705 	}
2706 
2707 	if (cpi->cpi_xmaxeax < 0x80000001)
2708 		goto pass4_done;
2709 
2710 	switch (cpi->cpi_vendor) {
2711 		struct cpuid_regs cp;
2712 		uint32_t *edx, *ecx;
2713 
2714 	case X86_VENDOR_Intel:
2715 		/*
2716 		 * Seems like Intel duplicated what was necessary
2717 		 * here to make the initial crop of 64-bit OSes work.
2718 		 * Hopefully, those are the only "extended" bits
2719 		 * they'll add.
2720 		 */
2721 		/*FALLTHROUGH*/
2722 
2723 	case X86_VENDOR_AMD:
2724 		edx = &cpi->cpi_support[AMD_EDX_FEATURES];
2725 		ecx = &cpi->cpi_support[AMD_ECX_FEATURES];
2726 
2727 		*edx = CPI_FEATURES_XTD_EDX(cpi);
2728 		*ecx = CPI_FEATURES_XTD_ECX(cpi);
2729 
2730 		/*
2731 		 * [these features require explicit kernel support]
2732 		 */
2733 		switch (cpi->cpi_vendor) {
2734 		case X86_VENDOR_Intel:
2735 			if (!is_x86_feature(x86_featureset, X86FSET_TSCP))
2736 				*edx &= ~CPUID_AMD_EDX_TSCP;
2737 			break;
2738 
2739 		case X86_VENDOR_AMD:
2740 			if (!is_x86_feature(x86_featureset, X86FSET_TSCP))
2741 				*edx &= ~CPUID_AMD_EDX_TSCP;
2742 			if (!is_x86_feature(x86_featureset, X86FSET_SSE4A))
2743 				*ecx &= ~CPUID_AMD_ECX_SSE4A;
2744 			break;
2745 
2746 		default:
2747 			break;
2748 		}
2749 
2750 		/*
2751 		 * [no explicit support required beyond
2752 		 * x87 fp context and exception handlers]
2753 		 */
2754 		if (!fpu_exists)
2755 			*edx &= ~(CPUID_AMD_EDX_MMXamd |
2756 			    CPUID_AMD_EDX_3DNow | CPUID_AMD_EDX_3DNowx);
2757 
2758 		if (!is_x86_feature(x86_featureset, X86FSET_NX))
2759 			*edx &= ~CPUID_AMD_EDX_NX;
2760 #if !defined(__amd64)
2761 		*edx &= ~CPUID_AMD_EDX_LM;
2762 #endif
2763 		/*
2764 		 * Now map the supported feature vector to
2765 		 * things that we think userland will care about.
2766 		 */
2767 #if defined(__amd64)
2768 		if (*edx & CPUID_AMD_EDX_SYSC)
2769 			hwcap_flags |= AV_386_AMD_SYSC;
2770 #endif
2771 		if (*edx & CPUID_AMD_EDX_MMXamd)
2772 			hwcap_flags |= AV_386_AMD_MMX;
2773 		if (*edx & CPUID_AMD_EDX_3DNow)
2774 			hwcap_flags |= AV_386_AMD_3DNow;
2775 		if (*edx & CPUID_AMD_EDX_3DNowx)
2776 			hwcap_flags |= AV_386_AMD_3DNowx;
2777 		if (*ecx & CPUID_AMD_ECX_SVM)
2778 			hwcap_flags |= AV_386_AMD_SVM;
2779 
2780 		switch (cpi->cpi_vendor) {
2781 		case X86_VENDOR_AMD:
2782 			if (*edx & CPUID_AMD_EDX_TSCP)
2783 				hwcap_flags |= AV_386_TSCP;
2784 			if (*ecx & CPUID_AMD_ECX_AHF64)
2785 				hwcap_flags |= AV_386_AHF;
2786 			if (*ecx & CPUID_AMD_ECX_SSE4A)
2787 				hwcap_flags |= AV_386_AMD_SSE4A;
2788 			if (*ecx & CPUID_AMD_ECX_LZCNT)
2789 				hwcap_flags |= AV_386_AMD_LZCNT;
2790 			break;
2791 
2792 		case X86_VENDOR_Intel:
2793 			if (*edx & CPUID_AMD_EDX_TSCP)
2794 				hwcap_flags |= AV_386_TSCP;
2795 			/*
2796 			 * Aarrgh.
2797 			 * Intel uses a different bit in the same word.
2798 			 */
2799 			if (*ecx & CPUID_INTC_ECX_AHF64)
2800 				hwcap_flags |= AV_386_AHF;
2801 			break;
2802 
2803 		default:
2804 			break;
2805 		}
2806 		break;
2807 
2808 	case X86_VENDOR_TM:
2809 		cp.cp_eax = 0x80860001;
2810 		(void) __cpuid_insn(&cp);
2811 		cpi->cpi_support[TM_EDX_FEATURES] = cp.cp_edx;
2812 		break;
2813 
2814 	default:
2815 		break;
2816 	}
2817 
2818 pass4_done:
2819 	cpi->cpi_pass = 4;
2820 	if (hwcap_out != NULL) {
2821 		hwcap_out[0] = hwcap_flags;
2822 		hwcap_out[1] = hwcap_flags_2;
2823 	}
2824 }
2825 
2826 
2827 /*
2828  * Simulate the cpuid instruction using the data we previously
2829  * captured about this CPU.  We try our best to return the truth
2830  * about the hardware, independently of kernel support.
2831  */
2832 uint32_t
2833 cpuid_insn(cpu_t *cpu, struct cpuid_regs *cp)
2834 {
2835 	struct cpuid_info *cpi;
2836 	struct cpuid_regs *xcp;
2837 
2838 	if (cpu == NULL)
2839 		cpu = CPU;
2840 	cpi = cpu->cpu_m.mcpu_cpi;
2841 
2842 	ASSERT(cpuid_checkpass(cpu, 3));
2843 
2844 	/*
2845 	 * CPUID data is cached in two separate places: cpi_std for standard
2846 	 * CPUID functions, and cpi_extd for extended CPUID functions.
2847 	 */
2848 	if (cp->cp_eax <= cpi->cpi_maxeax && cp->cp_eax < NMAX_CPI_STD)
2849 		xcp = &cpi->cpi_std[cp->cp_eax];
2850 	else if (cp->cp_eax >= 0x80000000 && cp->cp_eax <= cpi->cpi_xmaxeax &&
2851 	    cp->cp_eax < 0x80000000 + NMAX_CPI_EXTD)
2852 		xcp = &cpi->cpi_extd[cp->cp_eax - 0x80000000];
2853 	else
2854 		/*
2855 		 * The caller is asking for data from an input parameter which
2856 		 * the kernel has not cached.  In this case we go fetch from
2857 		 * the hardware and return the data directly to the user.
2858 		 */
2859 		return (__cpuid_insn(cp));
2860 
2861 	cp->cp_eax = xcp->cp_eax;
2862 	cp->cp_ebx = xcp->cp_ebx;
2863 	cp->cp_ecx = xcp->cp_ecx;
2864 	cp->cp_edx = xcp->cp_edx;
2865 	return (cp->cp_eax);
2866 }
2867 
2868 int
2869 cpuid_checkpass(cpu_t *cpu, int pass)
2870 {
2871 	return (cpu != NULL && cpu->cpu_m.mcpu_cpi != NULL &&
2872 	    cpu->cpu_m.mcpu_cpi->cpi_pass >= pass);
2873 }
2874 
2875 int
2876 cpuid_getbrandstr(cpu_t *cpu, char *s, size_t n)
2877 {
2878 	ASSERT(cpuid_checkpass(cpu, 3));
2879 
2880 	return (snprintf(s, n, "%s", cpu->cpu_m.mcpu_cpi->cpi_brandstr));
2881 }
2882 
2883 int
2884 cpuid_is_cmt(cpu_t *cpu)
2885 {
2886 	if (cpu == NULL)
2887 		cpu = CPU;
2888 
2889 	ASSERT(cpuid_checkpass(cpu, 1));
2890 
2891 	return (cpu->cpu_m.mcpu_cpi->cpi_chipid >= 0);
2892 }
2893 
2894 /*
2895  * AMD and Intel both implement the 64-bit variant of the syscall
2896  * instruction (syscallq), so if there's -any- support for syscall,
2897  * cpuid currently says "yes, we support this".
2898  *
2899  * However, Intel decided to -not- implement the 32-bit variant of the
2900  * syscall instruction, so we provide a predicate to allow our caller
2901  * to test that subtlety here.
2902  *
2903  * XXPV	Currently, 32-bit syscall instructions don't work via the hypervisor,
2904  *	even in the case where the hardware would in fact support it.
2905  */
2906 /*ARGSUSED*/
2907 int
2908 cpuid_syscall32_insn(cpu_t *cpu)
2909 {
2910 	ASSERT(cpuid_checkpass((cpu == NULL ? CPU : cpu), 1));
2911 
2912 #if !defined(__xpv)
2913 	if (cpu == NULL)
2914 		cpu = CPU;
2915 
2916 	/*CSTYLED*/
2917 	{
2918 		struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
2919 
2920 		if (cpi->cpi_vendor == X86_VENDOR_AMD &&
2921 		    cpi->cpi_xmaxeax >= 0x80000001 &&
2922 		    (CPI_FEATURES_XTD_EDX(cpi) & CPUID_AMD_EDX_SYSC))
2923 			return (1);
2924 	}
2925 #endif
2926 	return (0);
2927 }
2928 
2929 int
2930 cpuid_getidstr(cpu_t *cpu, char *s, size_t n)
2931 {
2932 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
2933 
2934 	static const char fmt[] =
2935 	    "x86 (%s %X family %d model %d step %d clock %d MHz)";
2936 	static const char fmt_ht[] =
2937 	    "x86 (chipid 0x%x %s %X family %d model %d step %d clock %d MHz)";
2938 
2939 	ASSERT(cpuid_checkpass(cpu, 1));
2940 
2941 	if (cpuid_is_cmt(cpu))
2942 		return (snprintf(s, n, fmt_ht, cpi->cpi_chipid,
2943 		    cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax,
2944 		    cpi->cpi_family, cpi->cpi_model,
2945 		    cpi->cpi_step, cpu->cpu_type_info.pi_clock));
2946 	return (snprintf(s, n, fmt,
2947 	    cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax,
2948 	    cpi->cpi_family, cpi->cpi_model,
2949 	    cpi->cpi_step, cpu->cpu_type_info.pi_clock));
2950 }
2951 
2952 const char *
2953 cpuid_getvendorstr(cpu_t *cpu)
2954 {
2955 	ASSERT(cpuid_checkpass(cpu, 1));
2956 	return ((const char *)cpu->cpu_m.mcpu_cpi->cpi_vendorstr);
2957 }
2958 
2959 uint_t
2960 cpuid_getvendor(cpu_t *cpu)
2961 {
2962 	ASSERT(cpuid_checkpass(cpu, 1));
2963 	return (cpu->cpu_m.mcpu_cpi->cpi_vendor);
2964 }
2965 
2966 uint_t
2967 cpuid_getfamily(cpu_t *cpu)
2968 {
2969 	ASSERT(cpuid_checkpass(cpu, 1));
2970 	return (cpu->cpu_m.mcpu_cpi->cpi_family);
2971 }
2972 
2973 uint_t
2974 cpuid_getmodel(cpu_t *cpu)
2975 {
2976 	ASSERT(cpuid_checkpass(cpu, 1));
2977 	return (cpu->cpu_m.mcpu_cpi->cpi_model);
2978 }
2979 
2980 uint_t
2981 cpuid_get_ncpu_per_chip(cpu_t *cpu)
2982 {
2983 	ASSERT(cpuid_checkpass(cpu, 1));
2984 	return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_per_chip);
2985 }
2986 
2987 uint_t
2988 cpuid_get_ncore_per_chip(cpu_t *cpu)
2989 {
2990 	ASSERT(cpuid_checkpass(cpu, 1));
2991 	return (cpu->cpu_m.mcpu_cpi->cpi_ncore_per_chip);
2992 }
2993 
2994 uint_t
2995 cpuid_get_ncpu_sharing_last_cache(cpu_t *cpu)
2996 {
2997 	ASSERT(cpuid_checkpass(cpu, 2));
2998 	return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_shr_last_cache);
2999 }
3000 
3001 id_t
3002 cpuid_get_last_lvl_cacheid(cpu_t *cpu)
3003 {
3004 	ASSERT(cpuid_checkpass(cpu, 2));
3005 	return (cpu->cpu_m.mcpu_cpi->cpi_last_lvl_cacheid);
3006 }
3007 
3008 uint_t
3009 cpuid_getstep(cpu_t *cpu)
3010 {
3011 	ASSERT(cpuid_checkpass(cpu, 1));
3012 	return (cpu->cpu_m.mcpu_cpi->cpi_step);
3013 }
3014 
3015 uint_t
3016 cpuid_getsig(struct cpu *cpu)
3017 {
3018 	ASSERT(cpuid_checkpass(cpu, 1));
3019 	return (cpu->cpu_m.mcpu_cpi->cpi_std[1].cp_eax);
3020 }
3021 
3022 uint32_t
3023 cpuid_getchiprev(struct cpu *cpu)
3024 {
3025 	ASSERT(cpuid_checkpass(cpu, 1));
3026 	return (cpu->cpu_m.mcpu_cpi->cpi_chiprev);
3027 }
3028 
3029 const char *
3030 cpuid_getchiprevstr(struct cpu *cpu)
3031 {
3032 	ASSERT(cpuid_checkpass(cpu, 1));
3033 	return (cpu->cpu_m.mcpu_cpi->cpi_chiprevstr);
3034 }
3035 
3036 uint32_t
3037 cpuid_getsockettype(struct cpu *cpu)
3038 {
3039 	ASSERT(cpuid_checkpass(cpu, 1));
3040 	return (cpu->cpu_m.mcpu_cpi->cpi_socket);
3041 }
3042 
3043 const char *
3044 cpuid_getsocketstr(cpu_t *cpu)
3045 {
3046 	static const char *socketstr = NULL;
3047 	struct cpuid_info *cpi;
3048 
3049 	ASSERT(cpuid_checkpass(cpu, 1));
3050 	cpi = cpu->cpu_m.mcpu_cpi;
3051 
3052 	/* Assume that socket types are the same across the system */
3053 	if (socketstr == NULL)
3054 		socketstr = _cpuid_sktstr(cpi->cpi_vendor, cpi->cpi_family,
3055 		    cpi->cpi_model, cpi->cpi_step);
3056 
3057 
3058 	return (socketstr);
3059 }
3060 
3061 int
3062 cpuid_get_chipid(cpu_t *cpu)
3063 {
3064 	ASSERT(cpuid_checkpass(cpu, 1));
3065 
3066 	if (cpuid_is_cmt(cpu))
3067 		return (cpu->cpu_m.mcpu_cpi->cpi_chipid);
3068 	return (cpu->cpu_id);
3069 }
3070 
3071 id_t
3072 cpuid_get_coreid(cpu_t *cpu)
3073 {
3074 	ASSERT(cpuid_checkpass(cpu, 1));
3075 	return (cpu->cpu_m.mcpu_cpi->cpi_coreid);
3076 }
3077 
3078 int
3079 cpuid_get_pkgcoreid(cpu_t *cpu)
3080 {
3081 	ASSERT(cpuid_checkpass(cpu, 1));
3082 	return (cpu->cpu_m.mcpu_cpi->cpi_pkgcoreid);
3083 }
3084 
3085 int
3086 cpuid_get_clogid(cpu_t *cpu)
3087 {
3088 	ASSERT(cpuid_checkpass(cpu, 1));
3089 	return (cpu->cpu_m.mcpu_cpi->cpi_clogid);
3090 }
3091 
3092 int
3093 cpuid_get_cacheid(cpu_t *cpu)
3094 {
3095 	ASSERT(cpuid_checkpass(cpu, 1));
3096 	return (cpu->cpu_m.mcpu_cpi->cpi_last_lvl_cacheid);
3097 }
3098 
3099 uint_t
3100 cpuid_get_procnodeid(cpu_t *cpu)
3101 {
3102 	ASSERT(cpuid_checkpass(cpu, 1));
3103 	return (cpu->cpu_m.mcpu_cpi->cpi_procnodeid);
3104 }
3105 
3106 uint_t
3107 cpuid_get_procnodes_per_pkg(cpu_t *cpu)
3108 {
3109 	ASSERT(cpuid_checkpass(cpu, 1));
3110 	return (cpu->cpu_m.mcpu_cpi->cpi_procnodes_per_pkg);
3111 }
3112 
3113 uint_t
3114 cpuid_get_compunitid(cpu_t *cpu)
3115 {
3116 	ASSERT(cpuid_checkpass(cpu, 1));
3117 	return (cpu->cpu_m.mcpu_cpi->cpi_compunitid);
3118 }
3119 
3120 uint_t
3121 cpuid_get_cores_per_compunit(cpu_t *cpu)
3122 {
3123 	ASSERT(cpuid_checkpass(cpu, 1));
3124 	return (cpu->cpu_m.mcpu_cpi->cpi_cores_per_compunit);
3125 }
3126 
3127 /*ARGSUSED*/
3128 int
3129 cpuid_have_cr8access(cpu_t *cpu)
3130 {
3131 #if defined(__amd64)
3132 	return (1);
3133 #else
3134 	struct cpuid_info *cpi;
3135 
3136 	ASSERT(cpu != NULL);
3137 	cpi = cpu->cpu_m.mcpu_cpi;
3138 	if (cpi->cpi_vendor == X86_VENDOR_AMD && cpi->cpi_maxeax >= 1 &&
3139 	    (CPI_FEATURES_XTD_ECX(cpi) & CPUID_AMD_ECX_CR8D) != 0)
3140 		return (1);
3141 	return (0);
3142 #endif
3143 }
3144 
3145 uint32_t
3146 cpuid_get_apicid(cpu_t *cpu)
3147 {
3148 	ASSERT(cpuid_checkpass(cpu, 1));
3149 	if (cpu->cpu_m.mcpu_cpi->cpi_maxeax < 1) {
3150 		return (UINT32_MAX);
3151 	} else {
3152 		return (cpu->cpu_m.mcpu_cpi->cpi_apicid);
3153 	}
3154 }
3155 
3156 void
3157 cpuid_get_addrsize(cpu_t *cpu, uint_t *pabits, uint_t *vabits)
3158 {
3159 	struct cpuid_info *cpi;
3160 
3161 	if (cpu == NULL)
3162 		cpu = CPU;
3163 	cpi = cpu->cpu_m.mcpu_cpi;
3164 
3165 	ASSERT(cpuid_checkpass(cpu, 1));
3166 
3167 	if (pabits)
3168 		*pabits = cpi->cpi_pabits;
3169 	if (vabits)
3170 		*vabits = cpi->cpi_vabits;
3171 }
3172 
3173 /*
3174  * Returns the number of data TLB entries for a corresponding
3175  * pagesize.  If it can't be computed, or isn't known, the
3176  * routine returns zero.  If you ask about an architecturally
3177  * impossible pagesize, the routine will panic (so that the
3178  * hat implementor knows that things are inconsistent.)
3179  */
3180 uint_t
3181 cpuid_get_dtlb_nent(cpu_t *cpu, size_t pagesize)
3182 {
3183 	struct cpuid_info *cpi;
3184 	uint_t dtlb_nent = 0;
3185 
3186 	if (cpu == NULL)
3187 		cpu = CPU;
3188 	cpi = cpu->cpu_m.mcpu_cpi;
3189 
3190 	ASSERT(cpuid_checkpass(cpu, 1));
3191 
3192 	/*
3193 	 * Check the L2 TLB info
3194 	 */
3195 	if (cpi->cpi_xmaxeax >= 0x80000006) {
3196 		struct cpuid_regs *cp = &cpi->cpi_extd[6];
3197 
3198 		switch (pagesize) {
3199 
3200 		case 4 * 1024:
3201 			/*
3202 			 * All zero in the top 16 bits of the register
3203 			 * indicates a unified TLB. Size is in low 16 bits.
3204 			 */
3205 			if ((cp->cp_ebx & 0xffff0000) == 0)
3206 				dtlb_nent = cp->cp_ebx & 0x0000ffff;
3207 			else
3208 				dtlb_nent = BITX(cp->cp_ebx, 27, 16);
3209 			break;
3210 
3211 		case 2 * 1024 * 1024:
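			/*
			 * Same encoding as the 4K case above, but %eax
			 * describes the 2M/4M entries.
			 */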
3212 			if ((cp->cp_eax & 0xffff0000) == 0)
3213 				dtlb_nent = cp->cp_eax & 0x0000ffff;
3214 			else
3215 				dtlb_nent = BITX(cp->cp_eax, 27, 16);
3216 			break;
3217 
3218 		default:
3219 			panic("unknown L2 pagesize");
3220 			/*NOTREACHED*/
3221 		}
3222 	}
3223 
3224 	if (dtlb_nent != 0)
3225 		return (dtlb_nent);
3226 
3227 	/*
3228 	 * No L2 TLB support for this size, try L1.
3229 	 */
3230 	if (cpi->cpi_xmaxeax >= 0x80000005) {
3231 		struct cpuid_regs *cp = &cpi->cpi_extd[5];
3232 
3233 		switch (pagesize) {
3234 		case 4 * 1024:
3235 			dtlb_nent = BITX(cp->cp_ebx, 23, 16);
3236 			break;
3237 		case 2 * 1024 * 1024:
3238 			dtlb_nent = BITX(cp->cp_eax, 23, 16);
3239 			break;
3240 		default:
3241 			panic("unknown L1 d-TLB pagesize");
3242 			/*NOTREACHED*/
3243 		}
3244 	}
3245 
3246 	return (dtlb_nent);
3247 }
3248 
3249 /*
3250  * Return 0 if the erratum is not present or not applicable, positive
3251  * if it is, and negative if the status of the erratum is unknown.
3252  *
3253  * See "Revision Guide for AMD Athlon(tm) 64 and AMD Opteron(tm)
3254  * Processors" #25759, Rev 3.57, August 2005
3255  */
3256 int
3257 cpuid_opteron_erratum(cpu_t *cpu, uint_t erratum)
3258 {
3259 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
3260 	uint_t eax;
3261 
3262 	/*
3263 	 * Bail out if this CPU isn't an AMD CPU, or if it's
3264 	 * a legacy (32-bit) AMD CPU.
3265 	 */
3266 	if (cpi->cpi_vendor != X86_VENDOR_AMD ||
3267 	    cpi->cpi_family == 4 || cpi->cpi_family == 5 ||
3268 	    cpi->cpi_family == 6)
3269 
3270 		return (0);
3271 
3272 	eax = cpi->cpi_std[1].cp_eax;
3273 
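/*
 * The macros below match the raw cpuid function 1 %eax signature
 * (family/model/stepping, including the extended fields) against known
 * silicon revisions; e.g. SH_B0 covers the 0xf40 and 0xf50 signatures.
 */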
3274 #define	SH_B0(eax)	(eax == 0xf40 || eax == 0xf50)
3275 #define	SH_B3(eax) 	(eax == 0xf51)
3276 #define	B(eax)		(SH_B0(eax) || SH_B3(eax))
3277 
3278 #define	SH_C0(eax)	(eax == 0xf48 || eax == 0xf58)
3279 
3280 #define	SH_CG(eax)	(eax == 0xf4a || eax == 0xf5a || eax == 0xf7a)
3281 #define	DH_CG(eax)	(eax == 0xfc0 || eax == 0xfe0 || eax == 0xff0)
3282 #define	CH_CG(eax)	(eax == 0xf82 || eax == 0xfb2)
3283 #define	CG(eax)		(SH_CG(eax) || DH_CG(eax) || CH_CG(eax))
3284 
3285 #define	SH_D0(eax)	(eax == 0x10f40 || eax == 0x10f50 || eax == 0x10f70)
3286 #define	DH_D0(eax)	(eax == 0x10fc0 || eax == 0x10ff0)
3287 #define	CH_D0(eax)	(eax == 0x10f80 || eax == 0x10fb0)
3288 #define	D0(eax)		(SH_D0(eax) || DH_D0(eax) || CH_D0(eax))
3289 
3290 #define	SH_E0(eax)	(eax == 0x20f50 || eax == 0x20f40 || eax == 0x20f70)
3291 #define	JH_E1(eax)	(eax == 0x20f10)	/* JH8_E0 had 0x20f30 */
3292 #define	DH_E3(eax)	(eax == 0x20fc0 || eax == 0x20ff0)
3293 #define	SH_E4(eax)	(eax == 0x20f51 || eax == 0x20f71)
3294 #define	BH_E4(eax)	(eax == 0x20fb1)
3295 #define	SH_E5(eax)	(eax == 0x20f42)
3296 #define	DH_E6(eax)	(eax == 0x20ff2 || eax == 0x20fc2)
3297 #define	JH_E6(eax)	(eax == 0x20f12 || eax == 0x20f32)
3298 #define	EX(eax)		(SH_E0(eax) || JH_E1(eax) || DH_E3(eax) || \
3299 			    SH_E4(eax) || BH_E4(eax) || SH_E5(eax) || \
3300 			    DH_E6(eax) || JH_E6(eax))
3301 
3302 #define	DR_AX(eax)	(eax == 0x100f00 || eax == 0x100f01 || eax == 0x100f02)
3303 #define	DR_B0(eax)	(eax == 0x100f20)
3304 #define	DR_B1(eax)	(eax == 0x100f21)
3305 #define	DR_BA(eax)	(eax == 0x100f2a)
3306 #define	DR_B2(eax)	(eax == 0x100f22)
3307 #define	DR_B3(eax)	(eax == 0x100f23)
3308 #define	RB_C0(eax)	(eax == 0x100f40)
3309 
3310 	switch (erratum) {
3311 	case 1:
3312 		return (cpi->cpi_family < 0x10);
3313 	case 51:	/* what does the asterisk mean? */
3314 		return (B(eax) || SH_C0(eax) || CG(eax));
3315 	case 52:
3316 		return (B(eax));
3317 	case 57:
3318 		return (cpi->cpi_family <= 0x11);
3319 	case 58:
3320 		return (B(eax));
3321 	case 60:
3322 		return (cpi->cpi_family <= 0x11);
3323 	case 61:
3324 	case 62:
3325 	case 63:
3326 	case 64:
3327 	case 65:
3328 	case 66:
3329 	case 68:
3330 	case 69:
3331 	case 70:
3332 	case 71:
3333 		return (B(eax));
3334 	case 72:
3335 		return (SH_B0(eax));
3336 	case 74:
3337 		return (B(eax));
3338 	case 75:
3339 		return (cpi->cpi_family < 0x10);
3340 	case 76:
3341 		return (B(eax));
3342 	case 77:
3343 		return (cpi->cpi_family <= 0x11);
3344 	case 78:
3345 		return (B(eax) || SH_C0(eax));
3346 	case 79:
3347 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
3348 	case 80:
3349 	case 81:
3350 	case 82:
3351 		return (B(eax));
3352 	case 83:
3353 		return (B(eax) || SH_C0(eax) || CG(eax));
3354 	case 85:
3355 		return (cpi->cpi_family < 0x10);
3356 	case 86:
3357 		return (SH_C0(eax) || CG(eax));
3358 	case 88:
3359 #if !defined(__amd64)
3360 		return (0);
3361 #else
3362 		return (B(eax) || SH_C0(eax));
3363 #endif
3364 	case 89:
3365 		return (cpi->cpi_family < 0x10);
3366 	case 90:
3367 		return (B(eax) || SH_C0(eax) || CG(eax));
3368 	case 91:
3369 	case 92:
3370 		return (B(eax) || SH_C0(eax));
3371 	case 93:
3372 		return (SH_C0(eax));
3373 	case 94:
3374 		return (B(eax) || SH_C0(eax) || CG(eax));
3375 	case 95:
3376 #if !defined(__amd64)
3377 		return (0);
3378 #else
3379 		return (B(eax) || SH_C0(eax));
3380 #endif
3381 	case 96:
3382 		return (B(eax) || SH_C0(eax) || CG(eax));
3383 	case 97:
3384 	case 98:
3385 		return (SH_C0(eax) || CG(eax));
3386 	case 99:
3387 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
3388 	case 100:
3389 		return (B(eax) || SH_C0(eax));
3390 	case 101:
3391 	case 103:
3392 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
3393 	case 104:
3394 		return (SH_C0(eax) || CG(eax) || D0(eax));
3395 	case 105:
3396 	case 106:
3397 	case 107:
3398 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
3399 	case 108:
3400 		return (DH_CG(eax));
3401 	case 109:
3402 		return (SH_C0(eax) || CG(eax) || D0(eax));
3403 	case 110:
3404 		return (D0(eax) || EX(eax));
3405 	case 111:
3406 		return (CG(eax));
3407 	case 112:
3408 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
3409 	case 113:
3410 		return (eax == 0x20fc0);
3411 	case 114:
3412 		return (SH_E0(eax) || JH_E1(eax) || DH_E3(eax));
3413 	case 115:
3414 		return (SH_E0(eax) || JH_E1(eax));
3415 	case 116:
3416 		return (SH_E0(eax) || JH_E1(eax) || DH_E3(eax));
3417 	case 117:
3418 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
3419 	case 118:
3420 		return (SH_E0(eax) || JH_E1(eax) || SH_E4(eax) || BH_E4(eax) ||
3421 		    JH_E6(eax));
3422 	case 121:
3423 		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
3424 	case 122:
3425 		return (cpi->cpi_family < 0x10 || cpi->cpi_family == 0x11);
3426 	case 123:
3427 		return (JH_E1(eax) || BH_E4(eax) || JH_E6(eax));
3428 	case 131:
3429 		return (cpi->cpi_family < 0x10);
3430 	case 6336786:
3431 		/*
3432 		 * Test for AdvPowerMgmtInfo.TscPStateInvariant
3433 		 * if this is a K8 family or newer processor
3434 		 */
3435 		if (CPI_FAMILY(cpi) == 0xf) {
3436 			struct cpuid_regs regs;
3437 			regs.cp_eax = 0x80000007;
3438 			(void) __cpuid_insn(&regs);
3439 			return (!(regs.cp_edx & 0x100));
3440 		}
3441 		return (0);
3442 	case 6323525:
3443 		return (((((eax >> 12) & 0xff00) + (eax & 0xf00)) |
3444 		    (((eax >> 4) & 0xf) | ((eax >> 12) & 0xf0))) < 0xf40);
3445 
3446 	case 6671130:
3447 		/*
3448 		 * Check for processors (pre-Shanghai) that do not provide
3449 		 * optimal management of 1gb ptes in their tlb.
3450 		 */
3451 		return (cpi->cpi_family == 0x10 && cpi->cpi_model < 4);
3452 
3453 	case 298:
3454 		return (DR_AX(eax) || DR_B0(eax) || DR_B1(eax) || DR_BA(eax) ||
3455 		    DR_B2(eax) || RB_C0(eax));
3456 
3457 	case 721:
3458 #if defined(__amd64)
3459 		return (cpi->cpi_family == 0x10 || cpi->cpi_family == 0x12);
3460 #else
3461 		return (0);
3462 #endif
3463 
3464 	default:
3465 		return (-1);
3466 
3467 	}
3468 }
3469 
3470 /*
3471  * Determine if specified erratum is present via OSVW (OS Visible Workaround).
3472  * Return 1 if erratum is present, 0 if not present and -1 if indeterminate.
3473  */
3474 int
3475 osvw_opteron_erratum(cpu_t *cpu, uint_t erratum)
3476 {
3477 	struct cpuid_info	*cpi;
3478 	uint_t			osvwid;
3479 	static int		osvwfeature = -1;
3480 	uint64_t		osvwlength;
3481 
3482 
3483 	cpi = cpu->cpu_m.mcpu_cpi;
3484 
3485 	/* confirm OSVW supported */
3486 	if (osvwfeature == -1) {
3487 		osvwfeature = cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW;
3488 	} else {
3489 		/* assert that osvw feature setting is consistent on all cpus */
3490 		ASSERT(osvwfeature ==
3491 		    (cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW));
3492 	}
3493 	if (!osvwfeature)
3494 		return (-1);
3495 
3496 	osvwlength = rdmsr(MSR_AMD_OSVW_ID_LEN) & OSVW_ID_LEN_MASK;
3497 
3498 	switch (erratum) {
3499 	case 298:	/* osvwid is 0 */
3500 		osvwid = 0;
3501 		if (osvwlength <= (uint64_t)osvwid) {
3502 			/* osvwid 0 is unknown */
3503 			return (-1);
3504 		}
3505 
3506 		/*
3507 		 * Check the OSVW STATUS MSR to determine the state
3508 		 * of the erratum where:
3509 		 *   0 - fixed by HW
3510 		 *   1 - BIOS has applied the workaround when BIOS
3511 		 *   workaround is available. (Or for other errata,
3512 		 *   OS workaround is required.)
3513 		 * For a value of 1, caller will confirm that the
3514 		 * erratum 298 workaround has indeed been applied by BIOS.
3515 		 *
3516 		 * A 1 may be set in cpus that have a HW fix
3517 		 * in a mixed cpu system. Regarding erratum 298:
3518 		 *   In a multiprocessor platform, the workaround above
3519 		 *   should be applied to all processors regardless of
3520 		 *   silicon revision when an affected processor is
3521 		 *   present.
3522 		 */
3523 
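		/*
		 * Each STATUS MSR carries OSVW_ID_CNT_PER_MSR status bits,
		 * so id N is bit (N % OSVW_ID_CNT_PER_MSR) of MSR
		 * (MSR_AMD_OSVW_STATUS + N / OSVW_ID_CNT_PER_MSR).
		 */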
3524 		return (rdmsr(MSR_AMD_OSVW_STATUS +
3525 		    (osvwid / OSVW_ID_CNT_PER_MSR)) &
3526 		    (1ULL << (osvwid % OSVW_ID_CNT_PER_MSR)));
3527 
3528 	default:
3529 		return (-1);
3530 	}
3531 }
3532 
3533 static const char assoc_str[] = "associativity";
3534 static const char line_str[] = "line-size";
3535 static const char size_str[] = "size";
3536 
3537 static void
3538 add_cache_prop(dev_info_t *devi, const char *label, const char *type,
3539     uint32_t val)
3540 {
3541 	char buf[128];
3542 
3543 	/*
3544 	 * ndi_prop_update_int() is used because it is desirable for
3545 	 * DDI_PROP_HW_DEF and DDI_PROP_DONTSLEEP to be set.
3546 	 */
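	/*
	 * For example, label "l2-cache" and type "size" produce the
	 * property name "l2-cache-size".
	 */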
3547 	if (snprintf(buf, sizeof (buf), "%s-%s", label, type) < sizeof (buf))
3548 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, devi, buf, val);
3549 }
3550 
3551 /*
3552  * Intel-style cache/tlb description
3553  *
3554  * Standard cpuid level 2 gives a randomly ordered
3555  * selection of tags that index into a table that describes
3556  * cache and tlb properties.
3557  */
3558 
3559 static const char l1_icache_str[] = "l1-icache";
3560 static const char l1_dcache_str[] = "l1-dcache";
3561 static const char l2_cache_str[] = "l2-cache";
3562 static const char l3_cache_str[] = "l3-cache";
3563 static const char itlb4k_str[] = "itlb-4K";
3564 static const char dtlb4k_str[] = "dtlb-4K";
3565 static const char itlb2M_str[] = "itlb-2M";
3566 static const char itlb4M_str[] = "itlb-4M";
3567 static const char dtlb4M_str[] = "dtlb-4M";
3568 static const char dtlb24_str[] = "dtlb0-2M-4M";
3569 static const char itlb424_str[] = "itlb-4K-2M-4M";
3570 static const char itlb24_str[] = "itlb-2M-4M";
3571 static const char dtlb44_str[] = "dtlb-4K-4M";
3572 static const char sl1_dcache_str[] = "sectored-l1-dcache";
3573 static const char sl2_cache_str[] = "sectored-l2-cache";
3574 static const char itrace_str[] = "itrace-cache";
3575 static const char sl3_cache_str[] = "sectored-l3-cache";
3576 static const char sh_l2_tlb4k_str[] = "shared-l2-tlb-4k";
3577 
3578 static const struct cachetab {
3579 	uint8_t 	ct_code;
3580 	uint8_t		ct_assoc;
3581 	uint16_t 	ct_line_size;
3582 	size_t		ct_size;
3583 	const char	*ct_label;
3584 } intel_ctab[] = {
3585 	/*
3586 	 * maintain descending order!
3587 	 *
3588 	 * Codes ignored - Reason
3589 	 * ----------------------
3590 	 * 40H - intel_cpuid_4_cache_info() disambiguates l2/l3 cache
3591 	 * f0H/f1H - Currently we do not interpret prefetch size by design
3592 	 */
3593 	{ 0xe4, 16, 64, 8*1024*1024, l3_cache_str},
3594 	{ 0xe3, 16, 64, 4*1024*1024, l3_cache_str},
3595 	{ 0xe2, 16, 64, 2*1024*1024, l3_cache_str},
3596 	{ 0xde, 12, 64, 6*1024*1024, l3_cache_str},
3597 	{ 0xdd, 12, 64, 3*1024*1024, l3_cache_str},
3598 	{ 0xdc, 12, 64, ((1*1024*1024)+(512*1024)), l3_cache_str},
3599 	{ 0xd8, 8, 64, 4*1024*1024, l3_cache_str},
3600 	{ 0xd7, 8, 64, 2*1024*1024, l3_cache_str},
3601 	{ 0xd6, 8, 64, 1*1024*1024, l3_cache_str},
3602 	{ 0xd2, 4, 64, 2*1024*1024, l3_cache_str},
3603 	{ 0xd1, 4, 64, 1*1024*1024, l3_cache_str},
3604 	{ 0xd0, 4, 64, 512*1024, l3_cache_str},
3605 	{ 0xca, 4, 0, 512, sh_l2_tlb4k_str},
3606 	{ 0xc0, 4, 0, 8, dtlb44_str },
3607 	{ 0xba, 4, 0, 64, dtlb4k_str },
3608 	{ 0xb4, 4, 0, 256, dtlb4k_str },
3609 	{ 0xb3, 4, 0, 128, dtlb4k_str },
3610 	{ 0xb2, 4, 0, 64, itlb4k_str },
3611 	{ 0xb0, 4, 0, 128, itlb4k_str },
3612 	{ 0x87, 8, 64, 1024*1024, l2_cache_str},
3613 	{ 0x86, 4, 64, 512*1024, l2_cache_str},
3614 	{ 0x85, 8, 32, 2*1024*1024, l2_cache_str},
3615 	{ 0x84, 8, 32, 1024*1024, l2_cache_str},
3616 	{ 0x83, 8, 32, 512*1024, l2_cache_str},
3617 	{ 0x82, 8, 32, 256*1024, l2_cache_str},
3618 	{ 0x80, 8, 64, 512*1024, l2_cache_str},
3619 	{ 0x7f, 2, 64, 512*1024, l2_cache_str},
3620 	{ 0x7d, 8, 64, 2*1024*1024, sl2_cache_str},
3621 	{ 0x7c, 8, 64, 1024*1024, sl2_cache_str},
3622 	{ 0x7b, 8, 64, 512*1024, sl2_cache_str},
3623 	{ 0x7a, 8, 64, 256*1024, sl2_cache_str},
3624 	{ 0x79, 8, 64, 128*1024, sl2_cache_str},
3625 	{ 0x78, 8, 64, 1024*1024, l2_cache_str},
3626 	{ 0x73, 8, 0, 64*1024, itrace_str},
3627 	{ 0x72, 8, 0, 32*1024, itrace_str},
3628 	{ 0x71, 8, 0, 16*1024, itrace_str},
3629 	{ 0x70, 8, 0, 12*1024, itrace_str},
3630 	{ 0x68, 4, 64, 32*1024, sl1_dcache_str},
3631 	{ 0x67, 4, 64, 16*1024, sl1_dcache_str},
3632 	{ 0x66, 4, 64, 8*1024, sl1_dcache_str},
3633 	{ 0x60, 8, 64, 16*1024, sl1_dcache_str},
3634 	{ 0x5d, 0, 0, 256, dtlb44_str},
3635 	{ 0x5c, 0, 0, 128, dtlb44_str},
3636 	{ 0x5b, 0, 0, 64, dtlb44_str},
3637 	{ 0x5a, 4, 0, 32, dtlb24_str},
3638 	{ 0x59, 0, 0, 16, dtlb4k_str},
3639 	{ 0x57, 4, 0, 16, dtlb4k_str},
3640 	{ 0x56, 4, 0, 16, dtlb4M_str},
3641 	{ 0x55, 0, 0, 7, itlb24_str},
3642 	{ 0x52, 0, 0, 256, itlb424_str},
3643 	{ 0x51, 0, 0, 128, itlb424_str},
3644 	{ 0x50, 0, 0, 64, itlb424_str},
3645 	{ 0x4f, 0, 0, 32, itlb4k_str},
3646 	{ 0x4e, 24, 64, 6*1024*1024, l2_cache_str},
3647 	{ 0x4d, 16, 64, 16*1024*1024, l3_cache_str},
3648 	{ 0x4c, 12, 64, 12*1024*1024, l3_cache_str},
3649 	{ 0x4b, 16, 64, 8*1024*1024, l3_cache_str},
3650 	{ 0x4a, 12, 64, 6*1024*1024, l3_cache_str},
3651 	{ 0x49, 16, 64, 4*1024*1024, l3_cache_str},
3652 	{ 0x48, 12, 64, 3*1024*1024, l2_cache_str},
3653 	{ 0x47, 8, 64, 8*1024*1024, l3_cache_str},
3654 	{ 0x46, 4, 64, 4*1024*1024, l3_cache_str},
3655 	{ 0x45, 4, 32, 2*1024*1024, l2_cache_str},
3656 	{ 0x44, 4, 32, 1024*1024, l2_cache_str},
3657 	{ 0x43, 4, 32, 512*1024, l2_cache_str},
3658 	{ 0x42, 4, 32, 256*1024, l2_cache_str},
3659 	{ 0x41, 4, 32, 128*1024, l2_cache_str},
3660 	{ 0x3e, 4, 64, 512*1024, sl2_cache_str},
3661 	{ 0x3d, 6, 64, 384*1024, sl2_cache_str},
3662 	{ 0x3c, 4, 64, 256*1024, sl2_cache_str},
3663 	{ 0x3b, 2, 64, 128*1024, sl2_cache_str},
3664 	{ 0x3a, 6, 64, 192*1024, sl2_cache_str},
3665 	{ 0x39, 4, 64, 128*1024, sl2_cache_str},
3666 	{ 0x30, 8, 64, 32*1024, l1_icache_str},
3667 	{ 0x2c, 8, 64, 32*1024, l1_dcache_str},
3668 	{ 0x29, 8, 64, 4096*1024, sl3_cache_str},
3669 	{ 0x25, 8, 64, 2048*1024, sl3_cache_str},
3670 	{ 0x23, 8, 64, 1024*1024, sl3_cache_str},
3671 	{ 0x22, 4, 64, 512*1024, sl3_cache_str},
3672 	{ 0x0e, 6, 64, 24*1024, l1_dcache_str},
3673 	{ 0x0d, 4, 32, 16*1024, l1_dcache_str},
3674 	{ 0x0c, 4, 32, 16*1024, l1_dcache_str},
3675 	{ 0x0b, 4, 0, 4, itlb4M_str},
3676 	{ 0x0a, 2, 32, 8*1024, l1_dcache_str},
3677 	{ 0x08, 4, 32, 16*1024, l1_icache_str},
3678 	{ 0x06, 4, 32, 8*1024, l1_icache_str},
3679 	{ 0x05, 4, 0, 32, dtlb4M_str},
3680 	{ 0x04, 4, 0, 8, dtlb4M_str},
3681 	{ 0x03, 4, 0, 64, dtlb4k_str},
3682 	{ 0x02, 4, 0, 2, itlb4M_str},
3683 	{ 0x01, 4, 0, 32, itlb4k_str},
3684 	{ 0 }
3685 };
3686 
3687 static const struct cachetab cyrix_ctab[] = {
3688 	{ 0x70, 4, 0, 32, "tlb-4K" },
3689 	{ 0x80, 4, 16, 16*1024, "l1-cache" },
3690 	{ 0 }
3691 };
3692 
3693 /*
3694  * Search a cache table (sorted by descending descriptor code) for a matching entry
3695  */
3696 static const struct cachetab *
3697 find_cacheent(const struct cachetab *ct, uint_t code)
3698 {
3699 	if (code != 0) {
3700 		for (; ct->ct_code != 0; ct++)
3701 			if (ct->ct_code <= code)
3702 				break;
3703 		if (ct->ct_code == code)
3704 			return (ct);
3705 	}
3706 	return (NULL);
3707 }
3708 
3709 /*
3710  * Populate cachetab entry with L2 or L3 cache-information using
3711  * cpuid function 4. This function is called from intel_walk_cacheinfo()
3712  * when descriptor 0x49 is encountered. It returns 0 if no such cache
3713  * information is found.
3714  */
3715 static int
3716 intel_cpuid_4_cache_info(struct cachetab *ct, struct cpuid_info *cpi)
3717 {
3718 	uint32_t level, i;
3719 	int ret = 0;
3720 
3721 	for (i = 0; i < cpi->cpi_std_4_size; i++) {
3722 		level = CPI_CACHE_LVL(cpi->cpi_std_4[i]);
3723 
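		/*
		 * Leaf 4 reports each cache parameter minus one, so the total
		 * size below works out to ways * partitions * line size *
		 * sets (sets being ECX + 1).  As an illustrative example,
		 * 8 ways, 1 partition, 64-byte lines and 8192 sets describe
		 * a 4MB cache.
		 */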
3724 		if (level == 2 || level == 3) {
3725 			ct->ct_assoc = CPI_CACHE_WAYS(cpi->cpi_std_4[i]) + 1;
3726 			ct->ct_line_size =
3727 			    CPI_CACHE_COH_LN_SZ(cpi->cpi_std_4[i]) + 1;
3728 			ct->ct_size = ct->ct_assoc *
3729 			    (CPI_CACHE_PARTS(cpi->cpi_std_4[i]) + 1) *
3730 			    ct->ct_line_size *
3731 			    (cpi->cpi_std_4[i]->cp_ecx + 1);
3732 
3733 			if (level == 2) {
3734 				ct->ct_label = l2_cache_str;
3735 			} else if (level == 3) {
3736 				ct->ct_label = l3_cache_str;
3737 			}
3738 			ret = 1;
3739 		}
3740 	}
3741 
3742 	return (ret);
3743 }
3744 
3745 /*
3746  * Walk the cacheinfo descriptor list, applying 'func' to every valid element.
3747  * The walk is terminated if the walker returns non-zero.
3748  */
3749 static void
3750 intel_walk_cacheinfo(struct cpuid_info *cpi,
3751     void *arg, int (*func)(void *, const struct cachetab *))
3752 {
3753 	const struct cachetab *ct;
3754 	struct cachetab des_49_ct, des_b1_ct;
3755 	uint8_t *dp;
3756 	int i;
3757 
3758 	if ((dp = cpi->cpi_cacheinfo) == NULL)
3759 		return;
3760 	for (i = 0; i < cpi->cpi_ncache; i++, dp++) {
3761 		/*
3762 		 * For overloaded descriptor 0x49 we use cpuid function 4
3763 		 * if supported by the current processor, to create
3764 		 * cache information.
3765 		 * For overloaded descriptor 0xb1 we use X86_PAE flag
3766 		 * to disambiguate the cache information.
3767 		 */
3768 		if (*dp == 0x49 && cpi->cpi_maxeax >= 0x4 &&
3769 		    intel_cpuid_4_cache_info(&des_49_ct, cpi) == 1) {
3770 			ct = &des_49_ct;
3771 		} else if (*dp == 0xb1) {
3772 			des_b1_ct.ct_code = 0xb1;
3773 			des_b1_ct.ct_assoc = 4;
3774 			des_b1_ct.ct_line_size = 0;
3775 			if (is_x86_feature(x86_featureset, X86FSET_PAE)) {
3776 				des_b1_ct.ct_size = 8;
3777 				des_b1_ct.ct_label = itlb2M_str;
3778 			} else {
3779 				des_b1_ct.ct_size = 4;
3780 				des_b1_ct.ct_label = itlb4M_str;
3781 			}
3782 			ct = &des_b1_ct;
3783 		} else {
3784 			if ((ct = find_cacheent(intel_ctab, *dp)) == NULL) {
3785 				continue;
3786 			}
3787 		}
3788 
3789 		if (func(arg, ct) != 0) {
3790 			break;
3791 		}
3792 	}
3793 }
3794 
3795 /*
3796  * (Like the Intel one, except for Cyrix CPUs)
3797  */
3798 static void
3799 cyrix_walk_cacheinfo(struct cpuid_info *cpi,
3800     void *arg, int (*func)(void *, const struct cachetab *))
3801 {
3802 	const struct cachetab *ct;
3803 	uint8_t *dp;
3804 	int i;
3805 
3806 	if ((dp = cpi->cpi_cacheinfo) == NULL)
3807 		return;
3808 	for (i = 0; i < cpi->cpi_ncache; i++, dp++) {
3809 		/*
3810 		 * Search Cyrix-specific descriptor table first ..
3811 		 */
3812 		if ((ct = find_cacheent(cyrix_ctab, *dp)) != NULL) {
3813 			if (func(arg, ct) != 0)
3814 				break;
3815 			continue;
3816 		}
3817 		/*
3818 		 * .. else fall back to the Intel one
3819 		 */
3820 		if ((ct = find_cacheent(intel_ctab, *dp)) != NULL) {
3821 			if (func(arg, ct) != 0)
3822 				break;
3823 			continue;
3824 		}
3825 	}
3826 }
3827 
3828 /*
3829  * A cacheinfo walker that adds associativity, line-size, and size properties
3830  * to the devinfo node it is passed as an argument.
3831  */
3832 static int
3833 add_cacheent_props(void *arg, const struct cachetab *ct)
3834 {
3835 	dev_info_t *devi = arg;
3836 
3837 	add_cache_prop(devi, ct->ct_label, assoc_str, ct->ct_assoc);
3838 	if (ct->ct_line_size != 0)
3839 		add_cache_prop(devi, ct->ct_label, line_str,
3840 		    ct->ct_line_size);
3841 	add_cache_prop(devi, ct->ct_label, size_str, ct->ct_size);
3842 	return (0);
3843 }
3844 
3845 
3846 static const char fully_assoc[] = "fully-associative?";
3847 
3848 /*
3849  * AMD style cache/tlb description
3850  *
3851  * Extended functions 5 and 6 directly describe properties of
3852  * tlbs and various cache levels.
3853  */
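/*
 * As decoded below, extended function 5 reports the L1 TLBs and caches
 * (EAX: 2M/4M TLBs, EBX: 4K TLBs, ECX: L1 data cache, EDX: L1 instruction
 * cache), and extended function 6 reports the L2 TLBs and the L2 cache.
 */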
3854 static void
3855 add_amd_assoc(dev_info_t *devi, const char *label, uint_t assoc)
3856 {
3857 	switch (assoc) {
3858 	case 0:	/* reserved; ignore */
3859 		break;
3860 	default:
3861 		add_cache_prop(devi, label, assoc_str, assoc);
3862 		break;
3863 	case 0xff:
3864 		add_cache_prop(devi, label, fully_assoc, 1);
3865 		break;
3866 	}
3867 }
3868 
3869 static void
3870 add_amd_tlb(dev_info_t *devi, const char *label, uint_t assoc, uint_t size)
3871 {
3872 	if (size == 0)
3873 		return;
3874 	add_cache_prop(devi, label, size_str, size);
3875 	add_amd_assoc(devi, label, assoc);
3876 }
3877 
3878 static void
3879 add_amd_cache(dev_info_t *devi, const char *label,
3880     uint_t size, uint_t assoc, uint_t lines_per_tag, uint_t line_size)
3881 {
3882 	if (size == 0 || line_size == 0)
3883 		return;
3884 	add_amd_assoc(devi, label, assoc);
3885 	/*
3886 	 * Most AMD parts have a sectored cache. Multiple cache lines are
3887 	 * associated with each tag. A sector consists of all cache lines
3888 	 * associated with a tag. For example, the AMD K6-III has a sector
3889 	 * size of 2 cache lines per tag.
3890 	 */
3891 	if (lines_per_tag != 0)
3892 		add_cache_prop(devi, label, "lines-per-tag", lines_per_tag);
3893 	add_cache_prop(devi, label, line_str, line_size);
3894 	add_cache_prop(devi, label, size_str, size * 1024);
3895 }
3896 
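/*
 * The L2/L3 associativity is a 4-bit encoding rather than a literal count:
 * 0 means the cache or TLB is disabled, 1, 2 and 4 are taken literally,
 * 6 means 8-way, 8 means 16-way, 0xf means fully associative, and the
 * remaining values are reserved.  (The amd_afd[] table further below
 * covers the full encoding.)
 */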
3897 static void
3898 add_amd_l2_assoc(dev_info_t *devi, const char *label, uint_t assoc)
3899 {
3900 	switch (assoc) {
3901 	case 0:	/* off */
3902 		break;
3903 	case 1:
3904 	case 2:
3905 	case 4:
3906 		add_cache_prop(devi, label, assoc_str, assoc);
3907 		break;
3908 	case 6:
3909 		add_cache_prop(devi, label, assoc_str, 8);
3910 		break;
3911 	case 8:
3912 		add_cache_prop(devi, label, assoc_str, 16);
3913 		break;
3914 	case 0xf:
3915 		add_cache_prop(devi, label, fully_assoc, 1);
3916 		break;
3917 	default: /* reserved; ignore */
3918 		break;
3919 	}
3920 }
3921 
3922 static void
3923 add_amd_l2_tlb(dev_info_t *devi, const char *label, uint_t assoc, uint_t size)
3924 {
3925 	if (size == 0 || assoc == 0)
3926 		return;
3927 	add_amd_l2_assoc(devi, label, assoc);
3928 	add_cache_prop(devi, label, size_str, size);
3929 }
3930 
3931 static void
3932 add_amd_l2_cache(dev_info_t *devi, const char *label,
3933     uint_t size, uint_t assoc, uint_t lines_per_tag, uint_t line_size)
3934 {
3935 	if (size == 0 || assoc == 0 || line_size == 0)
3936 		return;
3937 	add_amd_l2_assoc(devi, label, assoc);
3938 	if (lines_per_tag != 0)
3939 		add_cache_prop(devi, label, "lines-per-tag", lines_per_tag);
3940 	add_cache_prop(devi, label, line_str, line_size);
3941 	add_cache_prop(devi, label, size_str, size * 1024);
3942 }
3943 
3944 static void
3945 amd_cache_info(struct cpuid_info *cpi, dev_info_t *devi)
3946 {
3947 	struct cpuid_regs *cp;
3948 
3949 	if (cpi->cpi_xmaxeax < 0x80000005)
3950 		return;
3951 	cp = &cpi->cpi_extd[5];
3952 
3953 	/*
3954 	 * 4M/2M L1 TLB configuration
3955 	 *
3956 	 * We report the size for 2M pages because AMD uses two
3957 	 * TLB entries for one 4M page.
3958 	 */
3959 	add_amd_tlb(devi, "dtlb-2M",
3960 	    BITX(cp->cp_eax, 31, 24), BITX(cp->cp_eax, 23, 16));
3961 	add_amd_tlb(devi, "itlb-2M",
3962 	    BITX(cp->cp_eax, 15, 8), BITX(cp->cp_eax, 7, 0));
3963 
3964 	/*
3965 	 * 4K L1 TLB configuration
3966 	 */
3967 
3968 	switch (cpi->cpi_vendor) {
3969 		uint_t nentries;
3970 	case X86_VENDOR_TM:
3971 		if (cpi->cpi_family >= 5) {
3972 			/*
3973 			 * Crusoe processors have 256 TLB entries, but
3974 			 * cpuid data format constrains them to only
3975 			 * reporting 255 of them.
3976 			 */
3977 			if ((nentries = BITX(cp->cp_ebx, 23, 16)) == 255)
3978 				nentries = 256;
3979 			/*
3980 			 * Crusoe processors also have a unified TLB
3981 			 */
3982 			add_amd_tlb(devi, "tlb-4K", BITX(cp->cp_ebx, 31, 24),
3983 			    nentries);
3984 			break;
3985 		}
3986 		/*FALLTHROUGH*/
3987 	default:
3988 		add_amd_tlb(devi, itlb4k_str,
3989 		    BITX(cp->cp_ebx, 31, 24), BITX(cp->cp_ebx, 23, 16));
3990 		add_amd_tlb(devi, dtlb4k_str,
3991 		    BITX(cp->cp_ebx, 15, 8), BITX(cp->cp_ebx, 7, 0));
3992 		break;
3993 	}
3994 
3995 	/*
3996 	 * data L1 cache configuration
3997 	 */
3998 
3999 	add_amd_cache(devi, l1_dcache_str,
4000 	    BITX(cp->cp_ecx, 31, 24), BITX(cp->cp_ecx, 23, 16),
4001 	    BITX(cp->cp_ecx, 15, 8), BITX(cp->cp_ecx, 7, 0));
4002 
4003 	/*
4004 	 * code L1 cache configuration
4005 	 */
4006 
4007 	add_amd_cache(devi, l1_icache_str,
4008 	    BITX(cp->cp_edx, 31, 24), BITX(cp->cp_edx, 23, 16),
4009 	    BITX(cp->cp_edx, 15, 8), BITX(cp->cp_edx, 7, 0));
4010 
4011 	if (cpi->cpi_xmaxeax < 0x80000006)
4012 		return;
4013 	cp = &cpi->cpi_extd[6];
4014 
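	/*
	 * In extended function 6, EAX describes the L2 TLB for 2M/4M pages,
	 * EBX the L2 TLB for 4K pages, and ECX the L2 cache; that is the
	 * layout assumed by the decoding below.
	 */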
4015 	/* Check for a unified L2 TLB for large pages */
4016 
4017 	if (BITX(cp->cp_eax, 31, 16) == 0)
4018 		add_amd_l2_tlb(devi, "l2-tlb-2M",
4019 		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
4020 	else {
4021 		add_amd_l2_tlb(devi, "l2-dtlb-2M",
4022 		    BITX(cp->cp_eax, 31, 28), BITX(cp->cp_eax, 27, 16));
4023 		add_amd_l2_tlb(devi, "l2-itlb-2M",
4024 		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
4025 	}
4026 
4027 	/* Check for a unified L2 TLB for 4K pages */
4028 
4029 	if (BITX(cp->cp_ebx, 31, 16) == 0) {
4030 		add_amd_l2_tlb(devi, "l2-tlb-4K",
4031 		    BITX(cp->cp_ebx, 15, 12), BITX(cp->cp_ebx, 11, 0));
4032 	} else {
4033 		add_amd_l2_tlb(devi, "l2-dtlb-4K",
4034 		    BITX(cp->cp_ebx, 31, 28), BITX(cp->cp_ebx, 27, 16));
4035 		add_amd_l2_tlb(devi, "l2-itlb-4K",
4036 		    BITX(cp->cp_ebx, 15, 12), BITX(cp->cp_ebx, 11, 0));
4037 	}
4038 
4039 	add_amd_l2_cache(devi, l2_cache_str,
4040 	    BITX(cp->cp_ecx, 31, 16), BITX(cp->cp_ecx, 15, 12),
4041 	    BITX(cp->cp_ecx, 11, 8), BITX(cp->cp_ecx, 7, 0));
4042 }
4043 
4044 /*
4045  * There are two basic ways that the x86 world describes its cache
4046  * and tlb architecture - Intel's way and AMD's way.
4047  *
4048  * Return which flavor of cache architecture we should use
4049  */
4050 static int
4051 x86_which_cacheinfo(struct cpuid_info *cpi)
4052 {
4053 	switch (cpi->cpi_vendor) {
4054 	case X86_VENDOR_Intel:
4055 		if (cpi->cpi_maxeax >= 2)
4056 			return (X86_VENDOR_Intel);
4057 		break;
4058 	case X86_VENDOR_AMD:
4059 		/*
4060 		 * The K5 model 1 was the first part from AMD that reported
4061 		 * cache sizes via extended cpuid functions.
4062 		 */
4063 		if (cpi->cpi_family > 5 ||
4064 		    (cpi->cpi_family == 5 && cpi->cpi_model >= 1))
4065 			return (X86_VENDOR_AMD);
4066 		break;
4067 	case X86_VENDOR_TM:
4068 		if (cpi->cpi_family >= 5)
4069 			return (X86_VENDOR_AMD);
4070 		/*FALLTHROUGH*/
4071 	default:
4072 		/*
4073 		 * If they have extended CPU data for 0x80000005
4074 		 * then we assume they have AMD-format cache
4075 		 * information.
4076 		 *
4077 		 * If not, and the vendor happens to be Cyrix,
4078 		 * then try our Cyrix-specific handler.
4079 		 *
4080 		 * If we're not Cyrix, then assume we're using Intel's
4081 		 * table-driven format instead.
4082 		 */
4083 		if (cpi->cpi_xmaxeax >= 0x80000005)
4084 			return (X86_VENDOR_AMD);
4085 		else if (cpi->cpi_vendor == X86_VENDOR_Cyrix)
4086 			return (X86_VENDOR_Cyrix);
4087 		else if (cpi->cpi_maxeax >= 2)
4088 			return (X86_VENDOR_Intel);
4089 		break;
4090 	}
4091 	return (-1);
4092 }
4093 
4094 void
4095 cpuid_set_cpu_properties(void *dip, processorid_t cpu_id,
4096     struct cpuid_info *cpi)
4097 {
4098 	dev_info_t *cpu_devi;
4099 	int create;
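	/*
	 * Most of the properties below are only created when the vendor and
	 * family are known to report meaningful data; 'create' gates each
	 * ndi_prop_update_*() call accordingly.
	 */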
4100 
4101 	cpu_devi = (dev_info_t *)dip;
4102 
4103 	/* device_type */
4104 	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
4105 	    "device_type", "cpu");
4106 
4107 	/* reg */
4108 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4109 	    "reg", cpu_id);
4110 
4111 	/* cpu-mhz, and clock-frequency */
4112 	if (cpu_freq > 0) {
4113 		long long mul;
4114 
4115 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4116 		    "cpu-mhz", cpu_freq);
4117 		if ((mul = cpu_freq * 1000000LL) <= INT_MAX)
4118 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4119 			    "clock-frequency", (int)mul);
4120 	}
4121 
4122 	if (!is_x86_feature(x86_featureset, X86FSET_CPUID)) {
4123 		return;
4124 	}
4125 
4126 	/* vendor-id */
4127 	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
4128 	    "vendor-id", cpi->cpi_vendorstr);
4129 
4130 	if (cpi->cpi_maxeax == 0) {
4131 		return;
4132 	}
4133 
4134 	/*
4135 	 * family, model, and step
4136 	 */
4137 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4138 	    "family", CPI_FAMILY(cpi));
4139 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4140 	    "cpu-model", CPI_MODEL(cpi));
4141 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4142 	    "stepping-id", CPI_STEP(cpi));
4143 
4144 	/* type */
4145 	switch (cpi->cpi_vendor) {
4146 	case X86_VENDOR_Intel:
4147 		create = 1;
4148 		break;
4149 	default:
4150 		create = 0;
4151 		break;
4152 	}
4153 	if (create)
4154 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4155 		    "type", CPI_TYPE(cpi));
4156 
4157 	/* ext-family */
4158 	switch (cpi->cpi_vendor) {
4159 	case X86_VENDOR_Intel:
4160 	case X86_VENDOR_AMD:
4161 		create = cpi->cpi_family >= 0xf;
4162 		break;
4163 	default:
4164 		create = 0;
4165 		break;
4166 	}
4167 	if (create)
4168 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4169 		    "ext-family", CPI_FAMILY_XTD(cpi));
4170 
4171 	/* ext-model */
4172 	switch (cpi->cpi_vendor) {
4173 	case X86_VENDOR_Intel:
4174 		create = IS_EXTENDED_MODEL_INTEL(cpi);
4175 		break;
4176 	case X86_VENDOR_AMD:
4177 		create = CPI_FAMILY(cpi) == 0xf;
4178 		break;
4179 	default:
4180 		create = 0;
4181 		break;
4182 	}
4183 	if (create)
4184 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4185 		    "ext-model", CPI_MODEL_XTD(cpi));
4186 
4187 	/* generation */
4188 	switch (cpi->cpi_vendor) {
4189 	case X86_VENDOR_AMD:
4190 		/*
4191 		 * AMD K5 model 1 was the first part to support this
4192 		 */
4193 		create = cpi->cpi_xmaxeax >= 0x80000001;
4194 		break;
4195 	default:
4196 		create = 0;
4197 		break;
4198 	}
4199 	if (create)
4200 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4201 		    "generation", BITX((cpi)->cpi_extd[1].cp_eax, 11, 8));
4202 
4203 	/* brand-id */
4204 	switch (cpi->cpi_vendor) {
4205 	case X86_VENDOR_Intel:
4206 		/*
4207 		 * brand id first appeared on Pentium III Xeon model 8
4208 		 * and Celeron model 8 processors, and on Opteron
4209 		 */
4210 		create = cpi->cpi_family > 6 ||
4211 		    (cpi->cpi_family == 6 && cpi->cpi_model >= 8);
4212 		break;
4213 	case X86_VENDOR_AMD:
4214 		create = cpi->cpi_family >= 0xf;
4215 		break;
4216 	default:
4217 		create = 0;
4218 		break;
4219 	}
4220 	if (create && cpi->cpi_brandid != 0) {
4221 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4222 		    "brand-id", cpi->cpi_brandid);
4223 	}
4224 
4225 	/* chunks, and apic-id */
4226 	switch (cpi->cpi_vendor) {
4227 		/*
4228 		 * first available on Pentium IV and Opteron (K8)
4229 		 */
4230 	case X86_VENDOR_Intel:
4231 		create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf;
4232 		break;
4233 	case X86_VENDOR_AMD:
4234 		create = cpi->cpi_family >= 0xf;
4235 		break;
4236 	default:
4237 		create = 0;
4238 		break;
4239 	}
4240 	if (create) {
4241 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4242 		    "chunks", CPI_CHUNKS(cpi));
4243 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4244 		    "apic-id", cpi->cpi_apicid);
4245 		if (cpi->cpi_chipid >= 0) {
4246 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4247 			    "chip#", cpi->cpi_chipid);
4248 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4249 			    "clog#", cpi->cpi_clogid);
4250 		}
4251 	}
4252 
4253 	/* cpuid-features */
4254 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4255 	    "cpuid-features", CPI_FEATURES_EDX(cpi));
4256 
4257 
4258 	/* cpuid-features-ecx */
4259 	switch (cpi->cpi_vendor) {
4260 	case X86_VENDOR_Intel:
4261 		create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf;
4262 		break;
4263 	case X86_VENDOR_AMD:
4264 		create = cpi->cpi_family >= 0xf;
4265 		break;
4266 	default:
4267 		create = 0;
4268 		break;
4269 	}
4270 	if (create)
4271 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4272 		    "cpuid-features-ecx", CPI_FEATURES_ECX(cpi));
4273 
4274 	/* ext-cpuid-features */
4275 	switch (cpi->cpi_vendor) {
4276 	case X86_VENDOR_Intel:
4277 	case X86_VENDOR_AMD:
4278 	case X86_VENDOR_Cyrix:
4279 	case X86_VENDOR_TM:
4280 	case X86_VENDOR_Centaur:
4281 		create = cpi->cpi_xmaxeax >= 0x80000001;
4282 		break;
4283 	default:
4284 		create = 0;
4285 		break;
4286 	}
4287 	if (create) {
4288 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4289 		    "ext-cpuid-features", CPI_FEATURES_XTD_EDX(cpi));
4290 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
4291 		    "ext-cpuid-features-ecx", CPI_FEATURES_XTD_ECX(cpi));
4292 	}
4293 
4294 	/*
4295 	 * Brand String first appeared in Intel Pentium IV, AMD K5
4296 	 * model 1, and Cyrix GXm.  On earlier models we try to
4297 	 * simulate something similar .. so this string should always
4298 	 * say -something- about the processor, however lame.
4299 	 */
4300 	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
4301 	    "brand-string", cpi->cpi_brandstr);
4302 
4303 	/*
4304 	 * Finally, cache and tlb information
4305 	 */
4306 	switch (x86_which_cacheinfo(cpi)) {
4307 	case X86_VENDOR_Intel:
4308 		intel_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props);
4309 		break;
4310 	case X86_VENDOR_Cyrix:
4311 		cyrix_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props);
4312 		break;
4313 	case X86_VENDOR_AMD:
4314 		amd_cache_info(cpi, cpu_devi);
4315 		break;
4316 	default:
4317 		break;
4318 	}
4319 }
4320 
4321 struct l2info {
4322 	int *l2i_csz;
4323 	int *l2i_lsz;
4324 	int *l2i_assoc;
4325 	int l2i_ret;
4326 };
4327 
4328 /*
4329  * A cacheinfo walker that fetches the size, line-size and associativity
4330  * of the L2 cache
4331  */
4332 static int
4333 intel_l2cinfo(void *arg, const struct cachetab *ct)
4334 {
4335 	struct l2info *l2i = arg;
4336 	int *ip;
4337 
4338 	if (ct->ct_label != l2_cache_str &&
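	/*
	 * The walkers always hand back entries labelled with the shared
	 * string constants, so comparing label pointers is sufficient to
	 * recognize an L2 entry.
	 */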
4339 	    ct->ct_label != sl2_cache_str)
4340 		return (0);	/* not an L2 -- keep walking */
4341 
4342 	if ((ip = l2i->l2i_csz) != NULL)
4343 		*ip = ct->ct_size;
4344 	if ((ip = l2i->l2i_lsz) != NULL)
4345 		*ip = ct->ct_line_size;
4346 	if ((ip = l2i->l2i_assoc) != NULL)
4347 		*ip = ct->ct_assoc;
4348 	l2i->l2i_ret = ct->ct_size;
4349 	return (1);		/* was an L2 -- terminate walk */
4350 }
4351 
4352 /*
4353  * AMD L2/L3 Cache and TLB Associativity Field Definition:
4354  *
4355  *	Unlike the associativity for the L1 cache and tlb where the 8 bit
4356  *	value is the associativity, the associativity for the L2 cache and
4357  *	tlb is encoded in the following table. The 4 bit L2 value serves as
4358  *	an index into the amd_afd[] array to determine the associativity.
4359  *	-1 is undefined. 0 is fully associative.
4360  */
4361 
4362 static int amd_afd[] =
4363 	{-1, 1, 2, -1, 4, -1, 8, -1, 16, -1, 32, 48, 64, 96, 128, 0};
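/*
 * For example, an associativity field of 0x6 selects amd_afd[6] == 8,
 * i.e. an 8-way cache or TLB, while 0xf selects 0, i.e. fully associative.
 */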
4364 
4365 static void
4366 amd_l2cacheinfo(struct cpuid_info *cpi, struct l2info *l2i)
4367 {
4368 	struct cpuid_regs *cp;
4369 	uint_t size, assoc;
4370 	int i;
4371 	int *ip;
4372 
4373 	if (cpi->cpi_xmaxeax < 0x80000006)
4374 		return;
4375 	cp = &cpi->cpi_extd[6];
4376 
4377 	if ((i = BITX(cp->cp_ecx, 15, 12)) != 0 &&
4378 	    (size = BITX(cp->cp_ecx, 31, 16)) != 0) {
4379 		uint_t cachesz = size * 1024;
4380 		assoc = amd_afd[i];
4381 
4382 		ASSERT(assoc != -1);
4383 
4384 		if ((ip = l2i->l2i_csz) != NULL)
4385 			*ip = cachesz;
4386 		if ((ip = l2i->l2i_lsz) != NULL)
4387 			*ip = BITX(cp->cp_ecx, 7, 0);
4388 		if ((ip = l2i->l2i_assoc) != NULL)
4389 			*ip = assoc;
4390 		l2i->l2i_ret = cachesz;
4391 	}
4392 }
4393 
4394 int
4395 getl2cacheinfo(cpu_t *cpu, int *csz, int *lsz, int *assoc)
4396 {
4397 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
4398 	struct l2info __l2info, *l2i = &__l2info;
4399 
4400 	l2i->l2i_csz = csz;
4401 	l2i->l2i_lsz = lsz;
4402 	l2i->l2i_assoc = assoc;
4403 	l2i->l2i_ret = -1;
4404 
4405 	switch (x86_which_cacheinfo(cpi)) {
4406 	case X86_VENDOR_Intel:
4407 		intel_walk_cacheinfo(cpi, l2i, intel_l2cinfo);
4408 		break;
4409 	case X86_VENDOR_Cyrix:
4410 		cyrix_walk_cacheinfo(cpi, l2i, intel_l2cinfo);
4411 		break;
4412 	case X86_VENDOR_AMD:
4413 		amd_l2cacheinfo(cpi, l2i);
4414 		break;
4415 	default:
4416 		break;
4417 	}
4418 	return (l2i->l2i_ret);
4419 }
4420 
4421 #if !defined(__xpv)
4422 
4423 uint32_t *
4424 cpuid_mwait_alloc(cpu_t *cpu)
4425 {
4426 	uint32_t	*ret;
4427 	size_t		mwait_size;
4428 
4429 	ASSERT(cpuid_checkpass(CPU, 2));
4430 
4431 	mwait_size = CPU->cpu_m.mcpu_cpi->cpi_mwait.mon_max;
4432 	if (mwait_size == 0)
4433 		return (NULL);
4434 
4435 	/*
4436 	 * kmem_alloc() returns cache line size aligned data for mwait_size
4437 	 * allocations.  mwait_size is currently cache line sized.  Neither
4438 	 * of these implementation details is guaranteed to be true in the
4439 	 * future.
4440 	 *
4441 	 * First try allocating mwait_size as kmem_alloc() currently returns
4442 	 * correctly aligned memory.  If kmem_alloc() does not return
4443 	 * mwait_size aligned memory, then allocate 2 * mwait_size and round up.
4444 	 *
4445 	 * Set cpi_mwait.buf_actual and cpi_mwait.size_actual in case we
4446 	 * decide to free this memory.
4447 	 */
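	/*
	 * A sketch of the alignment logic, assuming a 64-byte mwait_size:
	 * if the 64-byte allocation happens to come back 64-byte aligned,
	 * P2ROUNDUP() leaves the pointer unchanged and it is used directly;
	 * otherwise 128 bytes are allocated and the returned pointer is
	 * rounded up to the next 64-byte boundary within that buffer.
	 */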
4448 	ret = kmem_zalloc(mwait_size, KM_SLEEP);
4449 	if (ret == (uint32_t *)P2ROUNDUP((uintptr_t)ret, mwait_size)) {
4450 		cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = ret;
4451 		cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = mwait_size;
4452 		*ret = MWAIT_RUNNING;
4453 		return (ret);
4454 	} else {
4455 		kmem_free(ret, mwait_size);
4456 		ret = kmem_zalloc(mwait_size * 2, KM_SLEEP);
4457 		cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = ret;
4458 		cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = mwait_size * 2;
4459 		ret = (uint32_t *)P2ROUNDUP((uintptr_t)ret, mwait_size);
4460 		*ret = MWAIT_RUNNING;
4461 		return (ret);
4462 	}
4463 }
4464 
4465 void
4466 cpuid_mwait_free(cpu_t *cpu)
4467 {
4468 	if (cpu->cpu_m.mcpu_cpi == NULL) {
4469 		return;
4470 	}
4471 
4472 	if (cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual != NULL &&
4473 	    cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual > 0) {
4474 		kmem_free(cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual,
4475 		    cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual);
4476 	}
4477 
4478 	cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = NULL;
4479 	cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = 0;
4480 }
4481 
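/*
 * Overwrite the default tsc_read() text with one of the precompiled
 * variants (no-TSC stub, RDTSCP, or an MFENCE-/LFENCE-serialized RDTSC),
 * selected by 'flag'.  The _*_start/_*_end labels are assumed to bracket
 * the corresponding routines elsewhere in the kernel's assembly sources.
 */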
4482 void
4483 patch_tsc_read(int flag)
4484 {
4485 	size_t cnt;
4486 
4487 	switch (flag) {
4488 	case X86_NO_TSC:
4489 		cnt = &_no_rdtsc_end - &_no_rdtsc_start;
4490 		(void) memcpy((void *)tsc_read, (void *)&_no_rdtsc_start, cnt);
4491 		break;
4492 	case X86_HAVE_TSCP:
4493 		cnt = &_tscp_end - &_tscp_start;
4494 		(void) memcpy((void *)tsc_read, (void *)&_tscp_start, cnt);
4495 		break;
4496 	case X86_TSC_MFENCE:
4497 		cnt = &_tsc_mfence_end - &_tsc_mfence_start;
4498 		(void) memcpy((void *)tsc_read,
4499 		    (void *)&_tsc_mfence_start, cnt);
4500 		break;
4501 	case X86_TSC_LFENCE:
4502 		cnt = &_tsc_lfence_end - &_tsc_lfence_start;
4503 		(void) memcpy((void *)tsc_read,
4504 		    (void *)&_tsc_lfence_start, cnt);
4505 		break;
4506 	default:
4507 		break;
4508 	}
4509 }
4510 
4511 int
4512 cpuid_deep_cstates_supported(void)
4513 {
4514 	struct cpuid_info *cpi;
4515 	struct cpuid_regs regs;
4516 
4517 	ASSERT(cpuid_checkpass(CPU, 1));
4518 
4519 	cpi = CPU->cpu_m.mcpu_cpi;
4520 
4521 	if (!is_x86_feature(x86_featureset, X86FSET_CPUID))
4522 		return (0);
4523 
4524 	switch (cpi->cpi_vendor) {
4525 	case X86_VENDOR_Intel:
4526 		if (cpi->cpi_xmaxeax < 0x80000007)
4527 			return (0);
4528 
4529 		/*
4530 		 * Does the TSC run at a constant rate in all ACPI C-states?
4531 		 */
4532 		regs.cp_eax = 0x80000007;
4533 		(void) __cpuid_insn(&regs);
4534 		return (regs.cp_edx & CPUID_TSC_CSTATE_INVARIANCE);
4535 
4536 	default:
4537 		return (0);
4538 	}
4539 }
4540 
4541 #endif	/* !__xpv */
4542 
4543 void
4544 post_startup_cpu_fixups(void)
4545 {
4546 #ifndef __xpv
4547 	/*
4548 	 * Some AMD processors support C1E state. Entering this state will
4549 	 * cause the local APIC timer to stop, which we can't deal with at
4550 	 * this time.
4551 	 */
4552 	if (cpuid_getvendor(CPU) == X86_VENDOR_AMD) {
4553 		on_trap_data_t otd;
4554 		uint64_t reg;
4555 
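		/*
		 * The MSR access is wrapped in on_trap() on the assumption
		 * that some parts may not implement this MSR; a faulting
		 * rdmsr() is then caught instead of taking down the system.
		 */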
4556 		if (!on_trap(&otd, OT_DATA_ACCESS)) {
4557 			reg = rdmsr(MSR_AMD_INT_PENDING_CMP_HALT);
4558 			/* Disable C1E state if it is enabled by BIOS */
4559 			if ((reg >> AMD_ACTONCMPHALT_SHIFT) &
4560 			    AMD_ACTONCMPHALT_MASK) {
4561 				reg &= ~(AMD_ACTONCMPHALT_MASK <<
4562 				    AMD_ACTONCMPHALT_SHIFT);
4563 				wrmsr(MSR_AMD_INT_PENDING_CMP_HALT, reg);
4564 			}
4565 		}
4566 		no_trap();
4567 	}
4568 #endif	/* !__xpv */
4569 }
4570 
4571 /*
4572  * Setup necessary registers to enable XSAVE feature on this processor.
4573  * This function needs to be called early enough, so that no xsave/xrstor
4574  * ops will execute on the processor before the MSRs are properly set up.
4575  *
4576  * Current implementation has the following assumption:
4577  * - cpuid_pass1() is done, so that X86 features are known.
4578  * - fpu_probe() is done, so that fp_save_mech is chosen.
4579  */
4580 void
4581 xsave_setup_msr(cpu_t *cpu)
4582 {
4583 	ASSERT(fp_save_mech == FP_XSAVE);
4584 	ASSERT(is_x86_feature(x86_featureset, X86FSET_XSAVE));
4585 
4586 	/* Enable OSXSAVE in CR4. */
4587 	setcr4(getcr4() | CR4_OSXSAVE);
4588 	/*
4589 	 * Update SW copy of ECX, so that /dev/cpu/self/cpuid will report
4590 	 * correct value.
4591 	 */
4592 	cpu->cpu_m.mcpu_cpi->cpi_std[1].cp_ecx |= CPUID_INTC_ECX_OSXSAVE;
4593 	setup_xfem();
4594 }
4595 
4596 /*
4597  * Starting with the Westmere processor, the local
4598  * APIC timer will continue running in all C-states,
4599  * including the deepest C-states.
4600  */
4601 int
4602 cpuid_arat_supported(void)
4603 {
4604 	struct cpuid_info *cpi;
4605 	struct cpuid_regs regs;
4606 
4607 	ASSERT(cpuid_checkpass(CPU, 1));
4608 	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
4609 
4610 	cpi = CPU->cpu_m.mcpu_cpi;
4611 
4612 	switch (cpi->cpi_vendor) {
4613 	case X86_VENDOR_Intel:
4614 		/*
4615 		 * Always-running Local APIC Timer is
4616 		 * indicated by CPUID.6.EAX[2].
4617 		 */
4618 		if (cpi->cpi_maxeax >= 6) {
4619 			regs.cp_eax = 6;
4620 			(void) cpuid_insn(NULL, &regs);
4621 			return (regs.cp_eax & CPUID_CSTATE_ARAT);
4622 		} else {
4623 			return (0);
4624 		}
4625 	default:
4626 		return (0);
4627 	}
4628 }
4629 
4630 /*
4631  * Check support for Intel ENERGY_PERF_BIAS feature
4632  */
4633 int
4634 cpuid_iepb_supported(struct cpu *cp)
4635 {
4636 	struct cpuid_info *cpi = cp->cpu_m.mcpu_cpi;
4637 	struct cpuid_regs regs;
4638 
4639 	ASSERT(cpuid_checkpass(cp, 1));
4640 
4641 	if (!(is_x86_feature(x86_featureset, X86FSET_CPUID)) ||
4642 	    !(is_x86_feature(x86_featureset, X86FSET_MSR))) {
4643 		return (0);
4644 	}
4645 
4646 	/*
4647 	 * Intel ENERGY_PERF_BIAS MSR is indicated by
4648 	 * capability bit CPUID.6.ECX.3
4649 	 */
4650 	if ((cpi->cpi_vendor != X86_VENDOR_Intel) || (cpi->cpi_maxeax < 6))
4651 		return (0);
4652 
4653 	regs.cp_eax = 0x6;
4654 	(void) cpuid_insn(NULL, &regs);
4655 	return (regs.cp_ecx & CPUID_EPB_SUPPORT);
4656 }
4657 
4658 /*
4659  * Check support for TSC deadline timer
4660  *
4661  * TSC deadline timer provides a superior software programming
4662  * model over the local APIC timer, one that eliminates "time drifts".
4663  * Instead of specifying a relative time, software specifies an
4664  * absolute time as the target at which the processor should
4665  * generate a timer event.
4666  */
4667 int
4668 cpuid_deadline_tsc_supported(void)
4669 {
4670 	struct cpuid_info *cpi = CPU->cpu_m.mcpu_cpi;
4671 	struct cpuid_regs regs;
4672 
4673 	ASSERT(cpuid_checkpass(CPU, 1));
4674 	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
4675 
4676 	switch (cpi->cpi_vendor) {
4677 	case X86_VENDOR_Intel:
4678 		if (cpi->cpi_maxeax >= 1) {
4679 			regs.cp_eax = 1;
4680 			(void) cpuid_insn(NULL, &regs);
4681 			return (regs.cp_ecx & CPUID_DEADLINE_TSC);
4682 		} else {
4683 			return (0);
4684 		}
4685 	default:
4686 		return (0);
4687 	}
4688 }
4689 
4690 #if defined(__amd64) && !defined(__xpv)
4691 /*
4692  * Patch in versions of bcopy for high performance Intel Nehalem (Nhm)
4693  * processors and later...
4694  */
4695 void
4696 patch_memops(uint_t vendor)
4697 {
4698 	size_t cnt, i;
4699 	caddr_t to, from;
4700 
4701 	if ((vendor == X86_VENDOR_Intel) &&
4702 	    is_x86_feature(x86_featureset, X86FSET_SSE4_2)) {
4703 		cnt = &bcopy_patch_end - &bcopy_patch_start;
4704 		to = &bcopy_ck_size;
4705 		from = &bcopy_patch_start;
4706 		for (i = 0; i < cnt; i++) {
4707 			*to++ = *from++;
4708 		}
4709 	}
4710 }
4711 #endif  /* __amd64 && !__xpv */
4712 
4713 /*
4714  * This function finds the number of bits to represent the number of cores per
4715  * chip and the number of strands per core for the Intel platforms.
4716  * It re-uses the x2APIC cpuid code from cpuid_pass2().
4717  */
4718 void
4719 cpuid_get_ext_topo(uint_t vendor, uint_t *core_nbits, uint_t *strand_nbits)
4720 {
4721 	struct cpuid_regs regs;
4722 	struct cpuid_regs *cp = &regs;
4723 
4724 	if (vendor != X86_VENDOR_Intel) {
4725 		return;
4726 	}
4727 
4728 	/* if the cpuid level is 0xB or greater, extended topo is available. */
4729 	cp->cp_eax = 0;
4730 	if (__cpuid_insn(cp) >= 0xB) {
4731 
4732 		cp->cp_eax = 0xB;
4733 		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
4734 		(void) __cpuid_insn(cp);
4735 
4736 		/*
4737 		 * Check that CPUID.(EAX=0BH, ECX=0H):EBX is non-zero, which
4738 		 * indicates that the extended topology enumeration leaf is
4739 		 * available.
4740 		 */
4741 		if (cp->cp_ebx) {
4742 			uint_t coreid_shift = 0;
4743 			uint_t chipid_shift = 0;
4744 			uint_t i;
4745 			uint_t level;
4746 
4747 			for (i = 0; i < CPI_FNB_ECX_MAX; i++) {
4748 				cp->cp_eax = 0xB;
4749 				cp->cp_ecx = i;
4750 
4751 				(void) __cpuid_insn(cp);
4752 				level = CPI_CPU_LEVEL_TYPE(cp);
4753 
4754 				if (level == 1) {
4755 					/*
4756 					 * Thread level processor topology:
4757 					 * number of bits to shift the APIC ID
4758 					 * right to get the coreid.
4759 					 */
4760 					coreid_shift = BITX(cp->cp_eax, 4, 0);
4761 				} else if (level == 2) {
4762 					/*
4763 					 * Core level processor topology:
4764 					 * number of bits to shift the APIC ID
4765 					 * right to get the chipid.
4766 					 */
4767 					chipid_shift = BITX(cp->cp_eax, 4, 0);
4768 				}
4769 			}
4770 
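			/*
			 * For example, a part reporting coreid_shift == 1 and
			 * chipid_shift == 4 uses 1 APIC ID bit for the strand
			 * and 3 for the core, so strand_nbits is 1 and
			 * core_nbits is 3.
			 */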
4771 			if (coreid_shift > 0 && chipid_shift > coreid_shift) {
4772 				*strand_nbits = coreid_shift;
4773 				*core_nbits = chipid_shift - coreid_shift;
4774 			}
4775 		}
4776 	}
4777 }
4778