17c478bd9Sstevel@tonic-gate /* 27c478bd9Sstevel@tonic-gate * CDDL HEADER START 37c478bd9Sstevel@tonic-gate * 47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the 5ee88d2b9Skchow * Common Development and Distribution License (the "License"). 6ee88d2b9Skchow * You may not use this file except in compliance with the License. 77c478bd9Sstevel@tonic-gate * 87c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 97c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 107c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions 117c478bd9Sstevel@tonic-gate * and limitations under the License. 127c478bd9Sstevel@tonic-gate * 137c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 147c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 157c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 167c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 177c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 187c478bd9Sstevel@tonic-gate * 197c478bd9Sstevel@tonic-gate * CDDL HEADER END 207c478bd9Sstevel@tonic-gate */ 217c478bd9Sstevel@tonic-gate /* 226e5580c9SFrank Van Der Linden * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. 23cfe84b82SMatt Amdur * Copyright (c) 2011 by Delphix. All rights reserved. 2479ec9da8SYuri Pankov * Copyright 2013 Nexenta Systems, Inc. All rights reserved. 256eedf6a5SJosef 'Jeff' Sipek * Copyright 2014 Josef "Jeff" Sipek <jeffpc@josefsipek.net> 267c478bd9Sstevel@tonic-gate */ 27cef70d2cSBill Holler /* 2841afdfa7SKrishnendu Sadhukhan - Sun Microsystems * Copyright (c) 2010, Intel Corporation. 29cef70d2cSBill Holler * All rights reserved. 30cef70d2cSBill Holler */ 318031591dSSrihari Venkatesan /* 328031591dSSrihari Venkatesan * Portions Copyright 2009 Advanced Micro Devices, Inc. 338031591dSSrihari Venkatesan */ 34faa20166SBryan Cantrill /* 35245ac945SRobert Mustacchi * Copyright (c) 2015, Joyent, Inc. All rights reserved. 36faa20166SBryan Cantrill */ 377c478bd9Sstevel@tonic-gate /* 387c478bd9Sstevel@tonic-gate * Various routines to handle identification 397c478bd9Sstevel@tonic-gate * and classification of x86 processors. 
407c478bd9Sstevel@tonic-gate */ 417c478bd9Sstevel@tonic-gate 427c478bd9Sstevel@tonic-gate #include <sys/types.h> 437c478bd9Sstevel@tonic-gate #include <sys/archsystm.h> 447c478bd9Sstevel@tonic-gate #include <sys/x86_archext.h> 457c478bd9Sstevel@tonic-gate #include <sys/kmem.h> 467c478bd9Sstevel@tonic-gate #include <sys/systm.h> 477c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h> 487c478bd9Sstevel@tonic-gate #include <sys/sunddi.h> 497c478bd9Sstevel@tonic-gate #include <sys/sunndi.h> 507c478bd9Sstevel@tonic-gate #include <sys/cpuvar.h> 517c478bd9Sstevel@tonic-gate #include <sys/processor.h> 525b8a6efeSbholler #include <sys/sysmacros.h> 53fb2f18f8Sesaxe #include <sys/pg.h> 547c478bd9Sstevel@tonic-gate #include <sys/fp.h> 557c478bd9Sstevel@tonic-gate #include <sys/controlregs.h> 567c478bd9Sstevel@tonic-gate #include <sys/bitmap.h> 57dfea898aSKuriakose Kuruvilla #include <sys/auxv_386.h> 587c478bd9Sstevel@tonic-gate #include <sys/memnode.h> 598031591dSSrihari Venkatesan #include <sys/pci_cfgspace.h> 607c478bd9Sstevel@tonic-gate 61e4b86885SCheng Sean Ye #ifdef __xpv 62e4b86885SCheng Sean Ye #include <sys/hypervisor.h> 63e774b42bSBill Holler #else 64e774b42bSBill Holler #include <sys/ontrap.h> 65e4b86885SCheng Sean Ye #endif 66e4b86885SCheng Sean Ye 677c478bd9Sstevel@tonic-gate /* 687c478bd9Sstevel@tonic-gate * Pass 0 of cpuid feature analysis happens in locore. It contains special code 697c478bd9Sstevel@tonic-gate * to recognize Cyrix processors that are not cpuid-compliant, and to deal with 707c478bd9Sstevel@tonic-gate * them accordingly. For most modern processors, feature detection occurs here 717c478bd9Sstevel@tonic-gate * in pass 1. 727c478bd9Sstevel@tonic-gate * 737c478bd9Sstevel@tonic-gate * Pass 1 of cpuid feature analysis happens just at the beginning of mlsetup() 747c478bd9Sstevel@tonic-gate * for the boot CPU and does the basic analysis that the early kernel needs. 757417cfdeSKuriakose Kuruvilla * x86_featureset is set based on the return value of cpuid_pass1() of the boot 767c478bd9Sstevel@tonic-gate * CPU. 777c478bd9Sstevel@tonic-gate * 787c478bd9Sstevel@tonic-gate * Pass 1 includes: 797c478bd9Sstevel@tonic-gate * 807c478bd9Sstevel@tonic-gate * o Determining vendor/model/family/stepping and setting x86_type and 817c478bd9Sstevel@tonic-gate * x86_vendor accordingly. 827c478bd9Sstevel@tonic-gate * o Processing the feature flags returned by the cpuid instruction while 837c478bd9Sstevel@tonic-gate * applying any workarounds or tricks for the specific processor. 847c478bd9Sstevel@tonic-gate * o Mapping the feature flags into Solaris feature bits (X86_*). 857c478bd9Sstevel@tonic-gate * o Processing extended feature flags if supported by the processor, 867c478bd9Sstevel@tonic-gate * again while applying specific processor knowledge. 877c478bd9Sstevel@tonic-gate * o Determining the CMT characteristics of the system. 887c478bd9Sstevel@tonic-gate * 897c478bd9Sstevel@tonic-gate * Pass 1 is done on non-boot CPUs during their initialization and the results 907c478bd9Sstevel@tonic-gate * are used only as a meager attempt at ensuring that all processors within the 917c478bd9Sstevel@tonic-gate * system support the same features. 927c478bd9Sstevel@tonic-gate * 937c478bd9Sstevel@tonic-gate * Pass 2 of cpuid feature analysis happens just at the beginning 947c478bd9Sstevel@tonic-gate * of startup(). 
It just copies in and corrects the remainder 957c478bd9Sstevel@tonic-gate * of the cpuid data we depend on: standard cpuid functions that we didn't 967c478bd9Sstevel@tonic-gate * need for pass1 feature analysis, and extended cpuid functions beyond the 977c478bd9Sstevel@tonic-gate * simple feature processing done in pass1. 987c478bd9Sstevel@tonic-gate * 997c478bd9Sstevel@tonic-gate * Pass 3 of cpuid analysis is invoked after basic kernel services; in 1007c478bd9Sstevel@tonic-gate * particular kernel memory allocation has been made available. It creates a 1017c478bd9Sstevel@tonic-gate * readable brand string based on the data collected in the first two passes. 1027c478bd9Sstevel@tonic-gate * 1037c478bd9Sstevel@tonic-gate * Pass 4 of cpuid analysis is invoked after post_startup() when all 1047c478bd9Sstevel@tonic-gate * the support infrastructure for various hardware features has been 1057c478bd9Sstevel@tonic-gate * initialized. It determines which processor features will be reported 1067c478bd9Sstevel@tonic-gate * to userland via the aux vector. 1077c478bd9Sstevel@tonic-gate * 1087c478bd9Sstevel@tonic-gate * All passes are executed on all CPUs, but only the boot CPU determines what 1097c478bd9Sstevel@tonic-gate * features the kernel will use. 1107c478bd9Sstevel@tonic-gate * 1117c478bd9Sstevel@tonic-gate * Much of the worst junk in this file is for the support of processors 1127c478bd9Sstevel@tonic-gate * that didn't really implement the cpuid instruction properly. 1137c478bd9Sstevel@tonic-gate * 1147c478bd9Sstevel@tonic-gate * NOTE: The accessor functions (cpuid_get*) are aware of, and ASSERT upon, 1157c478bd9Sstevel@tonic-gate * the pass numbers. Accordingly, changes to the pass code may require changes 1167c478bd9Sstevel@tonic-gate * to the accessor code. 
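 *
 * A rough sketch of the ordering described above (arguments elided;
 * illustrative only, the authoritative call sites being mlsetup(),
 * startup() and post_startup()):
 *
 *	pass 1	mlsetup()	x86_featureset, vendor/family/model
 *	pass 2	startup()	remaining standard and extended leaves
 *	pass 3	post-kmem	brand string
 *	pass 4	post_startup()	features exported via the aux vector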
1177c478bd9Sstevel@tonic-gate */ 1187c478bd9Sstevel@tonic-gate 1197c478bd9Sstevel@tonic-gate uint_t x86_vendor = X86_VENDOR_IntelClone; 1207c478bd9Sstevel@tonic-gate uint_t x86_type = X86_TYPE_OTHER; 12186c1f4dcSVikram Hegde uint_t x86_clflush_size = 0; 1227c478bd9Sstevel@tonic-gate 1237c478bd9Sstevel@tonic-gate uint_t pentiumpro_bug4046376; 1247c478bd9Sstevel@tonic-gate 125dfea898aSKuriakose Kuruvilla uchar_t x86_featureset[BT_SIZEOFMAP(NUM_X86_FEATURES)]; 1267417cfdeSKuriakose Kuruvilla 127dfea898aSKuriakose Kuruvilla static char *x86_feature_names[NUM_X86_FEATURES] = { 1287417cfdeSKuriakose Kuruvilla "lgpg", 1297417cfdeSKuriakose Kuruvilla "tsc", 1307417cfdeSKuriakose Kuruvilla "msr", 1317417cfdeSKuriakose Kuruvilla "mtrr", 1327417cfdeSKuriakose Kuruvilla "pge", 1337417cfdeSKuriakose Kuruvilla "de", 1347417cfdeSKuriakose Kuruvilla "cmov", 1357417cfdeSKuriakose Kuruvilla "mmx", 1367417cfdeSKuriakose Kuruvilla "mca", 1377417cfdeSKuriakose Kuruvilla "pae", 1387417cfdeSKuriakose Kuruvilla "cv8", 1397417cfdeSKuriakose Kuruvilla "pat", 1407417cfdeSKuriakose Kuruvilla "sep", 1417417cfdeSKuriakose Kuruvilla "sse", 1427417cfdeSKuriakose Kuruvilla "sse2", 1437417cfdeSKuriakose Kuruvilla "htt", 1447417cfdeSKuriakose Kuruvilla "asysc", 1457417cfdeSKuriakose Kuruvilla "nx", 1467417cfdeSKuriakose Kuruvilla "sse3", 1477417cfdeSKuriakose Kuruvilla "cx16", 1487417cfdeSKuriakose Kuruvilla "cmp", 1497417cfdeSKuriakose Kuruvilla "tscp", 1507417cfdeSKuriakose Kuruvilla "mwait", 1517417cfdeSKuriakose Kuruvilla "sse4a", 1527417cfdeSKuriakose Kuruvilla "cpuid", 1537417cfdeSKuriakose Kuruvilla "ssse3", 1547417cfdeSKuriakose Kuruvilla "sse4_1", 1557417cfdeSKuriakose Kuruvilla "sse4_2", 1567417cfdeSKuriakose Kuruvilla "1gpg", 1577417cfdeSKuriakose Kuruvilla "clfsh", 1587417cfdeSKuriakose Kuruvilla "64", 1597417cfdeSKuriakose Kuruvilla "aes", 1607af88ac7SKuriakose Kuruvilla "pclmulqdq", 1617af88ac7SKuriakose Kuruvilla "xsave", 162faa20166SBryan Cantrill "avx", 163faa20166SBryan Cantrill "vmx", 1647660e73fSHans Rosenfeld "svm", 165ebb8ac07SRobert Mustacchi "topoext", 166ebb8ac07SRobert Mustacchi "f16c", 1676eedf6a5SJosef 'Jeff' Sipek "rdrand", 1686eedf6a5SJosef 'Jeff' Sipek "x2apic", 169245ac945SRobert Mustacchi "avx2", 170245ac945SRobert Mustacchi "bmi1", 171245ac945SRobert Mustacchi "bmi2", 172*799823bbSRobert Mustacchi "fma", 173*799823bbSRobert Mustacchi "smep" 174faa20166SBryan Cantrill }; 1757417cfdeSKuriakose Kuruvilla 1767417cfdeSKuriakose Kuruvilla boolean_t 1777417cfdeSKuriakose Kuruvilla is_x86_feature(void *featureset, uint_t feature) 1787417cfdeSKuriakose Kuruvilla { 1797417cfdeSKuriakose Kuruvilla ASSERT(feature < NUM_X86_FEATURES); 1807417cfdeSKuriakose Kuruvilla return (BT_TEST((ulong_t *)featureset, feature)); 1817417cfdeSKuriakose Kuruvilla } 1827417cfdeSKuriakose Kuruvilla 1837417cfdeSKuriakose Kuruvilla void 1847417cfdeSKuriakose Kuruvilla add_x86_feature(void *featureset, uint_t feature) 1857417cfdeSKuriakose Kuruvilla { 1867417cfdeSKuriakose Kuruvilla ASSERT(feature < NUM_X86_FEATURES); 1877417cfdeSKuriakose Kuruvilla BT_SET((ulong_t *)featureset, feature); 1887417cfdeSKuriakose Kuruvilla } 1897417cfdeSKuriakose Kuruvilla 1907417cfdeSKuriakose Kuruvilla void 1917417cfdeSKuriakose Kuruvilla remove_x86_feature(void *featureset, uint_t feature) 1927417cfdeSKuriakose Kuruvilla { 1937417cfdeSKuriakose Kuruvilla ASSERT(feature < NUM_X86_FEATURES); 1947417cfdeSKuriakose Kuruvilla BT_CLEAR((ulong_t *)featureset, feature); 1957417cfdeSKuriakose Kuruvilla } 1967417cfdeSKuriakose Kuruvilla 
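/*
 * Illustrative sketch (not part of this file's logic): a caller holding
 * its own feature bitmap would use the helpers above roughly as follows,
 * with a locally declared map:
 *
 *	uchar_t fset[BT_SIZEOFMAP(NUM_X86_FEATURES)] = { 0 };
 *
 *	add_x86_feature(fset, X86FSET_SSE);
 *	if (is_x86_feature(fset, X86FSET_SSE))
 *		cmn_err(CE_CONT, "?sse present\n");
 *	remove_x86_feature(fset, X86FSET_SSE);
 */
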
boolean_t
compare_x86_featureset(void *setA, void *setB)
{
	/*
	 * We assume that the unused bits of the bitmap are always zero.
	 */
	if (memcmp(setA, setB, BT_SIZEOFMAP(NUM_X86_FEATURES)) == 0) {
		return (B_TRUE);
	} else {
		return (B_FALSE);
	}
}

void
print_x86_featureset(void *featureset)
{
	uint_t i;

	for (i = 0; i < NUM_X86_FEATURES; i++) {
		if (is_x86_feature(featureset, i)) {
			cmn_err(CE_CONT, "?x86_feature: %s\n",
			    x86_feature_names[i]);
		}
	}
}

static size_t xsave_state_size = 0;
uint64_t xsave_bv_all = (XFEATURE_LEGACY_FP | XFEATURE_SSE);
boolean_t xsave_force_disable = B_FALSE;

/*
 * This is set to the platform type we are running on.
 */
static int platform_type = -1;

#if !defined(__xpv)
/*
 * Variable to patch if hypervisor platform detection needs to be
 * disabled (e.g. platform_type will always be HW_NATIVE if this is 0).
 */
int enable_platform_detection = 1;
#endif

/*
 * monitor/mwait info.
 *
 * size_actual and buf_actual are the real address and size allocated to get
 * proper mwait_buf alignment. buf_actual and size_actual should be passed
 * to kmem_free(). Currently kmem_alloc() and mwait happen to both use
 * processor cache-line alignment, but this is not guaranteed in the future.
 */
struct mwait_info {
	size_t		mon_min;	/* min size to avoid missed wakeups */
	size_t		mon_max;	/* size to avoid false wakeups */
	size_t		size_actual;	/* size actually allocated */
	void		*buf_actual;	/* memory actually allocated */
	uint32_t	support;	/* processor support of monitor/mwait */
};

/*
 * xsave/xrestor info.
 *
 * This structure contains HW feature bits and size of the xsave save area.
 * Note: the kernel will use the maximum size required for all hardware
 * features.
 * It is not optimized for potential memory savings if features at
 * the end of the save area are not enabled.
 */
struct xsave_info {
	uint32_t	xsav_hw_features_low;	/* Supported HW features */
	uint32_t	xsav_hw_features_high;	/* Supported HW features */
	size_t		xsav_max_size;	/* max size save area for HW features */
	size_t		ymm_size;	/* AVX: size of ymm save area */
	size_t		ymm_offset;	/* AVX: offset for ymm save area */
};


/*
 * These constants determine how many of the elements of the
 * cpuid we cache in the cpuid_info data structure; the
 * remaining elements are accessible via the cpuid instruction.
 */

#define	NMAX_CPI_STD	8		/* eax = 0 .. 7 */
#define	NMAX_CPI_EXTD	0x1f		/* eax = 0x80000000 .. 0x8000001e */

/*
 * Some terminology needs to be explained:
 *  - Socket: Something that can be plugged into a motherboard.
 *  - Package: Same as socket
 *  - Chip: Same as socket. Note that AMD's documentation uses the term "chip"
 *    differently: there, chip is the same as processor node (below)
 *  - Processor node: Some AMD processors have more than one
 *    "subprocessor" embedded in a package. These subprocessors (nodes)
 *    are fully-functional processors themselves with cores, caches,
 *    memory controllers, PCI configuration spaces. They are connected
 *    inside the package with Hypertransport links. On single-node
 *    processors, processor node is equivalent to chip/socket/package.
 *  - Compute Unit: Some AMD processors pair cores in "compute units" that
 *    share the FPU and the I$ and L2 caches.
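 *
 * For example, a hypothetical two-node package with four compute units
 * per node and two cores per compute unit occupies a single socket but
 * contains two processor nodes and sixteen cores in total.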
2968031591dSSrihari Venkatesan */ 2977c478bd9Sstevel@tonic-gate 2987c478bd9Sstevel@tonic-gate struct cpuid_info { 2997c478bd9Sstevel@tonic-gate uint_t cpi_pass; /* last pass completed */ 3007c478bd9Sstevel@tonic-gate /* 3017c478bd9Sstevel@tonic-gate * standard function information 3027c478bd9Sstevel@tonic-gate */ 3037c478bd9Sstevel@tonic-gate uint_t cpi_maxeax; /* fn 0: %eax */ 3047c478bd9Sstevel@tonic-gate char cpi_vendorstr[13]; /* fn 0: %ebx:%ecx:%edx */ 3057c478bd9Sstevel@tonic-gate uint_t cpi_vendor; /* enum of cpi_vendorstr */ 3067c478bd9Sstevel@tonic-gate 3077c478bd9Sstevel@tonic-gate uint_t cpi_family; /* fn 1: extended family */ 3087c478bd9Sstevel@tonic-gate uint_t cpi_model; /* fn 1: extended model */ 3097c478bd9Sstevel@tonic-gate uint_t cpi_step; /* fn 1: stepping */ 3108031591dSSrihari Venkatesan chipid_t cpi_chipid; /* fn 1: %ebx: Intel: chip # */ 3118031591dSSrihari Venkatesan /* AMD: package/socket # */ 3127c478bd9Sstevel@tonic-gate uint_t cpi_brandid; /* fn 1: %ebx: brand ID */ 3137c478bd9Sstevel@tonic-gate int cpi_clogid; /* fn 1: %ebx: thread # */ 3148949bcd6Sandrei uint_t cpi_ncpu_per_chip; /* fn 1: %ebx: logical cpu count */ 3157c478bd9Sstevel@tonic-gate uint8_t cpi_cacheinfo[16]; /* fn 2: intel-style cache desc */ 3167c478bd9Sstevel@tonic-gate uint_t cpi_ncache; /* fn 2: number of elements */ 317d129bde2Sesaxe uint_t cpi_ncpu_shr_last_cache; /* fn 4: %eax: ncpus sharing cache */ 318d129bde2Sesaxe id_t cpi_last_lvl_cacheid; /* fn 4: %eax: derived cache id */ 319d129bde2Sesaxe uint_t cpi_std_4_size; /* fn 4: number of fn 4 elements */ 320d129bde2Sesaxe struct cpuid_regs **cpi_std_4; /* fn 4: %ecx == 0 .. fn4_size */ 321245ac945SRobert Mustacchi struct cpuid_regs cpi_std[NMAX_CPI_STD]; /* 0 .. 7 */ 3227c478bd9Sstevel@tonic-gate /* 3237c478bd9Sstevel@tonic-gate * extended function information 3247c478bd9Sstevel@tonic-gate */ 3257c478bd9Sstevel@tonic-gate uint_t cpi_xmaxeax; /* fn 0x80000000: %eax */ 3267c478bd9Sstevel@tonic-gate char cpi_brandstr[49]; /* fn 0x8000000[234] */ 3277c478bd9Sstevel@tonic-gate uint8_t cpi_pabits; /* fn 0x80000006: %eax */ 3287c478bd9Sstevel@tonic-gate uint8_t cpi_vabits; /* fn 0x80000006: %eax */ 3298031591dSSrihari Venkatesan struct cpuid_regs cpi_extd[NMAX_CPI_EXTD]; /* 0x800000XX */ 3308031591dSSrihari Venkatesan 33110569901Sgavinm id_t cpi_coreid; /* same coreid => strands share core */ 33210569901Sgavinm int cpi_pkgcoreid; /* core number within single package */ 3338949bcd6Sandrei uint_t cpi_ncore_per_chip; /* AMD: fn 0x80000008: %ecx[7-0] */ 3348949bcd6Sandrei /* Intel: fn 4: %eax[31-26] */ 3357c478bd9Sstevel@tonic-gate /* 3367c478bd9Sstevel@tonic-gate * supported feature information 3377c478bd9Sstevel@tonic-gate */ 338245ac945SRobert Mustacchi uint32_t cpi_support[6]; 3397c478bd9Sstevel@tonic-gate #define STD_EDX_FEATURES 0 3407c478bd9Sstevel@tonic-gate #define AMD_EDX_FEATURES 1 3417c478bd9Sstevel@tonic-gate #define TM_EDX_FEATURES 2 3427c478bd9Sstevel@tonic-gate #define STD_ECX_FEATURES 3 343ae115bc7Smrj #define AMD_ECX_FEATURES 4 344245ac945SRobert Mustacchi #define STD_EBX_FEATURES 5 3458a40a695Sgavinm /* 3468a40a695Sgavinm * Synthesized information, where known. 
3478a40a695Sgavinm */ 3488a40a695Sgavinm uint32_t cpi_chiprev; /* See X86_CHIPREV_* in x86_archext.h */ 3498a40a695Sgavinm const char *cpi_chiprevstr; /* May be NULL if chiprev unknown */ 3508a40a695Sgavinm uint32_t cpi_socket; /* Chip package/socket type */ 351f98fbcecSbholler 352f98fbcecSbholler struct mwait_info cpi_mwait; /* fn 5: monitor/mwait info */ 353b6917abeSmishra uint32_t cpi_apicid; 3548031591dSSrihari Venkatesan uint_t cpi_procnodeid; /* AMD: nodeID on HT, Intel: chipid */ 3558031591dSSrihari Venkatesan uint_t cpi_procnodes_per_pkg; /* AMD: # of nodes in the package */ 3568031591dSSrihari Venkatesan /* Intel: 1 */ 3577660e73fSHans Rosenfeld uint_t cpi_compunitid; /* AMD: ComputeUnit ID, Intel: coreid */ 3587660e73fSHans Rosenfeld uint_t cpi_cores_per_compunit; /* AMD: # of cores in the ComputeUnit */ 3597af88ac7SKuriakose Kuruvilla 3607af88ac7SKuriakose Kuruvilla struct xsave_info cpi_xsave; /* fn D: xsave/xrestor info */ 3617c478bd9Sstevel@tonic-gate }; 3627c478bd9Sstevel@tonic-gate 3637c478bd9Sstevel@tonic-gate 3647c478bd9Sstevel@tonic-gate static struct cpuid_info cpuid_info0; 3657c478bd9Sstevel@tonic-gate 3667c478bd9Sstevel@tonic-gate /* 3677c478bd9Sstevel@tonic-gate * These bit fields are defined by the Intel Application Note AP-485 3687c478bd9Sstevel@tonic-gate * "Intel Processor Identification and the CPUID Instruction" 3697c478bd9Sstevel@tonic-gate */ 3707c478bd9Sstevel@tonic-gate #define CPI_FAMILY_XTD(cpi) BITX((cpi)->cpi_std[1].cp_eax, 27, 20) 3717c478bd9Sstevel@tonic-gate #define CPI_MODEL_XTD(cpi) BITX((cpi)->cpi_std[1].cp_eax, 19, 16) 3727c478bd9Sstevel@tonic-gate #define CPI_TYPE(cpi) BITX((cpi)->cpi_std[1].cp_eax, 13, 12) 3737c478bd9Sstevel@tonic-gate #define CPI_FAMILY(cpi) BITX((cpi)->cpi_std[1].cp_eax, 11, 8) 3747c478bd9Sstevel@tonic-gate #define CPI_STEP(cpi) BITX((cpi)->cpi_std[1].cp_eax, 3, 0) 3757c478bd9Sstevel@tonic-gate #define CPI_MODEL(cpi) BITX((cpi)->cpi_std[1].cp_eax, 7, 4) 3767c478bd9Sstevel@tonic-gate 3777c478bd9Sstevel@tonic-gate #define CPI_FEATURES_EDX(cpi) ((cpi)->cpi_std[1].cp_edx) 3787c478bd9Sstevel@tonic-gate #define CPI_FEATURES_ECX(cpi) ((cpi)->cpi_std[1].cp_ecx) 3797c478bd9Sstevel@tonic-gate #define CPI_FEATURES_XTD_EDX(cpi) ((cpi)->cpi_extd[1].cp_edx) 3807c478bd9Sstevel@tonic-gate #define CPI_FEATURES_XTD_ECX(cpi) ((cpi)->cpi_extd[1].cp_ecx) 381245ac945SRobert Mustacchi #define CPI_FEATURES_7_0_EBX(cpi) ((cpi)->cpi_std[7].cp_ebx) 3827c478bd9Sstevel@tonic-gate 3837c478bd9Sstevel@tonic-gate #define CPI_BRANDID(cpi) BITX((cpi)->cpi_std[1].cp_ebx, 7, 0) 3847c478bd9Sstevel@tonic-gate #define CPI_CHUNKS(cpi) BITX((cpi)->cpi_std[1].cp_ebx, 15, 7) 3857c478bd9Sstevel@tonic-gate #define CPI_CPU_COUNT(cpi) BITX((cpi)->cpi_std[1].cp_ebx, 23, 16) 3867c478bd9Sstevel@tonic-gate #define CPI_APIC_ID(cpi) BITX((cpi)->cpi_std[1].cp_ebx, 31, 24) 3877c478bd9Sstevel@tonic-gate 3887c478bd9Sstevel@tonic-gate #define CPI_MAXEAX_MAX 0x100 /* sanity control */ 3897c478bd9Sstevel@tonic-gate #define CPI_XMAXEAX_MAX 0x80000100 390d129bde2Sesaxe #define CPI_FN4_ECX_MAX 0x20 /* sanity: max fn 4 levels */ 391b6917abeSmishra #define CPI_FNB_ECX_MAX 0x20 /* sanity: max fn B levels */ 392d129bde2Sesaxe 393d129bde2Sesaxe /* 394d129bde2Sesaxe * Function 4 (Deterministic Cache Parameters) macros 395d129bde2Sesaxe * Defined by Intel Application Note AP-485 396d129bde2Sesaxe */ 397d129bde2Sesaxe #define CPI_NUM_CORES(regs) BITX((regs)->cp_eax, 31, 26) 398d129bde2Sesaxe #define CPI_NTHR_SHR_CACHE(regs) BITX((regs)->cp_eax, 25, 14) 399d129bde2Sesaxe #define 
CPI_FULL_ASSOC_CACHE(regs)	BITX((regs)->cp_eax, 9, 9)
#define	CPI_SELF_INIT_CACHE(regs)	BITX((regs)->cp_eax, 8, 8)
#define	CPI_CACHE_LVL(regs)		BITX((regs)->cp_eax, 7, 5)
#define	CPI_CACHE_TYPE(regs)		BITX((regs)->cp_eax, 4, 0)
#define	CPI_CPU_LEVEL_TYPE(regs)	BITX((regs)->cp_ecx, 15, 8)

#define	CPI_CACHE_WAYS(regs)		BITX((regs)->cp_ebx, 31, 22)
#define	CPI_CACHE_PARTS(regs)		BITX((regs)->cp_ebx, 21, 12)
#define	CPI_CACHE_COH_LN_SZ(regs)	BITX((regs)->cp_ebx, 11, 0)

#define	CPI_CACHE_SETS(regs)		BITX((regs)->cp_ecx, 31, 0)

#define	CPI_PREFCH_STRIDE(regs)		BITX((regs)->cp_edx, 9, 0)


/*
 * A couple of shorthand macros to identify "later" P6-family chips
 * like the Pentium M and Core. First, the "older" P6-based stuff
 * (loosely defined as "pre-Pentium-4"):
 * P6, PII, Mobile PII, PII Xeon, PIII, Mobile PIII, PIII Xeon
 */

#define	IS_LEGACY_P6(cpi) (			\
	cpi->cpi_family == 6 &&			\
		(cpi->cpi_model == 1 ||		\
		cpi->cpi_model == 3 ||		\
		cpi->cpi_model == 5 ||		\
		cpi->cpi_model == 6 ||		\
		cpi->cpi_model == 7 ||		\
		cpi->cpi_model == 8 ||		\
		cpi->cpi_model == 0xA ||	\
		cpi->cpi_model == 0xB)		\
)

/* A "new F6" is everything with family 6 that's not the above */
#define	IS_NEW_F6(cpi) ((cpi->cpi_family == 6) && !IS_LEGACY_P6(cpi))

/* Extended family/model support */
#define	IS_EXTENDED_MODEL_INTEL(cpi) (cpi->cpi_family == 0x6 || \
	cpi->cpi_family >= 0xf)

/*
 * Info for monitor/mwait idle loop.
 *
 * See cpuid section of "Intel 64 and IA-32 Architectures Software Developer's
 * Manual Volume 2A: Instruction Set Reference, A-M" #25366-022US, November
 * 2006.
 * See MONITOR/MWAIT section of "AMD64 Architecture Programmer's Manual
 * Documentation Updates" #33633, Rev 2.05, December 2006.
 */
#define	MWAIT_SUPPORT		(0x00000001)	/* mwait supported */
#define	MWAIT_EXTENSIONS	(0x00000002)	/* extension supported */
#define	MWAIT_ECX_INT_ENABLE	(0x00000004)	/* ecx 1 extension supported */
#define	MWAIT_SUPPORTED(cpi)	((cpi)->cpi_std[1].cp_ecx & CPUID_INTC_ECX_MON)
#define	MWAIT_INT_ENABLE(cpi)	((cpi)->cpi_std[5].cp_ecx & 0x2)
#define	MWAIT_EXTENSION(cpi)	((cpi)->cpi_std[5].cp_ecx & 0x1)
#define	MWAIT_SIZE_MIN(cpi)	BITX((cpi)->cpi_std[5].cp_eax, 15, 0)
#define	MWAIT_SIZE_MAX(cpi)	BITX((cpi)->cpi_std[5].cp_ebx, 15, 0)
/*
 * Number of sub-cstates for a given c-state.
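 *
 * As an illustration: MWAIT_NUM_SUBC_STATES(cpi, 4) isolates bits 7:4 of
 * CPUID.5 %edx which, per the enumeration in the manuals cited above, is
 * the number of C1 sub-states MWAIT supports (four bits per C-state,
 * starting with C0 in bits 3:0).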
 */
#define	MWAIT_NUM_SUBC_STATES(cpi, c_state)			\
	BITX((cpi)->cpi_std[5].cp_edx, c_state + 3, c_state)

/*
 * XSAVE leaf 0xD enumeration
 */
#define	CPUID_LEAFD_2_YMM_OFFSET	576
#define	CPUID_LEAFD_2_YMM_SIZE		256

/*
 * Functions we consume from cpuid_subr.c; don't publish these in a header
 * file to try and keep people using the expected cpuid_* interfaces.
 */
extern uint32_t _cpuid_skt(uint_t, uint_t, uint_t, uint_t);
extern const char *_cpuid_sktstr(uint_t, uint_t, uint_t, uint_t);
extern uint32_t _cpuid_chiprev(uint_t, uint_t, uint_t, uint_t);
extern const char *_cpuid_chiprevstr(uint_t, uint_t, uint_t, uint_t);
extern uint_t _cpuid_vendorstr_to_vendorcode(char *);

/*
 * Apply various platform-dependent restrictions where the
 * underlying platform restrictions mean the CPU can be marked
 * as less capable than its cpuid instruction would imply.
 */
#if defined(__xpv)
static void
platform_cpuid_mangle(uint_t vendor, uint32_t eax, struct cpuid_regs *cp)
{
	switch (eax) {
	case 1: {
		uint32_t mcamask = DOMAIN_IS_INITDOMAIN(xen_info) ?
491e4b86885SCheng Sean Ye 0 : CPUID_INTC_EDX_MCA; 492843e1988Sjohnlev cp->cp_edx &= 493e4b86885SCheng Sean Ye ~(mcamask | 494e4b86885SCheng Sean Ye CPUID_INTC_EDX_PSE | 495843e1988Sjohnlev CPUID_INTC_EDX_VME | CPUID_INTC_EDX_DE | 496843e1988Sjohnlev CPUID_INTC_EDX_SEP | CPUID_INTC_EDX_MTRR | 497843e1988Sjohnlev CPUID_INTC_EDX_PGE | CPUID_INTC_EDX_PAT | 498843e1988Sjohnlev CPUID_AMD_EDX_SYSC | CPUID_INTC_EDX_SEP | 499843e1988Sjohnlev CPUID_INTC_EDX_PSE36 | CPUID_INTC_EDX_HTT); 500843e1988Sjohnlev break; 501e4b86885SCheng Sean Ye } 502ae115bc7Smrj 503843e1988Sjohnlev case 0x80000001: 504843e1988Sjohnlev cp->cp_edx &= 505843e1988Sjohnlev ~(CPUID_AMD_EDX_PSE | 506843e1988Sjohnlev CPUID_INTC_EDX_VME | CPUID_INTC_EDX_DE | 507843e1988Sjohnlev CPUID_AMD_EDX_MTRR | CPUID_AMD_EDX_PGE | 508843e1988Sjohnlev CPUID_AMD_EDX_PAT | CPUID_AMD_EDX_PSE36 | 509843e1988Sjohnlev CPUID_AMD_EDX_SYSC | CPUID_INTC_EDX_SEP | 510843e1988Sjohnlev CPUID_AMD_EDX_TSCP); 511843e1988Sjohnlev cp->cp_ecx &= ~CPUID_AMD_ECX_CMP_LGCY; 512843e1988Sjohnlev break; 513843e1988Sjohnlev default: 514843e1988Sjohnlev break; 515843e1988Sjohnlev } 516843e1988Sjohnlev 517843e1988Sjohnlev switch (vendor) { 518843e1988Sjohnlev case X86_VENDOR_Intel: 519843e1988Sjohnlev switch (eax) { 520843e1988Sjohnlev case 4: 521843e1988Sjohnlev /* 522843e1988Sjohnlev * Zero out the (ncores-per-chip - 1) field 523843e1988Sjohnlev */ 524843e1988Sjohnlev cp->cp_eax &= 0x03fffffff; 525843e1988Sjohnlev break; 526843e1988Sjohnlev default: 527843e1988Sjohnlev break; 528843e1988Sjohnlev } 529843e1988Sjohnlev break; 530843e1988Sjohnlev case X86_VENDOR_AMD: 531843e1988Sjohnlev switch (eax) { 5322ef50f01SJoe Bonasera 5332ef50f01SJoe Bonasera case 0x80000001: 5342ef50f01SJoe Bonasera cp->cp_ecx &= ~CPUID_AMD_ECX_CR8D; 5352ef50f01SJoe Bonasera break; 5362ef50f01SJoe Bonasera 537843e1988Sjohnlev case 0x80000008: 538843e1988Sjohnlev /* 539843e1988Sjohnlev * Zero out the (ncores-per-chip - 1) field 540843e1988Sjohnlev */ 541843e1988Sjohnlev cp->cp_ecx &= 0xffffff00; 542843e1988Sjohnlev break; 543843e1988Sjohnlev default: 544843e1988Sjohnlev break; 545843e1988Sjohnlev } 546843e1988Sjohnlev break; 547843e1988Sjohnlev default: 548843e1988Sjohnlev break; 549843e1988Sjohnlev } 550843e1988Sjohnlev } 551843e1988Sjohnlev #else 552ae115bc7Smrj #define platform_cpuid_mangle(vendor, eax, cp) /* nothing */ 553843e1988Sjohnlev #endif 554ae115bc7Smrj 555ae115bc7Smrj /* 5567c478bd9Sstevel@tonic-gate * Some undocumented ways of patching the results of the cpuid 5577c478bd9Sstevel@tonic-gate * instruction to permit running Solaris 10 on future cpus that 5587c478bd9Sstevel@tonic-gate * we don't currently support. Could be set to non-zero values 5597c478bd9Sstevel@tonic-gate * via settings in eeprom. 5607c478bd9Sstevel@tonic-gate */ 5617c478bd9Sstevel@tonic-gate 5627c478bd9Sstevel@tonic-gate uint32_t cpuid_feature_ecx_include; 5637c478bd9Sstevel@tonic-gate uint32_t cpuid_feature_ecx_exclude; 5647c478bd9Sstevel@tonic-gate uint32_t cpuid_feature_edx_include; 5657c478bd9Sstevel@tonic-gate uint32_t cpuid_feature_edx_exclude; 5667c478bd9Sstevel@tonic-gate 567a3114836SGerry Liu /* 568a3114836SGerry Liu * Allocate space for mcpu_cpi in the machcpu structure for all non-boot CPUs. 569a3114836SGerry Liu */ 570ae115bc7Smrj void 571ae115bc7Smrj cpuid_alloc_space(cpu_t *cpu) 572ae115bc7Smrj { 573ae115bc7Smrj /* 574ae115bc7Smrj * By convention, cpu0 is the boot cpu, which is set up 575ae115bc7Smrj * before memory allocation is available. 
All other cpus get 576ae115bc7Smrj * their cpuid_info struct allocated here. 577ae115bc7Smrj */ 578ae115bc7Smrj ASSERT(cpu->cpu_id != 0); 579a3114836SGerry Liu ASSERT(cpu->cpu_m.mcpu_cpi == NULL); 580ae115bc7Smrj cpu->cpu_m.mcpu_cpi = 581ae115bc7Smrj kmem_zalloc(sizeof (*cpu->cpu_m.mcpu_cpi), KM_SLEEP); 582ae115bc7Smrj } 583ae115bc7Smrj 584ae115bc7Smrj void 585ae115bc7Smrj cpuid_free_space(cpu_t *cpu) 586ae115bc7Smrj { 587d129bde2Sesaxe struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; 588d129bde2Sesaxe int i; 589d129bde2Sesaxe 590a3114836SGerry Liu ASSERT(cpi != NULL); 591a3114836SGerry Liu ASSERT(cpi != &cpuid_info0); 592d129bde2Sesaxe 593d129bde2Sesaxe /* 594d129bde2Sesaxe * Free up any function 4 related dynamic storage 595d129bde2Sesaxe */ 596d129bde2Sesaxe for (i = 1; i < cpi->cpi_std_4_size; i++) 597d129bde2Sesaxe kmem_free(cpi->cpi_std_4[i], sizeof (struct cpuid_regs)); 598d129bde2Sesaxe if (cpi->cpi_std_4_size > 0) 599d129bde2Sesaxe kmem_free(cpi->cpi_std_4, 600d129bde2Sesaxe cpi->cpi_std_4_size * sizeof (struct cpuid_regs *)); 601d129bde2Sesaxe 602a3114836SGerry Liu kmem_free(cpi, sizeof (*cpi)); 603a3114836SGerry Liu cpu->cpu_m.mcpu_cpi = NULL; 604ae115bc7Smrj } 605ae115bc7Smrj 606551bc2a6Smrj #if !defined(__xpv) 607cfe84b82SMatt Amdur /* 608cfe84b82SMatt Amdur * Determine the type of the underlying platform. This is used to customize 609cfe84b82SMatt Amdur * initialization of various subsystems (e.g. TSC). determine_platform() must 610cfe84b82SMatt Amdur * only ever be called once to prevent two processors from seeing different 61179ec9da8SYuri Pankov * values of platform_type. Must be called before cpuid_pass1(), the earliest 61279ec9da8SYuri Pankov * consumer to execute (uses _cpuid_chiprev --> synth_amd_info --> get_hwenv). 613cfe84b82SMatt Amdur */ 614cfe84b82SMatt Amdur void 615cfe84b82SMatt Amdur determine_platform(void) 616551bc2a6Smrj { 617551bc2a6Smrj struct cpuid_regs cp; 61879ec9da8SYuri Pankov uint32_t base; 61979ec9da8SYuri Pankov uint32_t regs[4]; 62079ec9da8SYuri Pankov char *hvstr = (char *)regs; 621551bc2a6Smrj 622cfe84b82SMatt Amdur ASSERT(platform_type == -1); 623cfe84b82SMatt Amdur 624349b53ddSStuart Maybee platform_type = HW_NATIVE; 625349b53ddSStuart Maybee 626349b53ddSStuart Maybee if (!enable_platform_detection) 627349b53ddSStuart Maybee return; 628349b53ddSStuart Maybee 629551bc2a6Smrj /* 63079ec9da8SYuri Pankov * If Hypervisor CPUID bit is set, try to determine hypervisor 63179ec9da8SYuri Pankov * vendor signature, and set platform type accordingly. 
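	 *
	 * For example (well-known signatures, listed only for illustration):
	 * Xen HVM returns "XenVMMXenVMM" in %ebx:%ecx:%edx of leaf
	 * 0x40000000, KVM returns "KVMKVMKVM\0\0\0", VMware returns
	 * "VMwareVMware" and Hyper-V returns "Microsoft Hv"; these are the
	 * values the HVSIG_* strings compared against below are expected
	 * to hold.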
63279ec9da8SYuri Pankov * 63379ec9da8SYuri Pankov * References: 63479ec9da8SYuri Pankov * http://lkml.org/lkml/2008/10/1/246 63579ec9da8SYuri Pankov * http://kb.vmware.com/kb/1009458 636551bc2a6Smrj */ 63779ec9da8SYuri Pankov cp.cp_eax = 0x1; 638551bc2a6Smrj (void) __cpuid_insn(&cp); 63979ec9da8SYuri Pankov if ((cp.cp_ecx & CPUID_INTC_ECX_HV) != 0) { 64079ec9da8SYuri Pankov cp.cp_eax = 0x40000000; 64179ec9da8SYuri Pankov (void) __cpuid_insn(&cp); 64279ec9da8SYuri Pankov regs[0] = cp.cp_ebx; 64379ec9da8SYuri Pankov regs[1] = cp.cp_ecx; 64479ec9da8SYuri Pankov regs[2] = cp.cp_edx; 64579ec9da8SYuri Pankov regs[3] = 0; 64679ec9da8SYuri Pankov if (strcmp(hvstr, HVSIG_XEN_HVM) == 0) { 647b9bfdccdSStuart Maybee platform_type = HW_XEN_HVM; 6486e5580c9SFrank Van Der Linden return; 649551bc2a6Smrj } 65079ec9da8SYuri Pankov if (strcmp(hvstr, HVSIG_VMWARE) == 0) { 65179ec9da8SYuri Pankov platform_type = HW_VMWARE; 65279ec9da8SYuri Pankov return; 65379ec9da8SYuri Pankov } 65479ec9da8SYuri Pankov if (strcmp(hvstr, HVSIG_KVM) == 0) { 65579ec9da8SYuri Pankov platform_type = HW_KVM; 65679ec9da8SYuri Pankov return; 65779ec9da8SYuri Pankov } 65879ec9da8SYuri Pankov if (strcmp(hvstr, HVSIG_MICROSOFT) == 0) 65979ec9da8SYuri Pankov platform_type = HW_MICROSOFT; 66079ec9da8SYuri Pankov } else { 66179ec9da8SYuri Pankov /* 66279ec9da8SYuri Pankov * Check older VMware hardware versions. VMware hypervisor is 66379ec9da8SYuri Pankov * detected by performing an IN operation to VMware hypervisor 66479ec9da8SYuri Pankov * port and checking that value returned in %ebx is VMware 66579ec9da8SYuri Pankov * hypervisor magic value. 66679ec9da8SYuri Pankov * 66779ec9da8SYuri Pankov * References: http://kb.vmware.com/kb/1009458 66879ec9da8SYuri Pankov */ 66979ec9da8SYuri Pankov vmware_port(VMWARE_HVCMD_GETVERSION, regs); 67079ec9da8SYuri Pankov if (regs[1] == VMWARE_HVMAGIC) { 67179ec9da8SYuri Pankov platform_type = HW_VMWARE; 67279ec9da8SYuri Pankov return; 67379ec9da8SYuri Pankov } 674b9bfdccdSStuart Maybee } 675b9bfdccdSStuart Maybee 67679ec9da8SYuri Pankov /* 67779ec9da8SYuri Pankov * Check Xen hypervisor. In a fully virtualized domain, 67879ec9da8SYuri Pankov * Xen's pseudo-cpuid function returns a string representing the 67979ec9da8SYuri Pankov * Xen signature in %ebx, %ecx, and %edx. %eax contains the maximum 68079ec9da8SYuri Pankov * supported cpuid function. We need at least a (base + 2) leaf value 68179ec9da8SYuri Pankov * to do what we want to do. Try different base values, since the 68279ec9da8SYuri Pankov * hypervisor might use a different one depending on whether Hyper-V 68379ec9da8SYuri Pankov * emulation is switched on by default or not. 
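	 *
	 * (In particular, when a guest is configured with Hyper-V/viridian
	 * enlightenments, Xen typically exposes its own leaves at a higher
	 * base such as 0x40000100, which is why the loop below probes
	 * bases in 0x100 increments rather than only 0x40000000.)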
68479ec9da8SYuri Pankov */ 68579ec9da8SYuri Pankov for (base = 0x40000000; base < 0x40010000; base += 0x100) { 68679ec9da8SYuri Pankov cp.cp_eax = base; 68779ec9da8SYuri Pankov (void) __cpuid_insn(&cp); 68879ec9da8SYuri Pankov regs[0] = cp.cp_ebx; 68979ec9da8SYuri Pankov regs[1] = cp.cp_ecx; 69079ec9da8SYuri Pankov regs[2] = cp.cp_edx; 69179ec9da8SYuri Pankov regs[3] = 0; 69279ec9da8SYuri Pankov if (strcmp(hvstr, HVSIG_XEN_HVM) == 0 && 69379ec9da8SYuri Pankov cp.cp_eax >= (base + 2)) { 69479ec9da8SYuri Pankov platform_type &= ~HW_NATIVE; 69579ec9da8SYuri Pankov platform_type |= HW_XEN_HVM; 69679ec9da8SYuri Pankov return; 69779ec9da8SYuri Pankov } 69879ec9da8SYuri Pankov } 6996e5580c9SFrank Van Der Linden } 7006e5580c9SFrank Van Der Linden 701b9bfdccdSStuart Maybee int 702b9bfdccdSStuart Maybee get_hwenv(void) 703b9bfdccdSStuart Maybee { 704cfe84b82SMatt Amdur ASSERT(platform_type != -1); 705b9bfdccdSStuart Maybee return (platform_type); 706b9bfdccdSStuart Maybee } 707b9bfdccdSStuart Maybee 708b9bfdccdSStuart Maybee int 709b9bfdccdSStuart Maybee is_controldom(void) 710b9bfdccdSStuart Maybee { 711b9bfdccdSStuart Maybee return (0); 712b9bfdccdSStuart Maybee } 713b9bfdccdSStuart Maybee 714b9bfdccdSStuart Maybee #else 715b9bfdccdSStuart Maybee 716b9bfdccdSStuart Maybee int 717b9bfdccdSStuart Maybee get_hwenv(void) 718b9bfdccdSStuart Maybee { 719b9bfdccdSStuart Maybee return (HW_XEN_PV); 720b9bfdccdSStuart Maybee } 721b9bfdccdSStuart Maybee 722b9bfdccdSStuart Maybee int 723b9bfdccdSStuart Maybee is_controldom(void) 724b9bfdccdSStuart Maybee { 725b9bfdccdSStuart Maybee return (DOMAIN_IS_INITDOMAIN(xen_info)); 726b9bfdccdSStuart Maybee } 727b9bfdccdSStuart Maybee 728551bc2a6Smrj #endif /* __xpv */ 729551bc2a6Smrj 7308031591dSSrihari Venkatesan static void 7317417cfdeSKuriakose Kuruvilla cpuid_intel_getids(cpu_t *cpu, void *feature) 7328031591dSSrihari Venkatesan { 7338031591dSSrihari Venkatesan uint_t i; 7348031591dSSrihari Venkatesan uint_t chipid_shift = 0; 7358031591dSSrihari Venkatesan uint_t coreid_shift = 0; 7368031591dSSrihari Venkatesan struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; 7378031591dSSrihari Venkatesan 7388031591dSSrihari Venkatesan for (i = 1; i < cpi->cpi_ncpu_per_chip; i <<= 1) 7398031591dSSrihari Venkatesan chipid_shift++; 7408031591dSSrihari Venkatesan 7418031591dSSrihari Venkatesan cpi->cpi_chipid = cpi->cpi_apicid >> chipid_shift; 7428031591dSSrihari Venkatesan cpi->cpi_clogid = cpi->cpi_apicid & ((1 << chipid_shift) - 1); 7438031591dSSrihari Venkatesan 7447417cfdeSKuriakose Kuruvilla if (is_x86_feature(feature, X86FSET_CMP)) { 7458031591dSSrihari Venkatesan /* 7468031591dSSrihari Venkatesan * Multi-core (and possibly multi-threaded) 7478031591dSSrihari Venkatesan * processors. 
7488031591dSSrihari Venkatesan */ 7498031591dSSrihari Venkatesan uint_t ncpu_per_core; 7508031591dSSrihari Venkatesan if (cpi->cpi_ncore_per_chip == 1) 7518031591dSSrihari Venkatesan ncpu_per_core = cpi->cpi_ncpu_per_chip; 7528031591dSSrihari Venkatesan else if (cpi->cpi_ncore_per_chip > 1) 7538031591dSSrihari Venkatesan ncpu_per_core = cpi->cpi_ncpu_per_chip / 7548031591dSSrihari Venkatesan cpi->cpi_ncore_per_chip; 7558031591dSSrihari Venkatesan /* 7568031591dSSrihari Venkatesan * 8bit APIC IDs on dual core Pentiums 7578031591dSSrihari Venkatesan * look like this: 7588031591dSSrihari Venkatesan * 7598031591dSSrihari Venkatesan * +-----------------------+------+------+ 7608031591dSSrihari Venkatesan * | Physical Package ID | MC | HT | 7618031591dSSrihari Venkatesan * +-----------------------+------+------+ 7628031591dSSrihari Venkatesan * <------- chipid --------> 7638031591dSSrihari Venkatesan * <------- coreid ---------------> 7648031591dSSrihari Venkatesan * <--- clogid --> 7658031591dSSrihari Venkatesan * <------> 7668031591dSSrihari Venkatesan * pkgcoreid 7678031591dSSrihari Venkatesan * 7688031591dSSrihari Venkatesan * Where the number of bits necessary to 7698031591dSSrihari Venkatesan * represent MC and HT fields together equals 7708031591dSSrihari Venkatesan * to the minimum number of bits necessary to 7718031591dSSrihari Venkatesan * store the value of cpi->cpi_ncpu_per_chip. 7728031591dSSrihari Venkatesan * Of those bits, the MC part uses the number 7738031591dSSrihari Venkatesan * of bits necessary to store the value of 7748031591dSSrihari Venkatesan * cpi->cpi_ncore_per_chip. 7758031591dSSrihari Venkatesan */ 7768031591dSSrihari Venkatesan for (i = 1; i < ncpu_per_core; i <<= 1) 7778031591dSSrihari Venkatesan coreid_shift++; 7788031591dSSrihari Venkatesan cpi->cpi_coreid = cpi->cpi_apicid >> coreid_shift; 7798031591dSSrihari Venkatesan cpi->cpi_pkgcoreid = cpi->cpi_clogid >> coreid_shift; 7807417cfdeSKuriakose Kuruvilla } else if (is_x86_feature(feature, X86FSET_HTT)) { 7818031591dSSrihari Venkatesan /* 7828031591dSSrihari Venkatesan * Single-core multi-threaded processors. 7838031591dSSrihari Venkatesan */ 7848031591dSSrihari Venkatesan cpi->cpi_coreid = cpi->cpi_chipid; 7858031591dSSrihari Venkatesan cpi->cpi_pkgcoreid = 0; 7868031591dSSrihari Venkatesan } 7878031591dSSrihari Venkatesan cpi->cpi_procnodeid = cpi->cpi_chipid; 7887660e73fSHans Rosenfeld cpi->cpi_compunitid = cpi->cpi_coreid; 7898031591dSSrihari Venkatesan } 7908031591dSSrihari Venkatesan 7918031591dSSrihari Venkatesan static void 7928031591dSSrihari Venkatesan cpuid_amd_getids(cpu_t *cpu) 7938031591dSSrihari Venkatesan { 7941fbe4a4fSSrihari Venkatesan int i, first_half, coreidsz; 7958031591dSSrihari Venkatesan uint32_t nb_caps_reg; 7968031591dSSrihari Venkatesan uint_t node2_1; 7978031591dSSrihari Venkatesan struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; 7987660e73fSHans Rosenfeld struct cpuid_regs *cp; 7998031591dSSrihari Venkatesan 8008031591dSSrihari Venkatesan /* 8018031591dSSrihari Venkatesan * AMD CMP chips currently have a single thread per core. 8028031591dSSrihari Venkatesan * 8038031591dSSrihari Venkatesan * Since no two cpus share a core we must assign a distinct coreid 8048031591dSSrihari Venkatesan * per cpu, and we do this by using the cpu_id. 
This scheme does not,
 * however, guarantee that sibling cores of a chip will have sequential
 * coreids starting at a multiple of the number of cores per chip -
 * that is usually the case, but if the ACPI MADT table is presented
 * in a different order then we need to perform a few more gymnastics
 * for the pkgcoreid.
 *
 * All processors in the system have the same number of enabled
 * cores. Cores within a processor are always numbered sequentially
 * from 0 regardless of how many or which are disabled, and there
 * is no way for the operating system to discover the real core id
 * when some are disabled.
 *
 * In family 0x15, the cores come in pairs called compute units. They
 * share I$ and L2 caches and the FPU. Enumeration of this feature is
 * simplified by the new topology extensions CPUID leaf, indicated by
 * the X86 feature X86FSET_TOPOEXT.
 */

	cpi->cpi_coreid = cpu->cpu_id;
	cpi->cpi_compunitid = cpu->cpu_id;

	if (cpi->cpi_xmaxeax >= 0x80000008) {

		coreidsz = BITX((cpi)->cpi_extd[8].cp_ecx, 15, 12);

		/*
		 * In AMD parlance chip is really a node while Solaris
		 * sees chip as equivalent to socket/package.
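		 *
		 * As an illustration of the decodes here: a hypothetical
		 * CPUID 0x80000008 %ecx value of 0x3005 has bits 7:0 (NC)
		 * equal to 5, giving cpi_ncore_per_chip = 6, and bits 15:12
		 * (ApicIdCoreIdSize) equal to 3, so coreidsz is 3 and the
		 * low three APIC id bits identify the core within the
		 * package.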
8338031591dSSrihari Venkatesan */ 8348031591dSSrihari Venkatesan cpi->cpi_ncore_per_chip = 8358031591dSSrihari Venkatesan BITX((cpi)->cpi_extd[8].cp_ecx, 7, 0) + 1; 8361fbe4a4fSSrihari Venkatesan if (coreidsz == 0) { 8378031591dSSrihari Venkatesan /* Use legacy method */ 8381fbe4a4fSSrihari Venkatesan for (i = 1; i < cpi->cpi_ncore_per_chip; i <<= 1) 8391fbe4a4fSSrihari Venkatesan coreidsz++; 8401fbe4a4fSSrihari Venkatesan if (coreidsz == 0) 8411fbe4a4fSSrihari Venkatesan coreidsz = 1; 8421fbe4a4fSSrihari Venkatesan } 8438031591dSSrihari Venkatesan } else { 8448031591dSSrihari Venkatesan /* Assume single-core part */ 8451fbe4a4fSSrihari Venkatesan cpi->cpi_ncore_per_chip = 1; 84672b70389SJakub Jermar coreidsz = 1; 8478031591dSSrihari Venkatesan } 8488031591dSSrihari Venkatesan 8491fbe4a4fSSrihari Venkatesan cpi->cpi_clogid = cpi->cpi_pkgcoreid = 8501fbe4a4fSSrihari Venkatesan cpi->cpi_apicid & ((1<<coreidsz) - 1); 8518031591dSSrihari Venkatesan cpi->cpi_ncpu_per_chip = cpi->cpi_ncore_per_chip; 8528031591dSSrihari Venkatesan 8537660e73fSHans Rosenfeld /* Get node ID, compute unit ID */ 8547660e73fSHans Rosenfeld if (is_x86_feature(x86_featureset, X86FSET_TOPOEXT) && 8557660e73fSHans Rosenfeld cpi->cpi_xmaxeax >= 0x8000001e) { 8567660e73fSHans Rosenfeld cp = &cpi->cpi_extd[0x1e]; 8577660e73fSHans Rosenfeld cp->cp_eax = 0x8000001e; 8587660e73fSHans Rosenfeld (void) __cpuid_insn(cp); 8597660e73fSHans Rosenfeld 8607660e73fSHans Rosenfeld cpi->cpi_procnodes_per_pkg = BITX(cp->cp_ecx, 10, 8) + 1; 8617660e73fSHans Rosenfeld cpi->cpi_procnodeid = BITX(cp->cp_ecx, 7, 0); 8627660e73fSHans Rosenfeld cpi->cpi_cores_per_compunit = BITX(cp->cp_ebx, 15, 8) + 1; 8637660e73fSHans Rosenfeld cpi->cpi_compunitid = BITX(cp->cp_ebx, 7, 0) 8647660e73fSHans Rosenfeld + (cpi->cpi_ncore_per_chip / cpi->cpi_cores_per_compunit) 8657660e73fSHans Rosenfeld * (cpi->cpi_procnodeid / cpi->cpi_procnodes_per_pkg); 8667660e73fSHans Rosenfeld } else if (cpi->cpi_family == 0xf || cpi->cpi_family >= 0x11) { 8671fbe4a4fSSrihari Venkatesan cpi->cpi_procnodeid = (cpi->cpi_apicid >> coreidsz) & 7; 8688031591dSSrihari Venkatesan } else if (cpi->cpi_family == 0x10) { 8698031591dSSrihari Venkatesan /* 8708031591dSSrihari Venkatesan * See if we are a multi-node processor. 8718031591dSSrihari Venkatesan * All processors in the system have the same number of nodes 8728031591dSSrihari Venkatesan */ 8738031591dSSrihari Venkatesan nb_caps_reg = pci_getl_func(0, 24, 3, 0xe8); 8748031591dSSrihari Venkatesan if ((cpi->cpi_model < 8) || BITX(nb_caps_reg, 29, 29) == 0) { 8758031591dSSrihari Venkatesan /* Single-node */ 8761fbe4a4fSSrihari Venkatesan cpi->cpi_procnodeid = BITX(cpi->cpi_apicid, 5, 8771fbe4a4fSSrihari Venkatesan coreidsz); 8788031591dSSrihari Venkatesan } else { 8798031591dSSrihari Venkatesan 8808031591dSSrihari Venkatesan /* 8818031591dSSrihari Venkatesan * Multi-node revision D (2 nodes per package 8828031591dSSrihari Venkatesan * are supported) 8838031591dSSrihari Venkatesan */ 8848031591dSSrihari Venkatesan cpi->cpi_procnodes_per_pkg = 2; 8858031591dSSrihari Venkatesan 8868031591dSSrihari Venkatesan first_half = (cpi->cpi_pkgcoreid <= 8878031591dSSrihari Venkatesan (cpi->cpi_ncore_per_chip/2 - 1)); 8888031591dSSrihari Venkatesan 8898031591dSSrihari Venkatesan if (cpi->cpi_apicid == cpi->cpi_pkgcoreid) { 8908031591dSSrihari Venkatesan /* We are BSP */ 8918031591dSSrihari Venkatesan cpi->cpi_procnodeid = (first_half ? 
0 : 1); 8928031591dSSrihari Venkatesan } else { 8938031591dSSrihari Venkatesan 8948031591dSSrihari Venkatesan /* We are AP */ 8958031591dSSrihari Venkatesan /* NodeId[2:1] bits to use for reading F3xe8 */ 8968031591dSSrihari Venkatesan node2_1 = BITX(cpi->cpi_apicid, 5, 4) << 1; 8978031591dSSrihari Venkatesan 8988031591dSSrihari Venkatesan nb_caps_reg = 8998031591dSSrihari Venkatesan pci_getl_func(0, 24 + node2_1, 3, 0xe8); 9008031591dSSrihari Venkatesan 9018031591dSSrihari Venkatesan /* 9028031591dSSrihari Venkatesan * Check IntNodeNum bit (31:30, but bit 31 is 9038031591dSSrihari Venkatesan * always 0 on dual-node processors) 9048031591dSSrihari Venkatesan */ 9058031591dSSrihari Venkatesan if (BITX(nb_caps_reg, 30, 30) == 0) 9068031591dSSrihari Venkatesan cpi->cpi_procnodeid = node2_1 + 9078031591dSSrihari Venkatesan !first_half; 9088031591dSSrihari Venkatesan else 9098031591dSSrihari Venkatesan cpi->cpi_procnodeid = node2_1 + 9108031591dSSrihari Venkatesan first_half; 9118031591dSSrihari Venkatesan } 9128031591dSSrihari Venkatesan } 9138031591dSSrihari Venkatesan } else { 9148031591dSSrihari Venkatesan cpi->cpi_procnodeid = 0; 9158031591dSSrihari Venkatesan } 9167660e73fSHans Rosenfeld 9177660e73fSHans Rosenfeld cpi->cpi_chipid = 9187660e73fSHans Rosenfeld cpi->cpi_procnodeid / cpi->cpi_procnodes_per_pkg; 9198031591dSSrihari Venkatesan } 9208031591dSSrihari Venkatesan 9217af88ac7SKuriakose Kuruvilla /* 9227af88ac7SKuriakose Kuruvilla * Setup XFeature_Enabled_Mask register. Required by xsave feature. 9237af88ac7SKuriakose Kuruvilla */ 9247af88ac7SKuriakose Kuruvilla void 9257af88ac7SKuriakose Kuruvilla setup_xfem(void) 9267af88ac7SKuriakose Kuruvilla { 9277af88ac7SKuriakose Kuruvilla uint64_t flags = XFEATURE_LEGACY_FP; 9287af88ac7SKuriakose Kuruvilla 9297af88ac7SKuriakose Kuruvilla ASSERT(is_x86_feature(x86_featureset, X86FSET_XSAVE)); 9307af88ac7SKuriakose Kuruvilla 9317af88ac7SKuriakose Kuruvilla if (is_x86_feature(x86_featureset, X86FSET_SSE)) 9327af88ac7SKuriakose Kuruvilla flags |= XFEATURE_SSE; 9337af88ac7SKuriakose Kuruvilla 9347af88ac7SKuriakose Kuruvilla if (is_x86_feature(x86_featureset, X86FSET_AVX)) 9357af88ac7SKuriakose Kuruvilla flags |= XFEATURE_AVX; 9367af88ac7SKuriakose Kuruvilla 9377af88ac7SKuriakose Kuruvilla set_xcr(XFEATURE_ENABLED_MASK, flags); 9387af88ac7SKuriakose Kuruvilla 9397af88ac7SKuriakose Kuruvilla xsave_bv_all = flags; 9407af88ac7SKuriakose Kuruvilla } 9417af88ac7SKuriakose Kuruvilla 942dfea898aSKuriakose Kuruvilla void 943dfea898aSKuriakose Kuruvilla cpuid_pass1(cpu_t *cpu, uchar_t *featureset) 9447c478bd9Sstevel@tonic-gate { 9457c478bd9Sstevel@tonic-gate uint32_t mask_ecx, mask_edx; 9467c478bd9Sstevel@tonic-gate struct cpuid_info *cpi; 9478949bcd6Sandrei struct cpuid_regs *cp; 9487c478bd9Sstevel@tonic-gate int xcpuid; 949843e1988Sjohnlev #if !defined(__xpv) 9505b8a6efeSbholler extern int idle_cpu_prefer_mwait; 951843e1988Sjohnlev #endif 952ae115bc7Smrj 9537c478bd9Sstevel@tonic-gate /* 954a3114836SGerry Liu * Space statically allocated for BSP, ensure pointer is set 9557c478bd9Sstevel@tonic-gate */ 9567417cfdeSKuriakose Kuruvilla if (cpu->cpu_id == 0) { 9577417cfdeSKuriakose Kuruvilla if (cpu->cpu_m.mcpu_cpi == NULL) 958ae115bc7Smrj cpu->cpu_m.mcpu_cpi = &cpuid_info0; 9597417cfdeSKuriakose Kuruvilla } 9607417cfdeSKuriakose Kuruvilla 9617417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_CPUID); 9627417cfdeSKuriakose Kuruvilla 963ae115bc7Smrj cpi = cpu->cpu_m.mcpu_cpi; 964ae115bc7Smrj ASSERT(cpi != NULL); 9657c478bd9Sstevel@tonic-gate cp = 
&cpi->cpi_std[0]; 9668949bcd6Sandrei cp->cp_eax = 0; 9678949bcd6Sandrei cpi->cpi_maxeax = __cpuid_insn(cp); 9687c478bd9Sstevel@tonic-gate { 9697c478bd9Sstevel@tonic-gate uint32_t *iptr = (uint32_t *)cpi->cpi_vendorstr; 9707c478bd9Sstevel@tonic-gate *iptr++ = cp->cp_ebx; 9717c478bd9Sstevel@tonic-gate *iptr++ = cp->cp_edx; 9727c478bd9Sstevel@tonic-gate *iptr++ = cp->cp_ecx; 9737c478bd9Sstevel@tonic-gate *(char *)&cpi->cpi_vendorstr[12] = '\0'; 9747c478bd9Sstevel@tonic-gate } 9757c478bd9Sstevel@tonic-gate 976e4b86885SCheng Sean Ye cpi->cpi_vendor = _cpuid_vendorstr_to_vendorcode(cpi->cpi_vendorstr); 9777c478bd9Sstevel@tonic-gate x86_vendor = cpi->cpi_vendor; /* for compatibility */ 9787c478bd9Sstevel@tonic-gate 9797c478bd9Sstevel@tonic-gate /* 9807c478bd9Sstevel@tonic-gate * Limit the range in case of weird hardware 9817c478bd9Sstevel@tonic-gate */ 9827c478bd9Sstevel@tonic-gate if (cpi->cpi_maxeax > CPI_MAXEAX_MAX) 9837c478bd9Sstevel@tonic-gate cpi->cpi_maxeax = CPI_MAXEAX_MAX; 9847c478bd9Sstevel@tonic-gate if (cpi->cpi_maxeax < 1) 9857c478bd9Sstevel@tonic-gate goto pass1_done; 9867c478bd9Sstevel@tonic-gate 9877c478bd9Sstevel@tonic-gate cp = &cpi->cpi_std[1]; 9888949bcd6Sandrei cp->cp_eax = 1; 9898949bcd6Sandrei (void) __cpuid_insn(cp); 9907c478bd9Sstevel@tonic-gate 9917c478bd9Sstevel@tonic-gate /* 9927c478bd9Sstevel@tonic-gate * Extract identifying constants for easy access. 9937c478bd9Sstevel@tonic-gate */ 9947c478bd9Sstevel@tonic-gate cpi->cpi_model = CPI_MODEL(cpi); 9957c478bd9Sstevel@tonic-gate cpi->cpi_family = CPI_FAMILY(cpi); 9967c478bd9Sstevel@tonic-gate 9975ff02082Sdmick if (cpi->cpi_family == 0xf) 9987c478bd9Sstevel@tonic-gate cpi->cpi_family += CPI_FAMILY_XTD(cpi); 9995ff02082Sdmick 100068c91426Sdmick /* 1001875b116eSkchow * Beware: AMD uses "extended model" iff base *FAMILY* == 0xf. 100268c91426Sdmick * Intel, and presumably everyone else, uses model == 0xf, as 100368c91426Sdmick * one would expect (max value means possible overflow). Sigh. 
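	 *
	 * As a worked example of the folding below: an Intel family 6 part
	 * with a base model of 0xA and an extended model of 0x2 ends up
	 * with cpi_model = 0xA + (0x2 << 4) = 0x2A.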
100468c91426Sdmick */ 100568c91426Sdmick 100668c91426Sdmick switch (cpi->cpi_vendor) { 1007bf91205bSksadhukh case X86_VENDOR_Intel: 1008bf91205bSksadhukh if (IS_EXTENDED_MODEL_INTEL(cpi)) 1009bf91205bSksadhukh cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4; 1010447af253Sksadhukh break; 101168c91426Sdmick case X86_VENDOR_AMD: 1012875b116eSkchow if (CPI_FAMILY(cpi) == 0xf) 101368c91426Sdmick cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4; 101468c91426Sdmick break; 101568c91426Sdmick default: 10165ff02082Sdmick if (cpi->cpi_model == 0xf) 10177c478bd9Sstevel@tonic-gate cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4; 101868c91426Sdmick break; 101968c91426Sdmick } 10207c478bd9Sstevel@tonic-gate 10217c478bd9Sstevel@tonic-gate cpi->cpi_step = CPI_STEP(cpi); 10227c478bd9Sstevel@tonic-gate cpi->cpi_brandid = CPI_BRANDID(cpi); 10237c478bd9Sstevel@tonic-gate 10247c478bd9Sstevel@tonic-gate /* 10257c478bd9Sstevel@tonic-gate * *default* assumptions: 10267c478bd9Sstevel@tonic-gate * - believe %edx feature word 10277c478bd9Sstevel@tonic-gate * - ignore %ecx feature word 10287c478bd9Sstevel@tonic-gate * - 32-bit virtual and physical addressing 10297c478bd9Sstevel@tonic-gate */ 10307c478bd9Sstevel@tonic-gate mask_edx = 0xffffffff; 10317c478bd9Sstevel@tonic-gate mask_ecx = 0; 10327c478bd9Sstevel@tonic-gate 10337c478bd9Sstevel@tonic-gate cpi->cpi_pabits = cpi->cpi_vabits = 32; 10347c478bd9Sstevel@tonic-gate 10357c478bd9Sstevel@tonic-gate switch (cpi->cpi_vendor) { 10367c478bd9Sstevel@tonic-gate case X86_VENDOR_Intel: 10377c478bd9Sstevel@tonic-gate if (cpi->cpi_family == 5) 10387c478bd9Sstevel@tonic-gate x86_type = X86_TYPE_P5; 10395ff02082Sdmick else if (IS_LEGACY_P6(cpi)) { 10407c478bd9Sstevel@tonic-gate x86_type = X86_TYPE_P6; 10417c478bd9Sstevel@tonic-gate pentiumpro_bug4046376 = 1; 10427c478bd9Sstevel@tonic-gate /* 10437c478bd9Sstevel@tonic-gate * Clear the SEP bit when it was set erroneously 10447c478bd9Sstevel@tonic-gate */ 10457c478bd9Sstevel@tonic-gate if (cpi->cpi_model < 3 && cpi->cpi_step < 3) 10467c478bd9Sstevel@tonic-gate cp->cp_edx &= ~CPUID_INTC_EDX_SEP; 10475ff02082Sdmick } else if (IS_NEW_F6(cpi) || cpi->cpi_family == 0xf) { 10487c478bd9Sstevel@tonic-gate x86_type = X86_TYPE_P4; 10497c478bd9Sstevel@tonic-gate /* 10507c478bd9Sstevel@tonic-gate * We don't currently depend on any of the %ecx 10517c478bd9Sstevel@tonic-gate * features until Prescott, so we'll only check 10527c478bd9Sstevel@tonic-gate * this from P4 onwards. We might want to revisit 10537c478bd9Sstevel@tonic-gate * that idea later. 10547c478bd9Sstevel@tonic-gate */ 10557c478bd9Sstevel@tonic-gate mask_ecx = 0xffffffff; 10567c478bd9Sstevel@tonic-gate } else if (cpi->cpi_family > 0xf) 10577c478bd9Sstevel@tonic-gate mask_ecx = 0xffffffff; 10587c622d23Sbholler /* 10597c622d23Sbholler * We don't support MONITOR/MWAIT if leaf 5 is not available 10607c622d23Sbholler * to obtain the monitor linesize. 
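		 *
		 * (The linesize itself is what the MWAIT_SIZE_MIN()/
		 * MWAIT_SIZE_MAX() macros above extract from CPUID leaf 5
		 * %eax/%ebx once the leaf is known to exist.)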
10617c622d23Sbholler */ 10627c622d23Sbholler if (cpi->cpi_maxeax < 5) 10637c622d23Sbholler mask_ecx &= ~CPUID_INTC_ECX_MON; 10647c478bd9Sstevel@tonic-gate break; 10657c478bd9Sstevel@tonic-gate case X86_VENDOR_IntelClone: 10667c478bd9Sstevel@tonic-gate default: 10677c478bd9Sstevel@tonic-gate break; 10687c478bd9Sstevel@tonic-gate case X86_VENDOR_AMD: 10697c478bd9Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_108) 10707c478bd9Sstevel@tonic-gate if (cpi->cpi_family == 0xf && cpi->cpi_model == 0xe) { 10717c478bd9Sstevel@tonic-gate cp->cp_eax = (0xf0f & cp->cp_eax) | 0xc0; 10727c478bd9Sstevel@tonic-gate cpi->cpi_model = 0xc; 10737c478bd9Sstevel@tonic-gate } else 10747c478bd9Sstevel@tonic-gate #endif 10757c478bd9Sstevel@tonic-gate if (cpi->cpi_family == 5) { 10767c478bd9Sstevel@tonic-gate /* 10777c478bd9Sstevel@tonic-gate * AMD K5 and K6 10787c478bd9Sstevel@tonic-gate * 10797c478bd9Sstevel@tonic-gate * These CPUs have an incomplete implementation 10807c478bd9Sstevel@tonic-gate * of MCA/MCE which we mask away. 10817c478bd9Sstevel@tonic-gate */ 10828949bcd6Sandrei mask_edx &= ~(CPUID_INTC_EDX_MCE | CPUID_INTC_EDX_MCA); 10838949bcd6Sandrei 10847c478bd9Sstevel@tonic-gate /* 10857c478bd9Sstevel@tonic-gate * Model 0 uses the wrong (APIC) bit 10867c478bd9Sstevel@tonic-gate * to indicate PGE. Fix it here. 10877c478bd9Sstevel@tonic-gate */ 10888949bcd6Sandrei if (cpi->cpi_model == 0) { 10897c478bd9Sstevel@tonic-gate if (cp->cp_edx & 0x200) { 10907c478bd9Sstevel@tonic-gate cp->cp_edx &= ~0x200; 10917c478bd9Sstevel@tonic-gate cp->cp_edx |= CPUID_INTC_EDX_PGE; 10927c478bd9Sstevel@tonic-gate } 10937c478bd9Sstevel@tonic-gate } 10948949bcd6Sandrei 10958949bcd6Sandrei /* 10968949bcd6Sandrei * Early models had problems w/ MMX; disable. 10978949bcd6Sandrei */ 10988949bcd6Sandrei if (cpi->cpi_model < 6) 10998949bcd6Sandrei mask_edx &= ~CPUID_INTC_EDX_MMX; 11008949bcd6Sandrei } 11018949bcd6Sandrei 11028949bcd6Sandrei /* 11038949bcd6Sandrei * For newer families, SSE3 and CX16, at least, are valid; 11048949bcd6Sandrei * enable all 11058949bcd6Sandrei */ 11068949bcd6Sandrei if (cpi->cpi_family >= 0xf) 11078949bcd6Sandrei mask_ecx = 0xffffffff; 11087c622d23Sbholler /* 11097c622d23Sbholler * We don't support MONITOR/MWAIT if leaf 5 is not available 11107c622d23Sbholler * to obtain the monitor linesize. 11117c622d23Sbholler */ 11127c622d23Sbholler if (cpi->cpi_maxeax < 5) 11137c622d23Sbholler mask_ecx &= ~CPUID_INTC_ECX_MON; 11145b8a6efeSbholler 1115843e1988Sjohnlev #if !defined(__xpv) 11165b8a6efeSbholler /* 11175b8a6efeSbholler * Do not use MONITOR/MWAIT to halt in the idle loop on any AMD 11185b8a6efeSbholler * processors. AMD does not intend MWAIT to be used in the cpu 11195b8a6efeSbholler * idle loop on current and future processors. 10h and future 11205b8a6efeSbholler * AMD processors use more power in MWAIT than HLT. 11215b8a6efeSbholler * Pre-family-10h Opterons do not have the MWAIT instruction. 
11225b8a6efeSbholler */ 11235b8a6efeSbholler idle_cpu_prefer_mwait = 0; 1124843e1988Sjohnlev #endif 11255b8a6efeSbholler 11267c478bd9Sstevel@tonic-gate break; 11277c478bd9Sstevel@tonic-gate case X86_VENDOR_TM: 11287c478bd9Sstevel@tonic-gate /* 11297c478bd9Sstevel@tonic-gate * workaround the NT workaround in CMS 4.1 11307c478bd9Sstevel@tonic-gate */ 11317c478bd9Sstevel@tonic-gate if (cpi->cpi_family == 5 && cpi->cpi_model == 4 && 11327c478bd9Sstevel@tonic-gate (cpi->cpi_step == 2 || cpi->cpi_step == 3)) 11337c478bd9Sstevel@tonic-gate cp->cp_edx |= CPUID_INTC_EDX_CX8; 11347c478bd9Sstevel@tonic-gate break; 11357c478bd9Sstevel@tonic-gate case X86_VENDOR_Centaur: 11367c478bd9Sstevel@tonic-gate /* 11377c478bd9Sstevel@tonic-gate * workaround the NT workarounds again 11387c478bd9Sstevel@tonic-gate */ 11397c478bd9Sstevel@tonic-gate if (cpi->cpi_family == 6) 11407c478bd9Sstevel@tonic-gate cp->cp_edx |= CPUID_INTC_EDX_CX8; 11417c478bd9Sstevel@tonic-gate break; 11427c478bd9Sstevel@tonic-gate case X86_VENDOR_Cyrix: 11437c478bd9Sstevel@tonic-gate /* 11447c478bd9Sstevel@tonic-gate * We rely heavily on the probing in locore 11457c478bd9Sstevel@tonic-gate * to actually figure out what parts, if any, 11467c478bd9Sstevel@tonic-gate * of the Cyrix cpuid instruction to believe. 11477c478bd9Sstevel@tonic-gate */ 11487c478bd9Sstevel@tonic-gate switch (x86_type) { 11497c478bd9Sstevel@tonic-gate case X86_TYPE_CYRIX_486: 11507c478bd9Sstevel@tonic-gate mask_edx = 0; 11517c478bd9Sstevel@tonic-gate break; 11527c478bd9Sstevel@tonic-gate case X86_TYPE_CYRIX_6x86: 11537c478bd9Sstevel@tonic-gate mask_edx = 0; 11547c478bd9Sstevel@tonic-gate break; 11557c478bd9Sstevel@tonic-gate case X86_TYPE_CYRIX_6x86L: 11567c478bd9Sstevel@tonic-gate mask_edx = 11577c478bd9Sstevel@tonic-gate CPUID_INTC_EDX_DE | 11587c478bd9Sstevel@tonic-gate CPUID_INTC_EDX_CX8; 11597c478bd9Sstevel@tonic-gate break; 11607c478bd9Sstevel@tonic-gate case X86_TYPE_CYRIX_6x86MX: 11617c478bd9Sstevel@tonic-gate mask_edx = 11627c478bd9Sstevel@tonic-gate CPUID_INTC_EDX_DE | 11637c478bd9Sstevel@tonic-gate CPUID_INTC_EDX_MSR | 11647c478bd9Sstevel@tonic-gate CPUID_INTC_EDX_CX8 | 11657c478bd9Sstevel@tonic-gate CPUID_INTC_EDX_PGE | 11667c478bd9Sstevel@tonic-gate CPUID_INTC_EDX_CMOV | 11677c478bd9Sstevel@tonic-gate CPUID_INTC_EDX_MMX; 11687c478bd9Sstevel@tonic-gate break; 11697c478bd9Sstevel@tonic-gate case X86_TYPE_CYRIX_GXm: 11707c478bd9Sstevel@tonic-gate mask_edx = 11717c478bd9Sstevel@tonic-gate CPUID_INTC_EDX_MSR | 11727c478bd9Sstevel@tonic-gate CPUID_INTC_EDX_CX8 | 11737c478bd9Sstevel@tonic-gate CPUID_INTC_EDX_CMOV | 11747c478bd9Sstevel@tonic-gate CPUID_INTC_EDX_MMX; 11757c478bd9Sstevel@tonic-gate break; 11767c478bd9Sstevel@tonic-gate case X86_TYPE_CYRIX_MediaGX: 11777c478bd9Sstevel@tonic-gate break; 11787c478bd9Sstevel@tonic-gate case X86_TYPE_CYRIX_MII: 11797c478bd9Sstevel@tonic-gate case X86_TYPE_VIA_CYRIX_III: 11807c478bd9Sstevel@tonic-gate mask_edx = 11817c478bd9Sstevel@tonic-gate CPUID_INTC_EDX_DE | 11827c478bd9Sstevel@tonic-gate CPUID_INTC_EDX_TSC | 11837c478bd9Sstevel@tonic-gate CPUID_INTC_EDX_MSR | 11847c478bd9Sstevel@tonic-gate CPUID_INTC_EDX_CX8 | 11857c478bd9Sstevel@tonic-gate CPUID_INTC_EDX_PGE | 11867c478bd9Sstevel@tonic-gate CPUID_INTC_EDX_CMOV | 11877c478bd9Sstevel@tonic-gate CPUID_INTC_EDX_MMX; 11887c478bd9Sstevel@tonic-gate break; 11897c478bd9Sstevel@tonic-gate default: 11907c478bd9Sstevel@tonic-gate break; 11917c478bd9Sstevel@tonic-gate } 11927c478bd9Sstevel@tonic-gate break; 11937c478bd9Sstevel@tonic-gate } 11947c478bd9Sstevel@tonic-gate 
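	/*
	 * Note that the vendor cases above only establish mask_edx and
	 * mask_ecx; the hypervisor and xsave_force_disable adjustments
	 * below may clear additional %ecx bits before the masks are
	 * finally applied to the leaf 1 feature words.
	 */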
1195843e1988Sjohnlev #if defined(__xpv) 1196843e1988Sjohnlev /* 1197843e1988Sjohnlev * Do not support MONITOR/MWAIT under a hypervisor 1198843e1988Sjohnlev */ 1199843e1988Sjohnlev mask_ecx &= ~CPUID_INTC_ECX_MON; 12007af88ac7SKuriakose Kuruvilla /* 12017af88ac7SKuriakose Kuruvilla * Do not support XSAVE under a hypervisor for now 12027af88ac7SKuriakose Kuruvilla */ 12037af88ac7SKuriakose Kuruvilla xsave_force_disable = B_TRUE; 12047af88ac7SKuriakose Kuruvilla 1205843e1988Sjohnlev #endif /* __xpv */ 1206843e1988Sjohnlev 12077af88ac7SKuriakose Kuruvilla if (xsave_force_disable) { 12087af88ac7SKuriakose Kuruvilla mask_ecx &= ~CPUID_INTC_ECX_XSAVE; 12097af88ac7SKuriakose Kuruvilla mask_ecx &= ~CPUID_INTC_ECX_AVX; 1210ebb8ac07SRobert Mustacchi mask_ecx &= ~CPUID_INTC_ECX_F16C; 1211245ac945SRobert Mustacchi mask_ecx &= ~CPUID_INTC_ECX_FMA; 12127af88ac7SKuriakose Kuruvilla } 12137af88ac7SKuriakose Kuruvilla 12147c478bd9Sstevel@tonic-gate /* 12157c478bd9Sstevel@tonic-gate * Now we've figured out the masks that determine 12167c478bd9Sstevel@tonic-gate * which bits we choose to believe, apply the masks 12177c478bd9Sstevel@tonic-gate * to the feature words, then map the kernel's view 12187c478bd9Sstevel@tonic-gate * of these feature words into its feature word. 12197c478bd9Sstevel@tonic-gate */ 12207c478bd9Sstevel@tonic-gate cp->cp_edx &= mask_edx; 12217c478bd9Sstevel@tonic-gate cp->cp_ecx &= mask_ecx; 12227c478bd9Sstevel@tonic-gate 12237c478bd9Sstevel@tonic-gate /* 1224ae115bc7Smrj * apply any platform restrictions (we don't call this 1225ae115bc7Smrj * immediately after __cpuid_insn here, because we need the 1226ae115bc7Smrj * workarounds applied above first) 12277c478bd9Sstevel@tonic-gate */ 1228ae115bc7Smrj platform_cpuid_mangle(cpi->cpi_vendor, 1, cp); 12297c478bd9Sstevel@tonic-gate 1230ae115bc7Smrj /* 1231245ac945SRobert Mustacchi * In addition to ecx and edx, Intel is storing a bunch of instruction 1232245ac945SRobert Mustacchi * set extensions in leaf 7's ebx. 1233245ac945SRobert Mustacchi */ 1234245ac945SRobert Mustacchi if (cpi->cpi_vendor == X86_VENDOR_Intel && cpi->cpi_maxeax >= 7) { 1235245ac945SRobert Mustacchi struct cpuid_regs *ecp; 1236245ac945SRobert Mustacchi ecp = &cpi->cpi_std[7]; 1237245ac945SRobert Mustacchi ecp->cp_eax = 7; 1238245ac945SRobert Mustacchi ecp->cp_ecx = 0; 1239245ac945SRobert Mustacchi (void) __cpuid_insn(ecp); 1240245ac945SRobert Mustacchi /* 1241245ac945SRobert Mustacchi * If XSAVE has been disabled, just ignore all of the AVX 1242245ac945SRobert Mustacchi * dependent flags here. 
1243245ac945SRobert Mustacchi */ 1244245ac945SRobert Mustacchi if (xsave_force_disable) { 1245245ac945SRobert Mustacchi ecp->cp_ebx &= ~CPUID_INTC_EBX_7_0_BMI1; 1246245ac945SRobert Mustacchi ecp->cp_ebx &= ~CPUID_INTC_EBX_7_0_BMI2; 1247245ac945SRobert Mustacchi ecp->cp_ebx &= ~CPUID_INTC_EBX_7_0_AVX2; 1248245ac945SRobert Mustacchi } 1249*799823bbSRobert Mustacchi 1250*799823bbSRobert Mustacchi if (ecp->cp_ebx & CPUID_INTC_EBX_7_0_SMEP) 1251*799823bbSRobert Mustacchi add_x86_feature(featureset, X86FSET_SMEP); 1252245ac945SRobert Mustacchi } 1253245ac945SRobert Mustacchi 1254245ac945SRobert Mustacchi /* 1255ae115bc7Smrj * fold in overrides from the "eeprom" mechanism 1256ae115bc7Smrj */ 12577c478bd9Sstevel@tonic-gate cp->cp_edx |= cpuid_feature_edx_include; 12587c478bd9Sstevel@tonic-gate cp->cp_edx &= ~cpuid_feature_edx_exclude; 12597c478bd9Sstevel@tonic-gate 12607c478bd9Sstevel@tonic-gate cp->cp_ecx |= cpuid_feature_ecx_include; 12617c478bd9Sstevel@tonic-gate cp->cp_ecx &= ~cpuid_feature_ecx_exclude; 12627c478bd9Sstevel@tonic-gate 12637417cfdeSKuriakose Kuruvilla if (cp->cp_edx & CPUID_INTC_EDX_PSE) { 12647417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_LARGEPAGE); 12657417cfdeSKuriakose Kuruvilla } 12667417cfdeSKuriakose Kuruvilla if (cp->cp_edx & CPUID_INTC_EDX_TSC) { 12677417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_TSC); 12687417cfdeSKuriakose Kuruvilla } 12697417cfdeSKuriakose Kuruvilla if (cp->cp_edx & CPUID_INTC_EDX_MSR) { 12707417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_MSR); 12717417cfdeSKuriakose Kuruvilla } 12727417cfdeSKuriakose Kuruvilla if (cp->cp_edx & CPUID_INTC_EDX_MTRR) { 12737417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_MTRR); 12747417cfdeSKuriakose Kuruvilla } 12757417cfdeSKuriakose Kuruvilla if (cp->cp_edx & CPUID_INTC_EDX_PGE) { 12767417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_PGE); 12777417cfdeSKuriakose Kuruvilla } 12787417cfdeSKuriakose Kuruvilla if (cp->cp_edx & CPUID_INTC_EDX_CMOV) { 12797417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_CMOV); 12807417cfdeSKuriakose Kuruvilla } 12817417cfdeSKuriakose Kuruvilla if (cp->cp_edx & CPUID_INTC_EDX_MMX) { 12827417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_MMX); 12837417cfdeSKuriakose Kuruvilla } 12847c478bd9Sstevel@tonic-gate if ((cp->cp_edx & CPUID_INTC_EDX_MCE) != 0 && 12857417cfdeSKuriakose Kuruvilla (cp->cp_edx & CPUID_INTC_EDX_MCA) != 0) { 12867417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_MCA); 12877417cfdeSKuriakose Kuruvilla } 12887417cfdeSKuriakose Kuruvilla if (cp->cp_edx & CPUID_INTC_EDX_PAE) { 12897417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_PAE); 12907417cfdeSKuriakose Kuruvilla } 12917417cfdeSKuriakose Kuruvilla if (cp->cp_edx & CPUID_INTC_EDX_CX8) { 12927417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_CX8); 12937417cfdeSKuriakose Kuruvilla } 12947417cfdeSKuriakose Kuruvilla if (cp->cp_ecx & CPUID_INTC_ECX_CX16) { 12957417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_CX16); 12967417cfdeSKuriakose Kuruvilla } 12977417cfdeSKuriakose Kuruvilla if (cp->cp_edx & CPUID_INTC_EDX_PAT) { 12987417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_PAT); 12997417cfdeSKuriakose Kuruvilla } 13007417cfdeSKuriakose Kuruvilla if (cp->cp_edx & CPUID_INTC_EDX_SEP) { 13017417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_SEP); 13027417cfdeSKuriakose Kuruvilla } 13037c478bd9Sstevel@tonic-gate if (cp->cp_edx & 
CPUID_INTC_EDX_FXSR) { 13047c478bd9Sstevel@tonic-gate /* 13057c478bd9Sstevel@tonic-gate * In our implementation, fxsave/fxrstor 13067c478bd9Sstevel@tonic-gate * are prerequisites before we'll even 13077c478bd9Sstevel@tonic-gate * try and do SSE things. 13087c478bd9Sstevel@tonic-gate */ 13097417cfdeSKuriakose Kuruvilla if (cp->cp_edx & CPUID_INTC_EDX_SSE) { 13107417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_SSE); 13117417cfdeSKuriakose Kuruvilla } 13127417cfdeSKuriakose Kuruvilla if (cp->cp_edx & CPUID_INTC_EDX_SSE2) { 13137417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_SSE2); 13147417cfdeSKuriakose Kuruvilla } 13157417cfdeSKuriakose Kuruvilla if (cp->cp_ecx & CPUID_INTC_ECX_SSE3) { 13167417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_SSE3); 13177417cfdeSKuriakose Kuruvilla } 13187417cfdeSKuriakose Kuruvilla if (cp->cp_ecx & CPUID_INTC_ECX_SSSE3) { 13197417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_SSSE3); 13207417cfdeSKuriakose Kuruvilla } 13217417cfdeSKuriakose Kuruvilla if (cp->cp_ecx & CPUID_INTC_ECX_SSE4_1) { 13227417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_SSE4_1); 13237417cfdeSKuriakose Kuruvilla } 13247417cfdeSKuriakose Kuruvilla if (cp->cp_ecx & CPUID_INTC_ECX_SSE4_2) { 13257417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_SSE4_2); 13267417cfdeSKuriakose Kuruvilla } 13277417cfdeSKuriakose Kuruvilla if (cp->cp_ecx & CPUID_INTC_ECX_AES) { 13287417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_AES); 13297417cfdeSKuriakose Kuruvilla } 13307417cfdeSKuriakose Kuruvilla if (cp->cp_ecx & CPUID_INTC_ECX_PCLMULQDQ) { 13317417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_PCLMULQDQ); 1332d0f8ff6eSkk208521 } 13337af88ac7SKuriakose Kuruvilla 13347af88ac7SKuriakose Kuruvilla if (cp->cp_ecx & CPUID_INTC_ECX_XSAVE) { 13357af88ac7SKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_XSAVE); 1336ebb8ac07SRobert Mustacchi 13377af88ac7SKuriakose Kuruvilla /* We only test AVX when there is XSAVE */ 13387af88ac7SKuriakose Kuruvilla if (cp->cp_ecx & CPUID_INTC_ECX_AVX) { 13397af88ac7SKuriakose Kuruvilla add_x86_feature(featureset, 13407af88ac7SKuriakose Kuruvilla X86FSET_AVX); 1341ebb8ac07SRobert Mustacchi 1342245ac945SRobert Mustacchi /* 1343245ac945SRobert Mustacchi * Intel says we can't check these without also 1344245ac945SRobert Mustacchi * checking AVX. 
1345245ac945SRobert Mustacchi */ 1346ebb8ac07SRobert Mustacchi if (cp->cp_ecx & CPUID_INTC_ECX_F16C) 1347ebb8ac07SRobert Mustacchi add_x86_feature(featureset, 1348ebb8ac07SRobert Mustacchi X86FSET_F16C); 1349245ac945SRobert Mustacchi 1350245ac945SRobert Mustacchi if (cp->cp_ecx & CPUID_INTC_ECX_FMA) 1351245ac945SRobert Mustacchi add_x86_feature(featureset, 1352245ac945SRobert Mustacchi X86FSET_FMA); 1353245ac945SRobert Mustacchi 1354245ac945SRobert Mustacchi if (cpi->cpi_std[7].cp_ebx & 1355245ac945SRobert Mustacchi CPUID_INTC_EBX_7_0_BMI1) 1356245ac945SRobert Mustacchi add_x86_feature(featureset, 1357245ac945SRobert Mustacchi X86FSET_BMI1); 1358245ac945SRobert Mustacchi 1359245ac945SRobert Mustacchi if (cpi->cpi_std[7].cp_ebx & 1360245ac945SRobert Mustacchi CPUID_INTC_EBX_7_0_BMI2) 1361245ac945SRobert Mustacchi add_x86_feature(featureset, 1362245ac945SRobert Mustacchi X86FSET_BMI2); 1363245ac945SRobert Mustacchi 1364245ac945SRobert Mustacchi if (cpi->cpi_std[7].cp_ebx & 1365245ac945SRobert Mustacchi CPUID_INTC_EBX_7_0_AVX2) 1366245ac945SRobert Mustacchi add_x86_feature(featureset, 1367245ac945SRobert Mustacchi X86FSET_AVX2); 13687af88ac7SKuriakose Kuruvilla } 13697af88ac7SKuriakose Kuruvilla } 13707c478bd9Sstevel@tonic-gate } 13716eedf6a5SJosef 'Jeff' Sipek if (cp->cp_ecx & CPUID_INTC_ECX_X2APIC) { 13726eedf6a5SJosef 'Jeff' Sipek add_x86_feature(featureset, X86FSET_X2APIC); 13736eedf6a5SJosef 'Jeff' Sipek } 13747417cfdeSKuriakose Kuruvilla if (cp->cp_edx & CPUID_INTC_EDX_DE) { 13757417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_DE); 13767417cfdeSKuriakose Kuruvilla } 13771d1a3942SBill Holler #if !defined(__xpv) 1378f98fbcecSbholler if (cp->cp_ecx & CPUID_INTC_ECX_MON) { 13791d1a3942SBill Holler 13801d1a3942SBill Holler /* 13811d1a3942SBill Holler * We require the CLFLUSH instruction for erratum workaround 13821d1a3942SBill Holler * to use MONITOR/MWAIT. 13831d1a3942SBill Holler */ 13841d1a3942SBill Holler if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) { 1385f98fbcecSbholler cpi->cpi_mwait.support |= MWAIT_SUPPORT; 13867417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_MWAIT); 13871d1a3942SBill Holler } else { 13881d1a3942SBill Holler extern int idle_cpu_assert_cflush_monitor; 13891d1a3942SBill Holler 13901d1a3942SBill Holler /* 13911d1a3942SBill Holler * All processors we are aware of which have 13921d1a3942SBill Holler * MONITOR/MWAIT also have CLFLUSH. 13931d1a3942SBill Holler */ 13941d1a3942SBill Holler if (idle_cpu_assert_cflush_monitor) { 13951d1a3942SBill Holler ASSERT((cp->cp_ecx & CPUID_INTC_ECX_MON) && 13961d1a3942SBill Holler (cp->cp_edx & CPUID_INTC_EDX_CLFSH)); 1397f98fbcecSbholler } 13981d1a3942SBill Holler } 13991d1a3942SBill Holler } 14001d1a3942SBill Holler #endif /* __xpv */ 14017c478bd9Sstevel@tonic-gate 1402faa20166SBryan Cantrill if (cp->cp_ecx & CPUID_INTC_ECX_VMX) { 1403faa20166SBryan Cantrill add_x86_feature(featureset, X86FSET_VMX); 1404faa20166SBryan Cantrill } 1405faa20166SBryan Cantrill 1406ebb8ac07SRobert Mustacchi if (cp->cp_ecx & CPUID_INTC_ECX_RDRAND) 1407ebb8ac07SRobert Mustacchi add_x86_feature(featureset, X86FSET_RDRAND); 1408ebb8ac07SRobert Mustacchi 140986c1f4dcSVikram Hegde /* 1410faa20166SBryan Cantrill * Only need it first time, rest of the cpus would follow suit. 141186c1f4dcSVikram Hegde * we only capture this for the bootcpu. 
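 * (Leaf 1 %ebx bits 15:8 report the CLFLUSH line size in 8-byte
 * units, which is why the computation below multiplies by 8; a
 * reported value of 8 corresponds to the common 64-byte line.)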
141286c1f4dcSVikram Hegde */ 141386c1f4dcSVikram Hegde if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) { 14147417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_CLFSH); 141586c1f4dcSVikram Hegde x86_clflush_size = (BITX(cp->cp_ebx, 15, 8) * 8); 141686c1f4dcSVikram Hegde } 14177417cfdeSKuriakose Kuruvilla if (is_x86_feature(featureset, X86FSET_PAE)) 14187c478bd9Sstevel@tonic-gate cpi->cpi_pabits = 36; 14197c478bd9Sstevel@tonic-gate 14207c478bd9Sstevel@tonic-gate /* 14217c478bd9Sstevel@tonic-gate * Hyperthreading configuration is slightly tricky on Intel 14227c478bd9Sstevel@tonic-gate * and pure clones, and even trickier on AMD. 14237c478bd9Sstevel@tonic-gate * 14247c478bd9Sstevel@tonic-gate * (AMD chose to set the HTT bit on their CMP processors, 14257c478bd9Sstevel@tonic-gate * even though they're not actually hyperthreaded. Thus it 14267c478bd9Sstevel@tonic-gate * takes a bit more work to figure out what's really going 1427ae115bc7Smrj * on ... see the handling of the CMP_LGCY bit below) 14287c478bd9Sstevel@tonic-gate */ 14297c478bd9Sstevel@tonic-gate if (cp->cp_edx & CPUID_INTC_EDX_HTT) { 14307c478bd9Sstevel@tonic-gate cpi->cpi_ncpu_per_chip = CPI_CPU_COUNT(cpi); 14317c478bd9Sstevel@tonic-gate if (cpi->cpi_ncpu_per_chip > 1) 14327417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_HTT); 14338949bcd6Sandrei } else { 14348949bcd6Sandrei cpi->cpi_ncpu_per_chip = 1; 14357c478bd9Sstevel@tonic-gate } 14367c478bd9Sstevel@tonic-gate 14377c478bd9Sstevel@tonic-gate /* 14387c478bd9Sstevel@tonic-gate * Work on the "extended" feature information, doing 14397c478bd9Sstevel@tonic-gate * some basic initialization for cpuid_pass2() 14407c478bd9Sstevel@tonic-gate */ 14417c478bd9Sstevel@tonic-gate xcpuid = 0; 14427c478bd9Sstevel@tonic-gate switch (cpi->cpi_vendor) { 14437c478bd9Sstevel@tonic-gate case X86_VENDOR_Intel: 14445ff02082Sdmick if (IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf) 14457c478bd9Sstevel@tonic-gate xcpuid++; 14467c478bd9Sstevel@tonic-gate break; 14477c478bd9Sstevel@tonic-gate case X86_VENDOR_AMD: 14487c478bd9Sstevel@tonic-gate if (cpi->cpi_family > 5 || 14497c478bd9Sstevel@tonic-gate (cpi->cpi_family == 5 && cpi->cpi_model >= 1)) 14507c478bd9Sstevel@tonic-gate xcpuid++; 14517c478bd9Sstevel@tonic-gate break; 14527c478bd9Sstevel@tonic-gate case X86_VENDOR_Cyrix: 14537c478bd9Sstevel@tonic-gate /* 14547c478bd9Sstevel@tonic-gate * Only these Cyrix CPUs are -known- to support 14557c478bd9Sstevel@tonic-gate * extended cpuid operations. 
14567c478bd9Sstevel@tonic-gate */ 14577c478bd9Sstevel@tonic-gate if (x86_type == X86_TYPE_VIA_CYRIX_III || 14587c478bd9Sstevel@tonic-gate x86_type == X86_TYPE_CYRIX_GXm) 14597c478bd9Sstevel@tonic-gate xcpuid++; 14607c478bd9Sstevel@tonic-gate break; 14617c478bd9Sstevel@tonic-gate case X86_VENDOR_Centaur: 14627c478bd9Sstevel@tonic-gate case X86_VENDOR_TM: 14637c478bd9Sstevel@tonic-gate default: 14647c478bd9Sstevel@tonic-gate xcpuid++; 14657c478bd9Sstevel@tonic-gate break; 14667c478bd9Sstevel@tonic-gate } 14677c478bd9Sstevel@tonic-gate 14687c478bd9Sstevel@tonic-gate if (xcpuid) { 14697c478bd9Sstevel@tonic-gate cp = &cpi->cpi_extd[0]; 14708949bcd6Sandrei cp->cp_eax = 0x80000000; 14718949bcd6Sandrei cpi->cpi_xmaxeax = __cpuid_insn(cp); 14727c478bd9Sstevel@tonic-gate } 14737c478bd9Sstevel@tonic-gate 14747c478bd9Sstevel@tonic-gate if (cpi->cpi_xmaxeax & 0x80000000) { 14757c478bd9Sstevel@tonic-gate 14767c478bd9Sstevel@tonic-gate if (cpi->cpi_xmaxeax > CPI_XMAXEAX_MAX) 14777c478bd9Sstevel@tonic-gate cpi->cpi_xmaxeax = CPI_XMAXEAX_MAX; 14787c478bd9Sstevel@tonic-gate 14797c478bd9Sstevel@tonic-gate switch (cpi->cpi_vendor) { 14807c478bd9Sstevel@tonic-gate case X86_VENDOR_Intel: 14817c478bd9Sstevel@tonic-gate case X86_VENDOR_AMD: 14827c478bd9Sstevel@tonic-gate if (cpi->cpi_xmaxeax < 0x80000001) 14837c478bd9Sstevel@tonic-gate break; 14847c478bd9Sstevel@tonic-gate cp = &cpi->cpi_extd[1]; 14858949bcd6Sandrei cp->cp_eax = 0x80000001; 14868949bcd6Sandrei (void) __cpuid_insn(cp); 1487ae115bc7Smrj 14887c478bd9Sstevel@tonic-gate if (cpi->cpi_vendor == X86_VENDOR_AMD && 14897c478bd9Sstevel@tonic-gate cpi->cpi_family == 5 && 14907c478bd9Sstevel@tonic-gate cpi->cpi_model == 6 && 14917c478bd9Sstevel@tonic-gate cpi->cpi_step == 6) { 14927c478bd9Sstevel@tonic-gate /* 14937c478bd9Sstevel@tonic-gate * K6 model 6 uses bit 10 to indicate SYSC 14947c478bd9Sstevel@tonic-gate * Later models use bit 11. Fix it here. 14957c478bd9Sstevel@tonic-gate */ 14967c478bd9Sstevel@tonic-gate if (cp->cp_edx & 0x400) { 14977c478bd9Sstevel@tonic-gate cp->cp_edx &= ~0x400; 14987c478bd9Sstevel@tonic-gate cp->cp_edx |= CPUID_AMD_EDX_SYSC; 14997c478bd9Sstevel@tonic-gate } 15007c478bd9Sstevel@tonic-gate } 15017c478bd9Sstevel@tonic-gate 1502ae115bc7Smrj platform_cpuid_mangle(cpi->cpi_vendor, 0x80000001, cp); 1503ae115bc7Smrj 15047c478bd9Sstevel@tonic-gate /* 15057c478bd9Sstevel@tonic-gate * Compute the additions to the kernel's feature word. 15067c478bd9Sstevel@tonic-gate */ 15077417cfdeSKuriakose Kuruvilla if (cp->cp_edx & CPUID_AMD_EDX_NX) { 15087417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_NX); 15097417cfdeSKuriakose Kuruvilla } 15107c478bd9Sstevel@tonic-gate 151119397407SSherry Moore /* 151219397407SSherry Moore * Regardless whether or not we boot 64-bit, 151319397407SSherry Moore * we should have a way to identify whether 151419397407SSherry Moore * the CPU is capable of running 64-bit. 
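 * (The long mode indication is bit 29 of %edx in extended leaf
 * 0x80000001 on both AMD and Intel parts.)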
151519397407SSherry Moore */ 15167417cfdeSKuriakose Kuruvilla if (cp->cp_edx & CPUID_AMD_EDX_LM) { 15177417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_64); 15187417cfdeSKuriakose Kuruvilla } 151919397407SSherry Moore 152002bc52beSkchow #if defined(__amd64) 152102bc52beSkchow /* 1 GB large page - enable only for 64 bit kernel */ 15227417cfdeSKuriakose Kuruvilla if (cp->cp_edx & CPUID_AMD_EDX_1GPG) { 15237417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_1GPG); 15247417cfdeSKuriakose Kuruvilla } 152502bc52beSkchow #endif 152602bc52beSkchow 1527f8801251Skk208521 if ((cpi->cpi_vendor == X86_VENDOR_AMD) && 1528f8801251Skk208521 (cpi->cpi_std[1].cp_edx & CPUID_INTC_EDX_FXSR) && 15297417cfdeSKuriakose Kuruvilla (cp->cp_ecx & CPUID_AMD_ECX_SSE4A)) { 15307417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_SSE4A); 15317417cfdeSKuriakose Kuruvilla } 1532f8801251Skk208521 15337c478bd9Sstevel@tonic-gate /* 1534ae115bc7Smrj * If both the HTT and CMP_LGCY bits are set, 15358949bcd6Sandrei * then we're not actually HyperThreaded. Read 15368949bcd6Sandrei * "AMD CPUID Specification" for more details. 15377c478bd9Sstevel@tonic-gate */ 15387c478bd9Sstevel@tonic-gate if (cpi->cpi_vendor == X86_VENDOR_AMD && 15397417cfdeSKuriakose Kuruvilla is_x86_feature(featureset, X86FSET_HTT) && 1540ae115bc7Smrj (cp->cp_ecx & CPUID_AMD_ECX_CMP_LGCY)) { 15417417cfdeSKuriakose Kuruvilla remove_x86_feature(featureset, X86FSET_HTT); 15427417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_CMP); 15438949bcd6Sandrei } 1544ae115bc7Smrj #if defined(__amd64) 15457c478bd9Sstevel@tonic-gate /* 15467c478bd9Sstevel@tonic-gate * It's really tricky to support syscall/sysret in 15477c478bd9Sstevel@tonic-gate * the i386 kernel; we rely on sysenter/sysexit 15487c478bd9Sstevel@tonic-gate * instead. In the amd64 kernel, things are -way- 15497c478bd9Sstevel@tonic-gate * better. 15507c478bd9Sstevel@tonic-gate */ 15517417cfdeSKuriakose Kuruvilla if (cp->cp_edx & CPUID_AMD_EDX_SYSC) { 15527417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_ASYSC); 15537417cfdeSKuriakose Kuruvilla } 15547c478bd9Sstevel@tonic-gate 15557c478bd9Sstevel@tonic-gate /* 15567c478bd9Sstevel@tonic-gate * While we're thinking about system calls, note 15577c478bd9Sstevel@tonic-gate * that AMD processors don't support sysenter 15587c478bd9Sstevel@tonic-gate * in long mode at all, so don't try to program them. 15597c478bd9Sstevel@tonic-gate */ 15607417cfdeSKuriakose Kuruvilla if (x86_vendor == X86_VENDOR_AMD) { 15617417cfdeSKuriakose Kuruvilla remove_x86_feature(featureset, X86FSET_SEP); 15627417cfdeSKuriakose Kuruvilla } 15637c478bd9Sstevel@tonic-gate #endif 15647417cfdeSKuriakose Kuruvilla if (cp->cp_edx & CPUID_AMD_EDX_TSCP) { 15657417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_TSCP); 15667417cfdeSKuriakose Kuruvilla } 1567faa20166SBryan Cantrill 1568faa20166SBryan Cantrill if (cp->cp_ecx & CPUID_AMD_ECX_SVM) { 1569faa20166SBryan Cantrill add_x86_feature(featureset, X86FSET_SVM); 1570faa20166SBryan Cantrill } 15717660e73fSHans Rosenfeld 15727660e73fSHans Rosenfeld if (cp->cp_ecx & CPUID_AMD_ECX_TOPOEXT) { 15737660e73fSHans Rosenfeld add_x86_feature(featureset, X86FSET_TOPOEXT); 15747660e73fSHans Rosenfeld } 15757c478bd9Sstevel@tonic-gate break; 15767c478bd9Sstevel@tonic-gate default: 15777c478bd9Sstevel@tonic-gate break; 15787c478bd9Sstevel@tonic-gate } 15797c478bd9Sstevel@tonic-gate 15808949bcd6Sandrei /* 15818949bcd6Sandrei * Get CPUID data about processor cores and hyperthreads. 
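 * On Intel this comes from leaf 4, where %eax bits 31:26 hold one
 * less than the maximum number of addressable core IDs per package;
 * on AMD it comes from extended leaf 0x80000008, where %ecx bits 7:0
 * ("NC") hold one less than the core count. Both are handled below.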
15828949bcd6Sandrei */ 15837c478bd9Sstevel@tonic-gate switch (cpi->cpi_vendor) { 15847c478bd9Sstevel@tonic-gate case X86_VENDOR_Intel: 15858949bcd6Sandrei if (cpi->cpi_maxeax >= 4) { 15868949bcd6Sandrei cp = &cpi->cpi_std[4]; 15878949bcd6Sandrei cp->cp_eax = 4; 15888949bcd6Sandrei cp->cp_ecx = 0; 15898949bcd6Sandrei (void) __cpuid_insn(cp); 1590ae115bc7Smrj platform_cpuid_mangle(cpi->cpi_vendor, 4, cp); 15918949bcd6Sandrei } 15928949bcd6Sandrei /*FALLTHROUGH*/ 15937c478bd9Sstevel@tonic-gate case X86_VENDOR_AMD: 15947c478bd9Sstevel@tonic-gate if (cpi->cpi_xmaxeax < 0x80000008) 15957c478bd9Sstevel@tonic-gate break; 15967c478bd9Sstevel@tonic-gate cp = &cpi->cpi_extd[8]; 15978949bcd6Sandrei cp->cp_eax = 0x80000008; 15988949bcd6Sandrei (void) __cpuid_insn(cp); 1599ae115bc7Smrj platform_cpuid_mangle(cpi->cpi_vendor, 0x80000008, cp); 1600ae115bc7Smrj 16017c478bd9Sstevel@tonic-gate /* 16027c478bd9Sstevel@tonic-gate * Virtual and physical address limits from 16037c478bd9Sstevel@tonic-gate * cpuid override previously guessed values. 16047c478bd9Sstevel@tonic-gate */ 16057c478bd9Sstevel@tonic-gate cpi->cpi_pabits = BITX(cp->cp_eax, 7, 0); 16067c478bd9Sstevel@tonic-gate cpi->cpi_vabits = BITX(cp->cp_eax, 15, 8); 16077c478bd9Sstevel@tonic-gate break; 16087c478bd9Sstevel@tonic-gate default: 16097c478bd9Sstevel@tonic-gate break; 16107c478bd9Sstevel@tonic-gate } 16118949bcd6Sandrei 1612d129bde2Sesaxe /* 1613d129bde2Sesaxe * Derive the number of cores per chip 1614d129bde2Sesaxe */ 16158949bcd6Sandrei switch (cpi->cpi_vendor) { 16168949bcd6Sandrei case X86_VENDOR_Intel: 16178949bcd6Sandrei if (cpi->cpi_maxeax < 4) { 16188949bcd6Sandrei cpi->cpi_ncore_per_chip = 1; 16198949bcd6Sandrei break; 16208949bcd6Sandrei } else { 16218949bcd6Sandrei cpi->cpi_ncore_per_chip = 16228949bcd6Sandrei BITX((cpi)->cpi_std[4].cp_eax, 31, 26) + 1; 16238949bcd6Sandrei } 16248949bcd6Sandrei break; 16258949bcd6Sandrei case X86_VENDOR_AMD: 16268949bcd6Sandrei if (cpi->cpi_xmaxeax < 0x80000008) { 16278949bcd6Sandrei cpi->cpi_ncore_per_chip = 1; 16288949bcd6Sandrei break; 16298949bcd6Sandrei } else { 163010569901Sgavinm /* 163110569901Sgavinm * On family 0xf cpuid fn 2 ECX[7:0] "NC" is 163210569901Sgavinm * 1 less than the number of physical cores on 163310569901Sgavinm * the chip. In family 0x10 this value can 163410569901Sgavinm * be affected by "downcoring" - it reflects 163510569901Sgavinm * 1 less than the number of cores actually 163610569901Sgavinm * enabled on this node. 163710569901Sgavinm */ 16388949bcd6Sandrei cpi->cpi_ncore_per_chip = 16398949bcd6Sandrei BITX((cpi)->cpi_extd[8].cp_ecx, 7, 0) + 1; 16408949bcd6Sandrei } 16418949bcd6Sandrei break; 16428949bcd6Sandrei default: 16438949bcd6Sandrei cpi->cpi_ncore_per_chip = 1; 16448949bcd6Sandrei break; 16457c478bd9Sstevel@tonic-gate } 16460e751525SEric Saxe 16470e751525SEric Saxe /* 16480e751525SEric Saxe * Get CPUID data about TSC Invariance in Deep C-State. 
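 * (Extended leaf 0x80000007 advertises an invariant TSC via bit 8 of
 * %edx; the raw registers are simply stashed in cpi_extd[7] here.)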
16490e751525SEric Saxe */ 16500e751525SEric Saxe switch (cpi->cpi_vendor) { 16510e751525SEric Saxe case X86_VENDOR_Intel: 16520e751525SEric Saxe if (cpi->cpi_maxeax >= 7) { 16530e751525SEric Saxe cp = &cpi->cpi_extd[7]; 16540e751525SEric Saxe cp->cp_eax = 0x80000007; 16550e751525SEric Saxe cp->cp_ecx = 0; 16560e751525SEric Saxe (void) __cpuid_insn(cp); 16570e751525SEric Saxe } 16580e751525SEric Saxe break; 16590e751525SEric Saxe default: 16600e751525SEric Saxe break; 16610e751525SEric Saxe } 1662fa2e767eSgavinm } else { 1663fa2e767eSgavinm cpi->cpi_ncore_per_chip = 1; 16648949bcd6Sandrei } 16658949bcd6Sandrei 16668949bcd6Sandrei /* 16678949bcd6Sandrei * If more than one core, then this processor is CMP. 16688949bcd6Sandrei */ 16697417cfdeSKuriakose Kuruvilla if (cpi->cpi_ncore_per_chip > 1) { 16707417cfdeSKuriakose Kuruvilla add_x86_feature(featureset, X86FSET_CMP); 16717417cfdeSKuriakose Kuruvilla } 1672ae115bc7Smrj 16738949bcd6Sandrei /* 16748949bcd6Sandrei * If the number of cores is the same as the number 16758949bcd6Sandrei * of CPUs, then we cannot have HyperThreading. 16768949bcd6Sandrei */ 16777417cfdeSKuriakose Kuruvilla if (cpi->cpi_ncpu_per_chip == cpi->cpi_ncore_per_chip) { 16787417cfdeSKuriakose Kuruvilla remove_x86_feature(featureset, X86FSET_HTT); 16797417cfdeSKuriakose Kuruvilla } 16808949bcd6Sandrei 16818031591dSSrihari Venkatesan cpi->cpi_apicid = CPI_APIC_ID(cpi); 16828031591dSSrihari Venkatesan cpi->cpi_procnodes_per_pkg = 1; 16837660e73fSHans Rosenfeld cpi->cpi_cores_per_compunit = 1; 16847417cfdeSKuriakose Kuruvilla if (is_x86_feature(featureset, X86FSET_HTT) == B_FALSE && 16857417cfdeSKuriakose Kuruvilla is_x86_feature(featureset, X86FSET_CMP) == B_FALSE) { 16868949bcd6Sandrei /* 16878949bcd6Sandrei * Single-core single-threaded processors. 16888949bcd6Sandrei */ 16897c478bd9Sstevel@tonic-gate cpi->cpi_chipid = -1; 16907c478bd9Sstevel@tonic-gate cpi->cpi_clogid = 0; 16918949bcd6Sandrei cpi->cpi_coreid = cpu->cpu_id; 169210569901Sgavinm cpi->cpi_pkgcoreid = 0; 16938031591dSSrihari Venkatesan if (cpi->cpi_vendor == X86_VENDOR_AMD) 16948031591dSSrihari Venkatesan cpi->cpi_procnodeid = BITX(cpi->cpi_apicid, 3, 0); 16958031591dSSrihari Venkatesan else 16968031591dSSrihari Venkatesan cpi->cpi_procnodeid = cpi->cpi_chipid; 16977c478bd9Sstevel@tonic-gate } else if (cpi->cpi_ncpu_per_chip > 1) { 16988031591dSSrihari Venkatesan if (cpi->cpi_vendor == X86_VENDOR_Intel) 16997417cfdeSKuriakose Kuruvilla cpuid_intel_getids(cpu, featureset); 17008031591dSSrihari Venkatesan else if (cpi->cpi_vendor == X86_VENDOR_AMD) 17018031591dSSrihari Venkatesan cpuid_amd_getids(cpu); 17028031591dSSrihari Venkatesan else { 17038949bcd6Sandrei /* 17048949bcd6Sandrei * All other processors are currently 17058949bcd6Sandrei * assumed to have single cores. 
17068949bcd6Sandrei */ 17078949bcd6Sandrei cpi->cpi_coreid = cpi->cpi_chipid; 170810569901Sgavinm cpi->cpi_pkgcoreid = 0; 17098031591dSSrihari Venkatesan cpi->cpi_procnodeid = cpi->cpi_chipid; 17107660e73fSHans Rosenfeld cpi->cpi_compunitid = cpi->cpi_chipid; 17118949bcd6Sandrei } 17127c478bd9Sstevel@tonic-gate } 17137c478bd9Sstevel@tonic-gate 17148a40a695Sgavinm /* 17158a40a695Sgavinm * Synthesize chip "revision" and socket type 17168a40a695Sgavinm */ 1717e4b86885SCheng Sean Ye cpi->cpi_chiprev = _cpuid_chiprev(cpi->cpi_vendor, cpi->cpi_family, 1718e4b86885SCheng Sean Ye cpi->cpi_model, cpi->cpi_step); 1719e4b86885SCheng Sean Ye cpi->cpi_chiprevstr = _cpuid_chiprevstr(cpi->cpi_vendor, 1720e4b86885SCheng Sean Ye cpi->cpi_family, cpi->cpi_model, cpi->cpi_step); 1721e4b86885SCheng Sean Ye cpi->cpi_socket = _cpuid_skt(cpi->cpi_vendor, cpi->cpi_family, 1722e4b86885SCheng Sean Ye cpi->cpi_model, cpi->cpi_step); 17238a40a695Sgavinm 17247c478bd9Sstevel@tonic-gate pass1_done: 17257c478bd9Sstevel@tonic-gate cpi->cpi_pass = 1; 17267c478bd9Sstevel@tonic-gate } 17277c478bd9Sstevel@tonic-gate 17287c478bd9Sstevel@tonic-gate /* 17297c478bd9Sstevel@tonic-gate * Make copies of the cpuid table entries we depend on, in 17307c478bd9Sstevel@tonic-gate * part for ease of parsing now, in part so that we have only 17317c478bd9Sstevel@tonic-gate * one place to correct any of it, in part for ease of 17327c478bd9Sstevel@tonic-gate * later export to userland, and in part so we can look at 17337c478bd9Sstevel@tonic-gate * this stuff in a crash dump. 17347c478bd9Sstevel@tonic-gate */ 17357c478bd9Sstevel@tonic-gate 17367c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 17377c478bd9Sstevel@tonic-gate void 17387c478bd9Sstevel@tonic-gate cpuid_pass2(cpu_t *cpu) 17397c478bd9Sstevel@tonic-gate { 17407c478bd9Sstevel@tonic-gate uint_t n, nmax; 17417c478bd9Sstevel@tonic-gate int i; 17428949bcd6Sandrei struct cpuid_regs *cp; 17437c478bd9Sstevel@tonic-gate uint8_t *dp; 17447c478bd9Sstevel@tonic-gate uint32_t *iptr; 17457c478bd9Sstevel@tonic-gate struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; 17467c478bd9Sstevel@tonic-gate 17477c478bd9Sstevel@tonic-gate ASSERT(cpi->cpi_pass == 1); 17487c478bd9Sstevel@tonic-gate 17497c478bd9Sstevel@tonic-gate if (cpi->cpi_maxeax < 1) 17507c478bd9Sstevel@tonic-gate goto pass2_done; 17517c478bd9Sstevel@tonic-gate 17527c478bd9Sstevel@tonic-gate if ((nmax = cpi->cpi_maxeax + 1) > NMAX_CPI_STD) 17537c478bd9Sstevel@tonic-gate nmax = NMAX_CPI_STD; 17547c478bd9Sstevel@tonic-gate /* 17557c478bd9Sstevel@tonic-gate * (We already handled n == 0 and n == 1 in pass 1) 17567c478bd9Sstevel@tonic-gate */ 17577c478bd9Sstevel@tonic-gate for (n = 2, cp = &cpi->cpi_std[2]; n < nmax; n++, cp++) { 17588949bcd6Sandrei cp->cp_eax = n; 1759d129bde2Sesaxe 1760d129bde2Sesaxe /* 1761d129bde2Sesaxe * CPUID function 4 expects %ecx to be initialized 1762d129bde2Sesaxe * with an index which indicates which cache to return 1763d129bde2Sesaxe * information about. The OS is expected to call function 4 1764d129bde2Sesaxe * with %ecx set to 0, 1, 2, ... until it returns with 1765d129bde2Sesaxe * EAX[4:0] set to 0, which indicates there are no more 1766d129bde2Sesaxe * caches. 1767d129bde2Sesaxe * 1768d129bde2Sesaxe * Here, populate cpi_std[4] with the information returned by 1769d129bde2Sesaxe * function 4 when %ecx == 0, and do the rest in cpuid_pass3() 1770d129bde2Sesaxe * when dynamic memory allocation becomes available. 
1771d129bde2Sesaxe * 1772d129bde2Sesaxe * Note: we need to explicitly initialize %ecx here, since 1773d129bde2Sesaxe * function 4 may have been previously invoked. 1774d129bde2Sesaxe */ 1775d129bde2Sesaxe if (n == 4) 1776d129bde2Sesaxe cp->cp_ecx = 0; 1777d129bde2Sesaxe 17788949bcd6Sandrei (void) __cpuid_insn(cp); 1779ae115bc7Smrj platform_cpuid_mangle(cpi->cpi_vendor, n, cp); 17807c478bd9Sstevel@tonic-gate switch (n) { 17817c478bd9Sstevel@tonic-gate case 2: 17827c478bd9Sstevel@tonic-gate /* 17837c478bd9Sstevel@tonic-gate * "the lower 8 bits of the %eax register 17847c478bd9Sstevel@tonic-gate * contain a value that identifies the number 17857c478bd9Sstevel@tonic-gate * of times the cpuid [instruction] has to be 17867c478bd9Sstevel@tonic-gate * executed to obtain a complete image of the 17877c478bd9Sstevel@tonic-gate * processor's caching systems." 17887c478bd9Sstevel@tonic-gate * 17897c478bd9Sstevel@tonic-gate * How *do* they make this stuff up? 17907c478bd9Sstevel@tonic-gate */ 17917c478bd9Sstevel@tonic-gate cpi->cpi_ncache = sizeof (*cp) * 17927c478bd9Sstevel@tonic-gate BITX(cp->cp_eax, 7, 0); 17937c478bd9Sstevel@tonic-gate if (cpi->cpi_ncache == 0) 17947c478bd9Sstevel@tonic-gate break; 17957c478bd9Sstevel@tonic-gate cpi->cpi_ncache--; /* skip count byte */ 17967c478bd9Sstevel@tonic-gate 17977c478bd9Sstevel@tonic-gate /* 17987c478bd9Sstevel@tonic-gate * Well, for now, rather than attempt to implement 17997c478bd9Sstevel@tonic-gate * this slightly dubious algorithm, we just look 18007c478bd9Sstevel@tonic-gate * at the first 15 .. 18017c478bd9Sstevel@tonic-gate */ 18027c478bd9Sstevel@tonic-gate if (cpi->cpi_ncache > (sizeof (*cp) - 1)) 18037c478bd9Sstevel@tonic-gate cpi->cpi_ncache = sizeof (*cp) - 1; 18047c478bd9Sstevel@tonic-gate 18057c478bd9Sstevel@tonic-gate dp = cpi->cpi_cacheinfo; 18067c478bd9Sstevel@tonic-gate if (BITX(cp->cp_eax, 31, 31) == 0) { 18077c478bd9Sstevel@tonic-gate uint8_t *p = (void *)&cp->cp_eax; 180863d3f7dfSkk208521 for (i = 1; i < 4; i++) 18097c478bd9Sstevel@tonic-gate if (p[i] != 0) 18107c478bd9Sstevel@tonic-gate *dp++ = p[i]; 18117c478bd9Sstevel@tonic-gate } 18127c478bd9Sstevel@tonic-gate if (BITX(cp->cp_ebx, 31, 31) == 0) { 18137c478bd9Sstevel@tonic-gate uint8_t *p = (void *)&cp->cp_ebx; 18147c478bd9Sstevel@tonic-gate for (i = 0; i < 4; i++) 18157c478bd9Sstevel@tonic-gate if (p[i] != 0) 18167c478bd9Sstevel@tonic-gate *dp++ = p[i]; 18177c478bd9Sstevel@tonic-gate } 18187c478bd9Sstevel@tonic-gate if (BITX(cp->cp_ecx, 31, 31) == 0) { 18197c478bd9Sstevel@tonic-gate uint8_t *p = (void *)&cp->cp_ecx; 18207c478bd9Sstevel@tonic-gate for (i = 0; i < 4; i++) 18217c478bd9Sstevel@tonic-gate if (p[i] != 0) 18227c478bd9Sstevel@tonic-gate *dp++ = p[i]; 18237c478bd9Sstevel@tonic-gate } 18247c478bd9Sstevel@tonic-gate if (BITX(cp->cp_edx, 31, 31) == 0) { 18257c478bd9Sstevel@tonic-gate uint8_t *p = (void *)&cp->cp_edx; 18267c478bd9Sstevel@tonic-gate for (i = 0; i < 4; i++) 18277c478bd9Sstevel@tonic-gate if (p[i] != 0) 18287c478bd9Sstevel@tonic-gate *dp++ = p[i]; 18297c478bd9Sstevel@tonic-gate } 18307c478bd9Sstevel@tonic-gate break; 1831f98fbcecSbholler 18327c478bd9Sstevel@tonic-gate case 3: /* Processor serial number, if PSN supported */ 1833f98fbcecSbholler break; 1834f98fbcecSbholler 18357c478bd9Sstevel@tonic-gate case 4: /* Deterministic cache parameters */ 1836f98fbcecSbholler break; 1837f98fbcecSbholler 18387c478bd9Sstevel@tonic-gate case 5: /* Monitor/Mwait parameters */ 18395b8a6efeSbholler { 18405b8a6efeSbholler size_t mwait_size; 1841f98fbcecSbholler 1842f98fbcecSbholler /* 
1843f98fbcecSbholler 			 * Check cpi_mwait.support, which was set in cpuid_pass1.
1844f98fbcecSbholler 			 */
1845f98fbcecSbholler 			if (!(cpi->cpi_mwait.support & MWAIT_SUPPORT))
1846f98fbcecSbholler 				break;
1847f98fbcecSbholler 
18485b8a6efeSbholler 			/*
18495b8a6efeSbholler 			 * Protect ourselves from an insane mwait line size.
18505b8a6efeSbholler 			 * Workaround for incomplete hardware emulator(s).
18515b8a6efeSbholler 			 */
18525b8a6efeSbholler 			mwait_size = (size_t)MWAIT_SIZE_MAX(cpi);
18535b8a6efeSbholler 			if (mwait_size < sizeof (uint32_t) ||
18545b8a6efeSbholler 			    !ISP2(mwait_size)) {
18555b8a6efeSbholler #if DEBUG
18565b8a6efeSbholler 				cmn_err(CE_NOTE, "Cannot handle cpu %d mwait "
18575d8efbbcSSaurabh Misra 				    "size %ld", cpu->cpu_id, (long)mwait_size);
18585b8a6efeSbholler #endif
18595b8a6efeSbholler 				break;
18605b8a6efeSbholler 			}
18615b8a6efeSbholler 
1862f98fbcecSbholler 			cpi->cpi_mwait.mon_min = (size_t)MWAIT_SIZE_MIN(cpi);
18635b8a6efeSbholler 			cpi->cpi_mwait.mon_max = mwait_size;
1864f98fbcecSbholler 			if (MWAIT_EXTENSION(cpi)) {
1865f98fbcecSbholler 				cpi->cpi_mwait.support |= MWAIT_EXTENSIONS;
1866f98fbcecSbholler 				if (MWAIT_INT_ENABLE(cpi))
1867f98fbcecSbholler 					cpi->cpi_mwait.support |=
1868f98fbcecSbholler 					    MWAIT_ECX_INT_ENABLE;
1869f98fbcecSbholler 			}
1870f98fbcecSbholler 			break;
18715b8a6efeSbholler 		}
18727c478bd9Sstevel@tonic-gate 		default:
18737c478bd9Sstevel@tonic-gate 			break;
18747c478bd9Sstevel@tonic-gate 		}
18757c478bd9Sstevel@tonic-gate 	}
18767c478bd9Sstevel@tonic-gate 
1877b6917abeSmishra 	if (cpi->cpi_maxeax >= 0xB && cpi->cpi_vendor == X86_VENDOR_Intel) {
18785d8efbbcSSaurabh Misra 		struct cpuid_regs regs;
18795d8efbbcSSaurabh Misra 
18805d8efbbcSSaurabh Misra 		cp = &regs;
1881b6917abeSmishra 		cp->cp_eax = 0xB;
18825d8efbbcSSaurabh Misra 		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
1883b6917abeSmishra 
1884b6917abeSmishra 		(void) __cpuid_insn(cp);
1885b6917abeSmishra 
1886b6917abeSmishra 		/*
1887b6917abeSmishra 		 * Check CPUID.EAX=0BH, ECX=0H:EBX is non-zero, which
1888b6917abeSmishra 		 * indicates that the extended topology enumeration leaf is
1889b6917abeSmishra 		 * available.
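		 *
		 * Each valid subleaf of leaf 0xB describes one topology
		 * level: %ecx bits 15:8 give the level type (1 = SMT,
		 * 2 = core), %eax bits 4:0 give the number of x2APIC ID
		 * bits to shift out to reach the next level, %ebx bits
		 * 15:0 give the logical processor count at that level,
		 * and %edx returns the x2APIC ID itself; these are the
		 * fields extracted in the loop below.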
1890b6917abeSmishra 		 */
1891b6917abeSmishra 		if (cp->cp_ebx) {
1892b6917abeSmishra 			uint32_t x2apic_id;
1893b6917abeSmishra 			uint_t coreid_shift = 0;
1894b6917abeSmishra 			uint_t ncpu_per_core = 1;
1895b6917abeSmishra 			uint_t chipid_shift = 0;
1896b6917abeSmishra 			uint_t ncpu_per_chip = 1;
1897b6917abeSmishra 			uint_t i;
1898b6917abeSmishra 			uint_t level;
1899b6917abeSmishra 
1900b6917abeSmishra 			for (i = 0; i < CPI_FNB_ECX_MAX; i++) {
1901b6917abeSmishra 				cp->cp_eax = 0xB;
1902b6917abeSmishra 				cp->cp_ecx = i;
1903b6917abeSmishra 
1904b6917abeSmishra 				(void) __cpuid_insn(cp);
1905b6917abeSmishra 				level = CPI_CPU_LEVEL_TYPE(cp);
1906b6917abeSmishra 
1907b6917abeSmishra 				if (level == 1) {
1908b6917abeSmishra 					x2apic_id = cp->cp_edx;
1909b6917abeSmishra 					coreid_shift = BITX(cp->cp_eax, 4, 0);
1910b6917abeSmishra 					ncpu_per_core = BITX(cp->cp_ebx, 15, 0);
1911b6917abeSmishra 				} else if (level == 2) {
1912b6917abeSmishra 					x2apic_id = cp->cp_edx;
1913b6917abeSmishra 					chipid_shift = BITX(cp->cp_eax, 4, 0);
1914b6917abeSmishra 					ncpu_per_chip = BITX(cp->cp_ebx, 15, 0);
1915b6917abeSmishra 				}
1916b6917abeSmishra 			}
1917b6917abeSmishra 
1918b6917abeSmishra 			cpi->cpi_apicid = x2apic_id;
1919b6917abeSmishra 			cpi->cpi_ncpu_per_chip = ncpu_per_chip;
1920b6917abeSmishra 			cpi->cpi_ncore_per_chip = ncpu_per_chip /
1921b6917abeSmishra 			    ncpu_per_core;
1922b6917abeSmishra 			cpi->cpi_chipid = x2apic_id >> chipid_shift;
1923b6917abeSmishra 			cpi->cpi_clogid = x2apic_id & ((1 << chipid_shift) - 1);
1924b6917abeSmishra 			cpi->cpi_coreid = x2apic_id >> coreid_shift;
1925b6917abeSmishra 			cpi->cpi_pkgcoreid = cpi->cpi_clogid >> coreid_shift;
1926b6917abeSmishra 		}
19275d8efbbcSSaurabh Misra 
19285d8efbbcSSaurabh Misra 		/* Make cp NULL so that we don't stumble on others */
19295d8efbbcSSaurabh Misra 		cp = NULL;
1930b6917abeSmishra 	}
1931b6917abeSmishra 
19327af88ac7SKuriakose Kuruvilla 	/*
19337af88ac7SKuriakose Kuruvilla 	 * XSAVE enumeration
19347af88ac7SKuriakose Kuruvilla 	 */
193563408480SHans Rosenfeld 	if (cpi->cpi_maxeax >= 0xD) {
19367af88ac7SKuriakose Kuruvilla 		struct cpuid_regs regs;
19377af88ac7SKuriakose Kuruvilla 		boolean_t cpuid_d_valid = B_TRUE;
19387af88ac7SKuriakose Kuruvilla 
19397af88ac7SKuriakose Kuruvilla 		cp = &regs;
19407af88ac7SKuriakose Kuruvilla 		cp->cp_eax = 0xD;
19417af88ac7SKuriakose Kuruvilla 		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
19427af88ac7SKuriakose Kuruvilla 
19437af88ac7SKuriakose Kuruvilla 		(void) __cpuid_insn(cp);
19447af88ac7SKuriakose Kuruvilla 
19457af88ac7SKuriakose Kuruvilla 		/*
19467af88ac7SKuriakose Kuruvilla 		 * Sanity checks for debug
19477af88ac7SKuriakose Kuruvilla 		 */
19487af88ac7SKuriakose Kuruvilla 		if ((cp->cp_eax & XFEATURE_LEGACY_FP) == 0 ||
19497af88ac7SKuriakose Kuruvilla 		    (cp->cp_eax & XFEATURE_SSE) == 0) {
19507af88ac7SKuriakose Kuruvilla 			cpuid_d_valid = B_FALSE;
19517af88ac7SKuriakose Kuruvilla 		}
19527af88ac7SKuriakose Kuruvilla 
19537af88ac7SKuriakose Kuruvilla 		cpi->cpi_xsave.xsav_hw_features_low = cp->cp_eax;
19547af88ac7SKuriakose Kuruvilla 		cpi->cpi_xsave.xsav_hw_features_high = cp->cp_edx;
19557af88ac7SKuriakose Kuruvilla 		cpi->cpi_xsave.xsav_max_size = cp->cp_ecx;
19567af88ac7SKuriakose Kuruvilla 
19577af88ac7SKuriakose Kuruvilla 		/*
19587af88ac7SKuriakose Kuruvilla 		 * If the hw supports AVX, get the size and offset in the save
19597af88ac7SKuriakose Kuruvilla 		 * area for the ymm state.
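		 *
		 * (Leaf 0xD, subleaf 2 describes the AVX/YMM state
		 * component: %eax is its size in bytes and %ebx its offset
		 * from the start of the XSAVE area, which is cross-checked
		 * below against CPUID_LEAFD_2_YMM_SIZE/OFFSET.)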
19607af88ac7SKuriakose Kuruvilla */ 19617af88ac7SKuriakose Kuruvilla if (cpi->cpi_xsave.xsav_hw_features_low & XFEATURE_AVX) { 19627af88ac7SKuriakose Kuruvilla cp->cp_eax = 0xD; 19637af88ac7SKuriakose Kuruvilla cp->cp_ecx = 2; 19647af88ac7SKuriakose Kuruvilla cp->cp_edx = cp->cp_ebx = 0; 19657af88ac7SKuriakose Kuruvilla 19667af88ac7SKuriakose Kuruvilla (void) __cpuid_insn(cp); 19677af88ac7SKuriakose Kuruvilla 19687af88ac7SKuriakose Kuruvilla if (cp->cp_ebx != CPUID_LEAFD_2_YMM_OFFSET || 19697af88ac7SKuriakose Kuruvilla cp->cp_eax != CPUID_LEAFD_2_YMM_SIZE) { 19707af88ac7SKuriakose Kuruvilla cpuid_d_valid = B_FALSE; 19717af88ac7SKuriakose Kuruvilla } 19727af88ac7SKuriakose Kuruvilla 19737af88ac7SKuriakose Kuruvilla cpi->cpi_xsave.ymm_size = cp->cp_eax; 19747af88ac7SKuriakose Kuruvilla cpi->cpi_xsave.ymm_offset = cp->cp_ebx; 19757af88ac7SKuriakose Kuruvilla } 19767af88ac7SKuriakose Kuruvilla 19777af88ac7SKuriakose Kuruvilla if (is_x86_feature(x86_featureset, X86FSET_XSAVE)) { 19787af88ac7SKuriakose Kuruvilla xsave_state_size = 0; 19797af88ac7SKuriakose Kuruvilla } else if (cpuid_d_valid) { 19807af88ac7SKuriakose Kuruvilla xsave_state_size = cpi->cpi_xsave.xsav_max_size; 19817af88ac7SKuriakose Kuruvilla } else { 19827af88ac7SKuriakose Kuruvilla /* Broken CPUID 0xD, probably in HVM */ 19837af88ac7SKuriakose Kuruvilla cmn_err(CE_WARN, "cpu%d: CPUID.0xD returns invalid " 19847af88ac7SKuriakose Kuruvilla "value: hw_low = %d, hw_high = %d, xsave_size = %d" 19857af88ac7SKuriakose Kuruvilla ", ymm_size = %d, ymm_offset = %d\n", 19867af88ac7SKuriakose Kuruvilla cpu->cpu_id, cpi->cpi_xsave.xsav_hw_features_low, 19877af88ac7SKuriakose Kuruvilla cpi->cpi_xsave.xsav_hw_features_high, 19887af88ac7SKuriakose Kuruvilla (int)cpi->cpi_xsave.xsav_max_size, 19897af88ac7SKuriakose Kuruvilla (int)cpi->cpi_xsave.ymm_size, 19907af88ac7SKuriakose Kuruvilla (int)cpi->cpi_xsave.ymm_offset); 19917af88ac7SKuriakose Kuruvilla 19927af88ac7SKuriakose Kuruvilla if (xsave_state_size != 0) { 19937af88ac7SKuriakose Kuruvilla /* 19947af88ac7SKuriakose Kuruvilla * This must be a non-boot CPU. We cannot 19957af88ac7SKuriakose Kuruvilla * continue, because boot cpu has already 19967af88ac7SKuriakose Kuruvilla * enabled XSAVE. 19977af88ac7SKuriakose Kuruvilla */ 19987af88ac7SKuriakose Kuruvilla ASSERT(cpu->cpu_id != 0); 19997af88ac7SKuriakose Kuruvilla cmn_err(CE_PANIC, "cpu%d: we have already " 20007af88ac7SKuriakose Kuruvilla "enabled XSAVE on boot cpu, cannot " 20017af88ac7SKuriakose Kuruvilla "continue.", cpu->cpu_id); 20027af88ac7SKuriakose Kuruvilla } else { 20037af88ac7SKuriakose Kuruvilla /* 2004dcf050afSRobert Mustacchi * If we reached here on the boot CPU, it's also 2005dcf050afSRobert Mustacchi * almost certain that we'll reach here on the 2006dcf050afSRobert Mustacchi * non-boot CPUs. When we're here on a boot CPU 2007dcf050afSRobert Mustacchi * we should disable the feature, on a non-boot 2008dcf050afSRobert Mustacchi * CPU we need to confirm that we have. 
20097af88ac7SKuriakose Kuruvilla */ 2010dcf050afSRobert Mustacchi if (cpu->cpu_id == 0) { 20117af88ac7SKuriakose Kuruvilla remove_x86_feature(x86_featureset, 20127af88ac7SKuriakose Kuruvilla X86FSET_XSAVE); 2013dcf050afSRobert Mustacchi remove_x86_feature(x86_featureset, 2014dcf050afSRobert Mustacchi X86FSET_AVX); 2015245ac945SRobert Mustacchi remove_x86_feature(x86_featureset, 2016245ac945SRobert Mustacchi X86FSET_F16C); 2017245ac945SRobert Mustacchi remove_x86_feature(x86_featureset, 2018245ac945SRobert Mustacchi X86FSET_BMI1); 2019245ac945SRobert Mustacchi remove_x86_feature(x86_featureset, 2020245ac945SRobert Mustacchi X86FSET_BMI2); 2021245ac945SRobert Mustacchi remove_x86_feature(x86_featureset, 2022245ac945SRobert Mustacchi X86FSET_FMA); 2023245ac945SRobert Mustacchi remove_x86_feature(x86_featureset, 2024245ac945SRobert Mustacchi X86FSET_AVX2); 2025dcf050afSRobert Mustacchi CPI_FEATURES_ECX(cpi) &= 2026dcf050afSRobert Mustacchi ~CPUID_INTC_ECX_XSAVE; 2027dcf050afSRobert Mustacchi CPI_FEATURES_ECX(cpi) &= 2028dcf050afSRobert Mustacchi ~CPUID_INTC_ECX_AVX; 2029dcf050afSRobert Mustacchi CPI_FEATURES_ECX(cpi) &= 2030dcf050afSRobert Mustacchi ~CPUID_INTC_ECX_F16C; 2031245ac945SRobert Mustacchi CPI_FEATURES_ECX(cpi) &= 2032245ac945SRobert Mustacchi ~CPUID_INTC_ECX_FMA; 2033245ac945SRobert Mustacchi CPI_FEATURES_7_0_EBX(cpi) &= 2034245ac945SRobert Mustacchi ~CPUID_INTC_EBX_7_0_BMI1; 2035245ac945SRobert Mustacchi CPI_FEATURES_7_0_EBX(cpi) &= 2036245ac945SRobert Mustacchi ~CPUID_INTC_EBX_7_0_BMI2; 2037245ac945SRobert Mustacchi CPI_FEATURES_7_0_EBX(cpi) &= 2038245ac945SRobert Mustacchi ~CPUID_INTC_EBX_7_0_AVX2; 20397af88ac7SKuriakose Kuruvilla xsave_force_disable = B_TRUE; 2040dcf050afSRobert Mustacchi } else { 2041dcf050afSRobert Mustacchi VERIFY(is_x86_feature(x86_featureset, 2042dcf050afSRobert Mustacchi X86FSET_XSAVE) == B_FALSE); 2043dcf050afSRobert Mustacchi } 20447af88ac7SKuriakose Kuruvilla } 20457af88ac7SKuriakose Kuruvilla } 20467af88ac7SKuriakose Kuruvilla } 20477af88ac7SKuriakose Kuruvilla 20487af88ac7SKuriakose Kuruvilla 20497c478bd9Sstevel@tonic-gate if ((cpi->cpi_xmaxeax & 0x80000000) == 0) 20507c478bd9Sstevel@tonic-gate goto pass2_done; 20517c478bd9Sstevel@tonic-gate 20527c478bd9Sstevel@tonic-gate if ((nmax = cpi->cpi_xmaxeax - 0x80000000 + 1) > NMAX_CPI_EXTD) 20537c478bd9Sstevel@tonic-gate nmax = NMAX_CPI_EXTD; 20547c478bd9Sstevel@tonic-gate /* 20557c478bd9Sstevel@tonic-gate * Copy the extended properties, fixing them as we go. 
20567c478bd9Sstevel@tonic-gate * (We already handled n == 0 and n == 1 in pass 1) 20577c478bd9Sstevel@tonic-gate */ 20587c478bd9Sstevel@tonic-gate iptr = (void *)cpi->cpi_brandstr; 20597c478bd9Sstevel@tonic-gate for (n = 2, cp = &cpi->cpi_extd[2]; n < nmax; cp++, n++) { 20608949bcd6Sandrei cp->cp_eax = 0x80000000 + n; 20618949bcd6Sandrei (void) __cpuid_insn(cp); 2062ae115bc7Smrj platform_cpuid_mangle(cpi->cpi_vendor, 0x80000000 + n, cp); 20637c478bd9Sstevel@tonic-gate switch (n) { 20647c478bd9Sstevel@tonic-gate case 2: 20657c478bd9Sstevel@tonic-gate case 3: 20667c478bd9Sstevel@tonic-gate case 4: 20677c478bd9Sstevel@tonic-gate /* 20687c478bd9Sstevel@tonic-gate * Extract the brand string 20697c478bd9Sstevel@tonic-gate */ 20707c478bd9Sstevel@tonic-gate *iptr++ = cp->cp_eax; 20717c478bd9Sstevel@tonic-gate *iptr++ = cp->cp_ebx; 20727c478bd9Sstevel@tonic-gate *iptr++ = cp->cp_ecx; 20737c478bd9Sstevel@tonic-gate *iptr++ = cp->cp_edx; 20747c478bd9Sstevel@tonic-gate break; 20757c478bd9Sstevel@tonic-gate case 5: 20767c478bd9Sstevel@tonic-gate switch (cpi->cpi_vendor) { 20777c478bd9Sstevel@tonic-gate case X86_VENDOR_AMD: 20787c478bd9Sstevel@tonic-gate /* 20797c478bd9Sstevel@tonic-gate * The Athlon and Duron were the first 20807c478bd9Sstevel@tonic-gate * parts to report the sizes of the 20817c478bd9Sstevel@tonic-gate * TLB for large pages. Before then, 20827c478bd9Sstevel@tonic-gate * we don't trust the data. 20837c478bd9Sstevel@tonic-gate */ 20847c478bd9Sstevel@tonic-gate if (cpi->cpi_family < 6 || 20857c478bd9Sstevel@tonic-gate (cpi->cpi_family == 6 && 20867c478bd9Sstevel@tonic-gate cpi->cpi_model < 1)) 20877c478bd9Sstevel@tonic-gate cp->cp_eax = 0; 20887c478bd9Sstevel@tonic-gate break; 20897c478bd9Sstevel@tonic-gate default: 20907c478bd9Sstevel@tonic-gate break; 20917c478bd9Sstevel@tonic-gate } 20927c478bd9Sstevel@tonic-gate break; 20937c478bd9Sstevel@tonic-gate case 6: 20947c478bd9Sstevel@tonic-gate switch (cpi->cpi_vendor) { 20957c478bd9Sstevel@tonic-gate case X86_VENDOR_AMD: 20967c478bd9Sstevel@tonic-gate /* 20977c478bd9Sstevel@tonic-gate * The Athlon and Duron were the first 20987c478bd9Sstevel@tonic-gate * AMD parts with L2 TLB's. 20997c478bd9Sstevel@tonic-gate * Before then, don't trust the data. 21007c478bd9Sstevel@tonic-gate */ 21017c478bd9Sstevel@tonic-gate if (cpi->cpi_family < 6 || 21027c478bd9Sstevel@tonic-gate cpi->cpi_family == 6 && 21037c478bd9Sstevel@tonic-gate cpi->cpi_model < 1) 21047c478bd9Sstevel@tonic-gate cp->cp_eax = cp->cp_ebx = 0; 21057c478bd9Sstevel@tonic-gate /* 21067c478bd9Sstevel@tonic-gate * AMD Duron rev A0 reports L2 21077c478bd9Sstevel@tonic-gate * cache size incorrectly as 1K 21087c478bd9Sstevel@tonic-gate * when it is really 64K 21097c478bd9Sstevel@tonic-gate */ 21107c478bd9Sstevel@tonic-gate if (cpi->cpi_family == 6 && 21117c478bd9Sstevel@tonic-gate cpi->cpi_model == 3 && 21127c478bd9Sstevel@tonic-gate cpi->cpi_step == 0) { 21137c478bd9Sstevel@tonic-gate cp->cp_ecx &= 0xffff; 21147c478bd9Sstevel@tonic-gate cp->cp_ecx |= 0x400000; 21157c478bd9Sstevel@tonic-gate } 21167c478bd9Sstevel@tonic-gate break; 21177c478bd9Sstevel@tonic-gate case X86_VENDOR_Cyrix: /* VIA C3 */ 21187c478bd9Sstevel@tonic-gate /* 21197c478bd9Sstevel@tonic-gate * VIA C3 processors are a bit messed 21207c478bd9Sstevel@tonic-gate * up w.r.t. 
encoding cache sizes in %ecx 21217c478bd9Sstevel@tonic-gate */ 21227c478bd9Sstevel@tonic-gate if (cpi->cpi_family != 6) 21237c478bd9Sstevel@tonic-gate break; 21247c478bd9Sstevel@tonic-gate /* 21257c478bd9Sstevel@tonic-gate * model 7 and 8 were incorrectly encoded 21267c478bd9Sstevel@tonic-gate * 21277c478bd9Sstevel@tonic-gate * xxx is model 8 really broken? 21287c478bd9Sstevel@tonic-gate */ 21297c478bd9Sstevel@tonic-gate if (cpi->cpi_model == 7 || 21307c478bd9Sstevel@tonic-gate cpi->cpi_model == 8) 21317c478bd9Sstevel@tonic-gate cp->cp_ecx = 21327c478bd9Sstevel@tonic-gate BITX(cp->cp_ecx, 31, 24) << 16 | 21337c478bd9Sstevel@tonic-gate BITX(cp->cp_ecx, 23, 16) << 12 | 21347c478bd9Sstevel@tonic-gate BITX(cp->cp_ecx, 15, 8) << 8 | 21357c478bd9Sstevel@tonic-gate BITX(cp->cp_ecx, 7, 0); 21367c478bd9Sstevel@tonic-gate /* 21377c478bd9Sstevel@tonic-gate * model 9 stepping 1 has wrong associativity 21387c478bd9Sstevel@tonic-gate */ 21397c478bd9Sstevel@tonic-gate if (cpi->cpi_model == 9 && cpi->cpi_step == 1) 21407c478bd9Sstevel@tonic-gate cp->cp_ecx |= 8 << 12; 21417c478bd9Sstevel@tonic-gate break; 21427c478bd9Sstevel@tonic-gate case X86_VENDOR_Intel: 21437c478bd9Sstevel@tonic-gate /* 21447c478bd9Sstevel@tonic-gate * Extended L2 Cache features function. 21457c478bd9Sstevel@tonic-gate * First appeared on Prescott. 21467c478bd9Sstevel@tonic-gate */ 21477c478bd9Sstevel@tonic-gate default: 21487c478bd9Sstevel@tonic-gate break; 21497c478bd9Sstevel@tonic-gate } 21507c478bd9Sstevel@tonic-gate break; 21517c478bd9Sstevel@tonic-gate default: 21527c478bd9Sstevel@tonic-gate break; 21537c478bd9Sstevel@tonic-gate } 21547c478bd9Sstevel@tonic-gate } 21557c478bd9Sstevel@tonic-gate 21567c478bd9Sstevel@tonic-gate pass2_done: 21577c478bd9Sstevel@tonic-gate cpi->cpi_pass = 2; 21587c478bd9Sstevel@tonic-gate } 21597c478bd9Sstevel@tonic-gate 21607c478bd9Sstevel@tonic-gate static const char * 21617c478bd9Sstevel@tonic-gate intel_cpubrand(const struct cpuid_info *cpi) 21627c478bd9Sstevel@tonic-gate { 21637c478bd9Sstevel@tonic-gate int i; 21647c478bd9Sstevel@tonic-gate 21657417cfdeSKuriakose Kuruvilla if (!is_x86_feature(x86_featureset, X86FSET_CPUID) || 21667c478bd9Sstevel@tonic-gate cpi->cpi_maxeax < 1 || cpi->cpi_family < 5) 21677c478bd9Sstevel@tonic-gate return ("i486"); 21687c478bd9Sstevel@tonic-gate 21697c478bd9Sstevel@tonic-gate switch (cpi->cpi_family) { 21707c478bd9Sstevel@tonic-gate case 5: 21717c478bd9Sstevel@tonic-gate return ("Intel Pentium(r)"); 21727c478bd9Sstevel@tonic-gate case 6: 21737c478bd9Sstevel@tonic-gate switch (cpi->cpi_model) { 21747c478bd9Sstevel@tonic-gate uint_t celeron, xeon; 21758949bcd6Sandrei const struct cpuid_regs *cp; 21767c478bd9Sstevel@tonic-gate case 0: 21777c478bd9Sstevel@tonic-gate case 1: 21787c478bd9Sstevel@tonic-gate case 2: 21797c478bd9Sstevel@tonic-gate return ("Intel Pentium(r) Pro"); 21807c478bd9Sstevel@tonic-gate case 3: 21817c478bd9Sstevel@tonic-gate case 4: 21827c478bd9Sstevel@tonic-gate return ("Intel Pentium(r) II"); 21837c478bd9Sstevel@tonic-gate case 6: 21847c478bd9Sstevel@tonic-gate return ("Intel Celeron(r)"); 21857c478bd9Sstevel@tonic-gate case 5: 21867c478bd9Sstevel@tonic-gate case 7: 21877c478bd9Sstevel@tonic-gate celeron = xeon = 0; 21887c478bd9Sstevel@tonic-gate cp = &cpi->cpi_std[2]; /* cache info */ 21897c478bd9Sstevel@tonic-gate 219063d3f7dfSkk208521 for (i = 1; i < 4; i++) { 21917c478bd9Sstevel@tonic-gate uint_t tmp; 21927c478bd9Sstevel@tonic-gate 21937c478bd9Sstevel@tonic-gate tmp = (cp->cp_eax >> (8 * i)) & 0xff; 21947c478bd9Sstevel@tonic-gate if (tmp == 
0x40) 21957c478bd9Sstevel@tonic-gate celeron++; 21967c478bd9Sstevel@tonic-gate if (tmp >= 0x44 && tmp <= 0x45) 21977c478bd9Sstevel@tonic-gate xeon++; 21987c478bd9Sstevel@tonic-gate } 21997c478bd9Sstevel@tonic-gate 22007c478bd9Sstevel@tonic-gate for (i = 0; i < 2; i++) { 22017c478bd9Sstevel@tonic-gate uint_t tmp; 22027c478bd9Sstevel@tonic-gate 22037c478bd9Sstevel@tonic-gate tmp = (cp->cp_ebx >> (8 * i)) & 0xff; 22047c478bd9Sstevel@tonic-gate if (tmp == 0x40) 22057c478bd9Sstevel@tonic-gate celeron++; 22067c478bd9Sstevel@tonic-gate else if (tmp >= 0x44 && tmp <= 0x45) 22077c478bd9Sstevel@tonic-gate xeon++; 22087c478bd9Sstevel@tonic-gate } 22097c478bd9Sstevel@tonic-gate 22107c478bd9Sstevel@tonic-gate for (i = 0; i < 4; i++) { 22117c478bd9Sstevel@tonic-gate uint_t tmp; 22127c478bd9Sstevel@tonic-gate 22137c478bd9Sstevel@tonic-gate tmp = (cp->cp_ecx >> (8 * i)) & 0xff; 22147c478bd9Sstevel@tonic-gate if (tmp == 0x40) 22157c478bd9Sstevel@tonic-gate celeron++; 22167c478bd9Sstevel@tonic-gate else if (tmp >= 0x44 && tmp <= 0x45) 22177c478bd9Sstevel@tonic-gate xeon++; 22187c478bd9Sstevel@tonic-gate } 22197c478bd9Sstevel@tonic-gate 22207c478bd9Sstevel@tonic-gate for (i = 0; i < 4; i++) { 22217c478bd9Sstevel@tonic-gate uint_t tmp; 22227c478bd9Sstevel@tonic-gate 22237c478bd9Sstevel@tonic-gate tmp = (cp->cp_edx >> (8 * i)) & 0xff; 22247c478bd9Sstevel@tonic-gate if (tmp == 0x40) 22257c478bd9Sstevel@tonic-gate celeron++; 22267c478bd9Sstevel@tonic-gate else if (tmp >= 0x44 && tmp <= 0x45) 22277c478bd9Sstevel@tonic-gate xeon++; 22287c478bd9Sstevel@tonic-gate } 22297c478bd9Sstevel@tonic-gate 22307c478bd9Sstevel@tonic-gate if (celeron) 22317c478bd9Sstevel@tonic-gate return ("Intel Celeron(r)"); 22327c478bd9Sstevel@tonic-gate if (xeon) 22337c478bd9Sstevel@tonic-gate return (cpi->cpi_model == 5 ? 22347c478bd9Sstevel@tonic-gate "Intel Pentium(r) II Xeon(tm)" : 22357c478bd9Sstevel@tonic-gate "Intel Pentium(r) III Xeon(tm)"); 22367c478bd9Sstevel@tonic-gate return (cpi->cpi_model == 5 ? 
22377c478bd9Sstevel@tonic-gate "Intel Pentium(r) II or Pentium(r) II Xeon(tm)" : 22387c478bd9Sstevel@tonic-gate "Intel Pentium(r) III or Pentium(r) III Xeon(tm)"); 22397c478bd9Sstevel@tonic-gate default: 22407c478bd9Sstevel@tonic-gate break; 22417c478bd9Sstevel@tonic-gate } 22427c478bd9Sstevel@tonic-gate default: 22437c478bd9Sstevel@tonic-gate break; 22447c478bd9Sstevel@tonic-gate } 22457c478bd9Sstevel@tonic-gate 22465ff02082Sdmick /* BrandID is present if the field is nonzero */ 22475ff02082Sdmick if (cpi->cpi_brandid != 0) { 22487c478bd9Sstevel@tonic-gate static const struct { 22497c478bd9Sstevel@tonic-gate uint_t bt_bid; 22507c478bd9Sstevel@tonic-gate const char *bt_str; 22517c478bd9Sstevel@tonic-gate } brand_tbl[] = { 22527c478bd9Sstevel@tonic-gate { 0x1, "Intel(r) Celeron(r)" }, 22537c478bd9Sstevel@tonic-gate { 0x2, "Intel(r) Pentium(r) III" }, 22547c478bd9Sstevel@tonic-gate { 0x3, "Intel(r) Pentium(r) III Xeon(tm)" }, 22557c478bd9Sstevel@tonic-gate { 0x4, "Intel(r) Pentium(r) III" }, 22567c478bd9Sstevel@tonic-gate { 0x6, "Mobile Intel(r) Pentium(r) III" }, 22577c478bd9Sstevel@tonic-gate { 0x7, "Mobile Intel(r) Celeron(r)" }, 22587c478bd9Sstevel@tonic-gate { 0x8, "Intel(r) Pentium(r) 4" }, 22597c478bd9Sstevel@tonic-gate { 0x9, "Intel(r) Pentium(r) 4" }, 22607c478bd9Sstevel@tonic-gate { 0xa, "Intel(r) Celeron(r)" }, 22617c478bd9Sstevel@tonic-gate { 0xb, "Intel(r) Xeon(tm)" }, 22627c478bd9Sstevel@tonic-gate { 0xc, "Intel(r) Xeon(tm) MP" }, 22637c478bd9Sstevel@tonic-gate { 0xe, "Mobile Intel(r) Pentium(r) 4" }, 22645ff02082Sdmick { 0xf, "Mobile Intel(r) Celeron(r)" }, 22655ff02082Sdmick { 0x11, "Mobile Genuine Intel(r)" }, 22665ff02082Sdmick { 0x12, "Intel(r) Celeron(r) M" }, 22675ff02082Sdmick { 0x13, "Mobile Intel(r) Celeron(r)" }, 22685ff02082Sdmick { 0x14, "Intel(r) Celeron(r)" }, 22695ff02082Sdmick { 0x15, "Mobile Genuine Intel(r)" }, 22705ff02082Sdmick { 0x16, "Intel(r) Pentium(r) M" }, 22715ff02082Sdmick { 0x17, "Mobile Intel(r) Celeron(r)" } 22727c478bd9Sstevel@tonic-gate }; 22737c478bd9Sstevel@tonic-gate uint_t btblmax = sizeof (brand_tbl) / sizeof (brand_tbl[0]); 22747c478bd9Sstevel@tonic-gate uint_t sgn; 22757c478bd9Sstevel@tonic-gate 22767c478bd9Sstevel@tonic-gate sgn = (cpi->cpi_family << 8) | 22777c478bd9Sstevel@tonic-gate (cpi->cpi_model << 4) | cpi->cpi_step; 22787c478bd9Sstevel@tonic-gate 22797c478bd9Sstevel@tonic-gate for (i = 0; i < btblmax; i++) 22807c478bd9Sstevel@tonic-gate if (brand_tbl[i].bt_bid == cpi->cpi_brandid) 22817c478bd9Sstevel@tonic-gate break; 22827c478bd9Sstevel@tonic-gate if (i < btblmax) { 22837c478bd9Sstevel@tonic-gate if (sgn == 0x6b1 && cpi->cpi_brandid == 3) 22847c478bd9Sstevel@tonic-gate return ("Intel(r) Celeron(r)"); 22857c478bd9Sstevel@tonic-gate if (sgn < 0xf13 && cpi->cpi_brandid == 0xb) 22867c478bd9Sstevel@tonic-gate return ("Intel(r) Xeon(tm) MP"); 22877c478bd9Sstevel@tonic-gate if (sgn < 0xf13 && cpi->cpi_brandid == 0xe) 22887c478bd9Sstevel@tonic-gate return ("Intel(r) Xeon(tm)"); 22897c478bd9Sstevel@tonic-gate return (brand_tbl[i].bt_str); 22907c478bd9Sstevel@tonic-gate } 22917c478bd9Sstevel@tonic-gate } 22927c478bd9Sstevel@tonic-gate 22937c478bd9Sstevel@tonic-gate return (NULL); 22947c478bd9Sstevel@tonic-gate } 22957c478bd9Sstevel@tonic-gate 22967c478bd9Sstevel@tonic-gate static const char * 22977c478bd9Sstevel@tonic-gate amd_cpubrand(const struct cpuid_info *cpi) 22987c478bd9Sstevel@tonic-gate { 22997417cfdeSKuriakose Kuruvilla if (!is_x86_feature(x86_featureset, X86FSET_CPUID) || 23007c478bd9Sstevel@tonic-gate cpi->cpi_maxeax < 1 || 
cpi->cpi_family < 5) 23017c478bd9Sstevel@tonic-gate return ("i486 compatible"); 23027c478bd9Sstevel@tonic-gate 23037c478bd9Sstevel@tonic-gate switch (cpi->cpi_family) { 23047c478bd9Sstevel@tonic-gate case 5: 23057c478bd9Sstevel@tonic-gate switch (cpi->cpi_model) { 23067c478bd9Sstevel@tonic-gate case 0: 23077c478bd9Sstevel@tonic-gate case 1: 23087c478bd9Sstevel@tonic-gate case 2: 23097c478bd9Sstevel@tonic-gate case 3: 23107c478bd9Sstevel@tonic-gate case 4: 23117c478bd9Sstevel@tonic-gate case 5: 23127c478bd9Sstevel@tonic-gate return ("AMD-K5(r)"); 23137c478bd9Sstevel@tonic-gate case 6: 23147c478bd9Sstevel@tonic-gate case 7: 23157c478bd9Sstevel@tonic-gate return ("AMD-K6(r)"); 23167c478bd9Sstevel@tonic-gate case 8: 23177c478bd9Sstevel@tonic-gate return ("AMD-K6(r)-2"); 23187c478bd9Sstevel@tonic-gate case 9: 23197c478bd9Sstevel@tonic-gate return ("AMD-K6(r)-III"); 23207c478bd9Sstevel@tonic-gate default: 23217c478bd9Sstevel@tonic-gate return ("AMD (family 5)"); 23227c478bd9Sstevel@tonic-gate } 23237c478bd9Sstevel@tonic-gate case 6: 23247c478bd9Sstevel@tonic-gate switch (cpi->cpi_model) { 23257c478bd9Sstevel@tonic-gate case 1: 23267c478bd9Sstevel@tonic-gate return ("AMD-K7(tm)"); 23277c478bd9Sstevel@tonic-gate case 0: 23287c478bd9Sstevel@tonic-gate case 2: 23297c478bd9Sstevel@tonic-gate case 4: 23307c478bd9Sstevel@tonic-gate return ("AMD Athlon(tm)"); 23317c478bd9Sstevel@tonic-gate case 3: 23327c478bd9Sstevel@tonic-gate case 7: 23337c478bd9Sstevel@tonic-gate return ("AMD Duron(tm)"); 23347c478bd9Sstevel@tonic-gate case 6: 23357c478bd9Sstevel@tonic-gate case 8: 23367c478bd9Sstevel@tonic-gate case 10: 23377c478bd9Sstevel@tonic-gate /* 23387c478bd9Sstevel@tonic-gate * Use the L2 cache size to distinguish 23397c478bd9Sstevel@tonic-gate */ 23407c478bd9Sstevel@tonic-gate return ((cpi->cpi_extd[6].cp_ecx >> 16) >= 256 ? 
23417c478bd9Sstevel@tonic-gate "AMD Athlon(tm)" : "AMD Duron(tm)"); 23427c478bd9Sstevel@tonic-gate default: 23437c478bd9Sstevel@tonic-gate return ("AMD (family 6)"); 23447c478bd9Sstevel@tonic-gate } 23457c478bd9Sstevel@tonic-gate default: 23467c478bd9Sstevel@tonic-gate break; 23477c478bd9Sstevel@tonic-gate } 23487c478bd9Sstevel@tonic-gate 23497c478bd9Sstevel@tonic-gate if (cpi->cpi_family == 0xf && cpi->cpi_model == 5 && 23507c478bd9Sstevel@tonic-gate cpi->cpi_brandid != 0) { 23517c478bd9Sstevel@tonic-gate switch (BITX(cpi->cpi_brandid, 7, 5)) { 23527c478bd9Sstevel@tonic-gate case 3: 23537c478bd9Sstevel@tonic-gate return ("AMD Opteron(tm) UP 1xx"); 23547c478bd9Sstevel@tonic-gate case 4: 23557c478bd9Sstevel@tonic-gate return ("AMD Opteron(tm) DP 2xx"); 23567c478bd9Sstevel@tonic-gate case 5: 23577c478bd9Sstevel@tonic-gate return ("AMD Opteron(tm) MP 8xx"); 23587c478bd9Sstevel@tonic-gate default: 23597c478bd9Sstevel@tonic-gate return ("AMD Opteron(tm)"); 23607c478bd9Sstevel@tonic-gate } 23617c478bd9Sstevel@tonic-gate } 23627c478bd9Sstevel@tonic-gate 23637c478bd9Sstevel@tonic-gate return (NULL); 23647c478bd9Sstevel@tonic-gate } 23657c478bd9Sstevel@tonic-gate 23667c478bd9Sstevel@tonic-gate static const char * 23677c478bd9Sstevel@tonic-gate cyrix_cpubrand(struct cpuid_info *cpi, uint_t type) 23687c478bd9Sstevel@tonic-gate { 23697417cfdeSKuriakose Kuruvilla if (!is_x86_feature(x86_featureset, X86FSET_CPUID) || 23707c478bd9Sstevel@tonic-gate cpi->cpi_maxeax < 1 || cpi->cpi_family < 5 || 23717c478bd9Sstevel@tonic-gate type == X86_TYPE_CYRIX_486) 23727c478bd9Sstevel@tonic-gate return ("i486 compatible"); 23737c478bd9Sstevel@tonic-gate 23747c478bd9Sstevel@tonic-gate switch (type) { 23757c478bd9Sstevel@tonic-gate case X86_TYPE_CYRIX_6x86: 23767c478bd9Sstevel@tonic-gate return ("Cyrix 6x86"); 23777c478bd9Sstevel@tonic-gate case X86_TYPE_CYRIX_6x86L: 23787c478bd9Sstevel@tonic-gate return ("Cyrix 6x86L"); 23797c478bd9Sstevel@tonic-gate case X86_TYPE_CYRIX_6x86MX: 23807c478bd9Sstevel@tonic-gate return ("Cyrix 6x86MX"); 23817c478bd9Sstevel@tonic-gate case X86_TYPE_CYRIX_GXm: 23827c478bd9Sstevel@tonic-gate return ("Cyrix GXm"); 23837c478bd9Sstevel@tonic-gate case X86_TYPE_CYRIX_MediaGX: 23847c478bd9Sstevel@tonic-gate return ("Cyrix MediaGX"); 23857c478bd9Sstevel@tonic-gate case X86_TYPE_CYRIX_MII: 23867c478bd9Sstevel@tonic-gate return ("Cyrix M2"); 23877c478bd9Sstevel@tonic-gate case X86_TYPE_VIA_CYRIX_III: 23887c478bd9Sstevel@tonic-gate return ("VIA Cyrix M3"); 23897c478bd9Sstevel@tonic-gate default: 23907c478bd9Sstevel@tonic-gate /* 23917c478bd9Sstevel@tonic-gate * Have another wild guess .. 23927c478bd9Sstevel@tonic-gate */ 23937c478bd9Sstevel@tonic-gate if (cpi->cpi_family == 4 && cpi->cpi_model == 9) 23947c478bd9Sstevel@tonic-gate return ("Cyrix 5x86"); 23957c478bd9Sstevel@tonic-gate else if (cpi->cpi_family == 5) { 23967c478bd9Sstevel@tonic-gate switch (cpi->cpi_model) { 23977c478bd9Sstevel@tonic-gate case 2: 23987c478bd9Sstevel@tonic-gate return ("Cyrix 6x86"); /* Cyrix M1 */ 23997c478bd9Sstevel@tonic-gate case 4: 24007c478bd9Sstevel@tonic-gate return ("Cyrix MediaGX"); 24017c478bd9Sstevel@tonic-gate default: 24027c478bd9Sstevel@tonic-gate break; 24037c478bd9Sstevel@tonic-gate } 24047c478bd9Sstevel@tonic-gate } else if (cpi->cpi_family == 6) { 24057c478bd9Sstevel@tonic-gate switch (cpi->cpi_model) { 24067c478bd9Sstevel@tonic-gate case 0: 24077c478bd9Sstevel@tonic-gate return ("Cyrix 6x86MX"); /* Cyrix M2? 
*/ 24087c478bd9Sstevel@tonic-gate case 5: 24097c478bd9Sstevel@tonic-gate case 6: 24107c478bd9Sstevel@tonic-gate case 7: 24117c478bd9Sstevel@tonic-gate case 8: 24127c478bd9Sstevel@tonic-gate case 9: 24137c478bd9Sstevel@tonic-gate return ("VIA C3"); 24147c478bd9Sstevel@tonic-gate default: 24157c478bd9Sstevel@tonic-gate break; 24167c478bd9Sstevel@tonic-gate } 24177c478bd9Sstevel@tonic-gate } 24187c478bd9Sstevel@tonic-gate break; 24197c478bd9Sstevel@tonic-gate } 24207c478bd9Sstevel@tonic-gate return (NULL); 24217c478bd9Sstevel@tonic-gate } 24227c478bd9Sstevel@tonic-gate 24237c478bd9Sstevel@tonic-gate /* 24247c478bd9Sstevel@tonic-gate * This only gets called in the case that the CPU extended 24257c478bd9Sstevel@tonic-gate * feature brand string (0x80000002, 0x80000003, 0x80000004) 24267c478bd9Sstevel@tonic-gate * aren't available, or contain null bytes for some reason. 24277c478bd9Sstevel@tonic-gate */ 24287c478bd9Sstevel@tonic-gate static void 24297c478bd9Sstevel@tonic-gate fabricate_brandstr(struct cpuid_info *cpi) 24307c478bd9Sstevel@tonic-gate { 24317c478bd9Sstevel@tonic-gate const char *brand = NULL; 24327c478bd9Sstevel@tonic-gate 24337c478bd9Sstevel@tonic-gate switch (cpi->cpi_vendor) { 24347c478bd9Sstevel@tonic-gate case X86_VENDOR_Intel: 24357c478bd9Sstevel@tonic-gate brand = intel_cpubrand(cpi); 24367c478bd9Sstevel@tonic-gate break; 24377c478bd9Sstevel@tonic-gate case X86_VENDOR_AMD: 24387c478bd9Sstevel@tonic-gate brand = amd_cpubrand(cpi); 24397c478bd9Sstevel@tonic-gate break; 24407c478bd9Sstevel@tonic-gate case X86_VENDOR_Cyrix: 24417c478bd9Sstevel@tonic-gate brand = cyrix_cpubrand(cpi, x86_type); 24427c478bd9Sstevel@tonic-gate break; 24437c478bd9Sstevel@tonic-gate case X86_VENDOR_NexGen: 24447c478bd9Sstevel@tonic-gate if (cpi->cpi_family == 5 && cpi->cpi_model == 0) 24457c478bd9Sstevel@tonic-gate brand = "NexGen Nx586"; 24467c478bd9Sstevel@tonic-gate break; 24477c478bd9Sstevel@tonic-gate case X86_VENDOR_Centaur: 24487c478bd9Sstevel@tonic-gate if (cpi->cpi_family == 5) 24497c478bd9Sstevel@tonic-gate switch (cpi->cpi_model) { 24507c478bd9Sstevel@tonic-gate case 4: 24517c478bd9Sstevel@tonic-gate brand = "Centaur C6"; 24527c478bd9Sstevel@tonic-gate break; 24537c478bd9Sstevel@tonic-gate case 8: 24547c478bd9Sstevel@tonic-gate brand = "Centaur C2"; 24557c478bd9Sstevel@tonic-gate break; 24567c478bd9Sstevel@tonic-gate case 9: 24577c478bd9Sstevel@tonic-gate brand = "Centaur C3"; 24587c478bd9Sstevel@tonic-gate break; 24597c478bd9Sstevel@tonic-gate default: 24607c478bd9Sstevel@tonic-gate break; 24617c478bd9Sstevel@tonic-gate } 24627c478bd9Sstevel@tonic-gate break; 24637c478bd9Sstevel@tonic-gate case X86_VENDOR_Rise: 24647c478bd9Sstevel@tonic-gate if (cpi->cpi_family == 5 && 24657c478bd9Sstevel@tonic-gate (cpi->cpi_model == 0 || cpi->cpi_model == 2)) 24667c478bd9Sstevel@tonic-gate brand = "Rise mP6"; 24677c478bd9Sstevel@tonic-gate break; 24687c478bd9Sstevel@tonic-gate case X86_VENDOR_SiS: 24697c478bd9Sstevel@tonic-gate if (cpi->cpi_family == 5 && cpi->cpi_model == 0) 24707c478bd9Sstevel@tonic-gate brand = "SiS 55x"; 24717c478bd9Sstevel@tonic-gate break; 24727c478bd9Sstevel@tonic-gate case X86_VENDOR_TM: 24737c478bd9Sstevel@tonic-gate if (cpi->cpi_family == 5 && cpi->cpi_model == 4) 24747c478bd9Sstevel@tonic-gate brand = "Transmeta Crusoe TM3x00 or TM5x00"; 24757c478bd9Sstevel@tonic-gate break; 24767c478bd9Sstevel@tonic-gate case X86_VENDOR_NSC: 24777c478bd9Sstevel@tonic-gate case X86_VENDOR_UMC: 24787c478bd9Sstevel@tonic-gate default: 24797c478bd9Sstevel@tonic-gate break; 
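	/*
	 * Editorial note, not part of the original source: when none of the
	 * vendor cases above supply a brand string, brand stays NULL and the
	 * snprintf() fallback further below composes "<vendor string> f.m.s"
	 * from the raw identification fields.  For a hypothetical
	 * unrecognized part with vendor string "CentaurHauls", family 6,
	 * model 9 and stepping 8, the fabricated brand string would read
	 * "CentaurHauls 6.9.8".
	 */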
24807c478bd9Sstevel@tonic-gate } 24817c478bd9Sstevel@tonic-gate if (brand) { 24827c478bd9Sstevel@tonic-gate (void) strcpy((char *)cpi->cpi_brandstr, brand); 24837c478bd9Sstevel@tonic-gate return; 24847c478bd9Sstevel@tonic-gate } 24857c478bd9Sstevel@tonic-gate 24867c478bd9Sstevel@tonic-gate /* 24877c478bd9Sstevel@tonic-gate * If all else fails ... 24887c478bd9Sstevel@tonic-gate */ 24897c478bd9Sstevel@tonic-gate (void) snprintf(cpi->cpi_brandstr, sizeof (cpi->cpi_brandstr), 24907c478bd9Sstevel@tonic-gate "%s %d.%d.%d", cpi->cpi_vendorstr, cpi->cpi_family, 24917c478bd9Sstevel@tonic-gate cpi->cpi_model, cpi->cpi_step); 24927c478bd9Sstevel@tonic-gate } 24937c478bd9Sstevel@tonic-gate 24947c478bd9Sstevel@tonic-gate /* 24957c478bd9Sstevel@tonic-gate * This routine is called just after kernel memory allocation 24967c478bd9Sstevel@tonic-gate * becomes available on cpu0, and as part of mp_startup() on 24977c478bd9Sstevel@tonic-gate * the other cpus. 24987c478bd9Sstevel@tonic-gate * 2499d129bde2Sesaxe * Fixup the brand string, and collect any information from cpuid 250079ec9da8SYuri Pankov * that requires dynamically allocated storage to represent. 25017c478bd9Sstevel@tonic-gate */ 25027c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 25037c478bd9Sstevel@tonic-gate void 25047c478bd9Sstevel@tonic-gate cpuid_pass3(cpu_t *cpu) 25057c478bd9Sstevel@tonic-gate { 2506d129bde2Sesaxe int i, max, shft, level, size; 2507d129bde2Sesaxe struct cpuid_regs regs; 2508d129bde2Sesaxe struct cpuid_regs *cp; 25097c478bd9Sstevel@tonic-gate struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; 25107c478bd9Sstevel@tonic-gate 25117c478bd9Sstevel@tonic-gate ASSERT(cpi->cpi_pass == 2); 25127c478bd9Sstevel@tonic-gate 2513d129bde2Sesaxe /* 2514d129bde2Sesaxe * Function 4: Deterministic cache parameters 2515d129bde2Sesaxe * 2516d129bde2Sesaxe * Take this opportunity to detect the number of threads 2517d129bde2Sesaxe * sharing the last level cache, and construct a corresponding 2518d129bde2Sesaxe * cache id. The respective cpuid_info members are initialized 2519d129bde2Sesaxe * to the default case of "no last level cache sharing". 2520d129bde2Sesaxe */ 2521d129bde2Sesaxe cpi->cpi_ncpu_shr_last_cache = 1; 2522d129bde2Sesaxe cpi->cpi_last_lvl_cacheid = cpu->cpu_id; 2523d129bde2Sesaxe 2524d129bde2Sesaxe if (cpi->cpi_maxeax >= 4 && cpi->cpi_vendor == X86_VENDOR_Intel) { 2525d129bde2Sesaxe 2526d129bde2Sesaxe /* 2527d129bde2Sesaxe * Find the # of elements (size) returned by fn 4, and along 2528d129bde2Sesaxe * the way detect last level cache sharing details. 2529d129bde2Sesaxe */ 2530d129bde2Sesaxe bzero(®s, sizeof (regs)); 2531d129bde2Sesaxe cp = ®s; 2532d129bde2Sesaxe for (i = 0, max = 0; i < CPI_FN4_ECX_MAX; i++) { 2533d129bde2Sesaxe cp->cp_eax = 4; 2534d129bde2Sesaxe cp->cp_ecx = i; 2535d129bde2Sesaxe 2536d129bde2Sesaxe (void) __cpuid_insn(cp); 2537d129bde2Sesaxe 2538d129bde2Sesaxe if (CPI_CACHE_TYPE(cp) == 0) 2539d129bde2Sesaxe break; 2540d129bde2Sesaxe level = CPI_CACHE_LVL(cp); 2541d129bde2Sesaxe if (level > max) { 2542d129bde2Sesaxe max = level; 2543d129bde2Sesaxe cpi->cpi_ncpu_shr_last_cache = 2544d129bde2Sesaxe CPI_NTHR_SHR_CACHE(cp) + 1; 2545d129bde2Sesaxe } 2546d129bde2Sesaxe } 2547d129bde2Sesaxe cpi->cpi_std_4_size = size = i; 2548d129bde2Sesaxe 2549d129bde2Sesaxe /* 2550d129bde2Sesaxe * Allocate the cpi_std_4 array. The first element 2551d129bde2Sesaxe * references the regs for fn 4, %ecx == 0, which 2552d129bde2Sesaxe * cpuid_pass2() stashed in cpi->cpi_std[4]. 
2553d129bde2Sesaxe */ 2554d129bde2Sesaxe if (size > 0) { 2555d129bde2Sesaxe cpi->cpi_std_4 = 2556d129bde2Sesaxe kmem_alloc(size * sizeof (cp), KM_SLEEP); 2557d129bde2Sesaxe cpi->cpi_std_4[0] = &cpi->cpi_std[4]; 2558d129bde2Sesaxe 2559d129bde2Sesaxe /* 2560d129bde2Sesaxe * Allocate storage to hold the additional regs 2561d129bde2Sesaxe * for function 4, %ecx == 1 .. cpi_std_4_size. 2562d129bde2Sesaxe * 2563d129bde2Sesaxe * The regs for fn 4, %ecx == 0 has already 2564d129bde2Sesaxe * been allocated as indicated above. 2565d129bde2Sesaxe */ 2566d129bde2Sesaxe for (i = 1; i < size; i++) { 2567d129bde2Sesaxe cp = cpi->cpi_std_4[i] = 2568d129bde2Sesaxe kmem_zalloc(sizeof (regs), KM_SLEEP); 2569d129bde2Sesaxe cp->cp_eax = 4; 2570d129bde2Sesaxe cp->cp_ecx = i; 2571d129bde2Sesaxe 2572d129bde2Sesaxe (void) __cpuid_insn(cp); 2573d129bde2Sesaxe } 2574d129bde2Sesaxe } 2575d129bde2Sesaxe /* 2576d129bde2Sesaxe * Determine the number of bits needed to represent 2577d129bde2Sesaxe * the number of CPUs sharing the last level cache. 2578d129bde2Sesaxe * 2579d129bde2Sesaxe * Shift off that number of bits from the APIC id to 2580d129bde2Sesaxe * derive the cache id. 2581d129bde2Sesaxe */ 2582d129bde2Sesaxe shft = 0; 2583d129bde2Sesaxe for (i = 1; i < cpi->cpi_ncpu_shr_last_cache; i <<= 1) 2584d129bde2Sesaxe shft++; 2585b6917abeSmishra cpi->cpi_last_lvl_cacheid = cpi->cpi_apicid >> shft; 2586d129bde2Sesaxe } 2587d129bde2Sesaxe 2588d129bde2Sesaxe /* 2589d129bde2Sesaxe * Now fixup the brand string 2590d129bde2Sesaxe */ 25917c478bd9Sstevel@tonic-gate if ((cpi->cpi_xmaxeax & 0x80000000) == 0) { 25927c478bd9Sstevel@tonic-gate fabricate_brandstr(cpi); 2593d129bde2Sesaxe } else { 25947c478bd9Sstevel@tonic-gate 25957c478bd9Sstevel@tonic-gate /* 25967c478bd9Sstevel@tonic-gate * If we successfully extracted a brand string from the cpuid 25977c478bd9Sstevel@tonic-gate * instruction, clean it up by removing leading spaces and 25987c478bd9Sstevel@tonic-gate * similar junk. 25997c478bd9Sstevel@tonic-gate */ 26007c478bd9Sstevel@tonic-gate if (cpi->cpi_brandstr[0]) { 26017c478bd9Sstevel@tonic-gate size_t maxlen = sizeof (cpi->cpi_brandstr); 26027c478bd9Sstevel@tonic-gate char *src, *dst; 26037c478bd9Sstevel@tonic-gate 26047c478bd9Sstevel@tonic-gate dst = src = (char *)cpi->cpi_brandstr; 26057c478bd9Sstevel@tonic-gate src[maxlen - 1] = '\0'; 26067c478bd9Sstevel@tonic-gate /* 26077c478bd9Sstevel@tonic-gate * strip leading spaces 26087c478bd9Sstevel@tonic-gate */ 26097c478bd9Sstevel@tonic-gate while (*src == ' ') 26107c478bd9Sstevel@tonic-gate src++; 26117c478bd9Sstevel@tonic-gate /* 26127c478bd9Sstevel@tonic-gate * Remove any 'Genuine' or "Authentic" prefixes 26137c478bd9Sstevel@tonic-gate */ 26147c478bd9Sstevel@tonic-gate if (strncmp(src, "Genuine ", 8) == 0) 26157c478bd9Sstevel@tonic-gate src += 8; 26167c478bd9Sstevel@tonic-gate if (strncmp(src, "Authentic ", 10) == 0) 26177c478bd9Sstevel@tonic-gate src += 10; 26187c478bd9Sstevel@tonic-gate 26197c478bd9Sstevel@tonic-gate /* 26207c478bd9Sstevel@tonic-gate * Now do an in-place copy. 26217c478bd9Sstevel@tonic-gate * Map (R) to (r) and (TM) to (tm). 26227c478bd9Sstevel@tonic-gate * The era of teletypes is long gone, and there's 26237c478bd9Sstevel@tonic-gate * -really- no need to shout. 
26247c478bd9Sstevel@tonic-gate */ 26257c478bd9Sstevel@tonic-gate while (*src != '\0') { 26267c478bd9Sstevel@tonic-gate if (src[0] == '(') { 26277c478bd9Sstevel@tonic-gate if (strncmp(src + 1, "R)", 2) == 0) { 26287c478bd9Sstevel@tonic-gate (void) strncpy(dst, "(r)", 3); 26297c478bd9Sstevel@tonic-gate src += 3; 26307c478bd9Sstevel@tonic-gate dst += 3; 26317c478bd9Sstevel@tonic-gate continue; 26327c478bd9Sstevel@tonic-gate } 26337c478bd9Sstevel@tonic-gate if (strncmp(src + 1, "TM)", 3) == 0) { 26347c478bd9Sstevel@tonic-gate (void) strncpy(dst, "(tm)", 4); 26357c478bd9Sstevel@tonic-gate src += 4; 26367c478bd9Sstevel@tonic-gate dst += 4; 26377c478bd9Sstevel@tonic-gate continue; 26387c478bd9Sstevel@tonic-gate } 26397c478bd9Sstevel@tonic-gate } 26407c478bd9Sstevel@tonic-gate *dst++ = *src++; 26417c478bd9Sstevel@tonic-gate } 26427c478bd9Sstevel@tonic-gate *dst = '\0'; 26437c478bd9Sstevel@tonic-gate 26447c478bd9Sstevel@tonic-gate /* 26457c478bd9Sstevel@tonic-gate * Finally, remove any trailing spaces 26467c478bd9Sstevel@tonic-gate */ 26477c478bd9Sstevel@tonic-gate while (--dst > cpi->cpi_brandstr) 26487c478bd9Sstevel@tonic-gate if (*dst == ' ') 26497c478bd9Sstevel@tonic-gate *dst = '\0'; 26507c478bd9Sstevel@tonic-gate else 26517c478bd9Sstevel@tonic-gate break; 26527c478bd9Sstevel@tonic-gate } else 26537c478bd9Sstevel@tonic-gate fabricate_brandstr(cpi); 2654d129bde2Sesaxe } 26557c478bd9Sstevel@tonic-gate cpi->cpi_pass = 3; 26567c478bd9Sstevel@tonic-gate } 26577c478bd9Sstevel@tonic-gate 26587c478bd9Sstevel@tonic-gate /* 26597c478bd9Sstevel@tonic-gate * This routine is called out of bind_hwcap() much later in the life 26607c478bd9Sstevel@tonic-gate * of the kernel (post_startup()). The job of this routine is to resolve 26617c478bd9Sstevel@tonic-gate * the hardware feature support and kernel support for those features into 26627c478bd9Sstevel@tonic-gate * what we're actually going to tell applications via the aux vector. 
26637c478bd9Sstevel@tonic-gate */ 2664ebb8ac07SRobert Mustacchi void 2665ebb8ac07SRobert Mustacchi cpuid_pass4(cpu_t *cpu, uint_t *hwcap_out) 26667c478bd9Sstevel@tonic-gate { 26677c478bd9Sstevel@tonic-gate struct cpuid_info *cpi; 2668ebb8ac07SRobert Mustacchi uint_t hwcap_flags = 0, hwcap_flags_2 = 0; 26697c478bd9Sstevel@tonic-gate 26707c478bd9Sstevel@tonic-gate if (cpu == NULL) 26717c478bd9Sstevel@tonic-gate cpu = CPU; 26727c478bd9Sstevel@tonic-gate cpi = cpu->cpu_m.mcpu_cpi; 26737c478bd9Sstevel@tonic-gate 26747c478bd9Sstevel@tonic-gate ASSERT(cpi->cpi_pass == 3); 26757c478bd9Sstevel@tonic-gate 26767c478bd9Sstevel@tonic-gate if (cpi->cpi_maxeax >= 1) { 26777c478bd9Sstevel@tonic-gate uint32_t *edx = &cpi->cpi_support[STD_EDX_FEATURES]; 26787c478bd9Sstevel@tonic-gate uint32_t *ecx = &cpi->cpi_support[STD_ECX_FEATURES]; 2679245ac945SRobert Mustacchi uint32_t *ebx = &cpi->cpi_support[STD_EBX_FEATURES]; 26807c478bd9Sstevel@tonic-gate 26817c478bd9Sstevel@tonic-gate *edx = CPI_FEATURES_EDX(cpi); 26827c478bd9Sstevel@tonic-gate *ecx = CPI_FEATURES_ECX(cpi); 2683245ac945SRobert Mustacchi *ebx = CPI_FEATURES_7_0_EBX(cpi); 26847c478bd9Sstevel@tonic-gate 26857c478bd9Sstevel@tonic-gate /* 26867c478bd9Sstevel@tonic-gate * [these require explicit kernel support] 26877c478bd9Sstevel@tonic-gate */ 26887417cfdeSKuriakose Kuruvilla if (!is_x86_feature(x86_featureset, X86FSET_SEP)) 26897c478bd9Sstevel@tonic-gate *edx &= ~CPUID_INTC_EDX_SEP; 26907c478bd9Sstevel@tonic-gate 26917417cfdeSKuriakose Kuruvilla if (!is_x86_feature(x86_featureset, X86FSET_SSE)) 26927c478bd9Sstevel@tonic-gate *edx &= ~(CPUID_INTC_EDX_FXSR|CPUID_INTC_EDX_SSE); 26937417cfdeSKuriakose Kuruvilla if (!is_x86_feature(x86_featureset, X86FSET_SSE2)) 26947c478bd9Sstevel@tonic-gate *edx &= ~CPUID_INTC_EDX_SSE2; 26957c478bd9Sstevel@tonic-gate 26967417cfdeSKuriakose Kuruvilla if (!is_x86_feature(x86_featureset, X86FSET_HTT)) 26977c478bd9Sstevel@tonic-gate *edx &= ~CPUID_INTC_EDX_HTT; 26987c478bd9Sstevel@tonic-gate 26997417cfdeSKuriakose Kuruvilla if (!is_x86_feature(x86_featureset, X86FSET_SSE3)) 27007c478bd9Sstevel@tonic-gate *ecx &= ~CPUID_INTC_ECX_SSE3; 27017c478bd9Sstevel@tonic-gate 27027417cfdeSKuriakose Kuruvilla if (!is_x86_feature(x86_featureset, X86FSET_SSSE3)) 2703d0f8ff6eSkk208521 *ecx &= ~CPUID_INTC_ECX_SSSE3; 27047417cfdeSKuriakose Kuruvilla if (!is_x86_feature(x86_featureset, X86FSET_SSE4_1)) 2705d0f8ff6eSkk208521 *ecx &= ~CPUID_INTC_ECX_SSE4_1; 27067417cfdeSKuriakose Kuruvilla if (!is_x86_feature(x86_featureset, X86FSET_SSE4_2)) 2707d0f8ff6eSkk208521 *ecx &= ~CPUID_INTC_ECX_SSE4_2; 27087417cfdeSKuriakose Kuruvilla if (!is_x86_feature(x86_featureset, X86FSET_AES)) 2709a50a8b93SKuriakose Kuruvilla *ecx &= ~CPUID_INTC_ECX_AES; 27107417cfdeSKuriakose Kuruvilla if (!is_x86_feature(x86_featureset, X86FSET_PCLMULQDQ)) 27117417cfdeSKuriakose Kuruvilla *ecx &= ~CPUID_INTC_ECX_PCLMULQDQ; 27127af88ac7SKuriakose Kuruvilla if (!is_x86_feature(x86_featureset, X86FSET_XSAVE)) 27137af88ac7SKuriakose Kuruvilla *ecx &= ~(CPUID_INTC_ECX_XSAVE | 27147af88ac7SKuriakose Kuruvilla CPUID_INTC_ECX_OSXSAVE); 27157af88ac7SKuriakose Kuruvilla if (!is_x86_feature(x86_featureset, X86FSET_AVX)) 27167af88ac7SKuriakose Kuruvilla *ecx &= ~CPUID_INTC_ECX_AVX; 2717ebb8ac07SRobert Mustacchi if (!is_x86_feature(x86_featureset, X86FSET_F16C)) 2718ebb8ac07SRobert Mustacchi *ecx &= ~CPUID_INTC_ECX_F16C; 2719245ac945SRobert Mustacchi if (!is_x86_feature(x86_featureset, X86FSET_FMA)) 2720245ac945SRobert Mustacchi *ecx &= ~CPUID_INTC_ECX_FMA; 2721245ac945SRobert Mustacchi if 
(!is_x86_feature(x86_featureset, X86FSET_BMI1)) 2722245ac945SRobert Mustacchi *ebx &= ~CPUID_INTC_EBX_7_0_BMI1; 2723245ac945SRobert Mustacchi if (!is_x86_feature(x86_featureset, X86FSET_BMI2)) 2724245ac945SRobert Mustacchi *ebx &= ~CPUID_INTC_EBX_7_0_BMI2; 2725245ac945SRobert Mustacchi if (!is_x86_feature(x86_featureset, X86FSET_AVX2)) 2726245ac945SRobert Mustacchi *ebx &= ~CPUID_INTC_EBX_7_0_AVX2; 2727d0f8ff6eSkk208521 27287c478bd9Sstevel@tonic-gate /* 27297c478bd9Sstevel@tonic-gate * [no explicit support required beyond x87 fp context] 27307c478bd9Sstevel@tonic-gate */ 27317c478bd9Sstevel@tonic-gate if (!fpu_exists) 27327c478bd9Sstevel@tonic-gate *edx &= ~(CPUID_INTC_EDX_FPU | CPUID_INTC_EDX_MMX); 27337c478bd9Sstevel@tonic-gate 27347c478bd9Sstevel@tonic-gate /* 27357c478bd9Sstevel@tonic-gate * Now map the supported feature vector to things that we 27367c478bd9Sstevel@tonic-gate * think userland will care about. 27377c478bd9Sstevel@tonic-gate */ 27387c478bd9Sstevel@tonic-gate if (*edx & CPUID_INTC_EDX_SEP) 27397c478bd9Sstevel@tonic-gate hwcap_flags |= AV_386_SEP; 27407c478bd9Sstevel@tonic-gate if (*edx & CPUID_INTC_EDX_SSE) 27417c478bd9Sstevel@tonic-gate hwcap_flags |= AV_386_FXSR | AV_386_SSE; 27427c478bd9Sstevel@tonic-gate if (*edx & CPUID_INTC_EDX_SSE2) 27437c478bd9Sstevel@tonic-gate hwcap_flags |= AV_386_SSE2; 27447c478bd9Sstevel@tonic-gate if (*ecx & CPUID_INTC_ECX_SSE3) 27457c478bd9Sstevel@tonic-gate hwcap_flags |= AV_386_SSE3; 2746d0f8ff6eSkk208521 if (*ecx & CPUID_INTC_ECX_SSSE3) 2747d0f8ff6eSkk208521 hwcap_flags |= AV_386_SSSE3; 2748d0f8ff6eSkk208521 if (*ecx & CPUID_INTC_ECX_SSE4_1) 2749d0f8ff6eSkk208521 hwcap_flags |= AV_386_SSE4_1; 2750d0f8ff6eSkk208521 if (*ecx & CPUID_INTC_ECX_SSE4_2) 2751d0f8ff6eSkk208521 hwcap_flags |= AV_386_SSE4_2; 27525087e485SKrishnendu Sadhukhan - Sun Microsystems if (*ecx & CPUID_INTC_ECX_MOVBE) 27535087e485SKrishnendu Sadhukhan - Sun Microsystems hwcap_flags |= AV_386_MOVBE; 2754a50a8b93SKuriakose Kuruvilla if (*ecx & CPUID_INTC_ECX_AES) 2755a50a8b93SKuriakose Kuruvilla hwcap_flags |= AV_386_AES; 2756a50a8b93SKuriakose Kuruvilla if (*ecx & CPUID_INTC_ECX_PCLMULQDQ) 2757a50a8b93SKuriakose Kuruvilla hwcap_flags |= AV_386_PCLMULQDQ; 27587af88ac7SKuriakose Kuruvilla if ((*ecx & CPUID_INTC_ECX_XSAVE) && 2759f3390f39SRobert Mustacchi (*ecx & CPUID_INTC_ECX_OSXSAVE)) { 27607af88ac7SKuriakose Kuruvilla hwcap_flags |= AV_386_XSAVE; 2761f3390f39SRobert Mustacchi 2762ebb8ac07SRobert Mustacchi if (*ecx & CPUID_INTC_ECX_AVX) { 2763f3390f39SRobert Mustacchi hwcap_flags |= AV_386_AVX; 2764ebb8ac07SRobert Mustacchi if (*ecx & CPUID_INTC_ECX_F16C) 2765ebb8ac07SRobert Mustacchi hwcap_flags_2 |= AV_386_2_F16C; 2766245ac945SRobert Mustacchi if (*ecx & CPUID_INTC_ECX_FMA) 2767245ac945SRobert Mustacchi hwcap_flags_2 |= AV_386_2_FMA; 2768245ac945SRobert Mustacchi if (*ebx & CPUID_INTC_EBX_7_0_BMI1) 2769245ac945SRobert Mustacchi hwcap_flags_2 |= AV_386_2_BMI1; 2770245ac945SRobert Mustacchi if (*ebx & CPUID_INTC_EBX_7_0_BMI2) 2771245ac945SRobert Mustacchi hwcap_flags_2 |= AV_386_2_BMI2; 2772245ac945SRobert Mustacchi if (*ebx & CPUID_INTC_EBX_7_0_AVX2) 2773245ac945SRobert Mustacchi hwcap_flags_2 |= AV_386_2_AVX2; 2774ebb8ac07SRobert Mustacchi } 2775f3390f39SRobert Mustacchi } 2776faa20166SBryan Cantrill if (*ecx & CPUID_INTC_ECX_VMX) 2777faa20166SBryan Cantrill hwcap_flags |= AV_386_VMX; 2778f8801251Skk208521 if (*ecx & CPUID_INTC_ECX_POPCNT) 2779f8801251Skk208521 hwcap_flags |= AV_386_POPCNT; 27807c478bd9Sstevel@tonic-gate if (*edx & CPUID_INTC_EDX_FPU) 
27817c478bd9Sstevel@tonic-gate hwcap_flags |= AV_386_FPU; 27827c478bd9Sstevel@tonic-gate if (*edx & CPUID_INTC_EDX_MMX) 27837c478bd9Sstevel@tonic-gate hwcap_flags |= AV_386_MMX; 27847c478bd9Sstevel@tonic-gate 27857c478bd9Sstevel@tonic-gate if (*edx & CPUID_INTC_EDX_TSC) 27867c478bd9Sstevel@tonic-gate hwcap_flags |= AV_386_TSC; 27877c478bd9Sstevel@tonic-gate if (*edx & CPUID_INTC_EDX_CX8) 27887c478bd9Sstevel@tonic-gate hwcap_flags |= AV_386_CX8; 27897c478bd9Sstevel@tonic-gate if (*edx & CPUID_INTC_EDX_CMOV) 27907c478bd9Sstevel@tonic-gate hwcap_flags |= AV_386_CMOV; 27917c478bd9Sstevel@tonic-gate if (*ecx & CPUID_INTC_ECX_CX16) 27927c478bd9Sstevel@tonic-gate hwcap_flags |= AV_386_CX16; 2793ebb8ac07SRobert Mustacchi 2794ebb8ac07SRobert Mustacchi if (*ecx & CPUID_INTC_ECX_RDRAND) 2795ebb8ac07SRobert Mustacchi hwcap_flags_2 |= AV_386_2_RDRAND; 27967c478bd9Sstevel@tonic-gate } 27977c478bd9Sstevel@tonic-gate 27987c478bd9Sstevel@tonic-gate if (cpi->cpi_xmaxeax < 0x80000001) 27997c478bd9Sstevel@tonic-gate goto pass4_done; 28007c478bd9Sstevel@tonic-gate 28017c478bd9Sstevel@tonic-gate switch (cpi->cpi_vendor) { 28028949bcd6Sandrei struct cpuid_regs cp; 2803ae115bc7Smrj uint32_t *edx, *ecx; 28047c478bd9Sstevel@tonic-gate 2805ae115bc7Smrj case X86_VENDOR_Intel: 2806ae115bc7Smrj /* 2807ae115bc7Smrj * Seems like Intel duplicated what we necessary 2808ae115bc7Smrj * here to make the initial crop of 64-bit OS's work. 2809ae115bc7Smrj * Hopefully, those are the only "extended" bits 2810ae115bc7Smrj * they'll add. 2811ae115bc7Smrj */ 2812ae115bc7Smrj /*FALLTHROUGH*/ 2813ae115bc7Smrj 28147c478bd9Sstevel@tonic-gate case X86_VENDOR_AMD: 28157c478bd9Sstevel@tonic-gate edx = &cpi->cpi_support[AMD_EDX_FEATURES]; 2816ae115bc7Smrj ecx = &cpi->cpi_support[AMD_ECX_FEATURES]; 28177c478bd9Sstevel@tonic-gate 28187c478bd9Sstevel@tonic-gate *edx = CPI_FEATURES_XTD_EDX(cpi); 2819ae115bc7Smrj *ecx = CPI_FEATURES_XTD_ECX(cpi); 2820ae115bc7Smrj 2821ae115bc7Smrj /* 2822ae115bc7Smrj * [these features require explicit kernel support] 2823ae115bc7Smrj */ 2824ae115bc7Smrj switch (cpi->cpi_vendor) { 2825ae115bc7Smrj case X86_VENDOR_Intel: 28267417cfdeSKuriakose Kuruvilla if (!is_x86_feature(x86_featureset, X86FSET_TSCP)) 2827d36ea5d8Ssudheer *edx &= ~CPUID_AMD_EDX_TSCP; 2828ae115bc7Smrj break; 2829ae115bc7Smrj 2830ae115bc7Smrj case X86_VENDOR_AMD: 28317417cfdeSKuriakose Kuruvilla if (!is_x86_feature(x86_featureset, X86FSET_TSCP)) 2832ae115bc7Smrj *edx &= ~CPUID_AMD_EDX_TSCP; 28337417cfdeSKuriakose Kuruvilla if (!is_x86_feature(x86_featureset, X86FSET_SSE4A)) 2834f8801251Skk208521 *ecx &= ~CPUID_AMD_ECX_SSE4A; 2835ae115bc7Smrj break; 2836ae115bc7Smrj 2837ae115bc7Smrj default: 2838ae115bc7Smrj break; 2839ae115bc7Smrj } 28407c478bd9Sstevel@tonic-gate 28417c478bd9Sstevel@tonic-gate /* 28427c478bd9Sstevel@tonic-gate * [no explicit support required beyond 28437c478bd9Sstevel@tonic-gate * x87 fp context and exception handlers] 28447c478bd9Sstevel@tonic-gate */ 28457c478bd9Sstevel@tonic-gate if (!fpu_exists) 28467c478bd9Sstevel@tonic-gate *edx &= ~(CPUID_AMD_EDX_MMXamd | 28477c478bd9Sstevel@tonic-gate CPUID_AMD_EDX_3DNow | CPUID_AMD_EDX_3DNowx); 28487c478bd9Sstevel@tonic-gate 28497417cfdeSKuriakose Kuruvilla if (!is_x86_feature(x86_featureset, X86FSET_NX)) 28507c478bd9Sstevel@tonic-gate *edx &= ~CPUID_AMD_EDX_NX; 2851ae115bc7Smrj #if !defined(__amd64) 28527c478bd9Sstevel@tonic-gate *edx &= ~CPUID_AMD_EDX_LM; 28537c478bd9Sstevel@tonic-gate #endif 28547c478bd9Sstevel@tonic-gate /* 28557c478bd9Sstevel@tonic-gate * Now map the supported feature 
vector to 28567c478bd9Sstevel@tonic-gate * things that we think userland will care about. 28577c478bd9Sstevel@tonic-gate */ 2858ae115bc7Smrj #if defined(__amd64) 28597c478bd9Sstevel@tonic-gate if (*edx & CPUID_AMD_EDX_SYSC) 28607c478bd9Sstevel@tonic-gate hwcap_flags |= AV_386_AMD_SYSC; 2861ae115bc7Smrj #endif 28627c478bd9Sstevel@tonic-gate if (*edx & CPUID_AMD_EDX_MMXamd) 28637c478bd9Sstevel@tonic-gate hwcap_flags |= AV_386_AMD_MMX; 28647c478bd9Sstevel@tonic-gate if (*edx & CPUID_AMD_EDX_3DNow) 28657c478bd9Sstevel@tonic-gate hwcap_flags |= AV_386_AMD_3DNow; 28667c478bd9Sstevel@tonic-gate if (*edx & CPUID_AMD_EDX_3DNowx) 28677c478bd9Sstevel@tonic-gate hwcap_flags |= AV_386_AMD_3DNowx; 2868faa20166SBryan Cantrill if (*ecx & CPUID_AMD_ECX_SVM) 2869faa20166SBryan Cantrill hwcap_flags |= AV_386_AMD_SVM; 2870ae115bc7Smrj 2871ae115bc7Smrj switch (cpi->cpi_vendor) { 2872ae115bc7Smrj case X86_VENDOR_AMD: 2873ae115bc7Smrj if (*edx & CPUID_AMD_EDX_TSCP) 2874ae115bc7Smrj hwcap_flags |= AV_386_TSCP; 2875ae115bc7Smrj if (*ecx & CPUID_AMD_ECX_AHF64) 2876ae115bc7Smrj hwcap_flags |= AV_386_AHF; 2877f8801251Skk208521 if (*ecx & CPUID_AMD_ECX_SSE4A) 2878f8801251Skk208521 hwcap_flags |= AV_386_AMD_SSE4A; 2879f8801251Skk208521 if (*ecx & CPUID_AMD_ECX_LZCNT) 2880f8801251Skk208521 hwcap_flags |= AV_386_AMD_LZCNT; 2881ae115bc7Smrj break; 2882ae115bc7Smrj 2883ae115bc7Smrj case X86_VENDOR_Intel: 2884d36ea5d8Ssudheer if (*edx & CPUID_AMD_EDX_TSCP) 2885d36ea5d8Ssudheer hwcap_flags |= AV_386_TSCP; 2886ae115bc7Smrj /* 2887ae115bc7Smrj * Aarrgh. 2888ae115bc7Smrj * Intel uses a different bit in the same word. 2889ae115bc7Smrj */ 2890ae115bc7Smrj if (*ecx & CPUID_INTC_ECX_AHF64) 2891ae115bc7Smrj hwcap_flags |= AV_386_AHF; 2892ae115bc7Smrj break; 2893ae115bc7Smrj 2894ae115bc7Smrj default: 2895ae115bc7Smrj break; 2896ae115bc7Smrj } 28977c478bd9Sstevel@tonic-gate break; 28987c478bd9Sstevel@tonic-gate 28997c478bd9Sstevel@tonic-gate case X86_VENDOR_TM: 29008949bcd6Sandrei cp.cp_eax = 0x80860001; 29018949bcd6Sandrei (void) __cpuid_insn(&cp); 29028949bcd6Sandrei cpi->cpi_support[TM_EDX_FEATURES] = cp.cp_edx; 29037c478bd9Sstevel@tonic-gate break; 29047c478bd9Sstevel@tonic-gate 29057c478bd9Sstevel@tonic-gate default: 29067c478bd9Sstevel@tonic-gate break; 29077c478bd9Sstevel@tonic-gate } 29087c478bd9Sstevel@tonic-gate 29097c478bd9Sstevel@tonic-gate pass4_done: 29107c478bd9Sstevel@tonic-gate cpi->cpi_pass = 4; 2911ebb8ac07SRobert Mustacchi if (hwcap_out != NULL) { 2912ebb8ac07SRobert Mustacchi hwcap_out[0] = hwcap_flags; 2913ebb8ac07SRobert Mustacchi hwcap_out[1] = hwcap_flags_2; 2914ebb8ac07SRobert Mustacchi } 29157c478bd9Sstevel@tonic-gate } 29167c478bd9Sstevel@tonic-gate 29177c478bd9Sstevel@tonic-gate 29187c478bd9Sstevel@tonic-gate /* 29197c478bd9Sstevel@tonic-gate * Simulate the cpuid instruction using the data we previously 29207c478bd9Sstevel@tonic-gate * captured about this CPU. We try our best to return the truth 29217c478bd9Sstevel@tonic-gate * about the hardware, independently of kernel support. 
29227c478bd9Sstevel@tonic-gate */ 29237c478bd9Sstevel@tonic-gate uint32_t 29248949bcd6Sandrei cpuid_insn(cpu_t *cpu, struct cpuid_regs *cp) 29257c478bd9Sstevel@tonic-gate { 29267c478bd9Sstevel@tonic-gate struct cpuid_info *cpi; 29278949bcd6Sandrei struct cpuid_regs *xcp; 29287c478bd9Sstevel@tonic-gate 29297c478bd9Sstevel@tonic-gate if (cpu == NULL) 29307c478bd9Sstevel@tonic-gate cpu = CPU; 29317c478bd9Sstevel@tonic-gate cpi = cpu->cpu_m.mcpu_cpi; 29327c478bd9Sstevel@tonic-gate 29337c478bd9Sstevel@tonic-gate ASSERT(cpuid_checkpass(cpu, 3)); 29347c478bd9Sstevel@tonic-gate 29357c478bd9Sstevel@tonic-gate /* 29367c478bd9Sstevel@tonic-gate * CPUID data is cached in two separate places: cpi_std for standard 29377c478bd9Sstevel@tonic-gate * CPUID functions, and cpi_extd for extended CPUID functions. 29387c478bd9Sstevel@tonic-gate */ 29398949bcd6Sandrei if (cp->cp_eax <= cpi->cpi_maxeax && cp->cp_eax < NMAX_CPI_STD) 29408949bcd6Sandrei xcp = &cpi->cpi_std[cp->cp_eax]; 29418949bcd6Sandrei else if (cp->cp_eax >= 0x80000000 && cp->cp_eax <= cpi->cpi_xmaxeax && 29428949bcd6Sandrei cp->cp_eax < 0x80000000 + NMAX_CPI_EXTD) 29438949bcd6Sandrei xcp = &cpi->cpi_extd[cp->cp_eax - 0x80000000]; 29447c478bd9Sstevel@tonic-gate else 29457c478bd9Sstevel@tonic-gate /* 29467c478bd9Sstevel@tonic-gate * The caller is asking for data from an input parameter which 29477c478bd9Sstevel@tonic-gate * the kernel has not cached. In this case we go fetch from 29487c478bd9Sstevel@tonic-gate * the hardware and return the data directly to the user. 29497c478bd9Sstevel@tonic-gate */ 29508949bcd6Sandrei return (__cpuid_insn(cp)); 29518949bcd6Sandrei 29528949bcd6Sandrei cp->cp_eax = xcp->cp_eax; 29538949bcd6Sandrei cp->cp_ebx = xcp->cp_ebx; 29548949bcd6Sandrei cp->cp_ecx = xcp->cp_ecx; 29558949bcd6Sandrei cp->cp_edx = xcp->cp_edx; 29567c478bd9Sstevel@tonic-gate return (cp->cp_eax); 29577c478bd9Sstevel@tonic-gate } 29587c478bd9Sstevel@tonic-gate 29597c478bd9Sstevel@tonic-gate int 29607c478bd9Sstevel@tonic-gate cpuid_checkpass(cpu_t *cpu, int pass) 29617c478bd9Sstevel@tonic-gate { 29627c478bd9Sstevel@tonic-gate return (cpu != NULL && cpu->cpu_m.mcpu_cpi != NULL && 29637c478bd9Sstevel@tonic-gate cpu->cpu_m.mcpu_cpi->cpi_pass >= pass); 29647c478bd9Sstevel@tonic-gate } 29657c478bd9Sstevel@tonic-gate 29667c478bd9Sstevel@tonic-gate int 29677c478bd9Sstevel@tonic-gate cpuid_getbrandstr(cpu_t *cpu, char *s, size_t n) 29687c478bd9Sstevel@tonic-gate { 29697c478bd9Sstevel@tonic-gate ASSERT(cpuid_checkpass(cpu, 3)); 29707c478bd9Sstevel@tonic-gate 29717c478bd9Sstevel@tonic-gate return (snprintf(s, n, "%s", cpu->cpu_m.mcpu_cpi->cpi_brandstr)); 29727c478bd9Sstevel@tonic-gate } 29737c478bd9Sstevel@tonic-gate 29747c478bd9Sstevel@tonic-gate int 29758949bcd6Sandrei cpuid_is_cmt(cpu_t *cpu) 29767c478bd9Sstevel@tonic-gate { 29777c478bd9Sstevel@tonic-gate if (cpu == NULL) 29787c478bd9Sstevel@tonic-gate cpu = CPU; 29797c478bd9Sstevel@tonic-gate 29807c478bd9Sstevel@tonic-gate ASSERT(cpuid_checkpass(cpu, 1)); 29817c478bd9Sstevel@tonic-gate 29827c478bd9Sstevel@tonic-gate return (cpu->cpu_m.mcpu_cpi->cpi_chipid >= 0); 29837c478bd9Sstevel@tonic-gate } 29847c478bd9Sstevel@tonic-gate 29857c478bd9Sstevel@tonic-gate /* 29867c478bd9Sstevel@tonic-gate * AMD and Intel both implement the 64-bit variant of the syscall 29877c478bd9Sstevel@tonic-gate * instruction (syscallq), so if there's -any- support for syscall, 29887c478bd9Sstevel@tonic-gate * cpuid currently says "yes, we support this". 
29897c478bd9Sstevel@tonic-gate * 29907c478bd9Sstevel@tonic-gate * However, Intel decided to -not- implement the 32-bit variant of the 29917c478bd9Sstevel@tonic-gate * syscall instruction, so we provide a predicate to allow our caller 29927c478bd9Sstevel@tonic-gate * to test that subtlety here. 2993843e1988Sjohnlev * 2994843e1988Sjohnlev * XXPV Currently, 32-bit syscall instructions don't work via the hypervisor, 2995843e1988Sjohnlev * even in the case where the hardware would in fact support it. 29967c478bd9Sstevel@tonic-gate */ 29977c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 29987c478bd9Sstevel@tonic-gate int 29997c478bd9Sstevel@tonic-gate cpuid_syscall32_insn(cpu_t *cpu) 30007c478bd9Sstevel@tonic-gate { 30017c478bd9Sstevel@tonic-gate ASSERT(cpuid_checkpass((cpu == NULL ? CPU : cpu), 1)); 30027c478bd9Sstevel@tonic-gate 3003843e1988Sjohnlev #if !defined(__xpv) 3004ae115bc7Smrj if (cpu == NULL) 3005ae115bc7Smrj cpu = CPU; 3006ae115bc7Smrj 3007ae115bc7Smrj /*CSTYLED*/ 3008ae115bc7Smrj { 3009ae115bc7Smrj struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; 3010ae115bc7Smrj 3011ae115bc7Smrj if (cpi->cpi_vendor == X86_VENDOR_AMD && 3012ae115bc7Smrj cpi->cpi_xmaxeax >= 0x80000001 && 3013ae115bc7Smrj (CPI_FEATURES_XTD_EDX(cpi) & CPUID_AMD_EDX_SYSC)) 3014ae115bc7Smrj return (1); 3015ae115bc7Smrj } 3016843e1988Sjohnlev #endif 30177c478bd9Sstevel@tonic-gate return (0); 30187c478bd9Sstevel@tonic-gate } 30197c478bd9Sstevel@tonic-gate 30207c478bd9Sstevel@tonic-gate int 30217c478bd9Sstevel@tonic-gate cpuid_getidstr(cpu_t *cpu, char *s, size_t n) 30227c478bd9Sstevel@tonic-gate { 30237c478bd9Sstevel@tonic-gate struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; 30247c478bd9Sstevel@tonic-gate 30257c478bd9Sstevel@tonic-gate static const char fmt[] = 3026ecfa43a5Sdmick "x86 (%s %X family %d model %d step %d clock %d MHz)"; 30277c478bd9Sstevel@tonic-gate static const char fmt_ht[] = 3028ecfa43a5Sdmick "x86 (chipid 0x%x %s %X family %d model %d step %d clock %d MHz)"; 30297c478bd9Sstevel@tonic-gate 30307c478bd9Sstevel@tonic-gate ASSERT(cpuid_checkpass(cpu, 1)); 30317c478bd9Sstevel@tonic-gate 30328949bcd6Sandrei if (cpuid_is_cmt(cpu)) 30337c478bd9Sstevel@tonic-gate return (snprintf(s, n, fmt_ht, cpi->cpi_chipid, 3034ecfa43a5Sdmick cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax, 3035ecfa43a5Sdmick cpi->cpi_family, cpi->cpi_model, 30367c478bd9Sstevel@tonic-gate cpi->cpi_step, cpu->cpu_type_info.pi_clock)); 30377c478bd9Sstevel@tonic-gate return (snprintf(s, n, fmt, 3038ecfa43a5Sdmick cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax, 3039ecfa43a5Sdmick cpi->cpi_family, cpi->cpi_model, 30407c478bd9Sstevel@tonic-gate cpi->cpi_step, cpu->cpu_type_info.pi_clock)); 30417c478bd9Sstevel@tonic-gate } 30427c478bd9Sstevel@tonic-gate 30437c478bd9Sstevel@tonic-gate const char * 30447c478bd9Sstevel@tonic-gate cpuid_getvendorstr(cpu_t *cpu) 30457c478bd9Sstevel@tonic-gate { 30467c478bd9Sstevel@tonic-gate ASSERT(cpuid_checkpass(cpu, 1)); 30477c478bd9Sstevel@tonic-gate return ((const char *)cpu->cpu_m.mcpu_cpi->cpi_vendorstr); 30487c478bd9Sstevel@tonic-gate } 30497c478bd9Sstevel@tonic-gate 30507c478bd9Sstevel@tonic-gate uint_t 30517c478bd9Sstevel@tonic-gate cpuid_getvendor(cpu_t *cpu) 30527c478bd9Sstevel@tonic-gate { 30537c478bd9Sstevel@tonic-gate ASSERT(cpuid_checkpass(cpu, 1)); 30547c478bd9Sstevel@tonic-gate return (cpu->cpu_m.mcpu_cpi->cpi_vendor); 30557c478bd9Sstevel@tonic-gate } 30567c478bd9Sstevel@tonic-gate 30577c478bd9Sstevel@tonic-gate uint_t 30587c478bd9Sstevel@tonic-gate cpuid_getfamily(cpu_t *cpu) 30597c478bd9Sstevel@tonic-gate { 
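	/*
	 * Illustrative usage sketch, not part of the original source: the
	 * simple accessors in this stretch of the file (cpuid_getfamily(),
	 * cpuid_getmodel(), cpuid_getstep(), and friends) just return fields
	 * cached by pass 1, so a caller that already holds a cpu_t can
	 * summarize a processor with something like
	 *
	 *	char buf[128];
	 *	(void) cpuid_getidstr(cpu, buf, sizeof (buf));
	 *
	 * which, for a hypothetical part, might yield
	 * "x86 (GenuineIntel 6FB family 6 model 15 step 11 clock 2667 MHz)".
	 * The vendor, signature and clock values shown are illustrative only.
	 */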
30607c478bd9Sstevel@tonic-gate ASSERT(cpuid_checkpass(cpu, 1)); 30617c478bd9Sstevel@tonic-gate return (cpu->cpu_m.mcpu_cpi->cpi_family); 30627c478bd9Sstevel@tonic-gate } 30637c478bd9Sstevel@tonic-gate 30647c478bd9Sstevel@tonic-gate uint_t 30657c478bd9Sstevel@tonic-gate cpuid_getmodel(cpu_t *cpu) 30667c478bd9Sstevel@tonic-gate { 30677c478bd9Sstevel@tonic-gate ASSERT(cpuid_checkpass(cpu, 1)); 30687c478bd9Sstevel@tonic-gate return (cpu->cpu_m.mcpu_cpi->cpi_model); 30697c478bd9Sstevel@tonic-gate } 30707c478bd9Sstevel@tonic-gate 30717c478bd9Sstevel@tonic-gate uint_t 30727c478bd9Sstevel@tonic-gate cpuid_get_ncpu_per_chip(cpu_t *cpu) 30737c478bd9Sstevel@tonic-gate { 30747c478bd9Sstevel@tonic-gate ASSERT(cpuid_checkpass(cpu, 1)); 30757c478bd9Sstevel@tonic-gate return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_per_chip); 30767c478bd9Sstevel@tonic-gate } 30777c478bd9Sstevel@tonic-gate 30787c478bd9Sstevel@tonic-gate uint_t 30798949bcd6Sandrei cpuid_get_ncore_per_chip(cpu_t *cpu) 30808949bcd6Sandrei { 30818949bcd6Sandrei ASSERT(cpuid_checkpass(cpu, 1)); 30828949bcd6Sandrei return (cpu->cpu_m.mcpu_cpi->cpi_ncore_per_chip); 30838949bcd6Sandrei } 30848949bcd6Sandrei 30858949bcd6Sandrei uint_t 3086d129bde2Sesaxe cpuid_get_ncpu_sharing_last_cache(cpu_t *cpu) 3087d129bde2Sesaxe { 3088d129bde2Sesaxe ASSERT(cpuid_checkpass(cpu, 2)); 3089d129bde2Sesaxe return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_shr_last_cache); 3090d129bde2Sesaxe } 3091d129bde2Sesaxe 3092d129bde2Sesaxe id_t 3093d129bde2Sesaxe cpuid_get_last_lvl_cacheid(cpu_t *cpu) 3094d129bde2Sesaxe { 3095d129bde2Sesaxe ASSERT(cpuid_checkpass(cpu, 2)); 3096d129bde2Sesaxe return (cpu->cpu_m.mcpu_cpi->cpi_last_lvl_cacheid); 3097d129bde2Sesaxe } 3098d129bde2Sesaxe 3099d129bde2Sesaxe uint_t 31007c478bd9Sstevel@tonic-gate cpuid_getstep(cpu_t *cpu) 31017c478bd9Sstevel@tonic-gate { 31027c478bd9Sstevel@tonic-gate ASSERT(cpuid_checkpass(cpu, 1)); 31037c478bd9Sstevel@tonic-gate return (cpu->cpu_m.mcpu_cpi->cpi_step); 31047c478bd9Sstevel@tonic-gate } 31057c478bd9Sstevel@tonic-gate 31062449e17fSsherrym uint_t 31072449e17fSsherrym cpuid_getsig(struct cpu *cpu) 31082449e17fSsherrym { 31092449e17fSsherrym ASSERT(cpuid_checkpass(cpu, 1)); 31102449e17fSsherrym return (cpu->cpu_m.mcpu_cpi->cpi_std[1].cp_eax); 31112449e17fSsherrym } 31122449e17fSsherrym 31138a40a695Sgavinm uint32_t 31148a40a695Sgavinm cpuid_getchiprev(struct cpu *cpu) 31158a40a695Sgavinm { 31168a40a695Sgavinm ASSERT(cpuid_checkpass(cpu, 1)); 31178a40a695Sgavinm return (cpu->cpu_m.mcpu_cpi->cpi_chiprev); 31188a40a695Sgavinm } 31198a40a695Sgavinm 31208a40a695Sgavinm const char * 31218a40a695Sgavinm cpuid_getchiprevstr(struct cpu *cpu) 31228a40a695Sgavinm { 31238a40a695Sgavinm ASSERT(cpuid_checkpass(cpu, 1)); 31248a40a695Sgavinm return (cpu->cpu_m.mcpu_cpi->cpi_chiprevstr); 31258a40a695Sgavinm } 31268a40a695Sgavinm 31278a40a695Sgavinm uint32_t 31288a40a695Sgavinm cpuid_getsockettype(struct cpu *cpu) 31298a40a695Sgavinm { 31308a40a695Sgavinm ASSERT(cpuid_checkpass(cpu, 1)); 31318a40a695Sgavinm return (cpu->cpu_m.mcpu_cpi->cpi_socket); 31328a40a695Sgavinm } 31338a40a695Sgavinm 313489e921d5SKuriakose Kuruvilla const char * 313589e921d5SKuriakose Kuruvilla cpuid_getsocketstr(cpu_t *cpu) 313689e921d5SKuriakose Kuruvilla { 313789e921d5SKuriakose Kuruvilla static const char *socketstr = NULL; 313889e921d5SKuriakose Kuruvilla struct cpuid_info *cpi; 313989e921d5SKuriakose Kuruvilla 314089e921d5SKuriakose Kuruvilla ASSERT(cpuid_checkpass(cpu, 1)); 314189e921d5SKuriakose Kuruvilla cpi = cpu->cpu_m.mcpu_cpi; 314289e921d5SKuriakose Kuruvilla 
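	/*
	 * Editorial sketch, not part of the original source: the lookup below
	 * is done at most once.  The first caller resolves the socket name
	 * through _cpuid_sktstr() and parks it in the static pointer, and
	 * every later call, on any CPU, simply returns that cached string
	 * (a hypothetical AMD part might report, say, "Socket AM2"; the
	 * exact string is an assumption here, not taken from this file).
	 */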
314389e921d5SKuriakose Kuruvilla /* Assume that socket types are the same across the system */ 314489e921d5SKuriakose Kuruvilla if (socketstr == NULL) 314589e921d5SKuriakose Kuruvilla socketstr = _cpuid_sktstr(cpi->cpi_vendor, cpi->cpi_family, 314689e921d5SKuriakose Kuruvilla cpi->cpi_model, cpi->cpi_step); 314789e921d5SKuriakose Kuruvilla 314889e921d5SKuriakose Kuruvilla 314989e921d5SKuriakose Kuruvilla return (socketstr); 315089e921d5SKuriakose Kuruvilla } 315189e921d5SKuriakose Kuruvilla 3152fb2f18f8Sesaxe int 3153fb2f18f8Sesaxe cpuid_get_chipid(cpu_t *cpu) 31547c478bd9Sstevel@tonic-gate { 31557c478bd9Sstevel@tonic-gate ASSERT(cpuid_checkpass(cpu, 1)); 31567c478bd9Sstevel@tonic-gate 31578949bcd6Sandrei if (cpuid_is_cmt(cpu)) 31587c478bd9Sstevel@tonic-gate return (cpu->cpu_m.mcpu_cpi->cpi_chipid); 31597c478bd9Sstevel@tonic-gate return (cpu->cpu_id); 31607c478bd9Sstevel@tonic-gate } 31617c478bd9Sstevel@tonic-gate 31628949bcd6Sandrei id_t 3163fb2f18f8Sesaxe cpuid_get_coreid(cpu_t *cpu) 31648949bcd6Sandrei { 31658949bcd6Sandrei ASSERT(cpuid_checkpass(cpu, 1)); 31668949bcd6Sandrei return (cpu->cpu_m.mcpu_cpi->cpi_coreid); 31678949bcd6Sandrei } 31688949bcd6Sandrei 31697c478bd9Sstevel@tonic-gate int 317010569901Sgavinm cpuid_get_pkgcoreid(cpu_t *cpu) 317110569901Sgavinm { 317210569901Sgavinm ASSERT(cpuid_checkpass(cpu, 1)); 317310569901Sgavinm return (cpu->cpu_m.mcpu_cpi->cpi_pkgcoreid); 317410569901Sgavinm } 317510569901Sgavinm 317610569901Sgavinm int 3177fb2f18f8Sesaxe cpuid_get_clogid(cpu_t *cpu) 31787c478bd9Sstevel@tonic-gate { 31797c478bd9Sstevel@tonic-gate ASSERT(cpuid_checkpass(cpu, 1)); 31807c478bd9Sstevel@tonic-gate return (cpu->cpu_m.mcpu_cpi->cpi_clogid); 31817c478bd9Sstevel@tonic-gate } 31827c478bd9Sstevel@tonic-gate 3183b885580bSAlexander Kolbasov int 3184b885580bSAlexander Kolbasov cpuid_get_cacheid(cpu_t *cpu) 3185b885580bSAlexander Kolbasov { 3186b885580bSAlexander Kolbasov ASSERT(cpuid_checkpass(cpu, 1)); 3187b885580bSAlexander Kolbasov return (cpu->cpu_m.mcpu_cpi->cpi_last_lvl_cacheid); 3188b885580bSAlexander Kolbasov } 3189b885580bSAlexander Kolbasov 31908031591dSSrihari Venkatesan uint_t 31918031591dSSrihari Venkatesan cpuid_get_procnodeid(cpu_t *cpu) 31928031591dSSrihari Venkatesan { 31938031591dSSrihari Venkatesan ASSERT(cpuid_checkpass(cpu, 1)); 31948031591dSSrihari Venkatesan return (cpu->cpu_m.mcpu_cpi->cpi_procnodeid); 31958031591dSSrihari Venkatesan } 31968031591dSSrihari Venkatesan 31978031591dSSrihari Venkatesan uint_t 31988031591dSSrihari Venkatesan cpuid_get_procnodes_per_pkg(cpu_t *cpu) 31998031591dSSrihari Venkatesan { 32008031591dSSrihari Venkatesan ASSERT(cpuid_checkpass(cpu, 1)); 32018031591dSSrihari Venkatesan return (cpu->cpu_m.mcpu_cpi->cpi_procnodes_per_pkg); 32028031591dSSrihari Venkatesan } 32038031591dSSrihari Venkatesan 32047660e73fSHans Rosenfeld uint_t 32057660e73fSHans Rosenfeld cpuid_get_compunitid(cpu_t *cpu) 32067660e73fSHans Rosenfeld { 32077660e73fSHans Rosenfeld ASSERT(cpuid_checkpass(cpu, 1)); 32087660e73fSHans Rosenfeld return (cpu->cpu_m.mcpu_cpi->cpi_compunitid); 32097660e73fSHans Rosenfeld } 32107660e73fSHans Rosenfeld 32117660e73fSHans Rosenfeld uint_t 32127660e73fSHans Rosenfeld cpuid_get_cores_per_compunit(cpu_t *cpu) 32137660e73fSHans Rosenfeld { 32147660e73fSHans Rosenfeld ASSERT(cpuid_checkpass(cpu, 1)); 32157660e73fSHans Rosenfeld return (cpu->cpu_m.mcpu_cpi->cpi_cores_per_compunit); 32167660e73fSHans Rosenfeld } 32177660e73fSHans Rosenfeld 32182ef50f01SJoe Bonasera /*ARGSUSED*/ 32192ef50f01SJoe Bonasera int 32202ef50f01SJoe 
Bonasera cpuid_have_cr8access(cpu_t *cpu) 32212ef50f01SJoe Bonasera { 32222ef50f01SJoe Bonasera #if defined(__amd64) 32232ef50f01SJoe Bonasera return (1); 32242ef50f01SJoe Bonasera #else 32252ef50f01SJoe Bonasera struct cpuid_info *cpi; 32262ef50f01SJoe Bonasera 32272ef50f01SJoe Bonasera ASSERT(cpu != NULL); 32282ef50f01SJoe Bonasera cpi = cpu->cpu_m.mcpu_cpi; 32292ef50f01SJoe Bonasera if (cpi->cpi_vendor == X86_VENDOR_AMD && cpi->cpi_maxeax >= 1 && 32302ef50f01SJoe Bonasera (CPI_FEATURES_XTD_ECX(cpi) & CPUID_AMD_ECX_CR8D) != 0) 32312ef50f01SJoe Bonasera return (1); 32322ef50f01SJoe Bonasera return (0); 32332ef50f01SJoe Bonasera #endif 32342ef50f01SJoe Bonasera } 32352ef50f01SJoe Bonasera 3236fa96bd91SMichael Corcoran uint32_t 3237fa96bd91SMichael Corcoran cpuid_get_apicid(cpu_t *cpu) 3238fa96bd91SMichael Corcoran { 3239fa96bd91SMichael Corcoran ASSERT(cpuid_checkpass(cpu, 1)); 3240fa96bd91SMichael Corcoran if (cpu->cpu_m.mcpu_cpi->cpi_maxeax < 1) { 3241fa96bd91SMichael Corcoran return (UINT32_MAX); 3242fa96bd91SMichael Corcoran } else { 3243fa96bd91SMichael Corcoran return (cpu->cpu_m.mcpu_cpi->cpi_apicid); 3244fa96bd91SMichael Corcoran } 3245fa96bd91SMichael Corcoran } 3246fa96bd91SMichael Corcoran 32477c478bd9Sstevel@tonic-gate void 32487c478bd9Sstevel@tonic-gate cpuid_get_addrsize(cpu_t *cpu, uint_t *pabits, uint_t *vabits) 32497c478bd9Sstevel@tonic-gate { 32507c478bd9Sstevel@tonic-gate struct cpuid_info *cpi; 32517c478bd9Sstevel@tonic-gate 32527c478bd9Sstevel@tonic-gate if (cpu == NULL) 32537c478bd9Sstevel@tonic-gate cpu = CPU; 32547c478bd9Sstevel@tonic-gate cpi = cpu->cpu_m.mcpu_cpi; 32557c478bd9Sstevel@tonic-gate 32567c478bd9Sstevel@tonic-gate ASSERT(cpuid_checkpass(cpu, 1)); 32577c478bd9Sstevel@tonic-gate 32587c478bd9Sstevel@tonic-gate if (pabits) 32597c478bd9Sstevel@tonic-gate *pabits = cpi->cpi_pabits; 32607c478bd9Sstevel@tonic-gate if (vabits) 32617c478bd9Sstevel@tonic-gate *vabits = cpi->cpi_vabits; 32627c478bd9Sstevel@tonic-gate } 32637c478bd9Sstevel@tonic-gate 32647c478bd9Sstevel@tonic-gate /* 32657c478bd9Sstevel@tonic-gate * Returns the number of data TLB entries for a corresponding 32667c478bd9Sstevel@tonic-gate * pagesize. If it can't be computed, or isn't known, the 32677c478bd9Sstevel@tonic-gate * routine returns zero. If you ask about an architecturally 32687c478bd9Sstevel@tonic-gate * impossible pagesize, the routine will panic (so that the 32697c478bd9Sstevel@tonic-gate * hat implementor knows that things are inconsistent.) 
32707c478bd9Sstevel@tonic-gate */ 32717c478bd9Sstevel@tonic-gate uint_t 32727c478bd9Sstevel@tonic-gate cpuid_get_dtlb_nent(cpu_t *cpu, size_t pagesize) 32737c478bd9Sstevel@tonic-gate { 32747c478bd9Sstevel@tonic-gate struct cpuid_info *cpi; 32757c478bd9Sstevel@tonic-gate uint_t dtlb_nent = 0; 32767c478bd9Sstevel@tonic-gate 32777c478bd9Sstevel@tonic-gate if (cpu == NULL) 32787c478bd9Sstevel@tonic-gate cpu = CPU; 32797c478bd9Sstevel@tonic-gate cpi = cpu->cpu_m.mcpu_cpi; 32807c478bd9Sstevel@tonic-gate 32817c478bd9Sstevel@tonic-gate ASSERT(cpuid_checkpass(cpu, 1)); 32827c478bd9Sstevel@tonic-gate 32837c478bd9Sstevel@tonic-gate /* 32847c478bd9Sstevel@tonic-gate * Check the L2 TLB info 32857c478bd9Sstevel@tonic-gate */ 32867c478bd9Sstevel@tonic-gate if (cpi->cpi_xmaxeax >= 0x80000006) { 32878949bcd6Sandrei struct cpuid_regs *cp = &cpi->cpi_extd[6]; 32887c478bd9Sstevel@tonic-gate 32897c478bd9Sstevel@tonic-gate switch (pagesize) { 32907c478bd9Sstevel@tonic-gate 32917c478bd9Sstevel@tonic-gate case 4 * 1024: 32927c478bd9Sstevel@tonic-gate /* 32937c478bd9Sstevel@tonic-gate * All zero in the top 16 bits of the register 32947c478bd9Sstevel@tonic-gate * indicates a unified TLB. Size is in low 16 bits. 32957c478bd9Sstevel@tonic-gate */ 32967c478bd9Sstevel@tonic-gate if ((cp->cp_ebx & 0xffff0000) == 0) 32977c478bd9Sstevel@tonic-gate dtlb_nent = cp->cp_ebx & 0x0000ffff; 32987c478bd9Sstevel@tonic-gate else 32997c478bd9Sstevel@tonic-gate dtlb_nent = BITX(cp->cp_ebx, 27, 16); 33007c478bd9Sstevel@tonic-gate break; 33017c478bd9Sstevel@tonic-gate 33027c478bd9Sstevel@tonic-gate case 2 * 1024 * 1024: 33037c478bd9Sstevel@tonic-gate if ((cp->cp_eax & 0xffff0000) == 0) 33047c478bd9Sstevel@tonic-gate dtlb_nent = cp->cp_eax & 0x0000ffff; 33057c478bd9Sstevel@tonic-gate else 33067c478bd9Sstevel@tonic-gate dtlb_nent = BITX(cp->cp_eax, 27, 16); 33077c478bd9Sstevel@tonic-gate break; 33087c478bd9Sstevel@tonic-gate 33097c478bd9Sstevel@tonic-gate default: 33107c478bd9Sstevel@tonic-gate panic("unknown L2 pagesize"); 33117c478bd9Sstevel@tonic-gate /*NOTREACHED*/ 33127c478bd9Sstevel@tonic-gate } 33137c478bd9Sstevel@tonic-gate } 33147c478bd9Sstevel@tonic-gate 33157c478bd9Sstevel@tonic-gate if (dtlb_nent != 0) 33167c478bd9Sstevel@tonic-gate return (dtlb_nent); 33177c478bd9Sstevel@tonic-gate 33187c478bd9Sstevel@tonic-gate /* 33197c478bd9Sstevel@tonic-gate * No L2 TLB support for this size, try L1. 33207c478bd9Sstevel@tonic-gate */ 33217c478bd9Sstevel@tonic-gate if (cpi->cpi_xmaxeax >= 0x80000005) { 33228949bcd6Sandrei struct cpuid_regs *cp = &cpi->cpi_extd[5]; 33237c478bd9Sstevel@tonic-gate 33247c478bd9Sstevel@tonic-gate switch (pagesize) { 33257c478bd9Sstevel@tonic-gate case 4 * 1024: 33267c478bd9Sstevel@tonic-gate dtlb_nent = BITX(cp->cp_ebx, 23, 16); 33277c478bd9Sstevel@tonic-gate break; 33287c478bd9Sstevel@tonic-gate case 2 * 1024 * 1024: 33297c478bd9Sstevel@tonic-gate dtlb_nent = BITX(cp->cp_eax, 23, 16); 33307c478bd9Sstevel@tonic-gate break; 33317c478bd9Sstevel@tonic-gate default: 33327c478bd9Sstevel@tonic-gate panic("unknown L1 d-TLB pagesize"); 33337c478bd9Sstevel@tonic-gate /*NOTREACHED*/ 33347c478bd9Sstevel@tonic-gate } 33357c478bd9Sstevel@tonic-gate } 33367c478bd9Sstevel@tonic-gate 33377c478bd9Sstevel@tonic-gate return (dtlb_nent); 33387c478bd9Sstevel@tonic-gate } 33397c478bd9Sstevel@tonic-gate 33407c478bd9Sstevel@tonic-gate /* 33417c478bd9Sstevel@tonic-gate * Return 0 if the erratum is not present or not applicable, positive 33427c478bd9Sstevel@tonic-gate * if it is, and negative if the status of the erratum is unknown. 
33437c478bd9Sstevel@tonic-gate * 33447c478bd9Sstevel@tonic-gate * See "Revision Guide for AMD Athlon(tm) 64 and AMD Opteron(tm) 33452201b277Skucharsk * Processors" #25759, Rev 3.57, August 2005 33467c478bd9Sstevel@tonic-gate */ 33477c478bd9Sstevel@tonic-gate int 33487c478bd9Sstevel@tonic-gate cpuid_opteron_erratum(cpu_t *cpu, uint_t erratum) 33497c478bd9Sstevel@tonic-gate { 33507c478bd9Sstevel@tonic-gate struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; 33518949bcd6Sandrei uint_t eax; 33527c478bd9Sstevel@tonic-gate 3353ea99987eSsethg /* 3354ea99987eSsethg * Bail out if this CPU isn't an AMD CPU, or if it's 3355ea99987eSsethg * a legacy (32-bit) AMD CPU. 3356ea99987eSsethg */ 3357ea99987eSsethg if (cpi->cpi_vendor != X86_VENDOR_AMD || 3358875b116eSkchow cpi->cpi_family == 4 || cpi->cpi_family == 5 || 3359875b116eSkchow cpi->cpi_family == 6) 33608a40a695Sgavinm 33617c478bd9Sstevel@tonic-gate return (0); 33627c478bd9Sstevel@tonic-gate 33637c478bd9Sstevel@tonic-gate eax = cpi->cpi_std[1].cp_eax; 33647c478bd9Sstevel@tonic-gate 33657c478bd9Sstevel@tonic-gate #define SH_B0(eax) (eax == 0xf40 || eax == 0xf50) 33667c478bd9Sstevel@tonic-gate #define SH_B3(eax) (eax == 0xf51) 3367ee88d2b9Skchow #define B(eax) (SH_B0(eax) || SH_B3(eax)) 33687c478bd9Sstevel@tonic-gate 33697c478bd9Sstevel@tonic-gate #define SH_C0(eax) (eax == 0xf48 || eax == 0xf58) 33707c478bd9Sstevel@tonic-gate 33717c478bd9Sstevel@tonic-gate #define SH_CG(eax) (eax == 0xf4a || eax == 0xf5a || eax == 0xf7a) 33727c478bd9Sstevel@tonic-gate #define DH_CG(eax) (eax == 0xfc0 || eax == 0xfe0 || eax == 0xff0) 33737c478bd9Sstevel@tonic-gate #define CH_CG(eax) (eax == 0xf82 || eax == 0xfb2) 3374ee88d2b9Skchow #define CG(eax) (SH_CG(eax) || DH_CG(eax) || CH_CG(eax)) 33757c478bd9Sstevel@tonic-gate 33767c478bd9Sstevel@tonic-gate #define SH_D0(eax) (eax == 0x10f40 || eax == 0x10f50 || eax == 0x10f70) 33777c478bd9Sstevel@tonic-gate #define DH_D0(eax) (eax == 0x10fc0 || eax == 0x10ff0) 33787c478bd9Sstevel@tonic-gate #define CH_D0(eax) (eax == 0x10f80 || eax == 0x10fb0) 3379ee88d2b9Skchow #define D0(eax) (SH_D0(eax) || DH_D0(eax) || CH_D0(eax)) 33807c478bd9Sstevel@tonic-gate 33817c478bd9Sstevel@tonic-gate #define SH_E0(eax) (eax == 0x20f50 || eax == 0x20f40 || eax == 0x20f70) 33827c478bd9Sstevel@tonic-gate #define JH_E1(eax) (eax == 0x20f10) /* JH8_E0 had 0x20f30 */ 33837c478bd9Sstevel@tonic-gate #define DH_E3(eax) (eax == 0x20fc0 || eax == 0x20ff0) 33847c478bd9Sstevel@tonic-gate #define SH_E4(eax) (eax == 0x20f51 || eax == 0x20f71) 33857c478bd9Sstevel@tonic-gate #define BH_E4(eax) (eax == 0x20fb1) 33867c478bd9Sstevel@tonic-gate #define SH_E5(eax) (eax == 0x20f42) 33877c478bd9Sstevel@tonic-gate #define DH_E6(eax) (eax == 0x20ff2 || eax == 0x20fc2) 33887c478bd9Sstevel@tonic-gate #define JH_E6(eax) (eax == 0x20f12 || eax == 0x20f32) 3389ee88d2b9Skchow #define EX(eax) (SH_E0(eax) || JH_E1(eax) || DH_E3(eax) || \ 3390ee88d2b9Skchow SH_E4(eax) || BH_E4(eax) || SH_E5(eax) || \ 3391ee88d2b9Skchow DH_E6(eax) || JH_E6(eax)) 33927c478bd9Sstevel@tonic-gate 3393512cf780Skchow #define DR_AX(eax) (eax == 0x100f00 || eax == 0x100f01 || eax == 0x100f02) 3394512cf780Skchow #define DR_B0(eax) (eax == 0x100f20) 3395512cf780Skchow #define DR_B1(eax) (eax == 0x100f21) 3396512cf780Skchow #define DR_BA(eax) (eax == 0x100f2a) 3397512cf780Skchow #define DR_B2(eax) (eax == 0x100f22) 3398512cf780Skchow #define DR_B3(eax) (eax == 0x100f23) 3399512cf780Skchow #define RB_C0(eax) (eax == 0x100f40) 3400512cf780Skchow 34017c478bd9Sstevel@tonic-gate switch (erratum) { 
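	/*
	 * Editorial worked example, not part of the original source: consider
	 * a hypothetical Opteron whose cpuid function 1 %eax is 0xf58.  That
	 * value satisfies SH_C0() above, so a caller asking
	 *
	 *	if (cpuid_opteron_erratum(cpu, 51) > 0)
	 *		(apply the erratum 51 workaround)
	 *
	 * is told the part is affected, while erratum 52, which matches only
	 * the B() revisions (0xf40, 0xf50, 0xf51), returns 0 for the same
	 * chip.
	 */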
34027c478bd9Sstevel@tonic-gate case 1: 3403875b116eSkchow return (cpi->cpi_family < 0x10); 34047c478bd9Sstevel@tonic-gate case 51: /* what does the asterisk mean? */ 34057c478bd9Sstevel@tonic-gate return (B(eax) || SH_C0(eax) || CG(eax)); 34067c478bd9Sstevel@tonic-gate case 52: 34077c478bd9Sstevel@tonic-gate return (B(eax)); 34087c478bd9Sstevel@tonic-gate case 57: 3409512cf780Skchow return (cpi->cpi_family <= 0x11); 34107c478bd9Sstevel@tonic-gate case 58: 34117c478bd9Sstevel@tonic-gate return (B(eax)); 34127c478bd9Sstevel@tonic-gate case 60: 3413512cf780Skchow return (cpi->cpi_family <= 0x11); 34147c478bd9Sstevel@tonic-gate case 61: 34157c478bd9Sstevel@tonic-gate case 62: 34167c478bd9Sstevel@tonic-gate case 63: 34177c478bd9Sstevel@tonic-gate case 64: 34187c478bd9Sstevel@tonic-gate case 65: 34197c478bd9Sstevel@tonic-gate case 66: 34207c478bd9Sstevel@tonic-gate case 68: 34217c478bd9Sstevel@tonic-gate case 69: 34227c478bd9Sstevel@tonic-gate case 70: 34237c478bd9Sstevel@tonic-gate case 71: 34247c478bd9Sstevel@tonic-gate return (B(eax)); 34257c478bd9Sstevel@tonic-gate case 72: 34267c478bd9Sstevel@tonic-gate return (SH_B0(eax)); 34277c478bd9Sstevel@tonic-gate case 74: 34287c478bd9Sstevel@tonic-gate return (B(eax)); 34297c478bd9Sstevel@tonic-gate case 75: 3430875b116eSkchow return (cpi->cpi_family < 0x10); 34317c478bd9Sstevel@tonic-gate case 76: 34327c478bd9Sstevel@tonic-gate return (B(eax)); 34337c478bd9Sstevel@tonic-gate case 77: 3434512cf780Skchow return (cpi->cpi_family <= 0x11); 34357c478bd9Sstevel@tonic-gate case 78: 34367c478bd9Sstevel@tonic-gate return (B(eax) || SH_C0(eax)); 34377c478bd9Sstevel@tonic-gate case 79: 34387c478bd9Sstevel@tonic-gate return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax)); 34397c478bd9Sstevel@tonic-gate case 80: 34407c478bd9Sstevel@tonic-gate case 81: 34417c478bd9Sstevel@tonic-gate case 82: 34427c478bd9Sstevel@tonic-gate return (B(eax)); 34437c478bd9Sstevel@tonic-gate case 83: 34447c478bd9Sstevel@tonic-gate return (B(eax) || SH_C0(eax) || CG(eax)); 34457c478bd9Sstevel@tonic-gate case 85: 3446875b116eSkchow return (cpi->cpi_family < 0x10); 34477c478bd9Sstevel@tonic-gate case 86: 34487c478bd9Sstevel@tonic-gate return (SH_C0(eax) || CG(eax)); 34497c478bd9Sstevel@tonic-gate case 88: 34507c478bd9Sstevel@tonic-gate #if !defined(__amd64) 34517c478bd9Sstevel@tonic-gate return (0); 34527c478bd9Sstevel@tonic-gate #else 34537c478bd9Sstevel@tonic-gate return (B(eax) || SH_C0(eax)); 34547c478bd9Sstevel@tonic-gate #endif 34557c478bd9Sstevel@tonic-gate case 89: 3456875b116eSkchow return (cpi->cpi_family < 0x10); 34577c478bd9Sstevel@tonic-gate case 90: 34587c478bd9Sstevel@tonic-gate return (B(eax) || SH_C0(eax) || CG(eax)); 34597c478bd9Sstevel@tonic-gate case 91: 34607c478bd9Sstevel@tonic-gate case 92: 34617c478bd9Sstevel@tonic-gate return (B(eax) || SH_C0(eax)); 34627c478bd9Sstevel@tonic-gate case 93: 34637c478bd9Sstevel@tonic-gate return (SH_C0(eax)); 34647c478bd9Sstevel@tonic-gate case 94: 34657c478bd9Sstevel@tonic-gate return (B(eax) || SH_C0(eax) || CG(eax)); 34667c478bd9Sstevel@tonic-gate case 95: 34677c478bd9Sstevel@tonic-gate #if !defined(__amd64) 34687c478bd9Sstevel@tonic-gate return (0); 34697c478bd9Sstevel@tonic-gate #else 34707c478bd9Sstevel@tonic-gate return (B(eax) || SH_C0(eax)); 34717c478bd9Sstevel@tonic-gate #endif 34727c478bd9Sstevel@tonic-gate case 96: 34737c478bd9Sstevel@tonic-gate return (B(eax) || SH_C0(eax) || CG(eax)); 34747c478bd9Sstevel@tonic-gate case 97: 34757c478bd9Sstevel@tonic-gate case 98: 34767c478bd9Sstevel@tonic-gate return 
(SH_C0(eax) || CG(eax)); 34777c478bd9Sstevel@tonic-gate case 99: 34787c478bd9Sstevel@tonic-gate return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax)); 34797c478bd9Sstevel@tonic-gate case 100: 34807c478bd9Sstevel@tonic-gate return (B(eax) || SH_C0(eax)); 34817c478bd9Sstevel@tonic-gate case 101: 34827c478bd9Sstevel@tonic-gate case 103: 34837c478bd9Sstevel@tonic-gate return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax)); 34847c478bd9Sstevel@tonic-gate case 104: 34857c478bd9Sstevel@tonic-gate return (SH_C0(eax) || CG(eax) || D0(eax)); 34867c478bd9Sstevel@tonic-gate case 105: 34877c478bd9Sstevel@tonic-gate case 106: 34887c478bd9Sstevel@tonic-gate case 107: 34897c478bd9Sstevel@tonic-gate return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax)); 34907c478bd9Sstevel@tonic-gate case 108: 34917c478bd9Sstevel@tonic-gate return (DH_CG(eax)); 34927c478bd9Sstevel@tonic-gate case 109: 34937c478bd9Sstevel@tonic-gate return (SH_C0(eax) || CG(eax) || D0(eax)); 34947c478bd9Sstevel@tonic-gate case 110: 34957c478bd9Sstevel@tonic-gate return (D0(eax) || EX(eax)); 34967c478bd9Sstevel@tonic-gate case 111: 34977c478bd9Sstevel@tonic-gate return (CG(eax)); 34987c478bd9Sstevel@tonic-gate case 112: 34997c478bd9Sstevel@tonic-gate return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax)); 35007c478bd9Sstevel@tonic-gate case 113: 35017c478bd9Sstevel@tonic-gate return (eax == 0x20fc0); 35027c478bd9Sstevel@tonic-gate case 114: 35037c478bd9Sstevel@tonic-gate return (SH_E0(eax) || JH_E1(eax) || DH_E3(eax)); 35047c478bd9Sstevel@tonic-gate case 115: 35057c478bd9Sstevel@tonic-gate return (SH_E0(eax) || JH_E1(eax)); 35067c478bd9Sstevel@tonic-gate case 116: 35077c478bd9Sstevel@tonic-gate return (SH_E0(eax) || JH_E1(eax) || DH_E3(eax)); 35087c478bd9Sstevel@tonic-gate case 117: 35097c478bd9Sstevel@tonic-gate return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax)); 35107c478bd9Sstevel@tonic-gate case 118: 35117c478bd9Sstevel@tonic-gate return (SH_E0(eax) || JH_E1(eax) || SH_E4(eax) || BH_E4(eax) || 35127c478bd9Sstevel@tonic-gate JH_E6(eax)); 35137c478bd9Sstevel@tonic-gate case 121: 35147c478bd9Sstevel@tonic-gate return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax)); 35157c478bd9Sstevel@tonic-gate case 122: 3516512cf780Skchow return (cpi->cpi_family < 0x10 || cpi->cpi_family == 0x11); 35177c478bd9Sstevel@tonic-gate case 123: 35187c478bd9Sstevel@tonic-gate return (JH_E1(eax) || BH_E4(eax) || JH_E6(eax)); 35192201b277Skucharsk case 131: 3520875b116eSkchow return (cpi->cpi_family < 0x10); 3521ef50d8c0Sesaxe case 6336786: 3522ef50d8c0Sesaxe /* 3523ef50d8c0Sesaxe * Test for AdvPowerMgmtInfo.TscPStateInvariant 3524875b116eSkchow * if this is a K8 family or newer processor 3525ef50d8c0Sesaxe */ 3526ef50d8c0Sesaxe if (CPI_FAMILY(cpi) == 0xf) { 35278949bcd6Sandrei struct cpuid_regs regs; 35288949bcd6Sandrei regs.cp_eax = 0x80000007; 35298949bcd6Sandrei (void) __cpuid_insn(®s); 35308949bcd6Sandrei return (!(regs.cp_edx & 0x100)); 3531ef50d8c0Sesaxe } 3532ef50d8c0Sesaxe return (0); 3533ee88d2b9Skchow case 6323525: 3534ee88d2b9Skchow return (((((eax >> 12) & 0xff00) + (eax & 0xf00)) | 3535ee88d2b9Skchow (((eax >> 4) & 0xf) | ((eax >> 12) & 0xf0))) < 0xf40); 3536ee88d2b9Skchow 3537512cf780Skchow case 6671130: 3538512cf780Skchow /* 3539512cf780Skchow * check for processors (pre-Shanghai) that do not provide 3540512cf780Skchow * optimal management of 1gb ptes in its tlb. 
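 * (Family 0x10 models below 4 predate the Shanghai core, hence the
 * model test below.  The seven-digit case labels in this switch, such
 * as 6671130, appear to be Solaris bug ids used as pseudo-erratum
 * numbers rather than AMD revision-guide erratum numbers.)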
3541512cf780Skchow */ 3542512cf780Skchow return (cpi->cpi_family == 0x10 && cpi->cpi_model < 4); 3543512cf780Skchow 3544512cf780Skchow case 298: 3545512cf780Skchow return (DR_AX(eax) || DR_B0(eax) || DR_B1(eax) || DR_BA(eax) || 3546512cf780Skchow DR_B2(eax) || RB_C0(eax)); 3547512cf780Skchow 35485e54b56dSHans Rosenfeld case 721: 35495e54b56dSHans Rosenfeld #if defined(__amd64) 35505e54b56dSHans Rosenfeld return (cpi->cpi_family == 0x10 || cpi->cpi_family == 0x12); 35515e54b56dSHans Rosenfeld #else 35525e54b56dSHans Rosenfeld return (0); 35535e54b56dSHans Rosenfeld #endif 35545e54b56dSHans Rosenfeld 3555512cf780Skchow default: 3556512cf780Skchow return (-1); 3557512cf780Skchow 3558512cf780Skchow } 3559512cf780Skchow } 3560512cf780Skchow 3561512cf780Skchow /* 3562512cf780Skchow * Determine if specified erratum is present via OSVW (OS Visible Workaround). 3563512cf780Skchow * Return 1 if erratum is present, 0 if not present and -1 if indeterminate. 3564512cf780Skchow */ 3565512cf780Skchow int 3566512cf780Skchow osvw_opteron_erratum(cpu_t *cpu, uint_t erratum) 3567512cf780Skchow { 3568512cf780Skchow struct cpuid_info *cpi; 3569512cf780Skchow uint_t osvwid; 3570512cf780Skchow static int osvwfeature = -1; 3571512cf780Skchow uint64_t osvwlength; 3572512cf780Skchow 3573512cf780Skchow 3574512cf780Skchow cpi = cpu->cpu_m.mcpu_cpi; 3575512cf780Skchow 3576512cf780Skchow /* confirm OSVW supported */ 3577512cf780Skchow if (osvwfeature == -1) { 3578512cf780Skchow osvwfeature = cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW; 3579512cf780Skchow } else { 3580512cf780Skchow /* assert that osvw feature setting is consistent on all cpus */ 3581512cf780Skchow ASSERT(osvwfeature == 3582512cf780Skchow (cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW)); 3583512cf780Skchow } 3584512cf780Skchow if (!osvwfeature) 3585512cf780Skchow return (-1); 3586512cf780Skchow 3587512cf780Skchow osvwlength = rdmsr(MSR_AMD_OSVW_ID_LEN) & OSVW_ID_LEN_MASK; 3588512cf780Skchow 3589512cf780Skchow switch (erratum) { 3590512cf780Skchow case 298: /* osvwid is 0 */ 3591512cf780Skchow osvwid = 0; 3592512cf780Skchow if (osvwlength <= (uint64_t)osvwid) { 3593512cf780Skchow /* osvwid 0 is unknown */ 3594512cf780Skchow return (-1); 3595512cf780Skchow } 3596512cf780Skchow 3597512cf780Skchow /* 3598512cf780Skchow * Check the OSVW STATUS MSR to determine the state 3599512cf780Skchow * of the erratum where: 3600512cf780Skchow * 0 - fixed by HW 3601512cf780Skchow * 1 - BIOS has applied the workaround when BIOS 3602512cf780Skchow * workaround is available. (Or for other errata, 3603512cf780Skchow * OS workaround is required.) 3604512cf780Skchow * For a value of 1, caller will confirm that the 3605512cf780Skchow * erratum 298 workaround has indeed been applied by BIOS. 3606512cf780Skchow * 3607512cf780Skchow * A 1 may be set in cpus that have a HW fix 3608512cf780Skchow * in a mixed cpu system. Regarding erratum 298: 3609512cf780Skchow * In a multiprocessor platform, the workaround above 3610512cf780Skchow * should be applied to all processors regardless of 3611512cf780Skchow * silicon revision when an affected processor is 3612512cf780Skchow * present. 
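 *
 * The status bits are packed OSVW_ID_CNT_PER_MSR to a 64-bit MSR, so
 * the read below picks the MSR with (osvwid / OSVW_ID_CNT_PER_MSR) and
 * the bit within it with (osvwid % OSVW_ID_CNT_PER_MSR); for osvwid 0
 * that is simply bit 0 of the first OSVW status MSR.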
3613512cf780Skchow */ 3614512cf780Skchow 3615512cf780Skchow return (rdmsr(MSR_AMD_OSVW_STATUS + 3616512cf780Skchow (osvwid / OSVW_ID_CNT_PER_MSR)) & 3617512cf780Skchow (1ULL << (osvwid % OSVW_ID_CNT_PER_MSR))); 3618512cf780Skchow 36197c478bd9Sstevel@tonic-gate default: 36207c478bd9Sstevel@tonic-gate return (-1); 36217c478bd9Sstevel@tonic-gate } 36227c478bd9Sstevel@tonic-gate } 36237c478bd9Sstevel@tonic-gate 36247c478bd9Sstevel@tonic-gate static const char assoc_str[] = "associativity"; 36257c478bd9Sstevel@tonic-gate static const char line_str[] = "line-size"; 36267c478bd9Sstevel@tonic-gate static const char size_str[] = "size"; 36277c478bd9Sstevel@tonic-gate 36287c478bd9Sstevel@tonic-gate static void 36297c478bd9Sstevel@tonic-gate add_cache_prop(dev_info_t *devi, const char *label, const char *type, 36307c478bd9Sstevel@tonic-gate uint32_t val) 36317c478bd9Sstevel@tonic-gate { 36327c478bd9Sstevel@tonic-gate char buf[128]; 36337c478bd9Sstevel@tonic-gate 36347c478bd9Sstevel@tonic-gate /* 36357c478bd9Sstevel@tonic-gate * ndi_prop_update_int() is used because it is desirable for 36367c478bd9Sstevel@tonic-gate * DDI_PROP_HW_DEF and DDI_PROP_DONTSLEEP to be set. 36377c478bd9Sstevel@tonic-gate */ 36387c478bd9Sstevel@tonic-gate if (snprintf(buf, sizeof (buf), "%s-%s", label, type) < sizeof (buf)) 36397c478bd9Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, devi, buf, val); 36407c478bd9Sstevel@tonic-gate } 36417c478bd9Sstevel@tonic-gate 36427c478bd9Sstevel@tonic-gate /* 36437c478bd9Sstevel@tonic-gate * Intel-style cache/tlb description 36447c478bd9Sstevel@tonic-gate * 36457c478bd9Sstevel@tonic-gate * Standard cpuid level 2 gives a randomly ordered 36467c478bd9Sstevel@tonic-gate * selection of tags that index into a table that describes 36477c478bd9Sstevel@tonic-gate * cache and tlb properties. 
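 *
 * As an example of the decoding done below, descriptor byte 0x2c
 * reported by cpuid function 2 is looked up in intel_ctab[] and yields
 * a 32KB, 8-way, 64-byte-line L1 data cache, while descriptor 0x01
 * yields a 32-entry, 4-way itlb for 4K pages.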
36487c478bd9Sstevel@tonic-gate */ 36497c478bd9Sstevel@tonic-gate 36507c478bd9Sstevel@tonic-gate static const char l1_icache_str[] = "l1-icache"; 36517c478bd9Sstevel@tonic-gate static const char l1_dcache_str[] = "l1-dcache"; 36527c478bd9Sstevel@tonic-gate static const char l2_cache_str[] = "l2-cache"; 3653ae115bc7Smrj static const char l3_cache_str[] = "l3-cache"; 36547c478bd9Sstevel@tonic-gate static const char itlb4k_str[] = "itlb-4K"; 36557c478bd9Sstevel@tonic-gate static const char dtlb4k_str[] = "dtlb-4K"; 3656824e4fecSvd224797 static const char itlb2M_str[] = "itlb-2M"; 36577c478bd9Sstevel@tonic-gate static const char itlb4M_str[] = "itlb-4M"; 36587c478bd9Sstevel@tonic-gate static const char dtlb4M_str[] = "dtlb-4M"; 365925dfb062Sksadhukh static const char dtlb24_str[] = "dtlb0-2M-4M"; 36607c478bd9Sstevel@tonic-gate static const char itlb424_str[] = "itlb-4K-2M-4M"; 366125dfb062Sksadhukh static const char itlb24_str[] = "itlb-2M-4M"; 36627c478bd9Sstevel@tonic-gate static const char dtlb44_str[] = "dtlb-4K-4M"; 36637c478bd9Sstevel@tonic-gate static const char sl1_dcache_str[] = "sectored-l1-dcache"; 36647c478bd9Sstevel@tonic-gate static const char sl2_cache_str[] = "sectored-l2-cache"; 36657c478bd9Sstevel@tonic-gate static const char itrace_str[] = "itrace-cache"; 36667c478bd9Sstevel@tonic-gate static const char sl3_cache_str[] = "sectored-l3-cache"; 366725dfb062Sksadhukh static const char sh_l2_tlb4k_str[] = "shared-l2-tlb-4k"; 36687c478bd9Sstevel@tonic-gate 36697c478bd9Sstevel@tonic-gate static const struct cachetab { 36707c478bd9Sstevel@tonic-gate uint8_t ct_code; 36717c478bd9Sstevel@tonic-gate uint8_t ct_assoc; 36727c478bd9Sstevel@tonic-gate uint16_t ct_line_size; 36737c478bd9Sstevel@tonic-gate size_t ct_size; 36747c478bd9Sstevel@tonic-gate const char *ct_label; 36757c478bd9Sstevel@tonic-gate } intel_ctab[] = { 3676824e4fecSvd224797 /* 3677824e4fecSvd224797 * maintain descending order! 
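 * (find_cacheent() walks this array until it reaches an entry whose
 * code is <= the descriptor being looked up and then tests for an
 * exact match, so the entries must stay sorted by descending ct_code
 * or the scan would stop on the wrong row and miss valid descriptors.)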
3678824e4fecSvd224797 * 3679824e4fecSvd224797 * Codes ignored - Reason 3680824e4fecSvd224797 * ---------------------- 3681824e4fecSvd224797 * 40H - intel_cpuid_4_cache_info() disambiguates l2/l3 cache 3682824e4fecSvd224797 * f0H/f1H - Currently we do not interpret prefetch size by design 3683824e4fecSvd224797 */ 368425dfb062Sksadhukh { 0xe4, 16, 64, 8*1024*1024, l3_cache_str}, 368525dfb062Sksadhukh { 0xe3, 16, 64, 4*1024*1024, l3_cache_str}, 368625dfb062Sksadhukh { 0xe2, 16, 64, 2*1024*1024, l3_cache_str}, 368725dfb062Sksadhukh { 0xde, 12, 64, 6*1024*1024, l3_cache_str}, 368825dfb062Sksadhukh { 0xdd, 12, 64, 3*1024*1024, l3_cache_str}, 368925dfb062Sksadhukh { 0xdc, 12, 64, ((1*1024*1024)+(512*1024)), l3_cache_str}, 369025dfb062Sksadhukh { 0xd8, 8, 64, 4*1024*1024, l3_cache_str}, 369125dfb062Sksadhukh { 0xd7, 8, 64, 2*1024*1024, l3_cache_str}, 369225dfb062Sksadhukh { 0xd6, 8, 64, 1*1024*1024, l3_cache_str}, 369325dfb062Sksadhukh { 0xd2, 4, 64, 2*1024*1024, l3_cache_str}, 369425dfb062Sksadhukh { 0xd1, 4, 64, 1*1024*1024, l3_cache_str}, 369525dfb062Sksadhukh { 0xd0, 4, 64, 512*1024, l3_cache_str}, 369625dfb062Sksadhukh { 0xca, 4, 0, 512, sh_l2_tlb4k_str}, 3697824e4fecSvd224797 { 0xc0, 4, 0, 8, dtlb44_str }, 3698824e4fecSvd224797 { 0xba, 4, 0, 64, dtlb4k_str }, 3699ae115bc7Smrj { 0xb4, 4, 0, 256, dtlb4k_str }, 37007c478bd9Sstevel@tonic-gate { 0xb3, 4, 0, 128, dtlb4k_str }, 370125dfb062Sksadhukh { 0xb2, 4, 0, 64, itlb4k_str }, 37027c478bd9Sstevel@tonic-gate { 0xb0, 4, 0, 128, itlb4k_str }, 37037c478bd9Sstevel@tonic-gate { 0x87, 8, 64, 1024*1024, l2_cache_str}, 37047c478bd9Sstevel@tonic-gate { 0x86, 4, 64, 512*1024, l2_cache_str}, 37057c478bd9Sstevel@tonic-gate { 0x85, 8, 32, 2*1024*1024, l2_cache_str}, 37067c478bd9Sstevel@tonic-gate { 0x84, 8, 32, 1024*1024, l2_cache_str}, 37077c478bd9Sstevel@tonic-gate { 0x83, 8, 32, 512*1024, l2_cache_str}, 37087c478bd9Sstevel@tonic-gate { 0x82, 8, 32, 256*1024, l2_cache_str}, 3709824e4fecSvd224797 { 0x80, 8, 64, 512*1024, l2_cache_str}, 37107c478bd9Sstevel@tonic-gate { 0x7f, 2, 64, 512*1024, l2_cache_str}, 37117c478bd9Sstevel@tonic-gate { 0x7d, 8, 64, 2*1024*1024, sl2_cache_str}, 37127c478bd9Sstevel@tonic-gate { 0x7c, 8, 64, 1024*1024, sl2_cache_str}, 37137c478bd9Sstevel@tonic-gate { 0x7b, 8, 64, 512*1024, sl2_cache_str}, 37147c478bd9Sstevel@tonic-gate { 0x7a, 8, 64, 256*1024, sl2_cache_str}, 37157c478bd9Sstevel@tonic-gate { 0x79, 8, 64, 128*1024, sl2_cache_str}, 37167c478bd9Sstevel@tonic-gate { 0x78, 8, 64, 1024*1024, l2_cache_str}, 3717ae115bc7Smrj { 0x73, 8, 0, 64*1024, itrace_str}, 37187c478bd9Sstevel@tonic-gate { 0x72, 8, 0, 32*1024, itrace_str}, 37197c478bd9Sstevel@tonic-gate { 0x71, 8, 0, 16*1024, itrace_str}, 37207c478bd9Sstevel@tonic-gate { 0x70, 8, 0, 12*1024, itrace_str}, 37217c478bd9Sstevel@tonic-gate { 0x68, 4, 64, 32*1024, sl1_dcache_str}, 37227c478bd9Sstevel@tonic-gate { 0x67, 4, 64, 16*1024, sl1_dcache_str}, 37237c478bd9Sstevel@tonic-gate { 0x66, 4, 64, 8*1024, sl1_dcache_str}, 37247c478bd9Sstevel@tonic-gate { 0x60, 8, 64, 16*1024, sl1_dcache_str}, 37257c478bd9Sstevel@tonic-gate { 0x5d, 0, 0, 256, dtlb44_str}, 37267c478bd9Sstevel@tonic-gate { 0x5c, 0, 0, 128, dtlb44_str}, 37277c478bd9Sstevel@tonic-gate { 0x5b, 0, 0, 64, dtlb44_str}, 372825dfb062Sksadhukh { 0x5a, 4, 0, 32, dtlb24_str}, 3729824e4fecSvd224797 { 0x59, 0, 0, 16, dtlb4k_str}, 3730824e4fecSvd224797 { 0x57, 4, 0, 16, dtlb4k_str}, 3731824e4fecSvd224797 { 0x56, 4, 0, 16, dtlb4M_str}, 373225dfb062Sksadhukh { 0x55, 0, 0, 7, itlb24_str}, 37337c478bd9Sstevel@tonic-gate { 0x52, 0, 0, 256, 
itlb424_str}, 37347c478bd9Sstevel@tonic-gate { 0x51, 0, 0, 128, itlb424_str}, 37357c478bd9Sstevel@tonic-gate { 0x50, 0, 0, 64, itlb424_str}, 3736824e4fecSvd224797 { 0x4f, 0, 0, 32, itlb4k_str}, 3737824e4fecSvd224797 { 0x4e, 24, 64, 6*1024*1024, l2_cache_str}, 3738ae115bc7Smrj { 0x4d, 16, 64, 16*1024*1024, l3_cache_str}, 3739ae115bc7Smrj { 0x4c, 12, 64, 12*1024*1024, l3_cache_str}, 3740ae115bc7Smrj { 0x4b, 16, 64, 8*1024*1024, l3_cache_str}, 3741ae115bc7Smrj { 0x4a, 12, 64, 6*1024*1024, l3_cache_str}, 3742ae115bc7Smrj { 0x49, 16, 64, 4*1024*1024, l3_cache_str}, 3743824e4fecSvd224797 { 0x48, 12, 64, 3*1024*1024, l2_cache_str}, 3744ae115bc7Smrj { 0x47, 8, 64, 8*1024*1024, l3_cache_str}, 3745ae115bc7Smrj { 0x46, 4, 64, 4*1024*1024, l3_cache_str}, 37467c478bd9Sstevel@tonic-gate { 0x45, 4, 32, 2*1024*1024, l2_cache_str}, 37477c478bd9Sstevel@tonic-gate { 0x44, 4, 32, 1024*1024, l2_cache_str}, 37487c478bd9Sstevel@tonic-gate { 0x43, 4, 32, 512*1024, l2_cache_str}, 37497c478bd9Sstevel@tonic-gate { 0x42, 4, 32, 256*1024, l2_cache_str}, 37507c478bd9Sstevel@tonic-gate { 0x41, 4, 32, 128*1024, l2_cache_str}, 3751ae115bc7Smrj { 0x3e, 4, 64, 512*1024, sl2_cache_str}, 3752ae115bc7Smrj { 0x3d, 6, 64, 384*1024, sl2_cache_str}, 37537c478bd9Sstevel@tonic-gate { 0x3c, 4, 64, 256*1024, sl2_cache_str}, 37547c478bd9Sstevel@tonic-gate { 0x3b, 2, 64, 128*1024, sl2_cache_str}, 3755ae115bc7Smrj { 0x3a, 6, 64, 192*1024, sl2_cache_str}, 37567c478bd9Sstevel@tonic-gate { 0x39, 4, 64, 128*1024, sl2_cache_str}, 37577c478bd9Sstevel@tonic-gate { 0x30, 8, 64, 32*1024, l1_icache_str}, 37587c478bd9Sstevel@tonic-gate { 0x2c, 8, 64, 32*1024, l1_dcache_str}, 37597c478bd9Sstevel@tonic-gate { 0x29, 8, 64, 4096*1024, sl3_cache_str}, 37607c478bd9Sstevel@tonic-gate { 0x25, 8, 64, 2048*1024, sl3_cache_str}, 37617c478bd9Sstevel@tonic-gate { 0x23, 8, 64, 1024*1024, sl3_cache_str}, 37627c478bd9Sstevel@tonic-gate { 0x22, 4, 64, 512*1024, sl3_cache_str}, 3763824e4fecSvd224797 { 0x0e, 6, 64, 24*1024, l1_dcache_str}, 376425dfb062Sksadhukh { 0x0d, 4, 32, 16*1024, l1_dcache_str}, 37657c478bd9Sstevel@tonic-gate { 0x0c, 4, 32, 16*1024, l1_dcache_str}, 3766ae115bc7Smrj { 0x0b, 4, 0, 4, itlb4M_str}, 37677c478bd9Sstevel@tonic-gate { 0x0a, 2, 32, 8*1024, l1_dcache_str}, 37687c478bd9Sstevel@tonic-gate { 0x08, 4, 32, 16*1024, l1_icache_str}, 37697c478bd9Sstevel@tonic-gate { 0x06, 4, 32, 8*1024, l1_icache_str}, 3770824e4fecSvd224797 { 0x05, 4, 0, 32, dtlb4M_str}, 37717c478bd9Sstevel@tonic-gate { 0x04, 4, 0, 8, dtlb4M_str}, 37727c478bd9Sstevel@tonic-gate { 0x03, 4, 0, 64, dtlb4k_str}, 37737c478bd9Sstevel@tonic-gate { 0x02, 4, 0, 2, itlb4M_str}, 37747c478bd9Sstevel@tonic-gate { 0x01, 4, 0, 32, itlb4k_str}, 37757c478bd9Sstevel@tonic-gate { 0 } 37767c478bd9Sstevel@tonic-gate }; 37777c478bd9Sstevel@tonic-gate 37787c478bd9Sstevel@tonic-gate static const struct cachetab cyrix_ctab[] = { 37797c478bd9Sstevel@tonic-gate { 0x70, 4, 0, 32, "tlb-4K" }, 37807c478bd9Sstevel@tonic-gate { 0x80, 4, 16, 16*1024, "l1-cache" }, 37817c478bd9Sstevel@tonic-gate { 0 } 37827c478bd9Sstevel@tonic-gate }; 37837c478bd9Sstevel@tonic-gate 37847c478bd9Sstevel@tonic-gate /* 37857c478bd9Sstevel@tonic-gate * Search a cache table for a matching entry 37867c478bd9Sstevel@tonic-gate */ 37877c478bd9Sstevel@tonic-gate static const struct cachetab * 37887c478bd9Sstevel@tonic-gate find_cacheent(const struct cachetab *ct, uint_t code) 37897c478bd9Sstevel@tonic-gate { 37907c478bd9Sstevel@tonic-gate if (code != 0) { 37917c478bd9Sstevel@tonic-gate for (; ct->ct_code != 0; ct++) 
37927c478bd9Sstevel@tonic-gate if (ct->ct_code <= code) 37937c478bd9Sstevel@tonic-gate break; 37947c478bd9Sstevel@tonic-gate if (ct->ct_code == code) 37957c478bd9Sstevel@tonic-gate return (ct); 37967c478bd9Sstevel@tonic-gate } 37977c478bd9Sstevel@tonic-gate return (NULL); 37987c478bd9Sstevel@tonic-gate } 37997c478bd9Sstevel@tonic-gate 38007c478bd9Sstevel@tonic-gate /* 38017dee861bSksadhukh * Populate cachetab entry with L2 or L3 cache-information using 38027dee861bSksadhukh * cpuid function 4. This function is called from intel_walk_cacheinfo() 38037dee861bSksadhukh * when descriptor 0x49 is encountered. It returns 0 if no such cache 38047dee861bSksadhukh * information is found. 38057dee861bSksadhukh */ 38067dee861bSksadhukh static int 38077dee861bSksadhukh intel_cpuid_4_cache_info(struct cachetab *ct, struct cpuid_info *cpi) 38087dee861bSksadhukh { 38097dee861bSksadhukh uint32_t level, i; 38107dee861bSksadhukh int ret = 0; 38117dee861bSksadhukh 38127dee861bSksadhukh for (i = 0; i < cpi->cpi_std_4_size; i++) { 38137dee861bSksadhukh level = CPI_CACHE_LVL(cpi->cpi_std_4[i]); 38147dee861bSksadhukh 38157dee861bSksadhukh if (level == 2 || level == 3) { 38167dee861bSksadhukh ct->ct_assoc = CPI_CACHE_WAYS(cpi->cpi_std_4[i]) + 1; 38177dee861bSksadhukh ct->ct_line_size = 38187dee861bSksadhukh CPI_CACHE_COH_LN_SZ(cpi->cpi_std_4[i]) + 1; 38197dee861bSksadhukh ct->ct_size = ct->ct_assoc * 38207dee861bSksadhukh (CPI_CACHE_PARTS(cpi->cpi_std_4[i]) + 1) * 38217dee861bSksadhukh ct->ct_line_size * 38227dee861bSksadhukh (cpi->cpi_std_4[i]->cp_ecx + 1); 38237dee861bSksadhukh 38247dee861bSksadhukh if (level == 2) { 38257dee861bSksadhukh ct->ct_label = l2_cache_str; 38267dee861bSksadhukh } else if (level == 3) { 38277dee861bSksadhukh ct->ct_label = l3_cache_str; 38287dee861bSksadhukh } 38297dee861bSksadhukh ret = 1; 38307dee861bSksadhukh } 38317dee861bSksadhukh } 38327dee861bSksadhukh 38337dee861bSksadhukh return (ret); 38347dee861bSksadhukh } 38357dee861bSksadhukh 38367dee861bSksadhukh /* 38377c478bd9Sstevel@tonic-gate * Walk the cacheinfo descriptor, applying 'func' to every valid element 38387c478bd9Sstevel@tonic-gate * The walk is terminated if the walker returns non-zero. 38397c478bd9Sstevel@tonic-gate */ 38407c478bd9Sstevel@tonic-gate static void 38417c478bd9Sstevel@tonic-gate intel_walk_cacheinfo(struct cpuid_info *cpi, 38427c478bd9Sstevel@tonic-gate void *arg, int (*func)(void *, const struct cachetab *)) 38437c478bd9Sstevel@tonic-gate { 38447c478bd9Sstevel@tonic-gate const struct cachetab *ct; 3845824e4fecSvd224797 struct cachetab des_49_ct, des_b1_ct; 38467c478bd9Sstevel@tonic-gate uint8_t *dp; 38477c478bd9Sstevel@tonic-gate int i; 38487c478bd9Sstevel@tonic-gate 38497c478bd9Sstevel@tonic-gate if ((dp = cpi->cpi_cacheinfo) == NULL) 38507c478bd9Sstevel@tonic-gate return; 3851f1d742a9Sksadhukh for (i = 0; i < cpi->cpi_ncache; i++, dp++) { 3852f1d742a9Sksadhukh /* 3853f1d742a9Sksadhukh * For overloaded descriptor 0x49 we use cpuid function 4 38547dee861bSksadhukh * if supported by the current processor, to create 3855f1d742a9Sksadhukh * cache information. 3856824e4fecSvd224797 * For overloaded descriptor 0xb1 we use X86_PAE flag 3857824e4fecSvd224797 * to disambiguate the cache information. 
3858f1d742a9Sksadhukh */ 38597dee861bSksadhukh if (*dp == 0x49 && cpi->cpi_maxeax >= 0x4 && 38607dee861bSksadhukh intel_cpuid_4_cache_info(&des_49_ct, cpi) == 1) { 38617dee861bSksadhukh ct = &des_49_ct; 3862824e4fecSvd224797 } else if (*dp == 0xb1) { 3863824e4fecSvd224797 des_b1_ct.ct_code = 0xb1; 3864824e4fecSvd224797 des_b1_ct.ct_assoc = 4; 3865824e4fecSvd224797 des_b1_ct.ct_line_size = 0; 38667417cfdeSKuriakose Kuruvilla if (is_x86_feature(x86_featureset, X86FSET_PAE)) { 3867824e4fecSvd224797 des_b1_ct.ct_size = 8; 3868824e4fecSvd224797 des_b1_ct.ct_label = itlb2M_str; 3869824e4fecSvd224797 } else { 3870824e4fecSvd224797 des_b1_ct.ct_size = 4; 3871824e4fecSvd224797 des_b1_ct.ct_label = itlb4M_str; 3872824e4fecSvd224797 } 3873824e4fecSvd224797 ct = &des_b1_ct; 38747dee861bSksadhukh } else { 38757dee861bSksadhukh if ((ct = find_cacheent(intel_ctab, *dp)) == NULL) { 3876f1d742a9Sksadhukh continue; 3877f1d742a9Sksadhukh } 38787dee861bSksadhukh } 3879f1d742a9Sksadhukh 38807dee861bSksadhukh if (func(arg, ct) != 0) { 38817c478bd9Sstevel@tonic-gate break; 38827c478bd9Sstevel@tonic-gate } 38837c478bd9Sstevel@tonic-gate } 3884f1d742a9Sksadhukh } 38857c478bd9Sstevel@tonic-gate 38867c478bd9Sstevel@tonic-gate /* 38877c478bd9Sstevel@tonic-gate * (Like the Intel one, except for Cyrix CPUs) 38887c478bd9Sstevel@tonic-gate */ 38897c478bd9Sstevel@tonic-gate static void 38907c478bd9Sstevel@tonic-gate cyrix_walk_cacheinfo(struct cpuid_info *cpi, 38917c478bd9Sstevel@tonic-gate void *arg, int (*func)(void *, const struct cachetab *)) 38927c478bd9Sstevel@tonic-gate { 38937c478bd9Sstevel@tonic-gate const struct cachetab *ct; 38947c478bd9Sstevel@tonic-gate uint8_t *dp; 38957c478bd9Sstevel@tonic-gate int i; 38967c478bd9Sstevel@tonic-gate 38977c478bd9Sstevel@tonic-gate if ((dp = cpi->cpi_cacheinfo) == NULL) 38987c478bd9Sstevel@tonic-gate return; 38997c478bd9Sstevel@tonic-gate for (i = 0; i < cpi->cpi_ncache; i++, dp++) { 39007c478bd9Sstevel@tonic-gate /* 39017c478bd9Sstevel@tonic-gate * Search Cyrix-specific descriptor table first .. 39027c478bd9Sstevel@tonic-gate */ 39037c478bd9Sstevel@tonic-gate if ((ct = find_cacheent(cyrix_ctab, *dp)) != NULL) { 39047c478bd9Sstevel@tonic-gate if (func(arg, ct) != 0) 39057c478bd9Sstevel@tonic-gate break; 39067c478bd9Sstevel@tonic-gate continue; 39077c478bd9Sstevel@tonic-gate } 39087c478bd9Sstevel@tonic-gate /* 39097c478bd9Sstevel@tonic-gate * .. else fall back to the Intel one 39107c478bd9Sstevel@tonic-gate */ 39117c478bd9Sstevel@tonic-gate if ((ct = find_cacheent(intel_ctab, *dp)) != NULL) { 39127c478bd9Sstevel@tonic-gate if (func(arg, ct) != 0) 39137c478bd9Sstevel@tonic-gate break; 39147c478bd9Sstevel@tonic-gate continue; 39157c478bd9Sstevel@tonic-gate } 39167c478bd9Sstevel@tonic-gate } 39177c478bd9Sstevel@tonic-gate } 39187c478bd9Sstevel@tonic-gate 39197c478bd9Sstevel@tonic-gate /* 39207c478bd9Sstevel@tonic-gate * A cacheinfo walker that adds associativity, line-size, and size properties 39217c478bd9Sstevel@tonic-gate * to the devinfo node it is passed as an argument. 
39227c478bd9Sstevel@tonic-gate */ 39237c478bd9Sstevel@tonic-gate static int 39247c478bd9Sstevel@tonic-gate add_cacheent_props(void *arg, const struct cachetab *ct) 39257c478bd9Sstevel@tonic-gate { 39267c478bd9Sstevel@tonic-gate dev_info_t *devi = arg; 39277c478bd9Sstevel@tonic-gate 39287c478bd9Sstevel@tonic-gate add_cache_prop(devi, ct->ct_label, assoc_str, ct->ct_assoc); 39297c478bd9Sstevel@tonic-gate if (ct->ct_line_size != 0) 39307c478bd9Sstevel@tonic-gate add_cache_prop(devi, ct->ct_label, line_str, 39317c478bd9Sstevel@tonic-gate ct->ct_line_size); 39327c478bd9Sstevel@tonic-gate add_cache_prop(devi, ct->ct_label, size_str, ct->ct_size); 39337c478bd9Sstevel@tonic-gate return (0); 39347c478bd9Sstevel@tonic-gate } 39357c478bd9Sstevel@tonic-gate 3936f1d742a9Sksadhukh 39377c478bd9Sstevel@tonic-gate static const char fully_assoc[] = "fully-associative?"; 39387c478bd9Sstevel@tonic-gate 39397c478bd9Sstevel@tonic-gate /* 39407c478bd9Sstevel@tonic-gate * AMD style cache/tlb description 39417c478bd9Sstevel@tonic-gate * 39427c478bd9Sstevel@tonic-gate * Extended functions 5 and 6 directly describe properties of 39437c478bd9Sstevel@tonic-gate * tlbs and various cache levels. 39447c478bd9Sstevel@tonic-gate */ 39457c478bd9Sstevel@tonic-gate static void 39467c478bd9Sstevel@tonic-gate add_amd_assoc(dev_info_t *devi, const char *label, uint_t assoc) 39477c478bd9Sstevel@tonic-gate { 39487c478bd9Sstevel@tonic-gate switch (assoc) { 39497c478bd9Sstevel@tonic-gate case 0: /* reserved; ignore */ 39507c478bd9Sstevel@tonic-gate break; 39517c478bd9Sstevel@tonic-gate default: 39527c478bd9Sstevel@tonic-gate add_cache_prop(devi, label, assoc_str, assoc); 39537c478bd9Sstevel@tonic-gate break; 39547c478bd9Sstevel@tonic-gate case 0xff: 39557c478bd9Sstevel@tonic-gate add_cache_prop(devi, label, fully_assoc, 1); 39567c478bd9Sstevel@tonic-gate break; 39577c478bd9Sstevel@tonic-gate } 39587c478bd9Sstevel@tonic-gate } 39597c478bd9Sstevel@tonic-gate 39607c478bd9Sstevel@tonic-gate static void 39617c478bd9Sstevel@tonic-gate add_amd_tlb(dev_info_t *devi, const char *label, uint_t assoc, uint_t size) 39627c478bd9Sstevel@tonic-gate { 39637c478bd9Sstevel@tonic-gate if (size == 0) 39647c478bd9Sstevel@tonic-gate return; 39657c478bd9Sstevel@tonic-gate add_cache_prop(devi, label, size_str, size); 39667c478bd9Sstevel@tonic-gate add_amd_assoc(devi, label, assoc); 39677c478bd9Sstevel@tonic-gate } 39687c478bd9Sstevel@tonic-gate 39697c478bd9Sstevel@tonic-gate static void 39707c478bd9Sstevel@tonic-gate add_amd_cache(dev_info_t *devi, const char *label, 39717c478bd9Sstevel@tonic-gate uint_t size, uint_t assoc, uint_t lines_per_tag, uint_t line_size) 39727c478bd9Sstevel@tonic-gate { 39737c478bd9Sstevel@tonic-gate if (size == 0 || line_size == 0) 39747c478bd9Sstevel@tonic-gate return; 39757c478bd9Sstevel@tonic-gate add_amd_assoc(devi, label, assoc); 39767c478bd9Sstevel@tonic-gate /* 39777c478bd9Sstevel@tonic-gate * Most AMD parts have a sectored cache. Multiple cache lines are 39787c478bd9Sstevel@tonic-gate * associated with each tag. A sector consists of all cache lines 39797c478bd9Sstevel@tonic-gate * associated with a tag. For example, the AMD K6-III has a sector 39807c478bd9Sstevel@tonic-gate * size of 2 cache lines per tag. 
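 * With a 64-byte line and two lines per tag, for instance, a sector is
 * 128 bytes; the "lines-per-tag" property added below exposes that
 * ratio alongside line-size so the sector size can be derived.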
39817c478bd9Sstevel@tonic-gate */ 39827c478bd9Sstevel@tonic-gate if (lines_per_tag != 0) 39837c478bd9Sstevel@tonic-gate add_cache_prop(devi, label, "lines-per-tag", lines_per_tag); 39847c478bd9Sstevel@tonic-gate add_cache_prop(devi, label, line_str, line_size); 39857c478bd9Sstevel@tonic-gate add_cache_prop(devi, label, size_str, size * 1024); 39867c478bd9Sstevel@tonic-gate } 39877c478bd9Sstevel@tonic-gate 39887c478bd9Sstevel@tonic-gate static void 39897c478bd9Sstevel@tonic-gate add_amd_l2_assoc(dev_info_t *devi, const char *label, uint_t assoc) 39907c478bd9Sstevel@tonic-gate { 39917c478bd9Sstevel@tonic-gate switch (assoc) { 39927c478bd9Sstevel@tonic-gate case 0: /* off */ 39937c478bd9Sstevel@tonic-gate break; 39947c478bd9Sstevel@tonic-gate case 1: 39957c478bd9Sstevel@tonic-gate case 2: 39967c478bd9Sstevel@tonic-gate case 4: 39977c478bd9Sstevel@tonic-gate add_cache_prop(devi, label, assoc_str, assoc); 39987c478bd9Sstevel@tonic-gate break; 39997c478bd9Sstevel@tonic-gate case 6: 40007c478bd9Sstevel@tonic-gate add_cache_prop(devi, label, assoc_str, 8); 40017c478bd9Sstevel@tonic-gate break; 40027c478bd9Sstevel@tonic-gate case 8: 40037c478bd9Sstevel@tonic-gate add_cache_prop(devi, label, assoc_str, 16); 40047c478bd9Sstevel@tonic-gate break; 40057c478bd9Sstevel@tonic-gate case 0xf: 40067c478bd9Sstevel@tonic-gate add_cache_prop(devi, label, fully_assoc, 1); 40077c478bd9Sstevel@tonic-gate break; 40087c478bd9Sstevel@tonic-gate default: /* reserved; ignore */ 40097c478bd9Sstevel@tonic-gate break; 40107c478bd9Sstevel@tonic-gate } 40117c478bd9Sstevel@tonic-gate } 40127c478bd9Sstevel@tonic-gate 40137c478bd9Sstevel@tonic-gate static void 40147c478bd9Sstevel@tonic-gate add_amd_l2_tlb(dev_info_t *devi, const char *label, uint_t assoc, uint_t size) 40157c478bd9Sstevel@tonic-gate { 40167c478bd9Sstevel@tonic-gate if (size == 0 || assoc == 0) 40177c478bd9Sstevel@tonic-gate return; 40187c478bd9Sstevel@tonic-gate add_amd_l2_assoc(devi, label, assoc); 40197c478bd9Sstevel@tonic-gate add_cache_prop(devi, label, size_str, size); 40207c478bd9Sstevel@tonic-gate } 40217c478bd9Sstevel@tonic-gate 40227c478bd9Sstevel@tonic-gate static void 40237c478bd9Sstevel@tonic-gate add_amd_l2_cache(dev_info_t *devi, const char *label, 40247c478bd9Sstevel@tonic-gate uint_t size, uint_t assoc, uint_t lines_per_tag, uint_t line_size) 40257c478bd9Sstevel@tonic-gate { 40267c478bd9Sstevel@tonic-gate if (size == 0 || assoc == 0 || line_size == 0) 40277c478bd9Sstevel@tonic-gate return; 40287c478bd9Sstevel@tonic-gate add_amd_l2_assoc(devi, label, assoc); 40297c478bd9Sstevel@tonic-gate if (lines_per_tag != 0) 40307c478bd9Sstevel@tonic-gate add_cache_prop(devi, label, "lines-per-tag", lines_per_tag); 40317c478bd9Sstevel@tonic-gate add_cache_prop(devi, label, line_str, line_size); 40327c478bd9Sstevel@tonic-gate add_cache_prop(devi, label, size_str, size * 1024); 40337c478bd9Sstevel@tonic-gate } 40347c478bd9Sstevel@tonic-gate 40357c478bd9Sstevel@tonic-gate static void 40367c478bd9Sstevel@tonic-gate amd_cache_info(struct cpuid_info *cpi, dev_info_t *devi) 40377c478bd9Sstevel@tonic-gate { 40388949bcd6Sandrei struct cpuid_regs *cp; 40397c478bd9Sstevel@tonic-gate 40407c478bd9Sstevel@tonic-gate if (cpi->cpi_xmaxeax < 0x80000005) 40417c478bd9Sstevel@tonic-gate return; 40427c478bd9Sstevel@tonic-gate cp = &cpi->cpi_extd[5]; 40437c478bd9Sstevel@tonic-gate 40447c478bd9Sstevel@tonic-gate /* 40457c478bd9Sstevel@tonic-gate * 4M/2M L1 TLB configuration 40467c478bd9Sstevel@tonic-gate * 40477c478bd9Sstevel@tonic-gate * We report the size for 2M pages because AMD 
uses two 40487c478bd9Sstevel@tonic-gate * TLB entries for one 4M page. 40497c478bd9Sstevel@tonic-gate */ 40507c478bd9Sstevel@tonic-gate add_amd_tlb(devi, "dtlb-2M", 40517c478bd9Sstevel@tonic-gate BITX(cp->cp_eax, 31, 24), BITX(cp->cp_eax, 23, 16)); 40527c478bd9Sstevel@tonic-gate add_amd_tlb(devi, "itlb-2M", 40537c478bd9Sstevel@tonic-gate BITX(cp->cp_eax, 15, 8), BITX(cp->cp_eax, 7, 0)); 40547c478bd9Sstevel@tonic-gate 40557c478bd9Sstevel@tonic-gate /* 40567c478bd9Sstevel@tonic-gate * 4K L1 TLB configuration 40577c478bd9Sstevel@tonic-gate */ 40587c478bd9Sstevel@tonic-gate 40597c478bd9Sstevel@tonic-gate switch (cpi->cpi_vendor) { 40607c478bd9Sstevel@tonic-gate uint_t nentries; 40617c478bd9Sstevel@tonic-gate case X86_VENDOR_TM: 40627c478bd9Sstevel@tonic-gate if (cpi->cpi_family >= 5) { 40637c478bd9Sstevel@tonic-gate /* 40647c478bd9Sstevel@tonic-gate * Crusoe processors have 256 TLB entries, but 40657c478bd9Sstevel@tonic-gate * cpuid data format constrains them to only 40667c478bd9Sstevel@tonic-gate * reporting 255 of them. 40677c478bd9Sstevel@tonic-gate */ 40687c478bd9Sstevel@tonic-gate if ((nentries = BITX(cp->cp_ebx, 23, 16)) == 255) 40697c478bd9Sstevel@tonic-gate nentries = 256; 40707c478bd9Sstevel@tonic-gate /* 40717c478bd9Sstevel@tonic-gate * Crusoe processors also have a unified TLB 40727c478bd9Sstevel@tonic-gate */ 40737c478bd9Sstevel@tonic-gate add_amd_tlb(devi, "tlb-4K", BITX(cp->cp_ebx, 31, 24), 40747c478bd9Sstevel@tonic-gate nentries); 40757c478bd9Sstevel@tonic-gate break; 40767c478bd9Sstevel@tonic-gate } 40777c478bd9Sstevel@tonic-gate /*FALLTHROUGH*/ 40787c478bd9Sstevel@tonic-gate default: 40797c478bd9Sstevel@tonic-gate add_amd_tlb(devi, itlb4k_str, 40807c478bd9Sstevel@tonic-gate BITX(cp->cp_ebx, 31, 24), BITX(cp->cp_ebx, 23, 16)); 40817c478bd9Sstevel@tonic-gate add_amd_tlb(devi, dtlb4k_str, 40827c478bd9Sstevel@tonic-gate BITX(cp->cp_ebx, 15, 8), BITX(cp->cp_ebx, 7, 0)); 40837c478bd9Sstevel@tonic-gate break; 40847c478bd9Sstevel@tonic-gate } 40857c478bd9Sstevel@tonic-gate 40867c478bd9Sstevel@tonic-gate /* 40877c478bd9Sstevel@tonic-gate * data L1 cache configuration 40887c478bd9Sstevel@tonic-gate */ 40897c478bd9Sstevel@tonic-gate 40907c478bd9Sstevel@tonic-gate add_amd_cache(devi, l1_dcache_str, 40917c478bd9Sstevel@tonic-gate BITX(cp->cp_ecx, 31, 24), BITX(cp->cp_ecx, 23, 16), 40927c478bd9Sstevel@tonic-gate BITX(cp->cp_ecx, 15, 8), BITX(cp->cp_ecx, 7, 0)); 40937c478bd9Sstevel@tonic-gate 40947c478bd9Sstevel@tonic-gate /* 40957c478bd9Sstevel@tonic-gate * code L1 cache configuration 40967c478bd9Sstevel@tonic-gate */ 40977c478bd9Sstevel@tonic-gate 40987c478bd9Sstevel@tonic-gate add_amd_cache(devi, l1_icache_str, 40997c478bd9Sstevel@tonic-gate BITX(cp->cp_edx, 31, 24), BITX(cp->cp_edx, 23, 16), 41007c478bd9Sstevel@tonic-gate BITX(cp->cp_edx, 15, 8), BITX(cp->cp_edx, 7, 0)); 41017c478bd9Sstevel@tonic-gate 41027c478bd9Sstevel@tonic-gate if (cpi->cpi_xmaxeax < 0x80000006) 41037c478bd9Sstevel@tonic-gate return; 41047c478bd9Sstevel@tonic-gate cp = &cpi->cpi_extd[6]; 41057c478bd9Sstevel@tonic-gate 41067c478bd9Sstevel@tonic-gate /* Check for a unified L2 TLB for large pages */ 41077c478bd9Sstevel@tonic-gate 41087c478bd9Sstevel@tonic-gate if (BITX(cp->cp_eax, 31, 16) == 0) 41097c478bd9Sstevel@tonic-gate add_amd_l2_tlb(devi, "l2-tlb-2M", 41107c478bd9Sstevel@tonic-gate BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0)); 41117c478bd9Sstevel@tonic-gate else { 41127c478bd9Sstevel@tonic-gate add_amd_l2_tlb(devi, "l2-dtlb-2M", 41137c478bd9Sstevel@tonic-gate BITX(cp->cp_eax, 31, 28), BITX(cp->cp_eax, 27, 
16)); 41147c478bd9Sstevel@tonic-gate add_amd_l2_tlb(devi, "l2-itlb-2M", 41157c478bd9Sstevel@tonic-gate BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0)); 41167c478bd9Sstevel@tonic-gate } 41177c478bd9Sstevel@tonic-gate 41187c478bd9Sstevel@tonic-gate /* Check for a unified L2 TLB for 4K pages */ 41197c478bd9Sstevel@tonic-gate 41207c478bd9Sstevel@tonic-gate if (BITX(cp->cp_ebx, 31, 16) == 0) { 41217c478bd9Sstevel@tonic-gate add_amd_l2_tlb(devi, "l2-tlb-4K", 41227c478bd9Sstevel@tonic-gate BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0)); 41237c478bd9Sstevel@tonic-gate } else { 41247c478bd9Sstevel@tonic-gate add_amd_l2_tlb(devi, "l2-dtlb-4K", 41257c478bd9Sstevel@tonic-gate BITX(cp->cp_eax, 31, 28), BITX(cp->cp_eax, 27, 16)); 41267c478bd9Sstevel@tonic-gate add_amd_l2_tlb(devi, "l2-itlb-4K", 41277c478bd9Sstevel@tonic-gate BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0)); 41287c478bd9Sstevel@tonic-gate } 41297c478bd9Sstevel@tonic-gate 41307c478bd9Sstevel@tonic-gate add_amd_l2_cache(devi, l2_cache_str, 41317c478bd9Sstevel@tonic-gate BITX(cp->cp_ecx, 31, 16), BITX(cp->cp_ecx, 15, 12), 41327c478bd9Sstevel@tonic-gate BITX(cp->cp_ecx, 11, 8), BITX(cp->cp_ecx, 7, 0)); 41337c478bd9Sstevel@tonic-gate } 41347c478bd9Sstevel@tonic-gate 41357c478bd9Sstevel@tonic-gate /* 41367c478bd9Sstevel@tonic-gate * There are two basic ways that the x86 world describes it cache 41377c478bd9Sstevel@tonic-gate * and tlb architecture - Intel's way and AMD's way. 41387c478bd9Sstevel@tonic-gate * 41397c478bd9Sstevel@tonic-gate * Return which flavor of cache architecture we should use 41407c478bd9Sstevel@tonic-gate */ 41417c478bd9Sstevel@tonic-gate static int 41427c478bd9Sstevel@tonic-gate x86_which_cacheinfo(struct cpuid_info *cpi) 41437c478bd9Sstevel@tonic-gate { 41447c478bd9Sstevel@tonic-gate switch (cpi->cpi_vendor) { 41457c478bd9Sstevel@tonic-gate case X86_VENDOR_Intel: 41467c478bd9Sstevel@tonic-gate if (cpi->cpi_maxeax >= 2) 41477c478bd9Sstevel@tonic-gate return (X86_VENDOR_Intel); 41487c478bd9Sstevel@tonic-gate break; 41497c478bd9Sstevel@tonic-gate case X86_VENDOR_AMD: 41507c478bd9Sstevel@tonic-gate /* 41517c478bd9Sstevel@tonic-gate * The K5 model 1 was the first part from AMD that reported 41527c478bd9Sstevel@tonic-gate * cache sizes via extended cpuid functions. 41537c478bd9Sstevel@tonic-gate */ 41547c478bd9Sstevel@tonic-gate if (cpi->cpi_family > 5 || 41557c478bd9Sstevel@tonic-gate (cpi->cpi_family == 5 && cpi->cpi_model >= 1)) 41567c478bd9Sstevel@tonic-gate return (X86_VENDOR_AMD); 41577c478bd9Sstevel@tonic-gate break; 41587c478bd9Sstevel@tonic-gate case X86_VENDOR_TM: 41597c478bd9Sstevel@tonic-gate if (cpi->cpi_family >= 5) 41607c478bd9Sstevel@tonic-gate return (X86_VENDOR_AMD); 41617c478bd9Sstevel@tonic-gate /*FALLTHROUGH*/ 41627c478bd9Sstevel@tonic-gate default: 41637c478bd9Sstevel@tonic-gate /* 41647c478bd9Sstevel@tonic-gate * If they have extended CPU data for 0x80000005 41657c478bd9Sstevel@tonic-gate * then we assume they have AMD-format cache 41667c478bd9Sstevel@tonic-gate * information. 41677c478bd9Sstevel@tonic-gate * 41687c478bd9Sstevel@tonic-gate * If not, and the vendor happens to be Cyrix, 41697c478bd9Sstevel@tonic-gate * then try our-Cyrix specific handler. 41707c478bd9Sstevel@tonic-gate * 41717c478bd9Sstevel@tonic-gate * If we're not Cyrix, then assume we're using Intel's 41727c478bd9Sstevel@tonic-gate * table-driven format instead. 
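 *
 * In other words, the preference order for the default case is the
 * AMD-style extended leaves if 0x80000005 is implemented, then the
 * Cyrix-specific table, then the Intel descriptor table; a return of
 * -1 means no usable cache information was found at all.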
41737c478bd9Sstevel@tonic-gate */ 41747c478bd9Sstevel@tonic-gate if (cpi->cpi_xmaxeax >= 0x80000005) 41757c478bd9Sstevel@tonic-gate return (X86_VENDOR_AMD); 41767c478bd9Sstevel@tonic-gate else if (cpi->cpi_vendor == X86_VENDOR_Cyrix) 41777c478bd9Sstevel@tonic-gate return (X86_VENDOR_Cyrix); 41787c478bd9Sstevel@tonic-gate else if (cpi->cpi_maxeax >= 2) 41797c478bd9Sstevel@tonic-gate return (X86_VENDOR_Intel); 41807c478bd9Sstevel@tonic-gate break; 41817c478bd9Sstevel@tonic-gate } 41827c478bd9Sstevel@tonic-gate return (-1); 41837c478bd9Sstevel@tonic-gate } 41847c478bd9Sstevel@tonic-gate 41857c478bd9Sstevel@tonic-gate void 4186fa96bd91SMichael Corcoran cpuid_set_cpu_properties(void *dip, processorid_t cpu_id, 4187fa96bd91SMichael Corcoran struct cpuid_info *cpi) 41887c478bd9Sstevel@tonic-gate { 41897c478bd9Sstevel@tonic-gate dev_info_t *cpu_devi; 41907c478bd9Sstevel@tonic-gate int create; 41917c478bd9Sstevel@tonic-gate 4192fa96bd91SMichael Corcoran cpu_devi = (dev_info_t *)dip; 41937c478bd9Sstevel@tonic-gate 41947c478bd9Sstevel@tonic-gate /* device_type */ 41957c478bd9Sstevel@tonic-gate (void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi, 41967c478bd9Sstevel@tonic-gate "device_type", "cpu"); 41977c478bd9Sstevel@tonic-gate 41987c478bd9Sstevel@tonic-gate /* reg */ 41997c478bd9Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 42007c478bd9Sstevel@tonic-gate "reg", cpu_id); 42017c478bd9Sstevel@tonic-gate 42027c478bd9Sstevel@tonic-gate /* cpu-mhz, and clock-frequency */ 42037c478bd9Sstevel@tonic-gate if (cpu_freq > 0) { 42047c478bd9Sstevel@tonic-gate long long mul; 42057c478bd9Sstevel@tonic-gate 42067c478bd9Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 42077c478bd9Sstevel@tonic-gate "cpu-mhz", cpu_freq); 42087c478bd9Sstevel@tonic-gate if ((mul = cpu_freq * 1000000LL) <= INT_MAX) 42097c478bd9Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 42107c478bd9Sstevel@tonic-gate "clock-frequency", (int)mul); 42117c478bd9Sstevel@tonic-gate } 42127c478bd9Sstevel@tonic-gate 42137417cfdeSKuriakose Kuruvilla if (!is_x86_feature(x86_featureset, X86FSET_CPUID)) { 42147c478bd9Sstevel@tonic-gate return; 42157c478bd9Sstevel@tonic-gate } 42167c478bd9Sstevel@tonic-gate 42177c478bd9Sstevel@tonic-gate /* vendor-id */ 42187c478bd9Sstevel@tonic-gate (void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi, 42197c478bd9Sstevel@tonic-gate "vendor-id", cpi->cpi_vendorstr); 42207c478bd9Sstevel@tonic-gate 42217c478bd9Sstevel@tonic-gate if (cpi->cpi_maxeax == 0) { 42227c478bd9Sstevel@tonic-gate return; 42237c478bd9Sstevel@tonic-gate } 42247c478bd9Sstevel@tonic-gate 42257c478bd9Sstevel@tonic-gate /* 42267c478bd9Sstevel@tonic-gate * family, model, and step 42277c478bd9Sstevel@tonic-gate */ 42287c478bd9Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 42297c478bd9Sstevel@tonic-gate "family", CPI_FAMILY(cpi)); 42307c478bd9Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 42317c478bd9Sstevel@tonic-gate "cpu-model", CPI_MODEL(cpi)); 42327c478bd9Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 42337c478bd9Sstevel@tonic-gate "stepping-id", CPI_STEP(cpi)); 42347c478bd9Sstevel@tonic-gate 42357c478bd9Sstevel@tonic-gate /* type */ 42367c478bd9Sstevel@tonic-gate switch (cpi->cpi_vendor) { 42377c478bd9Sstevel@tonic-gate case X86_VENDOR_Intel: 42387c478bd9Sstevel@tonic-gate create = 1; 42397c478bd9Sstevel@tonic-gate break; 42407c478bd9Sstevel@tonic-gate default: 42417c478bd9Sstevel@tonic-gate create = 0; 
42427c478bd9Sstevel@tonic-gate break; 42437c478bd9Sstevel@tonic-gate } 42447c478bd9Sstevel@tonic-gate if (create) 42457c478bd9Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 42467c478bd9Sstevel@tonic-gate "type", CPI_TYPE(cpi)); 42477c478bd9Sstevel@tonic-gate 42487c478bd9Sstevel@tonic-gate /* ext-family */ 42497c478bd9Sstevel@tonic-gate switch (cpi->cpi_vendor) { 42507c478bd9Sstevel@tonic-gate case X86_VENDOR_Intel: 42517c478bd9Sstevel@tonic-gate case X86_VENDOR_AMD: 42527c478bd9Sstevel@tonic-gate create = cpi->cpi_family >= 0xf; 42537c478bd9Sstevel@tonic-gate break; 42547c478bd9Sstevel@tonic-gate default: 42557c478bd9Sstevel@tonic-gate create = 0; 42567c478bd9Sstevel@tonic-gate break; 42577c478bd9Sstevel@tonic-gate } 42587c478bd9Sstevel@tonic-gate if (create) 42597c478bd9Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 42607c478bd9Sstevel@tonic-gate "ext-family", CPI_FAMILY_XTD(cpi)); 42617c478bd9Sstevel@tonic-gate 42627c478bd9Sstevel@tonic-gate /* ext-model */ 42637c478bd9Sstevel@tonic-gate switch (cpi->cpi_vendor) { 42647c478bd9Sstevel@tonic-gate case X86_VENDOR_Intel: 426563d3f7dfSkk208521 create = IS_EXTENDED_MODEL_INTEL(cpi); 426668c91426Sdmick break; 42677c478bd9Sstevel@tonic-gate case X86_VENDOR_AMD: 4268ee88d2b9Skchow create = CPI_FAMILY(cpi) == 0xf; 42697c478bd9Sstevel@tonic-gate break; 42707c478bd9Sstevel@tonic-gate default: 42717c478bd9Sstevel@tonic-gate create = 0; 42727c478bd9Sstevel@tonic-gate break; 42737c478bd9Sstevel@tonic-gate } 42747c478bd9Sstevel@tonic-gate if (create) 42757c478bd9Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 42767c478bd9Sstevel@tonic-gate "ext-model", CPI_MODEL_XTD(cpi)); 42777c478bd9Sstevel@tonic-gate 42787c478bd9Sstevel@tonic-gate /* generation */ 42797c478bd9Sstevel@tonic-gate switch (cpi->cpi_vendor) { 42807c478bd9Sstevel@tonic-gate case X86_VENDOR_AMD: 42817c478bd9Sstevel@tonic-gate /* 42827c478bd9Sstevel@tonic-gate * AMD K5 model 1 was the first part to support this 42837c478bd9Sstevel@tonic-gate */ 42847c478bd9Sstevel@tonic-gate create = cpi->cpi_xmaxeax >= 0x80000001; 42857c478bd9Sstevel@tonic-gate break; 42867c478bd9Sstevel@tonic-gate default: 42877c478bd9Sstevel@tonic-gate create = 0; 42887c478bd9Sstevel@tonic-gate break; 42897c478bd9Sstevel@tonic-gate } 42907c478bd9Sstevel@tonic-gate if (create) 42917c478bd9Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 42927c478bd9Sstevel@tonic-gate "generation", BITX((cpi)->cpi_extd[1].cp_eax, 11, 8)); 42937c478bd9Sstevel@tonic-gate 42947c478bd9Sstevel@tonic-gate /* brand-id */ 42957c478bd9Sstevel@tonic-gate switch (cpi->cpi_vendor) { 42967c478bd9Sstevel@tonic-gate case X86_VENDOR_Intel: 42977c478bd9Sstevel@tonic-gate /* 42987c478bd9Sstevel@tonic-gate * brand id first appeared on Pentium III Xeon model 8, 42997c478bd9Sstevel@tonic-gate * and Celeron model 8 processors and Opteron 43007c478bd9Sstevel@tonic-gate */ 43017c478bd9Sstevel@tonic-gate create = cpi->cpi_family > 6 || 43027c478bd9Sstevel@tonic-gate (cpi->cpi_family == 6 && cpi->cpi_model >= 8); 43037c478bd9Sstevel@tonic-gate break; 43047c478bd9Sstevel@tonic-gate case X86_VENDOR_AMD: 43057c478bd9Sstevel@tonic-gate create = cpi->cpi_family >= 0xf; 43067c478bd9Sstevel@tonic-gate break; 43077c478bd9Sstevel@tonic-gate default: 43087c478bd9Sstevel@tonic-gate create = 0; 43097c478bd9Sstevel@tonic-gate break; 43107c478bd9Sstevel@tonic-gate } 43117c478bd9Sstevel@tonic-gate if (create && cpi->cpi_brandid != 0) { 43127c478bd9Sstevel@tonic-gate (void) 
ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 43137c478bd9Sstevel@tonic-gate "brand-id", cpi->cpi_brandid); 43147c478bd9Sstevel@tonic-gate } 43157c478bd9Sstevel@tonic-gate 43167c478bd9Sstevel@tonic-gate /* chunks, and apic-id */ 43177c478bd9Sstevel@tonic-gate switch (cpi->cpi_vendor) { 43187c478bd9Sstevel@tonic-gate /* 43197c478bd9Sstevel@tonic-gate * first available on Pentium IV and Opteron (K8) 43207c478bd9Sstevel@tonic-gate */ 43215ff02082Sdmick case X86_VENDOR_Intel: 43225ff02082Sdmick create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf; 43235ff02082Sdmick break; 43245ff02082Sdmick case X86_VENDOR_AMD: 43257c478bd9Sstevel@tonic-gate create = cpi->cpi_family >= 0xf; 43267c478bd9Sstevel@tonic-gate break; 43277c478bd9Sstevel@tonic-gate default: 43287c478bd9Sstevel@tonic-gate create = 0; 43297c478bd9Sstevel@tonic-gate break; 43307c478bd9Sstevel@tonic-gate } 43317c478bd9Sstevel@tonic-gate if (create) { 43327c478bd9Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 43337c478bd9Sstevel@tonic-gate "chunks", CPI_CHUNKS(cpi)); 43347c478bd9Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 4335b6917abeSmishra "apic-id", cpi->cpi_apicid); 43367aec1d6eScindi if (cpi->cpi_chipid >= 0) { 43377c478bd9Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 43387c478bd9Sstevel@tonic-gate "chip#", cpi->cpi_chipid); 43397aec1d6eScindi (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 43407aec1d6eScindi "clog#", cpi->cpi_clogid); 43417aec1d6eScindi } 43427c478bd9Sstevel@tonic-gate } 43437c478bd9Sstevel@tonic-gate 43447c478bd9Sstevel@tonic-gate /* cpuid-features */ 43457c478bd9Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 43467c478bd9Sstevel@tonic-gate "cpuid-features", CPI_FEATURES_EDX(cpi)); 43477c478bd9Sstevel@tonic-gate 43487c478bd9Sstevel@tonic-gate 43497c478bd9Sstevel@tonic-gate /* cpuid-features-ecx */ 43507c478bd9Sstevel@tonic-gate switch (cpi->cpi_vendor) { 43517c478bd9Sstevel@tonic-gate case X86_VENDOR_Intel: 43525ff02082Sdmick create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf; 43537c478bd9Sstevel@tonic-gate break; 435463408480SHans Rosenfeld case X86_VENDOR_AMD: 435563408480SHans Rosenfeld create = cpi->cpi_family >= 0xf; 435663408480SHans Rosenfeld break; 43577c478bd9Sstevel@tonic-gate default: 43587c478bd9Sstevel@tonic-gate create = 0; 43597c478bd9Sstevel@tonic-gate break; 43607c478bd9Sstevel@tonic-gate } 43617c478bd9Sstevel@tonic-gate if (create) 43627c478bd9Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 43637c478bd9Sstevel@tonic-gate "cpuid-features-ecx", CPI_FEATURES_ECX(cpi)); 43647c478bd9Sstevel@tonic-gate 43657c478bd9Sstevel@tonic-gate /* ext-cpuid-features */ 43667c478bd9Sstevel@tonic-gate switch (cpi->cpi_vendor) { 43675ff02082Sdmick case X86_VENDOR_Intel: 43687c478bd9Sstevel@tonic-gate case X86_VENDOR_AMD: 43697c478bd9Sstevel@tonic-gate case X86_VENDOR_Cyrix: 43707c478bd9Sstevel@tonic-gate case X86_VENDOR_TM: 43717c478bd9Sstevel@tonic-gate case X86_VENDOR_Centaur: 43727c478bd9Sstevel@tonic-gate create = cpi->cpi_xmaxeax >= 0x80000001; 43737c478bd9Sstevel@tonic-gate break; 43747c478bd9Sstevel@tonic-gate default: 43757c478bd9Sstevel@tonic-gate create = 0; 43767c478bd9Sstevel@tonic-gate break; 43777c478bd9Sstevel@tonic-gate } 43785ff02082Sdmick if (create) { 43797c478bd9Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 43807c478bd9Sstevel@tonic-gate "ext-cpuid-features", CPI_FEATURES_XTD_EDX(cpi)); 43815ff02082Sdmick (void) 
ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 43825ff02082Sdmick "ext-cpuid-features-ecx", CPI_FEATURES_XTD_ECX(cpi)); 43835ff02082Sdmick } 43847c478bd9Sstevel@tonic-gate 43857c478bd9Sstevel@tonic-gate /* 43867c478bd9Sstevel@tonic-gate * Brand String first appeared in Intel Pentium IV, AMD K5 43877c478bd9Sstevel@tonic-gate * model 1, and Cyrix GXm. On earlier models we try and 43887c478bd9Sstevel@tonic-gate * simulate something similar .. so this string should always 43897c478bd9Sstevel@tonic-gate * same -something- about the processor, however lame. 43907c478bd9Sstevel@tonic-gate */ 43917c478bd9Sstevel@tonic-gate (void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi, 43927c478bd9Sstevel@tonic-gate "brand-string", cpi->cpi_brandstr); 43937c478bd9Sstevel@tonic-gate 43947c478bd9Sstevel@tonic-gate /* 43957c478bd9Sstevel@tonic-gate * Finally, cache and tlb information 43967c478bd9Sstevel@tonic-gate */ 43977c478bd9Sstevel@tonic-gate switch (x86_which_cacheinfo(cpi)) { 43987c478bd9Sstevel@tonic-gate case X86_VENDOR_Intel: 43997c478bd9Sstevel@tonic-gate intel_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props); 44007c478bd9Sstevel@tonic-gate break; 44017c478bd9Sstevel@tonic-gate case X86_VENDOR_Cyrix: 44027c478bd9Sstevel@tonic-gate cyrix_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props); 44037c478bd9Sstevel@tonic-gate break; 44047c478bd9Sstevel@tonic-gate case X86_VENDOR_AMD: 44057c478bd9Sstevel@tonic-gate amd_cache_info(cpi, cpu_devi); 44067c478bd9Sstevel@tonic-gate break; 44077c478bd9Sstevel@tonic-gate default: 44087c478bd9Sstevel@tonic-gate break; 44097c478bd9Sstevel@tonic-gate } 44107c478bd9Sstevel@tonic-gate } 44117c478bd9Sstevel@tonic-gate 44127c478bd9Sstevel@tonic-gate struct l2info { 44137c478bd9Sstevel@tonic-gate int *l2i_csz; 44147c478bd9Sstevel@tonic-gate int *l2i_lsz; 44157c478bd9Sstevel@tonic-gate int *l2i_assoc; 44167c478bd9Sstevel@tonic-gate int l2i_ret; 44177c478bd9Sstevel@tonic-gate }; 44187c478bd9Sstevel@tonic-gate 44197c478bd9Sstevel@tonic-gate /* 44207c478bd9Sstevel@tonic-gate * A cacheinfo walker that fetches the size, line-size and associativity 44217c478bd9Sstevel@tonic-gate * of the L2 cache 44227c478bd9Sstevel@tonic-gate */ 44237c478bd9Sstevel@tonic-gate static int 44247c478bd9Sstevel@tonic-gate intel_l2cinfo(void *arg, const struct cachetab *ct) 44257c478bd9Sstevel@tonic-gate { 44267c478bd9Sstevel@tonic-gate struct l2info *l2i = arg; 44277c478bd9Sstevel@tonic-gate int *ip; 44287c478bd9Sstevel@tonic-gate 44297c478bd9Sstevel@tonic-gate if (ct->ct_label != l2_cache_str && 44307c478bd9Sstevel@tonic-gate ct->ct_label != sl2_cache_str) 44317c478bd9Sstevel@tonic-gate return (0); /* not an L2 -- keep walking */ 44327c478bd9Sstevel@tonic-gate 44337c478bd9Sstevel@tonic-gate if ((ip = l2i->l2i_csz) != NULL) 44347c478bd9Sstevel@tonic-gate *ip = ct->ct_size; 44357c478bd9Sstevel@tonic-gate if ((ip = l2i->l2i_lsz) != NULL) 44367c478bd9Sstevel@tonic-gate *ip = ct->ct_line_size; 44377c478bd9Sstevel@tonic-gate if ((ip = l2i->l2i_assoc) != NULL) 44387c478bd9Sstevel@tonic-gate *ip = ct->ct_assoc; 44397c478bd9Sstevel@tonic-gate l2i->l2i_ret = ct->ct_size; 44407c478bd9Sstevel@tonic-gate return (1); /* was an L2 -- terminate walk */ 44417c478bd9Sstevel@tonic-gate } 44427c478bd9Sstevel@tonic-gate 4443606303c9Skchow /* 4444606303c9Skchow * AMD L2/L3 Cache and TLB Associativity Field Definition: 4445606303c9Skchow * 4446606303c9Skchow * Unlike the associativity for the L1 cache and tlb where the 8 bit 4447606303c9Skchow * value is the associativity, the associativity for the L2 cache and 
4448606303c9Skchow * tlb is encoded in the following table. The 4 bit L2 value serves as 4449606303c9Skchow * an index into the amd_afd[] array to determine the associativity. 4450606303c9Skchow * -1 is undefined. 0 is fully associative. 4451606303c9Skchow */ 4452606303c9Skchow 4453606303c9Skchow static int amd_afd[] = 4454606303c9Skchow {-1, 1, 2, -1, 4, -1, 8, -1, 16, -1, 32, 48, 64, 96, 128, 0}; 4455606303c9Skchow 44567c478bd9Sstevel@tonic-gate static void 44577c478bd9Sstevel@tonic-gate amd_l2cacheinfo(struct cpuid_info *cpi, struct l2info *l2i) 44587c478bd9Sstevel@tonic-gate { 44598949bcd6Sandrei struct cpuid_regs *cp; 44607c478bd9Sstevel@tonic-gate uint_t size, assoc; 4461606303c9Skchow int i; 44627c478bd9Sstevel@tonic-gate int *ip; 44637c478bd9Sstevel@tonic-gate 44647c478bd9Sstevel@tonic-gate if (cpi->cpi_xmaxeax < 0x80000006) 44657c478bd9Sstevel@tonic-gate return; 44667c478bd9Sstevel@tonic-gate cp = &cpi->cpi_extd[6]; 44677c478bd9Sstevel@tonic-gate 4468606303c9Skchow if ((i = BITX(cp->cp_ecx, 15, 12)) != 0 && 44697c478bd9Sstevel@tonic-gate (size = BITX(cp->cp_ecx, 31, 16)) != 0) { 44707c478bd9Sstevel@tonic-gate uint_t cachesz = size * 1024; 4471606303c9Skchow assoc = amd_afd[i]; 44727c478bd9Sstevel@tonic-gate 4473606303c9Skchow ASSERT(assoc != -1); 44747c478bd9Sstevel@tonic-gate 44757c478bd9Sstevel@tonic-gate if ((ip = l2i->l2i_csz) != NULL) 44767c478bd9Sstevel@tonic-gate *ip = cachesz; 44777c478bd9Sstevel@tonic-gate if ((ip = l2i->l2i_lsz) != NULL) 44787c478bd9Sstevel@tonic-gate *ip = BITX(cp->cp_ecx, 7, 0); 44797c478bd9Sstevel@tonic-gate if ((ip = l2i->l2i_assoc) != NULL) 44807c478bd9Sstevel@tonic-gate *ip = assoc; 44817c478bd9Sstevel@tonic-gate l2i->l2i_ret = cachesz; 44827c478bd9Sstevel@tonic-gate } 44837c478bd9Sstevel@tonic-gate } 44847c478bd9Sstevel@tonic-gate 44857c478bd9Sstevel@tonic-gate int 44867c478bd9Sstevel@tonic-gate getl2cacheinfo(cpu_t *cpu, int *csz, int *lsz, int *assoc) 44877c478bd9Sstevel@tonic-gate { 44887c478bd9Sstevel@tonic-gate struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; 44897c478bd9Sstevel@tonic-gate struct l2info __l2info, *l2i = &__l2info; 44907c478bd9Sstevel@tonic-gate 44917c478bd9Sstevel@tonic-gate l2i->l2i_csz = csz; 44927c478bd9Sstevel@tonic-gate l2i->l2i_lsz = lsz; 44937c478bd9Sstevel@tonic-gate l2i->l2i_assoc = assoc; 44947c478bd9Sstevel@tonic-gate l2i->l2i_ret = -1; 44957c478bd9Sstevel@tonic-gate 44967c478bd9Sstevel@tonic-gate switch (x86_which_cacheinfo(cpi)) { 44977c478bd9Sstevel@tonic-gate case X86_VENDOR_Intel: 44987c478bd9Sstevel@tonic-gate intel_walk_cacheinfo(cpi, l2i, intel_l2cinfo); 44997c478bd9Sstevel@tonic-gate break; 45007c478bd9Sstevel@tonic-gate case X86_VENDOR_Cyrix: 45017c478bd9Sstevel@tonic-gate cyrix_walk_cacheinfo(cpi, l2i, intel_l2cinfo); 45027c478bd9Sstevel@tonic-gate break; 45037c478bd9Sstevel@tonic-gate case X86_VENDOR_AMD: 45047c478bd9Sstevel@tonic-gate amd_l2cacheinfo(cpi, l2i); 45057c478bd9Sstevel@tonic-gate break; 45067c478bd9Sstevel@tonic-gate default: 45077c478bd9Sstevel@tonic-gate break; 45087c478bd9Sstevel@tonic-gate } 45097c478bd9Sstevel@tonic-gate return (l2i->l2i_ret); 45107c478bd9Sstevel@tonic-gate } 4511f98fbcecSbholler 4512843e1988Sjohnlev #if !defined(__xpv) 4513843e1988Sjohnlev 45145b8a6efeSbholler uint32_t * 45155b8a6efeSbholler cpuid_mwait_alloc(cpu_t *cpu) 45165b8a6efeSbholler { 45175b8a6efeSbholler uint32_t *ret; 45185b8a6efeSbholler size_t mwait_size; 45195b8a6efeSbholler 4520a3114836SGerry Liu ASSERT(cpuid_checkpass(CPU, 2)); 45215b8a6efeSbholler 4522a3114836SGerry Liu mwait_size = 

int
getl2cacheinfo(cpu_t *cpu, int *csz, int *lsz, int *assoc)
{
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
	struct l2info __l2info, *l2i = &__l2info;

	l2i->l2i_csz = csz;
	l2i->l2i_lsz = lsz;
	l2i->l2i_assoc = assoc;
	l2i->l2i_ret = -1;

	switch (x86_which_cacheinfo(cpi)) {
	case X86_VENDOR_Intel:
		intel_walk_cacheinfo(cpi, l2i, intel_l2cinfo);
		break;
	case X86_VENDOR_Cyrix:
		cyrix_walk_cacheinfo(cpi, l2i, intel_l2cinfo);
		break;
	case X86_VENDOR_AMD:
		amd_l2cacheinfo(cpi, l2i);
		break;
	default:
		break;
	}
	return (l2i->l2i_ret);
}

#if !defined(__xpv)

uint32_t *
cpuid_mwait_alloc(cpu_t *cpu)
{
	uint32_t *ret;
	size_t mwait_size;

	ASSERT(cpuid_checkpass(CPU, 2));

	mwait_size = CPU->cpu_m.mcpu_cpi->cpi_mwait.mon_max;
	if (mwait_size == 0)
		return (NULL);

	/*
	 * kmem_alloc() returns cache line size aligned data for mwait_size
	 * allocations.  mwait_size is currently cache line sized.  Neither
	 * of these implementation details is guaranteed to be true in the
	 * future.
	 *
	 * First try allocating mwait_size as kmem_alloc() currently returns
	 * correctly aligned memory.  If kmem_alloc() does not return
	 * mwait_size aligned memory, then use mwait_size ROUNDUP.
	 *
	 * Set cpi_mwait.buf_actual and cpi_mwait.size_actual in case we
	 * decide to free this memory.
	 */
	ret = kmem_zalloc(mwait_size, KM_SLEEP);
	if (ret == (uint32_t *)P2ROUNDUP((uintptr_t)ret, mwait_size)) {
		cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = ret;
		cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = mwait_size;
		*ret = MWAIT_RUNNING;
		return (ret);
	} else {
		kmem_free(ret, mwait_size);
		ret = kmem_zalloc(mwait_size * 2, KM_SLEEP);
		cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = ret;
		cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = mwait_size * 2;
		ret = (uint32_t *)P2ROUNDUP((uintptr_t)ret, mwait_size);
		*ret = MWAIT_RUNNING;
		return (ret);
	}
}

void
cpuid_mwait_free(cpu_t *cpu)
{
	if (cpu->cpu_m.mcpu_cpi == NULL) {
		return;
	}

	if (cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual != NULL &&
	    cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual > 0) {
		kmem_free(cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual,
		    cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual);
	}

	cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = NULL;
	cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = 0;
}
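
/*
 * Illustrative sketch only -- not used by the kernel: the alignment
 * fallback in cpuid_mwait_alloc() above, with made-up numbers.  With a
 * mwait_size of 64, a buffer returned at ...f940 is already 64-byte
 * aligned and is used directly; one returned at ...f9a0 is freed, a
 * doubled (128 byte) buffer is allocated instead, and the pointer is
 * rounded up to the next 64-byte boundary within it.
 */
static uintptr_t
mwait_align_example(uintptr_t raw)
{
	/* e.g. P2ROUNDUP(0xf9a0, 64) == 0xf9c0 */
	return (P2ROUNDUP(raw, 64));
}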

void
patch_tsc_read(int flag)
{
	size_t cnt;

	switch (flag) {
	case X86_NO_TSC:
		cnt = &_no_rdtsc_end - &_no_rdtsc_start;
		(void) memcpy((void *)tsc_read, (void *)&_no_rdtsc_start, cnt);
		break;
	case X86_HAVE_TSCP:
		cnt = &_tscp_end - &_tscp_start;
		(void) memcpy((void *)tsc_read, (void *)&_tscp_start, cnt);
		break;
	case X86_TSC_MFENCE:
		cnt = &_tsc_mfence_end - &_tsc_mfence_start;
		(void) memcpy((void *)tsc_read,
		    (void *)&_tsc_mfence_start, cnt);
		break;
	case X86_TSC_LFENCE:
		cnt = &_tsc_lfence_end - &_tsc_lfence_start;
		(void) memcpy((void *)tsc_read,
		    (void *)&_tsc_lfence_start, cnt);
		break;
	default:
		break;
	}
}

int
cpuid_deep_cstates_supported(void)
{
	struct cpuid_info *cpi;
	struct cpuid_regs regs;

	ASSERT(cpuid_checkpass(CPU, 1));

	cpi = CPU->cpu_m.mcpu_cpi;

	if (!is_x86_feature(x86_featureset, X86FSET_CPUID))
		return (0);

	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		if (cpi->cpi_xmaxeax < 0x80000007)
			return (0);

		/*
		 * Does the TSC run at a constant rate in all ACPI C-states?
		 */
		regs.cp_eax = 0x80000007;
		(void) __cpuid_insn(&regs);
		return (regs.cp_edx & CPUID_TSC_CSTATE_INVARIANCE);

	default:
		return (0);
	}
}
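
/*
 * Illustrative, hypothetical consumer of the check above -- nothing in
 * this file calls it: an idle driver would typically cap the C-state it
 * is willing to enter when the TSC is not invariant, since a TSC that
 * stops in deep C-states breaks timekeeping.  The state numbers are only
 * an example.
 */
static int
max_cstate_example(void)
{
	return (cpuid_deep_cstates_supported() ? 3 : 1);
}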

#endif	/* !__xpv */

void
post_startup_cpu_fixups(void)
{
#ifndef __xpv
	/*
	 * Some AMD processors support C1E state.  Entering this state will
	 * cause the local APIC timer to stop, which we can't deal with at
	 * this time.
	 */
	if (cpuid_getvendor(CPU) == X86_VENDOR_AMD) {
		on_trap_data_t otd;
		uint64_t reg;

		if (!on_trap(&otd, OT_DATA_ACCESS)) {
			reg = rdmsr(MSR_AMD_INT_PENDING_CMP_HALT);
			/* Disable C1E state if it is enabled by BIOS */
			if ((reg >> AMD_ACTONCMPHALT_SHIFT) &
			    AMD_ACTONCMPHALT_MASK) {
				reg &= ~(AMD_ACTONCMPHALT_MASK <<
				    AMD_ACTONCMPHALT_SHIFT);
				wrmsr(MSR_AMD_INT_PENDING_CMP_HALT, reg);
			}
		}
		no_trap();
	}
#endif	/* !__xpv */
}

/*
 * Set up the necessary registers to enable the XSAVE feature on this
 * processor.  This function needs to be called early enough that no
 * xsave/xrstor ops will execute on the processor before the MSRs are
 * properly set up.
 *
 * The current implementation has the following assumptions:
 *  - cpuid_pass1() is done, so that X86 features are known.
 *  - fpu_probe() is done, so that fp_save_mech is chosen.
 */
void
xsave_setup_msr(cpu_t *cpu)
{
	ASSERT(fp_save_mech == FP_XSAVE);
	ASSERT(is_x86_feature(x86_featureset, X86FSET_XSAVE));

	/* Enable OSXSAVE in CR4. */
	setcr4(getcr4() | CR4_OSXSAVE);
	/*
	 * Update the SW copy of ECX so that /dev/cpu/self/cpuid will
	 * report the correct value.
	 */
	cpu->cpu_m.mcpu_cpi->cpi_std[1].cp_ecx |= CPUID_INTC_ECX_OSXSAVE;
	setup_xfem();
}

/*
 * Starting with the Westmere processor, the local APIC timer will
 * continue running in all C-states, including the deepest C-states.
 */
int
cpuid_arat_supported(void)
{
	struct cpuid_info *cpi;
	struct cpuid_regs regs;

	ASSERT(cpuid_checkpass(CPU, 1));
	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));

	cpi = CPU->cpu_m.mcpu_cpi;

	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		/*
		 * The always-running local APIC timer is
		 * indicated by CPUID.6.EAX[2].
		 */
		if (cpi->cpi_maxeax >= 6) {
			regs.cp_eax = 6;
			(void) cpuid_insn(NULL, &regs);
			return (regs.cp_eax & CPUID_CSTATE_ARAT);
		} else {
			return (0);
		}
	default:
		return (0);
	}
}

/*
 * Check support for the Intel ENERGY_PERF_BIAS feature.
 */
int
cpuid_iepb_supported(struct cpu *cp)
{
	struct cpuid_info *cpi = cp->cpu_m.mcpu_cpi;
	struct cpuid_regs regs;

	ASSERT(cpuid_checkpass(cp, 1));

	if (!(is_x86_feature(x86_featureset, X86FSET_CPUID)) ||
	    !(is_x86_feature(x86_featureset, X86FSET_MSR))) {
		return (0);
	}

	/*
	 * The Intel ENERGY_PERF_BIAS MSR is indicated by
	 * capability bit CPUID.6.ECX[3].
	 */
	if ((cpi->cpi_vendor != X86_VENDOR_Intel) || (cpi->cpi_maxeax < 6))
		return (0);

	regs.cp_eax = 0x6;
	(void) cpuid_insn(NULL, &regs);
	return (regs.cp_ecx & CPUID_EPB_SUPPORT);
}
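
/*
 * Illustrative sketch only -- not used in this file: a power-management
 * consumer could gate its use of the bias MSR on the check above.  The
 * MSR number (0x1B0, IA32_ENERGY_PERF_BIAS) and its 0 (performance) to
 * 15 (power save) range are architectural; the helper, its name, and the
 * "balanced" value 6 are hypothetical.  It assumes it is running on cp.
 */
#define	EXAMPLE_MSR_ENERGY_PERF_BIAS	0x1b0

static void
iepb_example(struct cpu *cp)
{
	if (cpuid_iepb_supported(cp))
		wrmsr(EXAMPLE_MSR_ENERGY_PERF_BIAS, 6);
}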

/*
 * Check support for the TSC deadline timer.
 *
 * The TSC deadline timer provides a superior software programming model
 * over the local APIC timer that eliminates "time drifts".  Instead of
 * specifying a relative time, software specifies an absolute time as the
 * target at which the processor should generate a timer event.
 */
int
cpuid_deadline_tsc_supported(void)
{
	struct cpuid_info *cpi = CPU->cpu_m.mcpu_cpi;
	struct cpuid_regs regs;

	ASSERT(cpuid_checkpass(CPU, 1));
	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));

	switch (cpi->cpi_vendor) {
	case X86_VENDOR_Intel:
		if (cpi->cpi_maxeax >= 1) {
			regs.cp_eax = 1;
			(void) cpuid_insn(NULL, &regs);
			return (regs.cp_ecx & CPUID_DEADLINE_TSC);
		} else {
			return (0);
		}
	default:
		return (0);
	}
}
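
/*
 * Illustrative sketch only -- not used in this file: arming a one-shot
 * event with the TSC deadline timer.  The MSR number (0x6E0,
 * IA32_TSC_DEADLINE) is architectural, but the helper is hypothetical
 * and assumes the local APIC timer LVT has already been switched to
 * TSC-deadline mode.
 */
#define	EXAMPLE_MSR_TSC_DEADLINE	0x6e0

static void
tsc_deadline_example(uint64_t delta_ticks)
{
	if (cpuid_deadline_tsc_supported())
		wrmsr(EXAMPLE_MSR_TSC_DEADLINE, tsc_read() + delta_ticks);
}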

#if defined(__amd64) && !defined(__xpv)
/*
 * Patch in versions of bcopy for high-performance Intel Nhm processors
 * and later...
 */
void
patch_memops(uint_t vendor)
{
	size_t cnt, i;
	caddr_t to, from;

	if ((vendor == X86_VENDOR_Intel) &&
	    is_x86_feature(x86_featureset, X86FSET_SSE4_2)) {
		cnt = &bcopy_patch_end - &bcopy_patch_start;
		to = &bcopy_ck_size;
		from = &bcopy_patch_start;
		for (i = 0; i < cnt; i++) {
			*to++ = *from++;
		}
	}
}
#endif	/* __amd64 && !__xpv */

/*
 * This function finds the number of bits to represent the number of cores
 * per chip and the number of strands per core for Intel platforms.
 * It re-uses the x2APIC cpuid code of cpuid_pass2().
 */
void
cpuid_get_ext_topo(uint_t vendor, uint_t *core_nbits, uint_t *strand_nbits)
{
	struct cpuid_regs regs;
	struct cpuid_regs *cp = &regs;

	if (vendor != X86_VENDOR_Intel) {
		return;
	}

	/* if the cpuid level is 0xB, extended topo is available. */
	cp->cp_eax = 0;
	if (__cpuid_insn(cp) >= 0xB) {

		cp->cp_eax = 0xB;
		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
		(void) __cpuid_insn(cp);

		/*
		 * Check that CPUID.EAX=0BH, ECX=0H:EBX is non-zero, which
		 * indicates that the extended topology enumeration leaf is
		 * available.
		 */
		if (cp->cp_ebx) {
			uint_t coreid_shift = 0;
			uint_t chipid_shift = 0;
			uint_t i;
			uint_t level;

			for (i = 0; i < CPI_FNB_ECX_MAX; i++) {
				cp->cp_eax = 0xB;
				cp->cp_ecx = i;

				(void) __cpuid_insn(cp);
				level = CPI_CPU_LEVEL_TYPE(cp);

				if (level == 1) {
					/*
					 * Thread level processor topology.
					 * Number of bits to shift the APIC ID
					 * right to get the coreid.
					 */
					coreid_shift = BITX(cp->cp_eax, 4, 0);
				} else if (level == 2) {
					/*
					 * Core level processor topology.
					 * Number of bits to shift the APIC ID
					 * right to get the chipid.
					 */
					chipid_shift = BITX(cp->cp_eax, 4, 0);
				}
			}

			if (coreid_shift > 0 && chipid_shift > coreid_shift) {
				*strand_nbits = coreid_shift;
				*core_nbits = chipid_shift - coreid_shift;
			}
		}
	}
}
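
/*
 * Illustrative sketch only -- not used by the kernel: how a caller might
 * apply the bit counts derived above.  With a hypothetical coreid_shift
 * of 1 (two strands per core) and chipid_shift of 5, cpuid_get_ext_topo()
 * reports strand_nbits = 1 and core_nbits = 4, so an APIC ID decomposes
 * as strand = id & 0x1, core = (id >> 1) & 0xf, chip = id >> 5.
 */
static void
ext_topo_example(uint32_t apicid)
{
	uint_t core_nbits = 0, strand_nbits = 0;

	cpuid_get_ext_topo(X86_VENDOR_Intel, &core_nbits, &strand_nbits);

	cmn_err(CE_CONT, "example: chip %u core %u strand %u\n",
	    apicid >> (core_nbits + strand_nbits),
	    (apicid >> strand_nbits) & ((1U << core_nbits) - 1),
	    apicid & ((1U << strand_nbits) - 1));
}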