Lines Matching +full:multi +full:- +full:functional

71  * to recognize Cyrix processors that are not cpuid-compliant, and to deal with
91 * Pass 1 is done on non-boot CPUs during their initialization and the results
235 static int platform_type = -1;
251 * processor cache-line alignment, but this is not guaranteed in the future.
289 * - Socket: Something that can be plugged into a motherboard.
290 * - Package: Same as socket
291 * - Chip: Same as socket. Note that AMD's documentation uses the term "chip"
293 * - Processor node: Some AMD processors have more than one
295 * are fully-functional processors themselves with cores, caches,
297 * inside the package with Hypertransport links. On single-node
299 * - Compute Unit: Some AMD processors pair cores in "compute units" that
320 uint8_t cpi_cacheinfo[16]; /* fn 2: intel-style cache desc */
338 uint_t cpi_ncore_per_chip; /* AMD: fn 0x80000008: %ecx[7-0] */
339 /* Intel: fn 4: %eax[31-26] */
372 * These bit fields are defined by the Intel Application Note AP-485
375 #define CPI_FAMILY_XTD(cpi) BITX((cpi)->cpi_std[1].cp_eax, 27, 20)
376 #define CPI_MODEL_XTD(cpi) BITX((cpi)->cpi_std[1].cp_eax, 19, 16)
377 #define CPI_TYPE(cpi) BITX((cpi)->cpi_std[1].cp_eax, 13, 12)
378 #define CPI_FAMILY(cpi) BITX((cpi)->cpi_std[1].cp_eax, 11, 8)
379 #define CPI_STEP(cpi) BITX((cpi)->cpi_std[1].cp_eax, 3, 0)
380 #define CPI_MODEL(cpi) BITX((cpi)->cpi_std[1].cp_eax, 7, 4)
382 #define CPI_FEATURES_EDX(cpi) ((cpi)->cpi_std[1].cp_edx)
383 #define CPI_FEATURES_ECX(cpi) ((cpi)->cpi_std[1].cp_ecx)
384 #define CPI_FEATURES_XTD_EDX(cpi) ((cpi)->cpi_extd[1].cp_edx)
385 #define CPI_FEATURES_XTD_ECX(cpi) ((cpi)->cpi_extd[1].cp_ecx)
386 #define CPI_FEATURES_7_0_EBX(cpi) ((cpi)->cpi_std[7].cp_ebx)
388 #define CPI_BRANDID(cpi) BITX((cpi)->cpi_std[1].cp_ebx, 7, 0)
389 #define CPI_CHUNKS(cpi) BITX((cpi)->cpi_std[1].cp_ebx, 15, 8)
390 #define CPI_CPU_COUNT(cpi) BITX((cpi)->cpi_std[1].cp_ebx, 23, 16)
391 #define CPI_APIC_ID(cpi) BITX((cpi)->cpi_std[1].cp_ebx, 31, 24)
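
The CPI_* accessors above are thin wrappers around BITX, which extracts an inclusive bit range from a register word. Here is a minimal user-space sketch of the same extraction, assuming BITX's conventional definition and a made-up leaf-1 signature; it also applies the extended-family/extended-model widening that pass 1 performs later:

        #include <stdio.h>
        #include <stdint.h>

        /* Assumed definition; the real one lives in a kernel header. */
        #define BITX(u, high, low) \
                (((u) >> (low)) & ((1U << ((high) - (low) + 1)) - 1))

        int
        main(void)
        {
                uint32_t eax = 0x000306c3;      /* hypothetical signature */
                uint32_t family = BITX(eax, 11, 8);
                uint32_t model = BITX(eax, 7, 4);
                uint32_t step = BITX(eax, 3, 0);

                if (family == 0xf)
                        family += BITX(eax, 27, 20);    /* extended family */
                if (family == 0x6 || family >= 0xf)     /* Intel rule */
                        model += BITX(eax, 19, 16) << 4; /* extended model */

                (void) printf("family 0x%x model 0x%x stepping 0x%x\n",
                    family, model, step);
                return (0);
        }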
400 * Defined by Intel Application Note AP-485
402 #define CPI_NUM_CORES(regs) BITX((regs)->cp_eax, 31, 26)
403 #define CPI_NTHR_SHR_CACHE(regs) BITX((regs)->cp_eax, 25, 14)
404 #define CPI_FULL_ASSOC_CACHE(regs) BITX((regs)->cp_eax, 9, 9)
405 #define CPI_SELF_INIT_CACHE(regs) BITX((regs)->cp_eax, 8, 8)
406 #define CPI_CACHE_LVL(regs) BITX((regs)->cp_eax, 7, 5)
407 #define CPI_CACHE_TYPE(regs) BITX((regs)->cp_eax, 4, 0)
408 #define CPI_CPU_LEVEL_TYPE(regs) BITX((regs)->cp_ecx, 15, 8)
410 #define CPI_CACHE_WAYS(regs) BITX((regs)->cp_ebx, 31, 22)
411 #define CPI_CACHE_PARTS(regs) BITX((regs)->cp_ebx, 21, 12)
412 #define CPI_CACHE_COH_LN_SZ(regs) BITX((regs)->cp_ebx, 11, 0)
414 #define CPI_CACHE_SETS(regs) BITX((regs)->cp_ecx, 31, 0)
416 #define CPI_PREFCH_STRIDE(regs) BITX((regs)->cp_edx, 9, 0)
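
The function-4 fields above compose multiplicatively: each field is reported minus one, and total size = ways * partitions * line size * sets, exactly how intel_cpuid_4_cache_info() computes it further down. A sketch with hypothetical register values describing a 256 KB, 8-way, 64-byte-line cache:

        #include <stdio.h>
        #include <stdint.h>

        #define BITX(u, high, low) \
                (((u) >> (low)) & ((1U << ((high) - (low) + 1)) - 1))

        int
        main(void)
        {
                /* Hypothetical fn-4 output: ways-1=7, parts-1=0, line-1=63. */
                uint32_t ebx = (7u << 22) | (0u << 12) | 63u;
                uint32_t ecx = 511;             /* sets - 1 */

                uint32_t ways = BITX(ebx, 31, 22) + 1;
                uint32_t parts = BITX(ebx, 21, 12) + 1;
                uint32_t line = BITX(ebx, 11, 0) + 1;

                (void) printf("cache size = %u bytes\n",
                    ways * parts * line * (ecx + 1));   /* 262144 */
                return (0);
        }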
420 * A couple of shorthand macros to identify "later" P6-family chips
421 * like the Pentium M and Core. First, the "older" P6-based stuff
422 * (loosely defined as "pre-Pentium-4"):
427 cpi->cpi_family == 6 && \
428 (cpi->cpi_model == 1 || \
429 cpi->cpi_model == 3 || \
430 cpi->cpi_model == 5 || \
431 cpi->cpi_model == 6 || \
432 cpi->cpi_model == 7 || \
433 cpi->cpi_model == 8 || \
434 cpi->cpi_model == 0xA || \
435 cpi->cpi_model == 0xB) \
439 #define IS_NEW_F6(cpi) ((cpi->cpi_family == 6) && !IS_LEGACY_P6(cpi))
442 #define IS_EXTENDED_MODEL_INTEL(cpi) (cpi->cpi_family == 0x6 || \
443 cpi->cpi_family >= 0xf)
448 * See cpuid section of "Intel 64 and IA-32 Architectures Software Developer's
449 * Manual Volume 2A: Instruction Set Reference, A-M" #25366-022US, November
457 #define MWAIT_SUPPORTED(cpi) ((cpi)->cpi_std[1].cp_ecx & CPUID_INTC_ECX_MON)
458 #define MWAIT_INT_ENABLE(cpi) ((cpi)->cpi_std[5].cp_ecx & 0x2)
459 #define MWAIT_EXTENSION(cpi) ((cpi)->cpi_std[5].cp_ecx & 0x1)
460 #define MWAIT_SIZE_MIN(cpi) BITX((cpi)->cpi_std[5].cp_eax, 15, 0)
461 #define MWAIT_SIZE_MAX(cpi) BITX((cpi)->cpi_std[5].cp_ebx, 15, 0)
463 * Number of sub-cstates for a given c-state.
466 BITX((cpi)->cpi_std[5].cp_edx, c_state + 3, c_state)
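
MWAIT_NUM_CSTATES packs one 4-bit sub-C-state count into each nibble of leaf-5 %edx, so its c_state argument is the nibble's low bit position (0, 4, 8, ...), not the C-state number itself. A sketch with a hypothetical %edx:

        #include <stdio.h>
        #include <stdint.h>

        #define BITX(u, high, low) \
                (((u) >> (low)) & ((1U << ((high) - (low) + 1)) - 1))

        /* c is the nibble's low bit: 0 for C0, 4 for C1, 8 for C2, ... */
        #define MWAIT_NUM_CSTATES(edx, c)       BITX(edx, (c) + 3, (c))

        int
        main(void)
        {
                uint32_t edx = 0x00001120;      /* hypothetical leaf-5 %edx */
                int c;

                for (c = 0; c <= 16; c += 4)
                        (void) printf("C%d: %u sub-states\n", c / 4,
                            MWAIT_NUM_CSTATES(edx, c));
                return (0);
        }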
485 * Apply various platform-dependent restrictions where the
497 cp->cp_edx &= in platform_cpuid_mangle()
509 cp->cp_edx &= in platform_cpuid_mangle()
516 cp->cp_ecx &= ~CPUID_AMD_ECX_CMP_LGCY; in platform_cpuid_mangle()
527 * Zero out the (ncores-per-chip - 1) field in platform_cpuid_mangle()
529 cp->cp_eax &= 0x03ffffff; in platform_cpuid_mangle()
539 cp->cp_ecx &= ~CPUID_AMD_ECX_CR8D; in platform_cpuid_mangle()
544 * Zero out the (ncores-per-chip - 1) field in platform_cpuid_mangle()
546 cp->cp_ecx &= 0xffffff00; in platform_cpuid_mangle()
563 * we don't currently support. Could be set to non-zero values
573 * Allocate space for mcpu_cpi in the machcpu structure for all non-boot CPUs.
583 ASSERT(cpu->cpu_id != 0); in cpuid_alloc_space()
584 ASSERT(cpu->cpu_m.mcpu_cpi == NULL); in cpuid_alloc_space()
585 cpu->cpu_m.mcpu_cpi = in cpuid_alloc_space()
586 kmem_zalloc(sizeof (*cpu->cpu_m.mcpu_cpi), KM_SLEEP); in cpuid_alloc_space()
592 struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; in cpuid_free_space()
601 for (i = 1; i < cpi->cpi_std_4_size; i++) in cpuid_free_space()
602 kmem_free(cpi->cpi_std_4[i], sizeof (struct cpuid_regs)); in cpuid_free_space()
603 if (cpi->cpi_std_4_size > 0) in cpuid_free_space()
604 kmem_free(cpi->cpi_std_4, in cpuid_free_space()
605 cpi->cpi_std_4_size * sizeof (struct cpuid_regs *)); in cpuid_free_space()
608 cpu->cpu_m.mcpu_cpi = NULL; in cpuid_free_space()
617 * consumer to execute (uses _cpuid_chiprev --> synth_amd_info --> get_hwenv).
627 ASSERT(platform_type == -1); in determine_platform()
683 * Xen's pseudo-cpuid function returns a string representing the in determine_platform()
687 * hypervisor might use a different one depending on whether Hyper-V in determine_platform()
709 ASSERT(platform_type != -1); in get_hwenv()
741 struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; in cpuid_intel_getids()
743 for (i = 1; i < cpi->cpi_ncpu_per_chip; i <<= 1) in cpuid_intel_getids()
746 cpi->cpi_chipid = cpi->cpi_apicid >> chipid_shift; in cpuid_intel_getids()
747 cpi->cpi_clogid = cpi->cpi_apicid & ((1 << chipid_shift) - 1); in cpuid_intel_getids()
751 * Multi-core (and possibly multi-threaded) in cpuid_intel_getids()
755 if (cpi->cpi_ncore_per_chip == 1) in cpuid_intel_getids()
756 ncpu_per_core = cpi->cpi_ncpu_per_chip; in cpuid_intel_getids()
757 else if (cpi->cpi_ncore_per_chip > 1) in cpuid_intel_getids()
758 ncpu_per_core = cpi->cpi_ncpu_per_chip / in cpuid_intel_getids()
759 cpi->cpi_ncore_per_chip; in cpuid_intel_getids()
764 * +-----------------------+------+------+ in cpuid_intel_getids()
766 * +-----------------------+------+------+ in cpuid_intel_getids()
767 * <------- chipid --------> in cpuid_intel_getids()
768 * <------- coreid ---------------> in cpuid_intel_getids()
769 * <--- clogid --> in cpuid_intel_getids()
770 * <------> in cpuid_intel_getids()
776 * store the value of cpi->cpi_ncpu_per_chip. in cpuid_intel_getids()
779 * cpi->cpi_ncore_per_chip. in cpuid_intel_getids()
783 cpi->cpi_coreid = cpi->cpi_apicid >> coreid_shift; in cpuid_intel_getids()
784 cpi->cpi_pkgcoreid = cpi->cpi_clogid >> coreid_shift; in cpuid_intel_getids()
787 * Single-core multi-threaded processors. in cpuid_intel_getids()
789 cpi->cpi_coreid = cpi->cpi_chipid; in cpuid_intel_getids()
790 cpi->cpi_pkgcoreid = 0; in cpuid_intel_getids()
792 cpi->cpi_procnodeid = cpi->cpi_chipid; in cpuid_intel_getids()
793 cpi->cpi_compunitid = cpi->cpi_coreid; in cpuid_intel_getids()
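
The shifts in the diagram are each computed as the ceiling of log2 of the relevant count, which is what the for (i = 1; i < n; i <<= 1) loops above do. A self-contained sketch for a hypothetical package with 4 cores and 2 threads per core:

        #include <stdio.h>
        #include <stdint.h>

        int
        main(void)
        {
                uint32_t apicid = 0x1d;         /* hypothetical APIC ID */
                uint32_t ncpu_per_chip = 8, ncore_per_chip = 4;
                uint32_t ncpu_per_core = ncpu_per_chip / ncore_per_chip;
                uint32_t chipid_shift = 0, coreid_shift = 0, i;

                for (i = 1; i < ncpu_per_chip; i <<= 1)
                        chipid_shift++;         /* ceil(log2(8)) = 3 */
                for (i = 1; i < ncpu_per_core; i <<= 1)
                        coreid_shift++;         /* ceil(log2(2)) = 1 */

                (void) printf("chipid %u clogid %u coreid %u pkgcoreid %u\n",
                    apicid >> chipid_shift,
                    apicid & ((1u << chipid_shift) - 1),
                    apicid >> coreid_shift,
                    (apicid & ((1u << chipid_shift) - 1)) >> coreid_shift);
                return (0);
        }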
802 struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; in cpuid_amd_getids()
811 * coreids starting at a multiple of the number of cores per chip - in cpuid_amd_getids()
828 cpi->cpi_coreid = cpu->cpu_id; in cpuid_amd_getids()
829 cpi->cpi_compunitid = cpu->cpu_id; in cpuid_amd_getids()
831 if (cpi->cpi_xmaxeax >= 0x80000008) { in cpuid_amd_getids()
833 coreidsz = BITX((cpi)->cpi_extd[8].cp_ecx, 15, 12); in cpuid_amd_getids()
839 cpi->cpi_ncore_per_chip = in cpuid_amd_getids()
840 BITX((cpi)->cpi_extd[8].cp_ecx, 7, 0) + 1; in cpuid_amd_getids()
843 for (i = 1; i < cpi->cpi_ncore_per_chip; i <<= 1) in cpuid_amd_getids()
849 /* Assume single-core part */ in cpuid_amd_getids()
850 cpi->cpi_ncore_per_chip = 1; in cpuid_amd_getids()
854 cpi->cpi_clogid = cpi->cpi_pkgcoreid = in cpuid_amd_getids()
855 cpi->cpi_apicid & ((1<<coreidsz) - 1); in cpuid_amd_getids()
856 cpi->cpi_ncpu_per_chip = cpi->cpi_ncore_per_chip; in cpuid_amd_getids()
860 cpi->cpi_xmaxeax >= 0x8000001e) { in cpuid_amd_getids()
861 cp = &cpi->cpi_extd[0x1e]; in cpuid_amd_getids()
862 cp->cp_eax = 0x8000001e; in cpuid_amd_getids()
865 cpi->cpi_procnodes_per_pkg = BITX(cp->cp_ecx, 10, 8) + 1; in cpuid_amd_getids()
866 cpi->cpi_procnodeid = BITX(cp->cp_ecx, 7, 0); in cpuid_amd_getids()
867 cpi->cpi_cores_per_compunit = BITX(cp->cp_ebx, 15, 8) + 1; in cpuid_amd_getids()
868 cpi->cpi_compunitid = BITX(cp->cp_ebx, 7, 0) in cpuid_amd_getids()
869 + (cpi->cpi_ncore_per_chip / cpi->cpi_cores_per_compunit) in cpuid_amd_getids()
870 * (cpi->cpi_procnodeid / cpi->cpi_procnodes_per_pkg); in cpuid_amd_getids()
871 } else if (cpi->cpi_family == 0xf || cpi->cpi_family >= 0x11) { in cpuid_amd_getids()
872 cpi->cpi_procnodeid = (cpi->cpi_apicid >> coreidsz) & 7; in cpuid_amd_getids()
873 } else if (cpi->cpi_family == 0x10) { in cpuid_amd_getids()
875 * See if we are a multi-node processor. in cpuid_amd_getids()
879 if ((cpi->cpi_model < 8) || BITX(nb_caps_reg, 29, 29) == 0) { in cpuid_amd_getids()
880 /* Single-node */ in cpuid_amd_getids()
881 cpi->cpi_procnodeid = BITX(cpi->cpi_apicid, 5, in cpuid_amd_getids()
886 * Multi-node revision D (2 nodes per package in cpuid_amd_getids()
889 cpi->cpi_procnodes_per_pkg = 2; in cpuid_amd_getids()
891 first_half = (cpi->cpi_pkgcoreid <= in cpuid_amd_getids()
892 (cpi->cpi_ncore_per_chip/2 - 1)); in cpuid_amd_getids()
894 if (cpi->cpi_apicid == cpi->cpi_pkgcoreid) { in cpuid_amd_getids()
896 cpi->cpi_procnodeid = (first_half ? 0 : 1); in cpuid_amd_getids()
901 node2_1 = BITX(cpi->cpi_apicid, 5, 4) << 1; in cpuid_amd_getids()
908 * always 0 on dual-node processors) in cpuid_amd_getids()
911 cpi->cpi_procnodeid = node2_1 + in cpuid_amd_getids()
914 cpi->cpi_procnodeid = node2_1 + in cpuid_amd_getids()
919 cpi->cpi_procnodeid = 0; in cpuid_amd_getids()
922 cpi->cpi_chipid = in cpuid_amd_getids()
923 cpi->cpi_procnodeid / cpi->cpi_procnodes_per_pkg; in cpuid_amd_getids()
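
For topology-extension parts, the compute-unit ID read from leaf 0x8000001e is only unique within a node, so the code above offsets it by the number of compute units per chip times the chip index. A sketch with hypothetical leaf values (field labels follow the BITX decodes above):

        #include <stdio.h>
        #include <stdint.h>

        #define BITX(u, high, low) \
                (((u) >> (low)) & ((1U << ((high) - (low) + 1)) - 1))

        int
        main(void)
        {
                /* Hypothetical 0x8000001e output: 2 nodes per package, 2
                 * cores per compute unit, this CPU on node 2 (chip 1). */
                uint32_t ebx = (1u << 8) | 3u;  /* cores/unit - 1, unit id */
                uint32_t ecx = (1u << 8) | 2u;  /* nodes/pkg - 1, node id */
                uint32_t ncore_per_chip = 16;

                uint32_t nodes_per_pkg = BITX(ecx, 10, 8) + 1;
                uint32_t nodeid = BITX(ecx, 7, 0);
                uint32_t cores_per_cu = BITX(ebx, 15, 8) + 1;
                uint32_t cuid = BITX(ebx, 7, 0) +
                    (ncore_per_chip / cores_per_cu) *
                    (nodeid / nodes_per_pkg);

                (void) printf("node %u, compute unit %u\n", nodeid, cuid);
                return (0);                     /* node 2, compute unit 11 */
        }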
961 if (cpu->cpu_id == 0) { in cpuid_pass1()
962 if (cpu->cpu_m.mcpu_cpi == NULL) in cpuid_pass1()
963 cpu->cpu_m.mcpu_cpi = &cpuid_info0; in cpuid_pass1()
968 cpi = cpu->cpu_m.mcpu_cpi; in cpuid_pass1()
970 cp = &cpi->cpi_std[0]; in cpuid_pass1()
971 cp->cp_eax = 0; in cpuid_pass1()
972 cpi->cpi_maxeax = __cpuid_insn(cp); in cpuid_pass1()
974 uint32_t *iptr = (uint32_t *)cpi->cpi_vendorstr; in cpuid_pass1()
975 *iptr++ = cp->cp_ebx; in cpuid_pass1()
976 *iptr++ = cp->cp_edx; in cpuid_pass1()
977 *iptr++ = cp->cp_ecx; in cpuid_pass1()
978 *(char *)&cpi->cpi_vendorstr[12] = '\0'; in cpuid_pass1()
981 cpi->cpi_vendor = _cpuid_vendorstr_to_vendorcode(cpi->cpi_vendorstr); in cpuid_pass1()
982 x86_vendor = cpi->cpi_vendor; /* for compatibility */ in cpuid_pass1()
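
Note the store order for the leaf-0 vendor string: %ebx first, then %edx, then %ecx, which is why iptr is filled in that sequence. A user-space sketch with the well-known constants:

        #include <stdio.h>
        #include <string.h>
        #include <stdint.h>

        int
        main(void)
        {
                uint32_t ebx = 0x756e6547;      /* "Genu" */
                uint32_t edx = 0x49656e69;      /* "ineI" */
                uint32_t ecx = 0x6c65746e;      /* "ntel" */
                char vendor[13];

                (void) memcpy(vendor + 0, &ebx, 4);
                (void) memcpy(vendor + 4, &edx, 4);
                (void) memcpy(vendor + 8, &ecx, 4);
                vendor[12] = '\0';

                /* "GenuineIntel" on a little-endian machine */
                (void) printf("%s\n", vendor);
                return (0);
        }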
987 if (cpi->cpi_maxeax > CPI_MAXEAX_MAX) in cpuid_pass1()
988 cpi->cpi_maxeax = CPI_MAXEAX_MAX; in cpuid_pass1()
989 if (cpi->cpi_maxeax < 1) in cpuid_pass1()
992 cp = &cpi->cpi_std[1]; in cpuid_pass1()
993 cp->cp_eax = 1; in cpuid_pass1()
999 cpi->cpi_model = CPI_MODEL(cpi); in cpuid_pass1()
1000 cpi->cpi_family = CPI_FAMILY(cpi); in cpuid_pass1()
1002 if (cpi->cpi_family == 0xf) in cpuid_pass1()
1003 cpi->cpi_family += CPI_FAMILY_XTD(cpi); in cpuid_pass1()
1011 switch (cpi->cpi_vendor) { in cpuid_pass1()
1014 cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4; in cpuid_pass1()
1018 cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4; in cpuid_pass1()
1021 if (cpi->cpi_model == 0xf) in cpuid_pass1()
1022 cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4; in cpuid_pass1()
1026 cpi->cpi_step = CPI_STEP(cpi); in cpuid_pass1()
1027 cpi->cpi_brandid = CPI_BRANDID(cpi); in cpuid_pass1()
1031 * - believe %edx feature word in cpuid_pass1()
1032 * - ignore %ecx feature word in cpuid_pass1()
1033 * - 32-bit virtual and physical addressing in cpuid_pass1()
1038 cpi->cpi_pabits = cpi->cpi_vabits = 32; in cpuid_pass1()
1040 switch (cpi->cpi_vendor) { in cpuid_pass1()
1042 if (cpi->cpi_family == 5) in cpuid_pass1()
1050 if (cpi->cpi_model < 3 && cpi->cpi_step < 3) in cpuid_pass1()
1051 cp->cp_edx &= ~CPUID_INTC_EDX_SEP; in cpuid_pass1()
1052 } else if (IS_NEW_F6(cpi) || cpi->cpi_family == 0xf) { in cpuid_pass1()
1061 } else if (cpi->cpi_family > 0xf) in cpuid_pass1()
1067 if (cpi->cpi_maxeax < 5) in cpuid_pass1()
1075 if (cpi->cpi_family == 0xf && cpi->cpi_model == 0xe) { in cpuid_pass1()
1076 cp->cp_eax = (0xf0f & cp->cp_eax) | 0xc0; in cpuid_pass1()
1077 cpi->cpi_model = 0xc; in cpuid_pass1()
1080 if (cpi->cpi_family == 5) { in cpuid_pass1()
1093 if (cpi->cpi_model == 0) { in cpuid_pass1()
1094 if (cp->cp_edx & 0x200) { in cpuid_pass1()
1095 cp->cp_edx &= ~0x200; in cpuid_pass1()
1096 cp->cp_edx |= CPUID_INTC_EDX_PGE; in cpuid_pass1()
1103 if (cpi->cpi_model < 6) in cpuid_pass1()
1111 if (cpi->cpi_family >= 0xf) in cpuid_pass1()
1117 if (cpi->cpi_maxeax < 5) in cpuid_pass1()
1126 * Pre-family-10h Opterons do not have the MWAIT instruction. in cpuid_pass1()
1136 if (cpi->cpi_family == 5 && cpi->cpi_model == 4 && in cpuid_pass1()
1137 (cpi->cpi_step == 2 || cpi->cpi_step == 3)) in cpuid_pass1()
1138 cp->cp_edx |= CPUID_INTC_EDX_CX8; in cpuid_pass1()
1144 if (cpi->cpi_family == 6) in cpuid_pass1()
1145 cp->cp_edx |= CPUID_INTC_EDX_CX8; in cpuid_pass1()
1225 cp->cp_edx &= mask_edx; in cpuid_pass1()
1226 cp->cp_ecx &= mask_ecx; in cpuid_pass1()
1233 platform_cpuid_mangle(cpi->cpi_vendor, 1, cp); in cpuid_pass1()
1239 if (cpi->cpi_vendor == X86_VENDOR_Intel && cpi->cpi_maxeax >= 7) { in cpuid_pass1()
1241 ecp = &cpi->cpi_std[7]; in cpuid_pass1()
1242 ecp->cp_eax = 7; in cpuid_pass1()
1243 ecp->cp_ecx = 0; in cpuid_pass1()
1250 ecp->cp_ebx &= ~CPUID_INTC_EBX_7_0_BMI1; in cpuid_pass1()
1251 ecp->cp_ebx &= ~CPUID_INTC_EBX_7_0_BMI2; in cpuid_pass1()
1252 ecp->cp_ebx &= ~CPUID_INTC_EBX_7_0_AVX2; in cpuid_pass1()
1255 if (ecp->cp_ebx & CPUID_INTC_EBX_7_0_SMEP) in cpuid_pass1()
1258 if (ecp->cp_ebx & CPUID_INTC_EBX_7_0_RDSEED) in cpuid_pass1()
1261 if (ecp->cp_ebx & CPUID_INTC_EBX_7_0_ADX) in cpuid_pass1()
1268 cp->cp_edx |= cpuid_feature_edx_include; in cpuid_pass1()
1269 cp->cp_edx &= ~cpuid_feature_edx_exclude; in cpuid_pass1()
1271 cp->cp_ecx |= cpuid_feature_ecx_include; in cpuid_pass1()
1272 cp->cp_ecx &= ~cpuid_feature_ecx_exclude; in cpuid_pass1()
1274 if (cp->cp_edx & CPUID_INTC_EDX_PSE) { in cpuid_pass1()
1277 if (cp->cp_edx & CPUID_INTC_EDX_TSC) { in cpuid_pass1()
1280 if (cp->cp_edx & CPUID_INTC_EDX_MSR) { in cpuid_pass1()
1283 if (cp->cp_edx & CPUID_INTC_EDX_MTRR) { in cpuid_pass1()
1286 if (cp->cp_edx & CPUID_INTC_EDX_PGE) { in cpuid_pass1()
1289 if (cp->cp_edx & CPUID_INTC_EDX_CMOV) { in cpuid_pass1()
1292 if (cp->cp_edx & CPUID_INTC_EDX_MMX) { in cpuid_pass1()
1295 if ((cp->cp_edx & CPUID_INTC_EDX_MCE) != 0 && in cpuid_pass1()
1296 (cp->cp_edx & CPUID_INTC_EDX_MCA) != 0) { in cpuid_pass1()
1299 if (cp->cp_edx & CPUID_INTC_EDX_PAE) { in cpuid_pass1()
1302 if (cp->cp_edx & CPUID_INTC_EDX_CX8) { in cpuid_pass1()
1305 if (cp->cp_ecx & CPUID_INTC_ECX_CX16) { in cpuid_pass1()
1308 if (cp->cp_edx & CPUID_INTC_EDX_PAT) { in cpuid_pass1()
1311 if (cp->cp_edx & CPUID_INTC_EDX_SEP) { in cpuid_pass1()
1314 if (cp->cp_edx & CPUID_INTC_EDX_FXSR) { in cpuid_pass1()
1320 if (cp->cp_edx & CPUID_INTC_EDX_SSE) { in cpuid_pass1()
1323 if (cp->cp_edx & CPUID_INTC_EDX_SSE2) { in cpuid_pass1()
1326 if (cp->cp_ecx & CPUID_INTC_ECX_SSE3) { in cpuid_pass1()
1329 if (cp->cp_ecx & CPUID_INTC_ECX_SSSE3) { in cpuid_pass1()
1332 if (cp->cp_ecx & CPUID_INTC_ECX_SSE4_1) { in cpuid_pass1()
1335 if (cp->cp_ecx & CPUID_INTC_ECX_SSE4_2) { in cpuid_pass1()
1338 if (cp->cp_ecx & CPUID_INTC_ECX_AES) { in cpuid_pass1()
1341 if (cp->cp_ecx & CPUID_INTC_ECX_PCLMULQDQ) { in cpuid_pass1()
1345 if (cp->cp_ecx & CPUID_INTC_ECX_XSAVE) { in cpuid_pass1()
1349 if (cp->cp_ecx & CPUID_INTC_ECX_AVX) { in cpuid_pass1()
1357 if (cp->cp_ecx & CPUID_INTC_ECX_F16C) in cpuid_pass1()
1361 if (cp->cp_ecx & CPUID_INTC_ECX_FMA) in cpuid_pass1()
1365 if (cpi->cpi_std[7].cp_ebx & in cpuid_pass1()
1370 if (cpi->cpi_std[7].cp_ebx & in cpuid_pass1()
1375 if (cpi->cpi_std[7].cp_ebx & in cpuid_pass1()
1382 if (cp->cp_ecx & CPUID_INTC_ECX_X2APIC) { in cpuid_pass1()
1385 if (cp->cp_edx & CPUID_INTC_EDX_DE) { in cpuid_pass1()
1389 if (cp->cp_ecx & CPUID_INTC_ECX_MON) { in cpuid_pass1()
1395 if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) { in cpuid_pass1()
1396 cpi->cpi_mwait.support |= MWAIT_SUPPORT; in cpuid_pass1()
1406 ASSERT((cp->cp_ecx & CPUID_INTC_ECX_MON) && in cpuid_pass1()
1407 (cp->cp_edx & CPUID_INTC_EDX_CLFSH)); in cpuid_pass1()
1413 if (cp->cp_ecx & CPUID_INTC_ECX_VMX) { in cpuid_pass1()
1417 if (cp->cp_ecx & CPUID_INTC_ECX_RDRAND) in cpuid_pass1()
1424 if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) { in cpuid_pass1()
1426 x86_clflush_size = (BITX(cp->cp_ebx, 15, 8) * 8); in cpuid_pass1()
1429 cpi->cpi_pabits = 36; in cpuid_pass1()
1440 if (cp->cp_edx & CPUID_INTC_EDX_HTT) { in cpuid_pass1()
1441 cpi->cpi_ncpu_per_chip = CPI_CPU_COUNT(cpi); in cpuid_pass1()
1442 if (cpi->cpi_ncpu_per_chip > 1) in cpuid_pass1()
1445 cpi->cpi_ncpu_per_chip = 1; in cpuid_pass1()
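
Leaf-1 %ebx thus carries three independent facts: the CLFLUSH line size in 8-byte chunks (hence the multiply by 8 above), the logical CPU count that is only meaningful when the HTT bit is set, and the initial APIC ID. A sketch with a hypothetical %ebx:

        #include <stdio.h>
        #include <stdint.h>

        #define BITX(u, high, low) \
                (((u) >> (low)) & ((1U << ((high) - (low) + 1)) - 1))

        int
        main(void)
        {
                uint32_t ebx = 0x04100800;      /* hypothetical leaf-1 %ebx */

                (void) printf("clflush size: %u bytes\n",
                    BITX(ebx, 15, 8) * 8);                      /* 64 */
                (void) printf("logical CPUs per package: %u\n",
                    BITX(ebx, 23, 16));                         /* 16 */
                (void) printf("initial APIC id: %u\n",
                    BITX(ebx, 31, 24));                         /* 4 */
                return (0);
        }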
1453 switch (cpi->cpi_vendor) { in cpuid_pass1()
1455 if (IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf) in cpuid_pass1()
1459 if (cpi->cpi_family > 5 || in cpuid_pass1()
1460 (cpi->cpi_family == 5 && cpi->cpi_model >= 1)) in cpuid_pass1()
1465 * Only these Cyrix CPUs are -known- to support in cpuid_pass1()
1480 cp = &cpi->cpi_extd[0]; in cpuid_pass1()
1481 cp->cp_eax = 0x80000000; in cpuid_pass1()
1482 cpi->cpi_xmaxeax = __cpuid_insn(cp); in cpuid_pass1()
1485 if (cpi->cpi_xmaxeax & 0x80000000) { in cpuid_pass1()
1487 if (cpi->cpi_xmaxeax > CPI_XMAXEAX_MAX) in cpuid_pass1()
1488 cpi->cpi_xmaxeax = CPI_XMAXEAX_MAX; in cpuid_pass1()
1490 switch (cpi->cpi_vendor) { in cpuid_pass1()
1493 if (cpi->cpi_xmaxeax < 0x80000001) in cpuid_pass1()
1495 cp = &cpi->cpi_extd[1]; in cpuid_pass1()
1496 cp->cp_eax = 0x80000001; in cpuid_pass1()
1499 if (cpi->cpi_vendor == X86_VENDOR_AMD && in cpuid_pass1()
1500 cpi->cpi_family == 5 && in cpuid_pass1()
1501 cpi->cpi_model == 6 && in cpuid_pass1()
1502 cpi->cpi_step == 6) { in cpuid_pass1()
1507 if (cp->cp_edx & 0x400) { in cpuid_pass1()
1508 cp->cp_edx &= ~0x400; in cpuid_pass1()
1509 cp->cp_edx |= CPUID_AMD_EDX_SYSC; in cpuid_pass1()
1513 platform_cpuid_mangle(cpi->cpi_vendor, 0x80000001, cp); in cpuid_pass1()
1518 if (cp->cp_edx & CPUID_AMD_EDX_NX) { in cpuid_pass1()
1523 * Regardless of whether or not we boot 64-bit, in cpuid_pass1()
1525 * the CPU is capable of running 64-bit. in cpuid_pass1()
1527 if (cp->cp_edx & CPUID_AMD_EDX_LM) { in cpuid_pass1()
1532 /* 1 GB large page - enable only for 64 bit kernel */ in cpuid_pass1()
1533 if (cp->cp_edx & CPUID_AMD_EDX_1GPG) { in cpuid_pass1()
1538 if ((cpi->cpi_vendor == X86_VENDOR_AMD) && in cpuid_pass1()
1539 (cpi->cpi_std[1].cp_edx & CPUID_INTC_EDX_FXSR) && in cpuid_pass1()
1540 (cp->cp_ecx & CPUID_AMD_ECX_SSE4A)) { in cpuid_pass1()
1549 if (cpi->cpi_vendor == X86_VENDOR_AMD && in cpuid_pass1()
1551 (cp->cp_ecx & CPUID_AMD_ECX_CMP_LGCY)) { in cpuid_pass1()
1559 * instead. In the amd64 kernel, things are -way- in cpuid_pass1()
1562 if (cp->cp_edx & CPUID_AMD_EDX_SYSC) { in cpuid_pass1()
1575 if (cp->cp_edx & CPUID_AMD_EDX_TSCP) { in cpuid_pass1()
1579 if (cp->cp_ecx & CPUID_AMD_ECX_SVM) { in cpuid_pass1()
1583 if (cp->cp_ecx & CPUID_AMD_ECX_TOPOEXT) { in cpuid_pass1()
1587 if (cp->cp_ecx & CPUID_AMD_ECX_PCEC) { in cpuid_pass1()
1598 switch (cpi->cpi_vendor) { in cpuid_pass1()
1600 if (cpi->cpi_maxeax >= 4) { in cpuid_pass1()
1601 cp = &cpi->cpi_std[4]; in cpuid_pass1()
1602 cp->cp_eax = 4; in cpuid_pass1()
1603 cp->cp_ecx = 0; in cpuid_pass1()
1605 platform_cpuid_mangle(cpi->cpi_vendor, 4, cp); in cpuid_pass1()
1609 if (cpi->cpi_xmaxeax < 0x80000008) in cpuid_pass1()
1611 cp = &cpi->cpi_extd[8]; in cpuid_pass1()
1612 cp->cp_eax = 0x80000008; in cpuid_pass1()
1614 platform_cpuid_mangle(cpi->cpi_vendor, 0x80000008, cp); in cpuid_pass1()
1620 cpi->cpi_pabits = BITX(cp->cp_eax, 7, 0); in cpuid_pass1()
1621 cpi->cpi_vabits = BITX(cp->cp_eax, 15, 8); in cpuid_pass1()
1630 switch (cpi->cpi_vendor) { in cpuid_pass1()
1632 if (cpi->cpi_maxeax < 4) { in cpuid_pass1()
1633 cpi->cpi_ncore_per_chip = 1; in cpuid_pass1()
1636 cpi->cpi_ncore_per_chip = in cpuid_pass1()
1637 BITX((cpi)->cpi_std[4].cp_eax, 31, 26) + 1; in cpuid_pass1()
1641 if (cpi->cpi_xmaxeax < 0x80000008) { in cpuid_pass1()
1642 cpi->cpi_ncore_per_chip = 1; in cpuid_pass1()
1649 * be affected by "downcoring" - it reflects in cpuid_pass1()
1653 cpi->cpi_ncore_per_chip = in cpuid_pass1()
1654 BITX((cpi)->cpi_extd[8].cp_ecx, 7, 0) + 1; in cpuid_pass1()
1658 cpi->cpi_ncore_per_chip = 1; in cpuid_pass1()
1663 * Get CPUID data about TSC Invariance in Deep C-State. in cpuid_pass1()
1665 switch (cpi->cpi_vendor) { in cpuid_pass1()
1667 if (cpi->cpi_maxeax >= 7) { in cpuid_pass1()
1668 cp = &cpi->cpi_extd[7]; in cpuid_pass1()
1669 cp->cp_eax = 0x80000007; in cpuid_pass1()
1670 cp->cp_ecx = 0; in cpuid_pass1()
1678 cpi->cpi_ncore_per_chip = 1; in cpuid_pass1()
1684 if (cpi->cpi_ncore_per_chip > 1) { in cpuid_pass1()
1692 if (cpi->cpi_ncpu_per_chip == cpi->cpi_ncore_per_chip) { in cpuid_pass1()
1696 cpi->cpi_apicid = CPI_APIC_ID(cpi); in cpuid_pass1()
1697 cpi->cpi_procnodes_per_pkg = 1; in cpuid_pass1()
1698 cpi->cpi_cores_per_compunit = 1; in cpuid_pass1()
1702 * Single-core single-threaded processors. in cpuid_pass1()
1704 cpi->cpi_chipid = -1; in cpuid_pass1()
1705 cpi->cpi_clogid = 0; in cpuid_pass1()
1706 cpi->cpi_coreid = cpu->cpu_id; in cpuid_pass1()
1707 cpi->cpi_pkgcoreid = 0; in cpuid_pass1()
1708 if (cpi->cpi_vendor == X86_VENDOR_AMD) in cpuid_pass1()
1709 cpi->cpi_procnodeid = BITX(cpi->cpi_apicid, 3, 0); in cpuid_pass1()
1711 cpi->cpi_procnodeid = cpi->cpi_chipid; in cpuid_pass1()
1712 } else if (cpi->cpi_ncpu_per_chip > 1) { in cpuid_pass1()
1713 if (cpi->cpi_vendor == X86_VENDOR_Intel) in cpuid_pass1()
1715 else if (cpi->cpi_vendor == X86_VENDOR_AMD) in cpuid_pass1()
1722 cpi->cpi_coreid = cpi->cpi_chipid; in cpuid_pass1()
1723 cpi->cpi_pkgcoreid = 0; in cpuid_pass1()
1724 cpi->cpi_procnodeid = cpi->cpi_chipid; in cpuid_pass1()
1725 cpi->cpi_compunitid = cpi->cpi_chipid; in cpuid_pass1()
1732 cpi->cpi_chiprev = _cpuid_chiprev(cpi->cpi_vendor, cpi->cpi_family, in cpuid_pass1()
1733 cpi->cpi_model, cpi->cpi_step); in cpuid_pass1()
1734 cpi->cpi_chiprevstr = _cpuid_chiprevstr(cpi->cpi_vendor, in cpuid_pass1()
1735 cpi->cpi_family, cpi->cpi_model, cpi->cpi_step); in cpuid_pass1()
1736 cpi->cpi_socket = _cpuid_skt(cpi->cpi_vendor, cpi->cpi_family, in cpuid_pass1()
1737 cpi->cpi_model, cpi->cpi_step); in cpuid_pass1()
1740 cpi->cpi_pass = 1; in cpuid_pass1()
1760 struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; in cpuid_pass2()
1762 ASSERT(cpi->cpi_pass == 1); in cpuid_pass2()
1764 if (cpi->cpi_maxeax < 1) in cpuid_pass2()
1767 if ((nmax = cpi->cpi_maxeax + 1) > NMAX_CPI_STD) in cpuid_pass2()
1772 for (n = 2, cp = &cpi->cpi_std[2]; n < nmax; n++, cp++) { in cpuid_pass2()
1773 cp->cp_eax = n; in cpuid_pass2()
1791 cp->cp_ecx = 0; in cpuid_pass2()
1794 platform_cpuid_mangle(cpi->cpi_vendor, n, cp); in cpuid_pass2()
1806 cpi->cpi_ncache = sizeof (*cp) * in cpuid_pass2()
1807 BITX(cp->cp_eax, 7, 0); in cpuid_pass2()
1808 if (cpi->cpi_ncache == 0) in cpuid_pass2()
1810 cpi->cpi_ncache--; /* skip count byte */ in cpuid_pass2()
1817 if (cpi->cpi_ncache > (sizeof (*cp) - 1)) in cpuid_pass2()
1818 cpi->cpi_ncache = sizeof (*cp) - 1; in cpuid_pass2()
1820 dp = cpi->cpi_cacheinfo; in cpuid_pass2()
1821 if (BITX(cp->cp_eax, 31, 31) == 0) { in cpuid_pass2()
1822 uint8_t *p = (void *)&cp->cp_eax; in cpuid_pass2()
1827 if (BITX(cp->cp_ebx, 31, 31) == 0) { in cpuid_pass2()
1828 uint8_t *p = (void *)&cp->cp_ebx; in cpuid_pass2()
1833 if (BITX(cp->cp_ecx, 31, 31) == 0) { in cpuid_pass2()
1834 uint8_t *p = (void *)&cp->cp_ecx; in cpuid_pass2()
1839 if (BITX(cp->cp_edx, 31, 31) == 0) { in cpuid_pass2()
1840 uint8_t *p = (void *)&cp->cp_edx; in cpuid_pass2()
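
Each function-2 register holds four one-byte cache/TLB descriptors and is valid only when its bit 31 is clear; the low byte of %eax is the leaf's repeat count, which is why the copy above starts past it. A sketch with plausible but hypothetical descriptor values:

        #include <stdio.h>
        #include <stdint.h>

        int
        main(void)
        {
                /* Hypothetical fn-2 output (%eax, %ebx, %ecx, %edx). */
                uint32_t regs[4] = {
                        0x665b5001, 0x00000000, 0x00000000, 0x007a7000
                };
                int r, b;

                for (r = 0; r < 4; r++) {
                        if (regs[r] & 0x80000000u)      /* no info here */
                                continue;
                        for (b = (r == 0) ? 1 : 0; b < 4; b++) {
                                uint8_t desc = (regs[r] >> (8 * b)) & 0xff;

                                if (desc != 0)
                                        (void) printf("desc 0x%02x\n", desc);
                        }
                }
                return (0);     /* prints 0x50 0x5b 0x66 0x70 0x7a */
        }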
1860 if (!(cpi->cpi_mwait.support & MWAIT_SUPPORT)) in cpuid_pass2()
1872 "size %ld", cpu->cpu_id, (long)mwait_size); in cpuid_pass2()
1877 cpi->cpi_mwait.mon_min = (size_t)MWAIT_SIZE_MIN(cpi); in cpuid_pass2()
1878 cpi->cpi_mwait.mon_max = mwait_size; in cpuid_pass2()
1880 cpi->cpi_mwait.support |= MWAIT_EXTENSIONS; in cpuid_pass2()
1882 cpi->cpi_mwait.support |= in cpuid_pass2()
1892 if (cpi->cpi_maxeax >= 0xB && cpi->cpi_vendor == X86_VENDOR_Intel) { in cpuid_pass2()
1896 cp->cp_eax = 0xB; in cpuid_pass2()
1897 cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0; in cpuid_pass2()
1902 * Check CPUID.EAX=0BH, ECX=0H:EBX is non-zero, which in cpuid_pass2()
1906 if (cp->cp_ebx) { in cpuid_pass2()
1916 cp->cp_eax = 0xB; in cpuid_pass2()
1917 cp->cp_ecx = i; in cpuid_pass2()
1923 x2apic_id = cp->cp_edx; in cpuid_pass2()
1924 coreid_shift = BITX(cp->cp_eax, 4, 0); in cpuid_pass2()
1925 ncpu_per_core = BITX(cp->cp_ebx, 15, 0); in cpuid_pass2()
1927 x2apic_id = cp->cp_edx; in cpuid_pass2()
1928 chipid_shift = BITX(cp->cp_eax, 4, 0); in cpuid_pass2()
1929 ncpu_per_chip = BITX(cp->cp_ebx, 15, 0); in cpuid_pass2()
1933 cpi->cpi_apicid = x2apic_id; in cpuid_pass2()
1934 cpi->cpi_ncpu_per_chip = ncpu_per_chip; in cpuid_pass2()
1935 cpi->cpi_ncore_per_chip = ncpu_per_chip / in cpuid_pass2()
1937 cpi->cpi_chipid = x2apic_id >> chipid_shift; in cpuid_pass2()
1938 cpi->cpi_clogid = x2apic_id & ((1 << chipid_shift) - 1); in cpuid_pass2()
1939 cpi->cpi_coreid = x2apic_id >> coreid_shift; in cpuid_pass2()
1940 cpi->cpi_pkgcoreid = cpi->cpi_clogid >> coreid_shift; in cpuid_pass2()
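
Leaf 0xB is re-executed with the level index in %ecx; the level type in %ecx[15:8] tells whether %eax[4:0] is the shift that strips SMT bits (type 1) or core bits (type 2). A sketch replaying the derivation above on two hypothetical levels:

        #include <stdio.h>
        #include <stdint.h>

        #define BITX(u, high, low) \
                (((u) >> (low)) & ((1U << ((high) - (low) + 1)) - 1))

        struct level { uint32_t eax, ebx, ecx, edx; };

        int
        main(void)
        {
                /* Hypothetical leaf-0xB output: SMT level, then core level. */
                struct level lvl[2] = {
                        { 1, 2, (1u << 8), 0x21 },      /* type 1: thread */
                        { 4, 8, (2u << 8) | 1u, 0x21 }, /* type 2: core */
                };
                uint32_t x2apic_id = lvl[0].edx;
                uint32_t coreid_shift = 0, chipid_shift = 0;
                int i;

                for (i = 0; i < 2; i++) {
                        uint32_t type = BITX(lvl[i].ecx, 15, 8);

                        if (type == 1)
                                coreid_shift = BITX(lvl[i].eax, 4, 0);
                        else if (type == 2)
                                chipid_shift = BITX(lvl[i].eax, 4, 0);
                }
                (void) printf("chipid %u pkgcoreid %u\n",
                    x2apic_id >> chipid_shift,
                    (x2apic_id & ((1u << chipid_shift) - 1)) >> coreid_shift);
                return (0);     /* chipid 2 pkgcoreid 0 */
        }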
1950 if (cpi->cpi_maxeax >= 0xD) { in cpuid_pass2()
1955 cp->cp_eax = 0xD; in cpuid_pass2()
1956 cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0; in cpuid_pass2()
1963 if ((cp->cp_eax & XFEATURE_LEGACY_FP) == 0 || in cpuid_pass2()
1964 (cp->cp_eax & XFEATURE_SSE) == 0) { in cpuid_pass2()
1968 cpi->cpi_xsave.xsav_hw_features_low = cp->cp_eax; in cpuid_pass2()
1969 cpi->cpi_xsave.xsav_hw_features_high = cp->cp_edx; in cpuid_pass2()
1970 cpi->cpi_xsave.xsav_max_size = cp->cp_ecx; in cpuid_pass2()
1976 if (cpi->cpi_xsave.xsav_hw_features_low & XFEATURE_AVX) { in cpuid_pass2()
1977 cp->cp_eax = 0xD; in cpuid_pass2()
1978 cp->cp_ecx = 2; in cpuid_pass2()
1979 cp->cp_edx = cp->cp_ebx = 0; in cpuid_pass2()
1983 if (cp->cp_ebx != CPUID_LEAFD_2_YMM_OFFSET || in cpuid_pass2()
1984 cp->cp_eax != CPUID_LEAFD_2_YMM_SIZE) { in cpuid_pass2()
1988 cpi->cpi_xsave.ymm_size = cp->cp_eax; in cpuid_pass2()
1989 cpi->cpi_xsave.ymm_offset = cp->cp_ebx; in cpuid_pass2()
1995 xsave_state_size = cpi->cpi_xsave.xsav_max_size; in cpuid_pass2()
2001 cpu->cpu_id, cpi->cpi_xsave.xsav_hw_features_low, in cpuid_pass2()
2002 cpi->cpi_xsave.xsav_hw_features_high, in cpuid_pass2()
2003 (int)cpi->cpi_xsave.xsav_max_size, in cpuid_pass2()
2004 (int)cpi->cpi_xsave.ymm_size, in cpuid_pass2()
2005 (int)cpi->cpi_xsave.ymm_offset); in cpuid_pass2()
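
The leaf-0xD checks above refuse to trust the reported save-area layout unless the architecturally mandatory x87 and SSE state bits are both present. A minimal sketch of that gate, using the architectural XFEATURE bit values and a hypothetical save-area size:

        #include <stdio.h>
        #include <stdint.h>

        #define XFEATURE_LEGACY_FP      0x1     /* x87 state */
        #define XFEATURE_SSE            0x2     /* XMM state */
        #define XFEATURE_AVX            0x4     /* YMM state */

        int
        main(void)
        {
                /* Hypothetical leaf 0xD, %ecx=0 output. */
                uint32_t eax = 0x7;             /* x87 | SSE | AVX */
                uint32_t ecx = 832;             /* max save size, bytes */

                if ((eax & XFEATURE_LEGACY_FP) == 0 ||
                    (eax & XFEATURE_SSE) == 0) {
                        (void) printf("bogus xsave info; disable xsave\n");
                        return (1);
                }
                if (eax & XFEATURE_AVX)
                        (void) printf("AVX state; save area %u bytes\n", ecx);
                return (0);
        }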
2009 * This must be a non-boot CPU. We cannot in cpuid_pass2()
2013 ASSERT(cpu->cpu_id != 0); in cpuid_pass2()
2016 "continue.", cpu->cpu_id); in cpuid_pass2()
2021 * non-boot CPUs. When we're here on a boot CPU in cpuid_pass2()
2022 * we should disable the feature, on a non-boot in cpuid_pass2()
2025 if (cpu->cpu_id == 0) { in cpuid_pass2()
2064 if ((cpi->cpi_xmaxeax & 0x80000000) == 0) in cpuid_pass2()
2067 if ((nmax = cpi->cpi_xmaxeax - 0x80000000 + 1) > NMAX_CPI_EXTD) in cpuid_pass2()
2073 iptr = (void *)cpi->cpi_brandstr; in cpuid_pass2()
2074 for (n = 2, cp = &cpi->cpi_extd[2]; n < nmax; cp++, n++) { in cpuid_pass2()
2075 cp->cp_eax = 0x80000000 + n; in cpuid_pass2()
2077 platform_cpuid_mangle(cpi->cpi_vendor, 0x80000000 + n, cp); in cpuid_pass2()
2085 *iptr++ = cp->cp_eax; in cpuid_pass2()
2086 *iptr++ = cp->cp_ebx; in cpuid_pass2()
2087 *iptr++ = cp->cp_ecx; in cpuid_pass2()
2088 *iptr++ = cp->cp_edx; in cpuid_pass2()
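
Leaves 0x80000002-4 return the 48-byte brand string through %eax/%ebx/%ecx/%edx in that order, which the loop above copies verbatim; cpuid_pass3() then squeezes the space padding. A sketch of both steps on hypothetical leaf values:

        #include <stdio.h>
        #include <string.h>
        #include <stdint.h>

        int
        main(void)
        {
                /* Hypothetical 0x80000002..4 output encoding the string
                 * "        Intel(R) Celeron(R) CPU 2.40GHz", NUL-padded. */
                uint32_t regs[12] = {
                        0x20202020, 0x20202020, 0x65746e49, 0x2952286c,
                        0x6c654320, 0x6e6f7265, 0x20295228, 0x20555043,
                        0x30342e32, 0x007a4847, 0x00000000, 0x00000000,
                };
                char brand[49], *src, *dst;

                (void) memcpy(brand, regs, 48);
                brand[48] = '\0';

                /* The pass-3 cleanup amounts to squeezing the padding. */
                for (src = dst = brand; *src != '\0'; src++)
                        if (*src != ' ' || (dst > brand && dst[-1] != ' '))
                                *dst++ = *src;
                *dst = '\0';

                /* [Intel(R) Celeron(R) CPU 2.40GHz] */
                (void) printf("[%s]\n", brand);
                return (0);
        }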
2091 switch (cpi->cpi_vendor) { in cpuid_pass2()
2099 if (cpi->cpi_family < 6 || in cpuid_pass2()
2100 (cpi->cpi_family == 6 && in cpuid_pass2()
2101 cpi->cpi_model < 1)) in cpuid_pass2()
2102 cp->cp_eax = 0; in cpuid_pass2()
2109 switch (cpi->cpi_vendor) { in cpuid_pass2()
2116 if (cpi->cpi_family < 6 || in cpuid_pass2()
2117 cpi->cpi_family == 6 && in cpuid_pass2()
2118 cpi->cpi_model < 1) in cpuid_pass2()
2119 cp->cp_eax = cp->cp_ebx = 0; in cpuid_pass2()
2125 if (cpi->cpi_family == 6 && in cpuid_pass2()
2126 cpi->cpi_model == 3 && in cpuid_pass2()
2127 cpi->cpi_step == 0) { in cpuid_pass2()
2128 cp->cp_ecx &= 0xffff; in cpuid_pass2()
2129 cp->cp_ecx |= 0x400000; in cpuid_pass2()
2137 if (cpi->cpi_family != 6) in cpuid_pass2()
2144 if (cpi->cpi_model == 7 || in cpuid_pass2()
2145 cpi->cpi_model == 8) in cpuid_pass2()
2146 cp->cp_ecx = in cpuid_pass2()
2147 BITX(cp->cp_ecx, 31, 24) << 16 | in cpuid_pass2()
2148 BITX(cp->cp_ecx, 23, 16) << 12 | in cpuid_pass2()
2149 BITX(cp->cp_ecx, 15, 8) << 8 | in cpuid_pass2()
2150 BITX(cp->cp_ecx, 7, 0); in cpuid_pass2()
2154 if (cpi->cpi_model == 9 && cpi->cpi_step == 1) in cpuid_pass2()
2155 cp->cp_ecx |= 8 << 12; in cpuid_pass2()
2172 cpi->cpi_pass = 2; in cpuid_pass2()
2181 cpi->cpi_maxeax < 1 || cpi->cpi_family < 5) in intel_cpubrand()
2184 switch (cpi->cpi_family) { in intel_cpubrand()
2188 switch (cpi->cpi_model) { in intel_cpubrand()
2203 cp = &cpi->cpi_std[2]; /* cache info */ in intel_cpubrand()
2208 tmp = (cp->cp_eax >> (8 * i)) & 0xff; in intel_cpubrand()
2218 tmp = (cp->cp_ebx >> (8 * i)) & 0xff; in intel_cpubrand()
2228 tmp = (cp->cp_ecx >> (8 * i)) & 0xff; in intel_cpubrand()
2238 tmp = (cp->cp_edx >> (8 * i)) & 0xff; in intel_cpubrand()
2248 return (cpi->cpi_model == 5 ? in intel_cpubrand()
2251 return (cpi->cpi_model == 5 ? in intel_cpubrand()
2262 if (cpi->cpi_brandid != 0) { in intel_cpubrand()
2291 sgn = (cpi->cpi_family << 8) | in intel_cpubrand()
2292 (cpi->cpi_model << 4) | cpi->cpi_step; in intel_cpubrand()
2295 if (brand_tbl[i].bt_bid == cpi->cpi_brandid) in intel_cpubrand()
2298 if (sgn == 0x6b1 && cpi->cpi_brandid == 3) in intel_cpubrand()
2300 if (sgn < 0xf13 && cpi->cpi_brandid == 0xb) in intel_cpubrand()
2302 if (sgn < 0xf13 && cpi->cpi_brandid == 0xe) in intel_cpubrand()
2315 cpi->cpi_maxeax < 1 || cpi->cpi_family < 5) in amd_cpubrand()
2318 switch (cpi->cpi_family) { in amd_cpubrand()
2320 switch (cpi->cpi_model) { in amd_cpubrand()
2327 return ("AMD-K5(r)"); in amd_cpubrand()
2330 return ("AMD-K6(r)"); in amd_cpubrand()
2332 return ("AMD-K6(r)-2"); in amd_cpubrand()
2334 return ("AMD-K6(r)-III"); in amd_cpubrand()
2339 switch (cpi->cpi_model) { in amd_cpubrand()
2341 return ("AMD-K7(tm)"); in amd_cpubrand()
2355 return ((cpi->cpi_extd[6].cp_ecx >> 16) >= 256 ? in amd_cpubrand()
2364 if (cpi->cpi_family == 0xf && cpi->cpi_model == 5 && in amd_cpubrand()
2365 cpi->cpi_brandid != 0) { in amd_cpubrand()
2366 switch (BITX(cpi->cpi_brandid, 7, 5)) { in amd_cpubrand()
2385 cpi->cpi_maxeax < 1 || cpi->cpi_family < 5 || in cyrix_cpubrand()
2408 if (cpi->cpi_family == 4 && cpi->cpi_model == 9) in cyrix_cpubrand()
2410 else if (cpi->cpi_family == 5) { in cyrix_cpubrand()
2411 switch (cpi->cpi_model) { in cyrix_cpubrand()
2419 } else if (cpi->cpi_family == 6) { in cyrix_cpubrand()
2420 switch (cpi->cpi_model) { in cyrix_cpubrand()
2448 switch (cpi->cpi_vendor) { in fabricate_brandstr()
2459 if (cpi->cpi_family == 5 && cpi->cpi_model == 0) in fabricate_brandstr()
2463 if (cpi->cpi_family == 5) in fabricate_brandstr()
2464 switch (cpi->cpi_model) { in fabricate_brandstr()
2479 if (cpi->cpi_family == 5 && in fabricate_brandstr()
2480 (cpi->cpi_model == 0 || cpi->cpi_model == 2)) in fabricate_brandstr()
2484 if (cpi->cpi_family == 5 && cpi->cpi_model == 0) in fabricate_brandstr()
2488 if (cpi->cpi_family == 5 && cpi->cpi_model == 4) in fabricate_brandstr()
2497 (void) strcpy((char *)cpi->cpi_brandstr, brand); in fabricate_brandstr()
2504 (void) snprintf(cpi->cpi_brandstr, sizeof (cpi->cpi_brandstr), in fabricate_brandstr()
2505 "%s %d.%d.%d", cpi->cpi_vendorstr, cpi->cpi_family, in fabricate_brandstr()
2506 cpi->cpi_model, cpi->cpi_step); in fabricate_brandstr()
2524 struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; in cpuid_pass3()
2526 ASSERT(cpi->cpi_pass == 2); in cpuid_pass3()
2536 cpi->cpi_ncpu_shr_last_cache = 1; in cpuid_pass3()
2537 cpi->cpi_last_lvl_cacheid = cpu->cpu_id; in cpuid_pass3()
2539 if (cpi->cpi_maxeax >= 4 && cpi->cpi_vendor == X86_VENDOR_Intel) { in cpuid_pass3()
2548 cp->cp_eax = 4; in cpuid_pass3()
2549 cp->cp_ecx = i; in cpuid_pass3()
2558 cpi->cpi_ncpu_shr_last_cache = in cpuid_pass3()
2562 cpi->cpi_std_4_size = size = i; in cpuid_pass3()
2567 * cpuid_pass2() stashed in cpi->cpi_std[4]. in cpuid_pass3()
2570 cpi->cpi_std_4 = in cpuid_pass3()
2572 cpi->cpi_std_4[0] = &cpi->cpi_std[4]; in cpuid_pass3()
2582 cp = cpi->cpi_std_4[i] = in cpuid_pass3()
2584 cp->cp_eax = 4; in cpuid_pass3()
2585 cp->cp_ecx = i; in cpuid_pass3()
2598 for (i = 1; i < cpi->cpi_ncpu_shr_last_cache; i <<= 1) in cpuid_pass3()
2600 cpi->cpi_last_lvl_cacheid = cpi->cpi_apicid >> shft; in cpuid_pass3()
2606 if ((cpi->cpi_xmaxeax & 0x80000000) == 0) { in cpuid_pass3()
2615 if (cpi->cpi_brandstr[0]) { in cpuid_pass3()
2616 size_t maxlen = sizeof (cpi->cpi_brandstr); in cpuid_pass3()
2619 dst = src = (char *)cpi->cpi_brandstr; in cpuid_pass3()
2620 src[maxlen - 1] = '\0'; in cpuid_pass3()
2635 * Now do an in-place copy. in cpuid_pass3()
2638 * -really- no need to shout. in cpuid_pass3()
2662 while (--dst > cpi->cpi_brandstr) in cpuid_pass3()
2670 cpi->cpi_pass = 3; in cpuid_pass3()
2687 cpi = cpu->cpu_m.mcpu_cpi; in cpuid_pass4()
2689 ASSERT(cpi->cpi_pass == 3); in cpuid_pass4()
2691 if (cpi->cpi_maxeax >= 1) { in cpuid_pass4()
2692 uint32_t *edx = &cpi->cpi_support[STD_EDX_FEATURES]; in cpuid_pass4()
2693 uint32_t *ecx = &cpi->cpi_support[STD_ECX_FEATURES]; in cpuid_pass4()
2694 uint32_t *ebx = &cpi->cpi_support[STD_EBX_FEATURES]; in cpuid_pass4()
2824 if (cpi->cpi_xmaxeax < 0x80000001) in cpuid_pass4()
2827 switch (cpi->cpi_vendor) { in cpuid_pass4()
2834 * here to make the initial crop of 64-bit OS's work. in cpuid_pass4()
2841 edx = &cpi->cpi_support[AMD_EDX_FEATURES]; in cpuid_pass4()
2842 ecx = &cpi->cpi_support[AMD_ECX_FEATURES]; in cpuid_pass4()
2850 switch (cpi->cpi_vendor) { in cpuid_pass4()
2897 switch (cpi->cpi_vendor) { in cpuid_pass4()
2928 cpi->cpi_support[TM_EDX_FEATURES] = cp.cp_edx; in cpuid_pass4()
2936 cpi->cpi_pass = 4; in cpuid_pass4()
2957 cpi = cpu->cpu_m.mcpu_cpi; in cpuid_insn()
2965 if (cp->cp_eax <= cpi->cpi_maxeax && cp->cp_eax < NMAX_CPI_STD) in cpuid_insn()
2966 xcp = &cpi->cpi_std[cp->cp_eax]; in cpuid_insn()
2967 else if (cp->cp_eax >= 0x80000000 && cp->cp_eax <= cpi->cpi_xmaxeax && in cpuid_insn()
2968 cp->cp_eax < 0x80000000 + NMAX_CPI_EXTD) in cpuid_insn()
2969 xcp = &cpi->cpi_extd[cp->cp_eax - 0x80000000]; in cpuid_insn()
2978 cp->cp_eax = xcp->cp_eax; in cpuid_insn()
2979 cp->cp_ebx = xcp->cp_ebx; in cpuid_insn()
2980 cp->cp_ecx = xcp->cp_ecx; in cpuid_insn()
2981 cp->cp_edx = xcp->cp_edx; in cpuid_insn()
2982 return (cp->cp_eax); in cpuid_insn()
2988 return (cpu != NULL && cpu->cpu_m.mcpu_cpi != NULL && in cpuid_checkpass()
2989 cpu->cpu_m.mcpu_cpi->cpi_pass >= pass); in cpuid_checkpass()
2997 return (snprintf(s, n, "%s", cpu->cpu_m.mcpu_cpi->cpi_brandstr)); in cpuid_getbrandstr()
3008 return (cpu->cpu_m.mcpu_cpi->cpi_chipid >= 0); in cpuid_is_cmt()
3012 * AMD and Intel both implement the 64-bit variant of the syscall
3013 * instruction (syscallq), so if there's -any- support for syscall,
3016 * However, Intel decided to -not- implement the 32-bit variant of the
3020 * XXPV Currently, 32-bit syscall instructions don't work via the hypervisor,
3035 struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; in cpuid_syscall32_insn()
3037 if (cpi->cpi_vendor == X86_VENDOR_AMD && in cpuid_syscall32_insn()
3038 cpi->cpi_xmaxeax >= 0x80000001 && in cpuid_syscall32_insn()
3049 struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; in cpuid_getidstr()
3059 return (snprintf(s, n, fmt_ht, cpi->cpi_chipid, in cpuid_getidstr()
3060 cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax, in cpuid_getidstr()
3061 cpi->cpi_family, cpi->cpi_model, in cpuid_getidstr()
3062 cpi->cpi_step, cpu->cpu_type_info.pi_clock)); in cpuid_getidstr()
3064 cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax, in cpuid_getidstr()
3065 cpi->cpi_family, cpi->cpi_model, in cpuid_getidstr()
3066 cpi->cpi_step, cpu->cpu_type_info.pi_clock)); in cpuid_getidstr()
3073 return ((const char *)cpu->cpu_m.mcpu_cpi->cpi_vendorstr); in cpuid_getvendorstr()
3080 return (cpu->cpu_m.mcpu_cpi->cpi_vendor); in cpuid_getvendor()
3087 return (cpu->cpu_m.mcpu_cpi->cpi_family); in cpuid_getfamily()
3094 return (cpu->cpu_m.mcpu_cpi->cpi_model); in cpuid_getmodel()
3101 return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_per_chip); in cpuid_get_ncpu_per_chip()
3108 return (cpu->cpu_m.mcpu_cpi->cpi_ncore_per_chip); in cpuid_get_ncore_per_chip()
3115 return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_shr_last_cache); in cpuid_get_ncpu_sharing_last_cache()
3122 return (cpu->cpu_m.mcpu_cpi->cpi_last_lvl_cacheid); in cpuid_get_last_lvl_cacheid()
3129 return (cpu->cpu_m.mcpu_cpi->cpi_step); in cpuid_getstep()
3136 return (cpu->cpu_m.mcpu_cpi->cpi_std[1].cp_eax); in cpuid_getsig()
3143 return (cpu->cpu_m.mcpu_cpi->cpi_chiprev); in cpuid_getchiprev()
3150 return (cpu->cpu_m.mcpu_cpi->cpi_chiprevstr); in cpuid_getchiprevstr()
3157 return (cpu->cpu_m.mcpu_cpi->cpi_socket); in cpuid_getsockettype()
3167 cpi = cpu->cpu_m.mcpu_cpi; in cpuid_getsocketstr()
3171 socketstr = _cpuid_sktstr(cpi->cpi_vendor, cpi->cpi_family, in cpuid_getsocketstr()
3172 cpi->cpi_model, cpi->cpi_step); in cpuid_getsocketstr()
3184 return (cpu->cpu_m.mcpu_cpi->cpi_chipid); in cpuid_get_chipid()
3185 return (cpu->cpu_id); in cpuid_get_chipid()
3192 return (cpu->cpu_m.mcpu_cpi->cpi_coreid); in cpuid_get_coreid()
3199 return (cpu->cpu_m.mcpu_cpi->cpi_pkgcoreid); in cpuid_get_pkgcoreid()
3206 return (cpu->cpu_m.mcpu_cpi->cpi_clogid); in cpuid_get_clogid()
3213 return (cpu->cpu_m.mcpu_cpi->cpi_last_lvl_cacheid); in cpuid_get_cacheid()
3220 return (cpu->cpu_m.mcpu_cpi->cpi_procnodeid); in cpuid_get_procnodeid()
3227 return (cpu->cpu_m.mcpu_cpi->cpi_procnodes_per_pkg); in cpuid_get_procnodes_per_pkg()
3234 return (cpu->cpu_m.mcpu_cpi->cpi_compunitid); in cpuid_get_compunitid()
3241 return (cpu->cpu_m.mcpu_cpi->cpi_cores_per_compunit); in cpuid_get_cores_per_compunit()
3254 cpi = cpu->cpu_m.mcpu_cpi; in cpuid_have_cr8access()
3255 if (cpi->cpi_vendor == X86_VENDOR_AMD && cpi->cpi_maxeax >= 1 && in cpuid_have_cr8access()
3266 if (cpu->cpu_m.mcpu_cpi->cpi_maxeax < 1) { in cpuid_get_apicid()
3269 return (cpu->cpu_m.mcpu_cpi->cpi_apicid); in cpuid_get_apicid()
3280 cpi = cpu->cpu_m.mcpu_cpi; in cpuid_get_addrsize()
3285 *pabits = cpi->cpi_pabits; in cpuid_get_addrsize()
3287 *vabits = cpi->cpi_vabits; in cpuid_get_addrsize()
3305 cpi = cpu->cpu_m.mcpu_cpi; in cpuid_get_dtlb_nent()
3312 if (cpi->cpi_xmaxeax >= 0x80000006) { in cpuid_get_dtlb_nent()
3313 struct cpuid_regs *cp = &cpi->cpi_extd[6]; in cpuid_get_dtlb_nent()
3322 if ((cp->cp_ebx & 0xffff0000) == 0) in cpuid_get_dtlb_nent()
3323 dtlb_nent = cp->cp_ebx & 0x0000ffff; in cpuid_get_dtlb_nent()
3325 dtlb_nent = BITX(cp->cp_ebx, 27, 16); in cpuid_get_dtlb_nent()
3329 if ((cp->cp_eax & 0xffff0000) == 0) in cpuid_get_dtlb_nent()
3330 dtlb_nent = cp->cp_eax & 0x0000ffff; in cpuid_get_dtlb_nent()
3332 dtlb_nent = BITX(cp->cp_eax, 27, 16); in cpuid_get_dtlb_nent()
3347 if (cpi->cpi_xmaxeax >= 0x80000005) { in cpuid_get_dtlb_nent()
3348 struct cpuid_regs *cp = &cpi->cpi_extd[5]; in cpuid_get_dtlb_nent()
3352 dtlb_nent = BITX(cp->cp_ebx, 23, 16); in cpuid_get_dtlb_nent()
3355 dtlb_nent = BITX(cp->cp_eax, 23, 16); in cpuid_get_dtlb_nent()
3358 panic("unknown L1 d-TLB pagesize"); in cpuid_get_dtlb_nent()
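
The 0x80000006 decode above copes with two layouts of %ebx: if the high word is zero, the whole low word is taken as a unified 4K TLB entry count; otherwise the d-TLB count sits in bits 27:16, with the i-TLB count in the low bits and associativity codes in the nibbles above each. A sketch of that disambiguation with a hypothetical %ebx:

        #include <stdio.h>
        #include <stdint.h>

        #define BITX(u, high, low) \
                (((u) >> (low)) & ((1U << ((high) - (low) + 1)) - 1))

        int
        main(void)
        {
                uint32_t ebx = 0x42004200;      /* hypothetical %ebx */
                uint32_t dtlb_nent;

                if ((ebx & 0xffff0000) == 0)    /* old unified layout */
                        dtlb_nent = ebx & 0x0000ffff;
                else                            /* split d-TLB field */
                        dtlb_nent = BITX(ebx, 27, 16);
                (void) printf("4K d-TLB entries: %u\n", dtlb_nent); /* 512 */
                return (0);
        }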
3376 struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; in cpuid_opteron_erratum()
3381 * a legacy (32-bit) AMD CPU. in cpuid_opteron_erratum()
3383 if (cpi->cpi_vendor != X86_VENDOR_AMD || in cpuid_opteron_erratum()
3384 cpi->cpi_family == 4 || cpi->cpi_family == 5 || in cpuid_opteron_erratum()
3385 cpi->cpi_family == 6) in cpuid_opteron_erratum()
3389 eax = cpi->cpi_std[1].cp_eax; in cpuid_opteron_erratum()
3429 return (cpi->cpi_family < 0x10); in cpuid_opteron_erratum()
3435 return (cpi->cpi_family <= 0x11); in cpuid_opteron_erratum()
3439 return (cpi->cpi_family <= 0x11); in cpuid_opteron_erratum()
3456 return (cpi->cpi_family < 0x10); in cpuid_opteron_erratum()
3460 return (cpi->cpi_family <= 0x11); in cpuid_opteron_erratum()
3472 return (cpi->cpi_family < 0x10); in cpuid_opteron_erratum()
3482 return (cpi->cpi_family < 0x10); in cpuid_opteron_erratum()
3542 return (cpi->cpi_family < 0x10 || cpi->cpi_family == 0x11); in cpuid_opteron_erratum()
3546 return (cpi->cpi_family < 0x10); in cpuid_opteron_erratum()
3565 * check for processors (pre-Shanghai) that do not provide in cpuid_opteron_erratum()
3568 return (cpi->cpi_family == 0x10 && cpi->cpi_model < 4); in cpuid_opteron_erratum()
3576 return (cpi->cpi_family == 0x10 || cpi->cpi_family == 0x12); in cpuid_opteron_erratum()
3582 return (-1); in cpuid_opteron_erratum()
3589 * Return 1 if erratum is present, 0 if not present and -1 if indeterminate.
3596 static int osvwfeature = -1; in osvw_opteron_erratum()
3600 cpi = cpu->cpu_m.mcpu_cpi; in osvw_opteron_erratum()
3603 if (osvwfeature == -1) { in osvw_opteron_erratum()
3604 osvwfeature = cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW; in osvw_opteron_erratum()
3608 (cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW)); in osvw_opteron_erratum()
3611 return (-1); in osvw_opteron_erratum()
3620 return (-1); in osvw_opteron_erratum()
3626 * 0 - fixed by HW in osvw_opteron_erratum()
3627 * 1 - BIOS has applied the workaround when BIOS in osvw_opteron_erratum()
3646 return (-1); in osvw_opteron_erratum()
3651 static const char line_str[] = "line-size";
3664 if (snprintf(buf, sizeof (buf), "%s-%s", label, type) < sizeof (buf)) in add_cache_prop()
3669 * Intel-style cache/tlb description
3676 static const char l1_icache_str[] = "l1-icache";
3677 static const char l1_dcache_str[] = "l1-dcache";
3678 static const char l2_cache_str[] = "l2-cache";
3679 static const char l3_cache_str[] = "l3-cache";
3680 static const char itlb4k_str[] = "itlb-4K";
3681 static const char dtlb4k_str[] = "dtlb-4K";
3682 static const char itlb2M_str[] = "itlb-2M";
3683 static const char itlb4M_str[] = "itlb-4M";
3684 static const char dtlb4M_str[] = "dtlb-4M";
3685 static const char dtlb24_str[] = "dtlb0-2M-4M";
3686 static const char itlb424_str[] = "itlb-4K-2M-4M";
3687 static const char itlb24_str[] = "itlb-2M-4M";
3688 static const char dtlb44_str[] = "dtlb-4K-4M";
3689 static const char sl1_dcache_str[] = "sectored-l1-dcache";
3690 static const char sl2_cache_str[] = "sectored-l2-cache";
3691 static const char itrace_str[] = "itrace-cache";
3692 static const char sl3_cache_str[] = "sectored-l3-cache";
3693 static const char sh_l2_tlb4k_str[] = "shared-l2-tlb-4k";
3705 * Codes ignored - Reason
3706 * ----------------------
3707 * 40H - intel_cpuid_4_cache_info() disambiguates l2/l3 cache
3708 * f0H/f1H - Currently we do not interpret prefetch size by design
3805 { 0x70, 4, 0, 32, "tlb-4K" },
3806 { 0x80, 4, 16, 16*1024, "l1-cache" },
3817 for (; ct->ct_code != 0; ct++) in find_cacheent()
3818 if (ct->ct_code <= code) in find_cacheent()
3820 if (ct->ct_code == code) in find_cacheent()
3827 * Populate cachetab entry with L2 or L3 cache-information using
3838 for (i = 0; i < cpi->cpi_std_4_size; i++) { in intel_cpuid_4_cache_info()
3839 level = CPI_CACHE_LVL(cpi->cpi_std_4[i]); in intel_cpuid_4_cache_info()
3842 ct->ct_assoc = CPI_CACHE_WAYS(cpi->cpi_std_4[i]) + 1; in intel_cpuid_4_cache_info()
3843 ct->ct_line_size = in intel_cpuid_4_cache_info()
3844 CPI_CACHE_COH_LN_SZ(cpi->cpi_std_4[i]) + 1; in intel_cpuid_4_cache_info()
3845 ct->ct_size = ct->ct_assoc * in intel_cpuid_4_cache_info()
3846 (CPI_CACHE_PARTS(cpi->cpi_std_4[i]) + 1) * in intel_cpuid_4_cache_info()
3847 ct->ct_line_size * in intel_cpuid_4_cache_info()
3848 (cpi->cpi_std_4[i]->cp_ecx + 1); in intel_cpuid_4_cache_info()
3851 ct->ct_label = l2_cache_str; in intel_cpuid_4_cache_info()
3853 ct->ct_label = l3_cache_str; in intel_cpuid_4_cache_info()
3864 * The walk is terminated if the walker returns non-zero.
3875 if ((dp = cpi->cpi_cacheinfo) == NULL) in intel_walk_cacheinfo()
3877 for (i = 0; i < cpi->cpi_ncache; i++, dp++) { in intel_walk_cacheinfo()
3885 if (*dp == 0x49 && cpi->cpi_maxeax >= 0x4 && in intel_walk_cacheinfo()
3923 if ((dp = cpi->cpi_cacheinfo) == NULL) in cyrix_walk_cacheinfo()
3925 for (i = 0; i < cpi->cpi_ncache; i++, dp++) { in cyrix_walk_cacheinfo()
3927 * Search the Cyrix-specific descriptor table first ... in cyrix_walk_cacheinfo()
3946 * A cacheinfo walker that adds associativity, line-size, and size properties
3954 add_cache_prop(devi, ct->ct_label, assoc_str, ct->ct_assoc); in add_cacheent_props()
3955 if (ct->ct_line_size != 0) in add_cacheent_props()
3956 add_cache_prop(devi, ct->ct_label, line_str, in add_cacheent_props()
3957 ct->ct_line_size); in add_cacheent_props()
3958 add_cache_prop(devi, ct->ct_label, size_str, ct->ct_size); in add_cacheent_props()
3963 static const char fully_assoc[] = "fully-associative?";
4005 * associated with a tag. For example, the AMD K6-III has a sector in add_amd_cache()
4009 add_cache_prop(devi, label, "lines-per-tag", lines_per_tag); in add_amd_cache()
4056 add_cache_prop(devi, label, "lines-per-tag", lines_per_tag); in add_amd_l2_cache()
4066 if (cpi->cpi_xmaxeax < 0x80000005) in amd_cache_info()
4068 cp = &cpi->cpi_extd[5]; in amd_cache_info()
4076 add_amd_tlb(devi, "dtlb-2M", in amd_cache_info()
4077 BITX(cp->cp_eax, 31, 24), BITX(cp->cp_eax, 23, 16)); in amd_cache_info()
4078 add_amd_tlb(devi, "itlb-2M", in amd_cache_info()
4079 BITX(cp->cp_eax, 15, 8), BITX(cp->cp_eax, 7, 0)); in amd_cache_info()
4085 switch (cpi->cpi_vendor) { in amd_cache_info()
4088 if (cpi->cpi_family >= 5) { in amd_cache_info()
4094 if ((nentries = BITX(cp->cp_ebx, 23, 16)) == 255) in amd_cache_info()
4099 add_amd_tlb(devi, "tlb-4K", BITX(cp->cp_ebx, 31, 24), in amd_cache_info()
4106 BITX(cp->cp_ebx, 31, 24), BITX(cp->cp_ebx, 23, 16)); in amd_cache_info()
4108 BITX(cp->cp_ebx, 15, 8), BITX(cp->cp_ebx, 7, 0)); in amd_cache_info()
4117 BITX(cp->cp_ecx, 31, 24), BITX(cp->cp_ecx, 23, 16), in amd_cache_info()
4118 BITX(cp->cp_ecx, 15, 8), BITX(cp->cp_ecx, 7, 0)); in amd_cache_info()
4125 BITX(cp->cp_edx, 31, 24), BITX(cp->cp_edx, 23, 16), in amd_cache_info()
4126 BITX(cp->cp_edx, 15, 8), BITX(cp->cp_edx, 7, 0)); in amd_cache_info()
4128 if (cpi->cpi_xmaxeax < 0x80000006) in amd_cache_info()
4130 cp = &cpi->cpi_extd[6]; in amd_cache_info()
4134 if (BITX(cp->cp_eax, 31, 16) == 0) in amd_cache_info()
4135 add_amd_l2_tlb(devi, "l2-tlb-2M", in amd_cache_info()
4136 BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0)); in amd_cache_info()
4138 add_amd_l2_tlb(devi, "l2-dtlb-2M", in amd_cache_info()
4139 BITX(cp->cp_eax, 31, 28), BITX(cp->cp_eax, 27, 16)); in amd_cache_info()
4140 add_amd_l2_tlb(devi, "l2-itlb-2M", in amd_cache_info()
4141 BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0)); in amd_cache_info()
4146 if (BITX(cp->cp_ebx, 31, 16) == 0) { in amd_cache_info()
4147 add_amd_l2_tlb(devi, "l2-tlb-4K", in amd_cache_info()
4148 BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0)); in amd_cache_info()
4150 add_amd_l2_tlb(devi, "l2-dtlb-4K", in amd_cache_info()
4151 BITX(cp->cp_eax, 31, 28), BITX(cp->cp_eax, 27, 16)); in amd_cache_info()
4152 add_amd_l2_tlb(devi, "l2-itlb-4K", in amd_cache_info()
4153 BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0)); in amd_cache_info()
4157 BITX(cp->cp_ecx, 31, 16), BITX(cp->cp_ecx, 15, 12), in amd_cache_info()
4158 BITX(cp->cp_ecx, 11, 8), BITX(cp->cp_ecx, 7, 0)); in amd_cache_info()
4163 * and tlb architecture - Intel's way and AMD's way.
4170 switch (cpi->cpi_vendor) { in x86_which_cacheinfo()
4172 if (cpi->cpi_maxeax >= 2) in x86_which_cacheinfo()
4180 if (cpi->cpi_family > 5 || in x86_which_cacheinfo()
4181 (cpi->cpi_family == 5 && cpi->cpi_model >= 1)) in x86_which_cacheinfo()
4185 if (cpi->cpi_family >= 5) in x86_which_cacheinfo()
4191 * then we assume they have AMD-format cache in x86_which_cacheinfo()
4195 * then try our Cyrix-specific handler. in x86_which_cacheinfo()
4198 * table-driven format instead. in x86_which_cacheinfo()
4200 if (cpi->cpi_xmaxeax >= 0x80000005) in x86_which_cacheinfo()
4202 else if (cpi->cpi_vendor == X86_VENDOR_Cyrix) in x86_which_cacheinfo()
4204 else if (cpi->cpi_maxeax >= 2) in x86_which_cacheinfo()
4208 return (-1); in x86_which_cacheinfo()
4228 /* cpu-mhz, and clock-frequency */ in cpuid_set_cpu_properties()
4233 "cpu-mhz", cpu_freq); in cpuid_set_cpu_properties()
4236 "clock-frequency", (int)mul); in cpuid_set_cpu_properties()
4243 /* vendor-id */ in cpuid_set_cpu_properties()
4245 "vendor-id", cpi->cpi_vendorstr); in cpuid_set_cpu_properties()
4247 if (cpi->cpi_maxeax == 0) { in cpuid_set_cpu_properties()
4257 "cpu-model", CPI_MODEL(cpi)); in cpuid_set_cpu_properties()
4259 "stepping-id", CPI_STEP(cpi)); in cpuid_set_cpu_properties()
4262 switch (cpi->cpi_vendor) { in cpuid_set_cpu_properties()
4274 /* ext-family */ in cpuid_set_cpu_properties()
4275 switch (cpi->cpi_vendor) { in cpuid_set_cpu_properties()
4278 create = cpi->cpi_family >= 0xf; in cpuid_set_cpu_properties()
4286 "ext-family", CPI_FAMILY_XTD(cpi)); in cpuid_set_cpu_properties()
4288 /* ext-model */ in cpuid_set_cpu_properties()
4289 switch (cpi->cpi_vendor) { in cpuid_set_cpu_properties()
4302 "ext-model", CPI_MODEL_XTD(cpi)); in cpuid_set_cpu_properties()
4305 switch (cpi->cpi_vendor) { in cpuid_set_cpu_properties()
4310 create = cpi->cpi_xmaxeax >= 0x80000001; in cpuid_set_cpu_properties()
4318 "generation", BITX((cpi)->cpi_extd[1].cp_eax, 11, 8)); in cpuid_set_cpu_properties()
4320 /* brand-id */ in cpuid_set_cpu_properties()
4321 switch (cpi->cpi_vendor) { in cpuid_set_cpu_properties()
4327 create = cpi->cpi_family > 6 || in cpuid_set_cpu_properties()
4328 (cpi->cpi_family == 6 && cpi->cpi_model >= 8); in cpuid_set_cpu_properties()
4331 create = cpi->cpi_family >= 0xf; in cpuid_set_cpu_properties()
4337 if (create && cpi->cpi_brandid != 0) { in cpuid_set_cpu_properties()
4339 "brand-id", cpi->cpi_brandid); in cpuid_set_cpu_properties()
4342 /* chunks, and apic-id */ in cpuid_set_cpu_properties()
4343 switch (cpi->cpi_vendor) { in cpuid_set_cpu_properties()
4348 create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf; in cpuid_set_cpu_properties()
4351 create = cpi->cpi_family >= 0xf; in cpuid_set_cpu_properties()
4361 "apic-id", cpi->cpi_apicid); in cpuid_set_cpu_properties()
4362 if (cpi->cpi_chipid >= 0) { in cpuid_set_cpu_properties()
4364 "chip#", cpi->cpi_chipid); in cpuid_set_cpu_properties()
4366 "clog#", cpi->cpi_clogid); in cpuid_set_cpu_properties()
4370 /* cpuid-features */ in cpuid_set_cpu_properties()
4372 "cpuid-features", CPI_FEATURES_EDX(cpi)); in cpuid_set_cpu_properties()
4375 /* cpuid-features-ecx */ in cpuid_set_cpu_properties()
4376 switch (cpi->cpi_vendor) { in cpuid_set_cpu_properties()
4378 create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf; in cpuid_set_cpu_properties()
4381 create = cpi->cpi_family >= 0xf; in cpuid_set_cpu_properties()
4389 "cpuid-features-ecx", CPI_FEATURES_ECX(cpi)); in cpuid_set_cpu_properties()
4391 /* ext-cpuid-features */ in cpuid_set_cpu_properties()
4392 switch (cpi->cpi_vendor) { in cpuid_set_cpu_properties()
4398 create = cpi->cpi_xmaxeax >= 0x80000001; in cpuid_set_cpu_properties()
4406 "ext-cpuid-features", CPI_FEATURES_XTD_EDX(cpi)); in cpuid_set_cpu_properties()
4408 "ext-cpuid-features-ecx", CPI_FEATURES_XTD_ECX(cpi)); in cpuid_set_cpu_properties()
4415 * same -something- about the processor, however lame. in cpuid_set_cpu_properties()
4418 "brand-string", cpi->cpi_brandstr); in cpuid_set_cpu_properties()
4446 * A cacheinfo walker that fetches the size, line-size, and associativity
4455 if (ct->ct_label != l2_cache_str && in intel_l2cinfo()
4456 ct->ct_label != sl2_cache_str) in intel_l2cinfo()
4457 return (0); /* not an L2 -- keep walking */ in intel_l2cinfo()
4459 if ((ip = l2i->l2i_csz) != NULL) in intel_l2cinfo()
4460 *ip = ct->ct_size; in intel_l2cinfo()
4461 if ((ip = l2i->l2i_lsz) != NULL) in intel_l2cinfo()
4462 *ip = ct->ct_line_size; in intel_l2cinfo()
4463 if ((ip = l2i->l2i_assoc) != NULL) in intel_l2cinfo()
4464 *ip = ct->ct_assoc; in intel_l2cinfo()
4465 l2i->l2i_ret = ct->ct_size; in intel_l2cinfo()
4466 return (1); /* was an L2 -- terminate walk */ in intel_l2cinfo()
4476 * -1 is undefined. 0 is fully associative.
4480 {-1, 1, 2, -1, 4, -1, 8, -1, 16, -1, 32, 48, 64, 96, 128, 0};
4490 if (cpi->cpi_xmaxeax < 0x80000006) in amd_l2cacheinfo()
4492 cp = &cpi->cpi_extd[6]; in amd_l2cacheinfo()
4494 if ((i = BITX(cp->cp_ecx, 15, 12)) != 0 && in amd_l2cacheinfo()
4495 (size = BITX(cp->cp_ecx, 31, 16)) != 0) { in amd_l2cacheinfo()
4499 ASSERT(assoc != -1); in amd_l2cacheinfo()
4501 if ((ip = l2i->l2i_csz) != NULL) in amd_l2cacheinfo()
4503 if ((ip = l2i->l2i_lsz) != NULL) in amd_l2cacheinfo()
4504 *ip = BITX(cp->cp_ecx, 7, 0); in amd_l2cacheinfo()
4505 if ((ip = l2i->l2i_assoc) != NULL) in amd_l2cacheinfo()
4507 l2i->l2i_ret = cachesz; in amd_l2cacheinfo()
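
The amd_afd table maps the 4-bit associativity code in leaf 0x80000006 %ecx to a way count. A sketch decoding a hypothetical %ecx describing a 512 KB, 16-way, 64-byte-line L2:

        #include <stdio.h>
        #include <stdint.h>

        #define BITX(u, high, low) \
                (((u) >> (low)) & ((1U << ((high) - (low) + 1)) - 1))

        /* -1 is undefined, 0 is fully associative (as documented above). */
        static const int amd_afd[] =
                {-1, 1, 2, -1, 4, -1, 8, -1, 16, -1, 32, 48, 64, 96, 128, 0};

        int
        main(void)
        {
                uint32_t ecx = 0x02008140;      /* hypothetical %ecx */

                (void) printf("L2: %u KB, %d-way, %u-byte lines\n",
                    BITX(ecx, 31, 16),          /* size in KB */
                    amd_afd[BITX(ecx, 15, 12)], /* associativity code */
                    BITX(ecx, 7, 0));           /* line size */
                return (0);
        }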
4514 struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; in getl2cacheinfo()
4517 l2i->l2i_csz = csz; in getl2cacheinfo()
4518 l2i->l2i_lsz = lsz; in getl2cacheinfo()
4519 l2i->l2i_assoc = assoc; in getl2cacheinfo()
4520 l2i->l2i_ret = -1; in getl2cacheinfo()
4535 return (l2i->l2i_ret); in getl2cacheinfo()
4548 mwait_size = CPU->cpu_m.mcpu_cpi->cpi_mwait.mon_max; in cpuid_mwait_alloc()
4567 cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = ret; in cpuid_mwait_alloc()
4568 cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = mwait_size; in cpuid_mwait_alloc()
4574 cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = ret; in cpuid_mwait_alloc()
4575 cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = mwait_size * 2; in cpuid_mwait_alloc()
4585 if (cpu->cpu_m.mcpu_cpi == NULL) { in cpuid_mwait_free()
4589 if (cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual != NULL && in cpuid_mwait_free()
4590 cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual > 0) { in cpuid_mwait_free()
4591 kmem_free(cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual, in cpuid_mwait_free()
4592 cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual); in cpuid_mwait_free()
4595 cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = NULL; in cpuid_mwait_free()
4596 cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = 0; in cpuid_mwait_free()
4606 cnt = &_no_rdtsc_end - &_no_rdtsc_start; in patch_tsc_read()
4610 cnt = &_tsc_mfence_end - &_tsc_mfence_start; in patch_tsc_read()
4615 cnt = &_tsc_lfence_end - &_tsc_lfence_start; in patch_tsc_read()
4620 cnt = &_tscp_end - &_tscp_start; in patch_tsc_read()
4639 cpi = CPU->cpu_m.mcpu_cpi; in cpuid_deep_cstates_supported()
4644 switch (cpi->cpi_vendor) { in cpuid_deep_cstates_supported()
4646 if (cpi->cpi_xmaxeax < 0x80000007) in cpuid_deep_cstates_supported()
4650 * TSC run at a constant rate in all ACPI C-states? in cpuid_deep_cstates_supported()
4697 * - cpuid_pass1() is done, so that X86 features are known.
4698 * - fpu_probe() is done, so that fp_save_mech is chosen.
4712 cpu->cpu_m.mcpu_cpi->cpi_std[1].cp_ecx |= CPUID_INTC_ECX_OSXSAVE; in xsave_setup_msr()
4718 * APIC timer will continue running in all C-states,
4719 * including the deepest C-states.
4730 cpi = CPU->cpu_m.mcpu_cpi; in cpuid_arat_supported()
4732 switch (cpi->cpi_vendor) { in cpuid_arat_supported()
4735 * Always-running Local APIC Timer is in cpuid_arat_supported()
4738 if (cpi->cpi_maxeax >= 6) { in cpuid_arat_supported()
4756 struct cpuid_info *cpi = cp->cpu_m.mcpu_cpi; in cpuid_iepb_supported()
4770 if ((cpi->cpi_vendor != X86_VENDOR_Intel) || (cpi->cpi_maxeax < 6)) in cpuid_iepb_supported()
4790 struct cpuid_info *cpi = CPU->cpu_m.mcpu_cpi; in cpuid_deadline_tsc_supported()
4796 switch (cpi->cpi_vendor) { in cpuid_deadline_tsc_supported()
4798 if (cpi->cpi_maxeax >= 1) { in cpuid_deadline_tsc_supported()
4823 cnt = &bcopy_patch_end - &bcopy_patch_start; in patch_memops()
4836 * It re-uses the x2APIC cpuid code of cpuid_pass2(). in cpuid_get_ext_topo()
4849 cp->cp_eax = 0; in cpuid_get_ext_topo()
4852 cp->cp_eax = 0xB; in cpuid_get_ext_topo()
4853 cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0; in cpuid_get_ext_topo()
4857 * Check CPUID.EAX=0BH, ECX=0H:EBX is non-zero, which in cpuid_get_ext_topo()
4861 if (cp->cp_ebx) { in cpuid_get_ext_topo()
4868 cp->cp_eax = 0xB; in cpuid_get_ext_topo()
4869 cp->cp_ecx = i; in cpuid_get_ext_topo()
4880 coreid_shift = BITX(cp->cp_eax, 4, 0); in cpuid_get_ext_topo()
4887 chipid_shift = BITX(cp->cp_eax, 4, 0); in cpuid_get_ext_topo()
4893 *core_nbits = chipid_shift - coreid_shift; in cpuid_get_ext_topo()