1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* 27 * Various routines to handle identification 28 * and classification of x86 processors. 29 */ 30 31 #include <sys/types.h> 32 #include <sys/archsystm.h> 33 #include <sys/x86_archext.h> 34 #include <sys/kmem.h> 35 #include <sys/systm.h> 36 #include <sys/cmn_err.h> 37 #include <sys/sunddi.h> 38 #include <sys/sunndi.h> 39 #include <sys/cpuvar.h> 40 #include <sys/processor.h> 41 #include <sys/sysmacros.h> 42 #include <sys/pg.h> 43 #include <sys/fp.h> 44 #include <sys/controlregs.h> 45 #include <sys/auxv_386.h> 46 #include <sys/bitmap.h> 47 #include <sys/memnode.h> 48 49 #ifdef __xpv 50 #include <sys/hypervisor.h> 51 #endif 52 53 /* 54 * Pass 0 of cpuid feature analysis happens in locore. It contains special code 55 * to recognize Cyrix processors that are not cpuid-compliant, and to deal with 56 * them accordingly. For most modern processors, feature detection occurs here 57 * in pass 1. 58 * 59 * Pass 1 of cpuid feature analysis happens just at the beginning of mlsetup() 60 * for the boot CPU and does the basic analysis that the early kernel needs. 61 * x86_feature is set based on the return value of cpuid_pass1() of the boot 62 * CPU. 63 * 64 * Pass 1 includes: 65 * 66 * o Determining vendor/model/family/stepping and setting x86_type and 67 * x86_vendor accordingly. 68 * o Processing the feature flags returned by the cpuid instruction while 69 * applying any workarounds or tricks for the specific processor. 70 * o Mapping the feature flags into Solaris feature bits (X86_*). 71 * o Processing extended feature flags if supported by the processor, 72 * again while applying specific processor knowledge. 73 * o Determining the CMT characteristics of the system. 74 * 75 * Pass 1 is done on non-boot CPUs during their initialization and the results 76 * are used only as a meager attempt at ensuring that all processors within the 77 * system support the same features. 78 * 79 * Pass 2 of cpuid feature analysis happens just at the beginning 80 * of startup(). It just copies in and corrects the remainder 81 * of the cpuid data we depend on: standard cpuid functions that we didn't 82 * need for pass1 feature analysis, and extended cpuid functions beyond the 83 * simple feature processing done in pass1. 84 * 85 * Pass 3 of cpuid analysis is invoked after basic kernel services; in 86 * particular kernel memory allocation has been made available. It creates a 87 * readable brand string based on the data collected in the first two passes. 88 * 89 * Pass 4 of cpuid analysis is invoked after post_startup() when all 90 * the support infrastructure for various hardware features has been 91 * initialized. It determines which processor features will be reported 92 * to userland via the aux vector. 93 * 94 * All passes are executed on all CPUs, but only the boot CPU determines what 95 * features the kernel will use. 96 * 97 * Much of the worst junk in this file is for the support of processors 98 * that didn't really implement the cpuid instruction properly. 99 * 100 * NOTE: The accessor functions (cpuid_get*) are aware of, and ASSERT upon, 101 * the pass numbers. Accordingly, changes to the pass code may require changes 102 * to the accessor code. 103 */ 104 105 uint_t x86_feature = 0; 106 uint_t x86_vendor = X86_VENDOR_IntelClone; 107 uint_t x86_type = X86_TYPE_OTHER; 108 uint_t x86_clflush_size = 0; 109 110 uint_t pentiumpro_bug4046376; 111 uint_t pentiumpro_bug4064495; 112 113 uint_t enable486; 114 115 /* 116 * monitor/mwait info. 117 * 118 * size_actual and buf_actual are the real address and size allocated to get 119 * proper mwait_buf alignement. buf_actual and size_actual should be passed 120 * to kmem_free(). Currently kmem_alloc() and mwait happen to both use 121 * processor cache-line alignment, but this is not guarantied in the furture. 122 */ 123 struct mwait_info { 124 size_t mon_min; /* min size to avoid missed wakeups */ 125 size_t mon_max; /* size to avoid false wakeups */ 126 size_t size_actual; /* size actually allocated */ 127 void *buf_actual; /* memory actually allocated */ 128 uint32_t support; /* processor support of monitor/mwait */ 129 }; 130 131 /* 132 * These constants determine how many of the elements of the 133 * cpuid we cache in the cpuid_info data structure; the 134 * remaining elements are accessible via the cpuid instruction. 135 */ 136 137 #define NMAX_CPI_STD 6 /* eax = 0 .. 5 */ 138 #define NMAX_CPI_EXTD 9 /* eax = 0x80000000 .. 0x80000008 */ 139 140 struct cpuid_info { 141 uint_t cpi_pass; /* last pass completed */ 142 /* 143 * standard function information 144 */ 145 uint_t cpi_maxeax; /* fn 0: %eax */ 146 char cpi_vendorstr[13]; /* fn 0: %ebx:%ecx:%edx */ 147 uint_t cpi_vendor; /* enum of cpi_vendorstr */ 148 149 uint_t cpi_family; /* fn 1: extended family */ 150 uint_t cpi_model; /* fn 1: extended model */ 151 uint_t cpi_step; /* fn 1: stepping */ 152 chipid_t cpi_chipid; /* fn 1: %ebx: chip # on ht cpus */ 153 uint_t cpi_brandid; /* fn 1: %ebx: brand ID */ 154 int cpi_clogid; /* fn 1: %ebx: thread # */ 155 uint_t cpi_ncpu_per_chip; /* fn 1: %ebx: logical cpu count */ 156 uint8_t cpi_cacheinfo[16]; /* fn 2: intel-style cache desc */ 157 uint_t cpi_ncache; /* fn 2: number of elements */ 158 uint_t cpi_ncpu_shr_last_cache; /* fn 4: %eax: ncpus sharing cache */ 159 id_t cpi_last_lvl_cacheid; /* fn 4: %eax: derived cache id */ 160 uint_t cpi_std_4_size; /* fn 4: number of fn 4 elements */ 161 struct cpuid_regs **cpi_std_4; /* fn 4: %ecx == 0 .. fn4_size */ 162 struct cpuid_regs cpi_std[NMAX_CPI_STD]; /* 0 .. 5 */ 163 /* 164 * extended function information 165 */ 166 uint_t cpi_xmaxeax; /* fn 0x80000000: %eax */ 167 char cpi_brandstr[49]; /* fn 0x8000000[234] */ 168 uint8_t cpi_pabits; /* fn 0x80000006: %eax */ 169 uint8_t cpi_vabits; /* fn 0x80000006: %eax */ 170 struct cpuid_regs cpi_extd[NMAX_CPI_EXTD]; /* 0x8000000[0-8] */ 171 id_t cpi_coreid; /* same coreid => strands share core */ 172 int cpi_pkgcoreid; /* core number within single package */ 173 uint_t cpi_ncore_per_chip; /* AMD: fn 0x80000008: %ecx[7-0] */ 174 /* Intel: fn 4: %eax[31-26] */ 175 /* 176 * supported feature information 177 */ 178 uint32_t cpi_support[5]; 179 #define STD_EDX_FEATURES 0 180 #define AMD_EDX_FEATURES 1 181 #define TM_EDX_FEATURES 2 182 #define STD_ECX_FEATURES 3 183 #define AMD_ECX_FEATURES 4 184 /* 185 * Synthesized information, where known. 186 */ 187 uint32_t cpi_chiprev; /* See X86_CHIPREV_* in x86_archext.h */ 188 const char *cpi_chiprevstr; /* May be NULL if chiprev unknown */ 189 uint32_t cpi_socket; /* Chip package/socket type */ 190 191 struct mwait_info cpi_mwait; /* fn 5: monitor/mwait info */ 192 uint32_t cpi_apicid; 193 }; 194 195 196 static struct cpuid_info cpuid_info0; 197 198 /* 199 * These bit fields are defined by the Intel Application Note AP-485 200 * "Intel Processor Identification and the CPUID Instruction" 201 */ 202 #define CPI_FAMILY_XTD(cpi) BITX((cpi)->cpi_std[1].cp_eax, 27, 20) 203 #define CPI_MODEL_XTD(cpi) BITX((cpi)->cpi_std[1].cp_eax, 19, 16) 204 #define CPI_TYPE(cpi) BITX((cpi)->cpi_std[1].cp_eax, 13, 12) 205 #define CPI_FAMILY(cpi) BITX((cpi)->cpi_std[1].cp_eax, 11, 8) 206 #define CPI_STEP(cpi) BITX((cpi)->cpi_std[1].cp_eax, 3, 0) 207 #define CPI_MODEL(cpi) BITX((cpi)->cpi_std[1].cp_eax, 7, 4) 208 209 #define CPI_FEATURES_EDX(cpi) ((cpi)->cpi_std[1].cp_edx) 210 #define CPI_FEATURES_ECX(cpi) ((cpi)->cpi_std[1].cp_ecx) 211 #define CPI_FEATURES_XTD_EDX(cpi) ((cpi)->cpi_extd[1].cp_edx) 212 #define CPI_FEATURES_XTD_ECX(cpi) ((cpi)->cpi_extd[1].cp_ecx) 213 214 #define CPI_BRANDID(cpi) BITX((cpi)->cpi_std[1].cp_ebx, 7, 0) 215 #define CPI_CHUNKS(cpi) BITX((cpi)->cpi_std[1].cp_ebx, 15, 7) 216 #define CPI_CPU_COUNT(cpi) BITX((cpi)->cpi_std[1].cp_ebx, 23, 16) 217 #define CPI_APIC_ID(cpi) BITX((cpi)->cpi_std[1].cp_ebx, 31, 24) 218 219 #define CPI_MAXEAX_MAX 0x100 /* sanity control */ 220 #define CPI_XMAXEAX_MAX 0x80000100 221 #define CPI_FN4_ECX_MAX 0x20 /* sanity: max fn 4 levels */ 222 #define CPI_FNB_ECX_MAX 0x20 /* sanity: max fn B levels */ 223 224 /* 225 * Function 4 (Deterministic Cache Parameters) macros 226 * Defined by Intel Application Note AP-485 227 */ 228 #define CPI_NUM_CORES(regs) BITX((regs)->cp_eax, 31, 26) 229 #define CPI_NTHR_SHR_CACHE(regs) BITX((regs)->cp_eax, 25, 14) 230 #define CPI_FULL_ASSOC_CACHE(regs) BITX((regs)->cp_eax, 9, 9) 231 #define CPI_SELF_INIT_CACHE(regs) BITX((regs)->cp_eax, 8, 8) 232 #define CPI_CACHE_LVL(regs) BITX((regs)->cp_eax, 7, 5) 233 #define CPI_CACHE_TYPE(regs) BITX((regs)->cp_eax, 4, 0) 234 #define CPI_CPU_LEVEL_TYPE(regs) BITX((regs)->cp_ecx, 15, 8) 235 236 #define CPI_CACHE_WAYS(regs) BITX((regs)->cp_ebx, 31, 22) 237 #define CPI_CACHE_PARTS(regs) BITX((regs)->cp_ebx, 21, 12) 238 #define CPI_CACHE_COH_LN_SZ(regs) BITX((regs)->cp_ebx, 11, 0) 239 240 #define CPI_CACHE_SETS(regs) BITX((regs)->cp_ecx, 31, 0) 241 242 #define CPI_PREFCH_STRIDE(regs) BITX((regs)->cp_edx, 9, 0) 243 244 245 /* 246 * A couple of shorthand macros to identify "later" P6-family chips 247 * like the Pentium M and Core. First, the "older" P6-based stuff 248 * (loosely defined as "pre-Pentium-4"): 249 * P6, PII, Mobile PII, PII Xeon, PIII, Mobile PIII, PIII Xeon 250 */ 251 252 #define IS_LEGACY_P6(cpi) ( \ 253 cpi->cpi_family == 6 && \ 254 (cpi->cpi_model == 1 || \ 255 cpi->cpi_model == 3 || \ 256 cpi->cpi_model == 5 || \ 257 cpi->cpi_model == 6 || \ 258 cpi->cpi_model == 7 || \ 259 cpi->cpi_model == 8 || \ 260 cpi->cpi_model == 0xA || \ 261 cpi->cpi_model == 0xB) \ 262 ) 263 264 /* A "new F6" is everything with family 6 that's not the above */ 265 #define IS_NEW_F6(cpi) ((cpi->cpi_family == 6) && !IS_LEGACY_P6(cpi)) 266 267 /* Extended family/model support */ 268 #define IS_EXTENDED_MODEL_INTEL(cpi) (cpi->cpi_family == 0x6 || \ 269 cpi->cpi_family >= 0xf) 270 271 /* 272 * Info for monitor/mwait idle loop. 273 * 274 * See cpuid section of "Intel 64 and IA-32 Architectures Software Developer's 275 * Manual Volume 2A: Instruction Set Reference, A-M" #25366-022US, November 276 * 2006. 277 * See MONITOR/MWAIT section of "AMD64 Architecture Programmer's Manual 278 * Documentation Updates" #33633, Rev 2.05, December 2006. 279 */ 280 #define MWAIT_SUPPORT (0x00000001) /* mwait supported */ 281 #define MWAIT_EXTENSIONS (0x00000002) /* extenstion supported */ 282 #define MWAIT_ECX_INT_ENABLE (0x00000004) /* ecx 1 extension supported */ 283 #define MWAIT_SUPPORTED(cpi) ((cpi)->cpi_std[1].cp_ecx & CPUID_INTC_ECX_MON) 284 #define MWAIT_INT_ENABLE(cpi) ((cpi)->cpi_std[5].cp_ecx & 0x2) 285 #define MWAIT_EXTENSION(cpi) ((cpi)->cpi_std[5].cp_ecx & 0x1) 286 #define MWAIT_SIZE_MIN(cpi) BITX((cpi)->cpi_std[5].cp_eax, 15, 0) 287 #define MWAIT_SIZE_MAX(cpi) BITX((cpi)->cpi_std[5].cp_ebx, 15, 0) 288 /* 289 * Number of sub-cstates for a given c-state. 290 */ 291 #define MWAIT_NUM_SUBC_STATES(cpi, c_state) \ 292 BITX((cpi)->cpi_std[5].cp_edx, c_state + 3, c_state) 293 294 /* 295 * Functions we consune from cpuid_subr.c; don't publish these in a header 296 * file to try and keep people using the expected cpuid_* interfaces. 297 */ 298 extern uint32_t _cpuid_skt(uint_t, uint_t, uint_t, uint_t); 299 extern uint32_t _cpuid_chiprev(uint_t, uint_t, uint_t, uint_t); 300 extern const char *_cpuid_chiprevstr(uint_t, uint_t, uint_t, uint_t); 301 extern uint_t _cpuid_vendorstr_to_vendorcode(char *); 302 303 /* 304 * Apply up various platform-dependent restrictions where the 305 * underlying platform restrictions mean the CPU can be marked 306 * as less capable than its cpuid instruction would imply. 307 */ 308 #if defined(__xpv) 309 static void 310 platform_cpuid_mangle(uint_t vendor, uint32_t eax, struct cpuid_regs *cp) 311 { 312 switch (eax) { 313 case 1: { 314 uint32_t mcamask = DOMAIN_IS_INITDOMAIN(xen_info) ? 315 0 : CPUID_INTC_EDX_MCA; 316 cp->cp_edx &= 317 ~(mcamask | 318 CPUID_INTC_EDX_PSE | 319 CPUID_INTC_EDX_VME | CPUID_INTC_EDX_DE | 320 CPUID_INTC_EDX_SEP | CPUID_INTC_EDX_MTRR | 321 CPUID_INTC_EDX_PGE | CPUID_INTC_EDX_PAT | 322 CPUID_AMD_EDX_SYSC | CPUID_INTC_EDX_SEP | 323 CPUID_INTC_EDX_PSE36 | CPUID_INTC_EDX_HTT); 324 break; 325 } 326 327 case 0x80000001: 328 cp->cp_edx &= 329 ~(CPUID_AMD_EDX_PSE | 330 CPUID_INTC_EDX_VME | CPUID_INTC_EDX_DE | 331 CPUID_AMD_EDX_MTRR | CPUID_AMD_EDX_PGE | 332 CPUID_AMD_EDX_PAT | CPUID_AMD_EDX_PSE36 | 333 CPUID_AMD_EDX_SYSC | CPUID_INTC_EDX_SEP | 334 CPUID_AMD_EDX_TSCP); 335 cp->cp_ecx &= ~CPUID_AMD_ECX_CMP_LGCY; 336 break; 337 default: 338 break; 339 } 340 341 switch (vendor) { 342 case X86_VENDOR_Intel: 343 switch (eax) { 344 case 4: 345 /* 346 * Zero out the (ncores-per-chip - 1) field 347 */ 348 cp->cp_eax &= 0x03fffffff; 349 break; 350 default: 351 break; 352 } 353 break; 354 case X86_VENDOR_AMD: 355 switch (eax) { 356 case 0x80000008: 357 /* 358 * Zero out the (ncores-per-chip - 1) field 359 */ 360 cp->cp_ecx &= 0xffffff00; 361 break; 362 default: 363 break; 364 } 365 break; 366 default: 367 break; 368 } 369 } 370 #else 371 #define platform_cpuid_mangle(vendor, eax, cp) /* nothing */ 372 #endif 373 374 /* 375 * Some undocumented ways of patching the results of the cpuid 376 * instruction to permit running Solaris 10 on future cpus that 377 * we don't currently support. Could be set to non-zero values 378 * via settings in eeprom. 379 */ 380 381 uint32_t cpuid_feature_ecx_include; 382 uint32_t cpuid_feature_ecx_exclude; 383 uint32_t cpuid_feature_edx_include; 384 uint32_t cpuid_feature_edx_exclude; 385 386 void 387 cpuid_alloc_space(cpu_t *cpu) 388 { 389 /* 390 * By convention, cpu0 is the boot cpu, which is set up 391 * before memory allocation is available. All other cpus get 392 * their cpuid_info struct allocated here. 393 */ 394 ASSERT(cpu->cpu_id != 0); 395 cpu->cpu_m.mcpu_cpi = 396 kmem_zalloc(sizeof (*cpu->cpu_m.mcpu_cpi), KM_SLEEP); 397 } 398 399 void 400 cpuid_free_space(cpu_t *cpu) 401 { 402 struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; 403 int i; 404 405 ASSERT(cpu->cpu_id != 0); 406 407 /* 408 * Free up any function 4 related dynamic storage 409 */ 410 for (i = 1; i < cpi->cpi_std_4_size; i++) 411 kmem_free(cpi->cpi_std_4[i], sizeof (struct cpuid_regs)); 412 if (cpi->cpi_std_4_size > 0) 413 kmem_free(cpi->cpi_std_4, 414 cpi->cpi_std_4_size * sizeof (struct cpuid_regs *)); 415 416 kmem_free(cpu->cpu_m.mcpu_cpi, sizeof (*cpu->cpu_m.mcpu_cpi)); 417 } 418 419 #if !defined(__xpv) 420 421 static void 422 check_for_hvm() 423 { 424 struct cpuid_regs cp; 425 char *xen_str; 426 uint32_t xen_signature[4]; 427 extern int xpv_is_hvm; 428 429 /* 430 * In a fully virtualized domain, Xen's pseudo-cpuid function 431 * 0x40000000 returns a string representing the Xen signature in 432 * %ebx, %ecx, and %edx. %eax contains the maximum supported cpuid 433 * function. 434 */ 435 cp.cp_eax = 0x40000000; 436 (void) __cpuid_insn(&cp); 437 xen_signature[0] = cp.cp_ebx; 438 xen_signature[1] = cp.cp_ecx; 439 xen_signature[2] = cp.cp_edx; 440 xen_signature[3] = 0; 441 xen_str = (char *)xen_signature; 442 if (strcmp("XenVMMXenVMM", xen_str) == 0 && cp.cp_eax <= 0x40000002) 443 xpv_is_hvm = 1; 444 } 445 #endif /* __xpv */ 446 447 uint_t 448 cpuid_pass1(cpu_t *cpu) 449 { 450 uint32_t mask_ecx, mask_edx; 451 uint_t feature = X86_CPUID; 452 struct cpuid_info *cpi; 453 struct cpuid_regs *cp; 454 int xcpuid; 455 #if !defined(__xpv) 456 extern int idle_cpu_prefer_mwait; 457 #endif 458 459 /* 460 * Space statically allocated for cpu0, ensure pointer is set 461 */ 462 if (cpu->cpu_id == 0) 463 cpu->cpu_m.mcpu_cpi = &cpuid_info0; 464 cpi = cpu->cpu_m.mcpu_cpi; 465 ASSERT(cpi != NULL); 466 cp = &cpi->cpi_std[0]; 467 cp->cp_eax = 0; 468 cpi->cpi_maxeax = __cpuid_insn(cp); 469 { 470 uint32_t *iptr = (uint32_t *)cpi->cpi_vendorstr; 471 *iptr++ = cp->cp_ebx; 472 *iptr++ = cp->cp_edx; 473 *iptr++ = cp->cp_ecx; 474 *(char *)&cpi->cpi_vendorstr[12] = '\0'; 475 } 476 477 cpi->cpi_vendor = _cpuid_vendorstr_to_vendorcode(cpi->cpi_vendorstr); 478 x86_vendor = cpi->cpi_vendor; /* for compatibility */ 479 480 /* 481 * Limit the range in case of weird hardware 482 */ 483 if (cpi->cpi_maxeax > CPI_MAXEAX_MAX) 484 cpi->cpi_maxeax = CPI_MAXEAX_MAX; 485 if (cpi->cpi_maxeax < 1) 486 goto pass1_done; 487 488 cp = &cpi->cpi_std[1]; 489 cp->cp_eax = 1; 490 (void) __cpuid_insn(cp); 491 492 /* 493 * Extract identifying constants for easy access. 494 */ 495 cpi->cpi_model = CPI_MODEL(cpi); 496 cpi->cpi_family = CPI_FAMILY(cpi); 497 498 if (cpi->cpi_family == 0xf) 499 cpi->cpi_family += CPI_FAMILY_XTD(cpi); 500 501 /* 502 * Beware: AMD uses "extended model" iff base *FAMILY* == 0xf. 503 * Intel, and presumably everyone else, uses model == 0xf, as 504 * one would expect (max value means possible overflow). Sigh. 505 */ 506 507 switch (cpi->cpi_vendor) { 508 case X86_VENDOR_Intel: 509 if (IS_EXTENDED_MODEL_INTEL(cpi)) 510 cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4; 511 break; 512 case X86_VENDOR_AMD: 513 if (CPI_FAMILY(cpi) == 0xf) 514 cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4; 515 break; 516 default: 517 if (cpi->cpi_model == 0xf) 518 cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4; 519 break; 520 } 521 522 cpi->cpi_step = CPI_STEP(cpi); 523 cpi->cpi_brandid = CPI_BRANDID(cpi); 524 525 /* 526 * *default* assumptions: 527 * - believe %edx feature word 528 * - ignore %ecx feature word 529 * - 32-bit virtual and physical addressing 530 */ 531 mask_edx = 0xffffffff; 532 mask_ecx = 0; 533 534 cpi->cpi_pabits = cpi->cpi_vabits = 32; 535 536 switch (cpi->cpi_vendor) { 537 case X86_VENDOR_Intel: 538 if (cpi->cpi_family == 5) 539 x86_type = X86_TYPE_P5; 540 else if (IS_LEGACY_P6(cpi)) { 541 x86_type = X86_TYPE_P6; 542 pentiumpro_bug4046376 = 1; 543 pentiumpro_bug4064495 = 1; 544 /* 545 * Clear the SEP bit when it was set erroneously 546 */ 547 if (cpi->cpi_model < 3 && cpi->cpi_step < 3) 548 cp->cp_edx &= ~CPUID_INTC_EDX_SEP; 549 } else if (IS_NEW_F6(cpi) || cpi->cpi_family == 0xf) { 550 x86_type = X86_TYPE_P4; 551 /* 552 * We don't currently depend on any of the %ecx 553 * features until Prescott, so we'll only check 554 * this from P4 onwards. We might want to revisit 555 * that idea later. 556 */ 557 mask_ecx = 0xffffffff; 558 } else if (cpi->cpi_family > 0xf) 559 mask_ecx = 0xffffffff; 560 /* 561 * We don't support MONITOR/MWAIT if leaf 5 is not available 562 * to obtain the monitor linesize. 563 */ 564 if (cpi->cpi_maxeax < 5) 565 mask_ecx &= ~CPUID_INTC_ECX_MON; 566 break; 567 case X86_VENDOR_IntelClone: 568 default: 569 break; 570 case X86_VENDOR_AMD: 571 #if defined(OPTERON_ERRATUM_108) 572 if (cpi->cpi_family == 0xf && cpi->cpi_model == 0xe) { 573 cp->cp_eax = (0xf0f & cp->cp_eax) | 0xc0; 574 cpi->cpi_model = 0xc; 575 } else 576 #endif 577 if (cpi->cpi_family == 5) { 578 /* 579 * AMD K5 and K6 580 * 581 * These CPUs have an incomplete implementation 582 * of MCA/MCE which we mask away. 583 */ 584 mask_edx &= ~(CPUID_INTC_EDX_MCE | CPUID_INTC_EDX_MCA); 585 586 /* 587 * Model 0 uses the wrong (APIC) bit 588 * to indicate PGE. Fix it here. 589 */ 590 if (cpi->cpi_model == 0) { 591 if (cp->cp_edx & 0x200) { 592 cp->cp_edx &= ~0x200; 593 cp->cp_edx |= CPUID_INTC_EDX_PGE; 594 } 595 } 596 597 /* 598 * Early models had problems w/ MMX; disable. 599 */ 600 if (cpi->cpi_model < 6) 601 mask_edx &= ~CPUID_INTC_EDX_MMX; 602 } 603 604 /* 605 * For newer families, SSE3 and CX16, at least, are valid; 606 * enable all 607 */ 608 if (cpi->cpi_family >= 0xf) 609 mask_ecx = 0xffffffff; 610 /* 611 * We don't support MONITOR/MWAIT if leaf 5 is not available 612 * to obtain the monitor linesize. 613 */ 614 if (cpi->cpi_maxeax < 5) 615 mask_ecx &= ~CPUID_INTC_ECX_MON; 616 617 #if !defined(__xpv) 618 /* 619 * Do not use MONITOR/MWAIT to halt in the idle loop on any AMD 620 * processors. AMD does not intend MWAIT to be used in the cpu 621 * idle loop on current and future processors. 10h and future 622 * AMD processors use more power in MWAIT than HLT. 623 * Pre-family-10h Opterons do not have the MWAIT instruction. 624 */ 625 idle_cpu_prefer_mwait = 0; 626 #endif 627 628 break; 629 case X86_VENDOR_TM: 630 /* 631 * workaround the NT workaround in CMS 4.1 632 */ 633 if (cpi->cpi_family == 5 && cpi->cpi_model == 4 && 634 (cpi->cpi_step == 2 || cpi->cpi_step == 3)) 635 cp->cp_edx |= CPUID_INTC_EDX_CX8; 636 break; 637 case X86_VENDOR_Centaur: 638 /* 639 * workaround the NT workarounds again 640 */ 641 if (cpi->cpi_family == 6) 642 cp->cp_edx |= CPUID_INTC_EDX_CX8; 643 break; 644 case X86_VENDOR_Cyrix: 645 /* 646 * We rely heavily on the probing in locore 647 * to actually figure out what parts, if any, 648 * of the Cyrix cpuid instruction to believe. 649 */ 650 switch (x86_type) { 651 case X86_TYPE_CYRIX_486: 652 mask_edx = 0; 653 break; 654 case X86_TYPE_CYRIX_6x86: 655 mask_edx = 0; 656 break; 657 case X86_TYPE_CYRIX_6x86L: 658 mask_edx = 659 CPUID_INTC_EDX_DE | 660 CPUID_INTC_EDX_CX8; 661 break; 662 case X86_TYPE_CYRIX_6x86MX: 663 mask_edx = 664 CPUID_INTC_EDX_DE | 665 CPUID_INTC_EDX_MSR | 666 CPUID_INTC_EDX_CX8 | 667 CPUID_INTC_EDX_PGE | 668 CPUID_INTC_EDX_CMOV | 669 CPUID_INTC_EDX_MMX; 670 break; 671 case X86_TYPE_CYRIX_GXm: 672 mask_edx = 673 CPUID_INTC_EDX_MSR | 674 CPUID_INTC_EDX_CX8 | 675 CPUID_INTC_EDX_CMOV | 676 CPUID_INTC_EDX_MMX; 677 break; 678 case X86_TYPE_CYRIX_MediaGX: 679 break; 680 case X86_TYPE_CYRIX_MII: 681 case X86_TYPE_VIA_CYRIX_III: 682 mask_edx = 683 CPUID_INTC_EDX_DE | 684 CPUID_INTC_EDX_TSC | 685 CPUID_INTC_EDX_MSR | 686 CPUID_INTC_EDX_CX8 | 687 CPUID_INTC_EDX_PGE | 688 CPUID_INTC_EDX_CMOV | 689 CPUID_INTC_EDX_MMX; 690 break; 691 default: 692 break; 693 } 694 break; 695 } 696 697 #if defined(__xpv) 698 /* 699 * Do not support MONITOR/MWAIT under a hypervisor 700 */ 701 mask_ecx &= ~CPUID_INTC_ECX_MON; 702 #endif /* __xpv */ 703 704 /* 705 * Now we've figured out the masks that determine 706 * which bits we choose to believe, apply the masks 707 * to the feature words, then map the kernel's view 708 * of these feature words into its feature word. 709 */ 710 cp->cp_edx &= mask_edx; 711 cp->cp_ecx &= mask_ecx; 712 713 /* 714 * apply any platform restrictions (we don't call this 715 * immediately after __cpuid_insn here, because we need the 716 * workarounds applied above first) 717 */ 718 platform_cpuid_mangle(cpi->cpi_vendor, 1, cp); 719 720 /* 721 * fold in overrides from the "eeprom" mechanism 722 */ 723 cp->cp_edx |= cpuid_feature_edx_include; 724 cp->cp_edx &= ~cpuid_feature_edx_exclude; 725 726 cp->cp_ecx |= cpuid_feature_ecx_include; 727 cp->cp_ecx &= ~cpuid_feature_ecx_exclude; 728 729 if (cp->cp_edx & CPUID_INTC_EDX_PSE) 730 feature |= X86_LARGEPAGE; 731 if (cp->cp_edx & CPUID_INTC_EDX_TSC) 732 feature |= X86_TSC; 733 if (cp->cp_edx & CPUID_INTC_EDX_MSR) 734 feature |= X86_MSR; 735 if (cp->cp_edx & CPUID_INTC_EDX_MTRR) 736 feature |= X86_MTRR; 737 if (cp->cp_edx & CPUID_INTC_EDX_PGE) 738 feature |= X86_PGE; 739 if (cp->cp_edx & CPUID_INTC_EDX_CMOV) 740 feature |= X86_CMOV; 741 if (cp->cp_edx & CPUID_INTC_EDX_MMX) 742 feature |= X86_MMX; 743 if ((cp->cp_edx & CPUID_INTC_EDX_MCE) != 0 && 744 (cp->cp_edx & CPUID_INTC_EDX_MCA) != 0) 745 feature |= X86_MCA; 746 if (cp->cp_edx & CPUID_INTC_EDX_PAE) 747 feature |= X86_PAE; 748 if (cp->cp_edx & CPUID_INTC_EDX_CX8) 749 feature |= X86_CX8; 750 if (cp->cp_ecx & CPUID_INTC_ECX_CX16) 751 feature |= X86_CX16; 752 if (cp->cp_edx & CPUID_INTC_EDX_PAT) 753 feature |= X86_PAT; 754 if (cp->cp_edx & CPUID_INTC_EDX_SEP) 755 feature |= X86_SEP; 756 if (cp->cp_edx & CPUID_INTC_EDX_FXSR) { 757 /* 758 * In our implementation, fxsave/fxrstor 759 * are prerequisites before we'll even 760 * try and do SSE things. 761 */ 762 if (cp->cp_edx & CPUID_INTC_EDX_SSE) 763 feature |= X86_SSE; 764 if (cp->cp_edx & CPUID_INTC_EDX_SSE2) 765 feature |= X86_SSE2; 766 if (cp->cp_ecx & CPUID_INTC_ECX_SSE3) 767 feature |= X86_SSE3; 768 if (cpi->cpi_vendor == X86_VENDOR_Intel) { 769 if (cp->cp_ecx & CPUID_INTC_ECX_SSSE3) 770 feature |= X86_SSSE3; 771 if (cp->cp_ecx & CPUID_INTC_ECX_SSE4_1) 772 feature |= X86_SSE4_1; 773 if (cp->cp_ecx & CPUID_INTC_ECX_SSE4_2) 774 feature |= X86_SSE4_2; 775 } 776 } 777 if (cp->cp_edx & CPUID_INTC_EDX_DE) 778 feature |= X86_DE; 779 #if !defined(__xpv) 780 if (cp->cp_ecx & CPUID_INTC_ECX_MON) { 781 782 /* 783 * We require the CLFLUSH instruction for erratum workaround 784 * to use MONITOR/MWAIT. 785 */ 786 if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) { 787 cpi->cpi_mwait.support |= MWAIT_SUPPORT; 788 feature |= X86_MWAIT; 789 } else { 790 extern int idle_cpu_assert_cflush_monitor; 791 792 /* 793 * All processors we are aware of which have 794 * MONITOR/MWAIT also have CLFLUSH. 795 */ 796 if (idle_cpu_assert_cflush_monitor) { 797 ASSERT((cp->cp_ecx & CPUID_INTC_ECX_MON) && 798 (cp->cp_edx & CPUID_INTC_EDX_CLFSH)); 799 } 800 } 801 } 802 #endif /* __xpv */ 803 804 /* 805 * Only need it first time, rest of the cpus would follow suite. 806 * we only capture this for the bootcpu. 807 */ 808 if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) { 809 feature |= X86_CLFSH; 810 x86_clflush_size = (BITX(cp->cp_ebx, 15, 8) * 8); 811 } 812 813 if (feature & X86_PAE) 814 cpi->cpi_pabits = 36; 815 816 /* 817 * Hyperthreading configuration is slightly tricky on Intel 818 * and pure clones, and even trickier on AMD. 819 * 820 * (AMD chose to set the HTT bit on their CMP processors, 821 * even though they're not actually hyperthreaded. Thus it 822 * takes a bit more work to figure out what's really going 823 * on ... see the handling of the CMP_LGCY bit below) 824 */ 825 if (cp->cp_edx & CPUID_INTC_EDX_HTT) { 826 cpi->cpi_ncpu_per_chip = CPI_CPU_COUNT(cpi); 827 if (cpi->cpi_ncpu_per_chip > 1) 828 feature |= X86_HTT; 829 } else { 830 cpi->cpi_ncpu_per_chip = 1; 831 } 832 833 /* 834 * Work on the "extended" feature information, doing 835 * some basic initialization for cpuid_pass2() 836 */ 837 xcpuid = 0; 838 switch (cpi->cpi_vendor) { 839 case X86_VENDOR_Intel: 840 if (IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf) 841 xcpuid++; 842 break; 843 case X86_VENDOR_AMD: 844 if (cpi->cpi_family > 5 || 845 (cpi->cpi_family == 5 && cpi->cpi_model >= 1)) 846 xcpuid++; 847 break; 848 case X86_VENDOR_Cyrix: 849 /* 850 * Only these Cyrix CPUs are -known- to support 851 * extended cpuid operations. 852 */ 853 if (x86_type == X86_TYPE_VIA_CYRIX_III || 854 x86_type == X86_TYPE_CYRIX_GXm) 855 xcpuid++; 856 break; 857 case X86_VENDOR_Centaur: 858 case X86_VENDOR_TM: 859 default: 860 xcpuid++; 861 break; 862 } 863 864 if (xcpuid) { 865 cp = &cpi->cpi_extd[0]; 866 cp->cp_eax = 0x80000000; 867 cpi->cpi_xmaxeax = __cpuid_insn(cp); 868 } 869 870 if (cpi->cpi_xmaxeax & 0x80000000) { 871 872 if (cpi->cpi_xmaxeax > CPI_XMAXEAX_MAX) 873 cpi->cpi_xmaxeax = CPI_XMAXEAX_MAX; 874 875 switch (cpi->cpi_vendor) { 876 case X86_VENDOR_Intel: 877 case X86_VENDOR_AMD: 878 if (cpi->cpi_xmaxeax < 0x80000001) 879 break; 880 cp = &cpi->cpi_extd[1]; 881 cp->cp_eax = 0x80000001; 882 (void) __cpuid_insn(cp); 883 884 if (cpi->cpi_vendor == X86_VENDOR_AMD && 885 cpi->cpi_family == 5 && 886 cpi->cpi_model == 6 && 887 cpi->cpi_step == 6) { 888 /* 889 * K6 model 6 uses bit 10 to indicate SYSC 890 * Later models use bit 11. Fix it here. 891 */ 892 if (cp->cp_edx & 0x400) { 893 cp->cp_edx &= ~0x400; 894 cp->cp_edx |= CPUID_AMD_EDX_SYSC; 895 } 896 } 897 898 platform_cpuid_mangle(cpi->cpi_vendor, 0x80000001, cp); 899 900 /* 901 * Compute the additions to the kernel's feature word. 902 */ 903 if (cp->cp_edx & CPUID_AMD_EDX_NX) 904 feature |= X86_NX; 905 906 /* 907 * Regardless whether or not we boot 64-bit, 908 * we should have a way to identify whether 909 * the CPU is capable of running 64-bit. 910 */ 911 if (cp->cp_edx & CPUID_AMD_EDX_LM) 912 feature |= X86_64; 913 914 #if defined(__amd64) 915 /* 1 GB large page - enable only for 64 bit kernel */ 916 if (cp->cp_edx & CPUID_AMD_EDX_1GPG) 917 feature |= X86_1GPG; 918 #endif 919 920 if ((cpi->cpi_vendor == X86_VENDOR_AMD) && 921 (cpi->cpi_std[1].cp_edx & CPUID_INTC_EDX_FXSR) && 922 (cp->cp_ecx & CPUID_AMD_ECX_SSE4A)) 923 feature |= X86_SSE4A; 924 925 /* 926 * If both the HTT and CMP_LGCY bits are set, 927 * then we're not actually HyperThreaded. Read 928 * "AMD CPUID Specification" for more details. 929 */ 930 if (cpi->cpi_vendor == X86_VENDOR_AMD && 931 (feature & X86_HTT) && 932 (cp->cp_ecx & CPUID_AMD_ECX_CMP_LGCY)) { 933 feature &= ~X86_HTT; 934 feature |= X86_CMP; 935 } 936 #if defined(__amd64) 937 /* 938 * It's really tricky to support syscall/sysret in 939 * the i386 kernel; we rely on sysenter/sysexit 940 * instead. In the amd64 kernel, things are -way- 941 * better. 942 */ 943 if (cp->cp_edx & CPUID_AMD_EDX_SYSC) 944 feature |= X86_ASYSC; 945 946 /* 947 * While we're thinking about system calls, note 948 * that AMD processors don't support sysenter 949 * in long mode at all, so don't try to program them. 950 */ 951 if (x86_vendor == X86_VENDOR_AMD) 952 feature &= ~X86_SEP; 953 #endif 954 if (cp->cp_edx & CPUID_AMD_EDX_TSCP) 955 feature |= X86_TSCP; 956 break; 957 default: 958 break; 959 } 960 961 /* 962 * Get CPUID data about processor cores and hyperthreads. 963 */ 964 switch (cpi->cpi_vendor) { 965 case X86_VENDOR_Intel: 966 if (cpi->cpi_maxeax >= 4) { 967 cp = &cpi->cpi_std[4]; 968 cp->cp_eax = 4; 969 cp->cp_ecx = 0; 970 (void) __cpuid_insn(cp); 971 platform_cpuid_mangle(cpi->cpi_vendor, 4, cp); 972 } 973 /*FALLTHROUGH*/ 974 case X86_VENDOR_AMD: 975 if (cpi->cpi_xmaxeax < 0x80000008) 976 break; 977 cp = &cpi->cpi_extd[8]; 978 cp->cp_eax = 0x80000008; 979 (void) __cpuid_insn(cp); 980 platform_cpuid_mangle(cpi->cpi_vendor, 0x80000008, cp); 981 982 /* 983 * Virtual and physical address limits from 984 * cpuid override previously guessed values. 985 */ 986 cpi->cpi_pabits = BITX(cp->cp_eax, 7, 0); 987 cpi->cpi_vabits = BITX(cp->cp_eax, 15, 8); 988 break; 989 default: 990 break; 991 } 992 993 /* 994 * Derive the number of cores per chip 995 */ 996 switch (cpi->cpi_vendor) { 997 case X86_VENDOR_Intel: 998 if (cpi->cpi_maxeax < 4) { 999 cpi->cpi_ncore_per_chip = 1; 1000 break; 1001 } else { 1002 cpi->cpi_ncore_per_chip = 1003 BITX((cpi)->cpi_std[4].cp_eax, 31, 26) + 1; 1004 } 1005 break; 1006 case X86_VENDOR_AMD: 1007 if (cpi->cpi_xmaxeax < 0x80000008) { 1008 cpi->cpi_ncore_per_chip = 1; 1009 break; 1010 } else { 1011 /* 1012 * On family 0xf cpuid fn 2 ECX[7:0] "NC" is 1013 * 1 less than the number of physical cores on 1014 * the chip. In family 0x10 this value can 1015 * be affected by "downcoring" - it reflects 1016 * 1 less than the number of cores actually 1017 * enabled on this node. 1018 */ 1019 cpi->cpi_ncore_per_chip = 1020 BITX((cpi)->cpi_extd[8].cp_ecx, 7, 0) + 1; 1021 } 1022 break; 1023 default: 1024 cpi->cpi_ncore_per_chip = 1; 1025 break; 1026 } 1027 } else { 1028 cpi->cpi_ncore_per_chip = 1; 1029 } 1030 1031 /* 1032 * If more than one core, then this processor is CMP. 1033 */ 1034 if (cpi->cpi_ncore_per_chip > 1) 1035 feature |= X86_CMP; 1036 1037 /* 1038 * If the number of cores is the same as the number 1039 * of CPUs, then we cannot have HyperThreading. 1040 */ 1041 if (cpi->cpi_ncpu_per_chip == cpi->cpi_ncore_per_chip) 1042 feature &= ~X86_HTT; 1043 1044 if ((feature & (X86_HTT | X86_CMP)) == 0) { 1045 /* 1046 * Single-core single-threaded processors. 1047 */ 1048 cpi->cpi_chipid = -1; 1049 cpi->cpi_clogid = 0; 1050 cpi->cpi_coreid = cpu->cpu_id; 1051 cpi->cpi_pkgcoreid = 0; 1052 } else if (cpi->cpi_ncpu_per_chip > 1) { 1053 uint_t i; 1054 uint_t chipid_shift = 0; 1055 uint_t coreid_shift = 0; 1056 uint_t apic_id = CPI_APIC_ID(cpi); 1057 1058 for (i = 1; i < cpi->cpi_ncpu_per_chip; i <<= 1) 1059 chipid_shift++; 1060 cpi->cpi_chipid = apic_id >> chipid_shift; 1061 cpi->cpi_clogid = apic_id & ((1 << chipid_shift) - 1); 1062 1063 if (cpi->cpi_vendor == X86_VENDOR_Intel) { 1064 if (feature & X86_CMP) { 1065 /* 1066 * Multi-core (and possibly multi-threaded) 1067 * processors. 1068 */ 1069 uint_t ncpu_per_core; 1070 if (cpi->cpi_ncore_per_chip == 1) 1071 ncpu_per_core = cpi->cpi_ncpu_per_chip; 1072 else if (cpi->cpi_ncore_per_chip > 1) 1073 ncpu_per_core = cpi->cpi_ncpu_per_chip / 1074 cpi->cpi_ncore_per_chip; 1075 /* 1076 * 8bit APIC IDs on dual core Pentiums 1077 * look like this: 1078 * 1079 * +-----------------------+------+------+ 1080 * | Physical Package ID | MC | HT | 1081 * +-----------------------+------+------+ 1082 * <------- chipid --------> 1083 * <------- coreid ---------------> 1084 * <--- clogid --> 1085 * <------> 1086 * pkgcoreid 1087 * 1088 * Where the number of bits necessary to 1089 * represent MC and HT fields together equals 1090 * to the minimum number of bits necessary to 1091 * store the value of cpi->cpi_ncpu_per_chip. 1092 * Of those bits, the MC part uses the number 1093 * of bits necessary to store the value of 1094 * cpi->cpi_ncore_per_chip. 1095 */ 1096 for (i = 1; i < ncpu_per_core; i <<= 1) 1097 coreid_shift++; 1098 cpi->cpi_coreid = apic_id >> coreid_shift; 1099 cpi->cpi_pkgcoreid = cpi->cpi_clogid >> 1100 coreid_shift; 1101 } else if (feature & X86_HTT) { 1102 /* 1103 * Single-core multi-threaded processors. 1104 */ 1105 cpi->cpi_coreid = cpi->cpi_chipid; 1106 cpi->cpi_pkgcoreid = 0; 1107 } 1108 } else if (cpi->cpi_vendor == X86_VENDOR_AMD) { 1109 /* 1110 * AMD CMP chips currently have a single thread per 1111 * core, with 2 cores on family 0xf and 2, 3 or 4 1112 * cores on family 0x10. 1113 * 1114 * Since no two cpus share a core we must assign a 1115 * distinct coreid per cpu, and we do this by using 1116 * the cpu_id. This scheme does not, however, 1117 * guarantee that sibling cores of a chip will have 1118 * sequential coreids starting at a multiple of the 1119 * number of cores per chip - that is usually the 1120 * case, but if the ACPI MADT table is presented 1121 * in a different order then we need to perform a 1122 * few more gymnastics for the pkgcoreid. 1123 * 1124 * In family 0xf CMPs there are 2 cores on all nodes 1125 * present - no mixing of single and dual core parts. 1126 * 1127 * In family 0x10 CMPs cpuid fn 2 ECX[15:12] 1128 * "ApicIdCoreIdSize[3:0]" tells us how 1129 * many least-significant bits in the ApicId 1130 * are used to represent the core number 1131 * within the node. Cores are always 1132 * numbered sequentially from 0 regardless 1133 * of how many or which are disabled, and 1134 * there seems to be no way to discover the 1135 * real core id when some are disabled. 1136 */ 1137 cpi->cpi_coreid = cpu->cpu_id; 1138 1139 if (cpi->cpi_family == 0x10 && 1140 cpi->cpi_xmaxeax >= 0x80000008) { 1141 int coreidsz = 1142 BITX((cpi)->cpi_extd[8].cp_ecx, 15, 12); 1143 1144 cpi->cpi_pkgcoreid = 1145 apic_id & ((1 << coreidsz) - 1); 1146 } else { 1147 cpi->cpi_pkgcoreid = cpi->cpi_clogid; 1148 } 1149 } else { 1150 /* 1151 * All other processors are currently 1152 * assumed to have single cores. 1153 */ 1154 cpi->cpi_coreid = cpi->cpi_chipid; 1155 cpi->cpi_pkgcoreid = 0; 1156 } 1157 } 1158 1159 cpi->cpi_apicid = CPI_APIC_ID(cpi); 1160 1161 /* 1162 * Synthesize chip "revision" and socket type 1163 */ 1164 cpi->cpi_chiprev = _cpuid_chiprev(cpi->cpi_vendor, cpi->cpi_family, 1165 cpi->cpi_model, cpi->cpi_step); 1166 cpi->cpi_chiprevstr = _cpuid_chiprevstr(cpi->cpi_vendor, 1167 cpi->cpi_family, cpi->cpi_model, cpi->cpi_step); 1168 cpi->cpi_socket = _cpuid_skt(cpi->cpi_vendor, cpi->cpi_family, 1169 cpi->cpi_model, cpi->cpi_step); 1170 1171 pass1_done: 1172 #if !defined(__xpv) 1173 check_for_hvm(); 1174 #endif 1175 cpi->cpi_pass = 1; 1176 return (feature); 1177 } 1178 1179 /* 1180 * Make copies of the cpuid table entries we depend on, in 1181 * part for ease of parsing now, in part so that we have only 1182 * one place to correct any of it, in part for ease of 1183 * later export to userland, and in part so we can look at 1184 * this stuff in a crash dump. 1185 */ 1186 1187 /*ARGSUSED*/ 1188 void 1189 cpuid_pass2(cpu_t *cpu) 1190 { 1191 uint_t n, nmax; 1192 int i; 1193 struct cpuid_regs *cp; 1194 uint8_t *dp; 1195 uint32_t *iptr; 1196 struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; 1197 1198 ASSERT(cpi->cpi_pass == 1); 1199 1200 if (cpi->cpi_maxeax < 1) 1201 goto pass2_done; 1202 1203 if ((nmax = cpi->cpi_maxeax + 1) > NMAX_CPI_STD) 1204 nmax = NMAX_CPI_STD; 1205 /* 1206 * (We already handled n == 0 and n == 1 in pass 1) 1207 */ 1208 for (n = 2, cp = &cpi->cpi_std[2]; n < nmax; n++, cp++) { 1209 cp->cp_eax = n; 1210 1211 /* 1212 * CPUID function 4 expects %ecx to be initialized 1213 * with an index which indicates which cache to return 1214 * information about. The OS is expected to call function 4 1215 * with %ecx set to 0, 1, 2, ... until it returns with 1216 * EAX[4:0] set to 0, which indicates there are no more 1217 * caches. 1218 * 1219 * Here, populate cpi_std[4] with the information returned by 1220 * function 4 when %ecx == 0, and do the rest in cpuid_pass3() 1221 * when dynamic memory allocation becomes available. 1222 * 1223 * Note: we need to explicitly initialize %ecx here, since 1224 * function 4 may have been previously invoked. 1225 */ 1226 if (n == 4) 1227 cp->cp_ecx = 0; 1228 1229 (void) __cpuid_insn(cp); 1230 platform_cpuid_mangle(cpi->cpi_vendor, n, cp); 1231 switch (n) { 1232 case 2: 1233 /* 1234 * "the lower 8 bits of the %eax register 1235 * contain a value that identifies the number 1236 * of times the cpuid [instruction] has to be 1237 * executed to obtain a complete image of the 1238 * processor's caching systems." 1239 * 1240 * How *do* they make this stuff up? 1241 */ 1242 cpi->cpi_ncache = sizeof (*cp) * 1243 BITX(cp->cp_eax, 7, 0); 1244 if (cpi->cpi_ncache == 0) 1245 break; 1246 cpi->cpi_ncache--; /* skip count byte */ 1247 1248 /* 1249 * Well, for now, rather than attempt to implement 1250 * this slightly dubious algorithm, we just look 1251 * at the first 15 .. 1252 */ 1253 if (cpi->cpi_ncache > (sizeof (*cp) - 1)) 1254 cpi->cpi_ncache = sizeof (*cp) - 1; 1255 1256 dp = cpi->cpi_cacheinfo; 1257 if (BITX(cp->cp_eax, 31, 31) == 0) { 1258 uint8_t *p = (void *)&cp->cp_eax; 1259 for (i = 1; i < 4; i++) 1260 if (p[i] != 0) 1261 *dp++ = p[i]; 1262 } 1263 if (BITX(cp->cp_ebx, 31, 31) == 0) { 1264 uint8_t *p = (void *)&cp->cp_ebx; 1265 for (i = 0; i < 4; i++) 1266 if (p[i] != 0) 1267 *dp++ = p[i]; 1268 } 1269 if (BITX(cp->cp_ecx, 31, 31) == 0) { 1270 uint8_t *p = (void *)&cp->cp_ecx; 1271 for (i = 0; i < 4; i++) 1272 if (p[i] != 0) 1273 *dp++ = p[i]; 1274 } 1275 if (BITX(cp->cp_edx, 31, 31) == 0) { 1276 uint8_t *p = (void *)&cp->cp_edx; 1277 for (i = 0; i < 4; i++) 1278 if (p[i] != 0) 1279 *dp++ = p[i]; 1280 } 1281 break; 1282 1283 case 3: /* Processor serial number, if PSN supported */ 1284 break; 1285 1286 case 4: /* Deterministic cache parameters */ 1287 break; 1288 1289 case 5: /* Monitor/Mwait parameters */ 1290 { 1291 size_t mwait_size; 1292 1293 /* 1294 * check cpi_mwait.support which was set in cpuid_pass1 1295 */ 1296 if (!(cpi->cpi_mwait.support & MWAIT_SUPPORT)) 1297 break; 1298 1299 /* 1300 * Protect ourself from insane mwait line size. 1301 * Workaround for incomplete hardware emulator(s). 1302 */ 1303 mwait_size = (size_t)MWAIT_SIZE_MAX(cpi); 1304 if (mwait_size < sizeof (uint32_t) || 1305 !ISP2(mwait_size)) { 1306 #if DEBUG 1307 cmn_err(CE_NOTE, "Cannot handle cpu %d mwait " 1308 "size %ld", 1309 cpu->cpu_id, (long)mwait_size); 1310 #endif 1311 break; 1312 } 1313 1314 cpi->cpi_mwait.mon_min = (size_t)MWAIT_SIZE_MIN(cpi); 1315 cpi->cpi_mwait.mon_max = mwait_size; 1316 if (MWAIT_EXTENSION(cpi)) { 1317 cpi->cpi_mwait.support |= MWAIT_EXTENSIONS; 1318 if (MWAIT_INT_ENABLE(cpi)) 1319 cpi->cpi_mwait.support |= 1320 MWAIT_ECX_INT_ENABLE; 1321 } 1322 break; 1323 } 1324 default: 1325 break; 1326 } 1327 } 1328 1329 if (cpi->cpi_maxeax >= 0xB && cpi->cpi_vendor == X86_VENDOR_Intel) { 1330 cp->cp_eax = 0xB; 1331 cp->cp_ecx = 0; 1332 1333 (void) __cpuid_insn(cp); 1334 1335 /* 1336 * Check CPUID.EAX=0BH, ECX=0H:EBX is non-zero, which 1337 * indicates that the extended topology enumeration leaf is 1338 * available. 1339 */ 1340 if (cp->cp_ebx) { 1341 uint32_t x2apic_id; 1342 uint_t coreid_shift = 0; 1343 uint_t ncpu_per_core = 1; 1344 uint_t chipid_shift = 0; 1345 uint_t ncpu_per_chip = 1; 1346 uint_t i; 1347 uint_t level; 1348 1349 for (i = 0; i < CPI_FNB_ECX_MAX; i++) { 1350 cp->cp_eax = 0xB; 1351 cp->cp_ecx = i; 1352 1353 (void) __cpuid_insn(cp); 1354 level = CPI_CPU_LEVEL_TYPE(cp); 1355 1356 if (level == 1) { 1357 x2apic_id = cp->cp_edx; 1358 coreid_shift = BITX(cp->cp_eax, 4, 0); 1359 ncpu_per_core = BITX(cp->cp_ebx, 15, 0); 1360 } else if (level == 2) { 1361 x2apic_id = cp->cp_edx; 1362 chipid_shift = BITX(cp->cp_eax, 4, 0); 1363 ncpu_per_chip = BITX(cp->cp_ebx, 15, 0); 1364 } 1365 } 1366 1367 cpi->cpi_apicid = x2apic_id; 1368 cpi->cpi_ncpu_per_chip = ncpu_per_chip; 1369 cpi->cpi_ncore_per_chip = ncpu_per_chip / 1370 ncpu_per_core; 1371 cpi->cpi_chipid = x2apic_id >> chipid_shift; 1372 cpi->cpi_clogid = x2apic_id & ((1 << chipid_shift) - 1); 1373 cpi->cpi_coreid = x2apic_id >> coreid_shift; 1374 cpi->cpi_pkgcoreid = cpi->cpi_clogid >> coreid_shift; 1375 } 1376 } 1377 1378 if ((cpi->cpi_xmaxeax & 0x80000000) == 0) 1379 goto pass2_done; 1380 1381 if ((nmax = cpi->cpi_xmaxeax - 0x80000000 + 1) > NMAX_CPI_EXTD) 1382 nmax = NMAX_CPI_EXTD; 1383 /* 1384 * Copy the extended properties, fixing them as we go. 1385 * (We already handled n == 0 and n == 1 in pass 1) 1386 */ 1387 iptr = (void *)cpi->cpi_brandstr; 1388 for (n = 2, cp = &cpi->cpi_extd[2]; n < nmax; cp++, n++) { 1389 cp->cp_eax = 0x80000000 + n; 1390 (void) __cpuid_insn(cp); 1391 platform_cpuid_mangle(cpi->cpi_vendor, 0x80000000 + n, cp); 1392 switch (n) { 1393 case 2: 1394 case 3: 1395 case 4: 1396 /* 1397 * Extract the brand string 1398 */ 1399 *iptr++ = cp->cp_eax; 1400 *iptr++ = cp->cp_ebx; 1401 *iptr++ = cp->cp_ecx; 1402 *iptr++ = cp->cp_edx; 1403 break; 1404 case 5: 1405 switch (cpi->cpi_vendor) { 1406 case X86_VENDOR_AMD: 1407 /* 1408 * The Athlon and Duron were the first 1409 * parts to report the sizes of the 1410 * TLB for large pages. Before then, 1411 * we don't trust the data. 1412 */ 1413 if (cpi->cpi_family < 6 || 1414 (cpi->cpi_family == 6 && 1415 cpi->cpi_model < 1)) 1416 cp->cp_eax = 0; 1417 break; 1418 default: 1419 break; 1420 } 1421 break; 1422 case 6: 1423 switch (cpi->cpi_vendor) { 1424 case X86_VENDOR_AMD: 1425 /* 1426 * The Athlon and Duron were the first 1427 * AMD parts with L2 TLB's. 1428 * Before then, don't trust the data. 1429 */ 1430 if (cpi->cpi_family < 6 || 1431 cpi->cpi_family == 6 && 1432 cpi->cpi_model < 1) 1433 cp->cp_eax = cp->cp_ebx = 0; 1434 /* 1435 * AMD Duron rev A0 reports L2 1436 * cache size incorrectly as 1K 1437 * when it is really 64K 1438 */ 1439 if (cpi->cpi_family == 6 && 1440 cpi->cpi_model == 3 && 1441 cpi->cpi_step == 0) { 1442 cp->cp_ecx &= 0xffff; 1443 cp->cp_ecx |= 0x400000; 1444 } 1445 break; 1446 case X86_VENDOR_Cyrix: /* VIA C3 */ 1447 /* 1448 * VIA C3 processors are a bit messed 1449 * up w.r.t. encoding cache sizes in %ecx 1450 */ 1451 if (cpi->cpi_family != 6) 1452 break; 1453 /* 1454 * model 7 and 8 were incorrectly encoded 1455 * 1456 * xxx is model 8 really broken? 1457 */ 1458 if (cpi->cpi_model == 7 || 1459 cpi->cpi_model == 8) 1460 cp->cp_ecx = 1461 BITX(cp->cp_ecx, 31, 24) << 16 | 1462 BITX(cp->cp_ecx, 23, 16) << 12 | 1463 BITX(cp->cp_ecx, 15, 8) << 8 | 1464 BITX(cp->cp_ecx, 7, 0); 1465 /* 1466 * model 9 stepping 1 has wrong associativity 1467 */ 1468 if (cpi->cpi_model == 9 && cpi->cpi_step == 1) 1469 cp->cp_ecx |= 8 << 12; 1470 break; 1471 case X86_VENDOR_Intel: 1472 /* 1473 * Extended L2 Cache features function. 1474 * First appeared on Prescott. 1475 */ 1476 default: 1477 break; 1478 } 1479 break; 1480 default: 1481 break; 1482 } 1483 } 1484 1485 pass2_done: 1486 cpi->cpi_pass = 2; 1487 } 1488 1489 static const char * 1490 intel_cpubrand(const struct cpuid_info *cpi) 1491 { 1492 int i; 1493 1494 if ((x86_feature & X86_CPUID) == 0 || 1495 cpi->cpi_maxeax < 1 || cpi->cpi_family < 5) 1496 return ("i486"); 1497 1498 switch (cpi->cpi_family) { 1499 case 5: 1500 return ("Intel Pentium(r)"); 1501 case 6: 1502 switch (cpi->cpi_model) { 1503 uint_t celeron, xeon; 1504 const struct cpuid_regs *cp; 1505 case 0: 1506 case 1: 1507 case 2: 1508 return ("Intel Pentium(r) Pro"); 1509 case 3: 1510 case 4: 1511 return ("Intel Pentium(r) II"); 1512 case 6: 1513 return ("Intel Celeron(r)"); 1514 case 5: 1515 case 7: 1516 celeron = xeon = 0; 1517 cp = &cpi->cpi_std[2]; /* cache info */ 1518 1519 for (i = 1; i < 4; i++) { 1520 uint_t tmp; 1521 1522 tmp = (cp->cp_eax >> (8 * i)) & 0xff; 1523 if (tmp == 0x40) 1524 celeron++; 1525 if (tmp >= 0x44 && tmp <= 0x45) 1526 xeon++; 1527 } 1528 1529 for (i = 0; i < 2; i++) { 1530 uint_t tmp; 1531 1532 tmp = (cp->cp_ebx >> (8 * i)) & 0xff; 1533 if (tmp == 0x40) 1534 celeron++; 1535 else if (tmp >= 0x44 && tmp <= 0x45) 1536 xeon++; 1537 } 1538 1539 for (i = 0; i < 4; i++) { 1540 uint_t tmp; 1541 1542 tmp = (cp->cp_ecx >> (8 * i)) & 0xff; 1543 if (tmp == 0x40) 1544 celeron++; 1545 else if (tmp >= 0x44 && tmp <= 0x45) 1546 xeon++; 1547 } 1548 1549 for (i = 0; i < 4; i++) { 1550 uint_t tmp; 1551 1552 tmp = (cp->cp_edx >> (8 * i)) & 0xff; 1553 if (tmp == 0x40) 1554 celeron++; 1555 else if (tmp >= 0x44 && tmp <= 0x45) 1556 xeon++; 1557 } 1558 1559 if (celeron) 1560 return ("Intel Celeron(r)"); 1561 if (xeon) 1562 return (cpi->cpi_model == 5 ? 1563 "Intel Pentium(r) II Xeon(tm)" : 1564 "Intel Pentium(r) III Xeon(tm)"); 1565 return (cpi->cpi_model == 5 ? 1566 "Intel Pentium(r) II or Pentium(r) II Xeon(tm)" : 1567 "Intel Pentium(r) III or Pentium(r) III Xeon(tm)"); 1568 default: 1569 break; 1570 } 1571 default: 1572 break; 1573 } 1574 1575 /* BrandID is present if the field is nonzero */ 1576 if (cpi->cpi_brandid != 0) { 1577 static const struct { 1578 uint_t bt_bid; 1579 const char *bt_str; 1580 } brand_tbl[] = { 1581 { 0x1, "Intel(r) Celeron(r)" }, 1582 { 0x2, "Intel(r) Pentium(r) III" }, 1583 { 0x3, "Intel(r) Pentium(r) III Xeon(tm)" }, 1584 { 0x4, "Intel(r) Pentium(r) III" }, 1585 { 0x6, "Mobile Intel(r) Pentium(r) III" }, 1586 { 0x7, "Mobile Intel(r) Celeron(r)" }, 1587 { 0x8, "Intel(r) Pentium(r) 4" }, 1588 { 0x9, "Intel(r) Pentium(r) 4" }, 1589 { 0xa, "Intel(r) Celeron(r)" }, 1590 { 0xb, "Intel(r) Xeon(tm)" }, 1591 { 0xc, "Intel(r) Xeon(tm) MP" }, 1592 { 0xe, "Mobile Intel(r) Pentium(r) 4" }, 1593 { 0xf, "Mobile Intel(r) Celeron(r)" }, 1594 { 0x11, "Mobile Genuine Intel(r)" }, 1595 { 0x12, "Intel(r) Celeron(r) M" }, 1596 { 0x13, "Mobile Intel(r) Celeron(r)" }, 1597 { 0x14, "Intel(r) Celeron(r)" }, 1598 { 0x15, "Mobile Genuine Intel(r)" }, 1599 { 0x16, "Intel(r) Pentium(r) M" }, 1600 { 0x17, "Mobile Intel(r) Celeron(r)" } 1601 }; 1602 uint_t btblmax = sizeof (brand_tbl) / sizeof (brand_tbl[0]); 1603 uint_t sgn; 1604 1605 sgn = (cpi->cpi_family << 8) | 1606 (cpi->cpi_model << 4) | cpi->cpi_step; 1607 1608 for (i = 0; i < btblmax; i++) 1609 if (brand_tbl[i].bt_bid == cpi->cpi_brandid) 1610 break; 1611 if (i < btblmax) { 1612 if (sgn == 0x6b1 && cpi->cpi_brandid == 3) 1613 return ("Intel(r) Celeron(r)"); 1614 if (sgn < 0xf13 && cpi->cpi_brandid == 0xb) 1615 return ("Intel(r) Xeon(tm) MP"); 1616 if (sgn < 0xf13 && cpi->cpi_brandid == 0xe) 1617 return ("Intel(r) Xeon(tm)"); 1618 return (brand_tbl[i].bt_str); 1619 } 1620 } 1621 1622 return (NULL); 1623 } 1624 1625 static const char * 1626 amd_cpubrand(const struct cpuid_info *cpi) 1627 { 1628 if ((x86_feature & X86_CPUID) == 0 || 1629 cpi->cpi_maxeax < 1 || cpi->cpi_family < 5) 1630 return ("i486 compatible"); 1631 1632 switch (cpi->cpi_family) { 1633 case 5: 1634 switch (cpi->cpi_model) { 1635 case 0: 1636 case 1: 1637 case 2: 1638 case 3: 1639 case 4: 1640 case 5: 1641 return ("AMD-K5(r)"); 1642 case 6: 1643 case 7: 1644 return ("AMD-K6(r)"); 1645 case 8: 1646 return ("AMD-K6(r)-2"); 1647 case 9: 1648 return ("AMD-K6(r)-III"); 1649 default: 1650 return ("AMD (family 5)"); 1651 } 1652 case 6: 1653 switch (cpi->cpi_model) { 1654 case 1: 1655 return ("AMD-K7(tm)"); 1656 case 0: 1657 case 2: 1658 case 4: 1659 return ("AMD Athlon(tm)"); 1660 case 3: 1661 case 7: 1662 return ("AMD Duron(tm)"); 1663 case 6: 1664 case 8: 1665 case 10: 1666 /* 1667 * Use the L2 cache size to distinguish 1668 */ 1669 return ((cpi->cpi_extd[6].cp_ecx >> 16) >= 256 ? 1670 "AMD Athlon(tm)" : "AMD Duron(tm)"); 1671 default: 1672 return ("AMD (family 6)"); 1673 } 1674 default: 1675 break; 1676 } 1677 1678 if (cpi->cpi_family == 0xf && cpi->cpi_model == 5 && 1679 cpi->cpi_brandid != 0) { 1680 switch (BITX(cpi->cpi_brandid, 7, 5)) { 1681 case 3: 1682 return ("AMD Opteron(tm) UP 1xx"); 1683 case 4: 1684 return ("AMD Opteron(tm) DP 2xx"); 1685 case 5: 1686 return ("AMD Opteron(tm) MP 8xx"); 1687 default: 1688 return ("AMD Opteron(tm)"); 1689 } 1690 } 1691 1692 return (NULL); 1693 } 1694 1695 static const char * 1696 cyrix_cpubrand(struct cpuid_info *cpi, uint_t type) 1697 { 1698 if ((x86_feature & X86_CPUID) == 0 || 1699 cpi->cpi_maxeax < 1 || cpi->cpi_family < 5 || 1700 type == X86_TYPE_CYRIX_486) 1701 return ("i486 compatible"); 1702 1703 switch (type) { 1704 case X86_TYPE_CYRIX_6x86: 1705 return ("Cyrix 6x86"); 1706 case X86_TYPE_CYRIX_6x86L: 1707 return ("Cyrix 6x86L"); 1708 case X86_TYPE_CYRIX_6x86MX: 1709 return ("Cyrix 6x86MX"); 1710 case X86_TYPE_CYRIX_GXm: 1711 return ("Cyrix GXm"); 1712 case X86_TYPE_CYRIX_MediaGX: 1713 return ("Cyrix MediaGX"); 1714 case X86_TYPE_CYRIX_MII: 1715 return ("Cyrix M2"); 1716 case X86_TYPE_VIA_CYRIX_III: 1717 return ("VIA Cyrix M3"); 1718 default: 1719 /* 1720 * Have another wild guess .. 1721 */ 1722 if (cpi->cpi_family == 4 && cpi->cpi_model == 9) 1723 return ("Cyrix 5x86"); 1724 else if (cpi->cpi_family == 5) { 1725 switch (cpi->cpi_model) { 1726 case 2: 1727 return ("Cyrix 6x86"); /* Cyrix M1 */ 1728 case 4: 1729 return ("Cyrix MediaGX"); 1730 default: 1731 break; 1732 } 1733 } else if (cpi->cpi_family == 6) { 1734 switch (cpi->cpi_model) { 1735 case 0: 1736 return ("Cyrix 6x86MX"); /* Cyrix M2? */ 1737 case 5: 1738 case 6: 1739 case 7: 1740 case 8: 1741 case 9: 1742 return ("VIA C3"); 1743 default: 1744 break; 1745 } 1746 } 1747 break; 1748 } 1749 return (NULL); 1750 } 1751 1752 /* 1753 * This only gets called in the case that the CPU extended 1754 * feature brand string (0x80000002, 0x80000003, 0x80000004) 1755 * aren't available, or contain null bytes for some reason. 1756 */ 1757 static void 1758 fabricate_brandstr(struct cpuid_info *cpi) 1759 { 1760 const char *brand = NULL; 1761 1762 switch (cpi->cpi_vendor) { 1763 case X86_VENDOR_Intel: 1764 brand = intel_cpubrand(cpi); 1765 break; 1766 case X86_VENDOR_AMD: 1767 brand = amd_cpubrand(cpi); 1768 break; 1769 case X86_VENDOR_Cyrix: 1770 brand = cyrix_cpubrand(cpi, x86_type); 1771 break; 1772 case X86_VENDOR_NexGen: 1773 if (cpi->cpi_family == 5 && cpi->cpi_model == 0) 1774 brand = "NexGen Nx586"; 1775 break; 1776 case X86_VENDOR_Centaur: 1777 if (cpi->cpi_family == 5) 1778 switch (cpi->cpi_model) { 1779 case 4: 1780 brand = "Centaur C6"; 1781 break; 1782 case 8: 1783 brand = "Centaur C2"; 1784 break; 1785 case 9: 1786 brand = "Centaur C3"; 1787 break; 1788 default: 1789 break; 1790 } 1791 break; 1792 case X86_VENDOR_Rise: 1793 if (cpi->cpi_family == 5 && 1794 (cpi->cpi_model == 0 || cpi->cpi_model == 2)) 1795 brand = "Rise mP6"; 1796 break; 1797 case X86_VENDOR_SiS: 1798 if (cpi->cpi_family == 5 && cpi->cpi_model == 0) 1799 brand = "SiS 55x"; 1800 break; 1801 case X86_VENDOR_TM: 1802 if (cpi->cpi_family == 5 && cpi->cpi_model == 4) 1803 brand = "Transmeta Crusoe TM3x00 or TM5x00"; 1804 break; 1805 case X86_VENDOR_NSC: 1806 case X86_VENDOR_UMC: 1807 default: 1808 break; 1809 } 1810 if (brand) { 1811 (void) strcpy((char *)cpi->cpi_brandstr, brand); 1812 return; 1813 } 1814 1815 /* 1816 * If all else fails ... 1817 */ 1818 (void) snprintf(cpi->cpi_brandstr, sizeof (cpi->cpi_brandstr), 1819 "%s %d.%d.%d", cpi->cpi_vendorstr, cpi->cpi_family, 1820 cpi->cpi_model, cpi->cpi_step); 1821 } 1822 1823 /* 1824 * This routine is called just after kernel memory allocation 1825 * becomes available on cpu0, and as part of mp_startup() on 1826 * the other cpus. 1827 * 1828 * Fixup the brand string, and collect any information from cpuid 1829 * that requires dynamicically allocated storage to represent. 1830 */ 1831 /*ARGSUSED*/ 1832 void 1833 cpuid_pass3(cpu_t *cpu) 1834 { 1835 int i, max, shft, level, size; 1836 struct cpuid_regs regs; 1837 struct cpuid_regs *cp; 1838 struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; 1839 1840 ASSERT(cpi->cpi_pass == 2); 1841 1842 /* 1843 * Function 4: Deterministic cache parameters 1844 * 1845 * Take this opportunity to detect the number of threads 1846 * sharing the last level cache, and construct a corresponding 1847 * cache id. The respective cpuid_info members are initialized 1848 * to the default case of "no last level cache sharing". 1849 */ 1850 cpi->cpi_ncpu_shr_last_cache = 1; 1851 cpi->cpi_last_lvl_cacheid = cpu->cpu_id; 1852 1853 if (cpi->cpi_maxeax >= 4 && cpi->cpi_vendor == X86_VENDOR_Intel) { 1854 1855 /* 1856 * Find the # of elements (size) returned by fn 4, and along 1857 * the way detect last level cache sharing details. 1858 */ 1859 bzero(®s, sizeof (regs)); 1860 cp = ®s; 1861 for (i = 0, max = 0; i < CPI_FN4_ECX_MAX; i++) { 1862 cp->cp_eax = 4; 1863 cp->cp_ecx = i; 1864 1865 (void) __cpuid_insn(cp); 1866 1867 if (CPI_CACHE_TYPE(cp) == 0) 1868 break; 1869 level = CPI_CACHE_LVL(cp); 1870 if (level > max) { 1871 max = level; 1872 cpi->cpi_ncpu_shr_last_cache = 1873 CPI_NTHR_SHR_CACHE(cp) + 1; 1874 } 1875 } 1876 cpi->cpi_std_4_size = size = i; 1877 1878 /* 1879 * Allocate the cpi_std_4 array. The first element 1880 * references the regs for fn 4, %ecx == 0, which 1881 * cpuid_pass2() stashed in cpi->cpi_std[4]. 1882 */ 1883 if (size > 0) { 1884 cpi->cpi_std_4 = 1885 kmem_alloc(size * sizeof (cp), KM_SLEEP); 1886 cpi->cpi_std_4[0] = &cpi->cpi_std[4]; 1887 1888 /* 1889 * Allocate storage to hold the additional regs 1890 * for function 4, %ecx == 1 .. cpi_std_4_size. 1891 * 1892 * The regs for fn 4, %ecx == 0 has already 1893 * been allocated as indicated above. 1894 */ 1895 for (i = 1; i < size; i++) { 1896 cp = cpi->cpi_std_4[i] = 1897 kmem_zalloc(sizeof (regs), KM_SLEEP); 1898 cp->cp_eax = 4; 1899 cp->cp_ecx = i; 1900 1901 (void) __cpuid_insn(cp); 1902 } 1903 } 1904 /* 1905 * Determine the number of bits needed to represent 1906 * the number of CPUs sharing the last level cache. 1907 * 1908 * Shift off that number of bits from the APIC id to 1909 * derive the cache id. 1910 */ 1911 shft = 0; 1912 for (i = 1; i < cpi->cpi_ncpu_shr_last_cache; i <<= 1) 1913 shft++; 1914 cpi->cpi_last_lvl_cacheid = cpi->cpi_apicid >> shft; 1915 } 1916 1917 /* 1918 * Now fixup the brand string 1919 */ 1920 if ((cpi->cpi_xmaxeax & 0x80000000) == 0) { 1921 fabricate_brandstr(cpi); 1922 } else { 1923 1924 /* 1925 * If we successfully extracted a brand string from the cpuid 1926 * instruction, clean it up by removing leading spaces and 1927 * similar junk. 1928 */ 1929 if (cpi->cpi_brandstr[0]) { 1930 size_t maxlen = sizeof (cpi->cpi_brandstr); 1931 char *src, *dst; 1932 1933 dst = src = (char *)cpi->cpi_brandstr; 1934 src[maxlen - 1] = '\0'; 1935 /* 1936 * strip leading spaces 1937 */ 1938 while (*src == ' ') 1939 src++; 1940 /* 1941 * Remove any 'Genuine' or "Authentic" prefixes 1942 */ 1943 if (strncmp(src, "Genuine ", 8) == 0) 1944 src += 8; 1945 if (strncmp(src, "Authentic ", 10) == 0) 1946 src += 10; 1947 1948 /* 1949 * Now do an in-place copy. 1950 * Map (R) to (r) and (TM) to (tm). 1951 * The era of teletypes is long gone, and there's 1952 * -really- no need to shout. 1953 */ 1954 while (*src != '\0') { 1955 if (src[0] == '(') { 1956 if (strncmp(src + 1, "R)", 2) == 0) { 1957 (void) strncpy(dst, "(r)", 3); 1958 src += 3; 1959 dst += 3; 1960 continue; 1961 } 1962 if (strncmp(src + 1, "TM)", 3) == 0) { 1963 (void) strncpy(dst, "(tm)", 4); 1964 src += 4; 1965 dst += 4; 1966 continue; 1967 } 1968 } 1969 *dst++ = *src++; 1970 } 1971 *dst = '\0'; 1972 1973 /* 1974 * Finally, remove any trailing spaces 1975 */ 1976 while (--dst > cpi->cpi_brandstr) 1977 if (*dst == ' ') 1978 *dst = '\0'; 1979 else 1980 break; 1981 } else 1982 fabricate_brandstr(cpi); 1983 } 1984 cpi->cpi_pass = 3; 1985 } 1986 1987 /* 1988 * This routine is called out of bind_hwcap() much later in the life 1989 * of the kernel (post_startup()). The job of this routine is to resolve 1990 * the hardware feature support and kernel support for those features into 1991 * what we're actually going to tell applications via the aux vector. 1992 */ 1993 uint_t 1994 cpuid_pass4(cpu_t *cpu) 1995 { 1996 struct cpuid_info *cpi; 1997 uint_t hwcap_flags = 0; 1998 1999 if (cpu == NULL) 2000 cpu = CPU; 2001 cpi = cpu->cpu_m.mcpu_cpi; 2002 2003 ASSERT(cpi->cpi_pass == 3); 2004 2005 if (cpi->cpi_maxeax >= 1) { 2006 uint32_t *edx = &cpi->cpi_support[STD_EDX_FEATURES]; 2007 uint32_t *ecx = &cpi->cpi_support[STD_ECX_FEATURES]; 2008 2009 *edx = CPI_FEATURES_EDX(cpi); 2010 *ecx = CPI_FEATURES_ECX(cpi); 2011 2012 /* 2013 * [these require explicit kernel support] 2014 */ 2015 if ((x86_feature & X86_SEP) == 0) 2016 *edx &= ~CPUID_INTC_EDX_SEP; 2017 2018 if ((x86_feature & X86_SSE) == 0) 2019 *edx &= ~(CPUID_INTC_EDX_FXSR|CPUID_INTC_EDX_SSE); 2020 if ((x86_feature & X86_SSE2) == 0) 2021 *edx &= ~CPUID_INTC_EDX_SSE2; 2022 2023 if ((x86_feature & X86_HTT) == 0) 2024 *edx &= ~CPUID_INTC_EDX_HTT; 2025 2026 if ((x86_feature & X86_SSE3) == 0) 2027 *ecx &= ~CPUID_INTC_ECX_SSE3; 2028 2029 if (cpi->cpi_vendor == X86_VENDOR_Intel) { 2030 if ((x86_feature & X86_SSSE3) == 0) 2031 *ecx &= ~CPUID_INTC_ECX_SSSE3; 2032 if ((x86_feature & X86_SSE4_1) == 0) 2033 *ecx &= ~CPUID_INTC_ECX_SSE4_1; 2034 if ((x86_feature & X86_SSE4_2) == 0) 2035 *ecx &= ~CPUID_INTC_ECX_SSE4_2; 2036 } 2037 2038 /* 2039 * [no explicit support required beyond x87 fp context] 2040 */ 2041 if (!fpu_exists) 2042 *edx &= ~(CPUID_INTC_EDX_FPU | CPUID_INTC_EDX_MMX); 2043 2044 /* 2045 * Now map the supported feature vector to things that we 2046 * think userland will care about. 2047 */ 2048 if (*edx & CPUID_INTC_EDX_SEP) 2049 hwcap_flags |= AV_386_SEP; 2050 if (*edx & CPUID_INTC_EDX_SSE) 2051 hwcap_flags |= AV_386_FXSR | AV_386_SSE; 2052 if (*edx & CPUID_INTC_EDX_SSE2) 2053 hwcap_flags |= AV_386_SSE2; 2054 if (*ecx & CPUID_INTC_ECX_SSE3) 2055 hwcap_flags |= AV_386_SSE3; 2056 if (cpi->cpi_vendor == X86_VENDOR_Intel) { 2057 if (*ecx & CPUID_INTC_ECX_SSSE3) 2058 hwcap_flags |= AV_386_SSSE3; 2059 if (*ecx & CPUID_INTC_ECX_SSE4_1) 2060 hwcap_flags |= AV_386_SSE4_1; 2061 if (*ecx & CPUID_INTC_ECX_SSE4_2) 2062 hwcap_flags |= AV_386_SSE4_2; 2063 } 2064 if (*ecx & CPUID_INTC_ECX_POPCNT) 2065 hwcap_flags |= AV_386_POPCNT; 2066 if (*edx & CPUID_INTC_EDX_FPU) 2067 hwcap_flags |= AV_386_FPU; 2068 if (*edx & CPUID_INTC_EDX_MMX) 2069 hwcap_flags |= AV_386_MMX; 2070 2071 if (*edx & CPUID_INTC_EDX_TSC) 2072 hwcap_flags |= AV_386_TSC; 2073 if (*edx & CPUID_INTC_EDX_CX8) 2074 hwcap_flags |= AV_386_CX8; 2075 if (*edx & CPUID_INTC_EDX_CMOV) 2076 hwcap_flags |= AV_386_CMOV; 2077 if (*ecx & CPUID_INTC_ECX_MON) 2078 hwcap_flags |= AV_386_MON; 2079 if (*ecx & CPUID_INTC_ECX_CX16) 2080 hwcap_flags |= AV_386_CX16; 2081 } 2082 2083 if (x86_feature & X86_HTT) 2084 hwcap_flags |= AV_386_PAUSE; 2085 2086 if (cpi->cpi_xmaxeax < 0x80000001) 2087 goto pass4_done; 2088 2089 switch (cpi->cpi_vendor) { 2090 struct cpuid_regs cp; 2091 uint32_t *edx, *ecx; 2092 2093 case X86_VENDOR_Intel: 2094 /* 2095 * Seems like Intel duplicated what we necessary 2096 * here to make the initial crop of 64-bit OS's work. 2097 * Hopefully, those are the only "extended" bits 2098 * they'll add. 2099 */ 2100 /*FALLTHROUGH*/ 2101 2102 case X86_VENDOR_AMD: 2103 edx = &cpi->cpi_support[AMD_EDX_FEATURES]; 2104 ecx = &cpi->cpi_support[AMD_ECX_FEATURES]; 2105 2106 *edx = CPI_FEATURES_XTD_EDX(cpi); 2107 *ecx = CPI_FEATURES_XTD_ECX(cpi); 2108 2109 /* 2110 * [these features require explicit kernel support] 2111 */ 2112 switch (cpi->cpi_vendor) { 2113 case X86_VENDOR_Intel: 2114 if ((x86_feature & X86_TSCP) == 0) 2115 *edx &= ~CPUID_AMD_EDX_TSCP; 2116 break; 2117 2118 case X86_VENDOR_AMD: 2119 if ((x86_feature & X86_TSCP) == 0) 2120 *edx &= ~CPUID_AMD_EDX_TSCP; 2121 if ((x86_feature & X86_SSE4A) == 0) 2122 *ecx &= ~CPUID_AMD_ECX_SSE4A; 2123 break; 2124 2125 default: 2126 break; 2127 } 2128 2129 /* 2130 * [no explicit support required beyond 2131 * x87 fp context and exception handlers] 2132 */ 2133 if (!fpu_exists) 2134 *edx &= ~(CPUID_AMD_EDX_MMXamd | 2135 CPUID_AMD_EDX_3DNow | CPUID_AMD_EDX_3DNowx); 2136 2137 if ((x86_feature & X86_NX) == 0) 2138 *edx &= ~CPUID_AMD_EDX_NX; 2139 #if !defined(__amd64) 2140 *edx &= ~CPUID_AMD_EDX_LM; 2141 #endif 2142 /* 2143 * Now map the supported feature vector to 2144 * things that we think userland will care about. 2145 */ 2146 #if defined(__amd64) 2147 if (*edx & CPUID_AMD_EDX_SYSC) 2148 hwcap_flags |= AV_386_AMD_SYSC; 2149 #endif 2150 if (*edx & CPUID_AMD_EDX_MMXamd) 2151 hwcap_flags |= AV_386_AMD_MMX; 2152 if (*edx & CPUID_AMD_EDX_3DNow) 2153 hwcap_flags |= AV_386_AMD_3DNow; 2154 if (*edx & CPUID_AMD_EDX_3DNowx) 2155 hwcap_flags |= AV_386_AMD_3DNowx; 2156 2157 switch (cpi->cpi_vendor) { 2158 case X86_VENDOR_AMD: 2159 if (*edx & CPUID_AMD_EDX_TSCP) 2160 hwcap_flags |= AV_386_TSCP; 2161 if (*ecx & CPUID_AMD_ECX_AHF64) 2162 hwcap_flags |= AV_386_AHF; 2163 if (*ecx & CPUID_AMD_ECX_SSE4A) 2164 hwcap_flags |= AV_386_AMD_SSE4A; 2165 if (*ecx & CPUID_AMD_ECX_LZCNT) 2166 hwcap_flags |= AV_386_AMD_LZCNT; 2167 break; 2168 2169 case X86_VENDOR_Intel: 2170 if (*edx & CPUID_AMD_EDX_TSCP) 2171 hwcap_flags |= AV_386_TSCP; 2172 /* 2173 * Aarrgh. 2174 * Intel uses a different bit in the same word. 2175 */ 2176 if (*ecx & CPUID_INTC_ECX_AHF64) 2177 hwcap_flags |= AV_386_AHF; 2178 break; 2179 2180 default: 2181 break; 2182 } 2183 break; 2184 2185 case X86_VENDOR_TM: 2186 cp.cp_eax = 0x80860001; 2187 (void) __cpuid_insn(&cp); 2188 cpi->cpi_support[TM_EDX_FEATURES] = cp.cp_edx; 2189 break; 2190 2191 default: 2192 break; 2193 } 2194 2195 pass4_done: 2196 cpi->cpi_pass = 4; 2197 return (hwcap_flags); 2198 } 2199 2200 2201 /* 2202 * Simulate the cpuid instruction using the data we previously 2203 * captured about this CPU. We try our best to return the truth 2204 * about the hardware, independently of kernel support. 2205 */ 2206 uint32_t 2207 cpuid_insn(cpu_t *cpu, struct cpuid_regs *cp) 2208 { 2209 struct cpuid_info *cpi; 2210 struct cpuid_regs *xcp; 2211 2212 if (cpu == NULL) 2213 cpu = CPU; 2214 cpi = cpu->cpu_m.mcpu_cpi; 2215 2216 ASSERT(cpuid_checkpass(cpu, 3)); 2217 2218 /* 2219 * CPUID data is cached in two separate places: cpi_std for standard 2220 * CPUID functions, and cpi_extd for extended CPUID functions. 2221 */ 2222 if (cp->cp_eax <= cpi->cpi_maxeax && cp->cp_eax < NMAX_CPI_STD) 2223 xcp = &cpi->cpi_std[cp->cp_eax]; 2224 else if (cp->cp_eax >= 0x80000000 && cp->cp_eax <= cpi->cpi_xmaxeax && 2225 cp->cp_eax < 0x80000000 + NMAX_CPI_EXTD) 2226 xcp = &cpi->cpi_extd[cp->cp_eax - 0x80000000]; 2227 else 2228 /* 2229 * The caller is asking for data from an input parameter which 2230 * the kernel has not cached. In this case we go fetch from 2231 * the hardware and return the data directly to the user. 2232 */ 2233 return (__cpuid_insn(cp)); 2234 2235 cp->cp_eax = xcp->cp_eax; 2236 cp->cp_ebx = xcp->cp_ebx; 2237 cp->cp_ecx = xcp->cp_ecx; 2238 cp->cp_edx = xcp->cp_edx; 2239 return (cp->cp_eax); 2240 } 2241 2242 int 2243 cpuid_checkpass(cpu_t *cpu, int pass) 2244 { 2245 return (cpu != NULL && cpu->cpu_m.mcpu_cpi != NULL && 2246 cpu->cpu_m.mcpu_cpi->cpi_pass >= pass); 2247 } 2248 2249 int 2250 cpuid_getbrandstr(cpu_t *cpu, char *s, size_t n) 2251 { 2252 ASSERT(cpuid_checkpass(cpu, 3)); 2253 2254 return (snprintf(s, n, "%s", cpu->cpu_m.mcpu_cpi->cpi_brandstr)); 2255 } 2256 2257 int 2258 cpuid_is_cmt(cpu_t *cpu) 2259 { 2260 if (cpu == NULL) 2261 cpu = CPU; 2262 2263 ASSERT(cpuid_checkpass(cpu, 1)); 2264 2265 return (cpu->cpu_m.mcpu_cpi->cpi_chipid >= 0); 2266 } 2267 2268 /* 2269 * AMD and Intel both implement the 64-bit variant of the syscall 2270 * instruction (syscallq), so if there's -any- support for syscall, 2271 * cpuid currently says "yes, we support this". 2272 * 2273 * However, Intel decided to -not- implement the 32-bit variant of the 2274 * syscall instruction, so we provide a predicate to allow our caller 2275 * to test that subtlety here. 2276 * 2277 * XXPV Currently, 32-bit syscall instructions don't work via the hypervisor, 2278 * even in the case where the hardware would in fact support it. 2279 */ 2280 /*ARGSUSED*/ 2281 int 2282 cpuid_syscall32_insn(cpu_t *cpu) 2283 { 2284 ASSERT(cpuid_checkpass((cpu == NULL ? CPU : cpu), 1)); 2285 2286 #if !defined(__xpv) 2287 if (cpu == NULL) 2288 cpu = CPU; 2289 2290 /*CSTYLED*/ 2291 { 2292 struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; 2293 2294 if (cpi->cpi_vendor == X86_VENDOR_AMD && 2295 cpi->cpi_xmaxeax >= 0x80000001 && 2296 (CPI_FEATURES_XTD_EDX(cpi) & CPUID_AMD_EDX_SYSC)) 2297 return (1); 2298 } 2299 #endif 2300 return (0); 2301 } 2302 2303 int 2304 cpuid_getidstr(cpu_t *cpu, char *s, size_t n) 2305 { 2306 struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; 2307 2308 static const char fmt[] = 2309 "x86 (%s %X family %d model %d step %d clock %d MHz)"; 2310 static const char fmt_ht[] = 2311 "x86 (chipid 0x%x %s %X family %d model %d step %d clock %d MHz)"; 2312 2313 ASSERT(cpuid_checkpass(cpu, 1)); 2314 2315 if (cpuid_is_cmt(cpu)) 2316 return (snprintf(s, n, fmt_ht, cpi->cpi_chipid, 2317 cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax, 2318 cpi->cpi_family, cpi->cpi_model, 2319 cpi->cpi_step, cpu->cpu_type_info.pi_clock)); 2320 return (snprintf(s, n, fmt, 2321 cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax, 2322 cpi->cpi_family, cpi->cpi_model, 2323 cpi->cpi_step, cpu->cpu_type_info.pi_clock)); 2324 } 2325 2326 const char * 2327 cpuid_getvendorstr(cpu_t *cpu) 2328 { 2329 ASSERT(cpuid_checkpass(cpu, 1)); 2330 return ((const char *)cpu->cpu_m.mcpu_cpi->cpi_vendorstr); 2331 } 2332 2333 uint_t 2334 cpuid_getvendor(cpu_t *cpu) 2335 { 2336 ASSERT(cpuid_checkpass(cpu, 1)); 2337 return (cpu->cpu_m.mcpu_cpi->cpi_vendor); 2338 } 2339 2340 uint_t 2341 cpuid_getfamily(cpu_t *cpu) 2342 { 2343 ASSERT(cpuid_checkpass(cpu, 1)); 2344 return (cpu->cpu_m.mcpu_cpi->cpi_family); 2345 } 2346 2347 uint_t 2348 cpuid_getmodel(cpu_t *cpu) 2349 { 2350 ASSERT(cpuid_checkpass(cpu, 1)); 2351 return (cpu->cpu_m.mcpu_cpi->cpi_model); 2352 } 2353 2354 uint_t 2355 cpuid_get_ncpu_per_chip(cpu_t *cpu) 2356 { 2357 ASSERT(cpuid_checkpass(cpu, 1)); 2358 return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_per_chip); 2359 } 2360 2361 uint_t 2362 cpuid_get_ncore_per_chip(cpu_t *cpu) 2363 { 2364 ASSERT(cpuid_checkpass(cpu, 1)); 2365 return (cpu->cpu_m.mcpu_cpi->cpi_ncore_per_chip); 2366 } 2367 2368 uint_t 2369 cpuid_get_ncpu_sharing_last_cache(cpu_t *cpu) 2370 { 2371 ASSERT(cpuid_checkpass(cpu, 2)); 2372 return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_shr_last_cache); 2373 } 2374 2375 id_t 2376 cpuid_get_last_lvl_cacheid(cpu_t *cpu) 2377 { 2378 ASSERT(cpuid_checkpass(cpu, 2)); 2379 return (cpu->cpu_m.mcpu_cpi->cpi_last_lvl_cacheid); 2380 } 2381 2382 uint_t 2383 cpuid_getstep(cpu_t *cpu) 2384 { 2385 ASSERT(cpuid_checkpass(cpu, 1)); 2386 return (cpu->cpu_m.mcpu_cpi->cpi_step); 2387 } 2388 2389 uint_t 2390 cpuid_getsig(struct cpu *cpu) 2391 { 2392 ASSERT(cpuid_checkpass(cpu, 1)); 2393 return (cpu->cpu_m.mcpu_cpi->cpi_std[1].cp_eax); 2394 } 2395 2396 uint32_t 2397 cpuid_getchiprev(struct cpu *cpu) 2398 { 2399 ASSERT(cpuid_checkpass(cpu, 1)); 2400 return (cpu->cpu_m.mcpu_cpi->cpi_chiprev); 2401 } 2402 2403 const char * 2404 cpuid_getchiprevstr(struct cpu *cpu) 2405 { 2406 ASSERT(cpuid_checkpass(cpu, 1)); 2407 return (cpu->cpu_m.mcpu_cpi->cpi_chiprevstr); 2408 } 2409 2410 uint32_t 2411 cpuid_getsockettype(struct cpu *cpu) 2412 { 2413 ASSERT(cpuid_checkpass(cpu, 1)); 2414 return (cpu->cpu_m.mcpu_cpi->cpi_socket); 2415 } 2416 2417 int 2418 cpuid_get_chipid(cpu_t *cpu) 2419 { 2420 ASSERT(cpuid_checkpass(cpu, 1)); 2421 2422 if (cpuid_is_cmt(cpu)) 2423 return (cpu->cpu_m.mcpu_cpi->cpi_chipid); 2424 return (cpu->cpu_id); 2425 } 2426 2427 id_t 2428 cpuid_get_coreid(cpu_t *cpu) 2429 { 2430 ASSERT(cpuid_checkpass(cpu, 1)); 2431 return (cpu->cpu_m.mcpu_cpi->cpi_coreid); 2432 } 2433 2434 int 2435 cpuid_get_pkgcoreid(cpu_t *cpu) 2436 { 2437 ASSERT(cpuid_checkpass(cpu, 1)); 2438 return (cpu->cpu_m.mcpu_cpi->cpi_pkgcoreid); 2439 } 2440 2441 int 2442 cpuid_get_clogid(cpu_t *cpu) 2443 { 2444 ASSERT(cpuid_checkpass(cpu, 1)); 2445 return (cpu->cpu_m.mcpu_cpi->cpi_clogid); 2446 } 2447 2448 void 2449 cpuid_get_addrsize(cpu_t *cpu, uint_t *pabits, uint_t *vabits) 2450 { 2451 struct cpuid_info *cpi; 2452 2453 if (cpu == NULL) 2454 cpu = CPU; 2455 cpi = cpu->cpu_m.mcpu_cpi; 2456 2457 ASSERT(cpuid_checkpass(cpu, 1)); 2458 2459 if (pabits) 2460 *pabits = cpi->cpi_pabits; 2461 if (vabits) 2462 *vabits = cpi->cpi_vabits; 2463 } 2464 2465 /* 2466 * Returns the number of data TLB entries for a corresponding 2467 * pagesize. If it can't be computed, or isn't known, the 2468 * routine returns zero. If you ask about an architecturally 2469 * impossible pagesize, the routine will panic (so that the 2470 * hat implementor knows that things are inconsistent.) 2471 */ 2472 uint_t 2473 cpuid_get_dtlb_nent(cpu_t *cpu, size_t pagesize) 2474 { 2475 struct cpuid_info *cpi; 2476 uint_t dtlb_nent = 0; 2477 2478 if (cpu == NULL) 2479 cpu = CPU; 2480 cpi = cpu->cpu_m.mcpu_cpi; 2481 2482 ASSERT(cpuid_checkpass(cpu, 1)); 2483 2484 /* 2485 * Check the L2 TLB info 2486 */ 2487 if (cpi->cpi_xmaxeax >= 0x80000006) { 2488 struct cpuid_regs *cp = &cpi->cpi_extd[6]; 2489 2490 switch (pagesize) { 2491 2492 case 4 * 1024: 2493 /* 2494 * All zero in the top 16 bits of the register 2495 * indicates a unified TLB. Size is in low 16 bits. 2496 */ 2497 if ((cp->cp_ebx & 0xffff0000) == 0) 2498 dtlb_nent = cp->cp_ebx & 0x0000ffff; 2499 else 2500 dtlb_nent = BITX(cp->cp_ebx, 27, 16); 2501 break; 2502 2503 case 2 * 1024 * 1024: 2504 if ((cp->cp_eax & 0xffff0000) == 0) 2505 dtlb_nent = cp->cp_eax & 0x0000ffff; 2506 else 2507 dtlb_nent = BITX(cp->cp_eax, 27, 16); 2508 break; 2509 2510 default: 2511 panic("unknown L2 pagesize"); 2512 /*NOTREACHED*/ 2513 } 2514 } 2515 2516 if (dtlb_nent != 0) 2517 return (dtlb_nent); 2518 2519 /* 2520 * No L2 TLB support for this size, try L1. 2521 */ 2522 if (cpi->cpi_xmaxeax >= 0x80000005) { 2523 struct cpuid_regs *cp = &cpi->cpi_extd[5]; 2524 2525 switch (pagesize) { 2526 case 4 * 1024: 2527 dtlb_nent = BITX(cp->cp_ebx, 23, 16); 2528 break; 2529 case 2 * 1024 * 1024: 2530 dtlb_nent = BITX(cp->cp_eax, 23, 16); 2531 break; 2532 default: 2533 panic("unknown L1 d-TLB pagesize"); 2534 /*NOTREACHED*/ 2535 } 2536 } 2537 2538 return (dtlb_nent); 2539 } 2540 2541 /* 2542 * Return 0 if the erratum is not present or not applicable, positive 2543 * if it is, and negative if the status of the erratum is unknown. 2544 * 2545 * See "Revision Guide for AMD Athlon(tm) 64 and AMD Opteron(tm) 2546 * Processors" #25759, Rev 3.57, August 2005 2547 */ 2548 int 2549 cpuid_opteron_erratum(cpu_t *cpu, uint_t erratum) 2550 { 2551 struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; 2552 uint_t eax; 2553 2554 /* 2555 * Bail out if this CPU isn't an AMD CPU, or if it's 2556 * a legacy (32-bit) AMD CPU. 2557 */ 2558 if (cpi->cpi_vendor != X86_VENDOR_AMD || 2559 cpi->cpi_family == 4 || cpi->cpi_family == 5 || 2560 cpi->cpi_family == 6) 2561 2562 return (0); 2563 2564 eax = cpi->cpi_std[1].cp_eax; 2565 2566 #define SH_B0(eax) (eax == 0xf40 || eax == 0xf50) 2567 #define SH_B3(eax) (eax == 0xf51) 2568 #define B(eax) (SH_B0(eax) || SH_B3(eax)) 2569 2570 #define SH_C0(eax) (eax == 0xf48 || eax == 0xf58) 2571 2572 #define SH_CG(eax) (eax == 0xf4a || eax == 0xf5a || eax == 0xf7a) 2573 #define DH_CG(eax) (eax == 0xfc0 || eax == 0xfe0 || eax == 0xff0) 2574 #define CH_CG(eax) (eax == 0xf82 || eax == 0xfb2) 2575 #define CG(eax) (SH_CG(eax) || DH_CG(eax) || CH_CG(eax)) 2576 2577 #define SH_D0(eax) (eax == 0x10f40 || eax == 0x10f50 || eax == 0x10f70) 2578 #define DH_D0(eax) (eax == 0x10fc0 || eax == 0x10ff0) 2579 #define CH_D0(eax) (eax == 0x10f80 || eax == 0x10fb0) 2580 #define D0(eax) (SH_D0(eax) || DH_D0(eax) || CH_D0(eax)) 2581 2582 #define SH_E0(eax) (eax == 0x20f50 || eax == 0x20f40 || eax == 0x20f70) 2583 #define JH_E1(eax) (eax == 0x20f10) /* JH8_E0 had 0x20f30 */ 2584 #define DH_E3(eax) (eax == 0x20fc0 || eax == 0x20ff0) 2585 #define SH_E4(eax) (eax == 0x20f51 || eax == 0x20f71) 2586 #define BH_E4(eax) (eax == 0x20fb1) 2587 #define SH_E5(eax) (eax == 0x20f42) 2588 #define DH_E6(eax) (eax == 0x20ff2 || eax == 0x20fc2) 2589 #define JH_E6(eax) (eax == 0x20f12 || eax == 0x20f32) 2590 #define EX(eax) (SH_E0(eax) || JH_E1(eax) || DH_E3(eax) || \ 2591 SH_E4(eax) || BH_E4(eax) || SH_E5(eax) || \ 2592 DH_E6(eax) || JH_E6(eax)) 2593 2594 #define DR_AX(eax) (eax == 0x100f00 || eax == 0x100f01 || eax == 0x100f02) 2595 #define DR_B0(eax) (eax == 0x100f20) 2596 #define DR_B1(eax) (eax == 0x100f21) 2597 #define DR_BA(eax) (eax == 0x100f2a) 2598 #define DR_B2(eax) (eax == 0x100f22) 2599 #define DR_B3(eax) (eax == 0x100f23) 2600 #define RB_C0(eax) (eax == 0x100f40) 2601 2602 switch (erratum) { 2603 case 1: 2604 return (cpi->cpi_family < 0x10); 2605 case 51: /* what does the asterisk mean? */ 2606 return (B(eax) || SH_C0(eax) || CG(eax)); 2607 case 52: 2608 return (B(eax)); 2609 case 57: 2610 return (cpi->cpi_family <= 0x11); 2611 case 58: 2612 return (B(eax)); 2613 case 60: 2614 return (cpi->cpi_family <= 0x11); 2615 case 61: 2616 case 62: 2617 case 63: 2618 case 64: 2619 case 65: 2620 case 66: 2621 case 68: 2622 case 69: 2623 case 70: 2624 case 71: 2625 return (B(eax)); 2626 case 72: 2627 return (SH_B0(eax)); 2628 case 74: 2629 return (B(eax)); 2630 case 75: 2631 return (cpi->cpi_family < 0x10); 2632 case 76: 2633 return (B(eax)); 2634 case 77: 2635 return (cpi->cpi_family <= 0x11); 2636 case 78: 2637 return (B(eax) || SH_C0(eax)); 2638 case 79: 2639 return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax)); 2640 case 80: 2641 case 81: 2642 case 82: 2643 return (B(eax)); 2644 case 83: 2645 return (B(eax) || SH_C0(eax) || CG(eax)); 2646 case 85: 2647 return (cpi->cpi_family < 0x10); 2648 case 86: 2649 return (SH_C0(eax) || CG(eax)); 2650 case 88: 2651 #if !defined(__amd64) 2652 return (0); 2653 #else 2654 return (B(eax) || SH_C0(eax)); 2655 #endif 2656 case 89: 2657 return (cpi->cpi_family < 0x10); 2658 case 90: 2659 return (B(eax) || SH_C0(eax) || CG(eax)); 2660 case 91: 2661 case 92: 2662 return (B(eax) || SH_C0(eax)); 2663 case 93: 2664 return (SH_C0(eax)); 2665 case 94: 2666 return (B(eax) || SH_C0(eax) || CG(eax)); 2667 case 95: 2668 #if !defined(__amd64) 2669 return (0); 2670 #else 2671 return (B(eax) || SH_C0(eax)); 2672 #endif 2673 case 96: 2674 return (B(eax) || SH_C0(eax) || CG(eax)); 2675 case 97: 2676 case 98: 2677 return (SH_C0(eax) || CG(eax)); 2678 case 99: 2679 return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax)); 2680 case 100: 2681 return (B(eax) || SH_C0(eax)); 2682 case 101: 2683 case 103: 2684 return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax)); 2685 case 104: 2686 return (SH_C0(eax) || CG(eax) || D0(eax)); 2687 case 105: 2688 case 106: 2689 case 107: 2690 return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax)); 2691 case 108: 2692 return (DH_CG(eax)); 2693 case 109: 2694 return (SH_C0(eax) || CG(eax) || D0(eax)); 2695 case 110: 2696 return (D0(eax) || EX(eax)); 2697 case 111: 2698 return (CG(eax)); 2699 case 112: 2700 return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax)); 2701 case 113: 2702 return (eax == 0x20fc0); 2703 case 114: 2704 return (SH_E0(eax) || JH_E1(eax) || DH_E3(eax)); 2705 case 115: 2706 return (SH_E0(eax) || JH_E1(eax)); 2707 case 116: 2708 return (SH_E0(eax) || JH_E1(eax) || DH_E3(eax)); 2709 case 117: 2710 return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax)); 2711 case 118: 2712 return (SH_E0(eax) || JH_E1(eax) || SH_E4(eax) || BH_E4(eax) || 2713 JH_E6(eax)); 2714 case 121: 2715 return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax)); 2716 case 122: 2717 return (cpi->cpi_family < 0x10 || cpi->cpi_family == 0x11); 2718 case 123: 2719 return (JH_E1(eax) || BH_E4(eax) || JH_E6(eax)); 2720 case 131: 2721 return (cpi->cpi_family < 0x10); 2722 case 6336786: 2723 /* 2724 * Test for AdvPowerMgmtInfo.TscPStateInvariant 2725 * if this is a K8 family or newer processor 2726 */ 2727 if (CPI_FAMILY(cpi) == 0xf) { 2728 struct cpuid_regs regs; 2729 regs.cp_eax = 0x80000007; 2730 (void) __cpuid_insn(®s); 2731 return (!(regs.cp_edx & 0x100)); 2732 } 2733 return (0); 2734 case 6323525: 2735 return (((((eax >> 12) & 0xff00) + (eax & 0xf00)) | 2736 (((eax >> 4) & 0xf) | ((eax >> 12) & 0xf0))) < 0xf40); 2737 2738 case 6671130: 2739 /* 2740 * check for processors (pre-Shanghai) that do not provide 2741 * optimal management of 1gb ptes in its tlb. 2742 */ 2743 return (cpi->cpi_family == 0x10 && cpi->cpi_model < 4); 2744 2745 case 298: 2746 return (DR_AX(eax) || DR_B0(eax) || DR_B1(eax) || DR_BA(eax) || 2747 DR_B2(eax) || RB_C0(eax)); 2748 2749 default: 2750 return (-1); 2751 2752 } 2753 } 2754 2755 /* 2756 * Determine if specified erratum is present via OSVW (OS Visible Workaround). 2757 * Return 1 if erratum is present, 0 if not present and -1 if indeterminate. 2758 */ 2759 int 2760 osvw_opteron_erratum(cpu_t *cpu, uint_t erratum) 2761 { 2762 struct cpuid_info *cpi; 2763 uint_t osvwid; 2764 static int osvwfeature = -1; 2765 uint64_t osvwlength; 2766 2767 2768 cpi = cpu->cpu_m.mcpu_cpi; 2769 2770 /* confirm OSVW supported */ 2771 if (osvwfeature == -1) { 2772 osvwfeature = cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW; 2773 } else { 2774 /* assert that osvw feature setting is consistent on all cpus */ 2775 ASSERT(osvwfeature == 2776 (cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW)); 2777 } 2778 if (!osvwfeature) 2779 return (-1); 2780 2781 osvwlength = rdmsr(MSR_AMD_OSVW_ID_LEN) & OSVW_ID_LEN_MASK; 2782 2783 switch (erratum) { 2784 case 298: /* osvwid is 0 */ 2785 osvwid = 0; 2786 if (osvwlength <= (uint64_t)osvwid) { 2787 /* osvwid 0 is unknown */ 2788 return (-1); 2789 } 2790 2791 /* 2792 * Check the OSVW STATUS MSR to determine the state 2793 * of the erratum where: 2794 * 0 - fixed by HW 2795 * 1 - BIOS has applied the workaround when BIOS 2796 * workaround is available. (Or for other errata, 2797 * OS workaround is required.) 2798 * For a value of 1, caller will confirm that the 2799 * erratum 298 workaround has indeed been applied by BIOS. 2800 * 2801 * A 1 may be set in cpus that have a HW fix 2802 * in a mixed cpu system. Regarding erratum 298: 2803 * In a multiprocessor platform, the workaround above 2804 * should be applied to all processors regardless of 2805 * silicon revision when an affected processor is 2806 * present. 2807 */ 2808 2809 return (rdmsr(MSR_AMD_OSVW_STATUS + 2810 (osvwid / OSVW_ID_CNT_PER_MSR)) & 2811 (1ULL << (osvwid % OSVW_ID_CNT_PER_MSR))); 2812 2813 default: 2814 return (-1); 2815 } 2816 } 2817 2818 static const char assoc_str[] = "associativity"; 2819 static const char line_str[] = "line-size"; 2820 static const char size_str[] = "size"; 2821 2822 static void 2823 add_cache_prop(dev_info_t *devi, const char *label, const char *type, 2824 uint32_t val) 2825 { 2826 char buf[128]; 2827 2828 /* 2829 * ndi_prop_update_int() is used because it is desirable for 2830 * DDI_PROP_HW_DEF and DDI_PROP_DONTSLEEP to be set. 2831 */ 2832 if (snprintf(buf, sizeof (buf), "%s-%s", label, type) < sizeof (buf)) 2833 (void) ndi_prop_update_int(DDI_DEV_T_NONE, devi, buf, val); 2834 } 2835 2836 /* 2837 * Intel-style cache/tlb description 2838 * 2839 * Standard cpuid level 2 gives a randomly ordered 2840 * selection of tags that index into a table that describes 2841 * cache and tlb properties. 2842 */ 2843 2844 static const char l1_icache_str[] = "l1-icache"; 2845 static const char l1_dcache_str[] = "l1-dcache"; 2846 static const char l2_cache_str[] = "l2-cache"; 2847 static const char l3_cache_str[] = "l3-cache"; 2848 static const char itlb4k_str[] = "itlb-4K"; 2849 static const char dtlb4k_str[] = "dtlb-4K"; 2850 static const char itlb2M_str[] = "itlb-2M"; 2851 static const char itlb4M_str[] = "itlb-4M"; 2852 static const char dtlb4M_str[] = "dtlb-4M"; 2853 static const char dtlb24_str[] = "dtlb0-2M-4M"; 2854 static const char itlb424_str[] = "itlb-4K-2M-4M"; 2855 static const char itlb24_str[] = "itlb-2M-4M"; 2856 static const char dtlb44_str[] = "dtlb-4K-4M"; 2857 static const char sl1_dcache_str[] = "sectored-l1-dcache"; 2858 static const char sl2_cache_str[] = "sectored-l2-cache"; 2859 static const char itrace_str[] = "itrace-cache"; 2860 static const char sl3_cache_str[] = "sectored-l3-cache"; 2861 static const char sh_l2_tlb4k_str[] = "shared-l2-tlb-4k"; 2862 2863 static const struct cachetab { 2864 uint8_t ct_code; 2865 uint8_t ct_assoc; 2866 uint16_t ct_line_size; 2867 size_t ct_size; 2868 const char *ct_label; 2869 } intel_ctab[] = { 2870 /* 2871 * maintain descending order! 2872 * 2873 * Codes ignored - Reason 2874 * ---------------------- 2875 * 40H - intel_cpuid_4_cache_info() disambiguates l2/l3 cache 2876 * f0H/f1H - Currently we do not interpret prefetch size by design 2877 */ 2878 { 0xe4, 16, 64, 8*1024*1024, l3_cache_str}, 2879 { 0xe3, 16, 64, 4*1024*1024, l3_cache_str}, 2880 { 0xe2, 16, 64, 2*1024*1024, l3_cache_str}, 2881 { 0xde, 12, 64, 6*1024*1024, l3_cache_str}, 2882 { 0xdd, 12, 64, 3*1024*1024, l3_cache_str}, 2883 { 0xdc, 12, 64, ((1*1024*1024)+(512*1024)), l3_cache_str}, 2884 { 0xd8, 8, 64, 4*1024*1024, l3_cache_str}, 2885 { 0xd7, 8, 64, 2*1024*1024, l3_cache_str}, 2886 { 0xd6, 8, 64, 1*1024*1024, l3_cache_str}, 2887 { 0xd2, 4, 64, 2*1024*1024, l3_cache_str}, 2888 { 0xd1, 4, 64, 1*1024*1024, l3_cache_str}, 2889 { 0xd0, 4, 64, 512*1024, l3_cache_str}, 2890 { 0xca, 4, 0, 512, sh_l2_tlb4k_str}, 2891 { 0xc0, 4, 0, 8, dtlb44_str }, 2892 { 0xba, 4, 0, 64, dtlb4k_str }, 2893 { 0xb4, 4, 0, 256, dtlb4k_str }, 2894 { 0xb3, 4, 0, 128, dtlb4k_str }, 2895 { 0xb2, 4, 0, 64, itlb4k_str }, 2896 { 0xb0, 4, 0, 128, itlb4k_str }, 2897 { 0x87, 8, 64, 1024*1024, l2_cache_str}, 2898 { 0x86, 4, 64, 512*1024, l2_cache_str}, 2899 { 0x85, 8, 32, 2*1024*1024, l2_cache_str}, 2900 { 0x84, 8, 32, 1024*1024, l2_cache_str}, 2901 { 0x83, 8, 32, 512*1024, l2_cache_str}, 2902 { 0x82, 8, 32, 256*1024, l2_cache_str}, 2903 { 0x80, 8, 64, 512*1024, l2_cache_str}, 2904 { 0x7f, 2, 64, 512*1024, l2_cache_str}, 2905 { 0x7d, 8, 64, 2*1024*1024, sl2_cache_str}, 2906 { 0x7c, 8, 64, 1024*1024, sl2_cache_str}, 2907 { 0x7b, 8, 64, 512*1024, sl2_cache_str}, 2908 { 0x7a, 8, 64, 256*1024, sl2_cache_str}, 2909 { 0x79, 8, 64, 128*1024, sl2_cache_str}, 2910 { 0x78, 8, 64, 1024*1024, l2_cache_str}, 2911 { 0x73, 8, 0, 64*1024, itrace_str}, 2912 { 0x72, 8, 0, 32*1024, itrace_str}, 2913 { 0x71, 8, 0, 16*1024, itrace_str}, 2914 { 0x70, 8, 0, 12*1024, itrace_str}, 2915 { 0x68, 4, 64, 32*1024, sl1_dcache_str}, 2916 { 0x67, 4, 64, 16*1024, sl1_dcache_str}, 2917 { 0x66, 4, 64, 8*1024, sl1_dcache_str}, 2918 { 0x60, 8, 64, 16*1024, sl1_dcache_str}, 2919 { 0x5d, 0, 0, 256, dtlb44_str}, 2920 { 0x5c, 0, 0, 128, dtlb44_str}, 2921 { 0x5b, 0, 0, 64, dtlb44_str}, 2922 { 0x5a, 4, 0, 32, dtlb24_str}, 2923 { 0x59, 0, 0, 16, dtlb4k_str}, 2924 { 0x57, 4, 0, 16, dtlb4k_str}, 2925 { 0x56, 4, 0, 16, dtlb4M_str}, 2926 { 0x55, 0, 0, 7, itlb24_str}, 2927 { 0x52, 0, 0, 256, itlb424_str}, 2928 { 0x51, 0, 0, 128, itlb424_str}, 2929 { 0x50, 0, 0, 64, itlb424_str}, 2930 { 0x4f, 0, 0, 32, itlb4k_str}, 2931 { 0x4e, 24, 64, 6*1024*1024, l2_cache_str}, 2932 { 0x4d, 16, 64, 16*1024*1024, l3_cache_str}, 2933 { 0x4c, 12, 64, 12*1024*1024, l3_cache_str}, 2934 { 0x4b, 16, 64, 8*1024*1024, l3_cache_str}, 2935 { 0x4a, 12, 64, 6*1024*1024, l3_cache_str}, 2936 { 0x49, 16, 64, 4*1024*1024, l3_cache_str}, 2937 { 0x48, 12, 64, 3*1024*1024, l2_cache_str}, 2938 { 0x47, 8, 64, 8*1024*1024, l3_cache_str}, 2939 { 0x46, 4, 64, 4*1024*1024, l3_cache_str}, 2940 { 0x45, 4, 32, 2*1024*1024, l2_cache_str}, 2941 { 0x44, 4, 32, 1024*1024, l2_cache_str}, 2942 { 0x43, 4, 32, 512*1024, l2_cache_str}, 2943 { 0x42, 4, 32, 256*1024, l2_cache_str}, 2944 { 0x41, 4, 32, 128*1024, l2_cache_str}, 2945 { 0x3e, 4, 64, 512*1024, sl2_cache_str}, 2946 { 0x3d, 6, 64, 384*1024, sl2_cache_str}, 2947 { 0x3c, 4, 64, 256*1024, sl2_cache_str}, 2948 { 0x3b, 2, 64, 128*1024, sl2_cache_str}, 2949 { 0x3a, 6, 64, 192*1024, sl2_cache_str}, 2950 { 0x39, 4, 64, 128*1024, sl2_cache_str}, 2951 { 0x30, 8, 64, 32*1024, l1_icache_str}, 2952 { 0x2c, 8, 64, 32*1024, l1_dcache_str}, 2953 { 0x29, 8, 64, 4096*1024, sl3_cache_str}, 2954 { 0x25, 8, 64, 2048*1024, sl3_cache_str}, 2955 { 0x23, 8, 64, 1024*1024, sl3_cache_str}, 2956 { 0x22, 4, 64, 512*1024, sl3_cache_str}, 2957 { 0x0e, 6, 64, 24*1024, l1_dcache_str}, 2958 { 0x0d, 4, 32, 16*1024, l1_dcache_str}, 2959 { 0x0c, 4, 32, 16*1024, l1_dcache_str}, 2960 { 0x0b, 4, 0, 4, itlb4M_str}, 2961 { 0x0a, 2, 32, 8*1024, l1_dcache_str}, 2962 { 0x08, 4, 32, 16*1024, l1_icache_str}, 2963 { 0x06, 4, 32, 8*1024, l1_icache_str}, 2964 { 0x05, 4, 0, 32, dtlb4M_str}, 2965 { 0x04, 4, 0, 8, dtlb4M_str}, 2966 { 0x03, 4, 0, 64, dtlb4k_str}, 2967 { 0x02, 4, 0, 2, itlb4M_str}, 2968 { 0x01, 4, 0, 32, itlb4k_str}, 2969 { 0 } 2970 }; 2971 2972 static const struct cachetab cyrix_ctab[] = { 2973 { 0x70, 4, 0, 32, "tlb-4K" }, 2974 { 0x80, 4, 16, 16*1024, "l1-cache" }, 2975 { 0 } 2976 }; 2977 2978 /* 2979 * Search a cache table for a matching entry 2980 */ 2981 static const struct cachetab * 2982 find_cacheent(const struct cachetab *ct, uint_t code) 2983 { 2984 if (code != 0) { 2985 for (; ct->ct_code != 0; ct++) 2986 if (ct->ct_code <= code) 2987 break; 2988 if (ct->ct_code == code) 2989 return (ct); 2990 } 2991 return (NULL); 2992 } 2993 2994 /* 2995 * Populate cachetab entry with L2 or L3 cache-information using 2996 * cpuid function 4. This function is called from intel_walk_cacheinfo() 2997 * when descriptor 0x49 is encountered. It returns 0 if no such cache 2998 * information is found. 2999 */ 3000 static int 3001 intel_cpuid_4_cache_info(struct cachetab *ct, struct cpuid_info *cpi) 3002 { 3003 uint32_t level, i; 3004 int ret = 0; 3005 3006 for (i = 0; i < cpi->cpi_std_4_size; i++) { 3007 level = CPI_CACHE_LVL(cpi->cpi_std_4[i]); 3008 3009 if (level == 2 || level == 3) { 3010 ct->ct_assoc = CPI_CACHE_WAYS(cpi->cpi_std_4[i]) + 1; 3011 ct->ct_line_size = 3012 CPI_CACHE_COH_LN_SZ(cpi->cpi_std_4[i]) + 1; 3013 ct->ct_size = ct->ct_assoc * 3014 (CPI_CACHE_PARTS(cpi->cpi_std_4[i]) + 1) * 3015 ct->ct_line_size * 3016 (cpi->cpi_std_4[i]->cp_ecx + 1); 3017 3018 if (level == 2) { 3019 ct->ct_label = l2_cache_str; 3020 } else if (level == 3) { 3021 ct->ct_label = l3_cache_str; 3022 } 3023 ret = 1; 3024 } 3025 } 3026 3027 return (ret); 3028 } 3029 3030 /* 3031 * Walk the cacheinfo descriptor, applying 'func' to every valid element 3032 * The walk is terminated if the walker returns non-zero. 3033 */ 3034 static void 3035 intel_walk_cacheinfo(struct cpuid_info *cpi, 3036 void *arg, int (*func)(void *, const struct cachetab *)) 3037 { 3038 const struct cachetab *ct; 3039 struct cachetab des_49_ct, des_b1_ct; 3040 uint8_t *dp; 3041 int i; 3042 3043 if ((dp = cpi->cpi_cacheinfo) == NULL) 3044 return; 3045 for (i = 0; i < cpi->cpi_ncache; i++, dp++) { 3046 /* 3047 * For overloaded descriptor 0x49 we use cpuid function 4 3048 * if supported by the current processor, to create 3049 * cache information. 3050 * For overloaded descriptor 0xb1 we use X86_PAE flag 3051 * to disambiguate the cache information. 3052 */ 3053 if (*dp == 0x49 && cpi->cpi_maxeax >= 0x4 && 3054 intel_cpuid_4_cache_info(&des_49_ct, cpi) == 1) { 3055 ct = &des_49_ct; 3056 } else if (*dp == 0xb1) { 3057 des_b1_ct.ct_code = 0xb1; 3058 des_b1_ct.ct_assoc = 4; 3059 des_b1_ct.ct_line_size = 0; 3060 if (x86_feature & X86_PAE) { 3061 des_b1_ct.ct_size = 8; 3062 des_b1_ct.ct_label = itlb2M_str; 3063 } else { 3064 des_b1_ct.ct_size = 4; 3065 des_b1_ct.ct_label = itlb4M_str; 3066 } 3067 ct = &des_b1_ct; 3068 } else { 3069 if ((ct = find_cacheent(intel_ctab, *dp)) == NULL) { 3070 continue; 3071 } 3072 } 3073 3074 if (func(arg, ct) != 0) { 3075 break; 3076 } 3077 } 3078 } 3079 3080 /* 3081 * (Like the Intel one, except for Cyrix CPUs) 3082 */ 3083 static void 3084 cyrix_walk_cacheinfo(struct cpuid_info *cpi, 3085 void *arg, int (*func)(void *, const struct cachetab *)) 3086 { 3087 const struct cachetab *ct; 3088 uint8_t *dp; 3089 int i; 3090 3091 if ((dp = cpi->cpi_cacheinfo) == NULL) 3092 return; 3093 for (i = 0; i < cpi->cpi_ncache; i++, dp++) { 3094 /* 3095 * Search Cyrix-specific descriptor table first .. 3096 */ 3097 if ((ct = find_cacheent(cyrix_ctab, *dp)) != NULL) { 3098 if (func(arg, ct) != 0) 3099 break; 3100 continue; 3101 } 3102 /* 3103 * .. else fall back to the Intel one 3104 */ 3105 if ((ct = find_cacheent(intel_ctab, *dp)) != NULL) { 3106 if (func(arg, ct) != 0) 3107 break; 3108 continue; 3109 } 3110 } 3111 } 3112 3113 /* 3114 * A cacheinfo walker that adds associativity, line-size, and size properties 3115 * to the devinfo node it is passed as an argument. 3116 */ 3117 static int 3118 add_cacheent_props(void *arg, const struct cachetab *ct) 3119 { 3120 dev_info_t *devi = arg; 3121 3122 add_cache_prop(devi, ct->ct_label, assoc_str, ct->ct_assoc); 3123 if (ct->ct_line_size != 0) 3124 add_cache_prop(devi, ct->ct_label, line_str, 3125 ct->ct_line_size); 3126 add_cache_prop(devi, ct->ct_label, size_str, ct->ct_size); 3127 return (0); 3128 } 3129 3130 3131 static const char fully_assoc[] = "fully-associative?"; 3132 3133 /* 3134 * AMD style cache/tlb description 3135 * 3136 * Extended functions 5 and 6 directly describe properties of 3137 * tlbs and various cache levels. 3138 */ 3139 static void 3140 add_amd_assoc(dev_info_t *devi, const char *label, uint_t assoc) 3141 { 3142 switch (assoc) { 3143 case 0: /* reserved; ignore */ 3144 break; 3145 default: 3146 add_cache_prop(devi, label, assoc_str, assoc); 3147 break; 3148 case 0xff: 3149 add_cache_prop(devi, label, fully_assoc, 1); 3150 break; 3151 } 3152 } 3153 3154 static void 3155 add_amd_tlb(dev_info_t *devi, const char *label, uint_t assoc, uint_t size) 3156 { 3157 if (size == 0) 3158 return; 3159 add_cache_prop(devi, label, size_str, size); 3160 add_amd_assoc(devi, label, assoc); 3161 } 3162 3163 static void 3164 add_amd_cache(dev_info_t *devi, const char *label, 3165 uint_t size, uint_t assoc, uint_t lines_per_tag, uint_t line_size) 3166 { 3167 if (size == 0 || line_size == 0) 3168 return; 3169 add_amd_assoc(devi, label, assoc); 3170 /* 3171 * Most AMD parts have a sectored cache. Multiple cache lines are 3172 * associated with each tag. A sector consists of all cache lines 3173 * associated with a tag. For example, the AMD K6-III has a sector 3174 * size of 2 cache lines per tag. 3175 */ 3176 if (lines_per_tag != 0) 3177 add_cache_prop(devi, label, "lines-per-tag", lines_per_tag); 3178 add_cache_prop(devi, label, line_str, line_size); 3179 add_cache_prop(devi, label, size_str, size * 1024); 3180 } 3181 3182 static void 3183 add_amd_l2_assoc(dev_info_t *devi, const char *label, uint_t assoc) 3184 { 3185 switch (assoc) { 3186 case 0: /* off */ 3187 break; 3188 case 1: 3189 case 2: 3190 case 4: 3191 add_cache_prop(devi, label, assoc_str, assoc); 3192 break; 3193 case 6: 3194 add_cache_prop(devi, label, assoc_str, 8); 3195 break; 3196 case 8: 3197 add_cache_prop(devi, label, assoc_str, 16); 3198 break; 3199 case 0xf: 3200 add_cache_prop(devi, label, fully_assoc, 1); 3201 break; 3202 default: /* reserved; ignore */ 3203 break; 3204 } 3205 } 3206 3207 static void 3208 add_amd_l2_tlb(dev_info_t *devi, const char *label, uint_t assoc, uint_t size) 3209 { 3210 if (size == 0 || assoc == 0) 3211 return; 3212 add_amd_l2_assoc(devi, label, assoc); 3213 add_cache_prop(devi, label, size_str, size); 3214 } 3215 3216 static void 3217 add_amd_l2_cache(dev_info_t *devi, const char *label, 3218 uint_t size, uint_t assoc, uint_t lines_per_tag, uint_t line_size) 3219 { 3220 if (size == 0 || assoc == 0 || line_size == 0) 3221 return; 3222 add_amd_l2_assoc(devi, label, assoc); 3223 if (lines_per_tag != 0) 3224 add_cache_prop(devi, label, "lines-per-tag", lines_per_tag); 3225 add_cache_prop(devi, label, line_str, line_size); 3226 add_cache_prop(devi, label, size_str, size * 1024); 3227 } 3228 3229 static void 3230 amd_cache_info(struct cpuid_info *cpi, dev_info_t *devi) 3231 { 3232 struct cpuid_regs *cp; 3233 3234 if (cpi->cpi_xmaxeax < 0x80000005) 3235 return; 3236 cp = &cpi->cpi_extd[5]; 3237 3238 /* 3239 * 4M/2M L1 TLB configuration 3240 * 3241 * We report the size for 2M pages because AMD uses two 3242 * TLB entries for one 4M page. 3243 */ 3244 add_amd_tlb(devi, "dtlb-2M", 3245 BITX(cp->cp_eax, 31, 24), BITX(cp->cp_eax, 23, 16)); 3246 add_amd_tlb(devi, "itlb-2M", 3247 BITX(cp->cp_eax, 15, 8), BITX(cp->cp_eax, 7, 0)); 3248 3249 /* 3250 * 4K L1 TLB configuration 3251 */ 3252 3253 switch (cpi->cpi_vendor) { 3254 uint_t nentries; 3255 case X86_VENDOR_TM: 3256 if (cpi->cpi_family >= 5) { 3257 /* 3258 * Crusoe processors have 256 TLB entries, but 3259 * cpuid data format constrains them to only 3260 * reporting 255 of them. 3261 */ 3262 if ((nentries = BITX(cp->cp_ebx, 23, 16)) == 255) 3263 nentries = 256; 3264 /* 3265 * Crusoe processors also have a unified TLB 3266 */ 3267 add_amd_tlb(devi, "tlb-4K", BITX(cp->cp_ebx, 31, 24), 3268 nentries); 3269 break; 3270 } 3271 /*FALLTHROUGH*/ 3272 default: 3273 add_amd_tlb(devi, itlb4k_str, 3274 BITX(cp->cp_ebx, 31, 24), BITX(cp->cp_ebx, 23, 16)); 3275 add_amd_tlb(devi, dtlb4k_str, 3276 BITX(cp->cp_ebx, 15, 8), BITX(cp->cp_ebx, 7, 0)); 3277 break; 3278 } 3279 3280 /* 3281 * data L1 cache configuration 3282 */ 3283 3284 add_amd_cache(devi, l1_dcache_str, 3285 BITX(cp->cp_ecx, 31, 24), BITX(cp->cp_ecx, 23, 16), 3286 BITX(cp->cp_ecx, 15, 8), BITX(cp->cp_ecx, 7, 0)); 3287 3288 /* 3289 * code L1 cache configuration 3290 */ 3291 3292 add_amd_cache(devi, l1_icache_str, 3293 BITX(cp->cp_edx, 31, 24), BITX(cp->cp_edx, 23, 16), 3294 BITX(cp->cp_edx, 15, 8), BITX(cp->cp_edx, 7, 0)); 3295 3296 if (cpi->cpi_xmaxeax < 0x80000006) 3297 return; 3298 cp = &cpi->cpi_extd[6]; 3299 3300 /* Check for a unified L2 TLB for large pages */ 3301 3302 if (BITX(cp->cp_eax, 31, 16) == 0) 3303 add_amd_l2_tlb(devi, "l2-tlb-2M", 3304 BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0)); 3305 else { 3306 add_amd_l2_tlb(devi, "l2-dtlb-2M", 3307 BITX(cp->cp_eax, 31, 28), BITX(cp->cp_eax, 27, 16)); 3308 add_amd_l2_tlb(devi, "l2-itlb-2M", 3309 BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0)); 3310 } 3311 3312 /* Check for a unified L2 TLB for 4K pages */ 3313 3314 if (BITX(cp->cp_ebx, 31, 16) == 0) { 3315 add_amd_l2_tlb(devi, "l2-tlb-4K", 3316 BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0)); 3317 } else { 3318 add_amd_l2_tlb(devi, "l2-dtlb-4K", 3319 BITX(cp->cp_eax, 31, 28), BITX(cp->cp_eax, 27, 16)); 3320 add_amd_l2_tlb(devi, "l2-itlb-4K", 3321 BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0)); 3322 } 3323 3324 add_amd_l2_cache(devi, l2_cache_str, 3325 BITX(cp->cp_ecx, 31, 16), BITX(cp->cp_ecx, 15, 12), 3326 BITX(cp->cp_ecx, 11, 8), BITX(cp->cp_ecx, 7, 0)); 3327 } 3328 3329 /* 3330 * There are two basic ways that the x86 world describes it cache 3331 * and tlb architecture - Intel's way and AMD's way. 3332 * 3333 * Return which flavor of cache architecture we should use 3334 */ 3335 static int 3336 x86_which_cacheinfo(struct cpuid_info *cpi) 3337 { 3338 switch (cpi->cpi_vendor) { 3339 case X86_VENDOR_Intel: 3340 if (cpi->cpi_maxeax >= 2) 3341 return (X86_VENDOR_Intel); 3342 break; 3343 case X86_VENDOR_AMD: 3344 /* 3345 * The K5 model 1 was the first part from AMD that reported 3346 * cache sizes via extended cpuid functions. 3347 */ 3348 if (cpi->cpi_family > 5 || 3349 (cpi->cpi_family == 5 && cpi->cpi_model >= 1)) 3350 return (X86_VENDOR_AMD); 3351 break; 3352 case X86_VENDOR_TM: 3353 if (cpi->cpi_family >= 5) 3354 return (X86_VENDOR_AMD); 3355 /*FALLTHROUGH*/ 3356 default: 3357 /* 3358 * If they have extended CPU data for 0x80000005 3359 * then we assume they have AMD-format cache 3360 * information. 3361 * 3362 * If not, and the vendor happens to be Cyrix, 3363 * then try our-Cyrix specific handler. 3364 * 3365 * If we're not Cyrix, then assume we're using Intel's 3366 * table-driven format instead. 3367 */ 3368 if (cpi->cpi_xmaxeax >= 0x80000005) 3369 return (X86_VENDOR_AMD); 3370 else if (cpi->cpi_vendor == X86_VENDOR_Cyrix) 3371 return (X86_VENDOR_Cyrix); 3372 else if (cpi->cpi_maxeax >= 2) 3373 return (X86_VENDOR_Intel); 3374 break; 3375 } 3376 return (-1); 3377 } 3378 3379 /* 3380 * create a node for the given cpu under the prom root node. 3381 * Also, create a cpu node in the device tree. 3382 */ 3383 static dev_info_t *cpu_nex_devi = NULL; 3384 static kmutex_t cpu_node_lock; 3385 3386 /* 3387 * Called from post_startup() and mp_startup() 3388 */ 3389 void 3390 add_cpunode2devtree(processorid_t cpu_id, struct cpuid_info *cpi) 3391 { 3392 dev_info_t *cpu_devi; 3393 int create; 3394 3395 mutex_enter(&cpu_node_lock); 3396 3397 /* 3398 * create a nexus node for all cpus identified as 'cpu_id' under 3399 * the root node. 3400 */ 3401 if (cpu_nex_devi == NULL) { 3402 if (ndi_devi_alloc(ddi_root_node(), "cpus", 3403 (pnode_t)DEVI_SID_NODEID, &cpu_nex_devi) != NDI_SUCCESS) { 3404 mutex_exit(&cpu_node_lock); 3405 return; 3406 } 3407 (void) ndi_devi_online(cpu_nex_devi, 0); 3408 } 3409 3410 /* 3411 * create a child node for cpu identified as 'cpu_id' 3412 */ 3413 cpu_devi = ddi_add_child(cpu_nex_devi, "cpu", DEVI_SID_NODEID, 3414 cpu_id); 3415 if (cpu_devi == NULL) { 3416 mutex_exit(&cpu_node_lock); 3417 return; 3418 } 3419 3420 /* device_type */ 3421 3422 (void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi, 3423 "device_type", "cpu"); 3424 3425 /* reg */ 3426 3427 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 3428 "reg", cpu_id); 3429 3430 /* cpu-mhz, and clock-frequency */ 3431 3432 if (cpu_freq > 0) { 3433 long long mul; 3434 3435 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 3436 "cpu-mhz", cpu_freq); 3437 3438 if ((mul = cpu_freq * 1000000LL) <= INT_MAX) 3439 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 3440 "clock-frequency", (int)mul); 3441 } 3442 3443 (void) ndi_devi_online(cpu_devi, 0); 3444 3445 if ((x86_feature & X86_CPUID) == 0) { 3446 mutex_exit(&cpu_node_lock); 3447 return; 3448 } 3449 3450 /* vendor-id */ 3451 3452 (void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi, 3453 "vendor-id", cpi->cpi_vendorstr); 3454 3455 if (cpi->cpi_maxeax == 0) { 3456 mutex_exit(&cpu_node_lock); 3457 return; 3458 } 3459 3460 /* 3461 * family, model, and step 3462 */ 3463 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 3464 "family", CPI_FAMILY(cpi)); 3465 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 3466 "cpu-model", CPI_MODEL(cpi)); 3467 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 3468 "stepping-id", CPI_STEP(cpi)); 3469 3470 /* type */ 3471 3472 switch (cpi->cpi_vendor) { 3473 case X86_VENDOR_Intel: 3474 create = 1; 3475 break; 3476 default: 3477 create = 0; 3478 break; 3479 } 3480 if (create) 3481 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 3482 "type", CPI_TYPE(cpi)); 3483 3484 /* ext-family */ 3485 3486 switch (cpi->cpi_vendor) { 3487 case X86_VENDOR_Intel: 3488 case X86_VENDOR_AMD: 3489 create = cpi->cpi_family >= 0xf; 3490 break; 3491 default: 3492 create = 0; 3493 break; 3494 } 3495 if (create) 3496 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 3497 "ext-family", CPI_FAMILY_XTD(cpi)); 3498 3499 /* ext-model */ 3500 3501 switch (cpi->cpi_vendor) { 3502 case X86_VENDOR_Intel: 3503 create = IS_EXTENDED_MODEL_INTEL(cpi); 3504 break; 3505 case X86_VENDOR_AMD: 3506 create = CPI_FAMILY(cpi) == 0xf; 3507 break; 3508 default: 3509 create = 0; 3510 break; 3511 } 3512 if (create) 3513 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 3514 "ext-model", CPI_MODEL_XTD(cpi)); 3515 3516 /* generation */ 3517 3518 switch (cpi->cpi_vendor) { 3519 case X86_VENDOR_AMD: 3520 /* 3521 * AMD K5 model 1 was the first part to support this 3522 */ 3523 create = cpi->cpi_xmaxeax >= 0x80000001; 3524 break; 3525 default: 3526 create = 0; 3527 break; 3528 } 3529 if (create) 3530 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 3531 "generation", BITX((cpi)->cpi_extd[1].cp_eax, 11, 8)); 3532 3533 /* brand-id */ 3534 3535 switch (cpi->cpi_vendor) { 3536 case X86_VENDOR_Intel: 3537 /* 3538 * brand id first appeared on Pentium III Xeon model 8, 3539 * and Celeron model 8 processors and Opteron 3540 */ 3541 create = cpi->cpi_family > 6 || 3542 (cpi->cpi_family == 6 && cpi->cpi_model >= 8); 3543 break; 3544 case X86_VENDOR_AMD: 3545 create = cpi->cpi_family >= 0xf; 3546 break; 3547 default: 3548 create = 0; 3549 break; 3550 } 3551 if (create && cpi->cpi_brandid != 0) { 3552 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 3553 "brand-id", cpi->cpi_brandid); 3554 } 3555 3556 /* chunks, and apic-id */ 3557 3558 switch (cpi->cpi_vendor) { 3559 /* 3560 * first available on Pentium IV and Opteron (K8) 3561 */ 3562 case X86_VENDOR_Intel: 3563 create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf; 3564 break; 3565 case X86_VENDOR_AMD: 3566 create = cpi->cpi_family >= 0xf; 3567 break; 3568 default: 3569 create = 0; 3570 break; 3571 } 3572 if (create) { 3573 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 3574 "chunks", CPI_CHUNKS(cpi)); 3575 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 3576 "apic-id", cpi->cpi_apicid); 3577 if (cpi->cpi_chipid >= 0) { 3578 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 3579 "chip#", cpi->cpi_chipid); 3580 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 3581 "clog#", cpi->cpi_clogid); 3582 } 3583 } 3584 3585 /* cpuid-features */ 3586 3587 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 3588 "cpuid-features", CPI_FEATURES_EDX(cpi)); 3589 3590 3591 /* cpuid-features-ecx */ 3592 3593 switch (cpi->cpi_vendor) { 3594 case X86_VENDOR_Intel: 3595 create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf; 3596 break; 3597 default: 3598 create = 0; 3599 break; 3600 } 3601 if (create) 3602 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 3603 "cpuid-features-ecx", CPI_FEATURES_ECX(cpi)); 3604 3605 /* ext-cpuid-features */ 3606 3607 switch (cpi->cpi_vendor) { 3608 case X86_VENDOR_Intel: 3609 case X86_VENDOR_AMD: 3610 case X86_VENDOR_Cyrix: 3611 case X86_VENDOR_TM: 3612 case X86_VENDOR_Centaur: 3613 create = cpi->cpi_xmaxeax >= 0x80000001; 3614 break; 3615 default: 3616 create = 0; 3617 break; 3618 } 3619 if (create) { 3620 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 3621 "ext-cpuid-features", CPI_FEATURES_XTD_EDX(cpi)); 3622 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 3623 "ext-cpuid-features-ecx", CPI_FEATURES_XTD_ECX(cpi)); 3624 } 3625 3626 /* 3627 * Brand String first appeared in Intel Pentium IV, AMD K5 3628 * model 1, and Cyrix GXm. On earlier models we try and 3629 * simulate something similar .. so this string should always 3630 * same -something- about the processor, however lame. 3631 */ 3632 (void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi, 3633 "brand-string", cpi->cpi_brandstr); 3634 3635 /* 3636 * Finally, cache and tlb information 3637 */ 3638 switch (x86_which_cacheinfo(cpi)) { 3639 case X86_VENDOR_Intel: 3640 intel_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props); 3641 break; 3642 case X86_VENDOR_Cyrix: 3643 cyrix_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props); 3644 break; 3645 case X86_VENDOR_AMD: 3646 amd_cache_info(cpi, cpu_devi); 3647 break; 3648 default: 3649 break; 3650 } 3651 3652 mutex_exit(&cpu_node_lock); 3653 } 3654 3655 struct l2info { 3656 int *l2i_csz; 3657 int *l2i_lsz; 3658 int *l2i_assoc; 3659 int l2i_ret; 3660 }; 3661 3662 /* 3663 * A cacheinfo walker that fetches the size, line-size and associativity 3664 * of the L2 cache 3665 */ 3666 static int 3667 intel_l2cinfo(void *arg, const struct cachetab *ct) 3668 { 3669 struct l2info *l2i = arg; 3670 int *ip; 3671 3672 if (ct->ct_label != l2_cache_str && 3673 ct->ct_label != sl2_cache_str) 3674 return (0); /* not an L2 -- keep walking */ 3675 3676 if ((ip = l2i->l2i_csz) != NULL) 3677 *ip = ct->ct_size; 3678 if ((ip = l2i->l2i_lsz) != NULL) 3679 *ip = ct->ct_line_size; 3680 if ((ip = l2i->l2i_assoc) != NULL) 3681 *ip = ct->ct_assoc; 3682 l2i->l2i_ret = ct->ct_size; 3683 return (1); /* was an L2 -- terminate walk */ 3684 } 3685 3686 /* 3687 * AMD L2/L3 Cache and TLB Associativity Field Definition: 3688 * 3689 * Unlike the associativity for the L1 cache and tlb where the 8 bit 3690 * value is the associativity, the associativity for the L2 cache and 3691 * tlb is encoded in the following table. The 4 bit L2 value serves as 3692 * an index into the amd_afd[] array to determine the associativity. 3693 * -1 is undefined. 0 is fully associative. 3694 */ 3695 3696 static int amd_afd[] = 3697 {-1, 1, 2, -1, 4, -1, 8, -1, 16, -1, 32, 48, 64, 96, 128, 0}; 3698 3699 static void 3700 amd_l2cacheinfo(struct cpuid_info *cpi, struct l2info *l2i) 3701 { 3702 struct cpuid_regs *cp; 3703 uint_t size, assoc; 3704 int i; 3705 int *ip; 3706 3707 if (cpi->cpi_xmaxeax < 0x80000006) 3708 return; 3709 cp = &cpi->cpi_extd[6]; 3710 3711 if ((i = BITX(cp->cp_ecx, 15, 12)) != 0 && 3712 (size = BITX(cp->cp_ecx, 31, 16)) != 0) { 3713 uint_t cachesz = size * 1024; 3714 assoc = amd_afd[i]; 3715 3716 ASSERT(assoc != -1); 3717 3718 if ((ip = l2i->l2i_csz) != NULL) 3719 *ip = cachesz; 3720 if ((ip = l2i->l2i_lsz) != NULL) 3721 *ip = BITX(cp->cp_ecx, 7, 0); 3722 if ((ip = l2i->l2i_assoc) != NULL) 3723 *ip = assoc; 3724 l2i->l2i_ret = cachesz; 3725 } 3726 } 3727 3728 int 3729 getl2cacheinfo(cpu_t *cpu, int *csz, int *lsz, int *assoc) 3730 { 3731 struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; 3732 struct l2info __l2info, *l2i = &__l2info; 3733 3734 l2i->l2i_csz = csz; 3735 l2i->l2i_lsz = lsz; 3736 l2i->l2i_assoc = assoc; 3737 l2i->l2i_ret = -1; 3738 3739 switch (x86_which_cacheinfo(cpi)) { 3740 case X86_VENDOR_Intel: 3741 intel_walk_cacheinfo(cpi, l2i, intel_l2cinfo); 3742 break; 3743 case X86_VENDOR_Cyrix: 3744 cyrix_walk_cacheinfo(cpi, l2i, intel_l2cinfo); 3745 break; 3746 case X86_VENDOR_AMD: 3747 amd_l2cacheinfo(cpi, l2i); 3748 break; 3749 default: 3750 break; 3751 } 3752 return (l2i->l2i_ret); 3753 } 3754 3755 #if !defined(__xpv) 3756 3757 uint32_t * 3758 cpuid_mwait_alloc(cpu_t *cpu) 3759 { 3760 uint32_t *ret; 3761 size_t mwait_size; 3762 3763 ASSERT(cpuid_checkpass(cpu, 2)); 3764 3765 mwait_size = cpu->cpu_m.mcpu_cpi->cpi_mwait.mon_max; 3766 if (mwait_size == 0) 3767 return (NULL); 3768 3769 /* 3770 * kmem_alloc() returns cache line size aligned data for mwait_size 3771 * allocations. mwait_size is currently cache line sized. Neither 3772 * of these implementation details are guarantied to be true in the 3773 * future. 3774 * 3775 * First try allocating mwait_size as kmem_alloc() currently returns 3776 * correctly aligned memory. If kmem_alloc() does not return 3777 * mwait_size aligned memory, then use mwait_size ROUNDUP. 3778 * 3779 * Set cpi_mwait.buf_actual and cpi_mwait.size_actual in case we 3780 * decide to free this memory. 3781 */ 3782 ret = kmem_zalloc(mwait_size, KM_SLEEP); 3783 if (ret == (uint32_t *)P2ROUNDUP((uintptr_t)ret, mwait_size)) { 3784 cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = ret; 3785 cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = mwait_size; 3786 *ret = MWAIT_RUNNING; 3787 return (ret); 3788 } else { 3789 kmem_free(ret, mwait_size); 3790 ret = kmem_zalloc(mwait_size * 2, KM_SLEEP); 3791 cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = ret; 3792 cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = mwait_size * 2; 3793 ret = (uint32_t *)P2ROUNDUP((uintptr_t)ret, mwait_size); 3794 *ret = MWAIT_RUNNING; 3795 return (ret); 3796 } 3797 } 3798 3799 void 3800 cpuid_mwait_free(cpu_t *cpu) 3801 { 3802 ASSERT(cpuid_checkpass(cpu, 2)); 3803 3804 if (cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual != NULL && 3805 cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual > 0) { 3806 kmem_free(cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual, 3807 cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual); 3808 } 3809 3810 cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = NULL; 3811 cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = 0; 3812 } 3813 3814 void 3815 patch_tsc_read(int flag) 3816 { 3817 size_t cnt; 3818 3819 switch (flag) { 3820 case X86_NO_TSC: 3821 cnt = &_no_rdtsc_end - &_no_rdtsc_start; 3822 (void) memcpy((void *)tsc_read, (void *)&_no_rdtsc_start, cnt); 3823 break; 3824 case X86_HAVE_TSCP: 3825 cnt = &_tscp_end - &_tscp_start; 3826 (void) memcpy((void *)tsc_read, (void *)&_tscp_start, cnt); 3827 break; 3828 case X86_TSC_MFENCE: 3829 cnt = &_tsc_mfence_end - &_tsc_mfence_start; 3830 (void) memcpy((void *)tsc_read, 3831 (void *)&_tsc_mfence_start, cnt); 3832 break; 3833 case X86_TSC_LFENCE: 3834 cnt = &_tsc_lfence_end - &_tsc_lfence_start; 3835 (void) memcpy((void *)tsc_read, 3836 (void *)&_tsc_lfence_start, cnt); 3837 break; 3838 default: 3839 break; 3840 } 3841 } 3842 3843 #endif /* !__xpv */ 3844