/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Various routines to handle identification
 * and classification of x86 processors.
 */

#include <sys/types.h>
#include <sys/archsystm.h>
#include <sys/x86_archext.h>
#include <sys/kmem.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/cpuvar.h>
#include <sys/processor.h>
#include <sys/chip.h>
#include <sys/fp.h>
#include <sys/controlregs.h>
#include <sys/auxv_386.h>
#include <sys/bitmap.h>
#include <sys/controlregs.h>
#include <sys/memnode.h>

/*
 * Pass 0 of cpuid feature analysis happens in locore. It contains special code
 * to recognize Cyrix processors that are not cpuid-compliant, and to deal with
 * them accordingly. For most modern processors, feature detection occurs here
 * in pass 1.
 *
 * Pass 1 of cpuid feature analysis happens just at the beginning of mlsetup()
 * for the boot CPU and does the basic analysis that the early kernel needs.
 * x86_feature is set based on the return value of cpuid_pass1() of the boot
 * CPU.
 *
 * Pass 1 includes:
 *
 *	o Determining vendor/model/family/stepping and setting x86_type and
 *	  x86_vendor accordingly.
 *	o Processing the feature flags returned by the cpuid instruction while
 *	  applying any workarounds or tricks for the specific processor.
 *	o Mapping the feature flags into Solaris feature bits (X86_*).
 *	o Processing extended feature flags if supported by the processor,
 *	  again while applying specific processor knowledge.
 *	o Determining the CMT characteristics of the system.
 *
 * Pass 1 is done on non-boot CPUs during their initialization and the results
 * are used only as a meager attempt at ensuring that all processors within the
 * system support the same features.
 *
 * Pass 2 of cpuid feature analysis happens just at the beginning
 * of startup(). It just copies in and corrects the remainder
 * of the cpuid data we depend on: standard cpuid functions that we didn't
 * need for pass1 feature analysis, and extended cpuid functions beyond the
 * simple feature processing done in pass1.
 *
 * Pass 3 of cpuid analysis is invoked after basic kernel services; in
 * particular kernel memory allocation has been made available. It creates a
 * readable brand string based on the data collected in the first two passes.
 *
 * Pass 4 of cpuid analysis is invoked after post_startup() when all
 * the support infrastructure for various hardware features has been
 * initialized. It determines which processor features will be reported
 * to userland via the aux vector.
 *
 * All passes are executed on all CPUs, but only the boot CPU determines what
 * features the kernel will use.
 *
 * Much of the worst junk in this file is for the support of processors
 * that didn't really implement the cpuid instruction properly.
 *
 * NOTE: The accessor functions (cpuid_get*) are aware of, and ASSERT upon,
 * the pass numbers. Accordingly, changes to the pass code may require changes
 * to the accessor code.
 */

uint_t x86_feature = 0;
uint_t x86_vendor = X86_VENDOR_IntelClone;
uint_t x86_type = X86_TYPE_OTHER;

ulong_t cr4_value;
uint_t pentiumpro_bug4046376;
uint_t pentiumpro_bug4064495;

uint_t enable486;

/*
 * This set of strings is for processors rumored to support the cpuid
 * instruction, and is used by locore.s to figure out how to set x86_vendor
 */
const char CyrixInstead[] = "CyrixInstead";

/*
 * These constants determine how many of the elements of the
 * cpuid we cache in the cpuid_info data structure; the
 * remaining elements are accessible via the cpuid instruction.
 */

#define	NMAX_CPI_STD	6		/* eax = 0 .. 5 */
#define	NMAX_CPI_EXTD	9		/* eax = 0x80000000 .. 0x80000008 */

struct cpuid_info {
	uint_t cpi_pass;		/* last pass completed */
	/*
	 * standard function information
	 */
	uint_t cpi_maxeax;		/* fn 0: %eax */
	char cpi_vendorstr[13];		/* fn 0: %ebx:%ecx:%edx */
	uint_t cpi_vendor;		/* enum of cpi_vendorstr */

	uint_t cpi_family;		/* fn 1: extended family */
	uint_t cpi_model;		/* fn 1: extended model */
	uint_t cpi_step;		/* fn 1: stepping */
	chipid_t cpi_chipid;		/* fn 1: %ebx: chip # on ht cpus */
	uint_t cpi_brandid;		/* fn 1: %ebx: brand ID */
	int cpi_clogid;			/* fn 1: %ebx: thread # */
	uint_t cpi_ncpu_per_chip;	/* fn 1: %ebx: logical cpu count */
	uint8_t cpi_cacheinfo[16];	/* fn 2: intel-style cache desc */
	uint_t cpi_ncache;		/* fn 2: number of elements */
	struct cpuid_regs cpi_std[NMAX_CPI_STD];	/* 0 .. 5 */
	/*
	 * extended function information
	 */
	uint_t cpi_xmaxeax;		/* fn 0x80000000: %eax */
	char cpi_brandstr[49];		/* fn 0x8000000[234] */
	uint8_t cpi_pabits;		/* fn 0x80000008: %eax */
	uint8_t cpi_vabits;		/* fn 0x80000008: %eax */
	struct cpuid_regs cpi_extd[NMAX_CPI_EXTD];	/* 0x8000000[0-8] */
	id_t cpi_coreid;
	uint_t cpi_ncore_per_chip;	/* AMD: fn 0x80000008: %ecx[7-0] */
					/* Intel: fn 4: %eax[31-26] */
	/*
	 * supported feature information
	 */
	uint32_t cpi_support[4];
#define	STD_EDX_FEATURES	0
#define	AMD_EDX_FEATURES	1
#define	TM_EDX_FEATURES		2
#define	STD_ECX_FEATURES	3

	/*
	 * Synthesized information, where known.
	 */
	uint32_t cpi_chiprev;		/* See X86_CHIPREV_* in x86_archext.h */
	const char *cpi_chiprevstr;	/* May be NULL if chiprev unknown */
	uint32_t cpi_socket;		/* Chip package/socket type */
};


static struct cpuid_info cpuid_info0;

/*
 * These bit fields are defined by the Intel Application Note AP-485
 * "Intel Processor Identification and the CPUID Instruction"
 */
#define	CPI_FAMILY_XTD(cpi)	BITX((cpi)->cpi_std[1].cp_eax, 27, 20)
#define	CPI_MODEL_XTD(cpi)	BITX((cpi)->cpi_std[1].cp_eax, 19, 16)
#define	CPI_TYPE(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 13, 12)
#define	CPI_FAMILY(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 11, 8)
#define	CPI_STEP(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 3, 0)
#define	CPI_MODEL(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 7, 4)

#define	CPI_FEATURES_EDX(cpi)		((cpi)->cpi_std[1].cp_edx)
#define	CPI_FEATURES_ECX(cpi)		((cpi)->cpi_std[1].cp_ecx)
#define	CPI_FEATURES_XTD_EDX(cpi)	((cpi)->cpi_extd[1].cp_edx)
#define	CPI_FEATURES_XTD_ECX(cpi)	((cpi)->cpi_extd[1].cp_ecx)

#define	CPI_BRANDID(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 7, 0)
#define	CPI_CHUNKS(cpi)		BITX((cpi)->cpi_std[1].cp_ebx, 15, 7)
#define	CPI_CPU_COUNT(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 23, 16)
#define	CPI_APIC_ID(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 31, 24)

#define	CPI_MAXEAX_MAX		0x100		/* sanity control */
#define	CPI_XMAXEAX_MAX		0x80000100

/*
 * A couple of shorthand macros to identify "later" P6-family chips
 * like the Pentium M and Core. First, the "older" P6-based stuff
 * (loosely defined as "pre-Pentium-4"):
 * P6, PII, Mobile PII, PII Xeon, PIII, Mobile PIII, PIII Xeon
 */

#define	IS_LEGACY_P6(cpi) (			\
	cpi->cpi_family == 6 && 		\
		(cpi->cpi_model == 1 ||		\
		cpi->cpi_model == 3 ||		\
		cpi->cpi_model == 5 ||		\
		cpi->cpi_model == 6 ||		\
		cpi->cpi_model == 7 ||		\
		cpi->cpi_model == 8 ||		\
		cpi->cpi_model == 0xA ||	\
		cpi->cpi_model == 0xB)		\
)

/* A "new F6" is everything with family 6 that's not the above */
#define	IS_NEW_F6(cpi) ((cpi->cpi_family == 6) && !IS_LEGACY_P6(cpi))

/*
 * AMD family 0xf socket types.
 * First index is 0 for revs B thru E, 1 for F and G.
 * Second index by (model & 0x3)
 */
static uint32_t amd_skts[2][4] = {
	{
		X86_SOCKET_754,		/* 0b00 */
		X86_SOCKET_940,		/* 0b01 */
		X86_SOCKET_754,		/* 0b10 */
		X86_SOCKET_939		/* 0b11 */
	},
	{
		X86_SOCKET_S1g1,	/* 0b00 */
		X86_SOCKET_F1207,	/* 0b01 */
		X86_SOCKET_UNKNOWN,	/* 0b10 */
		X86_SOCKET_AM2		/* 0b11 */
	}
};

/*
 * Table for mapping AMD Family 0xf model/stepping combination to
 * chip "revision" and socket type. Only rm_family 0xf is used at the
 * moment, but AMD family 0x10 will extend the existing revision names
 * so will likely also use this table.
 *
 * The first member of this array that matches a given family, extended model
 * plus model range, and stepping range will be considered a match.
 */
static const struct amd_rev_mapent {
	uint_t rm_family;
	uint_t rm_modello;
	uint_t rm_modelhi;
	uint_t rm_steplo;
	uint_t rm_stephi;
	uint32_t rm_chiprev;
	const char *rm_chiprevstr;
	int rm_sktidx;
} amd_revmap[] = {
	/*
	 * Rev B includes model 0x4 stepping 0 and model 0x5 stepping 0 and 1.
	 */
	{ 0xf, 0x04, 0x04, 0x0, 0x0, X86_CHIPREV_AMD_F_REV_B, "B", 0 },
	{ 0xf, 0x05, 0x05, 0x0, 0x1, X86_CHIPREV_AMD_F_REV_B, "B", 0 },
	/*
	 * Rev C0 includes model 0x4 stepping 8 and model 0x5 stepping 8
	 */
	{ 0xf, 0x04, 0x05, 0x8, 0x8, X86_CHIPREV_AMD_F_REV_C0, "C0", 0 },
	/*
	 * Rev CG is the rest of extended model 0x0 - i.e., everything
	 * but the rev B and C0 combinations covered above.
	 */
	{ 0xf, 0x00, 0x0f, 0x0, 0xf, X86_CHIPREV_AMD_F_REV_CG, "CG", 0 },
	/*
	 * Rev D has extended model 0x1.
	 */
	{ 0xf, 0x10, 0x1f, 0x0, 0xf, X86_CHIPREV_AMD_F_REV_D, "D", 0 },
	/*
	 * Rev E has extended model 0x2.
	 * Extended model 0x3 is unused but available to grow into.
	 */
	{ 0xf, 0x20, 0x3f, 0x0, 0xf, X86_CHIPREV_AMD_F_REV_E, "E", 0 },
	/*
	 * Rev F has extended models 0x4 and 0x5.
	 */
	{ 0xf, 0x40, 0x5f, 0x0, 0xf, X86_CHIPREV_AMD_F_REV_F, "F", 1 },
	/*
	 * Rev G has extended model 0x6.
	 */
	{ 0xf, 0x60, 0x6f, 0x0, 0xf, X86_CHIPREV_AMD_F_REV_G, "G", 1 },
};

static void
synth_amd_info(struct cpuid_info *cpi)
{
	const struct amd_rev_mapent *rmp;
	uint_t family, model, step;
	int i;

	/*
	 * Currently only AMD family 0xf uses these fields.
	 */
	if (cpi->cpi_family != 0xf)
		return;

	family = cpi->cpi_family;
	model = cpi->cpi_model;
	step = cpi->cpi_step;

	for (i = 0, rmp = amd_revmap; i < sizeof (amd_revmap) / sizeof (*rmp);
	    i++, rmp++) {
		if (family == rmp->rm_family &&
		    model >= rmp->rm_modello && model <= rmp->rm_modelhi &&
		    step >= rmp->rm_steplo && step <= rmp->rm_stephi) {
			cpi->cpi_chiprev = rmp->rm_chiprev;
			cpi->cpi_chiprevstr = rmp->rm_chiprevstr;
			cpi->cpi_socket = amd_skts[rmp->rm_sktidx][model & 0x3];
			return;
		}
	}
}

static void
synth_info(struct cpuid_info *cpi)
{
	cpi->cpi_chiprev = X86_CHIPREV_UNKNOWN;
	cpi->cpi_chiprevstr = "Unknown";
	cpi->cpi_socket = X86_SOCKET_UNKNOWN;

	switch (cpi->cpi_vendor) {
	case X86_VENDOR_AMD:
		synth_amd_info(cpi);
		break;

	default:
		break;

	}
}

/*
 * Some undocumented ways of patching the results of the cpuid
 * instruction to permit running Solaris 10 on future cpus that
 * we don't currently support. Could be set to non-zero values
 * via settings in eeprom.
 */

uint32_t cpuid_feature_ecx_include;
uint32_t cpuid_feature_ecx_exclude;
uint32_t cpuid_feature_edx_include;
uint32_t cpuid_feature_edx_exclude;

uint_t
cpuid_pass1(cpu_t *cpu)
{
	uint32_t mask_ecx, mask_edx;
	uint_t feature = X86_CPUID;
	struct cpuid_info *cpi;
	struct cpuid_regs *cp;
	int xcpuid;

	/*
	 * By convention, cpu0 is the boot cpu, which is called
	 * before memory allocation is available. Other cpus are
	 * initialized when memory becomes available.
369 */ 370 if (cpu->cpu_id == 0) 371 cpu->cpu_m.mcpu_cpi = cpi = &cpuid_info0; 372 else 373 cpu->cpu_m.mcpu_cpi = cpi = 374 kmem_zalloc(sizeof (*cpi), KM_SLEEP); 375 376 cp = &cpi->cpi_std[0]; 377 cp->cp_eax = 0; 378 cpi->cpi_maxeax = __cpuid_insn(cp); 379 { 380 uint32_t *iptr = (uint32_t *)cpi->cpi_vendorstr; 381 *iptr++ = cp->cp_ebx; 382 *iptr++ = cp->cp_edx; 383 *iptr++ = cp->cp_ecx; 384 *(char *)&cpi->cpi_vendorstr[12] = '\0'; 385 } 386 387 /* 388 * Map the vendor string to a type code 389 */ 390 if (strcmp(cpi->cpi_vendorstr, "GenuineIntel") == 0) 391 cpi->cpi_vendor = X86_VENDOR_Intel; 392 else if (strcmp(cpi->cpi_vendorstr, "AuthenticAMD") == 0) 393 cpi->cpi_vendor = X86_VENDOR_AMD; 394 else if (strcmp(cpi->cpi_vendorstr, "GenuineTMx86") == 0) 395 cpi->cpi_vendor = X86_VENDOR_TM; 396 else if (strcmp(cpi->cpi_vendorstr, CyrixInstead) == 0) 397 /* 398 * CyrixInstead is a variable used by the Cyrix detection code 399 * in locore. 400 */ 401 cpi->cpi_vendor = X86_VENDOR_Cyrix; 402 else if (strcmp(cpi->cpi_vendorstr, "UMC UMC UMC ") == 0) 403 cpi->cpi_vendor = X86_VENDOR_UMC; 404 else if (strcmp(cpi->cpi_vendorstr, "NexGenDriven") == 0) 405 cpi->cpi_vendor = X86_VENDOR_NexGen; 406 else if (strcmp(cpi->cpi_vendorstr, "CentaurHauls") == 0) 407 cpi->cpi_vendor = X86_VENDOR_Centaur; 408 else if (strcmp(cpi->cpi_vendorstr, "RiseRiseRise") == 0) 409 cpi->cpi_vendor = X86_VENDOR_Rise; 410 else if (strcmp(cpi->cpi_vendorstr, "SiS SiS SiS ") == 0) 411 cpi->cpi_vendor = X86_VENDOR_SiS; 412 else if (strcmp(cpi->cpi_vendorstr, "Geode by NSC") == 0) 413 cpi->cpi_vendor = X86_VENDOR_NSC; 414 else 415 cpi->cpi_vendor = X86_VENDOR_IntelClone; 416 417 x86_vendor = cpi->cpi_vendor; /* for compatibility */ 418 419 /* 420 * Limit the range in case of weird hardware 421 */ 422 if (cpi->cpi_maxeax > CPI_MAXEAX_MAX) 423 cpi->cpi_maxeax = CPI_MAXEAX_MAX; 424 if (cpi->cpi_maxeax < 1) 425 goto pass1_done; 426 427 cp = &cpi->cpi_std[1]; 428 cp->cp_eax = 1; 429 (void) __cpuid_insn(cp); 430 431 /* 432 * Extract identifying constants for easy access. 433 */ 434 cpi->cpi_model = CPI_MODEL(cpi); 435 cpi->cpi_family = CPI_FAMILY(cpi); 436 437 if (cpi->cpi_family == 0xf) 438 cpi->cpi_family += CPI_FAMILY_XTD(cpi); 439 440 /* 441 * Beware: AMD uses "extended model" iff *FAMILY* == 0xf. 442 * Intel, and presumably everyone else, uses model == 0xf, as 443 * one would expect (max value means possible overflow). Sigh. 
444 */ 445 446 switch (cpi->cpi_vendor) { 447 case X86_VENDOR_AMD: 448 if (cpi->cpi_family == 0xf) 449 cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4; 450 break; 451 default: 452 if (cpi->cpi_model == 0xf) 453 cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4; 454 break; 455 } 456 457 cpi->cpi_step = CPI_STEP(cpi); 458 cpi->cpi_brandid = CPI_BRANDID(cpi); 459 460 /* 461 * *default* assumptions: 462 * - believe %edx feature word 463 * - ignore %ecx feature word 464 * - 32-bit virtual and physical addressing 465 */ 466 mask_edx = 0xffffffff; 467 mask_ecx = 0; 468 469 cpi->cpi_pabits = cpi->cpi_vabits = 32; 470 471 switch (cpi->cpi_vendor) { 472 case X86_VENDOR_Intel: 473 if (cpi->cpi_family == 5) 474 x86_type = X86_TYPE_P5; 475 else if (IS_LEGACY_P6(cpi)) { 476 x86_type = X86_TYPE_P6; 477 pentiumpro_bug4046376 = 1; 478 pentiumpro_bug4064495 = 1; 479 /* 480 * Clear the SEP bit when it was set erroneously 481 */ 482 if (cpi->cpi_model < 3 && cpi->cpi_step < 3) 483 cp->cp_edx &= ~CPUID_INTC_EDX_SEP; 484 } else if (IS_NEW_F6(cpi) || cpi->cpi_family == 0xf) { 485 x86_type = X86_TYPE_P4; 486 /* 487 * We don't currently depend on any of the %ecx 488 * features until Prescott, so we'll only check 489 * this from P4 onwards. We might want to revisit 490 * that idea later. 491 */ 492 mask_ecx = 0xffffffff; 493 } else if (cpi->cpi_family > 0xf) 494 mask_ecx = 0xffffffff; 495 break; 496 case X86_VENDOR_IntelClone: 497 default: 498 break; 499 case X86_VENDOR_AMD: 500 #if defined(OPTERON_ERRATUM_108) 501 if (cpi->cpi_family == 0xf && cpi->cpi_model == 0xe) { 502 cp->cp_eax = (0xf0f & cp->cp_eax) | 0xc0; 503 cpi->cpi_model = 0xc; 504 } else 505 #endif 506 if (cpi->cpi_family == 5) { 507 /* 508 * AMD K5 and K6 509 * 510 * These CPUs have an incomplete implementation 511 * of MCA/MCE which we mask away. 512 */ 513 mask_edx &= ~(CPUID_INTC_EDX_MCE | CPUID_INTC_EDX_MCA); 514 515 /* 516 * Model 0 uses the wrong (APIC) bit 517 * to indicate PGE. Fix it here. 518 */ 519 if (cpi->cpi_model == 0) { 520 if (cp->cp_edx & 0x200) { 521 cp->cp_edx &= ~0x200; 522 cp->cp_edx |= CPUID_INTC_EDX_PGE; 523 } 524 } 525 526 /* 527 * Early models had problems w/ MMX; disable. 528 */ 529 if (cpi->cpi_model < 6) 530 mask_edx &= ~CPUID_INTC_EDX_MMX; 531 } 532 533 /* 534 * For newer families, SSE3 and CX16, at least, are valid; 535 * enable all 536 */ 537 if (cpi->cpi_family >= 0xf) 538 mask_ecx = 0xffffffff; 539 break; 540 case X86_VENDOR_TM: 541 /* 542 * workaround the NT workaround in CMS 4.1 543 */ 544 if (cpi->cpi_family == 5 && cpi->cpi_model == 4 && 545 (cpi->cpi_step == 2 || cpi->cpi_step == 3)) 546 cp->cp_edx |= CPUID_INTC_EDX_CX8; 547 break; 548 case X86_VENDOR_Centaur: 549 /* 550 * workaround the NT workarounds again 551 */ 552 if (cpi->cpi_family == 6) 553 cp->cp_edx |= CPUID_INTC_EDX_CX8; 554 break; 555 case X86_VENDOR_Cyrix: 556 /* 557 * We rely heavily on the probing in locore 558 * to actually figure out what parts, if any, 559 * of the Cyrix cpuid instruction to believe. 
560 */ 561 switch (x86_type) { 562 case X86_TYPE_CYRIX_486: 563 mask_edx = 0; 564 break; 565 case X86_TYPE_CYRIX_6x86: 566 mask_edx = 0; 567 break; 568 case X86_TYPE_CYRIX_6x86L: 569 mask_edx = 570 CPUID_INTC_EDX_DE | 571 CPUID_INTC_EDX_CX8; 572 break; 573 case X86_TYPE_CYRIX_6x86MX: 574 mask_edx = 575 CPUID_INTC_EDX_DE | 576 CPUID_INTC_EDX_MSR | 577 CPUID_INTC_EDX_CX8 | 578 CPUID_INTC_EDX_PGE | 579 CPUID_INTC_EDX_CMOV | 580 CPUID_INTC_EDX_MMX; 581 break; 582 case X86_TYPE_CYRIX_GXm: 583 mask_edx = 584 CPUID_INTC_EDX_MSR | 585 CPUID_INTC_EDX_CX8 | 586 CPUID_INTC_EDX_CMOV | 587 CPUID_INTC_EDX_MMX; 588 break; 589 case X86_TYPE_CYRIX_MediaGX: 590 break; 591 case X86_TYPE_CYRIX_MII: 592 case X86_TYPE_VIA_CYRIX_III: 593 mask_edx = 594 CPUID_INTC_EDX_DE | 595 CPUID_INTC_EDX_TSC | 596 CPUID_INTC_EDX_MSR | 597 CPUID_INTC_EDX_CX8 | 598 CPUID_INTC_EDX_PGE | 599 CPUID_INTC_EDX_CMOV | 600 CPUID_INTC_EDX_MMX; 601 break; 602 default: 603 break; 604 } 605 break; 606 } 607 608 /* 609 * Now we've figured out the masks that determine 610 * which bits we choose to believe, apply the masks 611 * to the feature words, then map the kernel's view 612 * of these feature words into its feature word. 613 */ 614 cp->cp_edx &= mask_edx; 615 cp->cp_ecx &= mask_ecx; 616 617 /* 618 * fold in fix ups 619 */ 620 621 cp->cp_edx |= cpuid_feature_edx_include; 622 cp->cp_edx &= ~cpuid_feature_edx_exclude; 623 624 625 cp->cp_ecx |= cpuid_feature_ecx_include; 626 cp->cp_ecx &= ~cpuid_feature_ecx_exclude; 627 628 if (cp->cp_edx & CPUID_INTC_EDX_PSE) 629 feature |= X86_LARGEPAGE; 630 if (cp->cp_edx & CPUID_INTC_EDX_TSC) 631 feature |= X86_TSC; 632 if (cp->cp_edx & CPUID_INTC_EDX_MSR) 633 feature |= X86_MSR; 634 if (cp->cp_edx & CPUID_INTC_EDX_MTRR) 635 feature |= X86_MTRR; 636 if (cp->cp_edx & CPUID_INTC_EDX_PGE) 637 feature |= X86_PGE; 638 if (cp->cp_edx & CPUID_INTC_EDX_CMOV) 639 feature |= X86_CMOV; 640 if (cp->cp_edx & CPUID_INTC_EDX_MMX) 641 feature |= X86_MMX; 642 if ((cp->cp_edx & CPUID_INTC_EDX_MCE) != 0 && 643 (cp->cp_edx & CPUID_INTC_EDX_MCA) != 0) 644 feature |= X86_MCA; 645 if (cp->cp_edx & CPUID_INTC_EDX_PAE) 646 feature |= X86_PAE; 647 if (cp->cp_edx & CPUID_INTC_EDX_CX8) 648 feature |= X86_CX8; 649 /* 650 * Once this bit was thought questionable, but it looks like it's 651 * back, as of Application Note 485 March 2005 (24161829.pdf) 652 */ 653 if (cp->cp_ecx & CPUID_INTC_ECX_CX16) 654 feature |= X86_CX16; 655 if (cp->cp_edx & CPUID_INTC_EDX_PAT) 656 feature |= X86_PAT; 657 if (cp->cp_edx & CPUID_INTC_EDX_SEP) 658 feature |= X86_SEP; 659 if (cp->cp_edx & CPUID_INTC_EDX_FXSR) { 660 /* 661 * In our implementation, fxsave/fxrstor 662 * are prerequisites before we'll even 663 * try and do SSE things. 664 */ 665 if (cp->cp_edx & CPUID_INTC_EDX_SSE) 666 feature |= X86_SSE; 667 if (cp->cp_edx & CPUID_INTC_EDX_SSE2) 668 feature |= X86_SSE2; 669 if (cp->cp_ecx & CPUID_INTC_ECX_SSE3) 670 feature |= X86_SSE3; 671 } 672 if (cp->cp_edx & CPUID_INTC_EDX_DE) 673 cr4_value |= CR4_DE; 674 675 if (feature & X86_PAE) 676 cpi->cpi_pabits = 36; 677 678 /* 679 * Hyperthreading configuration is slightly tricky on Intel 680 * and pure clones, and even trickier on AMD. 681 * 682 * (AMD chose to set the HTT bit on their CMP processors, 683 * even though they're not actually hyperthreaded. Thus it 684 * takes a bit more work to figure out what's really going 685 * on ... 
see the handling of the CMP_LEGACY bit below) 686 */ 687 if (cp->cp_edx & CPUID_INTC_EDX_HTT) { 688 cpi->cpi_ncpu_per_chip = CPI_CPU_COUNT(cpi); 689 if (cpi->cpi_ncpu_per_chip > 1) 690 feature |= X86_HTT; 691 } else { 692 cpi->cpi_ncpu_per_chip = 1; 693 } 694 695 /* 696 * Work on the "extended" feature information, doing 697 * some basic initialization for cpuid_pass2() 698 */ 699 xcpuid = 0; 700 switch (cpi->cpi_vendor) { 701 case X86_VENDOR_Intel: 702 if (IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf) 703 xcpuid++; 704 break; 705 case X86_VENDOR_AMD: 706 if (cpi->cpi_family > 5 || 707 (cpi->cpi_family == 5 && cpi->cpi_model >= 1)) 708 xcpuid++; 709 break; 710 case X86_VENDOR_Cyrix: 711 /* 712 * Only these Cyrix CPUs are -known- to support 713 * extended cpuid operations. 714 */ 715 if (x86_type == X86_TYPE_VIA_CYRIX_III || 716 x86_type == X86_TYPE_CYRIX_GXm) 717 xcpuid++; 718 break; 719 case X86_VENDOR_Centaur: 720 case X86_VENDOR_TM: 721 default: 722 xcpuid++; 723 break; 724 } 725 726 if (xcpuid) { 727 cp = &cpi->cpi_extd[0]; 728 cp->cp_eax = 0x80000000; 729 cpi->cpi_xmaxeax = __cpuid_insn(cp); 730 } 731 732 if (cpi->cpi_xmaxeax & 0x80000000) { 733 734 if (cpi->cpi_xmaxeax > CPI_XMAXEAX_MAX) 735 cpi->cpi_xmaxeax = CPI_XMAXEAX_MAX; 736 737 switch (cpi->cpi_vendor) { 738 case X86_VENDOR_Intel: 739 case X86_VENDOR_AMD: 740 if (cpi->cpi_xmaxeax < 0x80000001) 741 break; 742 cp = &cpi->cpi_extd[1]; 743 cp->cp_eax = 0x80000001; 744 (void) __cpuid_insn(cp); 745 if (cpi->cpi_vendor == X86_VENDOR_AMD && 746 cpi->cpi_family == 5 && 747 cpi->cpi_model == 6 && 748 cpi->cpi_step == 6) { 749 /* 750 * K6 model 6 uses bit 10 to indicate SYSC 751 * Later models use bit 11. Fix it here. 752 */ 753 if (cp->cp_edx & 0x400) { 754 cp->cp_edx &= ~0x400; 755 cp->cp_edx |= CPUID_AMD_EDX_SYSC; 756 } 757 } 758 759 /* 760 * Compute the additions to the kernel's feature word. 761 */ 762 if (cp->cp_edx & CPUID_AMD_EDX_NX) 763 feature |= X86_NX; 764 765 /* 766 * If both the HTT and CMP_LEGACY bits are set, 767 * then we're not actually HyperThreaded. Read 768 * "AMD CPUID Specification" for more details. 769 */ 770 if (cpi->cpi_vendor == X86_VENDOR_AMD && 771 (feature & X86_HTT) && 772 (cp->cp_ecx & CPUID_AMD_ECX_CMP_LEGACY)) { 773 feature &= ~X86_HTT; 774 feature |= X86_CMP; 775 } 776 #if defined(_LP64) 777 /* 778 * It's really tricky to support syscall/sysret in 779 * the i386 kernel; we rely on sysenter/sysexit 780 * instead. In the amd64 kernel, things are -way- 781 * better. 782 */ 783 if (cp->cp_edx & CPUID_AMD_EDX_SYSC) 784 feature |= X86_ASYSC; 785 786 /* 787 * While we're thinking about system calls, note 788 * that AMD processors don't support sysenter 789 * in long mode at all, so don't try to program them. 790 */ 791 if (x86_vendor == X86_VENDOR_AMD) 792 feature &= ~X86_SEP; 793 #endif 794 break; 795 default: 796 break; 797 } 798 799 /* 800 * Get CPUID data about processor cores and hyperthreads. 801 */ 802 switch (cpi->cpi_vendor) { 803 case X86_VENDOR_Intel: 804 if (cpi->cpi_maxeax >= 4) { 805 cp = &cpi->cpi_std[4]; 806 cp->cp_eax = 4; 807 cp->cp_ecx = 0; 808 (void) __cpuid_insn(cp); 809 } 810 /*FALLTHROUGH*/ 811 case X86_VENDOR_AMD: 812 if (cpi->cpi_xmaxeax < 0x80000008) 813 break; 814 cp = &cpi->cpi_extd[8]; 815 cp->cp_eax = 0x80000008; 816 (void) __cpuid_insn(cp); 817 /* 818 * Virtual and physical address limits from 819 * cpuid override previously guessed values. 
820 */ 821 cpi->cpi_pabits = BITX(cp->cp_eax, 7, 0); 822 cpi->cpi_vabits = BITX(cp->cp_eax, 15, 8); 823 break; 824 default: 825 break; 826 } 827 828 switch (cpi->cpi_vendor) { 829 case X86_VENDOR_Intel: 830 if (cpi->cpi_maxeax < 4) { 831 cpi->cpi_ncore_per_chip = 1; 832 break; 833 } else { 834 cpi->cpi_ncore_per_chip = 835 BITX((cpi)->cpi_std[4].cp_eax, 31, 26) + 1; 836 } 837 break; 838 case X86_VENDOR_AMD: 839 if (cpi->cpi_xmaxeax < 0x80000008) { 840 cpi->cpi_ncore_per_chip = 1; 841 break; 842 } else { 843 cpi->cpi_ncore_per_chip = 844 BITX((cpi)->cpi_extd[8].cp_ecx, 7, 0) + 1; 845 } 846 break; 847 default: 848 cpi->cpi_ncore_per_chip = 1; 849 break; 850 } 851 852 } 853 854 /* 855 * If more than one core, then this processor is CMP. 856 */ 857 if (cpi->cpi_ncore_per_chip > 1) 858 feature |= X86_CMP; 859 /* 860 * If the number of cores is the same as the number 861 * of CPUs, then we cannot have HyperThreading. 862 */ 863 if (cpi->cpi_ncpu_per_chip == cpi->cpi_ncore_per_chip) 864 feature &= ~X86_HTT; 865 866 if ((feature & (X86_HTT | X86_CMP)) == 0) { 867 /* 868 * Single-core single-threaded processors. 869 */ 870 cpi->cpi_chipid = -1; 871 cpi->cpi_clogid = 0; 872 cpi->cpi_coreid = cpu->cpu_id; 873 } else if (cpi->cpi_ncpu_per_chip > 1) { 874 uint_t i; 875 uint_t chipid_shift = 0; 876 uint_t coreid_shift = 0; 877 uint_t apic_id = CPI_APIC_ID(cpi); 878 879 for (i = 1; i < cpi->cpi_ncpu_per_chip; i <<= 1) 880 chipid_shift++; 881 cpi->cpi_chipid = apic_id >> chipid_shift; 882 cpi->cpi_clogid = apic_id & ((1 << chipid_shift) - 1); 883 884 if (cpi->cpi_vendor == X86_VENDOR_Intel) { 885 if (feature & X86_CMP) { 886 /* 887 * Multi-core (and possibly multi-threaded) 888 * processors. 889 */ 890 uint_t ncpu_per_core; 891 if (cpi->cpi_ncore_per_chip == 1) 892 ncpu_per_core = cpi->cpi_ncpu_per_chip; 893 else if (cpi->cpi_ncore_per_chip > 1) 894 ncpu_per_core = cpi->cpi_ncpu_per_chip / 895 cpi->cpi_ncore_per_chip; 896 /* 897 * 8bit APIC IDs on dual core Pentiums 898 * look like this: 899 * 900 * +-----------------------+------+------+ 901 * | Physical Package ID | MC | HT | 902 * +-----------------------+------+------+ 903 * <------- chipid --------> 904 * <------- coreid ---------------> 905 * <--- clogid --> 906 * 907 * Where the number of bits necessary to 908 * represent MC and HT fields together equals 909 * to the minimum number of bits necessary to 910 * store the value of cpi->cpi_ncpu_per_chip. 911 * Of those bits, the MC part uses the number 912 * of bits necessary to store the value of 913 * cpi->cpi_ncore_per_chip. 914 */ 915 for (i = 1; i < ncpu_per_core; i <<= 1) 916 coreid_shift++; 917 cpi->cpi_coreid = apic_id >> coreid_shift; 918 } else if (feature & X86_HTT) { 919 /* 920 * Single-core multi-threaded processors. 921 */ 922 cpi->cpi_coreid = cpi->cpi_chipid; 923 } 924 } else if (cpi->cpi_vendor == X86_VENDOR_AMD) { 925 /* 926 * AMD currently only has dual-core processors with 927 * single-threaded cores. If they ever release 928 * multi-threaded processors, then this code 929 * will have to be updated. 930 */ 931 cpi->cpi_coreid = cpu->cpu_id; 932 } else { 933 /* 934 * All other processors are currently 935 * assumed to have single cores. 
936 */ 937 cpi->cpi_coreid = cpi->cpi_chipid; 938 } 939 } 940 941 /* 942 * Synthesize chip "revision" and socket type 943 */ 944 synth_info(cpi); 945 946 pass1_done: 947 cpi->cpi_pass = 1; 948 return (feature); 949 } 950 951 /* 952 * Make copies of the cpuid table entries we depend on, in 953 * part for ease of parsing now, in part so that we have only 954 * one place to correct any of it, in part for ease of 955 * later export to userland, and in part so we can look at 956 * this stuff in a crash dump. 957 */ 958 959 /*ARGSUSED*/ 960 void 961 cpuid_pass2(cpu_t *cpu) 962 { 963 uint_t n, nmax; 964 int i; 965 struct cpuid_regs *cp; 966 uint8_t *dp; 967 uint32_t *iptr; 968 struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; 969 970 ASSERT(cpi->cpi_pass == 1); 971 972 if (cpi->cpi_maxeax < 1) 973 goto pass2_done; 974 975 if ((nmax = cpi->cpi_maxeax + 1) > NMAX_CPI_STD) 976 nmax = NMAX_CPI_STD; 977 /* 978 * (We already handled n == 0 and n == 1 in pass 1) 979 */ 980 for (n = 2, cp = &cpi->cpi_std[2]; n < nmax; n++, cp++) { 981 cp->cp_eax = n; 982 (void) __cpuid_insn(cp); 983 switch (n) { 984 case 2: 985 /* 986 * "the lower 8 bits of the %eax register 987 * contain a value that identifies the number 988 * of times the cpuid [instruction] has to be 989 * executed to obtain a complete image of the 990 * processor's caching systems." 991 * 992 * How *do* they make this stuff up? 993 */ 994 cpi->cpi_ncache = sizeof (*cp) * 995 BITX(cp->cp_eax, 7, 0); 996 if (cpi->cpi_ncache == 0) 997 break; 998 cpi->cpi_ncache--; /* skip count byte */ 999 1000 /* 1001 * Well, for now, rather than attempt to implement 1002 * this slightly dubious algorithm, we just look 1003 * at the first 15 .. 1004 */ 1005 if (cpi->cpi_ncache > (sizeof (*cp) - 1)) 1006 cpi->cpi_ncache = sizeof (*cp) - 1; 1007 1008 dp = cpi->cpi_cacheinfo; 1009 if (BITX(cp->cp_eax, 31, 31) == 0) { 1010 uint8_t *p = (void *)&cp->cp_eax; 1011 for (i = 1; i < 3; i++) 1012 if (p[i] != 0) 1013 *dp++ = p[i]; 1014 } 1015 if (BITX(cp->cp_ebx, 31, 31) == 0) { 1016 uint8_t *p = (void *)&cp->cp_ebx; 1017 for (i = 0; i < 4; i++) 1018 if (p[i] != 0) 1019 *dp++ = p[i]; 1020 } 1021 if (BITX(cp->cp_ecx, 31, 31) == 0) { 1022 uint8_t *p = (void *)&cp->cp_ecx; 1023 for (i = 0; i < 4; i++) 1024 if (p[i] != 0) 1025 *dp++ = p[i]; 1026 } 1027 if (BITX(cp->cp_edx, 31, 31) == 0) { 1028 uint8_t *p = (void *)&cp->cp_edx; 1029 for (i = 0; i < 4; i++) 1030 if (p[i] != 0) 1031 *dp++ = p[i]; 1032 } 1033 break; 1034 case 3: /* Processor serial number, if PSN supported */ 1035 case 4: /* Deterministic cache parameters */ 1036 case 5: /* Monitor/Mwait parameters */ 1037 default: 1038 break; 1039 } 1040 } 1041 1042 if ((cpi->cpi_xmaxeax & 0x80000000) == 0) 1043 goto pass2_done; 1044 1045 if ((nmax = cpi->cpi_xmaxeax - 0x80000000 + 1) > NMAX_CPI_EXTD) 1046 nmax = NMAX_CPI_EXTD; 1047 /* 1048 * Copy the extended properties, fixing them as we go. 1049 * (We already handled n == 0 and n == 1 in pass 1) 1050 */ 1051 iptr = (void *)cpi->cpi_brandstr; 1052 for (n = 2, cp = &cpi->cpi_extd[2]; n < nmax; cp++, n++) { 1053 cp->cp_eax = 0x80000000 + n; 1054 (void) __cpuid_insn(cp); 1055 switch (n) { 1056 case 2: 1057 case 3: 1058 case 4: 1059 /* 1060 * Extract the brand string 1061 */ 1062 *iptr++ = cp->cp_eax; 1063 *iptr++ = cp->cp_ebx; 1064 *iptr++ = cp->cp_ecx; 1065 *iptr++ = cp->cp_edx; 1066 break; 1067 case 5: 1068 switch (cpi->cpi_vendor) { 1069 case X86_VENDOR_AMD: 1070 /* 1071 * The Athlon and Duron were the first 1072 * parts to report the sizes of the 1073 * TLB for large pages. 
Before then, 1074 * we don't trust the data. 1075 */ 1076 if (cpi->cpi_family < 6 || 1077 (cpi->cpi_family == 6 && 1078 cpi->cpi_model < 1)) 1079 cp->cp_eax = 0; 1080 break; 1081 default: 1082 break; 1083 } 1084 break; 1085 case 6: 1086 switch (cpi->cpi_vendor) { 1087 case X86_VENDOR_AMD: 1088 /* 1089 * The Athlon and Duron were the first 1090 * AMD parts with L2 TLB's. 1091 * Before then, don't trust the data. 1092 */ 1093 if (cpi->cpi_family < 6 || 1094 cpi->cpi_family == 6 && 1095 cpi->cpi_model < 1) 1096 cp->cp_eax = cp->cp_ebx = 0; 1097 /* 1098 * AMD Duron rev A0 reports L2 1099 * cache size incorrectly as 1K 1100 * when it is really 64K 1101 */ 1102 if (cpi->cpi_family == 6 && 1103 cpi->cpi_model == 3 && 1104 cpi->cpi_step == 0) { 1105 cp->cp_ecx &= 0xffff; 1106 cp->cp_ecx |= 0x400000; 1107 } 1108 break; 1109 case X86_VENDOR_Cyrix: /* VIA C3 */ 1110 /* 1111 * VIA C3 processors are a bit messed 1112 * up w.r.t. encoding cache sizes in %ecx 1113 */ 1114 if (cpi->cpi_family != 6) 1115 break; 1116 /* 1117 * model 7 and 8 were incorrectly encoded 1118 * 1119 * xxx is model 8 really broken? 1120 */ 1121 if (cpi->cpi_model == 7 || 1122 cpi->cpi_model == 8) 1123 cp->cp_ecx = 1124 BITX(cp->cp_ecx, 31, 24) << 16 | 1125 BITX(cp->cp_ecx, 23, 16) << 12 | 1126 BITX(cp->cp_ecx, 15, 8) << 8 | 1127 BITX(cp->cp_ecx, 7, 0); 1128 /* 1129 * model 9 stepping 1 has wrong associativity 1130 */ 1131 if (cpi->cpi_model == 9 && cpi->cpi_step == 1) 1132 cp->cp_ecx |= 8 << 12; 1133 break; 1134 case X86_VENDOR_Intel: 1135 /* 1136 * Extended L2 Cache features function. 1137 * First appeared on Prescott. 1138 */ 1139 default: 1140 break; 1141 } 1142 break; 1143 default: 1144 break; 1145 } 1146 } 1147 1148 pass2_done: 1149 cpi->cpi_pass = 2; 1150 } 1151 1152 static const char * 1153 intel_cpubrand(const struct cpuid_info *cpi) 1154 { 1155 int i; 1156 1157 if ((x86_feature & X86_CPUID) == 0 || 1158 cpi->cpi_maxeax < 1 || cpi->cpi_family < 5) 1159 return ("i486"); 1160 1161 switch (cpi->cpi_family) { 1162 case 5: 1163 return ("Intel Pentium(r)"); 1164 case 6: 1165 switch (cpi->cpi_model) { 1166 uint_t celeron, xeon; 1167 const struct cpuid_regs *cp; 1168 case 0: 1169 case 1: 1170 case 2: 1171 return ("Intel Pentium(r) Pro"); 1172 case 3: 1173 case 4: 1174 return ("Intel Pentium(r) II"); 1175 case 6: 1176 return ("Intel Celeron(r)"); 1177 case 5: 1178 case 7: 1179 celeron = xeon = 0; 1180 cp = &cpi->cpi_std[2]; /* cache info */ 1181 1182 for (i = 1; i < 3; i++) { 1183 uint_t tmp; 1184 1185 tmp = (cp->cp_eax >> (8 * i)) & 0xff; 1186 if (tmp == 0x40) 1187 celeron++; 1188 if (tmp >= 0x44 && tmp <= 0x45) 1189 xeon++; 1190 } 1191 1192 for (i = 0; i < 2; i++) { 1193 uint_t tmp; 1194 1195 tmp = (cp->cp_ebx >> (8 * i)) & 0xff; 1196 if (tmp == 0x40) 1197 celeron++; 1198 else if (tmp >= 0x44 && tmp <= 0x45) 1199 xeon++; 1200 } 1201 1202 for (i = 0; i < 4; i++) { 1203 uint_t tmp; 1204 1205 tmp = (cp->cp_ecx >> (8 * i)) & 0xff; 1206 if (tmp == 0x40) 1207 celeron++; 1208 else if (tmp >= 0x44 && tmp <= 0x45) 1209 xeon++; 1210 } 1211 1212 for (i = 0; i < 4; i++) { 1213 uint_t tmp; 1214 1215 tmp = (cp->cp_edx >> (8 * i)) & 0xff; 1216 if (tmp == 0x40) 1217 celeron++; 1218 else if (tmp >= 0x44 && tmp <= 0x45) 1219 xeon++; 1220 } 1221 1222 if (celeron) 1223 return ("Intel Celeron(r)"); 1224 if (xeon) 1225 return (cpi->cpi_model == 5 ? 1226 "Intel Pentium(r) II Xeon(tm)" : 1227 "Intel Pentium(r) III Xeon(tm)"); 1228 return (cpi->cpi_model == 5 ? 
1229 "Intel Pentium(r) II or Pentium(r) II Xeon(tm)" : 1230 "Intel Pentium(r) III or Pentium(r) III Xeon(tm)"); 1231 default: 1232 break; 1233 } 1234 default: 1235 break; 1236 } 1237 1238 /* BrandID is present if the field is nonzero */ 1239 if (cpi->cpi_brandid != 0) { 1240 static const struct { 1241 uint_t bt_bid; 1242 const char *bt_str; 1243 } brand_tbl[] = { 1244 { 0x1, "Intel(r) Celeron(r)" }, 1245 { 0x2, "Intel(r) Pentium(r) III" }, 1246 { 0x3, "Intel(r) Pentium(r) III Xeon(tm)" }, 1247 { 0x4, "Intel(r) Pentium(r) III" }, 1248 { 0x6, "Mobile Intel(r) Pentium(r) III" }, 1249 { 0x7, "Mobile Intel(r) Celeron(r)" }, 1250 { 0x8, "Intel(r) Pentium(r) 4" }, 1251 { 0x9, "Intel(r) Pentium(r) 4" }, 1252 { 0xa, "Intel(r) Celeron(r)" }, 1253 { 0xb, "Intel(r) Xeon(tm)" }, 1254 { 0xc, "Intel(r) Xeon(tm) MP" }, 1255 { 0xe, "Mobile Intel(r) Pentium(r) 4" }, 1256 { 0xf, "Mobile Intel(r) Celeron(r)" }, 1257 { 0x11, "Mobile Genuine Intel(r)" }, 1258 { 0x12, "Intel(r) Celeron(r) M" }, 1259 { 0x13, "Mobile Intel(r) Celeron(r)" }, 1260 { 0x14, "Intel(r) Celeron(r)" }, 1261 { 0x15, "Mobile Genuine Intel(r)" }, 1262 { 0x16, "Intel(r) Pentium(r) M" }, 1263 { 0x17, "Mobile Intel(r) Celeron(r)" } 1264 }; 1265 uint_t btblmax = sizeof (brand_tbl) / sizeof (brand_tbl[0]); 1266 uint_t sgn; 1267 1268 sgn = (cpi->cpi_family << 8) | 1269 (cpi->cpi_model << 4) | cpi->cpi_step; 1270 1271 for (i = 0; i < btblmax; i++) 1272 if (brand_tbl[i].bt_bid == cpi->cpi_brandid) 1273 break; 1274 if (i < btblmax) { 1275 if (sgn == 0x6b1 && cpi->cpi_brandid == 3) 1276 return ("Intel(r) Celeron(r)"); 1277 if (sgn < 0xf13 && cpi->cpi_brandid == 0xb) 1278 return ("Intel(r) Xeon(tm) MP"); 1279 if (sgn < 0xf13 && cpi->cpi_brandid == 0xe) 1280 return ("Intel(r) Xeon(tm)"); 1281 return (brand_tbl[i].bt_str); 1282 } 1283 } 1284 1285 return (NULL); 1286 } 1287 1288 static const char * 1289 amd_cpubrand(const struct cpuid_info *cpi) 1290 { 1291 if ((x86_feature & X86_CPUID) == 0 || 1292 cpi->cpi_maxeax < 1 || cpi->cpi_family < 5) 1293 return ("i486 compatible"); 1294 1295 switch (cpi->cpi_family) { 1296 case 5: 1297 switch (cpi->cpi_model) { 1298 case 0: 1299 case 1: 1300 case 2: 1301 case 3: 1302 case 4: 1303 case 5: 1304 return ("AMD-K5(r)"); 1305 case 6: 1306 case 7: 1307 return ("AMD-K6(r)"); 1308 case 8: 1309 return ("AMD-K6(r)-2"); 1310 case 9: 1311 return ("AMD-K6(r)-III"); 1312 default: 1313 return ("AMD (family 5)"); 1314 } 1315 case 6: 1316 switch (cpi->cpi_model) { 1317 case 1: 1318 return ("AMD-K7(tm)"); 1319 case 0: 1320 case 2: 1321 case 4: 1322 return ("AMD Athlon(tm)"); 1323 case 3: 1324 case 7: 1325 return ("AMD Duron(tm)"); 1326 case 6: 1327 case 8: 1328 case 10: 1329 /* 1330 * Use the L2 cache size to distinguish 1331 */ 1332 return ((cpi->cpi_extd[6].cp_ecx >> 16) >= 256 ? 
1333 "AMD Athlon(tm)" : "AMD Duron(tm)"); 1334 default: 1335 return ("AMD (family 6)"); 1336 } 1337 default: 1338 break; 1339 } 1340 1341 if (cpi->cpi_family == 0xf && cpi->cpi_model == 5 && 1342 cpi->cpi_brandid != 0) { 1343 switch (BITX(cpi->cpi_brandid, 7, 5)) { 1344 case 3: 1345 return ("AMD Opteron(tm) UP 1xx"); 1346 case 4: 1347 return ("AMD Opteron(tm) DP 2xx"); 1348 case 5: 1349 return ("AMD Opteron(tm) MP 8xx"); 1350 default: 1351 return ("AMD Opteron(tm)"); 1352 } 1353 } 1354 1355 return (NULL); 1356 } 1357 1358 static const char * 1359 cyrix_cpubrand(struct cpuid_info *cpi, uint_t type) 1360 { 1361 if ((x86_feature & X86_CPUID) == 0 || 1362 cpi->cpi_maxeax < 1 || cpi->cpi_family < 5 || 1363 type == X86_TYPE_CYRIX_486) 1364 return ("i486 compatible"); 1365 1366 switch (type) { 1367 case X86_TYPE_CYRIX_6x86: 1368 return ("Cyrix 6x86"); 1369 case X86_TYPE_CYRIX_6x86L: 1370 return ("Cyrix 6x86L"); 1371 case X86_TYPE_CYRIX_6x86MX: 1372 return ("Cyrix 6x86MX"); 1373 case X86_TYPE_CYRIX_GXm: 1374 return ("Cyrix GXm"); 1375 case X86_TYPE_CYRIX_MediaGX: 1376 return ("Cyrix MediaGX"); 1377 case X86_TYPE_CYRIX_MII: 1378 return ("Cyrix M2"); 1379 case X86_TYPE_VIA_CYRIX_III: 1380 return ("VIA Cyrix M3"); 1381 default: 1382 /* 1383 * Have another wild guess .. 1384 */ 1385 if (cpi->cpi_family == 4 && cpi->cpi_model == 9) 1386 return ("Cyrix 5x86"); 1387 else if (cpi->cpi_family == 5) { 1388 switch (cpi->cpi_model) { 1389 case 2: 1390 return ("Cyrix 6x86"); /* Cyrix M1 */ 1391 case 4: 1392 return ("Cyrix MediaGX"); 1393 default: 1394 break; 1395 } 1396 } else if (cpi->cpi_family == 6) { 1397 switch (cpi->cpi_model) { 1398 case 0: 1399 return ("Cyrix 6x86MX"); /* Cyrix M2? */ 1400 case 5: 1401 case 6: 1402 case 7: 1403 case 8: 1404 case 9: 1405 return ("VIA C3"); 1406 default: 1407 break; 1408 } 1409 } 1410 break; 1411 } 1412 return (NULL); 1413 } 1414 1415 /* 1416 * This only gets called in the case that the CPU extended 1417 * feature brand string (0x80000002, 0x80000003, 0x80000004) 1418 * aren't available, or contain null bytes for some reason. 1419 */ 1420 static void 1421 fabricate_brandstr(struct cpuid_info *cpi) 1422 { 1423 const char *brand = NULL; 1424 1425 switch (cpi->cpi_vendor) { 1426 case X86_VENDOR_Intel: 1427 brand = intel_cpubrand(cpi); 1428 break; 1429 case X86_VENDOR_AMD: 1430 brand = amd_cpubrand(cpi); 1431 break; 1432 case X86_VENDOR_Cyrix: 1433 brand = cyrix_cpubrand(cpi, x86_type); 1434 break; 1435 case X86_VENDOR_NexGen: 1436 if (cpi->cpi_family == 5 && cpi->cpi_model == 0) 1437 brand = "NexGen Nx586"; 1438 break; 1439 case X86_VENDOR_Centaur: 1440 if (cpi->cpi_family == 5) 1441 switch (cpi->cpi_model) { 1442 case 4: 1443 brand = "Centaur C6"; 1444 break; 1445 case 8: 1446 brand = "Centaur C2"; 1447 break; 1448 case 9: 1449 brand = "Centaur C3"; 1450 break; 1451 default: 1452 break; 1453 } 1454 break; 1455 case X86_VENDOR_Rise: 1456 if (cpi->cpi_family == 5 && 1457 (cpi->cpi_model == 0 || cpi->cpi_model == 2)) 1458 brand = "Rise mP6"; 1459 break; 1460 case X86_VENDOR_SiS: 1461 if (cpi->cpi_family == 5 && cpi->cpi_model == 0) 1462 brand = "SiS 55x"; 1463 break; 1464 case X86_VENDOR_TM: 1465 if (cpi->cpi_family == 5 && cpi->cpi_model == 4) 1466 brand = "Transmeta Crusoe TM3x00 or TM5x00"; 1467 break; 1468 case X86_VENDOR_NSC: 1469 case X86_VENDOR_UMC: 1470 default: 1471 break; 1472 } 1473 if (brand) { 1474 (void) strcpy((char *)cpi->cpi_brandstr, brand); 1475 return; 1476 } 1477 1478 /* 1479 * If all else fails ... 
1480 */ 1481 (void) snprintf(cpi->cpi_brandstr, sizeof (cpi->cpi_brandstr), 1482 "%s %d.%d.%d", cpi->cpi_vendorstr, cpi->cpi_family, 1483 cpi->cpi_model, cpi->cpi_step); 1484 } 1485 1486 /* 1487 * This routine is called just after kernel memory allocation 1488 * becomes available on cpu0, and as part of mp_startup() on 1489 * the other cpus. 1490 * 1491 * Fixup the brand string. 1492 */ 1493 /*ARGSUSED*/ 1494 void 1495 cpuid_pass3(cpu_t *cpu) 1496 { 1497 struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; 1498 1499 ASSERT(cpi->cpi_pass == 2); 1500 1501 if ((cpi->cpi_xmaxeax & 0x80000000) == 0) { 1502 fabricate_brandstr(cpi); 1503 goto pass3_done; 1504 } 1505 1506 /* 1507 * If we successfully extracted a brand string from the cpuid 1508 * instruction, clean it up by removing leading spaces and 1509 * similar junk. 1510 */ 1511 if (cpi->cpi_brandstr[0]) { 1512 size_t maxlen = sizeof (cpi->cpi_brandstr); 1513 char *src, *dst; 1514 1515 dst = src = (char *)cpi->cpi_brandstr; 1516 src[maxlen - 1] = '\0'; 1517 /* 1518 * strip leading spaces 1519 */ 1520 while (*src == ' ') 1521 src++; 1522 /* 1523 * Remove any 'Genuine' or "Authentic" prefixes 1524 */ 1525 if (strncmp(src, "Genuine ", 8) == 0) 1526 src += 8; 1527 if (strncmp(src, "Authentic ", 10) == 0) 1528 src += 10; 1529 1530 /* 1531 * Now do an in-place copy. 1532 * Map (R) to (r) and (TM) to (tm). 1533 * The era of teletypes is long gone, and there's 1534 * -really- no need to shout. 1535 */ 1536 while (*src != '\0') { 1537 if (src[0] == '(') { 1538 if (strncmp(src + 1, "R)", 2) == 0) { 1539 (void) strncpy(dst, "(r)", 3); 1540 src += 3; 1541 dst += 3; 1542 continue; 1543 } 1544 if (strncmp(src + 1, "TM)", 3) == 0) { 1545 (void) strncpy(dst, "(tm)", 4); 1546 src += 4; 1547 dst += 4; 1548 continue; 1549 } 1550 } 1551 *dst++ = *src++; 1552 } 1553 *dst = '\0'; 1554 1555 /* 1556 * Finally, remove any trailing spaces 1557 */ 1558 while (--dst > cpi->cpi_brandstr) 1559 if (*dst == ' ') 1560 *dst = '\0'; 1561 else 1562 break; 1563 } else 1564 fabricate_brandstr(cpi); 1565 1566 pass3_done: 1567 cpi->cpi_pass = 3; 1568 } 1569 1570 /* 1571 * This routine is called out of bind_hwcap() much later in the life 1572 * of the kernel (post_startup()). The job of this routine is to resolve 1573 * the hardware feature support and kernel support for those features into 1574 * what we're actually going to tell applications via the aux vector. 
1575 */ 1576 uint_t 1577 cpuid_pass4(cpu_t *cpu) 1578 { 1579 struct cpuid_info *cpi; 1580 uint_t hwcap_flags = 0; 1581 1582 if (cpu == NULL) 1583 cpu = CPU; 1584 cpi = cpu->cpu_m.mcpu_cpi; 1585 1586 ASSERT(cpi->cpi_pass == 3); 1587 1588 if (cpi->cpi_maxeax >= 1) { 1589 uint32_t *edx = &cpi->cpi_support[STD_EDX_FEATURES]; 1590 uint32_t *ecx = &cpi->cpi_support[STD_ECX_FEATURES]; 1591 1592 *edx = CPI_FEATURES_EDX(cpi); 1593 *ecx = CPI_FEATURES_ECX(cpi); 1594 1595 /* 1596 * [these require explicit kernel support] 1597 */ 1598 if ((x86_feature & X86_SEP) == 0) 1599 *edx &= ~CPUID_INTC_EDX_SEP; 1600 1601 if ((x86_feature & X86_SSE) == 0) 1602 *edx &= ~(CPUID_INTC_EDX_FXSR|CPUID_INTC_EDX_SSE); 1603 if ((x86_feature & X86_SSE2) == 0) 1604 *edx &= ~CPUID_INTC_EDX_SSE2; 1605 1606 if ((x86_feature & X86_HTT) == 0) 1607 *edx &= ~CPUID_INTC_EDX_HTT; 1608 1609 if ((x86_feature & X86_SSE3) == 0) 1610 *ecx &= ~CPUID_INTC_ECX_SSE3; 1611 1612 /* 1613 * [no explicit support required beyond x87 fp context] 1614 */ 1615 if (!fpu_exists) 1616 *edx &= ~(CPUID_INTC_EDX_FPU | CPUID_INTC_EDX_MMX); 1617 1618 /* 1619 * Now map the supported feature vector to things that we 1620 * think userland will care about. 1621 */ 1622 if (*edx & CPUID_INTC_EDX_SEP) 1623 hwcap_flags |= AV_386_SEP; 1624 if (*edx & CPUID_INTC_EDX_SSE) 1625 hwcap_flags |= AV_386_FXSR | AV_386_SSE; 1626 if (*edx & CPUID_INTC_EDX_SSE2) 1627 hwcap_flags |= AV_386_SSE2; 1628 if (*ecx & CPUID_INTC_ECX_SSE3) 1629 hwcap_flags |= AV_386_SSE3; 1630 1631 if (*edx & CPUID_INTC_EDX_FPU) 1632 hwcap_flags |= AV_386_FPU; 1633 if (*edx & CPUID_INTC_EDX_MMX) 1634 hwcap_flags |= AV_386_MMX; 1635 1636 if (*edx & CPUID_INTC_EDX_TSC) 1637 hwcap_flags |= AV_386_TSC; 1638 if (*edx & CPUID_INTC_EDX_CX8) 1639 hwcap_flags |= AV_386_CX8; 1640 if (*edx & CPUID_INTC_EDX_CMOV) 1641 hwcap_flags |= AV_386_CMOV; 1642 if (*ecx & CPUID_INTC_ECX_MON) 1643 hwcap_flags |= AV_386_MON; 1644 #if defined(CPUID_INTC_ECX_CX16) 1645 if (*ecx & CPUID_INTC_ECX_CX16) 1646 hwcap_flags |= AV_386_CX16; 1647 #endif 1648 } 1649 1650 if (x86_feature & X86_HTT) 1651 hwcap_flags |= AV_386_PAUSE; 1652 1653 if (cpi->cpi_xmaxeax < 0x80000001) 1654 goto pass4_done; 1655 1656 switch (cpi->cpi_vendor) { 1657 struct cpuid_regs cp; 1658 uint32_t *edx; 1659 1660 case X86_VENDOR_Intel: /* sigh */ 1661 case X86_VENDOR_AMD: 1662 edx = &cpi->cpi_support[AMD_EDX_FEATURES]; 1663 1664 *edx = CPI_FEATURES_XTD_EDX(cpi); 1665 1666 /* 1667 * [no explicit support required beyond 1668 * x87 fp context and exception handlers] 1669 */ 1670 if (!fpu_exists) 1671 *edx &= ~(CPUID_AMD_EDX_MMXamd | 1672 CPUID_AMD_EDX_3DNow | CPUID_AMD_EDX_3DNowx); 1673 1674 if ((x86_feature & X86_ASYSC) == 0) 1675 *edx &= ~CPUID_AMD_EDX_SYSC; 1676 if ((x86_feature & X86_NX) == 0) 1677 *edx &= ~CPUID_AMD_EDX_NX; 1678 #if !defined(_LP64) 1679 *edx &= ~CPUID_AMD_EDX_LM; 1680 #endif 1681 /* 1682 * Now map the supported feature vector to 1683 * things that we think userland will care about. 
1684 */ 1685 if (*edx & CPUID_AMD_EDX_SYSC) 1686 hwcap_flags |= AV_386_AMD_SYSC; 1687 if (*edx & CPUID_AMD_EDX_MMXamd) 1688 hwcap_flags |= AV_386_AMD_MMX; 1689 if (*edx & CPUID_AMD_EDX_3DNow) 1690 hwcap_flags |= AV_386_AMD_3DNow; 1691 if (*edx & CPUID_AMD_EDX_3DNowx) 1692 hwcap_flags |= AV_386_AMD_3DNowx; 1693 break; 1694 1695 case X86_VENDOR_TM: 1696 cp.cp_eax = 0x80860001; 1697 (void) __cpuid_insn(&cp); 1698 cpi->cpi_support[TM_EDX_FEATURES] = cp.cp_edx; 1699 break; 1700 1701 default: 1702 break; 1703 } 1704 1705 pass4_done: 1706 cpi->cpi_pass = 4; 1707 return (hwcap_flags); 1708 } 1709 1710 1711 /* 1712 * Simulate the cpuid instruction using the data we previously 1713 * captured about this CPU. We try our best to return the truth 1714 * about the hardware, independently of kernel support. 1715 */ 1716 uint32_t 1717 cpuid_insn(cpu_t *cpu, struct cpuid_regs *cp) 1718 { 1719 struct cpuid_info *cpi; 1720 struct cpuid_regs *xcp; 1721 1722 if (cpu == NULL) 1723 cpu = CPU; 1724 cpi = cpu->cpu_m.mcpu_cpi; 1725 1726 ASSERT(cpuid_checkpass(cpu, 3)); 1727 1728 /* 1729 * CPUID data is cached in two separate places: cpi_std for standard 1730 * CPUID functions, and cpi_extd for extended CPUID functions. 1731 */ 1732 if (cp->cp_eax <= cpi->cpi_maxeax && cp->cp_eax < NMAX_CPI_STD) 1733 xcp = &cpi->cpi_std[cp->cp_eax]; 1734 else if (cp->cp_eax >= 0x80000000 && cp->cp_eax <= cpi->cpi_xmaxeax && 1735 cp->cp_eax < 0x80000000 + NMAX_CPI_EXTD) 1736 xcp = &cpi->cpi_extd[cp->cp_eax - 0x80000000]; 1737 else 1738 /* 1739 * The caller is asking for data from an input parameter which 1740 * the kernel has not cached. In this case we go fetch from 1741 * the hardware and return the data directly to the user. 1742 */ 1743 return (__cpuid_insn(cp)); 1744 1745 cp->cp_eax = xcp->cp_eax; 1746 cp->cp_ebx = xcp->cp_ebx; 1747 cp->cp_ecx = xcp->cp_ecx; 1748 cp->cp_edx = xcp->cp_edx; 1749 return (cp->cp_eax); 1750 } 1751 1752 int 1753 cpuid_checkpass(cpu_t *cpu, int pass) 1754 { 1755 return (cpu != NULL && cpu->cpu_m.mcpu_cpi != NULL && 1756 cpu->cpu_m.mcpu_cpi->cpi_pass >= pass); 1757 } 1758 1759 int 1760 cpuid_getbrandstr(cpu_t *cpu, char *s, size_t n) 1761 { 1762 ASSERT(cpuid_checkpass(cpu, 3)); 1763 1764 return (snprintf(s, n, "%s", cpu->cpu_m.mcpu_cpi->cpi_brandstr)); 1765 } 1766 1767 int 1768 cpuid_is_cmt(cpu_t *cpu) 1769 { 1770 if (cpu == NULL) 1771 cpu = CPU; 1772 1773 ASSERT(cpuid_checkpass(cpu, 1)); 1774 1775 return (cpu->cpu_m.mcpu_cpi->cpi_chipid >= 0); 1776 } 1777 1778 /* 1779 * AMD and Intel both implement the 64-bit variant of the syscall 1780 * instruction (syscallq), so if there's -any- support for syscall, 1781 * cpuid currently says "yes, we support this". 1782 * 1783 * However, Intel decided to -not- implement the 32-bit variant of the 1784 * syscall instruction, so we provide a predicate to allow our caller 1785 * to test that subtlety here. 1786 */ 1787 /*ARGSUSED*/ 1788 int 1789 cpuid_syscall32_insn(cpu_t *cpu) 1790 { 1791 ASSERT(cpuid_checkpass((cpu == NULL ? 
CPU : cpu), 1)); 1792 1793 if (x86_feature & X86_ASYSC) 1794 return (x86_vendor != X86_VENDOR_Intel); 1795 return (0); 1796 } 1797 1798 int 1799 cpuid_getidstr(cpu_t *cpu, char *s, size_t n) 1800 { 1801 struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; 1802 1803 static const char fmt[] = 1804 "x86 (%s family %d model %d step %d clock %d MHz)"; 1805 static const char fmt_ht[] = 1806 "x86 (chipid 0x%x %s family %d model %d step %d clock %d MHz)"; 1807 1808 ASSERT(cpuid_checkpass(cpu, 1)); 1809 1810 if (cpuid_is_cmt(cpu)) 1811 return (snprintf(s, n, fmt_ht, cpi->cpi_chipid, 1812 cpi->cpi_vendorstr, cpi->cpi_family, cpi->cpi_model, 1813 cpi->cpi_step, cpu->cpu_type_info.pi_clock)); 1814 return (snprintf(s, n, fmt, 1815 cpi->cpi_vendorstr, cpi->cpi_family, cpi->cpi_model, 1816 cpi->cpi_step, cpu->cpu_type_info.pi_clock)); 1817 } 1818 1819 const char * 1820 cpuid_getvendorstr(cpu_t *cpu) 1821 { 1822 ASSERT(cpuid_checkpass(cpu, 1)); 1823 return ((const char *)cpu->cpu_m.mcpu_cpi->cpi_vendorstr); 1824 } 1825 1826 uint_t 1827 cpuid_getvendor(cpu_t *cpu) 1828 { 1829 ASSERT(cpuid_checkpass(cpu, 1)); 1830 return (cpu->cpu_m.mcpu_cpi->cpi_vendor); 1831 } 1832 1833 uint_t 1834 cpuid_getfamily(cpu_t *cpu) 1835 { 1836 ASSERT(cpuid_checkpass(cpu, 1)); 1837 return (cpu->cpu_m.mcpu_cpi->cpi_family); 1838 } 1839 1840 uint_t 1841 cpuid_getmodel(cpu_t *cpu) 1842 { 1843 ASSERT(cpuid_checkpass(cpu, 1)); 1844 return (cpu->cpu_m.mcpu_cpi->cpi_model); 1845 } 1846 1847 uint_t 1848 cpuid_get_ncpu_per_chip(cpu_t *cpu) 1849 { 1850 ASSERT(cpuid_checkpass(cpu, 1)); 1851 return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_per_chip); 1852 } 1853 1854 uint_t 1855 cpuid_get_ncore_per_chip(cpu_t *cpu) 1856 { 1857 ASSERT(cpuid_checkpass(cpu, 1)); 1858 return (cpu->cpu_m.mcpu_cpi->cpi_ncore_per_chip); 1859 } 1860 1861 uint_t 1862 cpuid_getstep(cpu_t *cpu) 1863 { 1864 ASSERT(cpuid_checkpass(cpu, 1)); 1865 return (cpu->cpu_m.mcpu_cpi->cpi_step); 1866 } 1867 1868 uint32_t 1869 cpuid_getchiprev(struct cpu *cpu) 1870 { 1871 ASSERT(cpuid_checkpass(cpu, 1)); 1872 return (cpu->cpu_m.mcpu_cpi->cpi_chiprev); 1873 } 1874 1875 const char * 1876 cpuid_getchiprevstr(struct cpu *cpu) 1877 { 1878 ASSERT(cpuid_checkpass(cpu, 1)); 1879 return (cpu->cpu_m.mcpu_cpi->cpi_chiprevstr); 1880 } 1881 1882 uint32_t 1883 cpuid_getsockettype(struct cpu *cpu) 1884 { 1885 ASSERT(cpuid_checkpass(cpu, 1)); 1886 return (cpu->cpu_m.mcpu_cpi->cpi_socket); 1887 } 1888 1889 chipid_t 1890 chip_plat_get_chipid(cpu_t *cpu) 1891 { 1892 ASSERT(cpuid_checkpass(cpu, 1)); 1893 1894 if (cpuid_is_cmt(cpu)) 1895 return (cpu->cpu_m.mcpu_cpi->cpi_chipid); 1896 return (cpu->cpu_id); 1897 } 1898 1899 id_t 1900 chip_plat_get_coreid(cpu_t *cpu) 1901 { 1902 ASSERT(cpuid_checkpass(cpu, 1)); 1903 return (cpu->cpu_m.mcpu_cpi->cpi_coreid); 1904 } 1905 1906 int 1907 chip_plat_get_clogid(cpu_t *cpu) 1908 { 1909 ASSERT(cpuid_checkpass(cpu, 1)); 1910 return (cpu->cpu_m.mcpu_cpi->cpi_clogid); 1911 } 1912 1913 void 1914 cpuid_get_addrsize(cpu_t *cpu, uint_t *pabits, uint_t *vabits) 1915 { 1916 struct cpuid_info *cpi; 1917 1918 if (cpu == NULL) 1919 cpu = CPU; 1920 cpi = cpu->cpu_m.mcpu_cpi; 1921 1922 ASSERT(cpuid_checkpass(cpu, 1)); 1923 1924 if (pabits) 1925 *pabits = cpi->cpi_pabits; 1926 if (vabits) 1927 *vabits = cpi->cpi_vabits; 1928 } 1929 1930 /* 1931 * Returns the number of data TLB entries for a corresponding 1932 * pagesize. If it can't be computed, or isn't known, the 1933 * routine returns zero. 
If you ask about an architecturally 1934 * impossible pagesize, the routine will panic (so that the 1935 * hat implementor knows that things are inconsistent.) 1936 */ 1937 uint_t 1938 cpuid_get_dtlb_nent(cpu_t *cpu, size_t pagesize) 1939 { 1940 struct cpuid_info *cpi; 1941 uint_t dtlb_nent = 0; 1942 1943 if (cpu == NULL) 1944 cpu = CPU; 1945 cpi = cpu->cpu_m.mcpu_cpi; 1946 1947 ASSERT(cpuid_checkpass(cpu, 1)); 1948 1949 /* 1950 * Check the L2 TLB info 1951 */ 1952 if (cpi->cpi_xmaxeax >= 0x80000006) { 1953 struct cpuid_regs *cp = &cpi->cpi_extd[6]; 1954 1955 switch (pagesize) { 1956 1957 case 4 * 1024: 1958 /* 1959 * All zero in the top 16 bits of the register 1960 * indicates a unified TLB. Size is in low 16 bits. 1961 */ 1962 if ((cp->cp_ebx & 0xffff0000) == 0) 1963 dtlb_nent = cp->cp_ebx & 0x0000ffff; 1964 else 1965 dtlb_nent = BITX(cp->cp_ebx, 27, 16); 1966 break; 1967 1968 case 2 * 1024 * 1024: 1969 if ((cp->cp_eax & 0xffff0000) == 0) 1970 dtlb_nent = cp->cp_eax & 0x0000ffff; 1971 else 1972 dtlb_nent = BITX(cp->cp_eax, 27, 16); 1973 break; 1974 1975 default: 1976 panic("unknown L2 pagesize"); 1977 /*NOTREACHED*/ 1978 } 1979 } 1980 1981 if (dtlb_nent != 0) 1982 return (dtlb_nent); 1983 1984 /* 1985 * No L2 TLB support for this size, try L1. 1986 */ 1987 if (cpi->cpi_xmaxeax >= 0x80000005) { 1988 struct cpuid_regs *cp = &cpi->cpi_extd[5]; 1989 1990 switch (pagesize) { 1991 case 4 * 1024: 1992 dtlb_nent = BITX(cp->cp_ebx, 23, 16); 1993 break; 1994 case 2 * 1024 * 1024: 1995 dtlb_nent = BITX(cp->cp_eax, 23, 16); 1996 break; 1997 default: 1998 panic("unknown L1 d-TLB pagesize"); 1999 /*NOTREACHED*/ 2000 } 2001 } 2002 2003 return (dtlb_nent); 2004 } 2005 2006 /* 2007 * Return 0 if the erratum is not present or not applicable, positive 2008 * if it is, and negative if the status of the erratum is unknown. 2009 * 2010 * See "Revision Guide for AMD Athlon(tm) 64 and AMD Opteron(tm) 2011 * Processors" #25759, Rev 3.57, August 2005 2012 */ 2013 int 2014 cpuid_opteron_erratum(cpu_t *cpu, uint_t erratum) 2015 { 2016 struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; 2017 uint_t eax; 2018 2019 /* 2020 * Bail out if this CPU isn't an AMD CPU, or if it's 2021 * a legacy (32-bit) AMD CPU. 
2022 */ 2023 if (cpi->cpi_vendor != X86_VENDOR_AMD || 2024 CPI_FAMILY(cpi) == 4 || CPI_FAMILY(cpi) == 5 || 2025 CPI_FAMILY(cpi) == 6) 2026 2027 return (0); 2028 2029 eax = cpi->cpi_std[1].cp_eax; 2030 2031 #define SH_B0(eax) (eax == 0xf40 || eax == 0xf50) 2032 #define SH_B3(eax) (eax == 0xf51) 2033 #define B(eax) (SH_B0(eax) || SH_B3(eax)) 2034 2035 #define SH_C0(eax) (eax == 0xf48 || eax == 0xf58) 2036 2037 #define SH_CG(eax) (eax == 0xf4a || eax == 0xf5a || eax == 0xf7a) 2038 #define DH_CG(eax) (eax == 0xfc0 || eax == 0xfe0 || eax == 0xff0) 2039 #define CH_CG(eax) (eax == 0xf82 || eax == 0xfb2) 2040 #define CG(eax) (SH_CG(eax) || DH_CG(eax) || CH_CG(eax)) 2041 2042 #define SH_D0(eax) (eax == 0x10f40 || eax == 0x10f50 || eax == 0x10f70) 2043 #define DH_D0(eax) (eax == 0x10fc0 || eax == 0x10ff0) 2044 #define CH_D0(eax) (eax == 0x10f80 || eax == 0x10fb0) 2045 #define D0(eax) (SH_D0(eax) || DH_D0(eax) || CH_D0(eax)) 2046 2047 #define SH_E0(eax) (eax == 0x20f50 || eax == 0x20f40 || eax == 0x20f70) 2048 #define JH_E1(eax) (eax == 0x20f10) /* JH8_E0 had 0x20f30 */ 2049 #define DH_E3(eax) (eax == 0x20fc0 || eax == 0x20ff0) 2050 #define SH_E4(eax) (eax == 0x20f51 || eax == 0x20f71) 2051 #define BH_E4(eax) (eax == 0x20fb1) 2052 #define SH_E5(eax) (eax == 0x20f42) 2053 #define DH_E6(eax) (eax == 0x20ff2 || eax == 0x20fc2) 2054 #define JH_E6(eax) (eax == 0x20f12 || eax == 0x20f32) 2055 #define EX(eax) (SH_E0(eax) || JH_E1(eax) || DH_E3(eax) || \ 2056 SH_E4(eax) || BH_E4(eax) || SH_E5(eax) || \ 2057 DH_E6(eax) || JH_E6(eax)) 2058 2059 switch (erratum) { 2060 case 1: 2061 return (1); 2062 case 51: /* what does the asterisk mean? */ 2063 return (B(eax) || SH_C0(eax) || CG(eax)); 2064 case 52: 2065 return (B(eax)); 2066 case 57: 2067 return (1); 2068 case 58: 2069 return (B(eax)); 2070 case 60: 2071 return (1); 2072 case 61: 2073 case 62: 2074 case 63: 2075 case 64: 2076 case 65: 2077 case 66: 2078 case 68: 2079 case 69: 2080 case 70: 2081 case 71: 2082 return (B(eax)); 2083 case 72: 2084 return (SH_B0(eax)); 2085 case 74: 2086 return (B(eax)); 2087 case 75: 2088 return (1); 2089 case 76: 2090 return (B(eax)); 2091 case 77: 2092 return (1); 2093 case 78: 2094 return (B(eax) || SH_C0(eax)); 2095 case 79: 2096 return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax)); 2097 case 80: 2098 case 81: 2099 case 82: 2100 return (B(eax)); 2101 case 83: 2102 return (B(eax) || SH_C0(eax) || CG(eax)); 2103 case 85: 2104 return (1); 2105 case 86: 2106 return (SH_C0(eax) || CG(eax)); 2107 case 88: 2108 #if !defined(__amd64) 2109 return (0); 2110 #else 2111 return (B(eax) || SH_C0(eax)); 2112 #endif 2113 case 89: 2114 return (1); 2115 case 90: 2116 return (B(eax) || SH_C0(eax) || CG(eax)); 2117 case 91: 2118 case 92: 2119 return (B(eax) || SH_C0(eax)); 2120 case 93: 2121 return (SH_C0(eax)); 2122 case 94: 2123 return (B(eax) || SH_C0(eax) || CG(eax)); 2124 case 95: 2125 #if !defined(__amd64) 2126 return (0); 2127 #else 2128 return (B(eax) || SH_C0(eax)); 2129 #endif 2130 case 96: 2131 return (B(eax) || SH_C0(eax) || CG(eax)); 2132 case 97: 2133 case 98: 2134 return (SH_C0(eax) || CG(eax)); 2135 case 99: 2136 return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax)); 2137 case 100: 2138 return (B(eax) || SH_C0(eax)); 2139 case 101: 2140 case 103: 2141 return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax)); 2142 case 104: 2143 return (SH_C0(eax) || CG(eax) || D0(eax)); 2144 case 105: 2145 case 106: 2146 case 107: 2147 return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax)); 2148 case 108: 2149 return (DH_CG(eax)); 2150 case 109: 
2151 return (SH_C0(eax) || CG(eax) || D0(eax));
2152 case 110:
2153 return (D0(eax) || EX(eax));
2154 case 111:
2155 return (CG(eax));
2156 case 112:
2157 return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
2158 case 113:
2159 return (eax == 0x20fc0);
2160 case 114:
2161 return (SH_E0(eax) || JH_E1(eax) || DH_E3(eax));
2162 case 115:
2163 return (SH_E0(eax) || JH_E1(eax));
2164 case 116:
2165 return (SH_E0(eax) || JH_E1(eax) || DH_E3(eax));
2166 case 117:
2167 return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
2168 case 118:
2169 return (SH_E0(eax) || JH_E1(eax) || SH_E4(eax) || BH_E4(eax) ||
2170 JH_E6(eax));
2171 case 121:
2172 return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
2173 case 122:
2174 return (1);
2175 case 123:
2176 return (JH_E1(eax) || BH_E4(eax) || JH_E6(eax));
2177 case 131:
2178 return (1);
2179 case 6336786:
2180 /*
2181 * Test for AdvPowerMgmtInfo.TscPStateInvariant
2182 * if this is a K8 family processor
2183 */
2184 if (CPI_FAMILY(cpi) == 0xf) {
2185 struct cpuid_regs regs;
2186 regs.cp_eax = 0x80000007;
2187 (void) __cpuid_insn(&regs);
2188 return (!(regs.cp_edx & 0x100));
2189 }
2190 return (0);
2191 case 6323525:
2192 return (((((eax >> 12) & 0xff00) + (eax & 0xf00)) |
2193 (((eax >> 4) & 0xf) | ((eax >> 12) & 0xf0))) < 0xf40);
2194
2195 default:
2196 return (-1);
2197 }
2198 }
2199
2200 static const char assoc_str[] = "associativity";
2201 static const char line_str[] = "line-size";
2202 static const char size_str[] = "size";
2203
2204 static void
2205 add_cache_prop(dev_info_t *devi, const char *label, const char *type,
2206 uint32_t val)
2207 {
2208 char buf[128];
2209
2210 /*
2211 * ndi_prop_update_int() is used because it is desirable for
2212 * DDI_PROP_HW_DEF and DDI_PROP_DONTSLEEP to be set.
2213 */
2214 if (snprintf(buf, sizeof (buf), "%s-%s", label, type) < sizeof (buf))
2215 (void) ndi_prop_update_int(DDI_DEV_T_NONE, devi, buf, val);
2216 }
2217
2218 /*
2219 * Intel-style cache/tlb description
2220 *
2221 * Standard cpuid level 2 gives a randomly ordered
2222 * selection of tags that index into a table that describes
2223 * cache and tlb properties.
2224 */
2225
2226 static const char l1_icache_str[] = "l1-icache";
2227 static const char l1_dcache_str[] = "l1-dcache";
2228 static const char l2_cache_str[] = "l2-cache";
2229 static const char itlb4k_str[] = "itlb-4K";
2230 static const char dtlb4k_str[] = "dtlb-4K";
2231 static const char itlb4M_str[] = "itlb-4M";
2232 static const char dtlb4M_str[] = "dtlb-4M";
2233 static const char itlb424_str[] = "itlb-4K-2M-4M";
2234 static const char dtlb44_str[] = "dtlb-4K-4M";
2235 static const char sl1_dcache_str[] = "sectored-l1-dcache";
2236 static const char sl2_cache_str[] = "sectored-l2-cache";
2237 static const char itrace_str[] = "itrace-cache";
2238 static const char sl3_cache_str[] = "sectored-l3-cache";
2239
2240 static const struct cachetab {
2241 uint8_t ct_code;
2242 uint8_t ct_assoc;
2243 uint16_t ct_line_size;
2244 size_t ct_size;
2245 const char *ct_label;
2246 } intel_ctab[] = {
2247 /* maintain descending order! */
2248 { 0xb3, 4, 0, 128, dtlb4k_str },
2249 { 0xb0, 4, 0, 128, itlb4k_str },
2250 { 0x87, 8, 64, 1024*1024, l2_cache_str},
2251 { 0x86, 4, 64, 512*1024, l2_cache_str},
2252 { 0x85, 8, 32, 2*1024*1024, l2_cache_str},
2253 { 0x84, 8, 32, 1024*1024, l2_cache_str},
2254 { 0x83, 8, 32, 512*1024, l2_cache_str},
2255 { 0x82, 8, 32, 256*1024, l2_cache_str},
2256 { 0x81, 8, 32, 128*1024, l2_cache_str}, /* suspect!
*/ 2257 { 0x7f, 2, 64, 512*1024, l2_cache_str}, 2258 { 0x7d, 8, 64, 2*1024*1024, sl2_cache_str}, 2259 { 0x7c, 8, 64, 1024*1024, sl2_cache_str}, 2260 { 0x7b, 8, 64, 512*1024, sl2_cache_str}, 2261 { 0x7a, 8, 64, 256*1024, sl2_cache_str}, 2262 { 0x79, 8, 64, 128*1024, sl2_cache_str}, 2263 { 0x78, 8, 64, 1024*1024, l2_cache_str}, 2264 { 0x72, 8, 0, 32*1024, itrace_str}, 2265 { 0x71, 8, 0, 16*1024, itrace_str}, 2266 { 0x70, 8, 0, 12*1024, itrace_str}, 2267 { 0x68, 4, 64, 32*1024, sl1_dcache_str}, 2268 { 0x67, 4, 64, 16*1024, sl1_dcache_str}, 2269 { 0x66, 4, 64, 8*1024, sl1_dcache_str}, 2270 { 0x60, 8, 64, 16*1024, sl1_dcache_str}, 2271 { 0x5d, 0, 0, 256, dtlb44_str}, 2272 { 0x5c, 0, 0, 128, dtlb44_str}, 2273 { 0x5b, 0, 0, 64, dtlb44_str}, 2274 { 0x52, 0, 0, 256, itlb424_str}, 2275 { 0x51, 0, 0, 128, itlb424_str}, 2276 { 0x50, 0, 0, 64, itlb424_str}, 2277 { 0x45, 4, 32, 2*1024*1024, l2_cache_str}, 2278 { 0x44, 4, 32, 1024*1024, l2_cache_str}, 2279 { 0x43, 4, 32, 512*1024, l2_cache_str}, 2280 { 0x42, 4, 32, 256*1024, l2_cache_str}, 2281 { 0x41, 4, 32, 128*1024, l2_cache_str}, 2282 { 0x3c, 4, 64, 256*1024, sl2_cache_str}, 2283 { 0x3b, 2, 64, 128*1024, sl2_cache_str}, 2284 { 0x39, 4, 64, 128*1024, sl2_cache_str}, 2285 { 0x30, 8, 64, 32*1024, l1_icache_str}, 2286 { 0x2c, 8, 64, 32*1024, l1_dcache_str}, 2287 { 0x29, 8, 64, 4096*1024, sl3_cache_str}, 2288 { 0x25, 8, 64, 2048*1024, sl3_cache_str}, 2289 { 0x23, 8, 64, 1024*1024, sl3_cache_str}, 2290 { 0x22, 4, 64, 512*1024, sl3_cache_str}, 2291 { 0x0c, 4, 32, 16*1024, l1_dcache_str}, 2292 { 0x0a, 2, 32, 8*1024, l1_dcache_str}, 2293 { 0x08, 4, 32, 16*1024, l1_icache_str}, 2294 { 0x06, 4, 32, 8*1024, l1_icache_str}, 2295 { 0x04, 4, 0, 8, dtlb4M_str}, 2296 { 0x03, 4, 0, 64, dtlb4k_str}, 2297 { 0x02, 4, 0, 2, itlb4M_str}, 2298 { 0x01, 4, 0, 32, itlb4k_str}, 2299 { 0 } 2300 }; 2301 2302 static const struct cachetab cyrix_ctab[] = { 2303 { 0x70, 4, 0, 32, "tlb-4K" }, 2304 { 0x80, 4, 16, 16*1024, "l1-cache" }, 2305 { 0 } 2306 }; 2307 2308 /* 2309 * Search a cache table for a matching entry 2310 */ 2311 static const struct cachetab * 2312 find_cacheent(const struct cachetab *ct, uint_t code) 2313 { 2314 if (code != 0) { 2315 for (; ct->ct_code != 0; ct++) 2316 if (ct->ct_code <= code) 2317 break; 2318 if (ct->ct_code == code) 2319 return (ct); 2320 } 2321 return (NULL); 2322 } 2323 2324 /* 2325 * Walk the cacheinfo descriptor, applying 'func' to every valid element 2326 * The walk is terminated if the walker returns non-zero. 2327 */ 2328 static void 2329 intel_walk_cacheinfo(struct cpuid_info *cpi, 2330 void *arg, int (*func)(void *, const struct cachetab *)) 2331 { 2332 const struct cachetab *ct; 2333 uint8_t *dp; 2334 int i; 2335 2336 if ((dp = cpi->cpi_cacheinfo) == NULL) 2337 return; 2338 for (i = 0; i < cpi->cpi_ncache; i++, dp++) 2339 if ((ct = find_cacheent(intel_ctab, *dp)) != NULL) { 2340 if (func(arg, ct) != 0) 2341 break; 2342 } 2343 } 2344 2345 /* 2346 * (Like the Intel one, except for Cyrix CPUs) 2347 */ 2348 static void 2349 cyrix_walk_cacheinfo(struct cpuid_info *cpi, 2350 void *arg, int (*func)(void *, const struct cachetab *)) 2351 { 2352 const struct cachetab *ct; 2353 uint8_t *dp; 2354 int i; 2355 2356 if ((dp = cpi->cpi_cacheinfo) == NULL) 2357 return; 2358 for (i = 0; i < cpi->cpi_ncache; i++, dp++) { 2359 /* 2360 * Search Cyrix-specific descriptor table first .. 2361 */ 2362 if ((ct = find_cacheent(cyrix_ctab, *dp)) != NULL) { 2363 if (func(arg, ct) != 0) 2364 break; 2365 continue; 2366 } 2367 /* 2368 * .. 
else fall back to the Intel one 2369 */ 2370 if ((ct = find_cacheent(intel_ctab, *dp)) != NULL) { 2371 if (func(arg, ct) != 0) 2372 break; 2373 continue; 2374 } 2375 } 2376 } 2377 2378 /* 2379 * A cacheinfo walker that adds associativity, line-size, and size properties 2380 * to the devinfo node it is passed as an argument. 2381 */ 2382 static int 2383 add_cacheent_props(void *arg, const struct cachetab *ct) 2384 { 2385 dev_info_t *devi = arg; 2386 2387 add_cache_prop(devi, ct->ct_label, assoc_str, ct->ct_assoc); 2388 if (ct->ct_line_size != 0) 2389 add_cache_prop(devi, ct->ct_label, line_str, 2390 ct->ct_line_size); 2391 add_cache_prop(devi, ct->ct_label, size_str, ct->ct_size); 2392 return (0); 2393 } 2394 2395 static const char fully_assoc[] = "fully-associative?"; 2396 2397 /* 2398 * AMD style cache/tlb description 2399 * 2400 * Extended functions 5 and 6 directly describe properties of 2401 * tlbs and various cache levels. 2402 */ 2403 static void 2404 add_amd_assoc(dev_info_t *devi, const char *label, uint_t assoc) 2405 { 2406 switch (assoc) { 2407 case 0: /* reserved; ignore */ 2408 break; 2409 default: 2410 add_cache_prop(devi, label, assoc_str, assoc); 2411 break; 2412 case 0xff: 2413 add_cache_prop(devi, label, fully_assoc, 1); 2414 break; 2415 } 2416 } 2417 2418 static void 2419 add_amd_tlb(dev_info_t *devi, const char *label, uint_t assoc, uint_t size) 2420 { 2421 if (size == 0) 2422 return; 2423 add_cache_prop(devi, label, size_str, size); 2424 add_amd_assoc(devi, label, assoc); 2425 } 2426 2427 static void 2428 add_amd_cache(dev_info_t *devi, const char *label, 2429 uint_t size, uint_t assoc, uint_t lines_per_tag, uint_t line_size) 2430 { 2431 if (size == 0 || line_size == 0) 2432 return; 2433 add_amd_assoc(devi, label, assoc); 2434 /* 2435 * Most AMD parts have a sectored cache. Multiple cache lines are 2436 * associated with each tag. A sector consists of all cache lines 2437 * associated with a tag. For example, the AMD K6-III has a sector 2438 * size of 2 cache lines per tag. 
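 *
 * For example (illustrative numbers only): a 64 Kbyte, 2-way cache
 * with 64-byte lines and one line per tag would arrive here as
 * size = 64, assoc = 2, lines_per_tag = 1 and line_size = 64, and
 * would be published below as a 65536-byte cache with a 64-byte
 * line-size and an associativity of 2.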
2439 */ 2440 if (lines_per_tag != 0) 2441 add_cache_prop(devi, label, "lines-per-tag", lines_per_tag); 2442 add_cache_prop(devi, label, line_str, line_size); 2443 add_cache_prop(devi, label, size_str, size * 1024); 2444 } 2445 2446 static void 2447 add_amd_l2_assoc(dev_info_t *devi, const char *label, uint_t assoc) 2448 { 2449 switch (assoc) { 2450 case 0: /* off */ 2451 break; 2452 case 1: 2453 case 2: 2454 case 4: 2455 add_cache_prop(devi, label, assoc_str, assoc); 2456 break; 2457 case 6: 2458 add_cache_prop(devi, label, assoc_str, 8); 2459 break; 2460 case 8: 2461 add_cache_prop(devi, label, assoc_str, 16); 2462 break; 2463 case 0xf: 2464 add_cache_prop(devi, label, fully_assoc, 1); 2465 break; 2466 default: /* reserved; ignore */ 2467 break; 2468 } 2469 } 2470 2471 static void 2472 add_amd_l2_tlb(dev_info_t *devi, const char *label, uint_t assoc, uint_t size) 2473 { 2474 if (size == 0 || assoc == 0) 2475 return; 2476 add_amd_l2_assoc(devi, label, assoc); 2477 add_cache_prop(devi, label, size_str, size); 2478 } 2479 2480 static void 2481 add_amd_l2_cache(dev_info_t *devi, const char *label, 2482 uint_t size, uint_t assoc, uint_t lines_per_tag, uint_t line_size) 2483 { 2484 if (size == 0 || assoc == 0 || line_size == 0) 2485 return; 2486 add_amd_l2_assoc(devi, label, assoc); 2487 if (lines_per_tag != 0) 2488 add_cache_prop(devi, label, "lines-per-tag", lines_per_tag); 2489 add_cache_prop(devi, label, line_str, line_size); 2490 add_cache_prop(devi, label, size_str, size * 1024); 2491 } 2492 2493 static void 2494 amd_cache_info(struct cpuid_info *cpi, dev_info_t *devi) 2495 { 2496 struct cpuid_regs *cp; 2497 2498 if (cpi->cpi_xmaxeax < 0x80000005) 2499 return; 2500 cp = &cpi->cpi_extd[5]; 2501 2502 /* 2503 * 4M/2M L1 TLB configuration 2504 * 2505 * We report the size for 2M pages because AMD uses two 2506 * TLB entries for one 4M page. 2507 */ 2508 add_amd_tlb(devi, "dtlb-2M", 2509 BITX(cp->cp_eax, 31, 24), BITX(cp->cp_eax, 23, 16)); 2510 add_amd_tlb(devi, "itlb-2M", 2511 BITX(cp->cp_eax, 15, 8), BITX(cp->cp_eax, 7, 0)); 2512 2513 /* 2514 * 4K L1 TLB configuration 2515 */ 2516 2517 switch (cpi->cpi_vendor) { 2518 uint_t nentries; 2519 case X86_VENDOR_TM: 2520 if (cpi->cpi_family >= 5) { 2521 /* 2522 * Crusoe processors have 256 TLB entries, but 2523 * cpuid data format constrains them to only 2524 * reporting 255 of them. 
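 * (Hence the check below, which promotes a reported value
 * of 255 back up to 256.)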
2525 */
2526 if ((nentries = BITX(cp->cp_ebx, 23, 16)) == 255)
2527 nentries = 256;
2528 /*
2529 * Crusoe processors also have a unified TLB
2530 */
2531 add_amd_tlb(devi, "tlb-4K", BITX(cp->cp_ebx, 31, 24),
2532 nentries);
2533 break;
2534 }
2535 /*FALLTHROUGH*/
2536 default:
2537 add_amd_tlb(devi, itlb4k_str,
2538 BITX(cp->cp_ebx, 31, 24), BITX(cp->cp_ebx, 23, 16));
2539 add_amd_tlb(devi, dtlb4k_str,
2540 BITX(cp->cp_ebx, 15, 8), BITX(cp->cp_ebx, 7, 0));
2541 break;
2542 }
2543
2544 /*
2545 * data L1 cache configuration
2546 */
2547
2548 add_amd_cache(devi, l1_dcache_str,
2549 BITX(cp->cp_ecx, 31, 24), BITX(cp->cp_ecx, 23, 16),
2550 BITX(cp->cp_ecx, 15, 8), BITX(cp->cp_ecx, 7, 0));
2551
2552 /*
2553 * code L1 cache configuration
2554 */
2555
2556 add_amd_cache(devi, l1_icache_str,
2557 BITX(cp->cp_edx, 31, 24), BITX(cp->cp_edx, 23, 16),
2558 BITX(cp->cp_edx, 15, 8), BITX(cp->cp_edx, 7, 0));
2559
2560 if (cpi->cpi_xmaxeax < 0x80000006)
2561 return;
2562 cp = &cpi->cpi_extd[6];
2563
2564 /* Check for a unified L2 TLB for large pages */
2565
2566 if (BITX(cp->cp_eax, 31, 16) == 0)
2567 add_amd_l2_tlb(devi, "l2-tlb-2M",
2568 BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
2569 else {
2570 add_amd_l2_tlb(devi, "l2-dtlb-2M",
2571 BITX(cp->cp_eax, 31, 28), BITX(cp->cp_eax, 27, 16));
2572 add_amd_l2_tlb(devi, "l2-itlb-2M",
2573 BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
2574 }
2575
2576 /* Check for a unified L2 TLB for 4K pages */
2577
2578 if (BITX(cp->cp_ebx, 31, 16) == 0) {
2579 add_amd_l2_tlb(devi, "l2-tlb-4K",
2580 BITX(cp->cp_ebx, 15, 12), BITX(cp->cp_ebx, 11, 0));
2581 } else {
2582 add_amd_l2_tlb(devi, "l2-dtlb-4K",
2583 BITX(cp->cp_ebx, 31, 28), BITX(cp->cp_ebx, 27, 16));
2584 add_amd_l2_tlb(devi, "l2-itlb-4K",
2585 BITX(cp->cp_ebx, 15, 12), BITX(cp->cp_ebx, 11, 0));
2586 }
2587
2588 add_amd_l2_cache(devi, l2_cache_str,
2589 BITX(cp->cp_ecx, 31, 16), BITX(cp->cp_ecx, 15, 12),
2590 BITX(cp->cp_ecx, 11, 8), BITX(cp->cp_ecx, 7, 0));
2591 }
2592
2593 /*
2594 * There are two basic ways that the x86 world describes its cache
2595 * and tlb architecture - Intel's way and AMD's way.
2596 *
2597 * Return which flavor of cache architecture we should use
2598 */
2599 static int
2600 x86_which_cacheinfo(struct cpuid_info *cpi)
2601 {
2602 switch (cpi->cpi_vendor) {
2603 case X86_VENDOR_Intel:
2604 if (cpi->cpi_maxeax >= 2)
2605 return (X86_VENDOR_Intel);
2606 break;
2607 case X86_VENDOR_AMD:
2608 /*
2609 * The K5 model 1 was the first part from AMD that reported
2610 * cache sizes via extended cpuid functions.
2611 */
2612 if (cpi->cpi_family > 5 ||
2613 (cpi->cpi_family == 5 && cpi->cpi_model >= 1))
2614 return (X86_VENDOR_AMD);
2615 break;
2616 case X86_VENDOR_TM:
2617 if (cpi->cpi_family >= 5)
2618 return (X86_VENDOR_AMD);
2619 /*FALLTHROUGH*/
2620 default:
2621 /*
2622 * If they have extended CPU data for 0x80000005
2623 * then we assume they have AMD-format cache
2624 * information.
2625 *
2626 * If not, and the vendor happens to be Cyrix,
2627 * then try our Cyrix-specific handler.
2628 *
2629 * If we're not Cyrix, then assume we're using Intel's
2630 * table-driven format instead.
2631 */
2632 if (cpi->cpi_xmaxeax >= 0x80000005)
2633 return (X86_VENDOR_AMD);
2634 else if (cpi->cpi_vendor == X86_VENDOR_Cyrix)
2635 return (X86_VENDOR_Cyrix);
2636 else if (cpi->cpi_maxeax >= 2)
2637 return (X86_VENDOR_Intel);
2638 break;
2639 }
2640 return (-1);
2641 }
2642
2643 /*
2644 * create a node for the given cpu under the prom root node.
2645 * Also, create a cpu node in the device tree.
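 *
 * The node is decorated with properties derived from the cpuid data
 * gathered in the earlier passes: "vendor-id", "family", "cpu-model",
 * "stepping-id", "brand-string", and the cache/tlb properties
 * published by the cacheinfo walkers above.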
2646 */ 2647 static dev_info_t *cpu_nex_devi = NULL; 2648 static kmutex_t cpu_node_lock; 2649 2650 /* 2651 * Called from post_startup() and mp_startup() 2652 */ 2653 void 2654 add_cpunode2devtree(processorid_t cpu_id, struct cpuid_info *cpi) 2655 { 2656 dev_info_t *cpu_devi; 2657 int create; 2658 2659 mutex_enter(&cpu_node_lock); 2660 2661 /* 2662 * create a nexus node for all cpus identified as 'cpu_id' under 2663 * the root node. 2664 */ 2665 if (cpu_nex_devi == NULL) { 2666 if (ndi_devi_alloc(ddi_root_node(), "cpus", 2667 (pnode_t)DEVI_SID_NODEID, &cpu_nex_devi) != NDI_SUCCESS) { 2668 mutex_exit(&cpu_node_lock); 2669 return; 2670 } 2671 (void) ndi_devi_online(cpu_nex_devi, 0); 2672 } 2673 2674 /* 2675 * create a child node for cpu identified as 'cpu_id' 2676 */ 2677 cpu_devi = ddi_add_child(cpu_nex_devi, "cpu", DEVI_SID_NODEID, 2678 cpu_id); 2679 if (cpu_devi == NULL) { 2680 mutex_exit(&cpu_node_lock); 2681 return; 2682 } 2683 2684 /* device_type */ 2685 2686 (void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi, 2687 "device_type", "cpu"); 2688 2689 /* reg */ 2690 2691 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 2692 "reg", cpu_id); 2693 2694 /* cpu-mhz, and clock-frequency */ 2695 2696 if (cpu_freq > 0) { 2697 long long mul; 2698 2699 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 2700 "cpu-mhz", cpu_freq); 2701 2702 if ((mul = cpu_freq * 1000000LL) <= INT_MAX) 2703 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 2704 "clock-frequency", (int)mul); 2705 } 2706 2707 (void) ndi_devi_online(cpu_devi, 0); 2708 2709 if ((x86_feature & X86_CPUID) == 0) { 2710 mutex_exit(&cpu_node_lock); 2711 return; 2712 } 2713 2714 /* vendor-id */ 2715 2716 (void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi, 2717 "vendor-id", cpi->cpi_vendorstr); 2718 2719 if (cpi->cpi_maxeax == 0) { 2720 mutex_exit(&cpu_node_lock); 2721 return; 2722 } 2723 2724 /* 2725 * family, model, and step 2726 */ 2727 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 2728 "family", CPI_FAMILY(cpi)); 2729 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 2730 "cpu-model", CPI_MODEL(cpi)); 2731 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 2732 "stepping-id", CPI_STEP(cpi)); 2733 2734 /* type */ 2735 2736 switch (cpi->cpi_vendor) { 2737 case X86_VENDOR_Intel: 2738 create = 1; 2739 break; 2740 default: 2741 create = 0; 2742 break; 2743 } 2744 if (create) 2745 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 2746 "type", CPI_TYPE(cpi)); 2747 2748 /* ext-family */ 2749 2750 switch (cpi->cpi_vendor) { 2751 case X86_VENDOR_Intel: 2752 case X86_VENDOR_AMD: 2753 create = cpi->cpi_family >= 0xf; 2754 break; 2755 default: 2756 create = 0; 2757 break; 2758 } 2759 if (create) 2760 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 2761 "ext-family", CPI_FAMILY_XTD(cpi)); 2762 2763 /* ext-model */ 2764 2765 switch (cpi->cpi_vendor) { 2766 case X86_VENDOR_Intel: 2767 create = CPI_MODEL(cpi) == 0xf; 2768 break; 2769 case X86_VENDOR_AMD: 2770 create = CPI_FAMILY(cpi) == 0xf; 2771 break; 2772 default: 2773 create = 0; 2774 break; 2775 } 2776 if (create) 2777 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 2778 "ext-model", CPI_MODEL_XTD(cpi)); 2779 2780 /* generation */ 2781 2782 switch (cpi->cpi_vendor) { 2783 case X86_VENDOR_AMD: 2784 /* 2785 * AMD K5 model 1 was the first part to support this 2786 */ 2787 create = cpi->cpi_xmaxeax >= 0x80000001; 2788 break; 2789 default: 2790 create = 0; 2791 break; 2792 } 2793 if (create) 2794 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi, 2795 "generation", 
BITX((cpi)->cpi_extd[1].cp_eax, 11, 8));
2796
2797 /* brand-id */
2798
2799 switch (cpi->cpi_vendor) {
2800 case X86_VENDOR_Intel:
2801 /*
2802 * brand id first appeared on Pentium III Xeon model 8,
2803 * and Celeron model 8 processors and Opteron
2804 */
2805 create = cpi->cpi_family > 6 ||
2806 (cpi->cpi_family == 6 && cpi->cpi_model >= 8);
2807 break;
2808 case X86_VENDOR_AMD:
2809 create = cpi->cpi_family >= 0xf;
2810 break;
2811 default:
2812 create = 0;
2813 break;
2814 }
2815 if (create && cpi->cpi_brandid != 0) {
2816 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
2817 "brand-id", cpi->cpi_brandid);
2818 }
2819
2820 /* chunks, and apic-id */
2821
2822 switch (cpi->cpi_vendor) {
2823 /*
2824 * first available on Pentium IV and Opteron (K8)
2825 */
2826 case X86_VENDOR_Intel:
2827 create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf;
2828 break;
2829 case X86_VENDOR_AMD:
2830 create = cpi->cpi_family >= 0xf;
2831 break;
2832 default:
2833 create = 0;
2834 break;
2835 }
2836 if (create) {
2837 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
2838 "chunks", CPI_CHUNKS(cpi));
2839 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
2840 "apic-id", CPI_APIC_ID(cpi));
2841 if (cpi->cpi_chipid >= 0) {
2842 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
2843 "chip#", cpi->cpi_chipid);
2844 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
2845 "clog#", cpi->cpi_clogid);
2846 }
2847 }
2848
2849 /* cpuid-features */
2850
2851 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
2852 "cpuid-features", CPI_FEATURES_EDX(cpi));
2853
2854
2855 /* cpuid-features-ecx */
2856
2857 switch (cpi->cpi_vendor) {
2858 case X86_VENDOR_Intel:
2859 create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf;
2860 break;
2861 default:
2862 create = 0;
2863 break;
2864 }
2865 if (create)
2866 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
2867 "cpuid-features-ecx", CPI_FEATURES_ECX(cpi));
2868
2869 /* ext-cpuid-features */
2870
2871 switch (cpi->cpi_vendor) {
2872 case X86_VENDOR_Intel:
2873 case X86_VENDOR_AMD:
2874 case X86_VENDOR_Cyrix:
2875 case X86_VENDOR_TM:
2876 case X86_VENDOR_Centaur:
2877 create = cpi->cpi_xmaxeax >= 0x80000001;
2878 break;
2879 default:
2880 create = 0;
2881 break;
2882 }
2883 if (create) {
2884 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
2885 "ext-cpuid-features", CPI_FEATURES_XTD_EDX(cpi));
2886 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
2887 "ext-cpuid-features-ecx", CPI_FEATURES_XTD_ECX(cpi));
2888 }
2889
2890 /*
2891 * Brand String first appeared in Intel Pentium IV, AMD K5
2892 * model 1, and Cyrix GXm. On earlier models we try to
2893 * simulate something similar .. so this string should always
2894 * say -something- about the processor, however lame.
2895 */ 2896 (void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi, 2897 "brand-string", cpi->cpi_brandstr); 2898 2899 /* 2900 * Finally, cache and tlb information 2901 */ 2902 switch (x86_which_cacheinfo(cpi)) { 2903 case X86_VENDOR_Intel: 2904 intel_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props); 2905 break; 2906 case X86_VENDOR_Cyrix: 2907 cyrix_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props); 2908 break; 2909 case X86_VENDOR_AMD: 2910 amd_cache_info(cpi, cpu_devi); 2911 break; 2912 default: 2913 break; 2914 } 2915 2916 mutex_exit(&cpu_node_lock); 2917 } 2918 2919 struct l2info { 2920 int *l2i_csz; 2921 int *l2i_lsz; 2922 int *l2i_assoc; 2923 int l2i_ret; 2924 }; 2925 2926 /* 2927 * A cacheinfo walker that fetches the size, line-size and associativity 2928 * of the L2 cache 2929 */ 2930 static int 2931 intel_l2cinfo(void *arg, const struct cachetab *ct) 2932 { 2933 struct l2info *l2i = arg; 2934 int *ip; 2935 2936 if (ct->ct_label != l2_cache_str && 2937 ct->ct_label != sl2_cache_str) 2938 return (0); /* not an L2 -- keep walking */ 2939 2940 if ((ip = l2i->l2i_csz) != NULL) 2941 *ip = ct->ct_size; 2942 if ((ip = l2i->l2i_lsz) != NULL) 2943 *ip = ct->ct_line_size; 2944 if ((ip = l2i->l2i_assoc) != NULL) 2945 *ip = ct->ct_assoc; 2946 l2i->l2i_ret = ct->ct_size; 2947 return (1); /* was an L2 -- terminate walk */ 2948 } 2949 2950 static void 2951 amd_l2cacheinfo(struct cpuid_info *cpi, struct l2info *l2i) 2952 { 2953 struct cpuid_regs *cp; 2954 uint_t size, assoc; 2955 int *ip; 2956 2957 if (cpi->cpi_xmaxeax < 0x80000006) 2958 return; 2959 cp = &cpi->cpi_extd[6]; 2960 2961 if ((assoc = BITX(cp->cp_ecx, 15, 12)) != 0 && 2962 (size = BITX(cp->cp_ecx, 31, 16)) != 0) { 2963 uint_t cachesz = size * 1024; 2964 2965 2966 if ((ip = l2i->l2i_csz) != NULL) 2967 *ip = cachesz; 2968 if ((ip = l2i->l2i_lsz) != NULL) 2969 *ip = BITX(cp->cp_ecx, 7, 0); 2970 if ((ip = l2i->l2i_assoc) != NULL) 2971 *ip = assoc; 2972 l2i->l2i_ret = cachesz; 2973 } 2974 } 2975 2976 int 2977 getl2cacheinfo(cpu_t *cpu, int *csz, int *lsz, int *assoc) 2978 { 2979 struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi; 2980 struct l2info __l2info, *l2i = &__l2info; 2981 2982 l2i->l2i_csz = csz; 2983 l2i->l2i_lsz = lsz; 2984 l2i->l2i_assoc = assoc; 2985 l2i->l2i_ret = -1; 2986 2987 switch (x86_which_cacheinfo(cpi)) { 2988 case X86_VENDOR_Intel: 2989 intel_walk_cacheinfo(cpi, l2i, intel_l2cinfo); 2990 break; 2991 case X86_VENDOR_Cyrix: 2992 cyrix_walk_cacheinfo(cpi, l2i, intel_l2cinfo); 2993 break; 2994 case X86_VENDOR_AMD: 2995 amd_l2cacheinfo(cpi, l2i); 2996 break; 2997 default: 2998 break; 2999 } 3000 return (l2i->l2i_ret); 3001 } 3002
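/*
 * Usage sketch (illustrative only; the caller and message below are
 * hypothetical and not part of this file): getl2cacheinfo() fills in
 * whichever of its csz/lsz/assoc pointers are non-NULL and returns
 * the L2 size in bytes, or -1 if no L2 description was found.
 */
#if 0
static void
report_l2cache(cpu_t *cp)
{
	int csz, lsz, assoc;

	if (getl2cacheinfo(cp, &csz, &lsz, &assoc) > 0)
		cmn_err(CE_CONT, "?L2 cache: %d bytes, %d byte lines, "
		    "associativity %d\n", csz, lsz, assoc);
}
#endif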