/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * CPU Module Interface - hardware abstraction.
 */

#ifdef __xpv
#include <sys/xpv_user.h>
#endif

#include <sys/types.h>
#include <sys/cpu_module.h>
#include <sys/kmem.h>
#include <sys/x86_archext.h>
#include <sys/cpuvar.h>
#include <sys/ksynch.h>
#include <sys/x_call.h>
#include <sys/pghw.h>
#include <sys/pci_cfgspace.h>
#include <sys/archsystm.h>
#include <sys/ontrap.h>
#include <sys/controlregs.h>
#include <sys/sunddi.h>
#include <sys/trap.h>
#include <sys/mca_x86.h>
#include <sys/processor.h>
#include <sys/cmn_err.h>
#include <sys/nvpair.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/fm/smb/fmsmb.h>
#include <sys/cpu_module_impl.h>

/*
 * Variable which determines if the SMBIOS supports x86 generic topology, or
 * if legacy topology enumeration will occur.
 */
extern int x86gentopo_legacy;

/*
 * Outside of this file consumers use the opaque cmi_hdl_t.  This
 * definition is duplicated in the generic_cpu mdb module, so keep
 * them in-sync when making changes.
 */
typedef struct cmi_hdl_impl {
	enum cmi_hdl_class cmih_class;		/* Handle nature */
	const struct cmi_hdl_ops *cmih_ops;	/* Operations vector */
	uint_t cmih_chipid;			/* Chipid of cpu resource */
	uint_t cmih_procnodeid;			/* Nodeid of cpu resource */
	uint_t cmih_coreid;			/* Core within die */
	uint_t cmih_strandid;			/* Thread within core */
	uint_t cmih_procnodes_per_pkg;		/* Nodes in a processor */
	boolean_t cmih_mstrand;			/* cores are multithreaded */
	volatile uint32_t *cmih_refcntp;	/* Reference count pointer */
	uint64_t cmih_msrsrc;			/* MSR data source flags */
	void *cmih_hdlpriv;			/* cmi_hw.c private data */
	void *cmih_spec;			/* cmi_hdl_{set,get}_specific */
	void *cmih_cmi;				/* cpu mod control structure */
	void *cmih_cmidata;			/* cpu mod private data */
	const struct cmi_mc_ops *cmih_mcops;	/* Memory-controller ops */
	void *cmih_mcdata;			/* Memory-controller data */
	uint64_t cmih_flags;			/* See CMIH_F_* below */
	uint16_t cmih_smbiosid;			/* SMBIOS Type 4 struct ID */
	uint_t cmih_smb_chipid;			/* SMBIOS factored chipid */
	nvlist_t *cmih_smb_bboard;		/* SMBIOS bboard nvlist */
} cmi_hdl_impl_t;

#define	IMPLHDL(ophdl)	((cmi_hdl_impl_t *)ophdl)
#define	HDLOPS(hdl)	((hdl)->cmih_ops)

#define	CMIH_F_INJACTV		0x1ULL

/*
 * Ops structure for handle operations.
 */
struct cmi_hdl_ops {
	/*
	 * These ops are required in an implementation.
	 */
	uint_t (*cmio_vendor)(cmi_hdl_impl_t *);
	const char *(*cmio_vendorstr)(cmi_hdl_impl_t *);
	uint_t (*cmio_family)(cmi_hdl_impl_t *);
	uint_t (*cmio_model)(cmi_hdl_impl_t *);
	uint_t (*cmio_stepping)(cmi_hdl_impl_t *);
	uint_t (*cmio_chipid)(cmi_hdl_impl_t *);
	uint_t (*cmio_procnodeid)(cmi_hdl_impl_t *);
	uint_t (*cmio_coreid)(cmi_hdl_impl_t *);
	uint_t (*cmio_strandid)(cmi_hdl_impl_t *);
	uint_t (*cmio_procnodes_per_pkg)(cmi_hdl_impl_t *);
	uint_t (*cmio_strand_apicid)(cmi_hdl_impl_t *);
	uint32_t (*cmio_chiprev)(cmi_hdl_impl_t *);
	const char *(*cmio_chiprevstr)(cmi_hdl_impl_t *);
	uint32_t (*cmio_getsockettype)(cmi_hdl_impl_t *);
	const char *(*cmio_getsocketstr)(cmi_hdl_impl_t *);

	id_t (*cmio_logical_id)(cmi_hdl_impl_t *);
	/*
	 * These ops are optional in an implementation.
	 */
	ulong_t (*cmio_getcr4)(cmi_hdl_impl_t *);
	void (*cmio_setcr4)(cmi_hdl_impl_t *, ulong_t);
	cmi_errno_t (*cmio_rdmsr)(cmi_hdl_impl_t *, uint_t, uint64_t *);
	cmi_errno_t (*cmio_wrmsr)(cmi_hdl_impl_t *, uint_t, uint64_t);
	cmi_errno_t (*cmio_msrinterpose)(cmi_hdl_impl_t *, uint_t, uint64_t);
	void (*cmio_int)(cmi_hdl_impl_t *, int);
	int (*cmio_online)(cmi_hdl_impl_t *, int, int *);
	uint16_t (*cmio_smbiosid)(cmi_hdl_impl_t *);
	uint_t (*cmio_smb_chipid)(cmi_hdl_impl_t *);
	nvlist_t *(*cmio_smb_bboard)(cmi_hdl_impl_t *);
};

static const struct cmi_hdl_ops cmi_hdl_ops;

/*
 * Handles are looked up from contexts such as polling, injection etc
 * where the context is reasonably well defined (although a poller could
 * interrupt any old thread holding any old lock).  They are also looked
 * up by machine check handlers, which may strike at inconvenient times
 * such as during handle initialization or destruction or during handle
 * lookup (which the #MC handler itself will also have to perform).
 *
 * So keeping handles in a linked list makes locking difficult when we
 * consider #MC handlers.  Our solution is to have a look-up table indexed
 * by that which uniquely identifies a handle - chip/core/strand id -
 * with each entry a structure including a pointer to a handle
 * structure for the resource, and a reference count for the handle.
 * Reference counts are modified atomically.  The public cmi_hdl_hold
 * always succeeds because this can only be used after handle creation
 * and before the call to destruct, so the hold count is already at least
 * one.  In other functions that lookup a handle (cmi_hdl_lookup,
 * cmi_hdl_any) we must be certain that the count has not already
 * decremented to zero before applying our hold.
 *
 * The table is an array of maximum number of chips defined in
 * CMI_CHIPID_ARR_SZ indexed by the chip id.  If the chip is not present,
 * the entry is NULL.  Each entry is a pointer to another array which
 * contains a list of all strands of the chip.  This first level table is
 * allocated when first we want to populate an entry.  The size of the
 * latter (per chip) table is CMI_MAX_STRANDS_PER_CHIP and it is populated
 * when one of its cpus starts.
 *
 * Ideally we should only allocate to the actual number of chips, cores
 * per chip and strands per core.  The number of chips is not available
 * until all of them are passed.  The number of cores and strands are
 * partially available.  For now we stick with the above approach.
 */
#define	CMI_MAX_CHIPID_NBITS		6	/* max chipid of 63 */
#define	CMI_MAX_CORES_PER_CHIP_NBITS	4	/* 16 cores per chip max */
#define	CMI_MAX_STRANDS_PER_CORE_NBITS	3	/* 8 strands per core max */

#define	CMI_MAX_CHIPID		((1 << (CMI_MAX_CHIPID_NBITS)) - 1)
#define	CMI_MAX_CORES_PER_CHIP	(1 << CMI_MAX_CORES_PER_CHIP_NBITS)
#define	CMI_MAX_STRANDS_PER_CORE	(1 << CMI_MAX_STRANDS_PER_CORE_NBITS)
#define	CMI_MAX_STRANDS_PER_CHIP	(CMI_MAX_CORES_PER_CHIP * \
					    CMI_MAX_STRANDS_PER_CORE)

/*
 * Handle array indexing within a per-chip table
 *	[6:3] = Core in package,
 *	[2:0] = Strand in core,
 */
#define	CMI_HDL_ARR_IDX_CORE(coreid) \
	(((coreid) & (CMI_MAX_CORES_PER_CHIP - 1)) << \
	    CMI_MAX_STRANDS_PER_CORE_NBITS)

#define	CMI_HDL_ARR_IDX_STRAND(strandid) \
	(((strandid) & (CMI_MAX_STRANDS_PER_CORE - 1)))

#define	CMI_HDL_ARR_IDX(coreid, strandid) \
	(CMI_HDL_ARR_IDX_CORE(coreid) | CMI_HDL_ARR_IDX_STRAND(strandid))
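
/*
 * A worked example under the default limits above: coreid 5, strandid 3
 * gives CMI_HDL_ARR_IDX(5, 3) == (5 << 3) | 3 == 43, so that cpu's handle
 * entry lives at slot 43 of its chip's 128-entry
 * (CMI_MAX_STRANDS_PER_CHIP) table.  Note that ids wider than the
 * configured bit widths are simply masked off here; the explicit range
 * checks are performed by cmi_hdl_create() and cmi_hdl_lookup().
 */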

#define	CMI_CHIPID_ARR_SZ		(1 << CMI_MAX_CHIPID_NBITS)

typedef struct cmi_hdl_ent {
	volatile uint32_t cmae_refcnt;
	cmi_hdl_impl_t *cmae_hdlp;
} cmi_hdl_ent_t;

static cmi_hdl_ent_t *cmi_chip_tab[CMI_CHIPID_ARR_SZ];

/*
 * Controls where we will source PCI config space data.
 */
#define	CMI_PCICFG_FLAG_RD_HWOK		0x0001
#define	CMI_PCICFG_FLAG_RD_INTERPOSEOK	0x0002
#define	CMI_PCICFG_FLAG_WR_HWOK		0x0004
#define	CMI_PCICFG_FLAG_WR_INTERPOSEOK	0x0008

static uint64_t cmi_pcicfg_flags =
    CMI_PCICFG_FLAG_RD_HWOK | CMI_PCICFG_FLAG_RD_INTERPOSEOK |
    CMI_PCICFG_FLAG_WR_HWOK | CMI_PCICFG_FLAG_WR_INTERPOSEOK;

/*
 * The flags for individual cpus are kept in their per-cpu handle
 * cmih_msrsrc.
 */
#define	CMI_MSR_FLAG_RD_HWOK		0x0001
#define	CMI_MSR_FLAG_RD_INTERPOSEOK	0x0002
#define	CMI_MSR_FLAG_WR_HWOK		0x0004
#define	CMI_MSR_FLAG_WR_INTERPOSEOK	0x0008

int cmi_call_func_ntv_tries = 3;

static cmi_errno_t
call_func_ntv(int cpuid, xc_func_t func, xc_arg_t arg1, xc_arg_t arg2)
{
	cmi_errno_t rc = -1;
	int i;

	kpreempt_disable();

	if (CPU->cpu_id == cpuid) {
		(*func)(arg1, arg2, (xc_arg_t)&rc);
	} else {
		/*
		 * This should not happen for a #MC trap or a poll, so
		 * this is likely an error injection or similar.
		 * We will try to cross call with xc_priority - we
		 * can't guarantee success with xc_call because
		 * the interrupt code in the case of a #MC may
		 * already hold the xc mutex.
		 */
		for (i = 0; i < cmi_call_func_ntv_tries; i++) {
			cpuset_t cpus;

			CPUSET_ONLY(cpus, cpuid);
			xc_priority(arg1, arg2, (xc_arg_t)&rc,
			    CPUSET2BV(cpus), func);
			if (rc != -1)
				break;

			DELAY(1);
		}
	}

	kpreempt_enable();

	return (rc != -1 ? rc : CMIERR_DEADLOCK);
}
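
/*
 * A minimal sketch of the cross-call convention call_func_ntv() relies
 * on: the target function receives its status pointer as arg3 and must
 * store a cmi_errno_t through it, e.g.
 *
 *	static int
 *	example_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
 *	{
 *		*(cmi_errno_t *)arg3 = CMI_SUCCESS;
 *		return (0);
 *	}
 *
 * (example_xc is hypothetical; the ntv_*_xc routines below are the real
 * instances.)  A result still at -1 means the cross call never ran,
 * which call_func_ntv() reports as CMIERR_DEADLOCK once the retry loop
 * is exhausted.
 */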

static uint64_t injcnt;

void
cmi_hdl_inj_begin(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	if (hdl != NULL)
		hdl->cmih_flags |= CMIH_F_INJACTV;
	if (injcnt++ == 0) {
		cmn_err(CE_NOTE, "Hardware error injection/simulation "
		    "activity noted");
	}
}

void
cmi_hdl_inj_end(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	ASSERT(hdl == NULL || hdl->cmih_flags & CMIH_F_INJACTV);
	if (hdl != NULL)
		hdl->cmih_flags &= ~CMIH_F_INJACTV;
}

boolean_t
cmi_inj_tainted(void)
{
	return (injcnt != 0 ? B_TRUE : B_FALSE);
}

/*
 *	 =======================================================
 *	|	MSR Interposition				|
 *	|	-----------------				|
 *	|							|
 *	 -------------------------------------------------------
 */

#define	CMI_MSRI_HASHSZ		16
#define	CMI_MSRI_HASHIDX(hdl, msr) \
	((((uintptr_t)(hdl) >> 3) + (msr)) % CMI_MSRI_HASHSZ)

struct cmi_msri_bkt {
	kmutex_t msrib_lock;
	struct cmi_msri_hashent *msrib_head;
};

struct cmi_msri_hashent {
	struct cmi_msri_hashent *msrie_next;
	struct cmi_msri_hashent *msrie_prev;
	cmi_hdl_impl_t *msrie_hdl;
	uint_t msrie_msrnum;
	uint64_t msrie_msrval;
};

#define	CMI_MSRI_MATCH(ent, hdl, req_msr) \
	((ent)->msrie_hdl == (hdl) && (ent)->msrie_msrnum == (req_msr))

static struct cmi_msri_bkt msrihash[CMI_MSRI_HASHSZ];

static void
msri_addent(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
{
	int idx = CMI_MSRI_HASHIDX(hdl, msr);
	struct cmi_msri_bkt *hbp = &msrihash[idx];
	struct cmi_msri_hashent *hep;

	mutex_enter(&hbp->msrib_lock);

	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
		if (CMI_MSRI_MATCH(hep, hdl, msr))
			break;
	}

	if (hep != NULL) {
		hep->msrie_msrval = val;
	} else {
		hep = kmem_alloc(sizeof (*hep), KM_SLEEP);
		hep->msrie_hdl = hdl;
		hep->msrie_msrnum = msr;
		hep->msrie_msrval = val;

		if (hbp->msrib_head != NULL)
			hbp->msrib_head->msrie_prev = hep;
		hep->msrie_next = hbp->msrib_head;
		hep->msrie_prev = NULL;
		hbp->msrib_head = hep;
	}

	mutex_exit(&hbp->msrib_lock);
}

/*
 * Look for a match for the given handle and msr.  Return 1 with valp
 * filled if a match is found, otherwise return 0 with valp untouched.
 */
static int
msri_lookup(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
{
	int idx = CMI_MSRI_HASHIDX(hdl, msr);
	struct cmi_msri_bkt *hbp = &msrihash[idx];
	struct cmi_msri_hashent *hep;

	/*
	 * This function is called during #MC trap handling, so we should
	 * consider the possibility that the hash mutex is held by the
	 * interrupted thread.  This should not happen because interposition
	 * is an artificial injection mechanism and the #MC is requested
	 * after adding entries, but just in case of a real #MC at an
	 * unlucky moment we'll use mutex_tryenter here.
	 */
	if (!mutex_tryenter(&hbp->msrib_lock))
		return (0);

	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
		if (CMI_MSRI_MATCH(hep, hdl, msr)) {
			*valp = hep->msrie_msrval;
			break;
		}
	}

	mutex_exit(&hbp->msrib_lock);

	return (hep != NULL);
}

/*
 * Remove any interposed value that matches.
 */
static void
msri_rment(cmi_hdl_impl_t *hdl, uint_t msr)
{
	int idx = CMI_MSRI_HASHIDX(hdl, msr);
	struct cmi_msri_bkt *hbp = &msrihash[idx];
	struct cmi_msri_hashent *hep;

	if (!mutex_tryenter(&hbp->msrib_lock))
		return;

	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
		if (CMI_MSRI_MATCH(hep, hdl, msr)) {
			if (hep->msrie_prev != NULL)
				hep->msrie_prev->msrie_next = hep->msrie_next;

			if (hep->msrie_next != NULL)
				hep->msrie_next->msrie_prev = hep->msrie_prev;

			if (hbp->msrib_head == hep)
				hbp->msrib_head = hep->msrie_next;

			kmem_free(hep, sizeof (*hep));
			break;
		}
	}

	mutex_exit(&hbp->msrib_lock);
}
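
/*
 * Taken together, the three routines above give an interposed MSR value
 * its life cycle; a sketch of how they are wired up later in this file:
 *
 *	msri_addent(hdl, msr, val)	- via cmi_hdl_msrinterpose()
 *	msri_lookup(hdl, msr, &val)	- consulted by cmi_hdl_rdmsr()
 *					  before any hardware access
 *	msri_rment(hdl, msr)		- cmi_hdl_wrmsr() invalidates the
 *					  interposed value on a real write
 */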

/*
 *	 =======================================================
 *	|	PCI Config Space Interposition			|
 *	|	------------------------------			|
 *	|							|
 *	 -------------------------------------------------------
 */

/*
 * Hash for interposed PCI config space values.  We lookup on
 * bus/dev/fun/offset and then record whether the value stashed was made
 * with a byte, word or doubleword access;  we will only return a hit for
 * an access of the same size.  If you access say a 32-bit register using
 * byte accesses and then attempt to read the full 32-bit value back you
 * will not obtain any sort of merged result - you get a lookup miss.
 */

#define	CMI_PCII_HASHSZ		16
#define	CMI_PCII_HASHIDX(b, d, f, o) \
	(((b) + (d) + (f) + (o)) % CMI_PCII_HASHSZ)

struct cmi_pcii_bkt {
	kmutex_t pciib_lock;
	struct cmi_pcii_hashent *pciib_head;
};

struct cmi_pcii_hashent {
	struct cmi_pcii_hashent *pcii_next;
	struct cmi_pcii_hashent *pcii_prev;
	int pcii_bus;
	int pcii_dev;
	int pcii_func;
	int pcii_reg;
	int pcii_asize;
	uint32_t pcii_val;
};

#define	CMI_PCII_MATCH(ent, b, d, f, r, asz) \
	((ent)->pcii_bus == (b) && (ent)->pcii_dev == (d) && \
	(ent)->pcii_func == (f) && (ent)->pcii_reg == (r) && \
	(ent)->pcii_asize == (asz))

static struct cmi_pcii_bkt pciihash[CMI_PCII_HASHSZ];

/*
 * Add a new entry to the PCI interpose hash, overwriting any existing
 * entry that is found.
 */
static void
pcii_addent(int bus, int dev, int func, int reg, uint32_t val, int asz)
{
	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
	struct cmi_pcii_bkt *hbp = &pciihash[idx];
	struct cmi_pcii_hashent *hep;

	cmi_hdl_inj_begin(NULL);

	mutex_enter(&hbp->pciib_lock);

	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz))
			break;
	}

	if (hep != NULL) {
		hep->pcii_val = val;
	} else {
		hep = kmem_alloc(sizeof (*hep), KM_SLEEP);
		hep->pcii_bus = bus;
		hep->pcii_dev = dev;
		hep->pcii_func = func;
		hep->pcii_reg = reg;
		hep->pcii_asize = asz;
		hep->pcii_val = val;

		if (hbp->pciib_head != NULL)
			hbp->pciib_head->pcii_prev = hep;
		hep->pcii_next = hbp->pciib_head;
		hep->pcii_prev = NULL;
		hbp->pciib_head = hep;
	}

	mutex_exit(&hbp->pciib_lock);

	cmi_hdl_inj_end(NULL);
}

/*
 * Look for a match for the given bus/dev/func/reg; return 1 with valp
 * filled if a match is found, otherwise return 0 with valp untouched.
 */
static int
pcii_lookup(int bus, int dev, int func, int reg, int asz, uint32_t *valp)
{
	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
	struct cmi_pcii_bkt *hbp = &pciihash[idx];
	struct cmi_pcii_hashent *hep;

	if (!mutex_tryenter(&hbp->pciib_lock))
		return (0);

	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz)) {
			*valp = hep->pcii_val;
			break;
		}
	}

	mutex_exit(&hbp->pciib_lock);

	return (hep != NULL);
}

static void
pcii_rment(int bus, int dev, int func, int reg, int asz)
{
	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
	struct cmi_pcii_bkt *hbp = &pciihash[idx];
	struct cmi_pcii_hashent *hep;

	mutex_enter(&hbp->pciib_lock);

	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz)) {
			if (hep->pcii_prev != NULL)
				hep->pcii_prev->pcii_next = hep->pcii_next;

			if (hep->pcii_next != NULL)
				hep->pcii_next->pcii_prev = hep->pcii_prev;

			if (hbp->pciib_head == hep)
				hbp->pciib_head = hep->pcii_next;

			kmem_free(hep, sizeof (*hep));
			break;
		}
	}

	mutex_exit(&hbp->pciib_lock);
}
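
/*
 * To make the access-size matching described above concrete (a sketch
 * with made-up bus/dev/func/reg values):
 *
 *	pcii_addent(0, 24, 3, 0x44, 0xdeadbeef, 4);
 *	pcii_lookup(0, 24, 3, 0x44, 4, &val)	- returns 1, val 0xdeadbeef
 *	pcii_lookup(0, 24, 3, 0x44, 1, &val)	- returns 0; same register,
 *						  but pcii_asize differs
 */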

#ifndef __xpv

/*
 *	 =======================================================
 *	|	Native methods					|
 *	|	--------------					|
 *	|							|
 *	| These are used when we are running native on bare-	|
 *	| metal, or simply don't know any better.		|
 *	 -------------------------------------------------------
 */

#define	HDLPRIV(hdl)	((cpu_t *)(hdl)->cmih_hdlpriv)

static uint_t
ntv_vendor(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getvendor(HDLPRIV(hdl)));
}

static const char *
ntv_vendorstr(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getvendorstr(HDLPRIV(hdl)));
}

static uint_t
ntv_family(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getfamily(HDLPRIV(hdl)));
}

static uint_t
ntv_model(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getmodel(HDLPRIV(hdl)));
}

static uint_t
ntv_stepping(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getstep(HDLPRIV(hdl)));
}

static uint_t
ntv_chipid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_chipid);
}

static uint_t
ntv_procnodeid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_procnodeid);
}

static uint_t
ntv_procnodes_per_pkg(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_procnodes_per_pkg);
}

static uint_t
ntv_coreid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_coreid);
}

static uint_t
ntv_strandid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_strandid);
}

static uint_t
ntv_strand_apicid(cmi_hdl_impl_t *hdl)
{
	return (cpuid_get_apicid(HDLPRIV(hdl)));
}

static uint16_t
ntv_smbiosid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smbiosid);
}

static uint_t
ntv_smb_chipid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smb_chipid);
}

static nvlist_t *
ntv_smb_bboard(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smb_bboard);
}

static uint32_t
ntv_chiprev(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getchiprev(HDLPRIV(hdl)));
}

static const char *
ntv_chiprevstr(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getchiprevstr(HDLPRIV(hdl)));
}

static uint32_t
ntv_getsockettype(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getsockettype(HDLPRIV(hdl)));
}

static const char *
ntv_getsocketstr(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getsocketstr(HDLPRIV(hdl)));
}

static id_t
ntv_logical_id(cmi_hdl_impl_t *hdl)
{
	return (HDLPRIV(hdl)->cpu_id);
}

/*ARGSUSED*/
static int
ntv_getcr4_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	ulong_t *dest = (ulong_t *)arg1;
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;

	*dest = getcr4();
	*rcp = CMI_SUCCESS;

	return (0);
}

static ulong_t
ntv_getcr4(cmi_hdl_impl_t *hdl)
{
	cpu_t *cp = HDLPRIV(hdl);
	ulong_t val;

	(void) call_func_ntv(cp->cpu_id, ntv_getcr4_xc, (xc_arg_t)&val, NULL);

	return (val);
}

/*ARGSUSED*/
static int
ntv_setcr4_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	ulong_t val = (ulong_t)arg1;
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;

	setcr4(val);
	*rcp = CMI_SUCCESS;

	return (0);
}

static void
ntv_setcr4(cmi_hdl_impl_t *hdl, ulong_t val)
{
	cpu_t *cp = HDLPRIV(hdl);

	(void) call_func_ntv(cp->cpu_id, ntv_setcr4_xc, (xc_arg_t)val, NULL);
}

volatile uint32_t cmi_trapped_rdmsr;

/*ARGSUSED*/
static int
ntv_rdmsr_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	uint_t msr = (uint_t)arg1;
	uint64_t *valp = (uint64_t *)arg2;
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;

	on_trap_data_t otd;

	if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
		if (checked_rdmsr(msr, valp) == 0)
			*rcp = CMI_SUCCESS;
		else
			*rcp = CMIERR_NOTSUP;
	} else {
		*rcp = CMIERR_MSRGPF;
		atomic_inc_32(&cmi_trapped_rdmsr);
	}
	no_trap();

	return (0);
}

static cmi_errno_t
ntv_rdmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
{
	cpu_t *cp = HDLPRIV(hdl);

	if (!(hdl->cmih_msrsrc & CMI_MSR_FLAG_RD_HWOK))
		return (CMIERR_INTERPOSE);

	return (call_func_ntv(cp->cpu_id, ntv_rdmsr_xc,
	    (xc_arg_t)msr, (xc_arg_t)valp));
}

volatile uint32_t cmi_trapped_wrmsr;

/*ARGSUSED*/
static int
ntv_wrmsr_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	uint_t msr = (uint_t)arg1;
	uint64_t val = *((uint64_t *)arg2);
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
	on_trap_data_t otd;

	if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
		if (checked_wrmsr(msr, val) == 0)
			*rcp = CMI_SUCCESS;
		else
			*rcp = CMIERR_NOTSUP;
	} else {
		*rcp = CMIERR_MSRGPF;
		atomic_inc_32(&cmi_trapped_wrmsr);
	}
	no_trap();

	return (0);
}

static cmi_errno_t
ntv_wrmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
{
	cpu_t *cp = HDLPRIV(hdl);

	if (!(hdl->cmih_msrsrc & CMI_MSR_FLAG_WR_HWOK))
		return (CMI_SUCCESS);	/* pretend all is ok */

	return (call_func_ntv(cp->cpu_id, ntv_wrmsr_xc,
	    (xc_arg_t)msr, (xc_arg_t)&val));
}

static cmi_errno_t
ntv_msrinterpose(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
{
	msri_addent(hdl, msr, val);
	return (CMI_SUCCESS);
}

/*ARGSUSED*/
static int
ntv_int_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
	int int_no = (int)arg1;

	if (int_no == T_MCE)
		int18();
	else
		int_cmci();
	*rcp = CMI_SUCCESS;

	return (0);
}

static void
ntv_int(cmi_hdl_impl_t *hdl, int int_no)
{
	cpu_t *cp = HDLPRIV(hdl);

	(void) call_func_ntv(cp->cpu_id, ntv_int_xc, (xc_arg_t)int_no, NULL);
}
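
/*
 * Note on the injection path above: ntv_int() is reached through
 * cmi_hdl_int() below.  A caller passing T_MCE has ntv_int_xc() raise a
 * machine check on the target cpu via int18(); any other vector is
 * treated as a CMCI request and routed to int_cmci().
 */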

static int
ntv_online(cmi_hdl_impl_t *hdl, int new_status, int *old_status)
{
	processorid_t cpuid = HDLPRIV(hdl)->cpu_id;

	return (p_online_internal(cpuid, new_status, old_status));
}

#else	/* __xpv */

/*
 *	 =======================================================
 *	|	xVM dom0 methods				|
 *	|	----------------				|
 *	|							|
 *	| These are used when we are running as dom0 in	|
 *	| a Solaris xVM context.				|
 *	 -------------------------------------------------------
 */

#define	HDLPRIV(hdl)	((xen_mc_lcpu_cookie_t)(hdl)->cmih_hdlpriv)

extern uint_t _cpuid_vendorstr_to_vendorcode(char *);

static uint_t
xpv_vendor(cmi_hdl_impl_t *hdl)
{
	return (_cpuid_vendorstr_to_vendorcode((char *)xen_physcpu_vendorstr(
	    HDLPRIV(hdl))));
}

static const char *
xpv_vendorstr(cmi_hdl_impl_t *hdl)
{
	return (xen_physcpu_vendorstr(HDLPRIV(hdl)));
}

static uint_t
xpv_family(cmi_hdl_impl_t *hdl)
{
	return (xen_physcpu_family(HDLPRIV(hdl)));
}

static uint_t
xpv_model(cmi_hdl_impl_t *hdl)
{
	return (xen_physcpu_model(HDLPRIV(hdl)));
}

static uint_t
xpv_stepping(cmi_hdl_impl_t *hdl)
{
	return (xen_physcpu_stepping(HDLPRIV(hdl)));
}

static uint_t
xpv_chipid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_chipid);
}

static uint_t
xpv_procnodeid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_procnodeid);
}

static uint_t
xpv_procnodes_per_pkg(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_procnodes_per_pkg);
}

static uint_t
xpv_coreid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_coreid);
}

static uint_t
xpv_strandid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_strandid);
}

static uint_t
xpv_strand_apicid(cmi_hdl_impl_t *hdl)
{
	return (xen_physcpu_initial_apicid(HDLPRIV(hdl)));
}

static uint16_t
xpv_smbiosid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smbiosid);
}

static uint_t
xpv_smb_chipid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smb_chipid);
}

static nvlist_t *
xpv_smb_bboard(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smb_bboard);
}

extern uint32_t _cpuid_chiprev(uint_t, uint_t, uint_t, uint_t);

static uint32_t
xpv_chiprev(cmi_hdl_impl_t *hdl)
{
	return (_cpuid_chiprev(xpv_vendor(hdl), xpv_family(hdl),
	    xpv_model(hdl), xpv_stepping(hdl)));
}

extern const char *_cpuid_chiprevstr(uint_t, uint_t, uint_t, uint_t);

static const char *
xpv_chiprevstr(cmi_hdl_impl_t *hdl)
{
	return (_cpuid_chiprevstr(xpv_vendor(hdl), xpv_family(hdl),
	    xpv_model(hdl), xpv_stepping(hdl)));
}

extern uint32_t _cpuid_skt(uint_t, uint_t, uint_t, uint_t);

static uint32_t
xpv_getsockettype(cmi_hdl_impl_t *hdl)
{
	return (_cpuid_skt(xpv_vendor(hdl), xpv_family(hdl),
	    xpv_model(hdl), xpv_stepping(hdl)));
}

extern const char *_cpuid_sktstr(uint_t, uint_t, uint_t, uint_t);

static const char *
xpv_getsocketstr(cmi_hdl_impl_t *hdl)
{
	return (_cpuid_sktstr(xpv_vendor(hdl), xpv_family(hdl),
	    xpv_model(hdl), xpv_stepping(hdl)));
}

static id_t
xpv_logical_id(cmi_hdl_impl_t *hdl)
{
	return (xen_physcpu_logical_id(HDLPRIV(hdl)));
}

static cmi_errno_t
xpv_rdmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
{
	switch (msr) {
	case IA32_MSR_MCG_CAP:
		*valp = xen_physcpu_mcg_cap(HDLPRIV(hdl));
		break;

	default:
		return (CMIERR_NOTSUP);
	}

	return (CMI_SUCCESS);
}

/*
 * Request the hypervisor to write an MSR for us.  The hypervisor
 * will only accept MCA-related MSRs, as this is for MCA error
 * simulation purposes alone.  We will pre-screen MSRs for injection
 * so we don't bother the HV with bogus requests.  We will permit
 * injection to any MCA bank register, and to MCG_STATUS.
 */

#define	IS_MCA_INJ_MSR(msr) \
	(((msr) >= IA32_MSR_MC(0, CTL) && (msr) <= IA32_MSR_MC(10, MISC)) || \
	(msr) == IA32_MSR_MCG_STATUS)
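
/*
 * For orientation (a sketch assuming the architectural MCA MSR layout,
 * where bank n occupies 0x400 + 4n through 0x403 + 4n and MCG_STATUS is
 * 0x17a): the filter accepts 0x400 (MC0_CTL) through 0x42b (MC10_MISC)
 * plus 0x17a, and rejects everything else before we trouble the
 * hypervisor.
 */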

static cmi_errno_t
xpv_wrmsr_cmn(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val, boolean_t intpose)
{
	struct xen_mc_msrinject mci;

	if (!(hdl->cmih_flags & CMIH_F_INJACTV))
		return (CMIERR_NOTSUP);	/* for injection use only! */

	if (!IS_MCA_INJ_MSR(msr))
		return (CMIERR_API);

	if (panicstr)
		return (CMIERR_DEADLOCK);

	mci.mcinj_cpunr = xen_physcpu_logical_id(HDLPRIV(hdl));
	mci.mcinj_flags = intpose ? MC_MSRINJ_F_INTERPOSE : 0;
	mci.mcinj_count = 1;	/* learn to batch sometime */
	mci.mcinj_msr[0].reg = msr;
	mci.mcinj_msr[0].value = val;

	return (HYPERVISOR_mca(XEN_MC_msrinject, (xen_mc_arg_t *)&mci) ==
	    0 ? CMI_SUCCESS : CMIERR_NOTSUP);
}

static cmi_errno_t
xpv_wrmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
{
	return (xpv_wrmsr_cmn(hdl, msr, val, B_FALSE));
}

static cmi_errno_t
xpv_msrinterpose(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
{
	return (xpv_wrmsr_cmn(hdl, msr, val, B_TRUE));
}

static void
xpv_int(cmi_hdl_impl_t *hdl, int int_no)
{
	struct xen_mc_mceinject mce;

	if (!(hdl->cmih_flags & CMIH_F_INJACTV))
		return;

	if (int_no != T_MCE) {
		cmn_err(CE_WARN, "xpv_int: int_no %d unimplemented\n",
		    int_no);
	}

	mce.mceinj_cpunr = xen_physcpu_logical_id(HDLPRIV(hdl));

	(void) HYPERVISOR_mca(XEN_MC_mceinject, (xen_mc_arg_t *)&mce);
}

static int
xpv_online(cmi_hdl_impl_t *hdl, int new_status, int *old_status)
{
	xen_sysctl_t xs;
	int op, rc, status;

	new_status &= ~P_FORCED;

	switch (new_status) {
	case P_STATUS:
		op = XEN_SYSCTL_CPU_HOTPLUG_STATUS;
		break;
	case P_FAULTED:
	case P_OFFLINE:
		op = XEN_SYSCTL_CPU_HOTPLUG_OFFLINE;
		break;
	case P_ONLINE:
		op = XEN_SYSCTL_CPU_HOTPLUG_ONLINE;
		break;
	default:
		return (-1);
	}

	xs.cmd = XEN_SYSCTL_cpu_hotplug;
	xs.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
	xs.u.cpu_hotplug.cpu = xen_physcpu_logical_id(HDLPRIV(hdl));
	xs.u.cpu_hotplug.op = op;

	if ((rc = HYPERVISOR_sysctl(&xs)) >= 0) {
		status = rc;
		rc = 0;
		switch (status) {
		case XEN_CPU_HOTPLUG_STATUS_NEW:
			*old_status = P_OFFLINE;
			break;
		case XEN_CPU_HOTPLUG_STATUS_OFFLINE:
			*old_status = P_FAULTED;
			break;
		case XEN_CPU_HOTPLUG_STATUS_ONLINE:
			*old_status = P_ONLINE;
			break;
		default:
			return (-1);
		}
	}

	return (-rc);
}

#endif	/* __xpv */

/*ARGSUSED*/
static void *
cpu_search(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
    uint_t strandid)
{
#ifdef __xpv
	xen_mc_lcpu_cookie_t cpi;

	for (cpi = xen_physcpu_next(NULL); cpi != NULL;
	    cpi = xen_physcpu_next(cpi)) {
		if (xen_physcpu_chipid(cpi) == chipid &&
		    xen_physcpu_coreid(cpi) == coreid &&
		    xen_physcpu_strandid(cpi) == strandid)
			return ((void *)cpi);
	}
	return (NULL);

#else	/* __xpv */

	cpu_t *cp, *startcp;

	kpreempt_disable();
	cp = startcp = CPU;
	do {
		if (cmi_ntv_hwchipid(cp) == chipid &&
		    cmi_ntv_hwcoreid(cp) == coreid &&
		    cmi_ntv_hwstrandid(cp) == strandid) {
			kpreempt_enable();
			return ((void *)cp);
		}

		cp = cp->cpu_next;
	} while (cp != startcp);
	kpreempt_enable();
	return (NULL);
#endif	/* __xpv */
}

static boolean_t
cpu_is_cmt(void *priv)
{
#ifdef __xpv
	return (xen_physcpu_is_cmt((xen_mc_lcpu_cookie_t)priv));
#else	/* __xpv */
	cpu_t *cp = (cpu_t *)priv;

	int strands_per_core = cpuid_get_ncpu_per_chip(cp) /
	    cpuid_get_ncore_per_chip(cp);

	return (strands_per_core > 1);
#endif	/* __xpv */
}

/*
 * Find the handle entry of a given cpu identified by a <chip,core,strand>
 * tuple.
 */
static cmi_hdl_ent_t *
cmi_hdl_ent_lookup(uint_t chipid, uint_t coreid, uint_t strandid)
{
	/*
	 * Allocate the per-chip table, which contains a list of handles
	 * for all strands of the chip.
	 */
	if (cmi_chip_tab[chipid] == NULL) {
		size_t sz;
		cmi_hdl_ent_t *pg;

		sz = CMI_MAX_STRANDS_PER_CHIP * sizeof (cmi_hdl_ent_t);
		pg = kmem_zalloc(sz, KM_SLEEP);

		/* test and set the per-chip table if it is not allocated */
		if (atomic_cas_ptr(&cmi_chip_tab[chipid], NULL, pg) != NULL)
			kmem_free(pg, sz); /* someone beat us */
	}

	return (cmi_chip_tab[chipid] + CMI_HDL_ARR_IDX(coreid, strandid));
}

cmi_hdl_t
cmi_hdl_create(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
    uint_t strandid)
{
	cmi_hdl_impl_t *hdl;
	void *priv;
	cmi_hdl_ent_t *ent;

#ifdef __xpv
	ASSERT(class == CMI_HDL_SOLARIS_xVM_MCA);
#else
	ASSERT(class == CMI_HDL_NATIVE);
#endif

	if (chipid > CMI_MAX_CHIPID ||
	    coreid > CMI_MAX_CORES_PER_CHIP - 1 ||
	    strandid > CMI_MAX_STRANDS_PER_CORE - 1)
		return (NULL);

	if ((priv = cpu_search(class, chipid, coreid, strandid)) == NULL)
		return (NULL);

	hdl = kmem_zalloc(sizeof (*hdl), KM_SLEEP);

	hdl->cmih_class = class;
	HDLOPS(hdl) = &cmi_hdl_ops;
	hdl->cmih_chipid = chipid;
	hdl->cmih_coreid = coreid;
	hdl->cmih_strandid = strandid;
	hdl->cmih_mstrand = cpu_is_cmt(priv);
	hdl->cmih_hdlpriv = priv;
#ifdef __xpv
	hdl->cmih_msrsrc = CMI_MSR_FLAG_RD_INTERPOSEOK |
	    CMI_MSR_FLAG_WR_INTERPOSEOK;

	/*
	 * XXX: need hypervisor support for procnodeid, for now assume
	 * single-node processors (procnodeid = chipid)
	 */
	hdl->cmih_procnodeid = xen_physcpu_chipid((xen_mc_lcpu_cookie_t)priv);
	hdl->cmih_procnodes_per_pkg = 1;
#else	/* __xpv */
	hdl->cmih_msrsrc = CMI_MSR_FLAG_RD_HWOK | CMI_MSR_FLAG_RD_INTERPOSEOK |
	    CMI_MSR_FLAG_WR_HWOK | CMI_MSR_FLAG_WR_INTERPOSEOK;
	hdl->cmih_procnodeid = cpuid_get_procnodeid((cpu_t *)priv);
	hdl->cmih_procnodes_per_pkg =
	    cpuid_get_procnodes_per_pkg((cpu_t *)priv);
#endif	/* __xpv */

	ent = cmi_hdl_ent_lookup(chipid, coreid, strandid);
	if (ent->cmae_refcnt != 0 || ent->cmae_hdlp != NULL) {
		/*
		 * Somehow this (chipid, coreid, strandid) id tuple has
		 * already been assigned!  This indicates that the
		 * caller's logic in determining these values is busted,
		 * or perhaps undermined by bad BIOS setup.  Complain,
		 * and refuse to initialize this tuple again as bad things
		 * will happen.
		 */
		cmn_err(CE_NOTE, "cmi_hdl_create: chipid %d coreid %d "
		    "strandid %d handle already allocated!",
		    chipid, coreid, strandid);
		kmem_free(hdl, sizeof (*hdl));
		return (NULL);
	}

	/*
	 * Once we store a nonzero reference count others can find this
	 * handle via cmi_hdl_lookup etc.  This initial hold on the handle
	 * is to be dropped only if some other part of cmi initialization
	 * fails or, if it succeeds, at later cpu deconfigure.  Note that
	 * the module private data we hold in cmih_cmi and cmih_cmidata
	 * is still NULL at this point (the caller will fill it with
	 * cmi_hdl_setcmi if it initializes) so consumers of handles
	 * should always be ready for that possibility.
	 */
	ent->cmae_hdlp = hdl;
	hdl->cmih_refcntp = &ent->cmae_refcnt;
	ent->cmae_refcnt = 1;

	return ((cmi_hdl_t)hdl);
}
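
/*
 * A sketch of the resulting handle life cycle, as the cmi framework
 * drives it (sequence inferred from the comments above; the actual call
 * sites live outside this file):
 *
 *	hdl = cmi_hdl_create(class, chipid, coreid, strandid);
 *	cmi_hdl_setcmi(hdl, cmi, cmidata);	- if a cpu module attaches
 *	...					- lookups/holds come and go
 *	cmi_hdl_rele(hdl);			- initial hold dropped at
 *						  cpu deconfigure; the last
 *						  rele frees the handle
 *
 * Until cmi_hdl_setcmi() runs, cmih_cmi and cmih_cmidata remain NULL and
 * handle consumers must tolerate that.
 */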

void
cmi_read_smbios(cmi_hdl_t ophdl)
{
	uint_t strand_apicid;
	uint_t chip_inst;
	uint16_t smb_id;
	int rc = 0;

	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	/* set x86gentopo compatibility */
	fm_smb_fmacompat();

#ifndef __xpv
	strand_apicid = ntv_strand_apicid(hdl);
#else
	strand_apicid = xpv_strand_apicid(hdl);
#endif

	if (!x86gentopo_legacy) {
		/*
		 * If fm_smb_chipinst() or fm_smb_bboard() fails,
		 * topo reverts to legacy mode.
		 */
		rc = fm_smb_chipinst(strand_apicid, &chip_inst, &smb_id);
		if (rc == 0) {
			hdl->cmih_smb_chipid = chip_inst;
			hdl->cmih_smbiosid = smb_id;
		} else {
#ifdef DEBUG
			cmn_err(CE_NOTE, "cmi reads smbios chip info failed");
#endif /* DEBUG */
			return;
		}

		hdl->cmih_smb_bboard = fm_smb_bboard(strand_apicid);
#ifdef DEBUG
		if (hdl->cmih_smb_bboard == NULL)
			cmn_err(CE_NOTE,
			    "cmi reads smbios base boards info failed");
#endif /* DEBUG */
	}
}

void
cmi_hdl_hold(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	ASSERT(*hdl->cmih_refcntp != 0); /* must not be the initial hold */

	atomic_inc_32(hdl->cmih_refcntp);
}

static int
cmi_hdl_canref(cmi_hdl_ent_t *ent)
{
	volatile uint32_t *refcntp;
	uint32_t refcnt;

	refcntp = &ent->cmae_refcnt;
	refcnt = *refcntp;

	if (refcnt == 0) {
		/*
		 * Associated object never existed, is being destroyed,
		 * or has been destroyed.
		 */
		return (0);
	}

	/*
	 * We cannot use atomic increment here because once the reference
	 * count reaches zero it must never be bumped up again.
	 */
	while (refcnt != 0) {
		if (atomic_cas_32(refcntp, refcnt, refcnt + 1) == refcnt)
			return (1);
		refcnt = *refcntp;
	}

	/*
	 * Somebody dropped the reference count to 0 after our initial
	 * check.
	 */
	return (0);
}
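
/*
 * To make the CAS loop above concrete: if cmi_hdl_rele() on another cpu
 * drops the count from 1 to 0 between our read of refcnt and the
 * atomic_cas_32(), the CAS observes 0 rather than 1 and fails; the retry
 * then reloads 0, the loop exits, and we correctly refuse the reference
 * rather than resurrecting a handle that is being destroyed.
 */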

void
cmi_hdl_rele(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
	cmi_hdl_ent_t *ent;

	ASSERT(*hdl->cmih_refcntp > 0);

	if (atomic_dec_32_nv(hdl->cmih_refcntp) > 0)
		return;

	ent = cmi_hdl_ent_lookup(hdl->cmih_chipid, hdl->cmih_coreid,
	    hdl->cmih_strandid);
	ent->cmae_hdlp = NULL;

	kmem_free(hdl, sizeof (*hdl));
}

void
cmi_hdl_setspecific(cmi_hdl_t ophdl, void *arg)
{
	IMPLHDL(ophdl)->cmih_spec = arg;
}

void *
cmi_hdl_getspecific(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_spec);
}

void
cmi_hdl_setmc(cmi_hdl_t ophdl, const struct cmi_mc_ops *mcops, void *mcdata)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	ASSERT(hdl->cmih_mcops == NULL && hdl->cmih_mcdata == NULL);
	hdl->cmih_mcops = mcops;
	hdl->cmih_mcdata = mcdata;
}

const struct cmi_mc_ops *
cmi_hdl_getmcops(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_mcops);
}

void *
cmi_hdl_getmcdata(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_mcdata);
}

cmi_hdl_t
cmi_hdl_lookup(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
    uint_t strandid)
{
	cmi_hdl_ent_t *ent;

	if (chipid > CMI_MAX_CHIPID ||
	    coreid > CMI_MAX_CORES_PER_CHIP - 1 ||
	    strandid > CMI_MAX_STRANDS_PER_CORE - 1)
		return (NULL);

	ent = cmi_hdl_ent_lookup(chipid, coreid, strandid);

	if (class == CMI_HDL_NEUTRAL)
#ifdef __xpv
		class = CMI_HDL_SOLARIS_xVM_MCA;
#else
		class = CMI_HDL_NATIVE;
#endif

	if (!cmi_hdl_canref(ent))
		return (NULL);

	if (ent->cmae_hdlp->cmih_class != class) {
		cmi_hdl_rele((cmi_hdl_t)ent->cmae_hdlp);
		return (NULL);
	}

	return ((cmi_hdl_t)ent->cmae_hdlp);
}

cmi_hdl_t
cmi_hdl_any(void)
{
	int i, j;
	cmi_hdl_ent_t *ent;

	for (i = 0; i < CMI_CHIPID_ARR_SZ; i++) {
		if (cmi_chip_tab[i] == NULL)
			continue;
		for (j = 0, ent = cmi_chip_tab[i]; j < CMI_MAX_STRANDS_PER_CHIP;
		    j++, ent++) {
			if (cmi_hdl_canref(ent))
				return ((cmi_hdl_t)ent->cmae_hdlp);
		}
	}

	return (NULL);
}

void
cmi_hdl_walk(int (*cbfunc)(cmi_hdl_t, void *, void *, void *),
    void *arg1, void *arg2, void *arg3)
{
	int i, j;
	cmi_hdl_ent_t *ent;

	for (i = 0; i < CMI_CHIPID_ARR_SZ; i++) {
		if (cmi_chip_tab[i] == NULL)
			continue;
		for (j = 0, ent = cmi_chip_tab[i]; j < CMI_MAX_STRANDS_PER_CHIP;
		    j++, ent++) {
			if (cmi_hdl_canref(ent)) {
				cmi_hdl_impl_t *hdl = ent->cmae_hdlp;
				if ((*cbfunc)((cmi_hdl_t)hdl, arg1, arg2, arg3)
				    == CMI_HDL_WALK_DONE) {
					cmi_hdl_rele((cmi_hdl_t)hdl);
					return;
				}
				cmi_hdl_rele((cmi_hdl_t)hdl);
			}
		}
	}
}
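
/*
 * A sketch of a cmi_hdl_walk() callback (count_cb is hypothetical):
 *
 *	static int
 *	count_cb(cmi_hdl_t hdl, void *arg1, void *arg2, void *arg3)
 *	{
 *		(*(uint_t *)arg1)++;
 *		return (CMI_HDL_WALK_NEXT);
 *	}
 *
 *	uint_t nhdl = 0;
 *	cmi_hdl_walk(count_cb, &nhdl, NULL, NULL);
 *
 * The walker holds and releases each handle around the callback, so the
 * callback must not rele the handle itself; returning CMI_HDL_WALK_DONE
 * terminates the walk early.
 */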

void
cmi_hdl_setcmi(cmi_hdl_t ophdl, void *cmi, void *cmidata)
{
	IMPLHDL(ophdl)->cmih_cmidata = cmidata;
	IMPLHDL(ophdl)->cmih_cmi = cmi;
}

void *
cmi_hdl_getcmi(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_cmi);
}

void *
cmi_hdl_getcmidata(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_cmidata);
}

enum cmi_hdl_class
cmi_hdl_class(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_class);
}

#define	CMI_HDL_OPFUNC(what, type)				\
	type							\
	cmi_hdl_##what(cmi_hdl_t ophdl)				\
	{							\
		return (HDLOPS(IMPLHDL(ophdl))->		\
		    cmio_##what(IMPLHDL(ophdl)));		\
	}

CMI_HDL_OPFUNC(vendor, uint_t)
CMI_HDL_OPFUNC(vendorstr, const char *)
CMI_HDL_OPFUNC(family, uint_t)
CMI_HDL_OPFUNC(model, uint_t)
CMI_HDL_OPFUNC(stepping, uint_t)
CMI_HDL_OPFUNC(chipid, uint_t)
CMI_HDL_OPFUNC(procnodeid, uint_t)
CMI_HDL_OPFUNC(coreid, uint_t)
CMI_HDL_OPFUNC(strandid, uint_t)
CMI_HDL_OPFUNC(procnodes_per_pkg, uint_t)
CMI_HDL_OPFUNC(strand_apicid, uint_t)
CMI_HDL_OPFUNC(chiprev, uint32_t)
CMI_HDL_OPFUNC(chiprevstr, const char *)
CMI_HDL_OPFUNC(getsockettype, uint32_t)
CMI_HDL_OPFUNC(getsocketstr, const char *)
CMI_HDL_OPFUNC(logical_id, id_t)
CMI_HDL_OPFUNC(smbiosid, uint16_t)
CMI_HDL_OPFUNC(smb_chipid, uint_t)
CMI_HDL_OPFUNC(smb_bboard, nvlist_t *)
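
/*
 * Each CMI_HDL_OPFUNC() invocation above expands to a thin public wrapper
 * that dispatches through the per-class ops vector; for example,
 * CMI_HDL_OPFUNC(vendor, uint_t) yields
 *
 *	uint_t
 *	cmi_hdl_vendor(cmi_hdl_t ophdl)
 *	{
 *		return (HDLOPS(IMPLHDL(ophdl))->cmio_vendor(IMPLHDL(ophdl)));
 *	}
 *
 * which resolves to ntv_vendor() or xpv_vendor() depending on how
 * cmi_hdl_ops is built at the bottom of this file.
 */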

boolean_t
cmi_hdl_is_cmt(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_mstrand);
}

void
cmi_hdl_int(cmi_hdl_t ophdl, int num)
{
	if (HDLOPS(IMPLHDL(ophdl))->cmio_int == NULL)
		return;

	cmi_hdl_inj_begin(ophdl);
	HDLOPS(IMPLHDL(ophdl))->cmio_int(IMPLHDL(ophdl), num);
	cmi_hdl_inj_end(NULL);
}

int
cmi_hdl_online(cmi_hdl_t ophdl, int new_status, int *old_status)
{
	return (HDLOPS(IMPLHDL(ophdl))->cmio_online(IMPLHDL(ophdl),
	    new_status, old_status));
}

#ifndef __xpv
/*
 * Return hardware chip instance; cpuid_get_chipid provides this directly.
 */
uint_t
cmi_ntv_hwchipid(cpu_t *cp)
{
	return (cpuid_get_chipid(cp));
}

/*
 * Return hardware node instance; cpuid_get_procnodeid provides this
 * directly.
 */
uint_t
cmi_ntv_hwprocnodeid(cpu_t *cp)
{
	return (cpuid_get_procnodeid(cp));
}

/*
 * Return core instance within a single chip.
 */
uint_t
cmi_ntv_hwcoreid(cpu_t *cp)
{
	return (cpuid_get_pkgcoreid(cp));
}

/*
 * Return strand number within a single core.  cpuid_get_clogid numbers
 * all execution units (strands, or cores in unstranded models) sequentially
 * within a single chip.
 */
uint_t
cmi_ntv_hwstrandid(cpu_t *cp)
{
	int strands_per_core = cpuid_get_ncpu_per_chip(cp) /
	    cpuid_get_ncore_per_chip(cp);

	return (cpuid_get_clogid(cp) % strands_per_core);
}
#endif	/* __xpv */

void
cmi_hdlconf_rdmsr_nohw(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	hdl->cmih_msrsrc &= ~CMI_MSR_FLAG_RD_HWOK;
}

void
cmi_hdlconf_wrmsr_nohw(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	hdl->cmih_msrsrc &= ~CMI_MSR_FLAG_WR_HWOK;
}

cmi_errno_t
cmi_hdl_rdmsr(cmi_hdl_t ophdl, uint_t msr, uint64_t *valp)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	/*
	 * Regardless of the handle class, we first check for an
	 * interposed value.  In the xVM case you probably want to
	 * place interposed values within the hypervisor itself, but
	 * we still allow interposing them in dom0 for test and bringup
	 * purposes.
	 */
	if ((hdl->cmih_msrsrc & CMI_MSR_FLAG_RD_INTERPOSEOK) &&
	    msri_lookup(hdl, msr, valp))
		return (CMI_SUCCESS);

	if (HDLOPS(hdl)->cmio_rdmsr == NULL)
		return (CMIERR_NOTSUP);

	return (HDLOPS(hdl)->cmio_rdmsr(hdl, msr, valp));
}

cmi_errno_t
cmi_hdl_wrmsr(cmi_hdl_t ophdl, uint_t msr, uint64_t val)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	/* Invalidate any interposed value */
	msri_rment(hdl, msr);

	if (HDLOPS(hdl)->cmio_wrmsr == NULL)
		return (CMI_SUCCESS);	/* pretend all is ok */

	return (HDLOPS(hdl)->cmio_wrmsr(hdl, msr, val));
}

void
cmi_hdl_enable_mce(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
	ulong_t cr4;

	if (HDLOPS(hdl)->cmio_getcr4 == NULL ||
	    HDLOPS(hdl)->cmio_setcr4 == NULL)
		return;

	cr4 = HDLOPS(hdl)->cmio_getcr4(hdl);

	HDLOPS(hdl)->cmio_setcr4(hdl, cr4 | CR4_MCE);
}

void
cmi_hdl_msrinterpose(cmi_hdl_t ophdl, cmi_mca_regs_t *regs, uint_t nregs)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
	int i;

	if (HDLOPS(hdl)->cmio_msrinterpose == NULL)
		return;

	cmi_hdl_inj_begin(ophdl);

	for (i = 0; i < nregs; i++, regs++)
		HDLOPS(hdl)->cmio_msrinterpose(hdl, regs->cmr_msrnum,
		    regs->cmr_msrval);

	cmi_hdl_inj_end(ophdl);
}

/*ARGSUSED*/
void
cmi_hdl_msrforward(cmi_hdl_t ophdl, cmi_mca_regs_t *regs, uint_t nregs)
{
#ifdef __xpv
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
	int i;

	for (i = 0; i < nregs; i++, regs++)
		msri_addent(hdl, regs->cmr_msrnum, regs->cmr_msrval);
#endif
}

void
cmi_pcird_nohw(void)
{
	cmi_pcicfg_flags &= ~CMI_PCICFG_FLAG_RD_HWOK;
}

void
cmi_pciwr_nohw(void)
{
	cmi_pcicfg_flags &= ~CMI_PCICFG_FLAG_WR_HWOK;
}

static uint32_t
cmi_pci_get_cmn(int bus, int dev, int func, int reg, int asz,
    int *interpose, ddi_acc_handle_t hdl)
{
	uint32_t val;

	if (cmi_pcicfg_flags & CMI_PCICFG_FLAG_RD_INTERPOSEOK &&
	    pcii_lookup(bus, dev, func, reg, asz, &val)) {
		if (interpose)
			*interpose = 1;
		return (val);
	}
	if (interpose)
		*interpose = 0;

	if (!(cmi_pcicfg_flags & CMI_PCICFG_FLAG_RD_HWOK))
		return (0);

	switch (asz) {
	case 1:
		if (hdl)
			val = pci_config_get8(hdl, (off_t)reg);
		else
			val = (*pci_getb_func)(bus, dev, func, reg);
		break;
	case 2:
		if (hdl)
			val = pci_config_get16(hdl, (off_t)reg);
		else
			val = (*pci_getw_func)(bus, dev, func, reg);
		break;
	case 4:
		if (hdl)
			val = pci_config_get32(hdl, (off_t)reg);
		else
			val = (*pci_getl_func)(bus, dev, func, reg);
		break;
	default:
		val = 0;
	}
	return (val);
}

uint8_t
cmi_pci_getb(int bus, int dev, int func, int reg, int *interpose,
    ddi_acc_handle_t hdl)
{
	return ((uint8_t)cmi_pci_get_cmn(bus, dev, func, reg, 1, interpose,
	    hdl));
}

uint16_t
cmi_pci_getw(int bus, int dev, int func, int reg, int *interpose,
    ddi_acc_handle_t hdl)
{
	return ((uint16_t)cmi_pci_get_cmn(bus, dev, func, reg, 2, interpose,
	    hdl));
}

uint32_t
cmi_pci_getl(int bus, int dev, int func, int reg, int *interpose,
    ddi_acc_handle_t hdl)
{
	return (cmi_pci_get_cmn(bus, dev, func, reg, 4, interpose, hdl));
}
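
/*
 * Putting the read path together, a sketch of interposed-value use from
 * a caller's perspective (bus/dev/func/reg values are illustrative only):
 *
 *	int interpose;
 *	cmi_pci_interposel(0, 24, 3, 0x44, 0xdeadbeef);
 *	val = cmi_pci_getl(0, 24, 3, 0x44, &interpose, NULL);
 *		- returns 0xdeadbeef, interpose == 1, no hardware access
 *	val = cmi_pci_getl(0, 24, 3, 0x48, &interpose, NULL);
 *		- misses the hash, reads real config space, interpose == 0
 */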

void
cmi_pci_interposeb(int bus, int dev, int func, int reg, uint8_t val)
{
	pcii_addent(bus, dev, func, reg, val, 1);
}

void
cmi_pci_interposew(int bus, int dev, int func, int reg, uint16_t val)
{
	pcii_addent(bus, dev, func, reg, val, 2);
}

void
cmi_pci_interposel(int bus, int dev, int func, int reg, uint32_t val)
{
	pcii_addent(bus, dev, func, reg, val, 4);
}

static void
cmi_pci_put_cmn(int bus, int dev, int func, int reg, int asz,
    ddi_acc_handle_t hdl, uint32_t val)
{
	/*
	 * If there is an interposed value for this register invalidate it.
	 */
	pcii_rment(bus, dev, func, reg, asz);

	if (!(cmi_pcicfg_flags & CMI_PCICFG_FLAG_WR_HWOK))
		return;

	switch (asz) {
	case 1:
		if (hdl)
			pci_config_put8(hdl, (off_t)reg, (uint8_t)val);
		else
			(*pci_putb_func)(bus, dev, func, reg, (uint8_t)val);
		break;

	case 2:
		if (hdl)
			pci_config_put16(hdl, (off_t)reg, (uint16_t)val);
		else
			(*pci_putw_func)(bus, dev, func, reg, (uint16_t)val);
		break;

	case 4:
		if (hdl)
			pci_config_put32(hdl, (off_t)reg, val);
		else
			(*pci_putl_func)(bus, dev, func, reg, val);
		break;

	default:
		break;
	}
}

void
cmi_pci_putb(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
    uint8_t val)
{
	cmi_pci_put_cmn(bus, dev, func, reg, 1, hdl, val);
}

void
cmi_pci_putw(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
    uint16_t val)
{
	cmi_pci_put_cmn(bus, dev, func, reg, 2, hdl, val);
}

void
cmi_pci_putl(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
    uint32_t val)
{
	cmi_pci_put_cmn(bus, dev, func, reg, 4, hdl, val);
}

static const struct cmi_hdl_ops cmi_hdl_ops = {
#ifdef __xpv
	/*
	 * CMI_HDL_SOLARIS_xVM_MCA - ops when we are an xVM dom0
	 */
	xpv_vendor,		/* cmio_vendor */
	xpv_vendorstr,		/* cmio_vendorstr */
	xpv_family,		/* cmio_family */
	xpv_model,		/* cmio_model */
	xpv_stepping,		/* cmio_stepping */
	xpv_chipid,		/* cmio_chipid */
	xpv_procnodeid,		/* cmio_procnodeid */
	xpv_coreid,		/* cmio_coreid */
	xpv_strandid,		/* cmio_strandid */
	xpv_procnodes_per_pkg,	/* cmio_procnodes_per_pkg */
	xpv_strand_apicid,	/* cmio_strand_apicid */
	xpv_chiprev,		/* cmio_chiprev */
	xpv_chiprevstr,		/* cmio_chiprevstr */
	xpv_getsockettype,	/* cmio_getsockettype */
	xpv_getsocketstr,	/* cmio_getsocketstr */
	xpv_logical_id,		/* cmio_logical_id */
	NULL,			/* cmio_getcr4 */
	NULL,			/* cmio_setcr4 */
	xpv_rdmsr,		/* cmio_rdmsr */
	xpv_wrmsr,		/* cmio_wrmsr */
	xpv_msrinterpose,	/* cmio_msrinterpose */
	xpv_int,		/* cmio_int */
	xpv_online,		/* cmio_online */
	xpv_smbiosid,		/* cmio_smbiosid */
	xpv_smb_chipid,		/* cmio_smb_chipid */
	xpv_smb_bboard		/* cmio_smb_bboard */

#else	/* __xpv */

	/*
	 * CMI_HDL_NATIVE - ops when apparently running on bare-metal
	 */
	ntv_vendor,		/* cmio_vendor */
	ntv_vendorstr,		/* cmio_vendorstr */
	ntv_family,		/* cmio_family */
	ntv_model,		/* cmio_model */
	ntv_stepping,		/* cmio_stepping */
	ntv_chipid,		/* cmio_chipid */
	ntv_procnodeid,		/* cmio_procnodeid */
	ntv_coreid,		/* cmio_coreid */
	ntv_strandid,		/* cmio_strandid */
	ntv_procnodes_per_pkg,	/* cmio_procnodes_per_pkg */
	ntv_strand_apicid,	/* cmio_strand_apicid */
	ntv_chiprev,		/* cmio_chiprev */
	ntv_chiprevstr,		/* cmio_chiprevstr */
	ntv_getsockettype,	/* cmio_getsockettype */
	ntv_getsocketstr,	/* cmio_getsocketstr */
	ntv_logical_id,		/* cmio_logical_id */
	ntv_getcr4,		/* cmio_getcr4 */
	ntv_setcr4,		/* cmio_setcr4 */
	ntv_rdmsr,		/* cmio_rdmsr */
	ntv_wrmsr,		/* cmio_wrmsr */
	ntv_msrinterpose,	/* cmio_msrinterpose */
	ntv_int,		/* cmio_int */
	ntv_online,		/* cmio_online */
	ntv_smbiosid,		/* cmio_smbiosid */
	ntv_smb_chipid,		/* cmio_smb_chipid */
	ntv_smb_bboard		/* cmio_smb_bboard */
#endif
};