/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, Joyent, Inc.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */

/*
 * CPU Module Interface - hardware abstraction.
 */

#ifdef __xpv
#include <sys/xpv_user.h>
#endif

#include <sys/types.h>
#include <sys/cpu_module.h>
#include <sys/kmem.h>
#include <sys/x86_archext.h>
#include <sys/cpuvar.h>
#include <sys/ksynch.h>
#include <sys/x_call.h>
#include <sys/pghw.h>
#include <sys/pci_cfgacc.h>
#include <sys/pci_cfgspace.h>
#include <sys/archsystm.h>
#include <sys/ontrap.h>
#include <sys/controlregs.h>
#include <sys/sunddi.h>
#include <sys/trap.h>
#include <sys/mca_x86.h>
#include <sys/processor.h>
#include <sys/cmn_err.h>
#include <sys/nvpair.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/fm/smb/fmsmb.h>
#include <sys/cpu_module_impl.h>

/*
 * Variable which determines if the SMBIOS supports x86 generic topology, or
 * if legacy topology enumeration will occur.
 */
extern int x86gentopo_legacy;

/*
 * Outside of this file consumers use the opaque cmi_hdl_t.  This
 * definition is duplicated in the generic_cpu mdb module, so keep
 * them in-sync when making changes.
 */
typedef struct cmi_hdl_impl {
	enum cmi_hdl_class cmih_class;		/* Handle nature */
	const struct cmi_hdl_ops *cmih_ops;	/* Operations vector */
	uint_t cmih_chipid;			/* Chipid of cpu resource */
	uint_t cmih_procnodeid;			/* Nodeid of cpu resource */
	uint_t cmih_coreid;			/* Core within die */
	uint_t cmih_strandid;			/* Thread within core */
	uint_t cmih_procnodes_per_pkg;		/* Nodes in a processor */
	boolean_t cmih_mstrand;			/* cores are multithreaded */
	volatile uint32_t *cmih_refcntp;	/* Reference count pointer */
	uint64_t cmih_msrsrc;			/* MSR data source flags */
	void *cmih_hdlpriv;			/* cmi_hw.c private data */
	void *cmih_spec;			/* cmi_hdl_{set,get}_specific */
	void *cmih_cmi;				/* cpu mod control structure */
	void *cmih_cmidata;			/* cpu mod private data */
	const struct cmi_mc_ops *cmih_mcops;	/* Memory-controller ops */
	void *cmih_mcdata;			/* Memory-controller data */
	uint64_t cmih_flags;			/* See CMIH_F_* below */
	uint16_t cmih_smbiosid;			/* SMBIOS Type 4 struct ID */
	uint_t cmih_smb_chipid;			/* SMBIOS factored chipid */
	nvlist_t *cmih_smb_bboard;		/* SMBIOS bboard nvlist */
} cmi_hdl_impl_t;

#define	IMPLHDL(ophdl)	((cmi_hdl_impl_t *)ophdl)
#define	HDLOPS(hdl)	((hdl)->cmih_ops)

#define	CMIH_F_INJACTV		0x1ULL
#define	CMIH_F_DEAD		0x2ULL

/*
 * Ops structure for handle operations.
 */
struct cmi_hdl_ops {
	/*
	 * These ops are required in an implementation.
	 */
	uint_t (*cmio_vendor)(cmi_hdl_impl_t *);
	const char *(*cmio_vendorstr)(cmi_hdl_impl_t *);
	uint_t (*cmio_family)(cmi_hdl_impl_t *);
	uint_t (*cmio_model)(cmi_hdl_impl_t *);
	uint_t (*cmio_stepping)(cmi_hdl_impl_t *);
	uint_t (*cmio_chipid)(cmi_hdl_impl_t *);
	uint_t (*cmio_procnodeid)(cmi_hdl_impl_t *);
	uint_t (*cmio_coreid)(cmi_hdl_impl_t *);
	uint_t (*cmio_strandid)(cmi_hdl_impl_t *);
	uint_t (*cmio_procnodes_per_pkg)(cmi_hdl_impl_t *);
	uint_t (*cmio_strand_apicid)(cmi_hdl_impl_t *);
	uint32_t (*cmio_chiprev)(cmi_hdl_impl_t *);
	const char *(*cmio_chiprevstr)(cmi_hdl_impl_t *);
	uint32_t (*cmio_getsockettype)(cmi_hdl_impl_t *);
	const char *(*cmio_getsocketstr)(cmi_hdl_impl_t *);
	uint_t (*cmio_chipsig)(cmi_hdl_impl_t *);

	id_t (*cmio_logical_id)(cmi_hdl_impl_t *);
	/*
	 * These ops are optional in an implementation.
	 */
	ulong_t (*cmio_getcr4)(cmi_hdl_impl_t *);
	void (*cmio_setcr4)(cmi_hdl_impl_t *, ulong_t);
	cmi_errno_t (*cmio_rdmsr)(cmi_hdl_impl_t *, uint_t, uint64_t *);
	cmi_errno_t (*cmio_wrmsr)(cmi_hdl_impl_t *, uint_t, uint64_t);
	cmi_errno_t (*cmio_msrinterpose)(cmi_hdl_impl_t *, uint_t, uint64_t);
	void (*cmio_int)(cmi_hdl_impl_t *, int);
	int (*cmio_online)(cmi_hdl_impl_t *, int, int *);
	uint16_t (*cmio_smbiosid)(cmi_hdl_impl_t *);
	uint_t (*cmio_smb_chipid)(cmi_hdl_impl_t *);
	nvlist_t *(*cmio_smb_bboard)(cmi_hdl_impl_t *);
};

static const struct cmi_hdl_ops cmi_hdl_ops;
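
/*
 * Illustrative dispatch sketch: ops from the optional group must be
 * NULL-checked before use, while required ops may be called directly,
 * e.g.
 *
 *	if (HDLOPS(hdl)->cmio_rdmsr == NULL)
 *		return (CMIERR_NOTSUP);
 *	return (HDLOPS(hdl)->cmio_rdmsr(hdl, msr, valp));
 *
 * cmi_hdl_rdmsr() below follows exactly this pattern.
 */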

/*
 * Handles are looked up from contexts such as polling, injection etc
 * where the context is reasonably well defined (although a poller could
 * interrupt any old thread holding any old lock).  They are also looked
 * up by machine check handlers, which may strike at inconvenient times
 * such as during handle initialization or destruction or during handle
 * lookup (which the #MC handler itself will also have to perform).
 *
 * So keeping handles in a linked list makes locking difficult when we
 * consider #MC handlers.  Our solution is to have a look-up table indexed
 * by that which uniquely identifies a handle - chip/core/strand id -
 * with each entry a structure including a pointer to a handle
 * structure for the resource, and a reference count for the handle.
 * Reference counts are modified atomically.  The public cmi_hdl_hold
 * always succeeds because it can only be used after handle creation
 * and before the call to destruct, so the hold count is already at least one.
 * In other functions that look up a handle (cmi_hdl_lookup, cmi_hdl_any)
 * we must be certain that the count has not already decremented to zero
 * before applying our hold.
 *
 * The table is an array of CMI_CHIPID_ARR_SZ entries - the maximum number
 * of chips - indexed by chip id.  If the chip is not present, the entry
 * is NULL.  Each non-NULL entry is a pointer to another array which
 * contains a list of all strands of the chip.  This first-level table is
 * allocated when we first want to populate an entry.  The size of the
 * latter (per-chip) table is CMI_MAX_STRANDS_PER_CHIP and it is populated
 * when one of its cpus starts.
 *
 * Ideally we should only allocate to the actual number of chips, cores per
 * chip and strands per core.  The number of chips is not available until
 * all of them have been enumerated, and the number of cores and strands is
 * only partially available.  For now we stick with the above approach.
 */
#define	CMI_MAX_CHIPID_NBITS		6	/* max chipid of 63 */
#define	CMI_MAX_CORES_PER_CHIP_NBITS	4	/* 16 cores per chip max */
#define	CMI_MAX_STRANDS_PER_CORE_NBITS	3	/* 8 strands per core max */

#define	CMI_MAX_CHIPID		((1 << (CMI_MAX_CHIPID_NBITS)) - 1)
#define	CMI_MAX_CORES_PER_CHIP(cbits)	(1 << (cbits))
#define	CMI_MAX_COREID(cbits)		((1 << (cbits)) - 1)
#define	CMI_MAX_STRANDS_PER_CORE(sbits)	(1 << (sbits))
#define	CMI_MAX_STRANDID(sbits)		((1 << (sbits)) - 1)
#define	CMI_MAX_STRANDS_PER_CHIP(cbits, sbits)	\
	(CMI_MAX_CORES_PER_CHIP(cbits) * CMI_MAX_STRANDS_PER_CORE(sbits))

#define	CMI_CHIPID_ARR_SZ		(1 << CMI_MAX_CHIPID_NBITS)

typedef struct cmi_hdl_ent {
	volatile uint32_t cmae_refcnt;
	cmi_hdl_impl_t *cmae_hdlp;
} cmi_hdl_ent_t;

static cmi_hdl_ent_t *cmi_chip_tab[CMI_CHIPID_ARR_SZ];

/*
 * Default values for the number of core and strand bits.
 */
uint_t cmi_core_nbits = CMI_MAX_CORES_PER_CHIP_NBITS;
uint_t cmi_strand_nbits = CMI_MAX_STRANDS_PER_CORE_NBITS;
static int cmi_ext_topo_check = 0;
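
/*
 * Worked example (illustrative): with the default cmi_core_nbits = 4 and
 * cmi_strand_nbits = 3, a per-chip table holds
 * CMI_MAX_STRANDS_PER_CHIP(4, 3) = 16 * 8 = 128 cmi_hdl_ent_t entries,
 * and the entry for <chipid 2, coreid 5, strandid 1> is
 *
 *	cmi_chip_tab[2] + ((5 << 3) | 1)	== &cmi_chip_tab[2][41]
 *
 * matching the arithmetic in cmi_hdl_ent_lookup() below.
 */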

/*
 * Controls where we will source PCI config space data.
 */
#define	CMI_PCICFG_FLAG_RD_HWOK		0x0001
#define	CMI_PCICFG_FLAG_RD_INTERPOSEOK	0x0002
#define	CMI_PCICFG_FLAG_WR_HWOK		0x0004
#define	CMI_PCICFG_FLAG_WR_INTERPOSEOK	0x0008

static uint64_t cmi_pcicfg_flags =
    CMI_PCICFG_FLAG_RD_HWOK | CMI_PCICFG_FLAG_RD_INTERPOSEOK |
    CMI_PCICFG_FLAG_WR_HWOK | CMI_PCICFG_FLAG_WR_INTERPOSEOK;

/*
 * The flags for individual cpus are kept in their per-cpu handle cmih_msrsrc.
 */
#define	CMI_MSR_FLAG_RD_HWOK		0x0001
#define	CMI_MSR_FLAG_RD_INTERPOSEOK	0x0002
#define	CMI_MSR_FLAG_WR_HWOK		0x0004
#define	CMI_MSR_FLAG_WR_INTERPOSEOK	0x0008

int cmi_call_func_ntv_tries = 3;

static cmi_errno_t
call_func_ntv(int cpuid, xc_func_t func, xc_arg_t arg1, xc_arg_t arg2)
{
	cmi_errno_t rc = -1;
	int i;

	kpreempt_disable();

	if (CPU->cpu_id == cpuid) {
		(*func)(arg1, arg2, (xc_arg_t)&rc);
	} else {
		/*
		 * This should not happen for a #MC trap or a poll, so
		 * this is likely an error injection or similar.
		 * We will try to cross call with xc_priority - we
		 * can't guarantee success with xc_call because
		 * the interrupt code in the case of a #MC may
		 * already hold the xc mutex.
		 */
		for (i = 0; i < cmi_call_func_ntv_tries; i++) {
			cpuset_t cpus;

			CPUSET_ONLY(cpus, cpuid);
			xc_priority(arg1, arg2, (xc_arg_t)&rc,
			    CPUSET2BV(cpus), func);
			if (rc != -1)
				break;

			DELAY(1);
		}
	}

	kpreempt_enable();

	return (rc != -1 ? rc : CMIERR_DEADLOCK);
}

static uint64_t injcnt;

void
cmi_hdl_inj_begin(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	if (hdl != NULL)
		hdl->cmih_flags |= CMIH_F_INJACTV;
	if (injcnt++ == 0) {
		cmn_err(CE_NOTE, "Hardware error injection/simulation "
		    "activity noted");
	}
}

void
cmi_hdl_inj_end(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	ASSERT(hdl == NULL || hdl->cmih_flags & CMIH_F_INJACTV);
	if (hdl != NULL)
		hdl->cmih_flags &= ~CMIH_F_INJACTV;
}
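
/*
 * Illustrative bracketing (sketch): injectors surround their activity
 * with the begin/end pair, e.g.
 *
 *	cmi_hdl_inj_begin(hdl);
 *	... interpose MSR or PCI config values, raise a #MC ...
 *	cmi_hdl_inj_end(hdl);
 *
 * The first begin call in the life of the system bumps injcnt, which
 * cmi_inj_tainted() below reports so that fault management can
 * distinguish simulated errors from real ones.
 */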

boolean_t
cmi_inj_tainted(void)
{
	return (injcnt != 0 ? B_TRUE : B_FALSE);
}

/*
 *	 =======================================================
 *	|		MSR Interposition			|
 *	|	-----------------				|
 *	|							|
 *	 -------------------------------------------------------
 */

#define	CMI_MSRI_HASHSZ		16
#define	CMI_MSRI_HASHIDX(hdl, msr) \
	(((uintptr_t)(hdl) >> 3 + (msr)) % (CMI_MSRI_HASHSZ - 1))

struct cmi_msri_bkt {
	kmutex_t msrib_lock;
	struct cmi_msri_hashent *msrib_head;
};

struct cmi_msri_hashent {
	struct cmi_msri_hashent *msrie_next;
	struct cmi_msri_hashent *msrie_prev;
	cmi_hdl_impl_t *msrie_hdl;
	uint_t msrie_msrnum;
	uint64_t msrie_msrval;
};

#define	CMI_MSRI_MATCH(ent, hdl, req_msr) \
	((ent)->msrie_hdl == (hdl) && (ent)->msrie_msrnum == (req_msr))

static struct cmi_msri_bkt msrihash[CMI_MSRI_HASHSZ];

static void
msri_addent(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
{
	int idx = CMI_MSRI_HASHIDX(hdl, msr);
	struct cmi_msri_bkt *hbp = &msrihash[idx];
	struct cmi_msri_hashent *hep;

	mutex_enter(&hbp->msrib_lock);

	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
		if (CMI_MSRI_MATCH(hep, hdl, msr))
			break;
	}

	if (hep != NULL) {
		hep->msrie_msrval = val;
	} else {
		hep = kmem_alloc(sizeof (*hep), KM_SLEEP);
		hep->msrie_hdl = hdl;
		hep->msrie_msrnum = msr;
		hep->msrie_msrval = val;

		if (hbp->msrib_head != NULL)
			hbp->msrib_head->msrie_prev = hep;
		hep->msrie_next = hbp->msrib_head;
		hep->msrie_prev = NULL;
		hbp->msrib_head = hep;
	}

	mutex_exit(&hbp->msrib_lock);
}

/*
 * Look for a match for the given handle and msr.  Return 1 with valp
 * filled if a match is found, otherwise return 0 with valp untouched.
 */
static int
msri_lookup(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
{
	int idx = CMI_MSRI_HASHIDX(hdl, msr);
	struct cmi_msri_bkt *hbp = &msrihash[idx];
	struct cmi_msri_hashent *hep;

	/*
	 * This function is called during #MC trap handling, so we should
	 * consider the possibility that the hash mutex is held by the
	 * interrupted thread.  This should not happen because interposition
	 * is an artificial injection mechanism and the #MC is requested
	 * after adding entries, but just in case of a real #MC at an
	 * unlucky moment we'll use mutex_tryenter here.
	 */
	if (!mutex_tryenter(&hbp->msrib_lock))
		return (0);

	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
		if (CMI_MSRI_MATCH(hep, hdl, msr)) {
			*valp = hep->msrie_msrval;
			break;
		}
	}

	mutex_exit(&hbp->msrib_lock);

	return (hep != NULL);
}
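
/*
 * Illustrative flow (sketch): once a value has been interposed, the next
 * read through the handle observes it instead of the hardware register:
 *
 *	msri_addent(hdl, IA32_MSR_MCG_STATUS, 0x4);
 *	(void) cmi_hdl_rdmsr((cmi_hdl_t)hdl, IA32_MSR_MCG_STATUS, &val);
 *						(val is now 0x4; no RDMSR)
 *
 * In practice callers reach msri_addent() through cmi_hdl_msrinterpose(),
 * which brackets the update with cmi_hdl_inj_begin()/cmi_hdl_inj_end().
 */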

/*
 * Remove any interposed value that matches.
 */
static void
msri_rment(cmi_hdl_impl_t *hdl, uint_t msr)
{
	int idx = CMI_MSRI_HASHIDX(hdl, msr);
	struct cmi_msri_bkt *hbp = &msrihash[idx];
	struct cmi_msri_hashent *hep;

	if (!mutex_tryenter(&hbp->msrib_lock))
		return;

	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
		if (CMI_MSRI_MATCH(hep, hdl, msr)) {
			if (hep->msrie_prev != NULL)
				hep->msrie_prev->msrie_next = hep->msrie_next;

			if (hep->msrie_next != NULL)
				hep->msrie_next->msrie_prev = hep->msrie_prev;

			if (hbp->msrib_head == hep)
				hbp->msrib_head = hep->msrie_next;

			kmem_free(hep, sizeof (*hep));
			break;
		}
	}

	mutex_exit(&hbp->msrib_lock);
}

/*
 *	 =======================================================
 *	|		PCI Config Space Interposition		|
 *	|	------------------------------			|
 *	|							|
 *	 -------------------------------------------------------
 */

/*
 * Hash for interposed PCI config space values.  We look up on
 * bus/dev/func/offset and then record whether the value stashed was made
 * with a byte, word or doubleword access; we will only return a hit for
 * an access of the same size.  If you access say a 32-bit register using
 * byte accesses and then attempt to read the full 32-bit value back you
 * will not obtain any sort of merged result - you get a lookup miss.
 */

#define	CMI_PCII_HASHSZ		16
#define	CMI_PCII_HASHIDX(b, d, f, o) \
	(((b) + (d) + (f) + (o)) % (CMI_PCII_HASHSZ - 1))

struct cmi_pcii_bkt {
	kmutex_t pciib_lock;
	struct cmi_pcii_hashent *pciib_head;
};

struct cmi_pcii_hashent {
	struct cmi_pcii_hashent *pcii_next;
	struct cmi_pcii_hashent *pcii_prev;
	int pcii_bus;
	int pcii_dev;
	int pcii_func;
	int pcii_reg;
	int pcii_asize;
	uint32_t pcii_val;
};

#define	CMI_PCII_MATCH(ent, b, d, f, r, asz) \
	((ent)->pcii_bus == (b) && (ent)->pcii_dev == (d) && \
	(ent)->pcii_func == (f) && (ent)->pcii_reg == (r) && \
	(ent)->pcii_asize == (asz))

static struct cmi_pcii_bkt pciihash[CMI_PCII_HASHSZ];

/*
 * Add a new entry to the PCI interpose hash, overwriting any existing
 * entry that is found.
 */
static void
pcii_addent(int bus, int dev, int func, int reg, uint32_t val, int asz)
{
	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
	struct cmi_pcii_bkt *hbp = &pciihash[idx];
	struct cmi_pcii_hashent *hep;

	cmi_hdl_inj_begin(NULL);

	mutex_enter(&hbp->pciib_lock);

	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz))
			break;
	}

	if (hep != NULL) {
		hep->pcii_val = val;
	} else {
		hep = kmem_alloc(sizeof (*hep), KM_SLEEP);
		hep->pcii_bus = bus;
		hep->pcii_dev = dev;
		hep->pcii_func = func;
		hep->pcii_reg = reg;
		hep->pcii_asize = asz;
		hep->pcii_val = val;

		if (hbp->pciib_head != NULL)
			hbp->pciib_head->pcii_prev = hep;
		hep->pcii_next = hbp->pciib_head;
		hep->pcii_prev = NULL;
		hbp->pciib_head = hep;
	}

	mutex_exit(&hbp->pciib_lock);

	cmi_hdl_inj_end(NULL);
}
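
/*
 * Illustrative usage (sketch): interpose a doubleword value and read it
 * back through the matching-size accessor:
 *
 *	cmi_pci_interposel(0, 24, 3, 0x44, 0xdeadbeef);
 *	val = cmi_pci_getl(0, 24, 3, 0x44, &interpose, NULL);
 *					(val == 0xdeadbeef, interpose == 1)
 *
 * A byte-sized cmi_pci_getb() of the same offset would miss, since the
 * stashed access size must match, as described above.
 */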

/*
 * Look for a match for the given bus/dev/func/reg; return 1 with valp
 * filled if a match is found, otherwise return 0 with valp untouched.
 */
static int
pcii_lookup(int bus, int dev, int func, int reg, int asz, uint32_t *valp)
{
	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
	struct cmi_pcii_bkt *hbp = &pciihash[idx];
	struct cmi_pcii_hashent *hep;

	if (!mutex_tryenter(&hbp->pciib_lock))
		return (0);

	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz)) {
			*valp = hep->pcii_val;
			break;
		}
	}

	mutex_exit(&hbp->pciib_lock);

	return (hep != NULL);
}

static void
pcii_rment(int bus, int dev, int func, int reg, int asz)
{
	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
	struct cmi_pcii_bkt *hbp = &pciihash[idx];
	struct cmi_pcii_hashent *hep;

	mutex_enter(&hbp->pciib_lock);

	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz)) {
			if (hep->pcii_prev != NULL)
				hep->pcii_prev->pcii_next = hep->pcii_next;

			if (hep->pcii_next != NULL)
				hep->pcii_next->pcii_prev = hep->pcii_prev;

			if (hbp->pciib_head == hep)
				hbp->pciib_head = hep->pcii_next;

			kmem_free(hep, sizeof (*hep));
			break;
		}
	}

	mutex_exit(&hbp->pciib_lock);
}

#ifndef __xpv

/*
 *	 =======================================================
 *	|		Native methods				|
 *	|	--------------					|
 *	|							|
 *	| These are used when we are running native on bare-	|
 *	| metal, or simply don't know any better.		|
 *	 -------------------------------------------------------
 */

#define	HDLPRIV(hdl)	((cpu_t *)(hdl)->cmih_hdlpriv)

static uint_t
ntv_vendor(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getvendor(HDLPRIV(hdl)));
}

static const char *
ntv_vendorstr(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getvendorstr(HDLPRIV(hdl)));
}

static uint_t
ntv_family(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getfamily(HDLPRIV(hdl)));
}

static uint_t
ntv_model(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getmodel(HDLPRIV(hdl)));
}

static uint_t
ntv_stepping(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getstep(HDLPRIV(hdl)));
}

static uint_t
ntv_chipid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_chipid);
}

static uint_t
ntv_procnodeid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_procnodeid);
}

static uint_t
ntv_procnodes_per_pkg(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_procnodes_per_pkg);
}

static uint_t
ntv_coreid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_coreid);
}

static uint_t
ntv_strandid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_strandid);
}

static uint_t
ntv_strand_apicid(cmi_hdl_impl_t *hdl)
{
	return (cpuid_get_apicid(HDLPRIV(hdl)));
}

static uint16_t
ntv_smbiosid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smbiosid);
}

static uint_t
ntv_smb_chipid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smb_chipid);
}

static nvlist_t *
ntv_smb_bboard(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smb_bboard);
}

static uint32_t
ntv_chiprev(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getchiprev(HDLPRIV(hdl)));
}

static const char *
ntv_chiprevstr(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getchiprevstr(HDLPRIV(hdl)));
}

static uint32_t
ntv_getsockettype(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getsockettype(HDLPRIV(hdl)));
}

static const char *
ntv_getsocketstr(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getsocketstr(HDLPRIV(hdl)));
}

static uint_t
ntv_chipsig(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getsig(HDLPRIV(hdl)));
}

static id_t
ntv_logical_id(cmi_hdl_impl_t *hdl)
{
	return (HDLPRIV(hdl)->cpu_id);
}

/*ARGSUSED*/
static int
ntv_getcr4_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	ulong_t *dest = (ulong_t *)arg1;
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;

	*dest = getcr4();
	*rcp = CMI_SUCCESS;

	return (0);
}

static ulong_t
ntv_getcr4(cmi_hdl_impl_t *hdl)
{
	cpu_t *cp = HDLPRIV(hdl);
	ulong_t val;

	(void) call_func_ntv(cp->cpu_id, ntv_getcr4_xc, (xc_arg_t)&val, NULL);

	return (val);
}

/*ARGSUSED*/
static int
ntv_setcr4_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	ulong_t val = (ulong_t)arg1;
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;

	setcr4(val);
	*rcp = CMI_SUCCESS;

	return (0);
}

static void
ntv_setcr4(cmi_hdl_impl_t *hdl, ulong_t val)
{
	cpu_t *cp = HDLPRIV(hdl);

	(void) call_func_ntv(cp->cpu_id, ntv_setcr4_xc, (xc_arg_t)val, NULL);
}

volatile uint32_t cmi_trapped_rdmsr;

/*ARGSUSED*/
static int
ntv_rdmsr_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	uint_t msr = (uint_t)arg1;
	uint64_t *valp = (uint64_t *)arg2;
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;

	on_trap_data_t otd;

	if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
		if (checked_rdmsr(msr, valp) == 0)
			*rcp = CMI_SUCCESS;
		else
			*rcp = CMIERR_NOTSUP;
	} else {
		*rcp = CMIERR_MSRGPF;
		atomic_inc_32(&cmi_trapped_rdmsr);
	}
	no_trap();

	return (0);
}

static cmi_errno_t
ntv_rdmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
{
	cpu_t *cp = HDLPRIV(hdl);

	if (!(hdl->cmih_msrsrc & CMI_MSR_FLAG_RD_HWOK))
		return (CMIERR_INTERPOSE);

	return (call_func_ntv(cp->cpu_id, ntv_rdmsr_xc,
	    (xc_arg_t)msr, (xc_arg_t)valp));
}

volatile uint32_t cmi_trapped_wrmsr;

/*ARGSUSED*/
static int
ntv_wrmsr_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	uint_t msr = (uint_t)arg1;
	uint64_t val = *((uint64_t *)arg2);
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
	on_trap_data_t otd;

	if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
		if (checked_wrmsr(msr, val) == 0)
			*rcp = CMI_SUCCESS;
		else
			*rcp = CMIERR_NOTSUP;
	} else {
		*rcp = CMIERR_MSRGPF;
		atomic_inc_32(&cmi_trapped_wrmsr);
	}
	no_trap();

	return (0);
}

static cmi_errno_t
ntv_wrmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
{
	cpu_t *cp = HDLPRIV(hdl);

	if (!(hdl->cmih_msrsrc & CMI_MSR_FLAG_WR_HWOK))
		return (CMI_SUCCESS);

	return (call_func_ntv(cp->cpu_id, ntv_wrmsr_xc,
	    (xc_arg_t)msr, (xc_arg_t)&val));
}

static cmi_errno_t
ntv_msrinterpose(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
{
	msri_addent(hdl, msr, val);
	return (CMI_SUCCESS);
}

/*ARGSUSED*/
static int
ntv_int_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
	int int_no = (int)arg1;

	if (int_no == T_MCE)
		int18();
	else
		int_cmci();
	*rcp = CMI_SUCCESS;
	return (0);
}

static void
ntv_int(cmi_hdl_impl_t *hdl, int int_no)
{
	cpu_t *cp = HDLPRIV(hdl);

	(void) call_func_ntv(cp->cpu_id, ntv_int_xc, (xc_arg_t)int_no, NULL);
}

static int
ntv_online(cmi_hdl_impl_t *hdl, int new_status, int *old_status)
{
	int rc;
	processorid_t cpuid = HDLPRIV(hdl)->cpu_id;

	while (mutex_tryenter(&cpu_lock) == 0) {
		if (hdl->cmih_flags & CMIH_F_DEAD)
			return (EBUSY);
		delay(1);
	}
	rc = p_online_internal_locked(cpuid, new_status, old_status);
	mutex_exit(&cpu_lock);

	return (rc);
}

#else	/* __xpv */

/*
 *	 =======================================================
 *	|		xVM dom0 methods			|
 *	|	----------------				|
 *	|							|
 *	| These are used when we are running as dom0 in	|
 *	| a Solaris xVM context.				|
 *	 -------------------------------------------------------
 */

#define	HDLPRIV(hdl)	((xen_mc_lcpu_cookie_t)(hdl)->cmih_hdlpriv)

extern uint_t _cpuid_vendorstr_to_vendorcode(char *);

static uint_t
xpv_vendor(cmi_hdl_impl_t *hdl)
{
	return (_cpuid_vendorstr_to_vendorcode((char *)xen_physcpu_vendorstr(
	    HDLPRIV(hdl))));
}

static const char *
xpv_vendorstr(cmi_hdl_impl_t *hdl)
{
	return (xen_physcpu_vendorstr(HDLPRIV(hdl)));
}

static uint_t
xpv_family(cmi_hdl_impl_t *hdl)
{
	return (xen_physcpu_family(HDLPRIV(hdl)));
}

static uint_t
xpv_model(cmi_hdl_impl_t *hdl)
{
	return (xen_physcpu_model(HDLPRIV(hdl)));
}

static uint_t
xpv_stepping(cmi_hdl_impl_t *hdl)
{
	return (xen_physcpu_stepping(HDLPRIV(hdl)));
}

static uint_t
xpv_chipid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_chipid);
}

static uint_t
xpv_procnodeid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_procnodeid);
}

static uint_t
xpv_procnodes_per_pkg(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_procnodes_per_pkg);
}

static uint_t
xpv_coreid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_coreid);
}

static uint_t
xpv_strandid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_strandid);
}

static uint_t
xpv_strand_apicid(cmi_hdl_impl_t *hdl)
{
	return (xen_physcpu_initial_apicid(HDLPRIV(hdl)));
}

static uint16_t
xpv_smbiosid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smbiosid);
}

static uint_t
xpv_smb_chipid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smb_chipid);
}

static nvlist_t *
xpv_smb_bboard(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smb_bboard);
}

extern uint32_t _cpuid_chiprev(uint_t, uint_t, uint_t, uint_t);

static uint32_t
xpv_chiprev(cmi_hdl_impl_t *hdl)
{
	return (_cpuid_chiprev(xpv_vendor(hdl), xpv_family(hdl),
	    xpv_model(hdl), xpv_stepping(hdl)));
}

extern const char *_cpuid_chiprevstr(uint_t, uint_t, uint_t, uint_t);

static const char *
xpv_chiprevstr(cmi_hdl_impl_t *hdl)
{
	return (_cpuid_chiprevstr(xpv_vendor(hdl), xpv_family(hdl),
	    xpv_model(hdl), xpv_stepping(hdl)));
}

extern uint32_t _cpuid_skt(uint_t, uint_t, uint_t, uint_t);

static uint32_t
xpv_getsockettype(cmi_hdl_impl_t *hdl)
{
	return (_cpuid_skt(xpv_vendor(hdl), xpv_family(hdl),
	    xpv_model(hdl), xpv_stepping(hdl)));
}

extern const char *_cpuid_sktstr(uint_t, uint_t, uint_t, uint_t);

static const char *
xpv_getsocketstr(cmi_hdl_impl_t *hdl)
{
	return (_cpuid_sktstr(xpv_vendor(hdl), xpv_family(hdl),
	    xpv_model(hdl), xpv_stepping(hdl)));
}

/* ARGSUSED */
static uint_t
xpv_chipsig(cmi_hdl_impl_t *hdl)
{
	return (0);
}

static id_t
xpv_logical_id(cmi_hdl_impl_t *hdl)
{
	return (xen_physcpu_logical_id(HDLPRIV(hdl)));
}

static cmi_errno_t
xpv_rdmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
{
	switch (msr) {
	case IA32_MSR_MCG_CAP:
		*valp = xen_physcpu_mcg_cap(HDLPRIV(hdl));
		break;

	default:
		return (CMIERR_NOTSUP);
	}

	return (CMI_SUCCESS);
}

/*
 * Request the hypervisor to write an MSR for us.  The hypervisor
 * will only accept MCA-related MSRs, as this is for MCA error
 * simulation purposes alone.  We will pre-screen MSRs for injection
 * so we don't bother the HV with bogus requests.  We will permit
 * injection to any MCA bank register, and to MCG_STATUS.
 */

#define	IS_MCA_INJ_MSR(msr) \
	(((msr) >= IA32_MSR_MC(0, CTL) && (msr) <= IA32_MSR_MC(10, MISC)) || \
	(msr) == IA32_MSR_MCG_STATUS)

static cmi_errno_t
xpv_wrmsr_cmn(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val, boolean_t intpose)
{
	xen_mc_t xmc;
	struct xen_mc_msrinject *mci = &xmc.u.mc_msrinject;

	if (!(hdl->cmih_flags & CMIH_F_INJACTV))
		return (CMIERR_NOTSUP);	/* for injection use only! */

	if (!IS_MCA_INJ_MSR(msr))
		return (CMIERR_API);

	if (panicstr)
		return (CMIERR_DEADLOCK);

	mci->mcinj_cpunr = xen_physcpu_logical_id(HDLPRIV(hdl));
	mci->mcinj_flags = intpose ? MC_MSRINJ_F_INTERPOSE : 0;
	mci->mcinj_count = 1;	/* learn to batch sometime */
	mci->mcinj_msr[0].reg = msr;
	mci->mcinj_msr[0].value = val;

	return (HYPERVISOR_mca(XEN_MC_msrinject, &xmc) == 0 ?
	    CMI_SUCCESS : CMIERR_NOTSUP);
}

static cmi_errno_t
xpv_wrmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
{
	return (xpv_wrmsr_cmn(hdl, msr, val, B_FALSE));
}

static cmi_errno_t
xpv_msrinterpose(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
{
	return (xpv_wrmsr_cmn(hdl, msr, val, B_TRUE));
}

static void
xpv_int(cmi_hdl_impl_t *hdl, int int_no)
{
	xen_mc_t xmc;
	struct xen_mc_mceinject *mce = &xmc.u.mc_mceinject;

	if (!(hdl->cmih_flags & CMIH_F_INJACTV))
		return;

	if (int_no != T_MCE) {
		cmn_err(CE_WARN, "xpv_int: int_no %d unimplemented\n",
		    int_no);
	}

	mce->mceinj_cpunr = xen_physcpu_logical_id(HDLPRIV(hdl));

	(void) HYPERVISOR_mca(XEN_MC_mceinject, &xmc);
}

static int
xpv_online(cmi_hdl_impl_t *hdl, int new_status, int *old_status)
{
	xen_sysctl_t xs;
	int op, rc, status;

	new_status &= ~P_FORCED;

	switch (new_status) {
	case P_STATUS:
		op = XEN_SYSCTL_CPU_HOTPLUG_STATUS;
		break;
	case P_FAULTED:
	case P_OFFLINE:
		op = XEN_SYSCTL_CPU_HOTPLUG_OFFLINE;
		break;
	case P_ONLINE:
		op = XEN_SYSCTL_CPU_HOTPLUG_ONLINE;
		break;
	default:
		return (-1);
	}

	xs.cmd = XEN_SYSCTL_cpu_hotplug;
	xs.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
	xs.u.cpu_hotplug.cpu = xen_physcpu_logical_id(HDLPRIV(hdl));
	xs.u.cpu_hotplug.op = op;

	if ((rc = HYPERVISOR_sysctl(&xs)) >= 0) {
		status = rc;
		rc = 0;
		switch (status) {
		case XEN_CPU_HOTPLUG_STATUS_NEW:
			*old_status = P_OFFLINE;
			break;
		case XEN_CPU_HOTPLUG_STATUS_OFFLINE:
			*old_status = P_FAULTED;
			break;
		case XEN_CPU_HOTPLUG_STATUS_ONLINE:
			*old_status = P_ONLINE;
			break;
		default:
			return (-1);
		}
	}

	return (-rc);
}

#endif

/*ARGSUSED*/
static void *
cpu_search(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
    uint_t strandid)
{
#ifdef __xpv
	xen_mc_lcpu_cookie_t cpi;

	for (cpi = xen_physcpu_next(NULL); cpi != NULL;
	    cpi = xen_physcpu_next(cpi)) {
		if (xen_physcpu_chipid(cpi) == chipid &&
		    xen_physcpu_coreid(cpi) == coreid &&
		    xen_physcpu_strandid(cpi) == strandid)
			return ((void *)cpi);
	}
	return (NULL);

#else	/* __xpv */

	cpu_t *cp, *startcp;

	kpreempt_disable();
	cp = startcp = CPU;
	do {
		if (cmi_ntv_hwchipid(cp) == chipid &&
		    cmi_ntv_hwcoreid(cp) == coreid &&
		    cmi_ntv_hwstrandid(cp) == strandid) {
			kpreempt_enable();
			return ((void *)cp);
		}

		cp = cp->cpu_next;
	} while (cp != startcp);
	kpreempt_enable();
	return (NULL);
#endif	/* __xpv */
}

static boolean_t
cpu_is_cmt(void *priv)
{
#ifdef __xpv
	return (xen_physcpu_is_cmt((xen_mc_lcpu_cookie_t)priv));
#else	/* __xpv */
	cpu_t *cp = (cpu_t *)priv;

	int strands_per_core = cpuid_get_ncpu_per_chip(cp) /
	    cpuid_get_ncore_per_chip(cp);

	return (strands_per_core > 1);
#endif	/* __xpv */
}

/*
 * Find the handle entry of a given cpu identified by a <chip,core,strand>
 * tuple.
 */
static cmi_hdl_ent_t *
cmi_hdl_ent_lookup(uint_t chipid, uint_t coreid, uint_t strandid)
{
	int max_strands = CMI_MAX_STRANDS_PER_CHIP(cmi_core_nbits,
	    cmi_strand_nbits);

	/*
	 * Allocate the per-chip table, which contains a list of handles
	 * for all strands of the chip.
	 */
	if (cmi_chip_tab[chipid] == NULL) {
		size_t sz;
		cmi_hdl_ent_t *pg;

		sz = max_strands * sizeof (cmi_hdl_ent_t);
		pg = kmem_zalloc(sz, KM_SLEEP);

		/* test and set the per-chip table if it is not allocated */
		if (atomic_cas_ptr(&cmi_chip_tab[chipid], NULL, pg) != NULL)
			kmem_free(pg, sz); /* someone beat us to it */
	}

	return (cmi_chip_tab[chipid] +
	    ((((coreid) & CMI_MAX_COREID(cmi_core_nbits)) << cmi_strand_nbits) |
	    ((strandid) & CMI_MAX_STRANDID(cmi_strand_nbits))));
}

extern void cpuid_get_ext_topo(uint_t, uint_t *, uint_t *);

cmi_hdl_t
cmi_hdl_create(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
    uint_t strandid)
{
	cmi_hdl_impl_t *hdl;
	void *priv;
	cmi_hdl_ent_t *ent;
	uint_t vendor;

#ifdef __xpv
	ASSERT(class == CMI_HDL_SOLARIS_xVM_MCA);
#else
	ASSERT(class == CMI_HDL_NATIVE);
#endif

	if ((priv = cpu_search(class, chipid, coreid, strandid)) == NULL)
		return (NULL);

	/*
	 * Assume all chips in the system are the same type.
	 * For Intel, attempt to check if extended topology is available
	 * via CPUID.EAX=0xB.  If so, get the number of core and strand bits.
	 */
#ifdef __xpv
	vendor = _cpuid_vendorstr_to_vendorcode(
	    (char *)xen_physcpu_vendorstr((xen_mc_lcpu_cookie_t)priv));
#else
	vendor = cpuid_getvendor((cpu_t *)priv);
#endif
	if (vendor == X86_VENDOR_Intel && cmi_ext_topo_check == 0) {
		cpuid_get_ext_topo(vendor, &cmi_core_nbits, &cmi_strand_nbits);
		cmi_ext_topo_check = 1;
	}

	if (chipid > CMI_MAX_CHIPID ||
	    coreid > CMI_MAX_COREID(cmi_core_nbits) ||
	    strandid > CMI_MAX_STRANDID(cmi_strand_nbits))
		return (NULL);

	hdl = kmem_zalloc(sizeof (*hdl), KM_SLEEP);

	hdl->cmih_class = class;
	HDLOPS(hdl) = &cmi_hdl_ops;
	hdl->cmih_chipid = chipid;
	hdl->cmih_coreid = coreid;
	hdl->cmih_strandid = strandid;
	hdl->cmih_mstrand = cpu_is_cmt(priv);
	hdl->cmih_hdlpriv = priv;
#ifdef __xpv
	hdl->cmih_msrsrc = CMI_MSR_FLAG_RD_INTERPOSEOK |
	    CMI_MSR_FLAG_WR_INTERPOSEOK;

	/*
	 * XXX: need hypervisor support for procnodeid, for now assume
	 * single-node processors (procnodeid = chipid)
	 */
	hdl->cmih_procnodeid = xen_physcpu_chipid((xen_mc_lcpu_cookie_t)priv);
	hdl->cmih_procnodes_per_pkg = 1;
#else	/* __xpv */
	hdl->cmih_msrsrc = CMI_MSR_FLAG_RD_HWOK | CMI_MSR_FLAG_RD_INTERPOSEOK |
	    CMI_MSR_FLAG_WR_HWOK | CMI_MSR_FLAG_WR_INTERPOSEOK;
	hdl->cmih_procnodeid = cpuid_get_procnodeid((cpu_t *)priv);
	hdl->cmih_procnodes_per_pkg =
	    cpuid_get_procnodes_per_pkg((cpu_t *)priv);
#endif	/* __xpv */

	ent = cmi_hdl_ent_lookup(chipid, coreid, strandid);
	if (ent->cmae_refcnt != 0 || ent->cmae_hdlp != NULL) {
		/*
		 * Somehow this (chipid, coreid, strandid) id tuple has
		 * already been assigned!  This indicates that the
		 * caller's logic in determining these values is busted,
		 * or perhaps undermined by bad BIOS setup.  Complain,
		 * and refuse to initialize this tuple again as bad things
		 * will happen.
		 */
		cmn_err(CE_NOTE, "cmi_hdl_create: chipid %d coreid %d "
		    "strandid %d handle already allocated!",
		    chipid, coreid, strandid);
		kmem_free(hdl, sizeof (*hdl));
		return (NULL);
	}

	/*
	 * Once we store a nonzero reference count others can find this
	 * handle via cmi_hdl_lookup etc.  This initial hold on the handle
	 * is to be dropped only if some other part of cmi initialization
	 * fails or, if it succeeds, at later cpu deconfigure.  Note that
	 * the module private data we hold in cmih_cmi and cmih_cmidata
	 * is still NULL at this point (the caller will fill it with
	 * cmi_hdl_setcmi if it initializes) so consumers of handles
	 * should always be ready for that possibility.
	 */
	ent->cmae_hdlp = hdl;
	hdl->cmih_refcntp = &ent->cmae_refcnt;
	ent->cmae_refcnt = 1;

	return ((cmi_hdl_t)hdl);
}

void
cmi_read_smbios(cmi_hdl_t ophdl)
{
	uint_t strand_apicid = UINT_MAX;
	uint_t chip_inst = UINT_MAX;
	uint16_t smb_id = USHRT_MAX;
	int rc = 0;

	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	/* set x86gentopo compatibility */
	fm_smb_fmacompat();

#ifndef __xpv
	strand_apicid = ntv_strand_apicid(hdl);
#else
	strand_apicid = xpv_strand_apicid(hdl);
#endif

	if (!x86gentopo_legacy) {
		/*
		 * If fm_smb_chipinst() or fm_smb_bboard() fails,
		 * topo reverts to legacy mode.
		 */
		rc = fm_smb_chipinst(strand_apicid, &chip_inst, &smb_id);
		if (rc == 0) {
			hdl->cmih_smb_chipid = chip_inst;
			hdl->cmih_smbiosid = smb_id;
		} else {
#ifdef DEBUG
			cmn_err(CE_NOTE,
			    "!cmi read of smbios chip info failed");
#endif /* DEBUG */
			return;
		}

		hdl->cmih_smb_bboard = fm_smb_bboard(strand_apicid);
#ifdef DEBUG
		if (hdl->cmih_smb_bboard == NULL)
			cmn_err(CE_NOTE,
			    "!cmi read of smbios base boards info failed");
#endif /* DEBUG */
	}
}

void
cmi_hdl_hold(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	ASSERT(*hdl->cmih_refcntp != 0); /* must not be the initial hold */

	atomic_inc_32(hdl->cmih_refcntp);
}

static int
cmi_hdl_canref(cmi_hdl_ent_t *ent)
{
	volatile uint32_t *refcntp;
	uint32_t refcnt;

	refcntp = &ent->cmae_refcnt;
	refcnt = *refcntp;

	if (refcnt == 0) {
		/*
		 * Associated object never existed, is being destroyed,
		 * or has been destroyed.
		 */
		return (0);
	}

	/*
	 * We cannot use atomic increment here because once the reference
	 * count reaches zero it must never be bumped up again.
	 */
	while (refcnt != 0) {
		if (atomic_cas_32(refcntp, refcnt, refcnt + 1) == refcnt)
			return (1);
		refcnt = *refcntp;
	}

	/*
	 * Somebody dropped the reference count to 0 after our initial
	 * check.
	 */
	return (0);
}

void
cmi_hdl_rele(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	ASSERT(*hdl->cmih_refcntp > 0);
	atomic_dec_32(hdl->cmih_refcntp);
}
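
/*
 * Illustrative lifecycle (sketch): a handle is created once per strand at
 * cpu configure and referenced through lookup/rele pairs thereafter:
 *
 *	hdl = cmi_hdl_create(CMI_HDL_NATIVE, chipid, coreid, strandid);
 *	...
 *	if ((h = cmi_hdl_lookup(CMI_HDL_NATIVE, chipid, coreid,
 *	    strandid)) != NULL) {
 *		... use h ...
 *		cmi_hdl_rele(h);
 *	}
 *	...
 *	cmi_hdl_destroy(hdl);	(drops the initial hold, spins out any
 *				remaining holders, frees the handle)
 */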

void
cmi_hdl_destroy(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
	cmi_hdl_ent_t *ent;

	/* Release the reference count held by cmi_hdl_create(). */
	ASSERT(*hdl->cmih_refcntp > 0);
	atomic_dec_32(hdl->cmih_refcntp);
	hdl->cmih_flags |= CMIH_F_DEAD;

	ent = cmi_hdl_ent_lookup(hdl->cmih_chipid, hdl->cmih_coreid,
	    hdl->cmih_strandid);
	/*
	 * Use busy polling instead of a condition variable here because
	 * cmi_hdl_rele() may be called from a #MC handler.
	 */
	while (cmi_hdl_canref(ent)) {
		cmi_hdl_rele(ophdl);
		delay(1);
	}
	ent->cmae_hdlp = NULL;

	kmem_free(hdl, sizeof (*hdl));
}

void
cmi_hdl_setspecific(cmi_hdl_t ophdl, void *arg)
{
	IMPLHDL(ophdl)->cmih_spec = arg;
}

void *
cmi_hdl_getspecific(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_spec);
}

void
cmi_hdl_setmc(cmi_hdl_t ophdl, const struct cmi_mc_ops *mcops, void *mcdata)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	ASSERT(hdl->cmih_mcops == NULL && hdl->cmih_mcdata == NULL);
	hdl->cmih_mcops = mcops;
	hdl->cmih_mcdata = mcdata;
}

const struct cmi_mc_ops *
cmi_hdl_getmcops(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_mcops);
}

void *
cmi_hdl_getmcdata(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_mcdata);
}

cmi_hdl_t
cmi_hdl_lookup(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
    uint_t strandid)
{
	cmi_hdl_ent_t *ent;

	if (chipid > CMI_MAX_CHIPID ||
	    coreid > CMI_MAX_COREID(cmi_core_nbits) ||
	    strandid > CMI_MAX_STRANDID(cmi_strand_nbits))
		return (NULL);

	ent = cmi_hdl_ent_lookup(chipid, coreid, strandid);

	if (class == CMI_HDL_NEUTRAL)
#ifdef __xpv
		class = CMI_HDL_SOLARIS_xVM_MCA;
#else
		class = CMI_HDL_NATIVE;
#endif

	if (!cmi_hdl_canref(ent))
		return (NULL);

	if (ent->cmae_hdlp->cmih_class != class) {
		cmi_hdl_rele((cmi_hdl_t)ent->cmae_hdlp);
		return (NULL);
	}

	return ((cmi_hdl_t)ent->cmae_hdlp);
}

cmi_hdl_t
cmi_hdl_any(void)
{
	int i, j;
	cmi_hdl_ent_t *ent;
	int max_strands = CMI_MAX_STRANDS_PER_CHIP(cmi_core_nbits,
	    cmi_strand_nbits);

	for (i = 0; i < CMI_CHIPID_ARR_SZ; i++) {
		if (cmi_chip_tab[i] == NULL)
			continue;
		for (j = 0, ent = cmi_chip_tab[i]; j < max_strands;
		    j++, ent++) {
			if (cmi_hdl_canref(ent))
				return ((cmi_hdl_t)ent->cmae_hdlp);
		}
	}

	return (NULL);
}

void
cmi_hdl_walk(int (*cbfunc)(cmi_hdl_t, void *, void *, void *),
    void *arg1, void *arg2, void *arg3)
{
	int i, j;
	cmi_hdl_ent_t *ent;
	int max_strands = CMI_MAX_STRANDS_PER_CHIP(cmi_core_nbits,
	    cmi_strand_nbits);

	for (i = 0; i < CMI_CHIPID_ARR_SZ; i++) {
		if (cmi_chip_tab[i] == NULL)
			continue;
		for (j = 0, ent = cmi_chip_tab[i]; j < max_strands;
		    j++, ent++) {
			if (cmi_hdl_canref(ent)) {
				cmi_hdl_impl_t *hdl = ent->cmae_hdlp;
				if ((*cbfunc)((cmi_hdl_t)hdl, arg1, arg2, arg3)
				    == CMI_HDL_WALK_DONE) {
					cmi_hdl_rele((cmi_hdl_t)hdl);
					return;
				}
				cmi_hdl_rele((cmi_hdl_t)hdl);
			}
		}
	}
}

void
cmi_hdl_setcmi(cmi_hdl_t ophdl, void *cmi, void *cmidata)
{
	IMPLHDL(ophdl)->cmih_cmidata = cmidata;
	IMPLHDL(ophdl)->cmih_cmi = cmi;
}

void *
cmi_hdl_getcmi(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_cmi);
}
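
/*
 * Illustrative walker (sketch; the callback below is hypothetical):
 *
 *	static int
 *	count_strands(cmi_hdl_t hdl, void *arg1, void *arg2, void *arg3)
 *	{
 *		(*(uint_t *)arg1)++;
 *		return (CMI_HDL_WALK_NEXT);
 *	}
 *
 *	uint_t n = 0;
 *	cmi_hdl_walk(count_strands, &n, NULL, NULL);
 *
 * cmi_hdl_walk() takes and drops a hold around each callback invocation,
 * so the callback must not rele the handle it is passed.
 */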

void *
cmi_hdl_getcmidata(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_cmidata);
}

enum cmi_hdl_class
cmi_hdl_class(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_class);
}

#define	CMI_HDL_OPFUNC(what, type)				\
	type							\
	cmi_hdl_##what(cmi_hdl_t ophdl)				\
	{							\
		return (HDLOPS(IMPLHDL(ophdl))->		\
		    cmio_##what(IMPLHDL(ophdl)));		\
	}

/* BEGIN CSTYLED */
CMI_HDL_OPFUNC(vendor, uint_t)
CMI_HDL_OPFUNC(vendorstr, const char *)
CMI_HDL_OPFUNC(family, uint_t)
CMI_HDL_OPFUNC(model, uint_t)
CMI_HDL_OPFUNC(stepping, uint_t)
CMI_HDL_OPFUNC(chipid, uint_t)
CMI_HDL_OPFUNC(procnodeid, uint_t)
CMI_HDL_OPFUNC(coreid, uint_t)
CMI_HDL_OPFUNC(strandid, uint_t)
CMI_HDL_OPFUNC(procnodes_per_pkg, uint_t)
CMI_HDL_OPFUNC(strand_apicid, uint_t)
CMI_HDL_OPFUNC(chiprev, uint32_t)
CMI_HDL_OPFUNC(chiprevstr, const char *)
CMI_HDL_OPFUNC(getsockettype, uint32_t)
CMI_HDL_OPFUNC(getsocketstr, const char *)
CMI_HDL_OPFUNC(logical_id, id_t)
CMI_HDL_OPFUNC(smbiosid, uint16_t)
CMI_HDL_OPFUNC(smb_chipid, uint_t)
CMI_HDL_OPFUNC(smb_bboard, nvlist_t *)
CMI_HDL_OPFUNC(chipsig, uint_t)
/* END CSTYLED */

boolean_t
cmi_hdl_is_cmt(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_mstrand);
}

void
cmi_hdl_int(cmi_hdl_t ophdl, int num)
{
	if (HDLOPS(IMPLHDL(ophdl))->cmio_int == NULL)
		return;

	cmi_hdl_inj_begin(ophdl);
	HDLOPS(IMPLHDL(ophdl))->cmio_int(IMPLHDL(ophdl), num);
	cmi_hdl_inj_end(NULL);
}

int
cmi_hdl_online(cmi_hdl_t ophdl, int new_status, int *old_status)
{
	return (HDLOPS(IMPLHDL(ophdl))->cmio_online(IMPLHDL(ophdl),
	    new_status, old_status));
}

#ifndef __xpv
/*
 * Return hardware chip instance; cpuid_get_chipid provides this directly.
 */
uint_t
cmi_ntv_hwchipid(cpu_t *cp)
{
	return (cpuid_get_chipid(cp));
}

/*
 * Return hardware node instance; cpuid_get_procnodeid provides this directly.
 */
uint_t
cmi_ntv_hwprocnodeid(cpu_t *cp)
{
	return (cpuid_get_procnodeid(cp));
}

/*
 * Return core instance within a single chip.
 */
uint_t
cmi_ntv_hwcoreid(cpu_t *cp)
{
	return (cpuid_get_pkgcoreid(cp));
}

/*
 * Return strand number within a single core.  cpuid_get_clogid numbers
 * all execution units (strands, or cores in unstranded models) sequentially
 * within a single chip.
 */
uint_t
cmi_ntv_hwstrandid(cpu_t *cp)
{
	int strands_per_core = cpuid_get_ncpu_per_chip(cp) /
	    cpuid_get_ncore_per_chip(cp);

	return (cpuid_get_clogid(cp) % strands_per_core);
}

static void
cmi_ntv_hwdisable_mce_xc(void)
{
	ulong_t cr4;

	cr4 = getcr4();
	cr4 = cr4 & (~CR4_MCE);
	setcr4(cr4);
}

void
cmi_ntv_hwdisable_mce(cmi_hdl_t hdl)
{
	cpuset_t set;
	cmi_hdl_impl_t *thdl = IMPLHDL(hdl);
	cpu_t *cp = HDLPRIV(thdl);

	if (CPU->cpu_id == cp->cpu_id) {
		cmi_ntv_hwdisable_mce_xc();
	} else {
		CPUSET_ONLY(set, cp->cpu_id);
		xc_call(NULL, NULL, NULL, CPUSET2BV(set),
		    (xc_func_t)cmi_ntv_hwdisable_mce_xc);
	}
}

#endif	/* __xpv */

void
cmi_hdlconf_rdmsr_nohw(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	hdl->cmih_msrsrc &= ~CMI_MSR_FLAG_RD_HWOK;
}

void
cmi_hdlconf_wrmsr_nohw(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	hdl->cmih_msrsrc &= ~CMI_MSR_FLAG_WR_HWOK;
}

cmi_errno_t
cmi_hdl_rdmsr(cmi_hdl_t ophdl, uint_t msr, uint64_t *valp)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	/*
	 * Regardless of the handle class, we first check for an
	 * interposed value.  In the xVM case you probably want to
	 * place interposed values within the hypervisor itself, but
	 * we still allow interposing them in dom0 for test and bringup
	 * purposes.
	 */
	if ((hdl->cmih_msrsrc & CMI_MSR_FLAG_RD_INTERPOSEOK) &&
	    msri_lookup(hdl, msr, valp))
		return (CMI_SUCCESS);

	if (HDLOPS(hdl)->cmio_rdmsr == NULL)
		return (CMIERR_NOTSUP);

	return (HDLOPS(hdl)->cmio_rdmsr(hdl, msr, valp));
}

cmi_errno_t
cmi_hdl_wrmsr(cmi_hdl_t ophdl, uint_t msr, uint64_t val)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	/* Invalidate any interposed value */
	msri_rment(hdl, msr);

	if (HDLOPS(hdl)->cmio_wrmsr == NULL)
		return (CMI_SUCCESS);	/* pretend all is ok */

	return (HDLOPS(hdl)->cmio_wrmsr(hdl, msr, val));
}

void
cmi_hdl_enable_mce(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
	ulong_t cr4;

	if (HDLOPS(hdl)->cmio_getcr4 == NULL ||
	    HDLOPS(hdl)->cmio_setcr4 == NULL)
		return;

	cr4 = HDLOPS(hdl)->cmio_getcr4(hdl);

	HDLOPS(hdl)->cmio_setcr4(hdl, cr4 | CR4_MCE);
}

void
cmi_hdl_msrinterpose(cmi_hdl_t ophdl, cmi_mca_regs_t *regs, uint_t nregs)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
	int i;

	if (HDLOPS(hdl)->cmio_msrinterpose == NULL)
		return;

	cmi_hdl_inj_begin(ophdl);

	for (i = 0; i < nregs; i++, regs++)
		HDLOPS(hdl)->cmio_msrinterpose(hdl, regs->cmr_msrnum,
		    regs->cmr_msrval);

	cmi_hdl_inj_end(ophdl);
}

/*ARGSUSED*/
void
cmi_hdl_msrforward(cmi_hdl_t ophdl, cmi_mca_regs_t *regs, uint_t nregs)
{
#ifdef __xpv
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
	int i;

	for (i = 0; i < nregs; i++, regs++)
		msri_addent(hdl, regs->cmr_msrnum, regs->cmr_msrval);
#endif
}

void
cmi_pcird_nohw(void)
{
	cmi_pcicfg_flags &= ~CMI_PCICFG_FLAG_RD_HWOK;
}

void
cmi_pciwr_nohw(void)
{
	cmi_pcicfg_flags &= ~CMI_PCICFG_FLAG_WR_HWOK;
}
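
/*
 * Illustrative note (sketch): cmi_pcird_nohw()/cmi_pciwr_nohw() put PCI
 * config access into simulation-only mode:
 *
 *	cmi_pcird_nohw();
 *	cmi_pci_interposel(0, 24, 3, 0x44, 0xdeadbeef);
 *	val = cmi_pci_getl(0, 24, 3, 0x44, &interpose, NULL);
 *
 * With CMI_PCICFG_FLAG_RD_HWOK clear, reads are satisfied only from the
 * interpose hash; a lookup miss returns 0 rather than touching hardware
 * (see cmi_pci_get_cmn() below).
 */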

static uint32_t
cmi_pci_get_cmn(int bus, int dev, int func, int reg, int asz,
    int *interpose, ddi_acc_handle_t hdl)
{
	uint32_t val;

	if (cmi_pcicfg_flags & CMI_PCICFG_FLAG_RD_INTERPOSEOK &&
	    pcii_lookup(bus, dev, func, reg, asz, &val)) {
		if (interpose)
			*interpose = 1;
		return (val);
	}
	if (interpose)
		*interpose = 0;

	if (!(cmi_pcicfg_flags & CMI_PCICFG_FLAG_RD_HWOK))
		return (0);

	switch (asz) {
	case 1:
		if (hdl)
			val = pci_config_get8(hdl, (off_t)reg);
		else
			val = pci_cfgacc_get8(NULL, PCI_GETBDF(bus, dev, func),
			    reg);
		break;
	case 2:
		if (hdl)
			val = pci_config_get16(hdl, (off_t)reg);
		else
			val = pci_cfgacc_get16(NULL, PCI_GETBDF(bus, dev, func),
			    reg);
		break;
	case 4:
		if (hdl)
			val = pci_config_get32(hdl, (off_t)reg);
		else
			val = pci_cfgacc_get32(NULL, PCI_GETBDF(bus, dev, func),
			    reg);
		break;
	default:
		val = 0;
	}
	return (val);
}

uint8_t
cmi_pci_getb(int bus, int dev, int func, int reg, int *interpose,
    ddi_acc_handle_t hdl)
{
	return ((uint8_t)cmi_pci_get_cmn(bus, dev, func, reg, 1, interpose,
	    hdl));
}

uint16_t
cmi_pci_getw(int bus, int dev, int func, int reg, int *interpose,
    ddi_acc_handle_t hdl)
{
	return ((uint16_t)cmi_pci_get_cmn(bus, dev, func, reg, 2, interpose,
	    hdl));
}

uint32_t
cmi_pci_getl(int bus, int dev, int func, int reg, int *interpose,
    ddi_acc_handle_t hdl)
{
	return (cmi_pci_get_cmn(bus, dev, func, reg, 4, interpose, hdl));
}

void
cmi_pci_interposeb(int bus, int dev, int func, int reg, uint8_t val)
{
	pcii_addent(bus, dev, func, reg, val, 1);
}

void
cmi_pci_interposew(int bus, int dev, int func, int reg, uint16_t val)
{
	pcii_addent(bus, dev, func, reg, val, 2);
}

void
cmi_pci_interposel(int bus, int dev, int func, int reg, uint32_t val)
{
	pcii_addent(bus, dev, func, reg, val, 4);
}

static void
cmi_pci_put_cmn(int bus, int dev, int func, int reg, int asz,
    ddi_acc_handle_t hdl, uint32_t val)
{
	/*
	 * If there is an interposed value for this register, invalidate it.
	 */
	pcii_rment(bus, dev, func, reg, asz);

	if (!(cmi_pcicfg_flags & CMI_PCICFG_FLAG_WR_HWOK))
		return;

	switch (asz) {
	case 1:
		if (hdl)
			pci_config_put8(hdl, (off_t)reg, (uint8_t)val);
		else
			pci_cfgacc_put8(NULL, PCI_GETBDF(bus, dev, func), reg,
			    (uint8_t)val);
		break;

	case 2:
		if (hdl)
			pci_config_put16(hdl, (off_t)reg, (uint16_t)val);
		else
			pci_cfgacc_put16(NULL, PCI_GETBDF(bus, dev, func), reg,
			    (uint16_t)val);
		break;

	case 4:
		if (hdl)
			pci_config_put32(hdl, (off_t)reg, val);
		else
			pci_cfgacc_put32(NULL, PCI_GETBDF(bus, dev, func), reg,
			    val);
		break;

	default:
		break;
	}
}

void
cmi_pci_putb(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
    uint8_t val)
{
	cmi_pci_put_cmn(bus, dev, func, reg, 1, hdl, val);
}

void
cmi_pci_putw(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
    uint16_t val)
{
	cmi_pci_put_cmn(bus, dev, func, reg, 2, hdl, val);
}

void
cmi_pci_putl(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
    uint32_t val)
{
	cmi_pci_put_cmn(bus, dev, func, reg, 4, hdl, val);
}

static const struct cmi_hdl_ops cmi_hdl_ops = {
#ifdef __xpv
	/*
	 * CMI_HDL_SOLARIS_xVM_MCA - ops when we are an xVM dom0
	 */
	xpv_vendor,		/* cmio_vendor */
	xpv_vendorstr,		/* cmio_vendorstr */
	xpv_family,		/* cmio_family */
	xpv_model,		/* cmio_model */
	xpv_stepping,		/* cmio_stepping */
	xpv_chipid,		/* cmio_chipid */
	xpv_procnodeid,		/* cmio_procnodeid */
	xpv_coreid,		/* cmio_coreid */
	xpv_strandid,		/* cmio_strandid */
	xpv_procnodes_per_pkg,	/* cmio_procnodes_per_pkg */
	xpv_strand_apicid,	/* cmio_strand_apicid */
	xpv_chiprev,		/* cmio_chiprev */
	xpv_chiprevstr,		/* cmio_chiprevstr */
	xpv_getsockettype,	/* cmio_getsockettype */
	xpv_getsocketstr,	/* cmio_getsocketstr */
	xpv_chipsig,		/* cmio_chipsig */
	xpv_logical_id,		/* cmio_logical_id */
	NULL,			/* cmio_getcr4 */
	NULL,			/* cmio_setcr4 */
	xpv_rdmsr,		/* cmio_rdmsr */
	xpv_wrmsr,		/* cmio_wrmsr */
	xpv_msrinterpose,	/* cmio_msrinterpose */
	xpv_int,		/* cmio_int */
	xpv_online,		/* cmio_online */
	xpv_smbiosid,		/* cmio_smbiosid */
	xpv_smb_chipid,		/* cmio_smb_chipid */
	xpv_smb_bboard		/* cmio_smb_bboard */

#else	/* __xpv */

	/*
	 * CMI_HDL_NATIVE - ops when apparently running on bare-metal
	 */
	ntv_vendor,		/* cmio_vendor */
	ntv_vendorstr,		/* cmio_vendorstr */
	ntv_family,		/* cmio_family */
	ntv_model,		/* cmio_model */
	ntv_stepping,		/* cmio_stepping */
	ntv_chipid,		/* cmio_chipid */
	ntv_procnodeid,		/* cmio_procnodeid */
	ntv_coreid,		/* cmio_coreid */
	ntv_strandid,		/* cmio_strandid */
	ntv_procnodes_per_pkg,	/* cmio_procnodes_per_pkg */
	ntv_strand_apicid,	/* cmio_strand_apicid */
	ntv_chiprev,		/* cmio_chiprev */
	ntv_chiprevstr,		/* cmio_chiprevstr */
	ntv_getsockettype,	/* cmio_getsockettype */
	ntv_getsocketstr,	/* cmio_getsocketstr */
	ntv_chipsig,		/* cmio_chipsig */
	ntv_logical_id,		/* cmio_logical_id */
	ntv_getcr4,		/* cmio_getcr4 */
	ntv_setcr4,		/* cmio_setcr4 */
	ntv_rdmsr,		/* cmio_rdmsr */
	ntv_wrmsr,		/* cmio_wrmsr */
	ntv_msrinterpose,	/* cmio_msrinterpose */
	ntv_int,		/* cmio_int */
	ntv_online,		/* cmio_online */
	ntv_smbiosid,		/* cmio_smbiosid */
	ntv_smb_chipid,		/* cmio_smb_chipid */
	ntv_smb_bboard		/* cmio_smb_bboard */
#endif
};