xref: /titanic_52/usr/src/uts/i86pc/os/cmi_hw.c (revision fcdeb91bbce674703e2d1423c81e7155f4f13089)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 /*
27  * Copyright (c) 2010, Intel Corporation.
28  * All rights reserved.
29  */
30 
31 /*
32  * CPU Module Interface - hardware abstraction.
33  */
34 
35 #ifdef __xpv
36 #include <sys/xpv_user.h>
37 #endif
38 
39 #include <sys/types.h>
40 #include <sys/cpu_module.h>
41 #include <sys/kmem.h>
42 #include <sys/x86_archext.h>
43 #include <sys/cpuvar.h>
44 #include <sys/ksynch.h>
45 #include <sys/x_call.h>
46 #include <sys/pghw.h>
47 #include <sys/pci_cfgacc.h>
48 #include <sys/pci_cfgspace.h>
49 #include <sys/archsystm.h>
50 #include <sys/ontrap.h>
51 #include <sys/controlregs.h>
52 #include <sys/sunddi.h>
53 #include <sys/trap.h>
54 #include <sys/mca_x86.h>
55 #include <sys/processor.h>
56 #include <sys/cmn_err.h>
57 #include <sys/nvpair.h>
58 #include <sys/fm/util.h>
59 #include <sys/fm/protocol.h>
60 #include <sys/fm/smb/fmsmb.h>
61 #include <sys/cpu_module_impl.h>
62 
63 /*
64  * Variable which determines if the SMBIOS supports x86 generic topology; or
 * if legacy topology enumeration will occur.
66  */
67 extern int x86gentopo_legacy;
68 
/*
 * Outside of this file consumers use the opaque cmi_hdl_t.  This
 * definition is duplicated in the generic_cpu mdb module, so keep
 * them in-sync (same member order, types and sizes) when making
 * changes.
 */
typedef struct cmi_hdl_impl {
	enum cmi_hdl_class cmih_class;		/* Handle nature */
	const struct cmi_hdl_ops *cmih_ops;	/* Operations vector */
	uint_t cmih_chipid;			/* Chipid of cpu resource */
	uint_t cmih_procnodeid;			/* Nodeid of cpu resource */
	uint_t cmih_coreid;			/* Core within die */
	uint_t cmih_strandid;			/* Thread within core */
	uint_t cmih_procnodes_per_pkg;		/* Nodes in a processor */
	boolean_t cmih_mstrand;			/* cores are multithreaded */
	volatile uint32_t *cmih_refcntp;	/* Reference count pointer */
	uint64_t cmih_msrsrc;			/* MSR data source flags */
	void *cmih_hdlpriv;			/* cmi_hw.c private data */
	void *cmih_spec;			/* cmi_hdl_{set,get}_specific */
	void *cmih_cmi;				/* cpu mod control structure */
	void *cmih_cmidata;			/* cpu mod private data */
	const struct cmi_mc_ops *cmih_mcops;	/* Memory-controller ops */
	void *cmih_mcdata;			/* Memory-controller data */
	uint64_t cmih_flags;			/* See CMIH_F_* below */
	uint16_t cmih_smbiosid;			/* SMBIOS Type 4 struct ID */
	uint_t cmih_smb_chipid;			/* SMBIOS factored chipid */
	nvlist_t *cmih_smb_bboard;		/* SMBIOS bboard nvlist */
} cmi_hdl_impl_t;
96 
97 #define	IMPLHDL(ophdl)	((cmi_hdl_impl_t *)ophdl)
98 #define	HDLOPS(hdl)	((hdl)->cmih_ops)
99 
100 #define	CMIH_F_INJACTV		0x1ULL
101 #define	CMIH_F_DEAD		0x2ULL
102 
/*
 * Ops structure for handle operations.  Each handle class supplies its
 * own vector of these operations (see the ntv_* and xpv_* routines
 * below); optional ops may be left NULL.
 */
struct cmi_hdl_ops {
	/*
	 * These ops are required in an implementation.
	 */
	uint_t (*cmio_vendor)(cmi_hdl_impl_t *);
	const char *(*cmio_vendorstr)(cmi_hdl_impl_t *);
	uint_t (*cmio_family)(cmi_hdl_impl_t *);
	uint_t (*cmio_model)(cmi_hdl_impl_t *);
	uint_t (*cmio_stepping)(cmi_hdl_impl_t *);
	uint_t (*cmio_chipid)(cmi_hdl_impl_t *);
	uint_t (*cmio_procnodeid)(cmi_hdl_impl_t *);
	uint_t (*cmio_coreid)(cmi_hdl_impl_t *);
	uint_t (*cmio_strandid)(cmi_hdl_impl_t *);
	uint_t (*cmio_procnodes_per_pkg)(cmi_hdl_impl_t *);
	uint_t (*cmio_strand_apicid)(cmi_hdl_impl_t *);
	uint32_t (*cmio_chiprev)(cmi_hdl_impl_t *);
	const char *(*cmio_chiprevstr)(cmi_hdl_impl_t *);
	uint32_t (*cmio_getsockettype)(cmi_hdl_impl_t *);
	const char *(*cmio_getsocketstr)(cmi_hdl_impl_t *);

	id_t (*cmio_logical_id)(cmi_hdl_impl_t *);
	/*
	 * These ops are optional in an implementation.
	 */
	ulong_t (*cmio_getcr4)(cmi_hdl_impl_t *);
	void (*cmio_setcr4)(cmi_hdl_impl_t *, ulong_t);
	cmi_errno_t (*cmio_rdmsr)(cmi_hdl_impl_t *, uint_t, uint64_t *);
	cmi_errno_t (*cmio_wrmsr)(cmi_hdl_impl_t *, uint_t, uint64_t);
	cmi_errno_t (*cmio_msrinterpose)(cmi_hdl_impl_t *, uint_t, uint64_t);
	void (*cmio_int)(cmi_hdl_impl_t *, int);
	int (*cmio_online)(cmi_hdl_impl_t *, int, int *);
	uint16_t (*cmio_smbiosid) (cmi_hdl_impl_t *);
	uint_t (*cmio_smb_chipid)(cmi_hdl_impl_t *);
	nvlist_t *(*cmio_smb_bboard)(cmi_hdl_impl_t *);
};
141 
142 static const struct cmi_hdl_ops cmi_hdl_ops;
143 
144 /*
145  * Handles are looked up from contexts such as polling, injection etc
146  * where the context is reasonably well defined (although a poller could
147  * interrupt any old thread holding any old lock).  They are also looked
148  * up by machine check handlers, which may strike at inconvenient times
149  * such as during handle initialization or destruction or during handle
150  * lookup (which the #MC handler itself will also have to perform).
151  *
152  * So keeping handles in a linked list makes locking difficult when we
153  * consider #MC handlers.  Our solution is to have a look-up table indexed
154  * by that which uniquely identifies a handle - chip/core/strand id -
155  * with each entry a structure including a pointer to a handle
156  * structure for the resource, and a reference count for the handle.
157  * Reference counts are modified atomically.  The public cmi_hdl_hold
158  * always succeeds because this can only be used after handle creation
159  * and before the call to destruct, so the hold count is already at least one.
160  * In other functions that lookup a handle (cmi_hdl_lookup, cmi_hdl_any)
 * we must be certain that the count has not already been decremented to zero
162  * before applying our hold.
163  *
164  * The table is an array of maximum number of chips defined in
165  * CMI_CHIPID_ARR_SZ indexed by the chip id. If the chip is not present, the
166  * entry is NULL. Each entry is a pointer to another array which contains a
167  * list of all strands of the chip. This first level table is allocated when
168  * first we want to populate an entry. The size of the latter (per chip) table
169  * is CMI_MAX_STRANDS_PER_CHIP and it is populated when one of its cpus starts.
170  *
171  * Ideally we should only allocate to the actual number of chips, cores per
172  * chip and strand per core. The number of chips is not available until all
173  * of them are passed. The number of cores and strands are partially available.
174  * For now we stick with the above approach.
175  */
#define	CMI_MAX_CHIPID_NBITS		6	/* max chipid of 63 */
#define	CMI_MAX_CORES_PER_CHIP_NBITS	4	/* 16 cores per chip max */
#define	CMI_MAX_STRANDS_PER_CORE_NBITS	3	/* 8 strands per core max */

/* Limits derived from the bit widths above; they size the lookup tables. */
#define	CMI_MAX_CHIPID			((1 << (CMI_MAX_CHIPID_NBITS)) - 1)
#define	CMI_MAX_CORES_PER_CHIP		(1 << CMI_MAX_CORES_PER_CHIP_NBITS)
#define	CMI_MAX_STRANDS_PER_CORE	(1 << CMI_MAX_STRANDS_PER_CORE_NBITS)
#define	CMI_MAX_STRANDS_PER_CHIP	(CMI_MAX_CORES_PER_CHIP * \
					    CMI_MAX_STRANDS_PER_CORE)

/*
 * Handle array indexing within a per-chip table
 *	[6:3] = Core in package,
 *	[2:0] = Strand in core,
 */
#define	CMI_HDL_ARR_IDX_CORE(coreid) \
	(((coreid) & (CMI_MAX_CORES_PER_CHIP - 1)) << \
	CMI_MAX_STRANDS_PER_CORE_NBITS)

#define	CMI_HDL_ARR_IDX_STRAND(strandid) \
	(((strandid) & (CMI_MAX_STRANDS_PER_CORE - 1)))

#define	CMI_HDL_ARR_IDX(coreid, strandid) \
	(CMI_HDL_ARR_IDX_CORE(coreid) | CMI_HDL_ARR_IDX_STRAND(strandid))

/* Number of first-level (per-chip) table slots. */
#define	CMI_CHIPID_ARR_SZ		(1 << CMI_MAX_CHIPID_NBITS)

/*
 * One slot in a per-chip table: a handle pointer for one strand plus
 * its reference count (modified atomically - see block comment above).
 */
typedef struct cmi_hdl_ent {
	volatile uint32_t cmae_refcnt;
	cmi_hdl_impl_t *cmae_hdlp;
} cmi_hdl_ent_t;

/* First-level table indexed by chipid; a slot is NULL until populated. */
static cmi_hdl_ent_t *cmi_chip_tab[CMI_CHIPID_ARR_SZ];
209 
210 /*
211  * Controls where we will source PCI config space data.
212  */
/* Individual permissions for PCI config space access and interposition. */
#define	CMI_PCICFG_FLAG_RD_HWOK		0x0001
#define	CMI_PCICFG_FLAG_RD_INTERPOSEOK	0x0002
#define	CMI_PCICFG_FLAG_WR_HWOK		0x0004
#define	CMI_PCICFG_FLAG_WR_INTERPOSEOK	0x0008

/* Default: permit both hardware access and interposition, each direction. */
static uint64_t cmi_pcicfg_flags =
    CMI_PCICFG_FLAG_RD_HWOK | CMI_PCICFG_FLAG_RD_INTERPOSEOK |
    CMI_PCICFG_FLAG_WR_HWOK | CMI_PCICFG_FLAG_WR_INTERPOSEOK;
221 
222 /*
223  * The flags for individual cpus are kept in their per-cpu handle cmih_msrsrc
224  */
225 #define	CMI_MSR_FLAG_RD_HWOK		0x0001
226 #define	CMI_MSR_FLAG_RD_INTERPOSEOK	0x0002
227 #define	CMI_MSR_FLAG_WR_HWOK		0x0004
228 #define	CMI_MSR_FLAG_WR_INTERPOSEOK	0x0008
229 
230 int cmi_call_func_ntv_tries = 3;
231 
/*
 * Run (*func)(arg1, arg2, &rc) on the cpu identified by cpuid and
 * return the cmi_errno_t it deposits in rc.  If we are already on
 * that cpu the function is invoked directly; otherwise it is run via
 * a priority cross call, retried up to cmi_call_func_ntv_tries times.
 * Returns CMIERR_DEADLOCK if rc was never filled in.
 */
static cmi_errno_t
call_func_ntv(int cpuid, xc_func_t func, xc_arg_t arg1, xc_arg_t arg2)
{
	cmi_errno_t rc = -1;	/* sentinel meaning "func has not run" */
	int i;

	kpreempt_disable();

	if (CPU->cpu_id == cpuid) {
		(*func)(arg1, arg2, (xc_arg_t)&rc);
	} else {
		/*
		 * This should not happen for a #MC trap or a poll, so
		 * this is likely an error injection or similar.
		 * We will try to cross call with xc_priority - we
		 * can't guarantee success with xc_call because
		 * the interrupt code in the case of a #MC may
		 * already hold the xc mutex.
		 */
		for (i = 0; i < cmi_call_func_ntv_tries; i++) {
			cpuset_t cpus;

			CPUSET_ONLY(cpus, cpuid);
			xc_priority(arg1, arg2, (xc_arg_t)&rc,
			    CPUSET2BV(cpus), func);
			if (rc != -1)
				break;

			DELAY(1);
		}
	}

	kpreempt_enable();

	return (rc != -1 ? rc : CMIERR_DEADLOCK);
}
268 
269 static uint64_t injcnt;
270 
271 void
272 cmi_hdl_inj_begin(cmi_hdl_t ophdl)
273 {
274 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
275 
276 	if (hdl != NULL)
277 		hdl->cmih_flags |= CMIH_F_INJACTV;
278 	if (injcnt++ == 0) {
279 		cmn_err(CE_NOTE, "Hardware error injection/simulation "
280 		    "activity noted");
281 	}
282 }
283 
284 void
285 cmi_hdl_inj_end(cmi_hdl_t ophdl)
286 {
287 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
288 
289 	ASSERT(hdl == NULL || hdl->cmih_flags & CMIH_F_INJACTV);
290 	if (hdl != NULL)
291 		hdl->cmih_flags &= ~CMIH_F_INJACTV;
292 }
293 
294 boolean_t
295 cmi_inj_tainted(void)
296 {
297 	return (injcnt != 0 ? B_TRUE : B_FALSE);
298 }
299 
300 /*
301  *	 =======================================================
302  *	|	MSR Interposition				|
303  *	|	-----------------				|
304  *	|							|
305  *	 -------------------------------------------------------
306  */
307 
#define	CMI_MSRI_HASHSZ		16

/*
 * Bucket index from (handle, msr).  The low three pointer bits are
 * dropped (kmem allocations are at least 8-byte aligned) and the MSR
 * number is mixed in.  The original expression ">> 3 + (msr)" parsed
 * as ">> (3 + (msr))" since '+' binds tighter than '>>', which both
 * skewed the distribution and made the shift count exceed the width
 * of uintptr_t (undefined behavior) for typical MSR numbers; it also
 * reduced modulo HASHSZ - 1, leaving the last bucket unused.  All
 * users of this macro share it, so the corrected form is consistent.
 */
#define	CMI_MSRI_HASHIDX(hdl, msr) \
	((((uintptr_t)(hdl) >> 3) + (msr)) % CMI_MSRI_HASHSZ)
311 
/* Hash bucket: a lock plus the head of a doubly-linked entry chain. */
struct cmi_msri_bkt {
	kmutex_t msrib_lock;
	struct cmi_msri_hashent *msrib_head;
};

/* One interposed MSR value, keyed by (handle, MSR number). */
struct cmi_msri_hashent {
	struct cmi_msri_hashent *msrie_next;
	struct cmi_msri_hashent *msrie_prev;
	cmi_hdl_impl_t *msrie_hdl;
	uint_t msrie_msrnum;
	uint64_t msrie_msrval;
};

/* Does entry 'ent' carry the value interposed for (hdl, req_msr)? */
#define	CMI_MSRI_MATCH(ent, hdl, req_msr) \
	((ent)->msrie_hdl == (hdl) && (ent)->msrie_msrnum == (req_msr))

static struct cmi_msri_bkt msrihash[CMI_MSRI_HASHSZ];
329 
330 static void
331 msri_addent(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
332 {
333 	int idx = CMI_MSRI_HASHIDX(hdl, msr);
334 	struct cmi_msri_bkt *hbp = &msrihash[idx];
335 	struct cmi_msri_hashent *hep;
336 
337 	mutex_enter(&hbp->msrib_lock);
338 
339 	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
340 		if (CMI_MSRI_MATCH(hep, hdl, msr))
341 			break;
342 	}
343 
344 	if (hep != NULL) {
345 		hep->msrie_msrval = val;
346 	} else {
347 		hep = kmem_alloc(sizeof (*hep), KM_SLEEP);
348 		hep->msrie_hdl = hdl;
349 		hep->msrie_msrnum = msr;
350 		hep->msrie_msrval = val;
351 
352 		if (hbp->msrib_head != NULL)
353 			hbp->msrib_head->msrie_prev = hep;
354 		hep->msrie_next = hbp->msrib_head;
355 		hep->msrie_prev = NULL;
356 		hbp->msrib_head = hep;
357 	}
358 
359 	mutex_exit(&hbp->msrib_lock);
360 }
361 
362 /*
363  * Look for a match for the given hanlde and msr.  Return 1 with valp
364  * filled if a match is found, otherwise return 0 with valp untouched.
365  */
366 static int
367 msri_lookup(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
368 {
369 	int idx = CMI_MSRI_HASHIDX(hdl, msr);
370 	struct cmi_msri_bkt *hbp = &msrihash[idx];
371 	struct cmi_msri_hashent *hep;
372 
373 	/*
374 	 * This function is called during #MC trap handling, so we should
375 	 * consider the possibility that the hash mutex is held by the
376 	 * interrupted thread.  This should not happen because interposition
377 	 * is an artificial injection mechanism and the #MC is requested
378 	 * after adding entries, but just in case of a real #MC at an
379 	 * unlucky moment we'll use mutex_tryenter here.
380 	 */
381 	if (!mutex_tryenter(&hbp->msrib_lock))
382 		return (0);
383 
384 	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
385 		if (CMI_MSRI_MATCH(hep, hdl, msr)) {
386 			*valp = hep->msrie_msrval;
387 			break;
388 		}
389 	}
390 
391 	mutex_exit(&hbp->msrib_lock);
392 
393 	return (hep != NULL);
394 }
395 
396 /*
397  * Remove any interposed value that matches.
398  */
399 static void
400 msri_rment(cmi_hdl_impl_t *hdl, uint_t msr)
401 {
402 
403 	int idx = CMI_MSRI_HASHIDX(hdl, msr);
404 	struct cmi_msri_bkt *hbp = &msrihash[idx];
405 	struct cmi_msri_hashent *hep;
406 
407 	if (!mutex_tryenter(&hbp->msrib_lock))
408 		return;
409 
410 	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
411 		if (CMI_MSRI_MATCH(hep, hdl, msr)) {
412 			if (hep->msrie_prev != NULL)
413 				hep->msrie_prev->msrie_next = hep->msrie_next;
414 
415 			if (hep->msrie_next != NULL)
416 				hep->msrie_next->msrie_prev = hep->msrie_prev;
417 
418 			if (hbp->msrib_head == hep)
419 				hbp->msrib_head = hep->msrie_next;
420 
421 			kmem_free(hep, sizeof (*hep));
422 			break;
423 		}
424 	}
425 
426 	mutex_exit(&hbp->msrib_lock);
427 }
428 
429 /*
430  *	 =======================================================
431  *	|	PCI Config Space Interposition			|
432  *	|	------------------------------			|
433  *	|							|
434  *	 -------------------------------------------------------
435  */
436 
437 /*
438  * Hash for interposed PCI config space values.  We lookup on bus/dev/fun/offset
439  * and then record whether the value stashed was made with a byte, word or
440  * doubleword access;  we will only return a hit for an access of the
441  * same size.  If you access say a 32-bit register using byte accesses
442  * and then attempt to read the full 32-bit value back you will not obtain
443  * any sort of merged result - you get a lookup miss.
444  */
445 
#define	CMI_PCII_HASHSZ		16

/*
 * Bucket index from bus/dev/func/offset.  Reduce modulo the full
 * table size: the previous "% (CMI_PCII_HASHSZ - 1)" left the last
 * bucket permanently unused.  All users share this macro, so the
 * change is self-consistent.
 */
#define	CMI_PCII_HASHIDX(b, d, f, o) \
	(((b) + (d) + (f) + (o)) % CMI_PCII_HASHSZ)
449 
/* Hash bucket: a lock plus the head of a doubly-linked entry chain. */
struct cmi_pcii_bkt {
	kmutex_t pciib_lock;
	struct cmi_pcii_hashent *pciib_head;
};

/*
 * One interposed PCI config value, keyed by bus/dev/func/reg and the
 * access size used to stash it (see block comment above).
 */
struct cmi_pcii_hashent {
	struct cmi_pcii_hashent *pcii_next;
	struct cmi_pcii_hashent *pcii_prev;
	int pcii_bus;
	int pcii_dev;
	int pcii_func;
	int pcii_reg;
	int pcii_asize;
	uint32_t pcii_val;
};

/* Does this entry match the given bus/dev/func/reg/access-size key? */
#define	CMI_PCII_MATCH(ent, b, d, f, r, asz) \
	((ent)->pcii_bus == (b) && (ent)->pcii_dev == (d) && \
	(ent)->pcii_func == (f) && (ent)->pcii_reg == (r) && \
	(ent)->pcii_asize == (asz))

static struct cmi_pcii_bkt pciihash[CMI_PCII_HASHSZ];
472 
473 
474 /*
475  * Add a new entry to the PCI interpose hash, overwriting any existing
476  * entry that is found.
477  */
478 static void
479 pcii_addent(int bus, int dev, int func, int reg, uint32_t val, int asz)
480 {
481 	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
482 	struct cmi_pcii_bkt *hbp = &pciihash[idx];
483 	struct cmi_pcii_hashent *hep;
484 
485 	cmi_hdl_inj_begin(NULL);
486 
487 	mutex_enter(&hbp->pciib_lock);
488 
489 	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
490 		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz))
491 			break;
492 	}
493 
494 	if (hep != NULL) {
495 		hep->pcii_val = val;
496 	} else {
497 		hep = kmem_alloc(sizeof (*hep), KM_SLEEP);
498 		hep->pcii_bus = bus;
499 		hep->pcii_dev = dev;
500 		hep->pcii_func = func;
501 		hep->pcii_reg = reg;
502 		hep->pcii_asize = asz;
503 		hep->pcii_val = val;
504 
505 		if (hbp->pciib_head != NULL)
506 			hbp->pciib_head->pcii_prev = hep;
507 		hep->pcii_next = hbp->pciib_head;
508 		hep->pcii_prev = NULL;
509 		hbp->pciib_head = hep;
510 	}
511 
512 	mutex_exit(&hbp->pciib_lock);
513 
514 	cmi_hdl_inj_end(NULL);
515 }
516 
517 /*
518  * Look for a match for the given bus/dev/func/reg; return 1 with valp
519  * filled if a match is found, otherwise return 0 with valp untouched.
520  */
521 static int
522 pcii_lookup(int bus, int dev, int func, int reg, int asz, uint32_t *valp)
523 {
524 	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
525 	struct cmi_pcii_bkt *hbp = &pciihash[idx];
526 	struct cmi_pcii_hashent *hep;
527 
528 	if (!mutex_tryenter(&hbp->pciib_lock))
529 		return (0);
530 
531 	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
532 		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz)) {
533 			*valp = hep->pcii_val;
534 			break;
535 		}
536 	}
537 
538 	mutex_exit(&hbp->pciib_lock);
539 
540 	return (hep != NULL);
541 }
542 
543 static void
544 pcii_rment(int bus, int dev, int func, int reg, int asz)
545 {
546 	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
547 	struct cmi_pcii_bkt *hbp = &pciihash[idx];
548 	struct cmi_pcii_hashent *hep;
549 
550 	mutex_enter(&hbp->pciib_lock);
551 
552 	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
553 		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz)) {
554 			if (hep->pcii_prev != NULL)
555 				hep->pcii_prev->pcii_next = hep->pcii_next;
556 
557 			if (hep->pcii_next != NULL)
558 				hep->pcii_next->pcii_prev = hep->pcii_prev;
559 
560 			if (hbp->pciib_head == hep)
561 				hbp->pciib_head = hep->pcii_next;
562 
563 			kmem_free(hep, sizeof (*hep));
564 			break;
565 		}
566 	}
567 
568 	mutex_exit(&hbp->pciib_lock);
569 }
570 
571 #ifndef __xpv
572 
573 /*
574  *	 =======================================================
575  *	|	Native methods					|
576  *	|	--------------					|
577  *	|							|
578  *	| These are used when we are running native on bare-	|
579  *	| metal, or simply don't know any better.		|
580  *	---------------------------------------------------------
581  */
582 
583 #define	HDLPRIV(hdl)	((cpu_t *)(hdl)->cmih_hdlpriv)
584 
/*
 * Trivial native accessors.  Answers come either from the cpuid state
 * of the underlying cpu_t (via HDLPRIV) or from values latched in the
 * handle at creation time.
 */
static uint_t
ntv_vendor(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getvendor(HDLPRIV(hdl)));
}

static const char *
ntv_vendorstr(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getvendorstr(HDLPRIV(hdl)));
}

static uint_t
ntv_family(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getfamily(HDLPRIV(hdl)));
}

static uint_t
ntv_model(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getmodel(HDLPRIV(hdl)));
}

static uint_t
ntv_stepping(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getstep(HDLPRIV(hdl)));
}

static uint_t
ntv_chipid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_chipid);

}

static uint_t
ntv_procnodeid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_procnodeid);
}

static uint_t
ntv_procnodes_per_pkg(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_procnodes_per_pkg);
}

static uint_t
ntv_coreid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_coreid);
}

static uint_t
ntv_strandid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_strandid);
}

static uint_t
ntv_strand_apicid(cmi_hdl_impl_t *hdl)
{
	return (cpuid_get_apicid(HDLPRIV(hdl)));
}

static uint16_t
ntv_smbiosid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smbiosid);
}

static uint_t
ntv_smb_chipid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smb_chipid);
}

static nvlist_t *
ntv_smb_bboard(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smb_bboard);
}

static uint32_t
ntv_chiprev(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getchiprev(HDLPRIV(hdl)));
}

static const char *
ntv_chiprevstr(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getchiprevstr(HDLPRIV(hdl)));
}

static uint32_t
ntv_getsockettype(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getsockettype(HDLPRIV(hdl)));
}

static const char *
ntv_getsocketstr(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getsocketstr(HDLPRIV(hdl)));
}

static id_t
ntv_logical_id(cmi_hdl_impl_t *hdl)
{
	return (HDLPRIV(hdl)->cpu_id);
}
699 
700 /*ARGSUSED*/
701 static int
702 ntv_getcr4_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
703 {
704 	ulong_t *dest = (ulong_t *)arg1;
705 	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
706 
707 	*dest = getcr4();
708 	*rcp = CMI_SUCCESS;
709 
710 	return (0);
711 }
712 
713 static ulong_t
714 ntv_getcr4(cmi_hdl_impl_t *hdl)
715 {
716 	cpu_t *cp = HDLPRIV(hdl);
717 	ulong_t val;
718 
719 	(void) call_func_ntv(cp->cpu_id, ntv_getcr4_xc, (xc_arg_t)&val, NULL);
720 
721 	return (val);
722 }
723 
724 /*ARGSUSED*/
725 static int
726 ntv_setcr4_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
727 {
728 	ulong_t val = (ulong_t)arg1;
729 	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
730 
731 	setcr4(val);
732 	*rcp = CMI_SUCCESS;
733 
734 	return (0);
735 }
736 
737 static void
738 ntv_setcr4(cmi_hdl_impl_t *hdl, ulong_t val)
739 {
740 	cpu_t *cp = HDLPRIV(hdl);
741 
742 	(void) call_func_ntv(cp->cpu_id, ntv_setcr4_xc, (xc_arg_t)val, NULL);
743 }
744 
745 volatile uint32_t cmi_trapped_rdmsr;
746 
747 /*ARGSUSED*/
748 static int
749 ntv_rdmsr_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
750 {
751 	uint_t msr = (uint_t)arg1;
752 	uint64_t *valp = (uint64_t *)arg2;
753 	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
754 
755 	on_trap_data_t otd;
756 
757 	if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
758 		if (checked_rdmsr(msr, valp) == 0)
759 			*rcp = CMI_SUCCESS;
760 		else
761 			*rcp = CMIERR_NOTSUP;
762 	} else {
763 		*rcp = CMIERR_MSRGPF;
764 		atomic_inc_32(&cmi_trapped_rdmsr);
765 	}
766 	no_trap();
767 
768 	return (0);
769 }
770 
771 static cmi_errno_t
772 ntv_rdmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
773 {
774 	cpu_t *cp = HDLPRIV(hdl);
775 
776 	if (!(hdl->cmih_msrsrc & CMI_MSR_FLAG_RD_HWOK))
777 		return (CMIERR_INTERPOSE);
778 
779 	return (call_func_ntv(cp->cpu_id, ntv_rdmsr_xc,
780 	    (xc_arg_t)msr, (xc_arg_t)valp));
781 }
782 
783 volatile uint32_t cmi_trapped_wrmsr;
784 
785 /*ARGSUSED*/
786 static int
787 ntv_wrmsr_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
788 {
789 	uint_t msr = (uint_t)arg1;
790 	uint64_t val = *((uint64_t *)arg2);
791 	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
792 	on_trap_data_t otd;
793 
794 	if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
795 		if (checked_wrmsr(msr, val) == 0)
796 			*rcp = CMI_SUCCESS;
797 		else
798 			*rcp = CMIERR_NOTSUP;
799 	} else {
800 		*rcp = CMIERR_MSRGPF;
801 		atomic_inc_32(&cmi_trapped_wrmsr);
802 	}
803 	no_trap();
804 
805 	return (0);
806 
807 }
808 
809 static cmi_errno_t
810 ntv_wrmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
811 {
812 	cpu_t *cp = HDLPRIV(hdl);
813 
814 	if (!(hdl->cmih_msrsrc & CMI_MSR_FLAG_WR_HWOK))
815 		return (CMI_SUCCESS);
816 
817 	return (call_func_ntv(cp->cpu_id, ntv_wrmsr_xc,
818 	    (xc_arg_t)msr, (xc_arg_t)&val));
819 }
820 
821 static cmi_errno_t
822 ntv_msrinterpose(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
823 {
824 	msri_addent(hdl, msr, val);
825 	return (CMI_SUCCESS);
826 }
827 
828 /*ARGSUSED*/
829 static int
830 ntv_int_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
831 {
832 	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
833 	int int_no = (int)arg1;
834 
835 	if (int_no == T_MCE)
836 		int18();
837 	else
838 		int_cmci();
839 	*rcp = CMI_SUCCESS;
840 
841 	return (0);
842 }
843 
844 static void
845 ntv_int(cmi_hdl_impl_t *hdl, int int_no)
846 {
847 	cpu_t *cp = HDLPRIV(hdl);
848 
849 	(void) call_func_ntv(cp->cpu_id, ntv_int_xc, (xc_arg_t)int_no, NULL);
850 }
851 
/*
 * Change (or query) the online status of the cpu backing this handle
 * via p_online_internal_locked(), which requires cpu_lock.
 */
static int
ntv_online(cmi_hdl_impl_t *hdl, int new_status, int *old_status)
{
	int rc;
	processorid_t cpuid = HDLPRIV(hdl)->cpu_id;

	/*
	 * Spin for cpu_lock rather than blocking so that we can give up
	 * with EBUSY if the handle is flagged dead while we wait —
	 * presumably cpu_lock may be held by whoever is tearing the cpu
	 * down (NOTE(review): confirm that is the deadlock being avoided).
	 */
	while (mutex_tryenter(&cpu_lock) == 0) {
		if (hdl->cmih_flags & CMIH_F_DEAD)
			return (EBUSY);
		delay(1);
	}
	rc = p_online_internal_locked(cpuid, new_status, old_status);
	mutex_exit(&cpu_lock);

	return (rc);
}
868 
869 #else	/* __xpv */
870 
871 /*
872  *	 =======================================================
873  *	|	xVM dom0 methods				|
874  *	|	----------------				|
875  *	|							|
876  *	| These are used when we are running as dom0 in		|
877  *	| a Solaris xVM context.				|
878  *	---------------------------------------------------------
879  */
880 
881 #define	HDLPRIV(hdl)	((xen_mc_lcpu_cookie_t)(hdl)->cmih_hdlpriv)
882 
883 extern uint_t _cpuid_vendorstr_to_vendorcode(char *);
884 
885 
/*
 * Trivial xVM dom0 accessors.  Answers come either from the physical
 * cpu cookie (via HDLPRIV and the xen_physcpu_* routines) or from
 * values latched in the handle at creation time.
 */
static uint_t
xpv_vendor(cmi_hdl_impl_t *hdl)
{
	return (_cpuid_vendorstr_to_vendorcode((char *)xen_physcpu_vendorstr(
	    HDLPRIV(hdl))));
}

static const char *
xpv_vendorstr(cmi_hdl_impl_t *hdl)
{
	return (xen_physcpu_vendorstr(HDLPRIV(hdl)));
}

static uint_t
xpv_family(cmi_hdl_impl_t *hdl)
{
	return (xen_physcpu_family(HDLPRIV(hdl)));
}

static uint_t
xpv_model(cmi_hdl_impl_t *hdl)
{
	return (xen_physcpu_model(HDLPRIV(hdl)));
}

static uint_t
xpv_stepping(cmi_hdl_impl_t *hdl)
{
	return (xen_physcpu_stepping(HDLPRIV(hdl)));
}

static uint_t
xpv_chipid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_chipid);
}

static uint_t
xpv_procnodeid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_procnodeid);
}

static uint_t
xpv_procnodes_per_pkg(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_procnodes_per_pkg);
}

static uint_t
xpv_coreid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_coreid);
}

static uint_t
xpv_strandid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_strandid);
}

static uint_t
xpv_strand_apicid(cmi_hdl_impl_t *hdl)
{
	return (xen_physcpu_initial_apicid(HDLPRIV(hdl)));
}

static uint16_t
xpv_smbiosid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smbiosid);
}

static uint_t
xpv_smb_chipid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smb_chipid);
}

static nvlist_t *
xpv_smb_bboard(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_smb_bboard);
}
970 
/*
 * Chip revision and socket naming under xVM: the handle private data
 * is a xen_mc_lcpu_cookie_t rather than a cpu_t, so these values are
 * computed from the physical cpu's vendor/family/model/stepping with
 * the _cpuid_* helper routines.
 */
extern uint32_t _cpuid_chiprev(uint_t, uint_t, uint_t, uint_t);

static uint32_t
xpv_chiprev(cmi_hdl_impl_t *hdl)
{
	return (_cpuid_chiprev(xpv_vendor(hdl), xpv_family(hdl),
	    xpv_model(hdl), xpv_stepping(hdl)));
}

extern const char *_cpuid_chiprevstr(uint_t, uint_t, uint_t, uint_t);

static const char *
xpv_chiprevstr(cmi_hdl_impl_t *hdl)
{
	return (_cpuid_chiprevstr(xpv_vendor(hdl), xpv_family(hdl),
	    xpv_model(hdl), xpv_stepping(hdl)));
}

extern uint32_t _cpuid_skt(uint_t, uint_t, uint_t, uint_t);

static uint32_t
xpv_getsockettype(cmi_hdl_impl_t *hdl)
{
	return (_cpuid_skt(xpv_vendor(hdl), xpv_family(hdl),
	    xpv_model(hdl), xpv_stepping(hdl)));
}

extern const char *_cpuid_sktstr(uint_t, uint_t, uint_t, uint_t);

static const char *
xpv_getsocketstr(cmi_hdl_impl_t *hdl)
{
	return (_cpuid_sktstr(xpv_vendor(hdl), xpv_family(hdl),
	    xpv_model(hdl), xpv_stepping(hdl)));
}

static id_t
xpv_logical_id(cmi_hdl_impl_t *hdl)
{
	return (xen_physcpu_logical_id(HDLPRIV(hdl)));
}
1012 
1013 static cmi_errno_t
1014 xpv_rdmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
1015 {
1016 	switch (msr) {
1017 	case IA32_MSR_MCG_CAP:
1018 		*valp = xen_physcpu_mcg_cap(HDLPRIV(hdl));
1019 		break;
1020 
1021 	default:
1022 		return (CMIERR_NOTSUP);
1023 	}
1024 
1025 	return (CMI_SUCCESS);
1026 }
1027 
1028 /*
1029  * Request the hypervisor to write an MSR for us.  The hypervisor
1030  * will only accept MCA-related MSRs, as this is for MCA error
1031  * simulation purposes alone.  We will pre-screen MSRs for injection
1032  * so we don't bother the HV with bogus requests.  We will permit
1033  * injection to any MCA bank register, and to MCG_STATUS.
1034  */
1035 
1036 #define	IS_MCA_INJ_MSR(msr) \
1037 	(((msr) >= IA32_MSR_MC(0, CTL) && (msr) <= IA32_MSR_MC(10, MISC)) || \
1038 	(msr) == IA32_MSR_MCG_STATUS)
1039 
/*
 * Common path for xpv_wrmsr and xpv_msrinterpose: build a
 * xen_mc_msrinject request and issue the XEN_MC_msrinject hypercall.
 * Only permitted while injection is active on the handle, only for
 * MSRs passing IS_MCA_INJ_MSR, and refused while panicking
 * (CMIERR_DEADLOCK).
 */
static cmi_errno_t
xpv_wrmsr_cmn(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val, boolean_t intpose)
{
	xen_mc_t xmc;
	struct xen_mc_msrinject *mci = &xmc.u.mc_msrinject;

	if (!(hdl->cmih_flags & CMIH_F_INJACTV))
		return (CMIERR_NOTSUP);		/* for injection use only! */

	if (!IS_MCA_INJ_MSR(msr))
		return (CMIERR_API);

	if (panicstr)
		return (CMIERR_DEADLOCK);

	mci->mcinj_cpunr = xen_physcpu_logical_id(HDLPRIV(hdl));
	mci->mcinj_flags = intpose ? MC_MSRINJ_F_INTERPOSE : 0;
	mci->mcinj_count = 1;	/* learn to batch sometime */
	mci->mcinj_msr[0].reg = msr;
	mci->mcinj_msr[0].value = val;

	return (HYPERVISOR_mca(XEN_MC_msrinject, &xmc) ==
	    0 ?  CMI_SUCCESS : CMIERR_NOTSUP);
}
1064 
/* Real MSR write injection: interpose flag clear. */
static cmi_errno_t
xpv_wrmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
{
	return (xpv_wrmsr_cmn(hdl, msr, val, B_FALSE));
}


/* Interposed MSR value: requests MC_MSRINJ_F_INTERPOSE behavior. */
static cmi_errno_t
xpv_msrinterpose(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
{
	return (xpv_wrmsr_cmn(hdl, msr, val, B_TRUE));
}
1077 
/*
 * Inject an interrupt on the physical cpu via the XEN_MC_mceinject
 * hypercall.  Only permitted while injection is active on the handle.
 */
static void
xpv_int(cmi_hdl_impl_t *hdl, int int_no)
{
	xen_mc_t xmc;
	struct xen_mc_mceinject *mce = &xmc.u.mc_mceinject;

	if (!(hdl->cmih_flags & CMIH_F_INJACTV))
		return;

	/*
	 * NOTE(review): only #MC (T_MCE) is implemented; for any other
	 * int_no we warn but still fall through and inject an MCE —
	 * confirm this fall-through is intentional.
	 */
	if (int_no != T_MCE) {
		cmn_err(CE_WARN, "xpv_int: int_no %d unimplemented\n",
		    int_no);
	}

	mce->mceinj_cpunr = xen_physcpu_logical_id(HDLPRIV(hdl));

	(void) HYPERVISOR_mca(XEN_MC_mceinject, &xmc);
}
1096 
/*
 * Change (or query) the online status of a physical cpu through the
 * Xen cpu-hotplug sysctl.  The Solaris P_* request is mapped onto a
 * hotplug op, and the returned hotplug status is mapped back to a P_*
 * value in *old_status.  Returns -1 for unsupported requests or
 * unexpected statuses, else the negated hypercall error (0 success).
 */
static int
xpv_online(cmi_hdl_impl_t *hdl, int new_status, int *old_status)
{
	xen_sysctl_t xs;
	int op, rc, status;

	/* Strip P_FORCED; only the base status is mapped here. */
	new_status &= ~P_FORCED;

	switch (new_status) {
	case P_STATUS:
		op = XEN_SYSCTL_CPU_HOTPLUG_STATUS;
		break;
	case P_FAULTED:
	case P_OFFLINE:
		op = XEN_SYSCTL_CPU_HOTPLUG_OFFLINE;
		break;
	case P_ONLINE:
		op = XEN_SYSCTL_CPU_HOTPLUG_ONLINE;
		break;
	default:
		return (-1);
	}

	xs.cmd = XEN_SYSCTL_cpu_hotplug;
	xs.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
	xs.u.cpu_hotplug.cpu = xen_physcpu_logical_id(HDLPRIV(hdl));
	xs.u.cpu_hotplug.op = op;

	/*
	 * A non-negative hypercall result encodes the previous hotplug
	 * status; translate it back to a Solaris P_* value.
	 */
	if ((rc = HYPERVISOR_sysctl(&xs)) >= 0) {
		status = rc;
		rc = 0;
		switch (status) {
		case XEN_CPU_HOTPLUG_STATUS_NEW:
			*old_status = P_OFFLINE;
			break;
		case XEN_CPU_HOTPLUG_STATUS_OFFLINE:
			*old_status = P_FAULTED;
			break;
		case XEN_CPU_HOTPLUG_STATUS_ONLINE:
			*old_status = P_ONLINE;
			break;
		default:
			return (-1);
		}
	}

	return (-rc);
}
1145 
1146 #endif
1147 
/*
 * Locate the class-private datum for the cpu identified by the
 * <chipid, coreid, strandid> tuple: a xen_mc_lcpu_cookie_t under xVM,
 * or a cpu_t pointer natively.  Returns NULL if no matching cpu is
 * found.
 */
/*ARGSUSED*/
static void *
cpu_search(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
    uint_t strandid)
{
#ifdef __xpv
	xen_mc_lcpu_cookie_t cpi;

	for (cpi = xen_physcpu_next(NULL); cpi != NULL;
	    cpi = xen_physcpu_next(cpi)) {
		if (xen_physcpu_chipid(cpi) == chipid &&
		    xen_physcpu_coreid(cpi) == coreid &&
		    xen_physcpu_strandid(cpi) == strandid)
			return ((void *)cpi);
	}
	return (NULL);

#else	/* __xpv */

	cpu_t *cp, *startcp;

	/* walk the circular cpu_next ring, starting at the current cpu */
	kpreempt_disable();
	cp = startcp = CPU;
	do {
		if (cmi_ntv_hwchipid(cp) == chipid &&
		    cmi_ntv_hwcoreid(cp) == coreid &&
		    cmi_ntv_hwstrandid(cp) == strandid) {
			kpreempt_enable();
			return ((void *)cp);
		}

		cp = cp->cpu_next;
	} while (cp != startcp);
	kpreempt_enable();
	return (NULL);
#endif	/* __ xpv */
}
1185 
1186 static boolean_t
1187 cpu_is_cmt(void *priv)
1188 {
1189 #ifdef __xpv
1190 	return (xen_physcpu_is_cmt((xen_mc_lcpu_cookie_t)priv));
1191 #else /* __xpv */
1192 	cpu_t *cp = (cpu_t *)priv;
1193 
1194 	int strands_per_core = cpuid_get_ncpu_per_chip(cp) /
1195 	    cpuid_get_ncore_per_chip(cp);
1196 
1197 	return (strands_per_core > 1);
1198 #endif /* __xpv */
1199 }
1200 
1201 /*
1202  * Find the handle entry of a given cpu identified by a <chip,core,strand>
1203  * tuple.
1204  */
1205 static cmi_hdl_ent_t *
1206 cmi_hdl_ent_lookup(uint_t chipid, uint_t coreid, uint_t strandid)
1207 {
1208 	/*
1209 	 * Allocate per-chip table which contains a list of handle of
1210 	 * all strands of the chip.
1211 	 */
1212 	if (cmi_chip_tab[chipid] == NULL) {
1213 		size_t sz;
1214 		cmi_hdl_ent_t *pg;
1215 
1216 		sz = CMI_MAX_STRANDS_PER_CHIP * sizeof (cmi_hdl_ent_t);
1217 		pg = kmem_zalloc(sz, KM_SLEEP);
1218 
1219 		/* test and set the per-chip table if it is not allocated */
1220 		if (atomic_cas_ptr(&cmi_chip_tab[chipid], NULL, pg) != NULL)
1221 			kmem_free(pg, sz); /* someone beat us */
1222 	}
1223 
1224 	return (cmi_chip_tab[chipid] + CMI_HDL_ARR_IDX(coreid, strandid));
1225 }
1226 
/*
 * Allocate and publish a cmi handle for the cpu identified by the
 * <chipid, coreid, strandid> tuple.  Returns NULL if the tuple is out
 * of range, no matching cpu exists, or a handle for the tuple has
 * already been created.  On success the handle is returned holding
 * its initial reference (refcnt == 1).
 */
cmi_hdl_t
cmi_hdl_create(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
    uint_t strandid)
{
	cmi_hdl_impl_t *hdl;
	void *priv;
	cmi_hdl_ent_t *ent;

#ifdef __xpv
	ASSERT(class == CMI_HDL_SOLARIS_xVM_MCA);
#else
	ASSERT(class == CMI_HDL_NATIVE);
#endif

	/* reject tuples that would index outside the per-chip tables */
	if (chipid > CMI_MAX_CHIPID ||
	    coreid > CMI_MAX_CORES_PER_CHIP - 1 ||
	    strandid > CMI_MAX_STRANDS_PER_CORE - 1)
		return (NULL);

	if ((priv = cpu_search(class, chipid, coreid, strandid)) == NULL)
		return (NULL);

	hdl = kmem_zalloc(sizeof (*hdl), KM_SLEEP);

	hdl->cmih_class = class;
	HDLOPS(hdl) = &cmi_hdl_ops;
	hdl->cmih_chipid = chipid;
	hdl->cmih_coreid = coreid;
	hdl->cmih_strandid = strandid;
	hdl->cmih_mstrand = cpu_is_cmt(priv);
	hdl->cmih_hdlpriv = priv;
#ifdef __xpv
	hdl->cmih_msrsrc = CMI_MSR_FLAG_RD_INTERPOSEOK |
	    CMI_MSR_FLAG_WR_INTERPOSEOK;

	/*
	 * XXX: need hypervisor support for procnodeid, for now assume
	 * single-node processors (procnodeid = chipid)
	 */
	hdl->cmih_procnodeid = xen_physcpu_chipid((xen_mc_lcpu_cookie_t)priv);
	hdl->cmih_procnodes_per_pkg = 1;
#else   /* __xpv */
	hdl->cmih_msrsrc = CMI_MSR_FLAG_RD_HWOK | CMI_MSR_FLAG_RD_INTERPOSEOK |
	    CMI_MSR_FLAG_WR_HWOK | CMI_MSR_FLAG_WR_INTERPOSEOK;
	hdl->cmih_procnodeid = cpuid_get_procnodeid((cpu_t *)priv);
	hdl->cmih_procnodes_per_pkg =
	    cpuid_get_procnodes_per_pkg((cpu_t *)priv);
#endif  /* __xpv */

	ent = cmi_hdl_ent_lookup(chipid, coreid, strandid);
	if (ent->cmae_refcnt != 0 || ent->cmae_hdlp != NULL) {
		/*
		 * Somehow this (chipid, coreid, strandid) id tuple has
		 * already been assigned!  This indicates that the
		 * callers logic in determining these values is busted,
		 * or perhaps undermined by bad BIOS setup.  Complain,
		 * and refuse to initialize this tuple again as bad things
		 * will happen.
		 */
		cmn_err(CE_NOTE, "cmi_hdl_create: chipid %d coreid %d "
		    "strandid %d handle already allocated!",
		    chipid, coreid, strandid);
		kmem_free(hdl, sizeof (*hdl));
		return (NULL);
	}

	/*
	 * Once we store a nonzero reference count others can find this
	 * handle via cmi_hdl_lookup etc.  This initial hold on the handle
	 * is to be dropped only if some other part of cmi initialization
	 * fails or, if it succeeds, at later cpu deconfigure.  Note the
	 * the module private data we hold in cmih_cmi and cmih_cmidata
	 * is still NULL at this point (the caller will fill it with
	 * cmi_hdl_setcmi if it initializes) so consumers of handles
	 * should always be ready for that possibility.
	 */
	ent->cmae_hdlp = hdl;
	hdl->cmih_refcntp = &ent->cmae_refcnt;
	ent->cmae_refcnt = 1;

	return ((cmi_hdl_t)hdl);
}
1309 
/*
 * Look up this cpu's chip instance, smbios id and base-board info in
 * the SMBIOS tables (keyed by the strand's apic id) and cache them in
 * the handle.  If the SMBIOS lookup fails, x86 generic topo falls back
 * to legacy mode and the handle fields are left untouched.
 */
void
cmi_read_smbios(cmi_hdl_t ophdl)
{

	uint_t strand_apicid = UINT_MAX;
	uint_t chip_inst = UINT_MAX;
	uint16_t smb_id = USHRT_MAX;
	int rc = 0;

	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	/* set x86gentopo compatibility */
	fm_smb_fmacompat();

#ifndef __xpv
	strand_apicid = ntv_strand_apicid(hdl);
#else
	strand_apicid = xpv_strand_apicid(hdl);
#endif

	if (!x86gentopo_legacy) {
		/*
		 * If fm_smb_chipinst() or fm_smb_bboard() fails,
		 * topo reverts to legacy mode
		 */
		rc = fm_smb_chipinst(strand_apicid, &chip_inst, &smb_id);
		if (rc == 0) {
			hdl->cmih_smb_chipid = chip_inst;
			hdl->cmih_smbiosid = smb_id;
		} else {
#ifdef DEBUG
			cmn_err(CE_NOTE, "!cmi reads smbios chip info failed");
#endif /* DEBUG */
			return;
		}

		hdl->cmih_smb_bboard  = fm_smb_bboard(strand_apicid);
#ifdef DEBUG
		if (hdl->cmih_smb_bboard == NULL)
			cmn_err(CE_NOTE,
			    "!cmi reads smbios base boards info failed");
#endif /* DEBUG */
	}
}
1354 
1355 void
1356 cmi_hdl_hold(cmi_hdl_t ophdl)
1357 {
1358 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1359 
1360 	ASSERT(*hdl->cmih_refcntp != 0); /* must not be the initial hold */
1361 
1362 	atomic_inc_32(hdl->cmih_refcntp);
1363 }
1364 
/*
 * Try to take a reference on the handle behind a handle-table entry.
 * Returns 1 with the reference count bumped on success, or 0 if the
 * count is (or concurrently becomes) zero, meaning the handle never
 * existed or is being / has been destroyed.
 */
static int
cmi_hdl_canref(cmi_hdl_ent_t *ent)
{
	volatile uint32_t *refcntp;
	uint32_t refcnt;

	refcntp = &ent->cmae_refcnt;
	refcnt = *refcntp;

	if (refcnt == 0) {
		/*
		 * Associated object never existed, is being destroyed,
		 * or has been destroyed.
		 */
		return (0);
	}

	/*
	 * We cannot use atomic increment here because once the reference
	 * count reaches zero it must never be bumped up again.
	 */
	while (refcnt != 0) {
		/* only increment if the count is still the nonzero we saw */
		if (atomic_cas_32(refcntp, refcnt, refcnt + 1) == refcnt)
			return (1);
		refcnt = *refcntp;
	}

	/*
	 * Somebody dropped the reference count to 0 after our initial
	 * check.
	 */
	return (0);
}
1398 
1399 
1400 void
1401 cmi_hdl_rele(cmi_hdl_t ophdl)
1402 {
1403 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1404 
1405 	ASSERT(*hdl->cmih_refcntp > 0);
1406 	(void) atomic_dec_32_nv(hdl->cmih_refcntp);
1407 }
1408 
/*
 * Tear down a handle: drop the initial reference taken at create
 * time, mark the handle dead, wait for all other references to drain,
 * then unhook the handle-table entry and free the handle.
 */
void
cmi_hdl_destroy(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
	cmi_hdl_ent_t *ent;

	/* Release the reference count held by cmi_hdl_create(). */
	ASSERT(*hdl->cmih_refcntp > 0);
	(void) atomic_dec_32_nv(hdl->cmih_refcntp);
	hdl->cmih_flags |= CMIH_F_DEAD;

	ent = cmi_hdl_ent_lookup(hdl->cmih_chipid, hdl->cmih_coreid,
	    hdl->cmih_strandid);
	/*
	 * Use busy polling instead of condition variable here because
	 * cmi_hdl_rele() may be called from #MC handler.
	 */
	while (cmi_hdl_canref(ent)) {
		/* canref took a reference; give it back and wait a tick */
		cmi_hdl_rele(ophdl);
		delay(1);
	}
	ent->cmae_hdlp = NULL;

	kmem_free(hdl, sizeof (*hdl));
}
1434 
1435 void
1436 cmi_hdl_setspecific(cmi_hdl_t ophdl, void *arg)
1437 {
1438 	IMPLHDL(ophdl)->cmih_spec = arg;
1439 }
1440 
1441 void *
1442 cmi_hdl_getspecific(cmi_hdl_t ophdl)
1443 {
1444 	return (IMPLHDL(ophdl)->cmih_spec);
1445 }
1446 
1447 void
1448 cmi_hdl_setmc(cmi_hdl_t ophdl, const struct cmi_mc_ops *mcops, void *mcdata)
1449 {
1450 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1451 
1452 	ASSERT(hdl->cmih_mcops == NULL && hdl->cmih_mcdata == NULL);
1453 	hdl->cmih_mcops = mcops;
1454 	hdl->cmih_mcdata = mcdata;
1455 }
1456 
1457 const struct cmi_mc_ops *
1458 cmi_hdl_getmcops(cmi_hdl_t ophdl)
1459 {
1460 	return (IMPLHDL(ophdl)->cmih_mcops);
1461 }
1462 
1463 void *
1464 cmi_hdl_getmcdata(cmi_hdl_t ophdl)
1465 {
1466 	return (IMPLHDL(ophdl)->cmih_mcdata);
1467 }
1468 
/*
 * Look up the handle for the cpu identified by the <chipid, coreid,
 * strandid> tuple.  CMI_HDL_NEUTRAL is resolved to the class built
 * into this kernel.  On success the handle is returned with a
 * reference taken (via cmi_hdl_canref); NULL is returned if the tuple
 * is out of range, the handle is gone, or its class does not match.
 */
cmi_hdl_t
cmi_hdl_lookup(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
    uint_t strandid)
{
	cmi_hdl_ent_t *ent;

	if (chipid > CMI_MAX_CHIPID ||
	    coreid > CMI_MAX_CORES_PER_CHIP - 1 ||
	    strandid > CMI_MAX_STRANDS_PER_CORE - 1)
		return (NULL);

	ent = cmi_hdl_ent_lookup(chipid, coreid, strandid);

	if (class == CMI_HDL_NEUTRAL)
#ifdef __xpv
		class = CMI_HDL_SOLARIS_xVM_MCA;
#else
		class = CMI_HDL_NATIVE;
#endif

	if (!cmi_hdl_canref(ent))
		return (NULL);

	/* class mismatch: drop the reference canref just took */
	if (ent->cmae_hdlp->cmih_class != class) {
		cmi_hdl_rele((cmi_hdl_t)ent->cmae_hdlp);
		return (NULL);
	}

	return ((cmi_hdl_t)ent->cmae_hdlp);
}
1499 
/*
 * Return any live handle, scanning the per-chip tables in order.  The
 * handle is returned with a reference taken (via cmi_hdl_canref), so
 * the caller is responsible for releasing it.  Returns NULL if no
 * referencable handle exists.
 */
cmi_hdl_t
cmi_hdl_any(void)
{
	int i, j;
	cmi_hdl_ent_t *ent;

	for (i = 0; i < CMI_CHIPID_ARR_SZ; i++) {
		if (cmi_chip_tab[i] == NULL)
			continue;
		for (j = 0, ent = cmi_chip_tab[i]; j < CMI_MAX_STRANDS_PER_CHIP;
		    j++, ent++) {
			if (cmi_hdl_canref(ent))
				return ((cmi_hdl_t)ent->cmae_hdlp);
		}
	}

	return (NULL);
}
1518 
/*
 * Invoke cbfunc on every live handle, holding a reference across each
 * callback.  The walk terminates early if the callback returns
 * CMI_HDL_WALK_DONE; in all cases the per-handle reference is dropped
 * before returning or moving on.
 */
void
cmi_hdl_walk(int (*cbfunc)(cmi_hdl_t, void *, void *, void *),
    void *arg1, void *arg2, void *arg3)
{
	int i, j;
	cmi_hdl_ent_t *ent;

	for (i = 0; i < CMI_CHIPID_ARR_SZ; i++) {
		if (cmi_chip_tab[i] == NULL)
			continue;
		for (j = 0, ent = cmi_chip_tab[i]; j < CMI_MAX_STRANDS_PER_CHIP;
		    j++, ent++) {
			if (cmi_hdl_canref(ent)) {
				cmi_hdl_impl_t *hdl = ent->cmae_hdlp;
				if ((*cbfunc)((cmi_hdl_t)hdl, arg1, arg2, arg3)
				    == CMI_HDL_WALK_DONE) {
					cmi_hdl_rele((cmi_hdl_t)hdl);
					return;
				}
				cmi_hdl_rele((cmi_hdl_t)hdl);
			}
		}
	}
}
1543 
1544 void
1545 cmi_hdl_setcmi(cmi_hdl_t ophdl, void *cmi, void *cmidata)
1546 {
1547 	IMPLHDL(ophdl)->cmih_cmidata = cmidata;
1548 	IMPLHDL(ophdl)->cmih_cmi = cmi;
1549 }
1550 
1551 void *
1552 cmi_hdl_getcmi(cmi_hdl_t ophdl)
1553 {
1554 	return (IMPLHDL(ophdl)->cmih_cmi);
1555 }
1556 
1557 void *
1558 cmi_hdl_getcmidata(cmi_hdl_t ophdl)
1559 {
1560 	return (IMPLHDL(ophdl)->cmih_cmidata);
1561 }
1562 
1563 enum cmi_hdl_class
1564 cmi_hdl_class(cmi_hdl_t ophdl)
1565 {
1566 	return (IMPLHDL(ophdl)->cmih_class);
1567 }
1568 
/*
 * Generate a public cmi_hdl_<what>() accessor of the given return
 * type that simply dispatches to the handle's cmio_<what> op.
 */
#define	CMI_HDL_OPFUNC(what, type)				\
	type							\
	cmi_hdl_##what(cmi_hdl_t ophdl)				\
	{							\
		return (HDLOPS(IMPLHDL(ophdl))->		\
		    cmio_##what(IMPLHDL(ophdl)));		\
	}

CMI_HDL_OPFUNC(vendor, uint_t)
CMI_HDL_OPFUNC(vendorstr, const char *)
CMI_HDL_OPFUNC(family, uint_t)
CMI_HDL_OPFUNC(model, uint_t)
CMI_HDL_OPFUNC(stepping, uint_t)
CMI_HDL_OPFUNC(chipid, uint_t)
CMI_HDL_OPFUNC(procnodeid, uint_t)
CMI_HDL_OPFUNC(coreid, uint_t)
CMI_HDL_OPFUNC(strandid, uint_t)
CMI_HDL_OPFUNC(procnodes_per_pkg, uint_t)
CMI_HDL_OPFUNC(strand_apicid, uint_t)
CMI_HDL_OPFUNC(chiprev, uint32_t)
CMI_HDL_OPFUNC(chiprevstr, const char *)
CMI_HDL_OPFUNC(getsockettype, uint32_t)
CMI_HDL_OPFUNC(getsocketstr, const char *)
CMI_HDL_OPFUNC(logical_id, id_t)
CMI_HDL_OPFUNC(smbiosid, uint16_t)
CMI_HDL_OPFUNC(smb_chipid, uint_t)
CMI_HDL_OPFUNC(smb_bboard, nvlist_t *)
1596 
1597 boolean_t
1598 cmi_hdl_is_cmt(cmi_hdl_t ophdl)
1599 {
1600 	return (IMPLHDL(ophdl)->cmih_mstrand);
1601 }
1602 
1603 void
1604 cmi_hdl_int(cmi_hdl_t ophdl, int num)
1605 {
1606 	if (HDLOPS(IMPLHDL(ophdl))->cmio_int == NULL)
1607 		return;
1608 
1609 	cmi_hdl_inj_begin(ophdl);
1610 	HDLOPS(IMPLHDL(ophdl))->cmio_int(IMPLHDL(ophdl), num);
1611 	cmi_hdl_inj_end(NULL);
1612 }
1613 
1614 int
1615 cmi_hdl_online(cmi_hdl_t ophdl, int new_status, int *old_status)
1616 {
1617 	return (HDLOPS(IMPLHDL(ophdl))->cmio_online(IMPLHDL(ophdl),
1618 	    new_status, old_status));
1619 }
1620 
1621 #ifndef	__xpv
1622 /*
1623  * Return hardware chip instance; cpuid_get_chipid provides this directly.
1624  */
1625 uint_t
1626 cmi_ntv_hwchipid(cpu_t *cp)
1627 {
1628 	return (cpuid_get_chipid(cp));
1629 }
1630 
1631 /*
1632  * Return hardware node instance; cpuid_get_procnodeid provides this directly.
1633  */
1634 uint_t
1635 cmi_ntv_hwprocnodeid(cpu_t *cp)
1636 {
1637 	return (cpuid_get_procnodeid(cp));
1638 }
1639 
1640 /*
1641  * Return core instance within a single chip.
1642  */
1643 uint_t
1644 cmi_ntv_hwcoreid(cpu_t *cp)
1645 {
1646 	return (cpuid_get_pkgcoreid(cp));
1647 }
1648 
1649 /*
1650  * Return strand number within a single core.  cpuid_get_clogid numbers
1651  * all execution units (strands, or cores in unstranded models) sequentially
1652  * within a single chip.
1653  */
1654 uint_t
1655 cmi_ntv_hwstrandid(cpu_t *cp)
1656 {
1657 	int strands_per_core = cpuid_get_ncpu_per_chip(cp) /
1658 	    cpuid_get_ncore_per_chip(cp);
1659 
1660 	return (cpuid_get_clogid(cp) % strands_per_core);
1661 }
1662 
1663 static void
1664 cmi_ntv_hwdisable_mce_xc(void)
1665 {
1666 	ulong_t cr4;
1667 
1668 	cr4 = getcr4();
1669 	cr4 = cr4 & (~CR4_MCE);
1670 	setcr4(cr4);
1671 }
1672 
1673 void
1674 cmi_ntv_hwdisable_mce(cmi_hdl_t hdl)
1675 {
1676 	cpuset_t	set;
1677 	cmi_hdl_impl_t *thdl = IMPLHDL(hdl);
1678 	cpu_t *cp = HDLPRIV(thdl);
1679 
1680 	if (CPU->cpu_id == cp->cpu_id) {
1681 		cmi_ntv_hwdisable_mce_xc();
1682 	} else {
1683 		CPUSET_ONLY(set, cp->cpu_id);
1684 		xc_call(NULL, NULL, NULL, CPUSET2BV(set),
1685 		    (xc_func_t)cmi_ntv_hwdisable_mce_xc);
1686 	}
1687 }
1688 
1689 #endif	/* __xpv */
1690 
/*
 * Disallow hardware MSR reads through this handle; only interposed
 * values (if enabled) will satisfy future cmi_hdl_rdmsr() calls.
 */
void
cmi_hdlconf_rdmsr_nohw(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	hdl->cmih_msrsrc &= ~CMI_MSR_FLAG_RD_HWOK;
}
1698 
/*
 * Disallow hardware MSR writes through this handle.
 */
void
cmi_hdlconf_wrmsr_nohw(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	hdl->cmih_msrsrc &= ~CMI_MSR_FLAG_WR_HWOK;
}
1706 
/*
 * Read an MSR through a handle, preferring any interposed value over
 * the class cmio_rdmsr op.  Returns CMI_SUCCESS with *valp filled on
 * success, CMIERR_NOTSUP if the class supplies no read op.
 */
cmi_errno_t
cmi_hdl_rdmsr(cmi_hdl_t ophdl, uint_t msr, uint64_t *valp)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	/*
	 * Regardless of the handle class, we first check for an
	 * interposed value.  In the xVM case you probably want to
	 * place interposed values within the hypervisor itself, but
	 * we still allow interposing them in dom0 for test and bringup
	 * purposes.
	 */
	if ((hdl->cmih_msrsrc & CMI_MSR_FLAG_RD_INTERPOSEOK) &&
	    msri_lookup(hdl, msr, valp))
		return (CMI_SUCCESS);

	if (HDLOPS(hdl)->cmio_rdmsr == NULL)
		return (CMIERR_NOTSUP);

	return (HDLOPS(hdl)->cmio_rdmsr(hdl, msr, valp));
}
1728 
/*
 * Write an MSR through a handle.  Any interposed value for the MSR is
 * invalidated first; if the class supplies no write op the write is
 * silently accepted.
 */
cmi_errno_t
cmi_hdl_wrmsr(cmi_hdl_t ophdl, uint_t msr, uint64_t val)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	/* Invalidate any interposed value */
	msri_rment(hdl, msr);

	if (HDLOPS(hdl)->cmio_wrmsr == NULL)
		return (CMI_SUCCESS);	/* pretend all is ok */

	return (HDLOPS(hdl)->cmio_wrmsr(hdl, msr, val));
}
1742 
1743 void
1744 cmi_hdl_enable_mce(cmi_hdl_t ophdl)
1745 {
1746 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1747 	ulong_t cr4;
1748 
1749 	if (HDLOPS(hdl)->cmio_getcr4 == NULL ||
1750 	    HDLOPS(hdl)->cmio_setcr4 == NULL)
1751 		return;
1752 
1753 	cr4 = HDLOPS(hdl)->cmio_getcr4(hdl);
1754 
1755 	HDLOPS(hdl)->cmio_setcr4(hdl, cr4 | CR4_MCE);
1756 }
1757 
/*
 * Interpose an array of MSR values through the class cmio_msrinterpose
 * op, bracketed by inj_begin/inj_end so the backend can recognize the
 * activity as injection.  A no-op if the class supplies no interpose op.
 */
void
cmi_hdl_msrinterpose(cmi_hdl_t ophdl, cmi_mca_regs_t *regs, uint_t nregs)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
	int i;

	if (HDLOPS(hdl)->cmio_msrinterpose == NULL)
		return;

	cmi_hdl_inj_begin(ophdl);

	for (i = 0; i < nregs; i++, regs++)
		HDLOPS(hdl)->cmio_msrinterpose(hdl, regs->cmr_msrnum,
		    regs->cmr_msrval);

	cmi_hdl_inj_end(ophdl);
}
1775 
/*
 * Record MSR values forwarded from the hypervisor in the dom0
 * interposition table.  Only meaningful under xVM; a no-op natively
 * (hence ARGSUSED).
 */
/*ARGSUSED*/
void
cmi_hdl_msrforward(cmi_hdl_t ophdl, cmi_mca_regs_t *regs, uint_t nregs)
{
#ifdef __xpv
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
	int i;

	for (i = 0; i < nregs; i++, regs++)
		msri_addent(hdl, regs->cmr_msrnum, regs->cmr_msrval);
#endif
}
1788 
1789 
/*
 * Globally disallow hardware PCI config reads; only interposed values
 * (if enabled) will satisfy future cmi_pci_get* calls.
 */
void
cmi_pcird_nohw(void)
{
	cmi_pcicfg_flags &= ~CMI_PCICFG_FLAG_RD_HWOK;
}
1795 
/*
 * Globally disallow hardware PCI config writes.
 */
void
cmi_pciwr_nohw(void)
{
	cmi_pcicfg_flags &= ~CMI_PCICFG_FLAG_WR_HWOK;
}
1801 
/*
 * Common PCI config-space read of asz bytes (1, 2 or 4) at bus/dev/
 * func/reg.  An interposed value, if present and permitted, is
 * returned first (with *interpose set to 1).  Otherwise the hardware
 * is read through the supplied access handle if non-NULL, or via the
 * pci_cfgacc routines; 0 is returned when hardware reads are disabled
 * or asz is unsupported.
 */
static uint32_t
cmi_pci_get_cmn(int bus, int dev, int func, int reg, int asz,
    int *interpose, ddi_acc_handle_t hdl)
{
	uint32_t val;

	if (cmi_pcicfg_flags & CMI_PCICFG_FLAG_RD_INTERPOSEOK &&
	    pcii_lookup(bus, dev, func, reg, asz, &val)) {
		if (interpose)
			*interpose = 1;
		return (val);
	}
	if (interpose)
		*interpose = 0;

	if (!(cmi_pcicfg_flags & CMI_PCICFG_FLAG_RD_HWOK))
		return (0);

	switch (asz) {
	case 1:
		if (hdl)
			val = pci_config_get8(hdl, (off_t)reg);
		else
			val = pci_cfgacc_get8(NULL, PCI_GETBDF(bus, dev, func),
			    reg);
		break;
	case 2:
		if (hdl)
			val = pci_config_get16(hdl, (off_t)reg);
		else
			val = pci_cfgacc_get16(NULL, PCI_GETBDF(bus, dev, func),
			    reg);
		break;
	case 4:
		if (hdl)
			val = pci_config_get32(hdl, (off_t)reg);
		else
			val = pci_cfgacc_get32(NULL, PCI_GETBDF(bus, dev, func),
			    reg);
		break;
	default:
		val = 0;
	}
	return (val);
}
1847 
/*
 * 8-bit PCI config read; see cmi_pci_get_cmn().
 */
uint8_t
cmi_pci_getb(int bus, int dev, int func, int reg, int *interpose,
    ddi_acc_handle_t hdl)
{
	return ((uint8_t)cmi_pci_get_cmn(bus, dev, func, reg, 1, interpose,
	    hdl));
}
1855 
/*
 * 16-bit PCI config read; see cmi_pci_get_cmn().
 */
uint16_t
cmi_pci_getw(int bus, int dev, int func, int reg, int *interpose,
    ddi_acc_handle_t hdl)
{
	return ((uint16_t)cmi_pci_get_cmn(bus, dev, func, reg, 2, interpose,
	    hdl));
}
1863 
/*
 * 32-bit PCI config read; see cmi_pci_get_cmn().
 */
uint32_t
cmi_pci_getl(int bus, int dev, int func, int reg, int *interpose,
    ddi_acc_handle_t hdl)
{
	return (cmi_pci_get_cmn(bus, dev, func, reg, 4, interpose, hdl));
}
1870 
/*
 * Interpose an 8-bit value on future PCI config reads of this register.
 */
void
cmi_pci_interposeb(int bus, int dev, int func, int reg, uint8_t val)
{
	pcii_addent(bus, dev, func, reg, val, 1);
}
1876 
/*
 * Interpose a 16-bit value on future PCI config reads of this register.
 */
void
cmi_pci_interposew(int bus, int dev, int func, int reg, uint16_t val)
{
	pcii_addent(bus, dev, func, reg, val, 2);
}
1882 
/*
 * Interpose a 32-bit value on future PCI config reads of this register.
 */
void
cmi_pci_interposel(int bus, int dev, int func, int reg, uint32_t val)
{
	pcii_addent(bus, dev, func, reg, val, 4);
}
1888 
/*
 * Common PCI config-space write of asz bytes (1, 2 or 4) at bus/dev/
 * func/reg.  Any interposed value for the register is invalidated
 * first; the hardware write is then performed through the supplied
 * access handle if non-NULL, or via the pci_cfgacc routines, unless
 * hardware writes are globally disabled.
 */
static void
cmi_pci_put_cmn(int bus, int dev, int func, int reg, int asz,
    ddi_acc_handle_t hdl, uint32_t val)
{
	/*
	 * If there is an interposed value for this register invalidate it.
	 */
	pcii_rment(bus, dev, func, reg, asz);

	if (!(cmi_pcicfg_flags & CMI_PCICFG_FLAG_WR_HWOK))
		return;

	switch (asz) {
	case 1:
		if (hdl)
			pci_config_put8(hdl, (off_t)reg, (uint8_t)val);
		else
			pci_cfgacc_put8(NULL, PCI_GETBDF(bus, dev, func), reg,
			    (uint8_t)val);
		break;

	case 2:
		if (hdl)
			pci_config_put16(hdl, (off_t)reg, (uint16_t)val);
		else
			pci_cfgacc_put16(NULL, PCI_GETBDF(bus, dev, func), reg,
			    (uint16_t)val);
		break;

	case 4:
		if (hdl)
			pci_config_put32(hdl, (off_t)reg, val);
		else
			pci_cfgacc_put32(NULL, PCI_GETBDF(bus, dev, func), reg,
			    val);
		break;

	default:
		break;
	}
}
1930 
/*
 * 8-bit PCI config write; see cmi_pci_put_cmn().
 */
void
cmi_pci_putb(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
    uint8_t val)
{
	cmi_pci_put_cmn(bus, dev, func, reg, 1, hdl, val);
}
1937 
/*
 * 16-bit PCI config write; see cmi_pci_put_cmn().
 */
void
cmi_pci_putw(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
    uint16_t val)
{
	cmi_pci_put_cmn(bus, dev, func, reg, 2, hdl, val);
}
1944 
/*
 * 32-bit PCI config write; see cmi_pci_put_cmn().
 */
void
cmi_pci_putl(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
    uint32_t val)
{
	cmi_pci_put_cmn(bus, dev, func, reg, 4, hdl, val);
}
1951 
/*
 * The single ops vector compiled into this kernel: xVM dom0 ops when
 * built with __xpv, native (bare-metal) ops otherwise.  Every handle
 * created by cmi_hdl_create() points at this table.
 */
static const struct cmi_hdl_ops cmi_hdl_ops = {
#ifdef __xpv
	/*
	 * CMI_HDL_SOLARIS_xVM_MCA - ops when we are an xVM dom0
	 */
	xpv_vendor,		/* cmio_vendor */
	xpv_vendorstr,		/* cmio_vendorstr */
	xpv_family,		/* cmio_family */
	xpv_model,		/* cmio_model */
	xpv_stepping,		/* cmio_stepping */
	xpv_chipid,		/* cmio_chipid */
	xpv_procnodeid,		/* cmio_procnodeid */
	xpv_coreid,		/* cmio_coreid */
	xpv_strandid,		/* cmio_strandid */
	xpv_procnodes_per_pkg,	/* cmio_procnodes_per_pkg */
	xpv_strand_apicid,	/* cmio_strand_apicid */
	xpv_chiprev,		/* cmio_chiprev */
	xpv_chiprevstr,		/* cmio_chiprevstr */
	xpv_getsockettype,	/* cmio_getsockettype */
	xpv_getsocketstr,	/* cmio_getsocketstr */
	xpv_logical_id,		/* cmio_logical_id */
	NULL,			/* cmio_getcr4 */
	NULL,			/* cmio_setcr4 */
	xpv_rdmsr,		/* cmio_rdmsr */
	xpv_wrmsr,		/* cmio_wrmsr */
	xpv_msrinterpose,	/* cmio_msrinterpose */
	xpv_int,		/* cmio_int */
	xpv_online,		/* cmio_online */
	xpv_smbiosid,		/* cmio_smbiosid */
	xpv_smb_chipid,		/* cmio_smb_chipid */
	xpv_smb_bboard		/* cmio_smb_bboard */

#else	/* __xpv */

	/*
	 * CMI_HDL_NATIVE - ops when apparently running on bare-metal
	 */
	ntv_vendor,		/* cmio_vendor */
	ntv_vendorstr,		/* cmio_vendorstr */
	ntv_family,		/* cmio_family */
	ntv_model,		/* cmio_model */
	ntv_stepping,		/* cmio_stepping */
	ntv_chipid,		/* cmio_chipid */
	ntv_procnodeid,		/* cmio_procnodeid */
	ntv_coreid,		/* cmio_coreid */
	ntv_strandid,		/* cmio_strandid */
	ntv_procnodes_per_pkg,	/* cmio_procnodes_per_pkg */
	ntv_strand_apicid,	/* cmio_strand_apicid */
	ntv_chiprev,		/* cmio_chiprev */
	ntv_chiprevstr,		/* cmio_chiprevstr */
	ntv_getsockettype,	/* cmio_getsockettype */
	ntv_getsocketstr,	/* cmio_getsocketstr */
	ntv_logical_id,		/* cmio_logical_id */
	ntv_getcr4,		/* cmio_getcr4 */
	ntv_setcr4,		/* cmio_setcr4 */
	ntv_rdmsr,		/* cmio_rdmsr */
	ntv_wrmsr,		/* cmio_wrmsr */
	ntv_msrinterpose,	/* cmio_msrinterpose */
	ntv_int,		/* cmio_int */
	ntv_online,		/* cmio_online */
	ntv_smbiosid,		/* cmio_smbiosid */
	ntv_smb_chipid,		/* cmio_smb_chipid */
	ntv_smb_bboard		/* cmio_smb_bboard */
#endif
};
2017