xref: /titanic_41/usr/src/uts/i86pc/os/cmi_hw.c (revision bc5ed3ddd304f9de10f6ff9ce09411ceca00a507)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * CPU Module Interface - hardware abstraction.
29  */
30 
31 #ifdef __xpv
32 #include <sys/xpv_user.h>
33 #endif
34 
35 #include <sys/types.h>
36 #include <sys/cpu_module.h>
37 #include <sys/kmem.h>
38 #include <sys/x86_archext.h>
39 #include <sys/cpuvar.h>
40 #include <sys/ksynch.h>
41 #include <sys/x_call.h>
42 #include <sys/pghw.h>
43 #include <sys/pci_cfgacc.h>
44 #include <sys/pci_cfgspace.h>
45 #include <sys/archsystm.h>
46 #include <sys/ontrap.h>
47 #include <sys/controlregs.h>
48 #include <sys/sunddi.h>
49 #include <sys/trap.h>
50 #include <sys/mca_x86.h>
51 #include <sys/processor.h>
52 #include <sys/cmn_err.h>
53 #include <sys/nvpair.h>
54 #include <sys/fm/util.h>
55 #include <sys/fm/protocol.h>
56 #include <sys/fm/smb/fmsmb.h>
57 #include <sys/cpu_module_impl.h>
58 
59 /*
60  * Variable which determines if the SMBIOS supports x86 generic topology, or
61  * if legacy topology enumeration will occur.
62  */
63 extern int x86gentopo_legacy;
64 
65 /*
66  * Outside of this file consumers use the opaque cmi_hdl_t.  This
67  * definition is duplicated in the generic_cpu mdb module, so keep
68  * them in sync when making changes.
69  */
70 typedef struct cmi_hdl_impl {
71 	enum cmi_hdl_class cmih_class;		/* Handle nature */
72 	const struct cmi_hdl_ops *cmih_ops;	/* Operations vector */
73 	uint_t cmih_chipid;			/* Chipid of cpu resource */
74 	uint_t cmih_procnodeid;			/* Nodeid of cpu resource */
75 	uint_t cmih_coreid;			/* Core within die */
76 	uint_t cmih_strandid;			/* Thread within core */
77 	uint_t cmih_procnodes_per_pkg;		/* Nodes in a processor */
78 	boolean_t cmih_mstrand;			/* cores are multithreaded */
79 	volatile uint32_t *cmih_refcntp;	/* Reference count pointer */
80 	uint64_t cmih_msrsrc;			/* MSR data source flags */
81 	void *cmih_hdlpriv;			/* cmi_hw.c private data */
82 	void *cmih_spec;			/* cmi_hdl_{set,get}_specific */
83 	void *cmih_cmi;				/* cpu mod control structure */
84 	void *cmih_cmidata;			/* cpu mod private data */
85 	const struct cmi_mc_ops *cmih_mcops;	/* Memory-controller ops */
86 	void *cmih_mcdata;			/* Memory-controller data */
87 	uint64_t cmih_flags;			/* See CMIH_F_* below */
88 	uint16_t cmih_smbiosid;			/* SMBIOS Type 4 struct ID */
89 	uint_t cmih_smb_chipid;			/* SMBIOS factored chipid */
90 	nvlist_t *cmih_smb_bboard;		/* SMBIOS bboard nvlist */
91 } cmi_hdl_impl_t;
92 
93 #define	IMPLHDL(ophdl)	((cmi_hdl_impl_t *)ophdl)
94 #define	HDLOPS(hdl)	((hdl)->cmih_ops)
95 
96 #define	CMIH_F_INJACTV		0x1ULL
97 
98 /*
99  * Ops structure for handle operations.
100  */
101 struct cmi_hdl_ops {
102 	/*
103 	 * These ops are required in an implementation.
104 	 */
105 	uint_t (*cmio_vendor)(cmi_hdl_impl_t *);
106 	const char *(*cmio_vendorstr)(cmi_hdl_impl_t *);
107 	uint_t (*cmio_family)(cmi_hdl_impl_t *);
108 	uint_t (*cmio_model)(cmi_hdl_impl_t *);
109 	uint_t (*cmio_stepping)(cmi_hdl_impl_t *);
110 	uint_t (*cmio_chipid)(cmi_hdl_impl_t *);
111 	uint_t (*cmio_procnodeid)(cmi_hdl_impl_t *);
112 	uint_t (*cmio_coreid)(cmi_hdl_impl_t *);
113 	uint_t (*cmio_strandid)(cmi_hdl_impl_t *);
114 	uint_t (*cmio_procnodes_per_pkg)(cmi_hdl_impl_t *);
115 	uint_t (*cmio_strand_apicid)(cmi_hdl_impl_t *);
116 	uint32_t (*cmio_chiprev)(cmi_hdl_impl_t *);
117 	const char *(*cmio_chiprevstr)(cmi_hdl_impl_t *);
118 	uint32_t (*cmio_getsockettype)(cmi_hdl_impl_t *);
119 	const char *(*cmio_getsocketstr)(cmi_hdl_impl_t *);
120 
121 	id_t (*cmio_logical_id)(cmi_hdl_impl_t *);
122 	/*
123 	 * These ops are optional in an implementation.
124 	 */
125 	ulong_t (*cmio_getcr4)(cmi_hdl_impl_t *);
126 	void (*cmio_setcr4)(cmi_hdl_impl_t *, ulong_t);
127 	cmi_errno_t (*cmio_rdmsr)(cmi_hdl_impl_t *, uint_t, uint64_t *);
128 	cmi_errno_t (*cmio_wrmsr)(cmi_hdl_impl_t *, uint_t, uint64_t);
129 	cmi_errno_t (*cmio_msrinterpose)(cmi_hdl_impl_t *, uint_t, uint64_t);
130 	void (*cmio_int)(cmi_hdl_impl_t *, int);
131 	int (*cmio_online)(cmi_hdl_impl_t *, int, int *);
132 	uint16_t (*cmio_smbiosid)(cmi_hdl_impl_t *);
133 	uint_t (*cmio_smb_chipid)(cmi_hdl_impl_t *);
134 	nvlist_t *(*cmio_smb_bboard)(cmi_hdl_impl_t *);
135 };
136 
137 static const struct cmi_hdl_ops cmi_hdl_ops;
138 
139 /*
140  * Handles are looked up from contexts such as polling, injection, etc.,
141  * where the context is reasonably well defined (although a poller could
142  * interrupt any old thread holding any old lock).  They are also looked
143  * up by machine check handlers, which may strike at inconvenient times
144  * such as during handle initialization or destruction or during handle
145  * lookup (which the #MC handler itself will also have to perform).
146  *
147  * So keeping handles in a linked list makes locking difficult when we
148  * consider #MC handlers.  Our solution is to have a look-up table indexed
149  * by that which uniquely identifies a handle - chip/core/strand id -
150  * with each entry a structure including a pointer to a handle
151  * structure for the resource, and a reference count for the handle.
152  * Reference counts are modified atomically.  The public cmi_hdl_hold
153  * always succeeds because this can only be used after handle creation
154  * and before the call to destruct, so the hold count is already at least one.
155  * In other functions that look up a handle (cmi_hdl_lookup, cmi_hdl_any)
156  * we must be certain that the count has not already been decremented to zero
157  * before applying our hold.
158  *
159  * The table is an array of CMI_CHIPID_ARR_SZ entries (the maximum number
160  * of chips), indexed by chip id. If the chip is not present, the entry is
161  * NULL. Each entry is a pointer to a second-level array holding the handle
162  * entries for all strands of the chip. This second-level (per-chip) table,
163  * of CMI_MAX_STRANDS_PER_CHIP entries, is allocated when we first want to
164  * populate one of its entries, which happens when one of its cpus starts.
165  *
166  * Ideally we would size these tables to the actual number of chips, cores
167  * per chip and strands per core. The number of chips is not known until all
168  * of them have been enumerated, and the core and strand counts are only
169  * partially available, so for now we stick with the fixed maximums above.
170  */
171 #define	CMI_MAX_CHIPID_NBITS		6	/* max chipid of 63 */
172 #define	CMI_MAX_CORES_PER_CHIP_NBITS	4	/* 16 cores per chip max */
173 #define	CMI_MAX_STRANDS_PER_CORE_NBITS	3	/* 8 strands per core max */
174 
175 #define	CMI_MAX_CHIPID			((1 << (CMI_MAX_CHIPID_NBITS)) - 1)
176 #define	CMI_MAX_CORES_PER_CHIP		(1 << CMI_MAX_CORES_PER_CHIP_NBITS)
177 #define	CMI_MAX_STRANDS_PER_CORE	(1 << CMI_MAX_STRANDS_PER_CORE_NBITS)
178 #define	CMI_MAX_STRANDS_PER_CHIP	(CMI_MAX_CORES_PER_CHIP * \
179 					    CMI_MAX_STRANDS_PER_CORE)
180 
181 /*
182  * Handle array indexing within a per-chip table
183  *	[6:3] = Core in package,
184  *	[2:0] = Strand in core,
185  */
186 #define	CMI_HDL_ARR_IDX_CORE(coreid) \
187 	(((coreid) & (CMI_MAX_CORES_PER_CHIP - 1)) << \
188 	CMI_MAX_STRANDS_PER_CORE_NBITS)
189 
190 #define	CMI_HDL_ARR_IDX_STRAND(strandid) \
191 	(((strandid) & (CMI_MAX_STRANDS_PER_CORE - 1)))
192 
193 #define	CMI_HDL_ARR_IDX(coreid, strandid) \
194 	(CMI_HDL_ARR_IDX_CORE(coreid) | CMI_HDL_ARR_IDX_STRAND(strandid))
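/*
 * Worked example (illustrative only): with the limits above, a cpu with
 * coreid 5 and strandid 3 lands at per-chip index
 * CMI_HDL_ARR_IDX(5, 3) = ((5 & 0xf) << 3) | (3 & 0x7) = 43, i.e. entry
 * 43 of that chip's CMI_MAX_STRANDS_PER_CHIP (128) entry table.
 */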
195 
196 #define	CMI_CHIPID_ARR_SZ		(1 << CMI_MAX_CHIPID_NBITS)
197 
198 typedef struct cmi_hdl_ent {
199 	volatile uint32_t cmae_refcnt;
200 	cmi_hdl_impl_t *cmae_hdlp;
201 } cmi_hdl_ent_t;
202 
203 static cmi_hdl_ent_t *cmi_chip_tab[CMI_CHIPID_ARR_SZ];
204 
205 /*
206  * Controls where we will source PCI config space data.
207  */
208 #define	CMI_PCICFG_FLAG_RD_HWOK		0x0001
209 #define	CMI_PCICFG_FLAG_RD_INTERPOSEOK	0x0002
210 #define	CMI_PCICFG_FLAG_WR_HWOK		0x0004
211 #define	CMI_PCICFG_FLAG_WR_INTERPOSEOK	0x0008
212 
213 static uint64_t cmi_pcicfg_flags =
214     CMI_PCICFG_FLAG_RD_HWOK | CMI_PCICFG_FLAG_RD_INTERPOSEOK |
215     CMI_PCICFG_FLAG_WR_HWOK | CMI_PCICFG_FLAG_WR_INTERPOSEOK;
216 
217 /*
218  * The flags for individual cpus are kept in their per-cpu handle cmih_msrsrc
219  */
220 #define	CMI_MSR_FLAG_RD_HWOK		0x0001
221 #define	CMI_MSR_FLAG_RD_INTERPOSEOK	0x0002
222 #define	CMI_MSR_FLAG_WR_HWOK		0x0004
223 #define	CMI_MSR_FLAG_WR_INTERPOSEOK	0x0008
224 
225 int cmi_call_func_ntv_tries = 3;
226 
227 static cmi_errno_t
228 call_func_ntv(int cpuid, xc_func_t func, xc_arg_t arg1, xc_arg_t arg2)
229 {
230 	cmi_errno_t rc = -1;
231 	int i;
232 
233 	kpreempt_disable();
234 
235 	if (CPU->cpu_id == cpuid) {
236 		(*func)(arg1, arg2, (xc_arg_t)&rc);
237 	} else {
238 		/*
239 		 * This should not happen for a #MC trap or a poll, so
240 		 * this is likely an error injection or similar.
241 		 * We will try to cross call with xc_trycall - we
242 		 * can't guarantee success with xc_call because
243 		 * the interrupt code in the case of a #MC may
244 		 * already hold the xc mutex.
245 		 */
246 		for (i = 0; i < cmi_call_func_ntv_tries; i++) {
247 			cpuset_t cpus;
248 
249 			CPUSET_ONLY(cpus, cpuid);
250 			xc_priority(arg1, arg2, (xc_arg_t)&rc,
251 			    CPUSET2BV(cpus), func);
252 			if (rc != -1)
253 				break;
254 
255 			DELAY(1);
256 		}
257 	}
258 
259 	kpreempt_enable();
260 
261 	return (rc != -1 ? rc : CMIERR_DEADLOCK);
262 }
263 
264 static uint64_t injcnt;
265 
266 void
267 cmi_hdl_inj_begin(cmi_hdl_t ophdl)
268 {
269 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
270 
271 	if (hdl != NULL)
272 		hdl->cmih_flags |= CMIH_F_INJACTV;
273 	if (injcnt++ == 0) {
274 		cmn_err(CE_NOTE, "Hardware error injection/simulation "
275 		    "activity noted");
276 	}
277 }
278 
279 void
280 cmi_hdl_inj_end(cmi_hdl_t ophdl)
281 {
282 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
283 
284 	ASSERT(hdl == NULL || hdl->cmih_flags & CMIH_F_INJACTV);
285 	if (hdl != NULL)
286 		hdl->cmih_flags &= ~CMIH_F_INJACTV;
287 }
288 
289 boolean_t
290 cmi_inj_tainted(void)
291 {
292 	return (injcnt != 0 ? B_TRUE : B_FALSE);
293 }
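/*
 * Illustrative usage sketch (not part of this file): injection consumers
 * bracket their activity so that backends such as xpv_wrmsr_cmn can
 * distinguish simulated accesses from real ones:
 *
 *	cmi_hdl_inj_begin(hdl);
 *	(void) cmi_hdl_wrmsr(hdl, msr, val);
 *	cmi_hdl_inj_end(hdl);
 */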
294 
295 /*
296  *	 =======================================================
297  *	|	MSR Interposition				|
298  *	|	-----------------				|
299  *	|							|
300  *	 -------------------------------------------------------
301  */
302 
303 #define	CMI_MSRI_HASHSZ		16
304 #define	CMI_MSRI_HASHIDX(hdl, msr) \
305 	((((uintptr_t)(hdl) >> 3) + (msr)) % (CMI_MSRI_HASHSZ - 1))
306 
307 struct cmi_msri_bkt {
308 	kmutex_t msrib_lock;
309 	struct cmi_msri_hashent *msrib_head;
310 };
311 
312 struct cmi_msri_hashent {
313 	struct cmi_msri_hashent *msrie_next;
314 	struct cmi_msri_hashent *msrie_prev;
315 	cmi_hdl_impl_t *msrie_hdl;
316 	uint_t msrie_msrnum;
317 	uint64_t msrie_msrval;
318 };
319 
320 #define	CMI_MSRI_MATCH(ent, hdl, req_msr) \
321 	((ent)->msrie_hdl == (hdl) && (ent)->msrie_msrnum == (req_msr))
322 
323 static struct cmi_msri_bkt msrihash[CMI_MSRI_HASHSZ];
324 
325 static void
326 msri_addent(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
327 {
328 	int idx = CMI_MSRI_HASHIDX(hdl, msr);
329 	struct cmi_msri_bkt *hbp = &msrihash[idx];
330 	struct cmi_msri_hashent *hep;
331 
332 	mutex_enter(&hbp->msrib_lock);
333 
334 	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
335 		if (CMI_MSRI_MATCH(hep, hdl, msr))
336 			break;
337 	}
338 
339 	if (hep != NULL) {
340 		hep->msrie_msrval = val;
341 	} else {
342 		hep = kmem_alloc(sizeof (*hep), KM_SLEEP);
343 		hep->msrie_hdl = hdl;
344 		hep->msrie_msrnum = msr;
345 		hep->msrie_msrval = val;
346 
347 		if (hbp->msrib_head != NULL)
348 			hbp->msrib_head->msrie_prev = hep;
349 		hep->msrie_next = hbp->msrib_head;
350 		hep->msrie_prev = NULL;
351 		hbp->msrib_head = hep;
352 	}
353 
354 	mutex_exit(&hbp->msrib_lock);
355 }
356 
357 /*
358  * Look for a match for the given handle and msr.  Return 1 with valp
359  * filled if a match is found, otherwise return 0 with valp untouched.
360  */
361 static int
362 msri_lookup(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
363 {
364 	int idx = CMI_MSRI_HASHIDX(hdl, msr);
365 	struct cmi_msri_bkt *hbp = &msrihash[idx];
366 	struct cmi_msri_hashent *hep;
367 
368 	/*
369 	 * This function is called during #MC trap handling, so we should
370 	 * consider the possibility that the hash mutex is held by the
371 	 * interrupted thread.  This should not happen because interposition
372 	 * is an artificial injection mechanism and the #MC is requested
373 	 * after adding entries, but just in case of a real #MC at an
374 	 * unlucky moment we'll use mutex_tryenter here.
375 	 */
376 	if (!mutex_tryenter(&hbp->msrib_lock))
377 		return (0);
378 
379 	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
380 		if (CMI_MSRI_MATCH(hep, hdl, msr)) {
381 			*valp = hep->msrie_msrval;
382 			break;
383 		}
384 	}
385 
386 	mutex_exit(&hbp->msrib_lock);
387 
388 	return (hep != NULL);
389 }
390 
391 /*
392  * Remove any interposed value that matches.
393  */
394 static void
395 msri_rment(cmi_hdl_impl_t *hdl, uint_t msr)
396 {
398 	int idx = CMI_MSRI_HASHIDX(hdl, msr);
399 	struct cmi_msri_bkt *hbp = &msrihash[idx];
400 	struct cmi_msri_hashent *hep;
401 
402 	if (!mutex_tryenter(&hbp->msrib_lock))
403 		return;
404 
405 	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
406 		if (CMI_MSRI_MATCH(hep, hdl, msr)) {
407 			if (hep->msrie_prev != NULL)
408 				hep->msrie_prev->msrie_next = hep->msrie_next;
409 
410 			if (hep->msrie_next != NULL)
411 				hep->msrie_next->msrie_prev = hep->msrie_prev;
412 
413 			if (hbp->msrib_head == hep)
414 				hbp->msrib_head = hep->msrie_next;
415 
416 			kmem_free(hep, sizeof (*hep));
417 			break;
418 		}
419 	}
420 
421 	mutex_exit(&hbp->msrib_lock);
422 }
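/*
 * A minimal sketch of the interposition life cycle (assumes a held
 * handle hdl with the default MSR source flags):
 *
 *	uint64_t v;
 *
 *	msri_addent(hdl, msr, val);			stash val
 *	(void) cmi_hdl_rdmsr((cmi_hdl_t)hdl, msr, &v);	v is now val
 *	(void) cmi_hdl_wrmsr((cmi_hdl_t)hdl, msr, 0);	stash invalidated
 */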
423 
424 /*
425  *	 =======================================================
426  *	|	PCI Config Space Interposition			|
427  *	|	------------------------------			|
428  *	|							|
429  *	 -------------------------------------------------------
430  */
431 
432 /*
433  * Hash for interposed PCI config space values.  We look up on bus/dev/func/offset
434  * and then record whether the value stashed was made with a byte, word or
435  * doubleword access;  we will only return a hit for an access of the
436  * same size.  If you access say a 32-bit register using byte accesses
437  * and then attempt to read the full 32-bit value back you will not obtain
438  * any sort of merged result - you get a lookup miss.
439  */
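/*
 * For instance (illustrative): after cmi_pci_interposeb(0, 24, 3, 0x40,
 * 0xff), a byte-sized read of that offset hits the stash, but a
 * cmi_pci_getl() of the same offset misses (pcii_asize differs) and
 * falls through to hardware.
 */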
440 
441 #define	CMI_PCII_HASHSZ		16
442 #define	CMI_PCII_HASHIDX(b, d, f, o) \
443 	(((b) + (d) + (f) + (o)) % (CMI_PCII_HASHSZ - 1))
444 
445 struct cmi_pcii_bkt {
446 	kmutex_t pciib_lock;
447 	struct cmi_pcii_hashent *pciib_head;
448 };
449 
450 struct cmi_pcii_hashent {
451 	struct cmi_pcii_hashent *pcii_next;
452 	struct cmi_pcii_hashent *pcii_prev;
453 	int pcii_bus;
454 	int pcii_dev;
455 	int pcii_func;
456 	int pcii_reg;
457 	int pcii_asize;
458 	uint32_t pcii_val;
459 };
460 
461 #define	CMI_PCII_MATCH(ent, b, d, f, r, asz) \
462 	((ent)->pcii_bus == (b) && (ent)->pcii_dev == (d) && \
463 	(ent)->pcii_func == (f) && (ent)->pcii_reg == (r) && \
464 	(ent)->pcii_asize == (asz))
465 
466 static struct cmi_pcii_bkt pciihash[CMI_PCII_HASHSZ];
467 
468 
469 /*
470  * Add a new entry to the PCI interpose hash, overwriting any existing
471  * entry that is found.
472  */
473 static void
474 pcii_addent(int bus, int dev, int func, int reg, uint32_t val, int asz)
475 {
476 	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
477 	struct cmi_pcii_bkt *hbp = &pciihash[idx];
478 	struct cmi_pcii_hashent *hep;
479 
480 	cmi_hdl_inj_begin(NULL);
481 
482 	mutex_enter(&hbp->pciib_lock);
483 
484 	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
485 		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz))
486 			break;
487 	}
488 
489 	if (hep != NULL) {
490 		hep->pcii_val = val;
491 	} else {
492 		hep = kmem_alloc(sizeof (*hep), KM_SLEEP);
493 		hep->pcii_bus = bus;
494 		hep->pcii_dev = dev;
495 		hep->pcii_func = func;
496 		hep->pcii_reg = reg;
497 		hep->pcii_asize = asz;
498 		hep->pcii_val = val;
499 
500 		if (hbp->pciib_head != NULL)
501 			hbp->pciib_head->pcii_prev = hep;
502 		hep->pcii_next = hbp->pciib_head;
503 		hep->pcii_prev = NULL;
504 		hbp->pciib_head = hep;
505 	}
506 
507 	mutex_exit(&hbp->pciib_lock);
508 
509 	cmi_hdl_inj_end(NULL);
510 }
511 
512 /*
513  * Look for a match for the given bus/dev/func/reg; return 1 with valp
514  * filled if a match is found, otherwise return 0 with valp untouched.
515  */
516 static int
517 pcii_lookup(int bus, int dev, int func, int reg, int asz, uint32_t *valp)
518 {
519 	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
520 	struct cmi_pcii_bkt *hbp = &pciihash[idx];
521 	struct cmi_pcii_hashent *hep;
522 
523 	if (!mutex_tryenter(&hbp->pciib_lock))
524 		return (0);
525 
526 	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
527 		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz)) {
528 			*valp = hep->pcii_val;
529 			break;
530 		}
531 	}
532 
533 	mutex_exit(&hbp->pciib_lock);
534 
535 	return (hep != NULL);
536 }
537 
538 static void
539 pcii_rment(int bus, int dev, int func, int reg, int asz)
540 {
541 	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
542 	struct cmi_pcii_bkt *hbp = &pciihash[idx];
543 	struct cmi_pcii_hashent *hep;
544 
545 	mutex_enter(&hbp->pciib_lock);
546 
547 	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
548 		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz)) {
549 			if (hep->pcii_prev != NULL)
550 				hep->pcii_prev->pcii_next = hep->pcii_next;
551 
552 			if (hep->pcii_next != NULL)
553 				hep->pcii_next->pcii_prev = hep->pcii_prev;
554 
555 			if (hbp->pciib_head == hep)
556 				hbp->pciib_head = hep->pcii_next;
557 
558 			kmem_free(hep, sizeof (*hep));
559 			break;
560 		}
561 	}
562 
563 	mutex_exit(&hbp->pciib_lock);
564 }
565 
566 #ifndef __xpv
567 
568 /*
569  *	 =======================================================
570  *	|	Native methods					|
571  *	|	--------------					|
572  *	|							|
573  *	| These are used when we are running native on bare-	|
574  *	| metal, or simply don't know any better.		|
575  *	---------------------------------------------------------
576  */
577 
578 #define	HDLPRIV(hdl)	((cpu_t *)(hdl)->cmih_hdlpriv)
579 
580 static uint_t
581 ntv_vendor(cmi_hdl_impl_t *hdl)
582 {
583 	return (cpuid_getvendor(HDLPRIV(hdl)));
584 }
585 
586 static const char *
587 ntv_vendorstr(cmi_hdl_impl_t *hdl)
588 {
589 	return (cpuid_getvendorstr(HDLPRIV(hdl)));
590 }
591 
592 static uint_t
593 ntv_family(cmi_hdl_impl_t *hdl)
594 {
595 	return (cpuid_getfamily(HDLPRIV(hdl)));
596 }
597 
598 static uint_t
599 ntv_model(cmi_hdl_impl_t *hdl)
600 {
601 	return (cpuid_getmodel(HDLPRIV(hdl)));
602 }
603 
604 static uint_t
605 ntv_stepping(cmi_hdl_impl_t *hdl)
606 {
607 	return (cpuid_getstep(HDLPRIV(hdl)));
608 }
609 
610 static uint_t
611 ntv_chipid(cmi_hdl_impl_t *hdl)
612 {
613 	return (hdl->cmih_chipid);
615 }
616 
617 static uint_t
618 ntv_procnodeid(cmi_hdl_impl_t *hdl)
619 {
620 	return (hdl->cmih_procnodeid);
621 }
622 
623 static uint_t
624 ntv_procnodes_per_pkg(cmi_hdl_impl_t *hdl)
625 {
626 	return (hdl->cmih_procnodes_per_pkg);
627 }
628 
629 static uint_t
630 ntv_coreid(cmi_hdl_impl_t *hdl)
631 {
632 	return (hdl->cmih_coreid);
633 }
634 
635 static uint_t
636 ntv_strandid(cmi_hdl_impl_t *hdl)
637 {
638 	return (hdl->cmih_strandid);
639 }
640 
641 static uint_t
642 ntv_strand_apicid(cmi_hdl_impl_t *hdl)
643 {
644 	return (cpuid_get_apicid(HDLPRIV(hdl)));
645 }
646 
647 static uint16_t
648 ntv_smbiosid(cmi_hdl_impl_t *hdl)
649 {
650 	return (hdl->cmih_smbiosid);
651 }
652 
653 static uint_t
654 ntv_smb_chipid(cmi_hdl_impl_t *hdl)
655 {
656 	return (hdl->cmih_smb_chipid);
657 }
658 
659 static nvlist_t *
660 ntv_smb_bboard(cmi_hdl_impl_t *hdl)
661 {
662 	return (hdl->cmih_smb_bboard);
663 }
664 
665 static uint32_t
666 ntv_chiprev(cmi_hdl_impl_t *hdl)
667 {
668 	return (cpuid_getchiprev(HDLPRIV(hdl)));
669 }
670 
671 static const char *
672 ntv_chiprevstr(cmi_hdl_impl_t *hdl)
673 {
674 	return (cpuid_getchiprevstr(HDLPRIV(hdl)));
675 }
676 
677 static uint32_t
678 ntv_getsockettype(cmi_hdl_impl_t *hdl)
679 {
680 	return (cpuid_getsockettype(HDLPRIV(hdl)));
681 }
682 
683 static const char *
684 ntv_getsocketstr(cmi_hdl_impl_t *hdl)
685 {
686 	return (cpuid_getsocketstr(HDLPRIV(hdl)));
687 }
688 
689 static id_t
690 ntv_logical_id(cmi_hdl_impl_t *hdl)
691 {
692 	return (HDLPRIV(hdl)->cpu_id);
693 }
694 
695 /*ARGSUSED*/
696 static int
697 ntv_getcr4_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
698 {
699 	ulong_t *dest = (ulong_t *)arg1;
700 	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
701 
702 	*dest = getcr4();
703 	*rcp = CMI_SUCCESS;
704 
705 	return (0);
706 }
707 
708 static ulong_t
709 ntv_getcr4(cmi_hdl_impl_t *hdl)
710 {
711 	cpu_t *cp = HDLPRIV(hdl);
712 	ulong_t val;
713 
714 	(void) call_func_ntv(cp->cpu_id, ntv_getcr4_xc, (xc_arg_t)&val, NULL);
715 
716 	return (val);
717 }
718 
719 /*ARGSUSED*/
720 static int
721 ntv_setcr4_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
722 {
723 	ulong_t val = (ulong_t)arg1;
724 	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
725 
726 	setcr4(val);
727 	*rcp = CMI_SUCCESS;
728 
729 	return (0);
730 }
731 
732 static void
733 ntv_setcr4(cmi_hdl_impl_t *hdl, ulong_t val)
734 {
735 	cpu_t *cp = HDLPRIV(hdl);
736 
737 	(void) call_func_ntv(cp->cpu_id, ntv_setcr4_xc, (xc_arg_t)val, NULL);
738 }
739 
740 volatile uint32_t cmi_trapped_rdmsr;
741 
742 /*ARGSUSED*/
743 static int
744 ntv_rdmsr_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
745 {
746 	uint_t msr = (uint_t)arg1;
747 	uint64_t *valp = (uint64_t *)arg2;
748 	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
749 
750 	on_trap_data_t otd;
751 
752 	if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
753 		if (checked_rdmsr(msr, valp) == 0)
754 			*rcp = CMI_SUCCESS;
755 		else
756 			*rcp = CMIERR_NOTSUP;
757 	} else {
758 		*rcp = CMIERR_MSRGPF;
759 		atomic_inc_32(&cmi_trapped_rdmsr);
760 	}
761 	no_trap();
762 
763 	return (0);
764 }
765 
766 static cmi_errno_t
767 ntv_rdmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
768 {
769 	cpu_t *cp = HDLPRIV(hdl);
770 
771 	if (!(hdl->cmih_msrsrc & CMI_MSR_FLAG_RD_HWOK))
772 		return (CMIERR_INTERPOSE);
773 
774 	return (call_func_ntv(cp->cpu_id, ntv_rdmsr_xc,
775 	    (xc_arg_t)msr, (xc_arg_t)valp));
776 }
777 
778 volatile uint32_t cmi_trapped_wrmsr;
779 
780 /*ARGSUSED*/
781 static int
782 ntv_wrmsr_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
783 {
784 	uint_t msr = (uint_t)arg1;
785 	uint64_t val = *((uint64_t *)arg2);
786 	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
787 	on_trap_data_t otd;
788 
789 	if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
790 		if (checked_wrmsr(msr, val) == 0)
791 			*rcp = CMI_SUCCESS;
792 		else
793 			*rcp = CMIERR_NOTSUP;
794 	} else {
795 		*rcp = CMIERR_MSRGPF;
796 		atomic_inc_32(&cmi_trapped_wrmsr);
797 	}
798 	no_trap();
799 
800 	return (0);
802 }
803 
804 static cmi_errno_t
805 ntv_wrmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
806 {
807 	cpu_t *cp = HDLPRIV(hdl);
808 
809 	if (!(hdl->cmih_msrsrc & CMI_MSR_FLAG_WR_HWOK))
810 		return (CMI_SUCCESS);
811 
812 	return (call_func_ntv(cp->cpu_id, ntv_wrmsr_xc,
813 	    (xc_arg_t)msr, (xc_arg_t)&val));
814 }
815 
816 static cmi_errno_t
817 ntv_msrinterpose(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
818 {
819 	msri_addent(hdl, msr, val);
820 	return (CMI_SUCCESS);
821 }
822 
823 /*ARGSUSED*/
824 static int
825 ntv_int_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
826 {
827 	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
828 	int int_no = (int)arg1;
829 
830 	if (int_no == T_MCE)
831 		int18();
832 	else
833 		int_cmci();
834 	*rcp = CMI_SUCCESS;
835 
836 	return (0);
837 }
838 
839 static void
840 ntv_int(cmi_hdl_impl_t *hdl, int int_no)
841 {
842 	cpu_t *cp = HDLPRIV(hdl);
843 
844 	(void) call_func_ntv(cp->cpu_id, ntv_int_xc, (xc_arg_t)int_no, NULL);
845 }
846 
847 static int
848 ntv_online(cmi_hdl_impl_t *hdl, int new_status, int *old_status)
849 {
850 	processorid_t cpuid = HDLPRIV(hdl)->cpu_id;
851 
852 	return (p_online_internal(cpuid, new_status, old_status));
853 }
854 
855 #else	/* __xpv */
856 
857 /*
858  *	 =======================================================
859  *	|	xVM dom0 methods				|
860  *	|	----------------				|
861  *	|							|
862  *	| These are used when we are running as dom0 in		|
863  *	| a Solaris xVM context.				|
864  *	---------------------------------------------------------
865  */
866 
867 #define	HDLPRIV(hdl)	((xen_mc_lcpu_cookie_t)(hdl)->cmih_hdlpriv)
868 
869 extern uint_t _cpuid_vendorstr_to_vendorcode(char *);
870 
871 
872 static uint_t
873 xpv_vendor(cmi_hdl_impl_t *hdl)
874 {
875 	return (_cpuid_vendorstr_to_vendorcode((char *)xen_physcpu_vendorstr(
876 	    HDLPRIV(hdl))));
877 }
878 
879 static const char *
880 xpv_vendorstr(cmi_hdl_impl_t *hdl)
881 {
882 	return (xen_physcpu_vendorstr(HDLPRIV(hdl)));
883 }
884 
885 static uint_t
886 xpv_family(cmi_hdl_impl_t *hdl)
887 {
888 	return (xen_physcpu_family(HDLPRIV(hdl)));
889 }
890 
891 static uint_t
892 xpv_model(cmi_hdl_impl_t *hdl)
893 {
894 	return (xen_physcpu_model(HDLPRIV(hdl)));
895 }
896 
897 static uint_t
898 xpv_stepping(cmi_hdl_impl_t *hdl)
899 {
900 	return (xen_physcpu_stepping(HDLPRIV(hdl)));
901 }
902 
903 static uint_t
904 xpv_chipid(cmi_hdl_impl_t *hdl)
905 {
906 	return (hdl->cmih_chipid);
907 }
908 
909 static uint_t
910 xpv_procnodeid(cmi_hdl_impl_t *hdl)
911 {
912 	return (hdl->cmih_procnodeid);
913 }
914 
915 static uint_t
916 xpv_procnodes_per_pkg(cmi_hdl_impl_t *hdl)
917 {
918 	return (hdl->cmih_procnodes_per_pkg);
919 }
920 
921 static uint_t
922 xpv_coreid(cmi_hdl_impl_t *hdl)
923 {
924 	return (hdl->cmih_coreid);
925 }
926 
927 static uint_t
928 xpv_strandid(cmi_hdl_impl_t *hdl)
929 {
930 	return (hdl->cmih_strandid);
931 }
932 
933 static uint_t
934 xpv_strand_apicid(cmi_hdl_impl_t *hdl)
935 {
936 	return (xen_physcpu_initial_apicid(HDLPRIV(hdl)));
937 }
938 
939 static uint16_t
940 xpv_smbiosid(cmi_hdl_impl_t *hdl)
941 {
942 	return (hdl->cmih_smbiosid);
943 }
944 
945 static uint_t
946 xpv_smb_chipid(cmi_hdl_impl_t *hdl)
947 {
948 	return (hdl->cmih_smb_chipid);
949 }
950 
951 static nvlist_t *
952 xpv_smb_bboard(cmi_hdl_impl_t *hdl)
953 {
954 	return (hdl->cmih_smb_bboard);
955 }
956 
957 extern uint32_t _cpuid_chiprev(uint_t, uint_t, uint_t, uint_t);
958 
959 static uint32_t
960 xpv_chiprev(cmi_hdl_impl_t *hdl)
961 {
962 	return (_cpuid_chiprev(xpv_vendor(hdl), xpv_family(hdl),
963 	    xpv_model(hdl), xpv_stepping(hdl)));
964 }
965 
966 extern const char *_cpuid_chiprevstr(uint_t, uint_t, uint_t, uint_t);
967 
968 static const char *
969 xpv_chiprevstr(cmi_hdl_impl_t *hdl)
970 {
971 	return (_cpuid_chiprevstr(xpv_vendor(hdl), xpv_family(hdl),
972 	    xpv_model(hdl), xpv_stepping(hdl)));
973 }
974 
975 extern uint32_t _cpuid_skt(uint_t, uint_t, uint_t, uint_t);
976 
977 static uint32_t
978 xpv_getsockettype(cmi_hdl_impl_t *hdl)
979 {
980 	return (_cpuid_skt(xpv_vendor(hdl), xpv_family(hdl),
981 	    xpv_model(hdl), xpv_stepping(hdl)));
982 }
983 
984 extern const char *_cpuid_sktstr(uint_t, uint_t, uint_t, uint_t);
985 
986 static const char *
987 xpv_getsocketstr(cmi_hdl_impl_t *hdl)
988 {
989 	return (_cpuid_sktstr(xpv_vendor(hdl), xpv_family(hdl),
990 	    xpv_model(hdl), xpv_stepping(hdl)));
991 }
992 
993 static id_t
994 xpv_logical_id(cmi_hdl_impl_t *hdl)
995 {
996 	return (xen_physcpu_logical_id(HDLPRIV(hdl)));
997 }
998 
999 static cmi_errno_t
1000 xpv_rdmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
1001 {
1002 	switch (msr) {
1003 	case IA32_MSR_MCG_CAP:
1004 		*valp = xen_physcpu_mcg_cap(HDLPRIV(hdl));
1005 		break;
1006 
1007 	default:
1008 		return (CMIERR_NOTSUP);
1009 	}
1010 
1011 	return (CMI_SUCCESS);
1012 }
1013 
1014 /*
1015  * Request the hypervisor to write an MSR for us.  The hypervisor
1016  * will only accept MCA-related MSRs, as this is for MCA error
1017  * simulation purposes alone.  We will pre-screen MSRs for injection
1018  * so we don't bother the HV with bogus requests.  We will permit
1019  * injection to any MCA bank register, and to MCG_STATUS.
1020  */
1021 
1022 #define	IS_MCA_INJ_MSR(msr) \
1023 	(((msr) >= IA32_MSR_MC(0, CTL) && (msr) <= IA32_MSR_MC(10, MISC)) || \
1024 	(msr) == IA32_MSR_MCG_STATUS)
1025 
1026 static cmi_errno_t
1027 xpv_wrmsr_cmn(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val, boolean_t intpose)
1028 {
1029 	xen_mc_t xmc;
1030 	struct xen_mc_msrinject *mci = &xmc.u.mc_msrinject;
1031 
1032 	if (!(hdl->cmih_flags & CMIH_F_INJACTV))
1033 		return (CMIERR_NOTSUP);		/* for injection use only! */
1034 
1035 	if (!IS_MCA_INJ_MSR(msr))
1036 		return (CMIERR_API);
1037 
1038 	if (panicstr)
1039 		return (CMIERR_DEADLOCK);
1040 
1041 	mci->mcinj_cpunr = xen_physcpu_logical_id(HDLPRIV(hdl));
1042 	mci->mcinj_flags = intpose ? MC_MSRINJ_F_INTERPOSE : 0;
1043 	mci->mcinj_count = 1;	/* learn to batch sometime */
1044 	mci->mcinj_msr[0].reg = msr;
1045 	mci->mcinj_msr[0].value = val;
1046 
1047 	return (HYPERVISOR_mca(XEN_MC_msrinject, &xmc) ==
1048 	    0 ?  CMI_SUCCESS : CMIERR_NOTSUP);
1049 }
1050 
1051 static cmi_errno_t
1052 xpv_wrmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
1053 {
1054 	return (xpv_wrmsr_cmn(hdl, msr, val, B_FALSE));
1055 }
1056 
1057 
1058 static cmi_errno_t
1059 xpv_msrinterpose(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
1060 {
1061 	return (xpv_wrmsr_cmn(hdl, msr, val, B_TRUE));
1062 }
1063 
1064 static void
1065 xpv_int(cmi_hdl_impl_t *hdl, int int_no)
1066 {
1067 	xen_mc_t xmc;
1068 	struct xen_mc_mceinject *mce = &xmc.u.mc_mceinject;
1069 
1070 	if (!(hdl->cmih_flags & CMIH_F_INJACTV))
1071 		return;
1072 
1073 	if (int_no != T_MCE) {
1074 		cmn_err(CE_WARN, "xpv_int: int_no %d unimplemented\n",
1075 		    int_no);
1076 	}
1077 
1078 	mce->mceinj_cpunr = xen_physcpu_logical_id(HDLPRIV(hdl));
1079 
1080 	(void) HYPERVISOR_mca(XEN_MC_mceinject, &xmc);
1081 }
1082 
1083 static int
1084 xpv_online(cmi_hdl_impl_t *hdl, int new_status, int *old_status)
1085 {
1086 	xen_sysctl_t xs;
1087 	int op, rc, status;
1088 
1089 	new_status &= ~P_FORCED;
1090 
1091 	switch (new_status) {
1092 	case P_STATUS:
1093 		op = XEN_SYSCTL_CPU_HOTPLUG_STATUS;
1094 		break;
1095 	case P_FAULTED:
1096 	case P_OFFLINE:
1097 		op = XEN_SYSCTL_CPU_HOTPLUG_OFFLINE;
1098 		break;
1099 	case P_ONLINE:
1100 		op = XEN_SYSCTL_CPU_HOTPLUG_ONLINE;
1101 		break;
1102 	default:
1103 		return (-1);
1104 	}
1105 
1106 	xs.cmd = XEN_SYSCTL_cpu_hotplug;
1107 	xs.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
1108 	xs.u.cpu_hotplug.cpu = xen_physcpu_logical_id(HDLPRIV(hdl));
1109 	xs.u.cpu_hotplug.op = op;
1110 
1111 	if ((rc = HYPERVISOR_sysctl(&xs)) >= 0) {
1112 		status = rc;
1113 		rc = 0;
1114 		switch (status) {
1115 		case XEN_CPU_HOTPLUG_STATUS_NEW:
1116 			*old_status = P_OFFLINE;
1117 			break;
1118 		case XEN_CPU_HOTPLUG_STATUS_OFFLINE:
1119 			*old_status = P_FAULTED;
1120 			break;
1121 		case XEN_CPU_HOTPLUG_STATUS_ONLINE:
1122 			*old_status = P_ONLINE;
1123 			break;
1124 		default:
1125 			return (-1);
1126 		}
1127 	}
1128 
1129 	return (-rc);
1130 }
1131 
1132 #endif
1133 
1134 /*ARGSUSED*/
1135 static void *
1136 cpu_search(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
1137     uint_t strandid)
1138 {
1139 #ifdef __xpv
1140 	xen_mc_lcpu_cookie_t cpi;
1141 
1142 	for (cpi = xen_physcpu_next(NULL); cpi != NULL;
1143 	    cpi = xen_physcpu_next(cpi)) {
1144 		if (xen_physcpu_chipid(cpi) == chipid &&
1145 		    xen_physcpu_coreid(cpi) == coreid &&
1146 		    xen_physcpu_strandid(cpi) == strandid)
1147 			return ((void *)cpi);
1148 	}
1149 	return (NULL);
1150 
1151 #else	/* __xpv */
1152 
1153 	cpu_t *cp, *startcp;
1154 
1155 	kpreempt_disable();
1156 	cp = startcp = CPU;
1157 	do {
1158 		if (cmi_ntv_hwchipid(cp) == chipid &&
1159 		    cmi_ntv_hwcoreid(cp) == coreid &&
1160 		    cmi_ntv_hwstrandid(cp) == strandid) {
1161 			kpreempt_enable();
1162 			return ((void *)cp);
1163 		}
1164 
1165 		cp = cp->cpu_next;
1166 	} while (cp != startcp);
1167 	kpreempt_enable();
1168 	return (NULL);
1169 #endif	/* __xpv */
1170 }
1171 
1172 static boolean_t
1173 cpu_is_cmt(void *priv)
1174 {
1175 #ifdef __xpv
1176 	return (xen_physcpu_is_cmt((xen_mc_lcpu_cookie_t)priv));
1177 #else /* __xpv */
1178 	cpu_t *cp = (cpu_t *)priv;
1179 
1180 	int strands_per_core = cpuid_get_ncpu_per_chip(cp) /
1181 	    cpuid_get_ncore_per_chip(cp);
1182 
1183 	return (strands_per_core > 1);
1184 #endif /* __xpv */
1185 }
1186 
1187 /*
1188  * Find the handle entry of a given cpu identified by a <chip,core,strand>
1189  * tuple.
1190  */
1191 static cmi_hdl_ent_t *
1192 cmi_hdl_ent_lookup(uint_t chipid, uint_t coreid, uint_t strandid)
1193 {
1194 	/*
1195 	 * Allocate per-chip table which contains a list of handle of
1196 	 * all strands of the chip.
1197 	 */
1198 	if (cmi_chip_tab[chipid] == NULL) {
1199 		size_t sz;
1200 		cmi_hdl_ent_t *pg;
1201 
1202 		sz = CMI_MAX_STRANDS_PER_CHIP * sizeof (cmi_hdl_ent_t);
1203 		pg = kmem_zalloc(sz, KM_SLEEP);
1204 
1205 		/* test and set the per-chip table if it is not allocated */
1206 		if (atomic_cas_ptr(&cmi_chip_tab[chipid], NULL, pg) != NULL)
1207 			kmem_free(pg, sz); /* someone beat us */
1208 	}
1209 
1210 	return (cmi_chip_tab[chipid] + CMI_HDL_ARR_IDX(coreid, strandid));
1211 }
1212 
1213 cmi_hdl_t
1214 cmi_hdl_create(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
1215     uint_t strandid)
1216 {
1217 	cmi_hdl_impl_t *hdl;
1218 	void *priv;
1219 	cmi_hdl_ent_t *ent;
1220 
1221 #ifdef __xpv
1222 	ASSERT(class == CMI_HDL_SOLARIS_xVM_MCA);
1223 #else
1224 	ASSERT(class == CMI_HDL_NATIVE);
1225 #endif
1226 
1227 	if (chipid > CMI_MAX_CHIPID ||
1228 	    coreid > CMI_MAX_CORES_PER_CHIP - 1 ||
1229 	    strandid > CMI_MAX_STRANDS_PER_CORE - 1)
1230 		return (NULL);
1231 
1232 	if ((priv = cpu_search(class, chipid, coreid, strandid)) == NULL)
1233 		return (NULL);
1234 
1235 	hdl = kmem_zalloc(sizeof (*hdl), KM_SLEEP);
1236 
1237 	hdl->cmih_class = class;
1238 	HDLOPS(hdl) = &cmi_hdl_ops;
1239 	hdl->cmih_chipid = chipid;
1240 	hdl->cmih_coreid = coreid;
1241 	hdl->cmih_strandid = strandid;
1242 	hdl->cmih_mstrand = cpu_is_cmt(priv);
1243 	hdl->cmih_hdlpriv = priv;
1244 #ifdef __xpv
1245 	hdl->cmih_msrsrc = CMI_MSR_FLAG_RD_INTERPOSEOK |
1246 	    CMI_MSR_FLAG_WR_INTERPOSEOK;
1247 
1248 	/*
1249 	 * XXX: need hypervisor support for procnodeid, for now assume
1250 	 * single-node processors (procnodeid = chipid)
1251 	 */
1252 	hdl->cmih_procnodeid = xen_physcpu_chipid((xen_mc_lcpu_cookie_t)priv);
1253 	hdl->cmih_procnodes_per_pkg = 1;
1254 #else   /* __xpv */
1255 	hdl->cmih_msrsrc = CMI_MSR_FLAG_RD_HWOK | CMI_MSR_FLAG_RD_INTERPOSEOK |
1256 	    CMI_MSR_FLAG_WR_HWOK | CMI_MSR_FLAG_WR_INTERPOSEOK;
1257 	hdl->cmih_procnodeid = cpuid_get_procnodeid((cpu_t *)priv);
1258 	hdl->cmih_procnodes_per_pkg =
1259 	    cpuid_get_procnodes_per_pkg((cpu_t *)priv);
1260 #endif  /* __xpv */
1261 
1262 	ent = cmi_hdl_ent_lookup(chipid, coreid, strandid);
1263 	if (ent->cmae_refcnt != 0 || ent->cmae_hdlp != NULL) {
1264 		/*
1265 		 * Somehow this (chipid, coreid, strandid) id tuple has
1266 		 * already been assigned!  This indicates that the
1267 		 * caller's logic in determining these values is busted,
1268 		 * or perhaps undermined by bad BIOS setup.  Complain,
1269 		 * and refuse to initialize this tuple again as bad things
1270 		 * will happen.
1271 		 */
1272 		cmn_err(CE_NOTE, "cmi_hdl_create: chipid %d coreid %d "
1273 		    "strandid %d handle already allocated!",
1274 		    chipid, coreid, strandid);
1275 		kmem_free(hdl, sizeof (*hdl));
1276 		return (NULL);
1277 	}
1278 
1279 	/*
1280 	 * Once we store a nonzero reference count others can find this
1281 	 * handle via cmi_hdl_lookup etc.  This initial hold on the handle
1282 	 * is to be dropped only if some other part of cmi initialization
1283 	 * fails or, if it succeeds, at later cpu deconfigure.  Note that
1284 	 * the module private data we hold in cmih_cmi and cmih_cmidata
1285 	 * is still NULL at this point (the caller will fill it with
1286 	 * cmi_hdl_setcmi if it initializes) so consumers of handles
1287 	 * should always be ready for that possibility.
1288 	 */
1289 	ent->cmae_hdlp = hdl;
1290 	hdl->cmih_refcntp = &ent->cmae_refcnt;
1291 	ent->cmae_refcnt = 1;
1292 
1293 	return ((cmi_hdl_t)hdl);
1294 }
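/*
 * Illustrative creation/lookup sketch (not part of this file): cpu
 * startup code creates the handle, and later consumers find it:
 *
 *	cmi_hdl_t hdl;
 *
 *	hdl = cmi_hdl_create(CMI_HDL_NATIVE, chipid, coreid, strandid);
 *	...
 *	if ((hdl = cmi_hdl_lookup(CMI_HDL_NEUTRAL, chipid, coreid,
 *	    strandid)) != NULL) {
 *		...
 *		cmi_hdl_rele(hdl);
 *	}
 */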
1295 
1296 void
1297 cmi_read_smbios(cmi_hdl_t ophdl)
1298 {
1300 	uint_t strand_apicid = UINT_MAX;
1301 	uint_t chip_inst = UINT_MAX;
1302 	uint16_t smb_id = USHRT_MAX;
1303 	int rc = 0;
1304 
1305 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1306 
1307 	/* set x86gentopo compatibility */
1308 	fm_smb_fmacompat();
1309 
1310 #ifndef __xpv
1311 	strand_apicid = ntv_strand_apicid(hdl);
1312 #else
1313 	strand_apicid = xpv_strand_apicid(hdl);
1314 #endif
1315 
1316 	if (!x86gentopo_legacy) {
1317 		/*
1318 		 * If fm_smb_chipinst() or fm_smb_bboard() fails,
1319 		 * topo reverts to legacy mode
1320 		 */
1321 		rc = fm_smb_chipinst(strand_apicid, &chip_inst, &smb_id);
1322 		if (rc == 0) {
1323 			hdl->cmih_smb_chipid = chip_inst;
1324 			hdl->cmih_smbiosid = smb_id;
1325 		} else {
1326 #ifdef DEBUG
1327 			cmn_err(CE_NOTE, "!cmi read of smbios chip info failed");
1328 #endif /* DEBUG */
1329 			return;
1330 		}
1331 
1332 		hdl->cmih_smb_bboard = fm_smb_bboard(strand_apicid);
1333 #ifdef DEBUG
1334 		if (hdl->cmih_smb_bboard == NULL)
1335 			cmn_err(CE_NOTE,
1336 			    "!cmi reads smbios base boards info failed");
1337 #endif /* DEBUG */
1338 	}
1339 }
1340 
1341 void
1342 cmi_hdl_hold(cmi_hdl_t ophdl)
1343 {
1344 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1345 
1346 	ASSERT(*hdl->cmih_refcntp != 0); /* must not be the initial hold */
1347 
1348 	atomic_inc_32(hdl->cmih_refcntp);
1349 }
1350 
1351 static int
1352 cmi_hdl_canref(cmi_hdl_ent_t *ent)
1353 {
1354 	volatile uint32_t *refcntp;
1355 	uint32_t refcnt;
1356 
1357 	refcntp = &ent->cmae_refcnt;
1358 	refcnt = *refcntp;
1359 
1360 	if (refcnt == 0) {
1361 		/*
1362 		 * Associated object never existed, is being destroyed,
1363 		 * or has been destroyed.
1364 		 */
1365 		return (0);
1366 	}
1367 
1368 	/*
1369 	 * We cannot use atomic increment here because once the reference
1370 	 * count reaches zero it must never be bumped up again.
1371 	 */
1372 	while (refcnt != 0) {
1373 		if (atomic_cas_32(refcntp, refcnt, refcnt + 1) == refcnt)
1374 			return (1);
1375 		refcnt = *refcntp;
1376 	}
1377 
1378 	/*
1379 	 * Somebody dropped the reference count to 0 after our initial
1380 	 * check.
1381 	 */
1382 	return (0);
1383 }
1384 
1385 
1386 void
1387 cmi_hdl_rele(cmi_hdl_t ophdl)
1388 {
1389 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1390 	cmi_hdl_ent_t *ent;
1391 
1392 	ASSERT(*hdl->cmih_refcntp > 0);
1393 
1394 	if (atomic_dec_32_nv(hdl->cmih_refcntp) > 0)
1395 		return;
1396 
1397 	ent = cmi_hdl_ent_lookup(hdl->cmih_chipid, hdl->cmih_coreid,
1398 	    hdl->cmih_strandid);
1399 	ent->cmae_hdlp = NULL;
1400 
1401 	kmem_free(hdl, sizeof (*hdl));
1402 }
1403 
1404 void
1405 cmi_hdl_setspecific(cmi_hdl_t ophdl, void *arg)
1406 {
1407 	IMPLHDL(ophdl)->cmih_spec = arg;
1408 }
1409 
1410 void *
1411 cmi_hdl_getspecific(cmi_hdl_t ophdl)
1412 {
1413 	return (IMPLHDL(ophdl)->cmih_spec);
1414 }
1415 
1416 void
1417 cmi_hdl_setmc(cmi_hdl_t ophdl, const struct cmi_mc_ops *mcops, void *mcdata)
1418 {
1419 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1420 
1421 	ASSERT(hdl->cmih_mcops == NULL && hdl->cmih_mcdata == NULL);
1422 	hdl->cmih_mcops = mcops;
1423 	hdl->cmih_mcdata = mcdata;
1424 }
1425 
1426 const struct cmi_mc_ops *
1427 cmi_hdl_getmcops(cmi_hdl_t ophdl)
1428 {
1429 	return (IMPLHDL(ophdl)->cmih_mcops);
1430 }
1431 
1432 void *
1433 cmi_hdl_getmcdata(cmi_hdl_t ophdl)
1434 {
1435 	return (IMPLHDL(ophdl)->cmih_mcdata);
1436 }
1437 
1438 cmi_hdl_t
1439 cmi_hdl_lookup(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
1440     uint_t strandid)
1441 {
1442 	cmi_hdl_ent_t *ent;
1443 
1444 	if (chipid > CMI_MAX_CHIPID ||
1445 	    coreid > CMI_MAX_CORES_PER_CHIP - 1 ||
1446 	    strandid > CMI_MAX_STRANDS_PER_CORE - 1)
1447 		return (NULL);
1448 
1449 	ent = cmi_hdl_ent_lookup(chipid, coreid, strandid);
1450 
1451 	if (class == CMI_HDL_NEUTRAL)
1452 #ifdef __xpv
1453 		class = CMI_HDL_SOLARIS_xVM_MCA;
1454 #else
1455 		class = CMI_HDL_NATIVE;
1456 #endif
1457 
1458 	if (!cmi_hdl_canref(ent))
1459 		return (NULL);
1460 
1461 	if (ent->cmae_hdlp->cmih_class != class) {
1462 		cmi_hdl_rele((cmi_hdl_t)ent->cmae_hdlp);
1463 		return (NULL);
1464 	}
1465 
1466 	return ((cmi_hdl_t)ent->cmae_hdlp);
1467 }
1468 
1469 cmi_hdl_t
1470 cmi_hdl_any(void)
1471 {
1472 	int i, j;
1473 	cmi_hdl_ent_t *ent;
1474 
1475 	for (i = 0; i < CMI_CHIPID_ARR_SZ; i++) {
1476 		if (cmi_chip_tab[i] == NULL)
1477 			continue;
1478 		for (j = 0, ent = cmi_chip_tab[i]; j < CMI_MAX_STRANDS_PER_CHIP;
1479 		    j++, ent++) {
1480 			if (cmi_hdl_canref(ent))
1481 				return ((cmi_hdl_t)ent->cmae_hdlp);
1482 		}
1483 	}
1484 
1485 	return (NULL);
1486 }
1487 
1488 void
1489 cmi_hdl_walk(int (*cbfunc)(cmi_hdl_t, void *, void *, void *),
1490     void *arg1, void *arg2, void *arg3)
1491 {
1492 	int i, j;
1493 	cmi_hdl_ent_t *ent;
1494 
1495 	for (i = 0; i < CMI_CHIPID_ARR_SZ; i++) {
1496 		if (cmi_chip_tab[i] == NULL)
1497 			continue;
1498 		for (j = 0, ent = cmi_chip_tab[i]; j < CMI_MAX_STRANDS_PER_CHIP;
1499 		    j++, ent++) {
1500 			if (cmi_hdl_canref(ent)) {
1501 				cmi_hdl_impl_t *hdl = ent->cmae_hdlp;
1502 				if ((*cbfunc)((cmi_hdl_t)hdl, arg1, arg2, arg3)
1503 				    == CMI_HDL_WALK_DONE) {
1504 					cmi_hdl_rele((cmi_hdl_t)hdl);
1505 					return;
1506 				}
1507 				cmi_hdl_rele((cmi_hdl_t)hdl);
1508 			}
1509 		}
1510 	}
1511 }
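/*
 * Illustrative walk callback (a sketch; assumes the CMI_HDL_WALK_NEXT/
 * CMI_HDL_WALK_DONE return convention of cpu_module.h):
 *
 *	static int
 *	count_hdl(cmi_hdl_t whdl, void *arg1, void *arg2, void *arg3)
 *	{
 *		(*(uint_t *)arg1)++;
 *		return (CMI_HDL_WALK_NEXT);
 *	}
 *
 *	cmi_hdl_walk(count_hdl, (void *)&cnt, NULL, NULL);
 */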
1512 
1513 void
1514 cmi_hdl_setcmi(cmi_hdl_t ophdl, void *cmi, void *cmidata)
1515 {
1516 	IMPLHDL(ophdl)->cmih_cmidata = cmidata;
1517 	IMPLHDL(ophdl)->cmih_cmi = cmi;
1518 }
1519 
1520 void *
1521 cmi_hdl_getcmi(cmi_hdl_t ophdl)
1522 {
1523 	return (IMPLHDL(ophdl)->cmih_cmi);
1524 }
1525 
1526 void *
1527 cmi_hdl_getcmidata(cmi_hdl_t ophdl)
1528 {
1529 	return (IMPLHDL(ophdl)->cmih_cmidata);
1530 }
1531 
1532 enum cmi_hdl_class
1533 cmi_hdl_class(cmi_hdl_t ophdl)
1534 {
1535 	return (IMPLHDL(ophdl)->cmih_class);
1536 }
1537 
1538 #define	CMI_HDL_OPFUNC(what, type)				\
1539 	type							\
1540 	cmi_hdl_##what(cmi_hdl_t ophdl)				\
1541 	{							\
1542 		return (HDLOPS(IMPLHDL(ophdl))->		\
1543 		    cmio_##what(IMPLHDL(ophdl)));		\
1544 	}
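/*
 * For example, CMI_HDL_OPFUNC(vendor, uint_t) below expands to
 *
 *	uint_t
 *	cmi_hdl_vendor(cmi_hdl_t ophdl)
 *	{
 *		return (HDLOPS(IMPLHDL(ophdl))->cmio_vendor(IMPLHDL(ophdl)));
 *	}
 */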
1545 
1546 CMI_HDL_OPFUNC(vendor, uint_t)
1547 CMI_HDL_OPFUNC(vendorstr, const char *)
1548 CMI_HDL_OPFUNC(family, uint_t)
1549 CMI_HDL_OPFUNC(model, uint_t)
1550 CMI_HDL_OPFUNC(stepping, uint_t)
1551 CMI_HDL_OPFUNC(chipid, uint_t)
1552 CMI_HDL_OPFUNC(procnodeid, uint_t)
1553 CMI_HDL_OPFUNC(coreid, uint_t)
1554 CMI_HDL_OPFUNC(strandid, uint_t)
1555 CMI_HDL_OPFUNC(procnodes_per_pkg, uint_t)
1556 CMI_HDL_OPFUNC(strand_apicid, uint_t)
1557 CMI_HDL_OPFUNC(chiprev, uint32_t)
1558 CMI_HDL_OPFUNC(chiprevstr, const char *)
1559 CMI_HDL_OPFUNC(getsockettype, uint32_t)
1560 CMI_HDL_OPFUNC(getsocketstr, const char *)
1561 CMI_HDL_OPFUNC(logical_id, id_t)
1562 CMI_HDL_OPFUNC(smbiosid, uint16_t)
1563 CMI_HDL_OPFUNC(smb_chipid, uint_t)
1564 CMI_HDL_OPFUNC(smb_bboard, nvlist_t *)
1565 
1566 boolean_t
1567 cmi_hdl_is_cmt(cmi_hdl_t ophdl)
1568 {
1569 	return (IMPLHDL(ophdl)->cmih_mstrand);
1570 }
1571 
1572 void
1573 cmi_hdl_int(cmi_hdl_t ophdl, int num)
1574 {
1575 	if (HDLOPS(IMPLHDL(ophdl))->cmio_int == NULL)
1576 		return;
1577 
1578 	cmi_hdl_inj_begin(ophdl);
1579 	HDLOPS(IMPLHDL(ophdl))->cmio_int(IMPLHDL(ophdl), num);
1580 	cmi_hdl_inj_end(NULL);
1581 }
1582 
1583 int
1584 cmi_hdl_online(cmi_hdl_t ophdl, int new_status, int *old_status)
1585 {
1586 	return (HDLOPS(IMPLHDL(ophdl))->cmio_online(IMPLHDL(ophdl),
1587 	    new_status, old_status));
1588 }
1589 
1590 #ifndef	__xpv
1591 /*
1592  * Return hardware chip instance; cpuid_get_chipid provides this directly.
1593  */
1594 uint_t
1595 cmi_ntv_hwchipid(cpu_t *cp)
1596 {
1597 	return (cpuid_get_chipid(cp));
1598 }
1599 
1600 /*
1601  * Return hardware node instance; cpuid_get_procnodeid provides this directly.
1602  */
1603 uint_t
1604 cmi_ntv_hwprocnodeid(cpu_t *cp)
1605 {
1606 	return (cpuid_get_procnodeid(cp));
1607 }
1608 
1609 /*
1610  * Return core instance within a single chip.
1611  */
1612 uint_t
1613 cmi_ntv_hwcoreid(cpu_t *cp)
1614 {
1615 	return (cpuid_get_pkgcoreid(cp));
1616 }
1617 
1618 /*
1619  * Return strand number within a single core.  cpuid_get_clogid numbers
1620  * all execution units (strands, or cores in unstranded models) sequentially
1621  * within a single chip.
1622  */
1623 uint_t
1624 cmi_ntv_hwstrandid(cpu_t *cp)
1625 {
1626 	int strands_per_core = cpuid_get_ncpu_per_chip(cp) /
1627 	    cpuid_get_ncore_per_chip(cp);
1628 
1629 	return (cpuid_get_clogid(cp) % strands_per_core);
1630 }
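/*
 * Worked example (illustrative): a chip reporting 8 cpus across 4 cores
 * has 2 strands per core, so the cpu with clogid 5 is strand 5 % 2 = 1
 * within its core.
 */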
1631 #endif	/* __xpv */
1632 
1633 void
1634 cmi_hdlconf_rdmsr_nohw(cmi_hdl_t ophdl)
1635 {
1636 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1637 
1638 	hdl->cmih_msrsrc &= ~CMI_MSR_FLAG_RD_HWOK;
1639 }
1640 
1641 void
1642 cmi_hdlconf_wrmsr_nohw(cmi_hdl_t ophdl)
1643 {
1644 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1645 
1646 	hdl->cmih_msrsrc &= ~CMI_MSR_FLAG_WR_HWOK;
1647 }
1648 
1649 cmi_errno_t
1650 cmi_hdl_rdmsr(cmi_hdl_t ophdl, uint_t msr, uint64_t *valp)
1651 {
1652 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1653 
1654 	/*
1655 	 * Regardless of the handle class, we first check for an
1656 	 * interposed value.  In the xVM case you probably want to
1657 	 * place interposed values within the hypervisor itself, but
1658 	 * we still allow interposing them in dom0 for test and bringup
1659 	 * purposes.
1660 	 */
1661 	if ((hdl->cmih_msrsrc & CMI_MSR_FLAG_RD_INTERPOSEOK) &&
1662 	    msri_lookup(hdl, msr, valp))
1663 		return (CMI_SUCCESS);
1664 
1665 	if (HDLOPS(hdl)->cmio_rdmsr == NULL)
1666 		return (CMIERR_NOTSUP);
1667 
1668 	return (HDLOPS(hdl)->cmio_rdmsr(hdl, msr, valp));
1669 }
1670 
1671 cmi_errno_t
1672 cmi_hdl_wrmsr(cmi_hdl_t ophdl, uint_t msr, uint64_t val)
1673 {
1674 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1675 
1676 	/* Invalidate any interposed value */
1677 	msri_rment(hdl, msr);
1678 
1679 	if (HDLOPS(hdl)->cmio_wrmsr == NULL)
1680 		return (CMI_SUCCESS);	/* pretend all is ok */
1681 
1682 	return (HDLOPS(hdl)->cmio_wrmsr(hdl, msr, val));
1683 }
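/*
 * Illustrative read-modify-write via the accessors above (a sketch;
 * MCG_STATUS_MCIP is assumed to be the machine-check-in-progress bit
 * from mca_x86.h):
 *
 *	uint64_t mcg;
 *
 *	if (cmi_hdl_rdmsr(hdl, IA32_MSR_MCG_STATUS, &mcg) == CMI_SUCCESS &&
 *	    (mcg & MCG_STATUS_MCIP))
 *		(void) cmi_hdl_wrmsr(hdl, IA32_MSR_MCG_STATUS, 0);
 */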
1684 
1685 void
1686 cmi_hdl_enable_mce(cmi_hdl_t ophdl)
1687 {
1688 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1689 	ulong_t cr4;
1690 
1691 	if (HDLOPS(hdl)->cmio_getcr4 == NULL ||
1692 	    HDLOPS(hdl)->cmio_setcr4 == NULL)
1693 		return;
1694 
1695 	cr4 = HDLOPS(hdl)->cmio_getcr4(hdl);
1696 
1697 	HDLOPS(hdl)->cmio_setcr4(hdl, cr4 | CR4_MCE);
1698 }
1699 
1700 void
1701 cmi_hdl_msrinterpose(cmi_hdl_t ophdl, cmi_mca_regs_t *regs, uint_t nregs)
1702 {
1703 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1704 	int i;
1705 
1706 	if (HDLOPS(hdl)->cmio_msrinterpose == NULL)
1707 		return;
1708 
1709 	cmi_hdl_inj_begin(ophdl);
1710 
1711 	for (i = 0; i < nregs; i++, regs++)
1712 		HDLOPS(hdl)->cmio_msrinterpose(hdl, regs->cmr_msrnum,
1713 		    regs->cmr_msrval);
1714 
1715 	cmi_hdl_inj_end(ophdl);
1716 }
1717 
1718 /*ARGSUSED*/
1719 void
1720 cmi_hdl_msrforward(cmi_hdl_t ophdl, cmi_mca_regs_t *regs, uint_t nregs)
1721 {
1722 #ifdef __xpv
1723 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1724 	int i;
1725 
1726 	for (i = 0; i < nregs; i++, regs++)
1727 		msri_addent(hdl, regs->cmr_msrnum, regs->cmr_msrval);
1728 #endif
1729 }
1730 
1731 
1732 void
1733 cmi_pcird_nohw(void)
1734 {
1735 	cmi_pcicfg_flags &= ~CMI_PCICFG_FLAG_RD_HWOK;
1736 }
1737 
1738 void
1739 cmi_pciwr_nohw(void)
1740 {
1741 	cmi_pcicfg_flags &= ~CMI_PCICFG_FLAG_WR_HWOK;
1742 }
1743 
1744 static uint32_t
1745 cmi_pci_get_cmn(int bus, int dev, int func, int reg, int asz,
1746     int *interpose, ddi_acc_handle_t hdl)
1747 {
1748 	uint32_t val;
1749 
1750 	if (cmi_pcicfg_flags & CMI_PCICFG_FLAG_RD_INTERPOSEOK &&
1751 	    pcii_lookup(bus, dev, func, reg, asz, &val)) {
1752 		if (interpose)
1753 			*interpose = 1;
1754 		return (val);
1755 	}
1756 	if (interpose)
1757 		*interpose = 0;
1758 
1759 	if (!(cmi_pcicfg_flags & CMI_PCICFG_FLAG_RD_HWOK))
1760 		return (0);
1761 
1762 	switch (asz) {
1763 	case 1:
1764 		if (hdl)
1765 			val = pci_config_get8(hdl, (off_t)reg);
1766 		else
1767 			val = pci_cfgacc_get8(NULL, PCI_GETBDF(bus, dev, func),
1768 			    reg);
1769 		break;
1770 	case 2:
1771 		if (hdl)
1772 			val = pci_config_get16(hdl, (off_t)reg);
1773 		else
1774 			val = pci_cfgacc_get16(NULL, PCI_GETBDF(bus, dev, func),
1775 			    reg);
1776 		break;
1777 	case 4:
1778 		if (hdl)
1779 			val = pci_config_get32(hdl, (off_t)reg);
1780 		else
1781 			val = pci_cfgacc_get32(NULL, PCI_GETBDF(bus, dev, func),
1782 			    reg);
1783 		break;
1784 	default:
1785 		val = 0;
1786 	}
1787 	return (val);
1788 }
1789 
1790 uint8_t
1791 cmi_pci_getb(int bus, int dev, int func, int reg, int *interpose,
1792     ddi_acc_handle_t hdl)
1793 {
1794 	return ((uint8_t)cmi_pci_get_cmn(bus, dev, func, reg, 1, interpose,
1795 	    hdl));
1796 }
1797 
1798 uint16_t
1799 cmi_pci_getw(int bus, int dev, int func, int reg, int *interpose,
1800     ddi_acc_handle_t hdl)
1801 {
1802 	return ((uint16_t)cmi_pci_get_cmn(bus, dev, func, reg, 2, interpose,
1803 	    hdl));
1804 }
1805 
1806 uint32_t
1807 cmi_pci_getl(int bus, int dev, int func, int reg, int *interpose,
1808     ddi_acc_handle_t hdl)
1809 {
1810 	return (cmi_pci_get_cmn(bus, dev, func, reg, 4, interpose, hdl));
1811 }
1812 
1813 void
1814 cmi_pci_interposeb(int bus, int dev, int func, int reg, uint8_t val)
1815 {
1816 	pcii_addent(bus, dev, func, reg, val, 1);
1817 }
1818 
1819 void
1820 cmi_pci_interposew(int bus, int dev, int func, int reg, uint16_t val)
1821 {
1822 	pcii_addent(bus, dev, func, reg, val, 2);
1823 }
1824 
1825 void
1826 cmi_pci_interposel(int bus, int dev, int func, int reg, uint32_t val)
1827 {
1828 	pcii_addent(bus, dev, func, reg, val, 4);
1829 }
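/*
 * Illustrative interposition round trip (not part of this file):
 *
 *	int ip;
 *	uint32_t val;
 *
 *	cmi_pci_interposel(0, 24, 3, 0x44, 0xdeadbeef);
 *	val = cmi_pci_getl(0, 24, 3, 0x44, &ip, NULL);
 *
 * after which ip is 1 and val is 0xdeadbeef.
 */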
1830 
1831 static void
1832 cmi_pci_put_cmn(int bus, int dev, int func, int reg, int asz,
1833     ddi_acc_handle_t hdl, uint32_t val)
1834 {
1835 	/*
1836 	 * If there is an interposed value for this register invalidate it.
1837 	 */
1838 	pcii_rment(bus, dev, func, reg, asz);
1839 
1840 	if (!(cmi_pcicfg_flags & CMI_PCICFG_FLAG_WR_HWOK))
1841 		return;
1842 
1843 	switch (asz) {
1844 	case 1:
1845 		if (hdl)
1846 			pci_config_put8(hdl, (off_t)reg, (uint8_t)val);
1847 		else
1848 			pci_cfgacc_put8(NULL, PCI_GETBDF(bus, dev, func), reg,
1849 			    (uint8_t)val);
1850 		break;
1851 
1852 	case 2:
1853 		if (hdl)
1854 			pci_config_put16(hdl, (off_t)reg, (uint16_t)val);
1855 		else
1856 			pci_cfgacc_put16(NULL, PCI_GETBDF(bus, dev, func), reg,
1857 			    (uint16_t)val);
1858 		break;
1859 
1860 	case 4:
1861 		if (hdl)
1862 			pci_config_put32(hdl, (off_t)reg, val);
1863 		else
1864 			pci_cfgacc_put32(NULL, PCI_GETBDF(bus, dev, func), reg,
1865 			    val);
1866 		break;
1867 
1868 	default:
1869 		break;
1870 	}
1871 }
1872 
1873 void
1874 cmi_pci_putb(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
1875     uint8_t val)
1876 {
1877 	cmi_pci_put_cmn(bus, dev, func, reg, 1, hdl, val);
1878 }
1879 
1880 void
1881 cmi_pci_putw(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
1882     uint16_t val)
1883 {
1884 	cmi_pci_put_cmn(bus, dev, func, reg, 2, hdl, val);
1885 }
1886 
1887 void
1888 cmi_pci_putl(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
1889     uint32_t val)
1890 {
1891 	cmi_pci_put_cmn(bus, dev, func, reg, 4, hdl, val);
1892 }
1893 
1894 static const struct cmi_hdl_ops cmi_hdl_ops = {
1895 #ifdef __xpv
1896 	/*
1897 	 * CMI_HDL_SOLARIS_xVM_MCA - ops when we are an xVM dom0
1898 	 */
1899 	xpv_vendor,		/* cmio_vendor */
1900 	xpv_vendorstr,		/* cmio_vendorstr */
1901 	xpv_family,		/* cmio_family */
1902 	xpv_model,		/* cmio_model */
1903 	xpv_stepping,		/* cmio_stepping */
1904 	xpv_chipid,		/* cmio_chipid */
1905 	xpv_procnodeid,		/* cmio_procnodeid */
1906 	xpv_coreid,		/* cmio_coreid */
1907 	xpv_strandid,		/* cmio_strandid */
1908 	xpv_procnodes_per_pkg,	/* cmio_procnodes_per_pkg */
1909 	xpv_strand_apicid,	/* cmio_strand_apicid */
1910 	xpv_chiprev,		/* cmio_chiprev */
1911 	xpv_chiprevstr,		/* cmio_chiprevstr */
1912 	xpv_getsockettype,	/* cmio_getsockettype */
1913 	xpv_getsocketstr,	/* cmio_getsocketstr */
1914 	xpv_logical_id,		/* cmio_logical_id */
1915 	NULL,			/* cmio_getcr4 */
1916 	NULL,			/* cmio_setcr4 */
1917 	xpv_rdmsr,		/* cmio_rdmsr */
1918 	xpv_wrmsr,		/* cmio_wrmsr */
1919 	xpv_msrinterpose,	/* cmio_msrinterpose */
1920 	xpv_int,		/* cmio_int */
1921 	xpv_online,		/* cmio_online */
1922 	xpv_smbiosid,		/* cmio_smbiosid */
1923 	xpv_smb_chipid,		/* cmio_smb_chipid */
1924 	xpv_smb_bboard		/* cmio_smb_bboard */
1925 
1926 #else	/* __xpv */
1927 
1928 	/*
1929 	 * CMI_HDL_NATIVE - ops when apparently running on bare-metal
1930 	 */
1931 	ntv_vendor,		/* cmio_vendor */
1932 	ntv_vendorstr,		/* cmio_vendorstr */
1933 	ntv_family,		/* cmio_family */
1934 	ntv_model,		/* cmio_model */
1935 	ntv_stepping,		/* cmio_stepping */
1936 	ntv_chipid,		/* cmio_chipid */
1937 	ntv_procnodeid,		/* cmio_procnodeid */
1938 	ntv_coreid,		/* cmio_coreid */
1939 	ntv_strandid,		/* cmio_strandid */
1940 	ntv_procnodes_per_pkg,	/* cmio_procnodes_per_pkg */
1941 	ntv_strand_apicid,	/* cmio_strand_apicid */
1942 	ntv_chiprev,		/* cmio_chiprev */
1943 	ntv_chiprevstr,		/* cmio_chiprevstr */
1944 	ntv_getsockettype,	/* cmio_getsockettype */
1945 	ntv_getsocketstr,	/* cmio_getsocketstr */
1946 	ntv_logical_id,		/* cmio_logical_id */
1947 	ntv_getcr4,		/* cmio_getcr4 */
1948 	ntv_setcr4,		/* cmio_setcr4 */
1949 	ntv_rdmsr,		/* cmio_rdmsr */
1950 	ntv_wrmsr,		/* cmio_wrmsr */
1951 	ntv_msrinterpose,	/* cmio_msrinterpose */
1952 	ntv_int,		/* cmio_int */
1953 	ntv_online,		/* cmio_online */
1954 	ntv_smbiosid,		/* cmio_smbiosid */
1955 	ntv_smb_chipid,		/* cmio_smb_chipid */
1956 	ntv_smb_bboard		/* cmio_smb_bboard */
1957 #endif
1958 };
1959