1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * CPU Module Interface - hardware abstraction.
29  */
30 
31 #ifdef __xpv
32 #include <sys/xpv_user.h>
33 #endif
34 
35 #include <sys/types.h>
36 #include <sys/cpu_module.h>
37 #include <sys/kmem.h>
38 #include <sys/x86_archext.h>
39 #include <sys/cpuvar.h>
40 #include <sys/ksynch.h>
41 #include <sys/x_call.h>
42 #include <sys/pghw.h>
43 #include <sys/pci_cfgspace.h>
44 #include <sys/archsystm.h>
45 #include <sys/ontrap.h>
46 #include <sys/controlregs.h>
47 #include <sys/sunddi.h>
48 #include <sys/trap.h>
49 #include <sys/mca_x86.h>
50 #include <sys/processor.h>
51 #include <sys/cmn_err.h>
52 #include <sys/nvpair.h>
53 #include <sys/fm/util.h>
54 #include <sys/fm/protocol.h>
55 #include <sys/fm/smb/fmsmb.h>
56 #include <sys/cpu_module_impl.h>
57 
58 /*
59  * Variable which determines if the SMBIOS supports x86 generic topology, or
60  * if legacy topology enumeration will occur.
61  */
62 extern int x86gentopo_legacy;
63 
64 /*
65  * Outside of this file consumers use the opaque cmi_hdl_t.  This
66  * definition is duplicated in the generic_cpu mdb module, so keep
67  * them in sync when making changes.
68  */
69 typedef struct cmi_hdl_impl {
70 	enum cmi_hdl_class cmih_class;		/* Handle nature */
71 	const struct cmi_hdl_ops *cmih_ops;	/* Operations vector */
72 	uint_t cmih_chipid;			/* Chipid of cpu resource */
73 	uint_t cmih_coreid;			/* Core within die */
74 	uint_t cmih_strandid;			/* Thread within core */
75 	boolean_t cmih_mstrand;			/* cores are multithreaded */
76 	volatile uint32_t *cmih_refcntp;	/* Reference count pointer */
77 	uint64_t cmih_msrsrc;			/* MSR data source flags */
78 	void *cmih_hdlpriv;			/* cmi_hw.c private data */
79 	void *cmih_spec;			/* cmi_hdl_{set,get}_specific */
80 	void *cmih_cmi;				/* cpu mod control structure */
81 	void *cmih_cmidata;			/* cpu mod private data */
82 	const struct cmi_mc_ops *cmih_mcops;	/* Memory-controller ops */
83 	void *cmih_mcdata;			/* Memory-controller data */
84 	uint64_t cmih_flags;			/* See CMIH_F_* below */
85 	uint16_t cmih_smbiosid;			/* SMBIOS Type 4 struct ID */
86 	uint_t cmih_smb_chipid;			/* SMBIOS factored chipid */
87 	nvlist_t *cmih_smb_bboard;		/* SMBIOS bboard nvlist */
88 } cmi_hdl_impl_t;
89 
90 #define	IMPLHDL(ophdl)	((cmi_hdl_impl_t *)ophdl)
91 #define	HDLOPS(hdl)	((hdl)->cmih_ops)
92 
93 #define	CMIH_F_INJACTV		0x1ULL
94 
95 /*
96  * Ops structure for handle operations.
97  */
98 struct cmi_hdl_ops {
99 	/*
100 	 * These ops are required in an implementation.
101 	 */
102 	uint_t (*cmio_vendor)(cmi_hdl_impl_t *);
103 	const char *(*cmio_vendorstr)(cmi_hdl_impl_t *);
104 	uint_t (*cmio_family)(cmi_hdl_impl_t *);
105 	uint_t (*cmio_model)(cmi_hdl_impl_t *);
106 	uint_t (*cmio_stepping)(cmi_hdl_impl_t *);
107 	uint_t (*cmio_chipid)(cmi_hdl_impl_t *);
108 	uint_t (*cmio_coreid)(cmi_hdl_impl_t *);
109 	uint_t (*cmio_strandid)(cmi_hdl_impl_t *);
110 	uint_t (*cmio_strand_apicid)(cmi_hdl_impl_t *);
111 	uint32_t (*cmio_chiprev)(cmi_hdl_impl_t *);
112 	const char *(*cmio_chiprevstr)(cmi_hdl_impl_t *);
113 	uint32_t (*cmio_getsockettype)(cmi_hdl_impl_t *);
114 	const char *(*cmio_getsocketstr)(cmi_hdl_impl_t *);
115 
116 	id_t (*cmio_logical_id)(cmi_hdl_impl_t *);
117 	/*
118 	 * These ops are optional in an implementation.
119 	 */
120 	ulong_t (*cmio_getcr4)(cmi_hdl_impl_t *);
121 	void (*cmio_setcr4)(cmi_hdl_impl_t *, ulong_t);
122 	cmi_errno_t (*cmio_rdmsr)(cmi_hdl_impl_t *, uint_t, uint64_t *);
123 	cmi_errno_t (*cmio_wrmsr)(cmi_hdl_impl_t *, uint_t, uint64_t);
124 	cmi_errno_t (*cmio_msrinterpose)(cmi_hdl_impl_t *, uint_t, uint64_t);
125 	void (*cmio_int)(cmi_hdl_impl_t *, int);
126 	int (*cmio_online)(cmi_hdl_impl_t *, int, int *);
127 	uint16_t (*cmio_smbiosid)(cmi_hdl_impl_t *);
128 	uint_t (*cmio_smb_chipid)(cmi_hdl_impl_t *);
129 	nvlist_t *(*cmio_smb_bboard)(cmi_hdl_impl_t *);
130 };
131 
132 static const struct cmi_hdl_ops cmi_hdl_ops;
133 
134 /*
135  * Handles are looked up from contexts such as polling, injection, etc.,
136  * where the context is reasonably well defined (although a poller could
137  * interrupt any old thread holding any old lock).  They are also looked
138  * up by machine check handlers, which may strike at inconvenient times
139  * such as during handle initialization or destruction or during handle
140  * lookup (which the #MC handler itself will also have to perform).
141  *
142  * So keeping handles in a linked list makes locking difficult when we
143  * consider #MC handlers.  Our solution is to have a look-up table indexed
144  * by that which uniquely identifies a handle - chip/core/strand id -
145  * with each entry a structure including a pointer to a handle
146  * structure for the resource, and a reference count for the handle.
147  * Reference counts are modified atomically.  The public cmi_hdl_hold
148  * always succeeds because this can only be used after handle creation
149  * and before the call to destruct, so the hold count is already at least one.
150  * In other functions that look up a handle (cmi_hdl_lookup, cmi_hdl_any)
151  * we must be certain that the count has not already been decremented to zero
152  * before applying our hold.
153  *
154  * The table is an array sized by the maximum possible number of chips,
155  * CMI_CHIPID_ARR_SZ, and indexed by chip id.  If the chip is not present,
156  * the entry is NULL.  Each non-NULL entry points to a second-level array
157  * with an entry for every strand of that chip.  The per-chip table is
158  * CMI_MAX_STRANDS_PER_CHIP entries in size; it is allocated when we first
159  * want to populate an entry for the chip, and is filled in as its cpus start.
160  *
161  * Ideally we would allocate for only the actual number of chips, cores per
162  * chip and strands per core.  The number of chips is not known until all
163  * cpus have been enumerated, and the core and strand counts are only
164  * partially available, so for now we stick with the fixed sizing above.
165  */
166 #define	CMI_MAX_CHIPID_NBITS		6	/* max chipid of 63 */
167 #define	CMI_MAX_CORES_PER_CHIP_NBITS	4	/* 16 cores per chip max */
168 #define	CMI_MAX_STRANDS_PER_CORE_NBITS	3	/* 8 strands per core max */
169 
170 #define	CMI_MAX_CHIPID			((1 << (CMI_MAX_CHIPID_NBITS)) - 1)
171 #define	CMI_MAX_CORES_PER_CHIP		(1 << CMI_MAX_CORES_PER_CHIP_NBITS)
172 #define	CMI_MAX_STRANDS_PER_CORE	(1 << CMI_MAX_STRANDS_PER_CORE_NBITS)
173 #define	CMI_MAX_STRANDS_PER_CHIP	(CMI_MAX_CORES_PER_CHIP * \
174 					    CMI_MAX_STRANDS_PER_CORE)
175 
176 /*
177  * Handle array indexing within a per-chip table
178  *	[6:3] = Core in package,
179  *	[2:0] = Strand in core.
180  */
181 #define	CMI_HDL_ARR_IDX_CORE(coreid) \
182 	(((coreid) & (CMI_MAX_CORES_PER_CHIP - 1)) << \
183 	CMI_MAX_STRANDS_PER_CORE_NBITS)
184 
185 #define	CMI_HDL_ARR_IDX_STRAND(strandid) \
186 	(((strandid) & (CMI_MAX_STRANDS_PER_CORE - 1)))
187 
188 #define	CMI_HDL_ARR_IDX(coreid, strandid) \
189 	(CMI_HDL_ARR_IDX_CORE(coreid) | CMI_HDL_ARR_IDX_STRAND(strandid))
190 
191 #define	CMI_CHIPID_ARR_SZ		(1 << CMI_MAX_CHIPID_NBITS)
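
/*
 * For example (values chosen purely for illustration): with the limits
 * above, a handle for coreid 5, strandid 3 lands at per-chip index
 * CMI_HDL_ARR_IDX(5, 3) == (5 << 3) | 3 == 43.  Each per-chip table holds
 * CMI_MAX_STRANDS_PER_CHIP == 16 * 8 == 128 entries, and the first-level
 * table accommodates up to CMI_CHIPID_ARR_SZ == 64 chips.
 */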
192 
193 typedef struct cmi_hdl_ent {
194 	volatile uint32_t cmae_refcnt;
195 	cmi_hdl_impl_t *cmae_hdlp;
196 } cmi_hdl_ent_t;
197 
198 static cmi_hdl_ent_t *cmi_chip_tab[CMI_CHIPID_ARR_SZ];
199 
200 /*
201  * Controls where we will source PCI config space data.
202  */
203 #define	CMI_PCICFG_FLAG_RD_HWOK		0x0001
204 #define	CMI_PCICFG_FLAG_RD_INTERPOSEOK	0x0002
205 #define	CMI_PCICFG_FLAG_WR_HWOK		0x0004
206 #define	CMI_PCICFG_FLAG_WR_INTERPOSEOK	0x0008
207 
208 static uint64_t cmi_pcicfg_flags =
209     CMI_PCICFG_FLAG_RD_HWOK | CMI_PCICFG_FLAG_RD_INTERPOSEOK |
210     CMI_PCICFG_FLAG_WR_HWOK | CMI_PCICFG_FLAG_WR_INTERPOSEOK;
211 
212 /*
213  * The flags for individual cpus are kept in their per-cpu handle cmih_msrsrc
214  */
215 #define	CMI_MSR_FLAG_RD_HWOK		0x0001
216 #define	CMI_MSR_FLAG_RD_INTERPOSEOK	0x0002
217 #define	CMI_MSR_FLAG_WR_HWOK		0x0004
218 #define	CMI_MSR_FLAG_WR_INTERPOSEOK	0x0008
219 
220 int cmi_call_func_ntv_tries = 3;
221 
222 static cmi_errno_t
223 call_func_ntv(int cpuid, xc_func_t func, xc_arg_t arg1, xc_arg_t arg2)
224 {
225 	cmi_errno_t rc = -1;
226 	int i;
227 
228 	kpreempt_disable();
229 
230 	if (CPU->cpu_id == cpuid) {
231 		(*func)(arg1, arg2, (xc_arg_t)&rc);
232 	} else {
233 		/*
234 		 * This should not happen for a #MC trap or a poll, so
235 		 * this is likely an error injection or similar.
236 		 * We will try to cross call with xc_trycall - we
237 		 * can't guarantee success with xc_call because
238 		 * the interrupt code in the case of a #MC may
239 		 * already hold the xc mutex.
240 		 */
241 		for (i = 0; i < cmi_call_func_ntv_tries; i++) {
242 			cpuset_t cpus;
243 
244 			CPUSET_ONLY(cpus, cpuid);
245 			xc_priority(arg1, arg2, (xc_arg_t)&rc,
246 			    CPUSET2BV(cpus), func);
247 			if (rc != -1)
248 				break;
249 
250 			DELAY(1);
251 		}
252 	}
253 
254 	kpreempt_enable();
255 
256 	return (rc != -1 ? rc : CMIERR_DEADLOCK);
257 }
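
/*
 * The xc_func_t callees used with call_func_ntv all follow the same
 * convention: arg3 points to a cmi_errno_t which the callee fills in.
 * For example, using the native MSR read below,
 * call_func_ntv(cp->cpu_id, ntv_rdmsr_xc, (xc_arg_t)msr, (xc_arg_t)&val)
 * yields CMI_SUCCESS, CMIERR_NOTSUP or CMIERR_MSRGPF as set on the target
 * cpu, or CMIERR_DEADLOCK if the cross call could not be run at all.
 */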
258 
259 static uint64_t injcnt;
260 
261 void
262 cmi_hdl_inj_begin(cmi_hdl_t ophdl)
263 {
264 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
265 
266 	if (hdl != NULL)
267 		hdl->cmih_flags |= CMIH_F_INJACTV;
268 	if (injcnt++ == 0) {
269 		cmn_err(CE_NOTE, "Hardware error injection/simulation "
270 		    "activity noted");
271 	}
272 }
273 
274 void
275 cmi_hdl_inj_end(cmi_hdl_t ophdl)
276 {
277 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
278 
279 	ASSERT(hdl == NULL || hdl->cmih_flags & CMIH_F_INJACTV);
280 	if (hdl != NULL)
281 		hdl->cmih_flags &= ~CMIH_F_INJACTV;
282 }
283 
284 boolean_t
285 cmi_inj_tainted(void)
286 {
287 	return (injcnt != 0 ? B_TRUE : B_FALSE);
288 }
289 
290 /*
291  *	 =======================================================
292  *	|	MSR Interposition				|
293  *	|	-----------------				|
294  *	|							|
295  *	 -------------------------------------------------------
296  */
297 
298 #define	CMI_MSRI_HASHSZ		16
299 #define	CMI_MSRI_HASHIDX(hdl, msr) \
300 	((((uintptr_t)(hdl) >> 3) + (msr)) % (CMI_MSRI_HASHSZ - 1))
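
/*
 * A worked example with hypothetical values: for a handle at address 0x120
 * and msr 0x401 the bucket is ((0x120 >> 3) + 0x401) % 15 ==
 * (36 + 1025) % 15 == 11.  msri_addent, msri_lookup and msri_rment all use
 * this same macro, so an interposed value is always sought in the bucket
 * it was stashed in.
 */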
301 
302 struct cmi_msri_bkt {
303 	kmutex_t msrib_lock;
304 	struct cmi_msri_hashent *msrib_head;
305 };
306 
307 struct cmi_msri_hashent {
308 	struct cmi_msri_hashent *msrie_next;
309 	struct cmi_msri_hashent *msrie_prev;
310 	cmi_hdl_impl_t *msrie_hdl;
311 	uint_t msrie_msrnum;
312 	uint64_t msrie_msrval;
313 };
314 
315 #define	CMI_MSRI_MATCH(ent, hdl, req_msr) \
316 	((ent)->msrie_hdl == (hdl) && (ent)->msrie_msrnum == (req_msr))
317 
318 static struct cmi_msri_bkt msrihash[CMI_MSRI_HASHSZ];
319 
320 static void
321 msri_addent(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
322 {
323 	int idx = CMI_MSRI_HASHIDX(hdl, msr);
324 	struct cmi_msri_bkt *hbp = &msrihash[idx];
325 	struct cmi_msri_hashent *hep;
326 
327 	mutex_enter(&hbp->msrib_lock);
328 
329 	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
330 		if (CMI_MSRI_MATCH(hep, hdl, msr))
331 			break;
332 	}
333 
334 	if (hep != NULL) {
335 		hep->msrie_msrval = val;
336 	} else {
337 		hep = kmem_alloc(sizeof (*hep), KM_SLEEP);
338 		hep->msrie_hdl = hdl;
339 		hep->msrie_msrnum = msr;
340 		hep->msrie_msrval = val;
341 
342 		if (hbp->msrib_head != NULL)
343 			hbp->msrib_head->msrie_prev = hep;
344 		hep->msrie_next = hbp->msrib_head;
345 		hep->msrie_prev = NULL;
346 		hbp->msrib_head = hep;
347 	}
348 
349 	mutex_exit(&hbp->msrib_lock);
350 }
351 
352 /*
353  * Look for a match for the given handle and msr.  Return 1 with valp
354  * filled if a match is found, otherwise return 0 with valp untouched.
355  */
356 static int
357 msri_lookup(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
358 {
359 	int idx = CMI_MSRI_HASHIDX(hdl, msr);
360 	struct cmi_msri_bkt *hbp = &msrihash[idx];
361 	struct cmi_msri_hashent *hep;
362 
363 	/*
364 	 * This function is called during #MC trap handling, so we should
365 	 * consider the possibility that the hash mutex is held by the
366 	 * interrupted thread.  This should not happen because interposition
367 	 * is an artificial injection mechanism and the #MC is requested
368 	 * after adding entries, but just in case of a real #MC at an
369 	 * unlucky moment we'll use mutex_tryenter here.
370 	 */
371 	if (!mutex_tryenter(&hbp->msrib_lock))
372 		return (0);
373 
374 	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
375 		if (CMI_MSRI_MATCH(hep, hdl, msr)) {
376 			*valp = hep->msrie_msrval;
377 			break;
378 		}
379 	}
380 
381 	mutex_exit(&hbp->msrib_lock);
382 
383 	return (hep != NULL);
384 }
385 
386 /*
387  * Remove any interposed value that matches.
388  */
389 static void
390 msri_rment(cmi_hdl_impl_t *hdl, uint_t msr)
391 {
393 	int idx = CMI_MSRI_HASHIDX(hdl, msr);
394 	struct cmi_msri_bkt *hbp = &msrihash[idx];
395 	struct cmi_msri_hashent *hep;
396 
397 	if (!mutex_tryenter(&hbp->msrib_lock))
398 		return;
399 
400 	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
401 		if (CMI_MSRI_MATCH(hep, hdl, msr)) {
402 			if (hep->msrie_prev != NULL)
403 				hep->msrie_prev->msrie_next = hep->msrie_next;
404 
405 			if (hep->msrie_next != NULL)
406 				hep->msrie_next->msrie_prev = hep->msrie_prev;
407 
408 			if (hbp->msrib_head == hep)
409 				hbp->msrib_head = hep->msrie_next;
410 
411 			kmem_free(hep, sizeof (*hep));
412 			break;
413 		}
414 	}
415 
416 	mutex_exit(&hbp->msrib_lock);
417 }
418 
419 /*
420  *	 =======================================================
421  *	|	PCI Config Space Interposition			|
422  *	|	------------------------------			|
423  *	|							|
424  *	 -------------------------------------------------------
425  */
426 
427 /*
428  * Hash for interposed PCI config space values.  We look up on
429  * bus/dev/func/offset and record whether the value stashed was made with a
430  * byte, word or doubleword access; we will only return a hit for an access
431  * of the same size.  If you stash, say, a 32-bit register using byte accesses
432  * and then attempt to read the full 32-bit value back, you will not obtain
433  * any sort of merged result - you get a lookup miss.
434  */
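
/*
 * For instance (hypothetical bus/dev/func/reg values): after
 * pcii_addent(0, 24, 3, 0x40, 0xdeadbeef, 4) stashes a doubleword, a
 * byte-sized pcii_lookup(0, 24, 3, 0x40, 1, &v) misses, while the same
 * lookup with asz == 4 hits and returns 0xdeadbeef.
 */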
435 
436 #define	CMI_PCII_HASHSZ		16
437 #define	CMI_PCII_HASHIDX(b, d, f, o) \
438 	(((b) + (d) + (f) + (o)) % (CMI_PCII_HASHSZ - 1))
439 
440 struct cmi_pcii_bkt {
441 	kmutex_t pciib_lock;
442 	struct cmi_pcii_hashent *pciib_head;
443 };
444 
445 struct cmi_pcii_hashent {
446 	struct cmi_pcii_hashent *pcii_next;
447 	struct cmi_pcii_hashent *pcii_prev;
448 	int pcii_bus;
449 	int pcii_dev;
450 	int pcii_func;
451 	int pcii_reg;
452 	int pcii_asize;
453 	uint32_t pcii_val;
454 };
455 
456 #define	CMI_PCII_MATCH(ent, b, d, f, r, asz) \
457 	((ent)->pcii_bus == (b) && (ent)->pcii_dev == (d) && \
458 	(ent)->pcii_func == (f) && (ent)->pcii_reg == (r) && \
459 	(ent)->pcii_asize == (asz))
460 
461 static struct cmi_pcii_bkt pciihash[CMI_PCII_HASHSZ];
462 
463 
464 /*
465  * Add a new entry to the PCI interpose hash, overwriting any existing
466  * entry that is found.
467  */
468 static void
469 pcii_addent(int bus, int dev, int func, int reg, uint32_t val, int asz)
470 {
471 	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
472 	struct cmi_pcii_bkt *hbp = &pciihash[idx];
473 	struct cmi_pcii_hashent *hep;
474 
475 	cmi_hdl_inj_begin(NULL);
476 
477 	mutex_enter(&hbp->pciib_lock);
478 
479 	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
480 		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz))
481 			break;
482 	}
483 
484 	if (hep != NULL) {
485 		hep->pcii_val = val;
486 	} else {
487 		hep = kmem_alloc(sizeof (*hep), KM_SLEEP);
488 		hep->pcii_bus = bus;
489 		hep->pcii_dev = dev;
490 		hep->pcii_func = func;
491 		hep->pcii_reg = reg;
492 		hep->pcii_asize = asz;
493 		hep->pcii_val = val;
494 
495 		if (hbp->pciib_head != NULL)
496 			hbp->pciib_head->pcii_prev = hep;
497 		hep->pcii_next = hbp->pciib_head;
498 		hep->pcii_prev = NULL;
499 		hbp->pciib_head = hep;
500 	}
501 
502 	mutex_exit(&hbp->pciib_lock);
503 
504 	cmi_hdl_inj_end(NULL);
505 }
506 
507 /*
508  * Look for a match for the given bus/dev/func/reg; return 1 with valp
509  * filled if a match is found, otherwise return 0 with valp untouched.
510  */
511 static int
512 pcii_lookup(int bus, int dev, int func, int reg, int asz, uint32_t *valp)
513 {
514 	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
515 	struct cmi_pcii_bkt *hbp = &pciihash[idx];
516 	struct cmi_pcii_hashent *hep;
517 
518 	if (!mutex_tryenter(&hbp->pciib_lock))
519 		return (0);
520 
521 	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
522 		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz)) {
523 			*valp = hep->pcii_val;
524 			break;
525 		}
526 	}
527 
528 	mutex_exit(&hbp->pciib_lock);
529 
530 	return (hep != NULL);
531 }
532 
533 static void
534 pcii_rment(int bus, int dev, int func, int reg, int asz)
535 {
536 	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
537 	struct cmi_pcii_bkt *hbp = &pciihash[idx];
538 	struct cmi_pcii_hashent *hep;
539 
540 	mutex_enter(&hbp->pciib_lock);
541 
542 	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
543 		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz)) {
544 			if (hep->pcii_prev != NULL)
545 				hep->pcii_prev->pcii_next = hep->pcii_next;
546 
547 			if (hep->pcii_next != NULL)
548 				hep->pcii_next->pcii_prev = hep->pcii_prev;
549 
550 			if (hbp->pciib_head == hep)
551 				hbp->pciib_head = hep->pcii_next;
552 
553 			kmem_free(hep, sizeof (*hep));
554 			break;
555 		}
556 	}
557 
558 	mutex_exit(&hbp->pciib_lock);
559 }
560 
561 #ifndef __xpv
562 
563 /*
564  *	 =======================================================
565  *	|	Native methods					|
566  *	|	--------------					|
567  *	|							|
568  *	| These are used when we are running native on bare-	|
569  *	| metal, or simply don't know any better.		|
570  *	---------------------------------------------------------
571  */
572 
573 #define	HDLPRIV(hdl)	((cpu_t *)(hdl)->cmih_hdlpriv)
574 
575 static uint_t
576 ntv_vendor(cmi_hdl_impl_t *hdl)
577 {
578 	return (cpuid_getvendor(HDLPRIV(hdl)));
579 }
580 
581 static const char *
582 ntv_vendorstr(cmi_hdl_impl_t *hdl)
583 {
584 	return (cpuid_getvendorstr(HDLPRIV(hdl)));
585 }
586 
587 static uint_t
588 ntv_family(cmi_hdl_impl_t *hdl)
589 {
590 	return (cpuid_getfamily(HDLPRIV(hdl)));
591 }
592 
593 static uint_t
594 ntv_model(cmi_hdl_impl_t *hdl)
595 {
596 	return (cpuid_getmodel(HDLPRIV(hdl)));
597 }
598 
599 static uint_t
600 ntv_stepping(cmi_hdl_impl_t *hdl)
601 {
602 	return (cpuid_getstep(HDLPRIV(hdl)));
603 }
604 
605 static uint_t
606 ntv_chipid(cmi_hdl_impl_t *hdl)
607 {
608 	return (hdl->cmih_chipid);
610 }
611 
612 static uint_t
613 ntv_coreid(cmi_hdl_impl_t *hdl)
614 {
615 	return (hdl->cmih_coreid);
616 }
617 
618 static uint_t
619 ntv_strandid(cmi_hdl_impl_t *hdl)
620 {
621 	return (hdl->cmih_strandid);
622 }
623 
624 static uint_t
625 ntv_strand_apicid(cmi_hdl_impl_t *hdl)
626 {
627 	return (cpuid_get_apicid(HDLPRIV(hdl)));
628 }
629 
630 static uint16_t
631 ntv_smbiosid(cmi_hdl_impl_t *hdl)
632 {
633 	return (hdl->cmih_smbiosid);
634 }
635 
636 static uint_t
637 ntv_smb_chipid(cmi_hdl_impl_t *hdl)
638 {
639 	return (hdl->cmih_smb_chipid);
640 }
641 
642 static nvlist_t *
643 ntv_smb_bboard(cmi_hdl_impl_t *hdl)
644 {
645 	return (hdl->cmih_smb_bboard);
646 }
647 
648 static uint32_t
649 ntv_chiprev(cmi_hdl_impl_t *hdl)
650 {
651 	return (cpuid_getchiprev(HDLPRIV(hdl)));
652 }
653 
654 static const char *
655 ntv_chiprevstr(cmi_hdl_impl_t *hdl)
656 {
657 	return (cpuid_getchiprevstr(HDLPRIV(hdl)));
658 }
659 
660 static uint32_t
661 ntv_getsockettype(cmi_hdl_impl_t *hdl)
662 {
663 	return (cpuid_getsockettype(HDLPRIV(hdl)));
664 }
665 
666 static const char *
667 ntv_getsocketstr(cmi_hdl_impl_t *hdl)
668 {
669 	return (cpuid_getsocketstr(HDLPRIV(hdl)));
670 }
671 
672 static id_t
673 ntv_logical_id(cmi_hdl_impl_t *hdl)
674 {
675 	return (HDLPRIV(hdl)->cpu_id);
676 }
677 
678 /*ARGSUSED*/
679 static int
680 ntv_getcr4_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
681 {
682 	ulong_t *dest = (ulong_t *)arg1;
683 	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
684 
685 	*dest = getcr4();
686 	*rcp = CMI_SUCCESS;
687 
688 	return (0);
689 }
690 
691 static ulong_t
692 ntv_getcr4(cmi_hdl_impl_t *hdl)
693 {
694 	cpu_t *cp = HDLPRIV(hdl);
695 	ulong_t val;
696 
697 	(void) call_func_ntv(cp->cpu_id, ntv_getcr4_xc, (xc_arg_t)&val, NULL);
698 
699 	return (val);
700 }
701 
702 /*ARGSUSED*/
703 static int
704 ntv_setcr4_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
705 {
706 	ulong_t val = (ulong_t)arg1;
707 	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
708 
709 	setcr4(val);
710 	*rcp = CMI_SUCCESS;
711 
712 	return (0);
713 }
714 
715 static void
716 ntv_setcr4(cmi_hdl_impl_t *hdl, ulong_t val)
717 {
718 	cpu_t *cp = HDLPRIV(hdl);
719 
720 	(void) call_func_ntv(cp->cpu_id, ntv_setcr4_xc, (xc_arg_t)val, NULL);
721 }
722 
723 volatile uint32_t cmi_trapped_rdmsr;
724 
725 /*ARGSUSED*/
726 static int
727 ntv_rdmsr_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
728 {
729 	uint_t msr = (uint_t)arg1;
730 	uint64_t *valp = (uint64_t *)arg2;
731 	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
732 
733 	on_trap_data_t otd;
734 
735 	if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
736 		if (checked_rdmsr(msr, valp) == 0)
737 			*rcp = CMI_SUCCESS;
738 		else
739 			*rcp = CMIERR_NOTSUP;
740 	} else {
741 		*rcp = CMIERR_MSRGPF;
742 		atomic_inc_32(&cmi_trapped_rdmsr);
743 	}
744 	no_trap();
745 
746 	return (0);
747 }
748 
749 static cmi_errno_t
750 ntv_rdmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
751 {
752 	cpu_t *cp = HDLPRIV(hdl);
753 
754 	if (!(hdl->cmih_msrsrc & CMI_MSR_FLAG_RD_HWOK))
755 		return (CMIERR_INTERPOSE);
756 
757 	return (call_func_ntv(cp->cpu_id, ntv_rdmsr_xc,
758 	    (xc_arg_t)msr, (xc_arg_t)valp));
759 }
760 
761 volatile uint32_t cmi_trapped_wrmsr;
762 
763 /*ARGSUSED*/
764 static int
765 ntv_wrmsr_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
766 {
767 	uint_t msr = (uint_t)arg1;
768 	uint64_t val = *((uint64_t *)arg2);
769 	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
770 	on_trap_data_t otd;
771 
772 	if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
773 		if (checked_wrmsr(msr, val) == 0)
774 			*rcp = CMI_SUCCESS;
775 		else
776 			*rcp = CMIERR_NOTSUP;
777 	} else {
778 		*rcp = CMIERR_MSRGPF;
779 		atomic_inc_32(&cmi_trapped_wrmsr);
780 	}
781 	no_trap();
782 
783 	return (0);
785 }
786 
787 static cmi_errno_t
788 ntv_wrmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
789 {
790 	cpu_t *cp = HDLPRIV(hdl);
791 
792 	if (!(hdl->cmih_msrsrc & CMI_MSR_FLAG_WR_HWOK))
793 		return (CMI_SUCCESS);
794 
795 	return (call_func_ntv(cp->cpu_id, ntv_wrmsr_xc,
796 	    (xc_arg_t)msr, (xc_arg_t)&val));
797 }
798 
799 static cmi_errno_t
800 ntv_msrinterpose(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
801 {
802 	msri_addent(hdl, msr, val);
803 	return (CMI_SUCCESS);
804 }
805 
806 /*ARGSUSED*/
807 static int
808 ntv_int_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
809 {
810 	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
811 	int int_no = (int)arg1;
812 
813 	if (int_no == T_MCE)
814 		int18();
815 	else
816 		int_cmci();
817 	*rcp = CMI_SUCCESS;
818 
819 	return (0);
820 }
821 
822 static void
823 ntv_int(cmi_hdl_impl_t *hdl, int int_no)
824 {
825 	cpu_t *cp = HDLPRIV(hdl);
826 
827 	(void) call_func_ntv(cp->cpu_id, ntv_int_xc, (xc_arg_t)int_no, NULL);
828 }
829 
830 static int
831 ntv_online(cmi_hdl_impl_t *hdl, int new_status, int *old_status)
832 {
833 	processorid_t cpuid = HDLPRIV(hdl)->cpu_id;
834 
835 	return (p_online_internal(cpuid, new_status, old_status));
836 }
837 
838 #else	/* __xpv */
839 
840 /*
841  *	 =======================================================
842  *	|	xVM dom0 methods				|
843  *	|	----------------				|
844  *	|							|
845  *	| These are used when we are running as dom0 in		|
846  *	| a Solaris xVM context.				|
847  *	---------------------------------------------------------
848  */
849 
850 #define	HDLPRIV(hdl)	((xen_mc_lcpu_cookie_t)(hdl)->cmih_hdlpriv)
851 
852 extern uint_t _cpuid_vendorstr_to_vendorcode(char *);
853 
854 
855 static uint_t
856 xpv_vendor(cmi_hdl_impl_t *hdl)
857 {
858 	return (_cpuid_vendorstr_to_vendorcode((char *)xen_physcpu_vendorstr(
859 	    HDLPRIV(hdl))));
860 }
861 
862 static const char *
863 xpv_vendorstr(cmi_hdl_impl_t *hdl)
864 {
865 	return (xen_physcpu_vendorstr(HDLPRIV(hdl)));
866 }
867 
868 static uint_t
869 xpv_family(cmi_hdl_impl_t *hdl)
870 {
871 	return (xen_physcpu_family(HDLPRIV(hdl)));
872 }
873 
874 static uint_t
875 xpv_model(cmi_hdl_impl_t *hdl)
876 {
877 	return (xen_physcpu_model(HDLPRIV(hdl)));
878 }
879 
880 static uint_t
881 xpv_stepping(cmi_hdl_impl_t *hdl)
882 {
883 	return (xen_physcpu_stepping(HDLPRIV(hdl)));
884 }
885 
886 static uint_t
887 xpv_chipid(cmi_hdl_impl_t *hdl)
888 {
889 	return (hdl->cmih_chipid);
890 }
891 
892 static uint_t
893 xpv_coreid(cmi_hdl_impl_t *hdl)
894 {
895 	return (hdl->cmih_coreid);
896 }
897 
898 static uint_t
899 xpv_strandid(cmi_hdl_impl_t *hdl)
900 {
901 	return (hdl->cmih_strandid);
902 }
903 
904 static uint_t
905 xpv_strand_apicid(cmi_hdl_impl_t *hdl)
906 {
907 	return (xen_physcpu_initial_apicid(HDLPRIV(hdl)));
908 }
909 
910 static uint16_t
911 xpv_smbiosid(cmi_hdl_impl_t *hdl)
912 {
913 	return (hdl->cmih_smbiosid);
914 }
915 
916 static uint_t
917 xpv_smb_chipid(cmi_hdl_impl_t *hdl)
918 {
919 	return (hdl->cmih_smb_chipid);
920 }
921 
922 static nvlist_t *
923 xpv_smb_bboard(cmi_hdl_impl_t *hdl)
924 {
925 	return (hdl->cmih_smb_bboard);
926 }
927 
928 extern uint32_t _cpuid_chiprev(uint_t, uint_t, uint_t, uint_t);
929 
930 static uint32_t
931 xpv_chiprev(cmi_hdl_impl_t *hdl)
932 {
933 	return (_cpuid_chiprev(xpv_vendor(hdl), xpv_family(hdl),
934 	    xpv_model(hdl), xpv_stepping(hdl)));
935 }
936 
937 extern const char *_cpuid_chiprevstr(uint_t, uint_t, uint_t, uint_t);
938 
939 static const char *
940 xpv_chiprevstr(cmi_hdl_impl_t *hdl)
941 {
942 	return (_cpuid_chiprevstr(xpv_vendor(hdl), xpv_family(hdl),
943 	    xpv_model(hdl), xpv_stepping(hdl)));
944 }
945 
946 extern uint32_t _cpuid_skt(uint_t, uint_t, uint_t, uint_t);
947 
948 static uint32_t
949 xpv_getsockettype(cmi_hdl_impl_t *hdl)
950 {
951 	return (_cpuid_skt(xpv_vendor(hdl), xpv_family(hdl),
952 	    xpv_model(hdl), xpv_stepping(hdl)));
953 }
954 
955 extern const char *_cpuid_sktstr(uint_t, uint_t, uint_t, uint_t);
956 
957 static const char *
958 xpv_getsocketstr(cmi_hdl_impl_t *hdl)
959 {
960 	return (_cpuid_sktstr(xpv_vendor(hdl), xpv_family(hdl),
961 	    xpv_model(hdl), xpv_stepping(hdl)));
962 }
963 
964 static id_t
965 xpv_logical_id(cmi_hdl_impl_t *hdl)
966 {
967 	return (xen_physcpu_logical_id(HDLPRIV(hdl)));
968 }
969 
970 static cmi_errno_t
971 xpv_rdmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
972 {
973 	switch (msr) {
974 	case IA32_MSR_MCG_CAP:
975 		*valp = xen_physcpu_mcg_cap(HDLPRIV(hdl));
976 		break;
977 
978 	default:
979 		return (CMIERR_NOTSUP);
980 	}
981 
982 	return (CMI_SUCCESS);
983 }
984 
985 /*
986  * Request the hypervisor to write an MSR for us.  The hypervisor
987  * will only accept MCA-related MSRs, as this is for MCA error
988  * simulation purposes alone.  We will pre-screen MSRs for injection
989  * so we don't bother the HV with bogus requests.  We will permit
990  * injection to any MCA bank register, and to MCG_STATUS.
991  */
992 
993 #define	IS_MCA_INJ_MSR(msr) \
994 	(((msr) >= IA32_MSR_MC(0, CTL) && (msr) <= IA32_MSR_MC(10, MISC)) || \
995 	(msr) == IA32_MSR_MCG_STATUS)
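
/*
 * The range above spans the architectural CTL/STATUS/ADDR/MISC registers
 * of MCA banks 0 through 10, so e.g. an injection into bank 4's STATUS
 * register is accepted, while an arbitrary non-MCA MSR fails the
 * pre-screen with CMIERR_API.
 */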
996 
997 static cmi_errno_t
998 xpv_wrmsr_cmn(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val, boolean_t intpose)
999 {
1000 	struct xen_mc_msrinject mci;
1001 
1002 	if (!(hdl->cmih_flags & CMIH_F_INJACTV))
1003 		return (CMIERR_NOTSUP);		/* for injection use only! */
1004 
1005 	if (!IS_MCA_INJ_MSR(msr))
1006 		return (CMIERR_API);
1007 
1008 	if (panicstr)
1009 		return (CMIERR_DEADLOCK);
1010 
1011 	mci.mcinj_cpunr = xen_physcpu_logical_id(HDLPRIV(hdl));
1012 	mci.mcinj_flags = intpose ? MC_MSRINJ_F_INTERPOSE : 0;
1013 	mci.mcinj_count = 1;	/* learn to batch sometime */
1014 	mci.mcinj_msr[0].reg = msr;
1015 	mci.mcinj_msr[0].value = val;
1016 
1017 	return (HYPERVISOR_mca(XEN_MC_msrinject, (xen_mc_arg_t *)&mci) ==
1018 	    0 ?  CMI_SUCCESS : CMIERR_NOTSUP);
1019 }
1020 
1021 static cmi_errno_t
1022 xpv_wrmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
1023 {
1024 	return (xpv_wrmsr_cmn(hdl, msr, val, B_FALSE));
1025 }
1026 
1027 
1028 static cmi_errno_t
1029 xpv_msrinterpose(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
1030 {
1031 	return (xpv_wrmsr_cmn(hdl, msr, val, B_TRUE));
1032 }
1033 
1034 static void
1035 xpv_int(cmi_hdl_impl_t *hdl, int int_no)
1036 {
1037 	struct xen_mc_mceinject mce;
1038 
1039 	if (!(hdl->cmih_flags & CMIH_F_INJACTV))
1040 		return;
1041 
1042 	if (int_no != T_MCE) {
1043 		cmn_err(CE_WARN, "xpv_int: int_no %d unimplemented", int_no);
1044 		return;
1045 	}
1046 
1047 	mce.mceinj_cpunr = xen_physcpu_logical_id(HDLPRIV(hdl));
1048 
1049 	(void) HYPERVISOR_mca(XEN_MC_mceinject, (xen_mc_arg_t *)&mce);
1050 }
1051 
1052 static int
1053 xpv_online(cmi_hdl_impl_t *hdl, int new_status, int *old_status)
1054 {
1055 	xen_sysctl_t xs;
1056 	int op, rc, status;
1057 
1058 	new_status &= ~P_FORCED;
1059 
1060 	switch (new_status) {
1061 	case P_STATUS:
1062 		op = XEN_SYSCTL_CPU_HOTPLUG_STATUS;
1063 		break;
1064 	case P_FAULTED:
1065 	case P_OFFLINE:
1066 		op = XEN_SYSCTL_CPU_HOTPLUG_OFFLINE;
1067 		break;
1068 	case P_ONLINE:
1069 		op = XEN_SYSCTL_CPU_HOTPLUG_ONLINE;
1070 		break;
1071 	default:
1072 		return (-1);
1073 	}
1074 
1075 	xs.cmd = XEN_SYSCTL_cpu_hotplug;
1076 	xs.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
1077 	xs.u.cpu_hotplug.cpu = xen_physcpu_logical_id(HDLPRIV(hdl));
1078 	xs.u.cpu_hotplug.op = op;
1079 
1080 	if ((rc = HYPERVISOR_sysctl(&xs)) >= 0) {
1081 		status = rc;
1082 		rc = 0;
1083 		switch (status) {
1084 		case XEN_CPU_HOTPLUG_STATUS_NEW:
1085 			*old_status = P_OFFLINE;
1086 			break;
1087 		case XEN_CPU_HOTPLUG_STATUS_OFFLINE:
1088 			*old_status = P_FAULTED;
1089 			break;
1090 		case XEN_CPU_HOTPLUG_STATUS_ONLINE:
1091 			*old_status = P_ONLINE;
1092 			break;
1093 		default:
1094 			return (-1);
1095 		}
1096 	}
1097 
1098 	return (-rc);
1099 }
1100 
1101 #endif
1102 
1103 /*ARGSUSED*/
1104 static void *
1105 cpu_search(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
1106     uint_t strandid)
1107 {
1108 #ifdef __xpv
1109 	xen_mc_lcpu_cookie_t cpi;
1110 
1111 	for (cpi = xen_physcpu_next(NULL); cpi != NULL;
1112 	    cpi = xen_physcpu_next(cpi)) {
1113 		if (xen_physcpu_chipid(cpi) == chipid &&
1114 		    xen_physcpu_coreid(cpi) == coreid &&
1115 		    xen_physcpu_strandid(cpi) == strandid)
1116 			return ((void *)cpi);
1117 	}
1118 	return (NULL);
1119 
1120 #else	/* __xpv */
1121 
1122 	cpu_t *cp, *startcp;
1123 
1124 	kpreempt_disable();
1125 	cp = startcp = CPU;
1126 	do {
1127 		if (cmi_ntv_hwchipid(cp) == chipid &&
1128 		    cmi_ntv_hwcoreid(cp) == coreid &&
1129 		    cmi_ntv_hwstrandid(cp) == strandid) {
1130 			kpreempt_enable();
1131 			return ((void *)cp);
1132 		}
1133 
1134 		cp = cp->cpu_next;
1135 	} while (cp != startcp);
1136 	kpreempt_enable();
1137 	return (NULL);
1138 #endif	/* __xpv */
1139 }
1140 
1141 static boolean_t
1142 cpu_is_cmt(void *priv)
1143 {
1144 #ifdef __xpv
1145 	return (xen_physcpu_is_cmt((xen_mc_lcpu_cookie_t)priv));
1146 #else /* __xpv */
1147 	cpu_t *cp = (cpu_t *)priv;
1148 
1149 	int strands_per_core = cpuid_get_ncpu_per_chip(cp) /
1150 	    cpuid_get_ncore_per_chip(cp);
1151 
1152 	return (strands_per_core > 1);
1153 #endif /* __xpv */
1154 }
1155 
1156 /*
1157  * Find the handle entry of a given cpu identified by a <chip,core,strand>
1158  * tuple.
1159  */
1160 static cmi_hdl_ent_t *
1161 cmi_hdl_ent_lookup(uint_t chipid, uint_t coreid, uint_t strandid)
1162 {
1163 	/*
1164 	 * Allocate the per-chip table, which contains a handle entry for
1165 	 * every strand of the chip.
1166 	 */
1167 	if (cmi_chip_tab[chipid] == NULL) {
1168 		size_t sz;
1169 		cmi_hdl_ent_t *pg;
1170 
1171 		sz = CMI_MAX_STRANDS_PER_CHIP * sizeof (cmi_hdl_ent_t);
1172 		pg = kmem_zalloc(sz, KM_SLEEP);
1173 
1174 		/* test and set the per-chip table if it is not allocated */
1175 		if (atomic_cas_ptr(&cmi_chip_tab[chipid], NULL, pg) != NULL)
1176 			kmem_free(pg, sz); /* someone beat us */
1177 	}
1178 
1179 	return (cmi_chip_tab[chipid] + CMI_HDL_ARR_IDX(coreid, strandid));
1180 }
1181 
1182 cmi_hdl_t
1183 cmi_hdl_create(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
1184     uint_t strandid)
1185 {
1186 	cmi_hdl_impl_t *hdl;
1187 	void *priv;
1188 	cmi_hdl_ent_t *ent;
1189 
1190 #ifdef __xpv
1191 	ASSERT(class == CMI_HDL_SOLARIS_xVM_MCA);
1192 #else
1193 	ASSERT(class == CMI_HDL_NATIVE);
1194 #endif
1195 
1196 	if (chipid > CMI_MAX_CHIPID ||
1197 	    coreid > CMI_MAX_CORES_PER_CHIP - 1 ||
1198 	    strandid > CMI_MAX_STRANDS_PER_CORE - 1)
1199 		return (NULL);
1200 
1201 	if ((priv = cpu_search(class, chipid, coreid, strandid)) == NULL)
1202 		return (NULL);
1203 
1204 	hdl = kmem_zalloc(sizeof (*hdl), KM_SLEEP);
1205 
1206 	hdl->cmih_class = class;
1207 	HDLOPS(hdl) = &cmi_hdl_ops;
1208 	hdl->cmih_chipid = chipid;
1209 	hdl->cmih_coreid = coreid;
1210 	hdl->cmih_strandid = strandid;
1211 	hdl->cmih_mstrand = cpu_is_cmt(priv);
1212 	hdl->cmih_hdlpriv = priv;
1213 #ifdef __xpv
1214 	hdl->cmih_msrsrc = CMI_MSR_FLAG_RD_INTERPOSEOK |
1215 	    CMI_MSR_FLAG_WR_INTERPOSEOK;
1216 #else	/* __xpv */
1217 	hdl->cmih_msrsrc = CMI_MSR_FLAG_RD_HWOK | CMI_MSR_FLAG_RD_INTERPOSEOK |
1218 	    CMI_MSR_FLAG_WR_HWOK | CMI_MSR_FLAG_WR_INTERPOSEOK;
1219 #endif
1220 
1221 	ent = cmi_hdl_ent_lookup(chipid, coreid, strandid);
1222 	if (ent->cmae_refcnt != 0 || ent->cmae_hdlp != NULL) {
1223 		/*
1224 		 * Somehow this (chipid, coreid, strandid) id tuple has
1225 		 * already been assigned!  This indicates that the
1226 		 * caller's logic in determining these values is busted,
1227 		 * or perhaps undermined by bad BIOS setup.  Complain,
1228 		 * and refuse to initialize this tuple again as bad things
1229 		 * will happen.
1230 		 */
1231 		cmn_err(CE_NOTE, "cmi_hdl_create: chipid %d coreid %d "
1232 		    "strandid %d handle already allocated!",
1233 		    chipid, coreid, strandid);
1234 		kmem_free(hdl, sizeof (*hdl));
1235 		return (NULL);
1236 	}
1237 
1238 	/*
1239 	 * Once we store a nonzero reference count others can find this
1240 	 * handle via cmi_hdl_lookup etc.  This initial hold on the handle
1241 	 * is to be dropped only if some other part of cmi initialization
1242 	 * fails or, if it succeeds, at later cpu deconfigure.  Note that
1243 	 * the module private data we hold in cmih_cmi and cmih_cmidata
1244 	 * is still NULL at this point (the caller will fill it with
1245 	 * cmi_hdl_setcmi if it initializes) so consumers of handles
1246 	 * should always be ready for that possibility.
1247 	 */
1248 	ent->cmae_hdlp = hdl;
1249 	hdl->cmih_refcntp = &ent->cmae_refcnt;
1250 	ent->cmae_refcnt = 1;
1251 
1252 	return ((cmi_hdl_t)hdl);
1253 }
1254 
1255 void
1256 cmi_read_smbios(cmi_hdl_t ophdl)
1257 {
1259 	uint_t strand_apicid;
1260 	uint_t chip_inst;
1261 	uint16_t smb_id;
1262 	int rc = 0;
1263 
1264 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1265 
1266 	/* set x86gentopo compatibility */
1267 	fm_smb_fmacompat();
1268 
1269 #ifndef __xpv
1270 	strand_apicid = ntv_strand_apicid(hdl);
1271 #else
1272 	strand_apicid = xpv_strand_apicid(hdl);
1273 #endif
1274 
1275 	if (!x86gentopo_legacy) {
1276 		/*
1277 		 * If fm_smb_chipinst() or fm_smb_bboard() fails,
1278 		 * topo reverts to legacy mode
1279 		 */
1280 		rc = fm_smb_chipinst(strand_apicid, &chip_inst, &smb_id);
1281 		if (rc == 0) {
1282 			hdl->cmih_smb_chipid = chip_inst;
1283 			hdl->cmih_smbiosid = smb_id;
1284 		} else {
1285 #ifdef DEBUG
1286 			cmn_err(CE_NOTE, "cmi failed to read SMBIOS chip info");
1287 #endif /* DEBUG */
1288 			return;
1289 		}
1290 
1291 		hdl->cmih_smb_bboard = fm_smb_bboard(strand_apicid);
1292 #ifdef DEBUG
1293 		if (hdl->cmih_smb_bboard == NULL)
1294 			cmn_err(CE_NOTE,
1295 			    "cmi reads smbios base boards info failed");
1296 #endif /* DEBUG */
1297 	}
1298 }
1299 
1300 void
1301 cmi_hdl_hold(cmi_hdl_t ophdl)
1302 {
1303 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1304 
1305 	ASSERT(*hdl->cmih_refcntp != 0); /* must not be the initial hold */
1306 
1307 	atomic_inc_32(hdl->cmih_refcntp);
1308 }
1309 
1310 static int
1311 cmi_hdl_canref(cmi_hdl_ent_t *ent)
1312 {
1313 	volatile uint32_t *refcntp;
1314 	uint32_t refcnt;
1315 
1316 	refcntp = &ent->cmae_refcnt;
1317 	refcnt = *refcntp;
1318 
1319 	if (refcnt == 0) {
1320 		/*
1321 		 * Associated object never existed, is being destroyed,
1322 		 * or has been destroyed.
1323 		 */
1324 		return (0);
1325 	}
1326 
1327 	/*
1328 	 * We cannot use atomic increment here because once the reference
1329 	 * count reaches zero it must never be bumped up again.
1330 	 */
1331 	while (refcnt != 0) {
1332 		if (atomic_cas_32(refcntp, refcnt, refcnt + 1) == refcnt)
1333 			return (1);
1334 		refcnt = *refcntp;
1335 	}
1336 
1337 	/*
1338 	 * Somebody dropped the reference count to 0 after our initial
1339 	 * check.
1340 	 */
1341 	return (0);
1342 }
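
/*
 * To see why the CAS loop above is needed, consider a hypothetical
 * interleaving: we read a refcnt of 1; the last holder then releases,
 * dropping the count to 0 and beginning teardown; our atomic_cas_32 from
 * 1 to 2 now fails because the current value is 0, and we correctly
 * refuse the reference instead of resurrecting a dying handle.
 */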
1343 
1344 
1345 void
1346 cmi_hdl_rele(cmi_hdl_t ophdl)
1347 {
1348 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1349 	cmi_hdl_ent_t *ent;
1350 
1351 	ASSERT(*hdl->cmih_refcntp > 0);
1352 
1353 	if (atomic_dec_32_nv(hdl->cmih_refcntp) > 0)
1354 		return;
1355 
1356 	ent = cmi_hdl_ent_lookup(hdl->cmih_chipid, hdl->cmih_coreid,
1357 	    hdl->cmih_strandid);
1358 	ent->cmae_hdlp = NULL;
1359 
1360 	kmem_free(hdl, sizeof (*hdl));
1361 }
1362 
1363 void
1364 cmi_hdl_setspecific(cmi_hdl_t ophdl, void *arg)
1365 {
1366 	IMPLHDL(ophdl)->cmih_spec = arg;
1367 }
1368 
1369 void *
1370 cmi_hdl_getspecific(cmi_hdl_t ophdl)
1371 {
1372 	return (IMPLHDL(ophdl)->cmih_spec);
1373 }
1374 
1375 void
1376 cmi_hdl_setmc(cmi_hdl_t ophdl, const struct cmi_mc_ops *mcops, void *mcdata)
1377 {
1378 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1379 
1380 	ASSERT(hdl->cmih_mcops == NULL && hdl->cmih_mcdata == NULL);
1381 	hdl->cmih_mcops = mcops;
1382 	hdl->cmih_mcdata = mcdata;
1383 }
1384 
1385 const struct cmi_mc_ops *
1386 cmi_hdl_getmcops(cmi_hdl_t ophdl)
1387 {
1388 	return (IMPLHDL(ophdl)->cmih_mcops);
1389 }
1390 
1391 void *
1392 cmi_hdl_getmcdata(cmi_hdl_t ophdl)
1393 {
1394 	return (IMPLHDL(ophdl)->cmih_mcdata);
1395 }
1396 
1397 cmi_hdl_t
1398 cmi_hdl_lookup(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
1399     uint_t strandid)
1400 {
1401 	cmi_hdl_ent_t *ent;
1402 
1403 	if (chipid > CMI_MAX_CHIPID ||
1404 	    coreid > CMI_MAX_CORES_PER_CHIP - 1 ||
1405 	    strandid > CMI_MAX_STRANDS_PER_CORE - 1)
1406 		return (NULL);
1407 
1408 	ent = cmi_hdl_ent_lookup(chipid, coreid, strandid);
1409 
1410 	if (class == CMI_HDL_NEUTRAL)
1411 #ifdef __xpv
1412 		class = CMI_HDL_SOLARIS_xVM_MCA;
1413 #else
1414 		class = CMI_HDL_NATIVE;
1415 #endif
1416 
1417 	if (!cmi_hdl_canref(ent))
1418 		return (NULL);
1419 
1420 	if (ent->cmae_hdlp->cmih_class != class) {
1421 		cmi_hdl_rele((cmi_hdl_t)ent->cmae_hdlp);
1422 		return (NULL);
1423 	}
1424 
1425 	return ((cmi_hdl_t)ent->cmae_hdlp);
1426 }
1427 
1428 cmi_hdl_t
1429 cmi_hdl_any(void)
1430 {
1431 	int i, j;
1432 	cmi_hdl_ent_t *ent;
1433 
1434 	for (i = 0; i < CMI_CHIPID_ARR_SZ; i++) {
1435 		if (cmi_chip_tab[i] == NULL)
1436 			continue;
1437 		for (j = 0, ent = cmi_chip_tab[i]; j < CMI_MAX_STRANDS_PER_CHIP;
1438 		    j++, ent++) {
1439 			if (cmi_hdl_canref(ent))
1440 				return ((cmi_hdl_t)ent->cmae_hdlp);
1441 		}
1442 	}
1443 
1444 	return (NULL);
1445 }
1446 
1447 void
1448 cmi_hdl_walk(int (*cbfunc)(cmi_hdl_t, void *, void *, void *),
1449     void *arg1, void *arg2, void *arg3)
1450 {
1451 	int i, j;
1452 	cmi_hdl_ent_t *ent;
1453 
1454 	for (i = 0; i < CMI_CHIPID_ARR_SZ; i++) {
1455 		if (cmi_chip_tab[i] == NULL)
1456 			continue;
1457 		for (j = 0, ent = cmi_chip_tab[i]; j < CMI_MAX_STRANDS_PER_CHIP;
1458 		    j++, ent++) {
1459 			if (cmi_hdl_canref(ent)) {
1460 				cmi_hdl_impl_t *hdl = ent->cmae_hdlp;
1461 				if ((*cbfunc)((cmi_hdl_t)hdl, arg1, arg2, arg3)
1462 				    == CMI_HDL_WALK_DONE) {
1463 					cmi_hdl_rele((cmi_hdl_t)hdl);
1464 					return;
1465 				}
1466 				cmi_hdl_rele((cmi_hdl_t)hdl);
1467 			}
1468 		}
1469 	}
1470 }
1471 
1472 void
1473 cmi_hdl_setcmi(cmi_hdl_t ophdl, void *cmi, void *cmidata)
1474 {
1475 	IMPLHDL(ophdl)->cmih_cmidata = cmidata;
1476 	IMPLHDL(ophdl)->cmih_cmi = cmi;
1477 }
1478 
1479 void *
1480 cmi_hdl_getcmi(cmi_hdl_t ophdl)
1481 {
1482 	return (IMPLHDL(ophdl)->cmih_cmi);
1483 }
1484 
1485 void *
1486 cmi_hdl_getcmidata(cmi_hdl_t ophdl)
1487 {
1488 	return (IMPLHDL(ophdl)->cmih_cmidata);
1489 }
1490 
1491 enum cmi_hdl_class
1492 cmi_hdl_class(cmi_hdl_t ophdl)
1493 {
1494 	return (IMPLHDL(ophdl)->cmih_class);
1495 }
1496 
1497 #define	CMI_HDL_OPFUNC(what, type)				\
1498 	type							\
1499 	cmi_hdl_##what(cmi_hdl_t ophdl)				\
1500 	{							\
1501 		return (HDLOPS(IMPLHDL(ophdl))->		\
1502 		    cmio_##what(IMPLHDL(ophdl)));		\
1503 	}
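
/*
 * For example, CMI_HDL_OPFUNC(vendor, uint_t) below expands to
 *
 *	uint_t
 *	cmi_hdl_vendor(cmi_hdl_t ophdl)
 *	{
 *		return (HDLOPS(IMPLHDL(ophdl))->cmio_vendor(IMPLHDL(ophdl)));
 *	}
 */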
1504 
1505 CMI_HDL_OPFUNC(vendor, uint_t)
1506 CMI_HDL_OPFUNC(vendorstr, const char *)
1507 CMI_HDL_OPFUNC(family, uint_t)
1508 CMI_HDL_OPFUNC(model, uint_t)
1509 CMI_HDL_OPFUNC(stepping, uint_t)
1510 CMI_HDL_OPFUNC(chipid, uint_t)
1511 CMI_HDL_OPFUNC(coreid, uint_t)
1512 CMI_HDL_OPFUNC(strandid, uint_t)
1513 CMI_HDL_OPFUNC(strand_apicid, uint_t)
1514 CMI_HDL_OPFUNC(chiprev, uint32_t)
1515 CMI_HDL_OPFUNC(chiprevstr, const char *)
1516 CMI_HDL_OPFUNC(getsockettype, uint32_t)
1517 CMI_HDL_OPFUNC(getsocketstr, const char *)
1518 CMI_HDL_OPFUNC(logical_id, id_t)
1519 CMI_HDL_OPFUNC(smbiosid, uint16_t)
1520 CMI_HDL_OPFUNC(smb_chipid, uint_t)
1521 CMI_HDL_OPFUNC(smb_bboard, nvlist_t *)
1522 
1523 boolean_t
1524 cmi_hdl_is_cmt(cmi_hdl_t ophdl)
1525 {
1526 	return (IMPLHDL(ophdl)->cmih_mstrand);
1527 }
1528 
1529 void
1530 cmi_hdl_int(cmi_hdl_t ophdl, int num)
1531 {
1532 	if (HDLOPS(IMPLHDL(ophdl))->cmio_int == NULL)
1533 		return;
1534 
1535 	cmi_hdl_inj_begin(ophdl);
1536 	HDLOPS(IMPLHDL(ophdl))->cmio_int(IMPLHDL(ophdl), num);
1537 	cmi_hdl_inj_end(NULL);
1538 }
1539 
1540 int
1541 cmi_hdl_online(cmi_hdl_t ophdl, int new_status, int *old_status)
1542 {
1543 	return (HDLOPS(IMPLHDL(ophdl))->cmio_online(IMPLHDL(ophdl),
1544 	    new_status, old_status));
1545 }
1546 
1547 #ifndef	__xpv
1548 /*
1549  * Return hardware chip instance; cpuid_get_chipid provides this directly.
1550  */
1551 uint_t
1552 cmi_ntv_hwchipid(cpu_t *cp)
1553 {
1554 	return (cpuid_get_chipid(cp));
1555 }
1556 
1557 /*
1558  * Return core instance within a single chip.
1559  */
1560 uint_t
1561 cmi_ntv_hwcoreid(cpu_t *cp)
1562 {
1563 	return (cpuid_get_pkgcoreid(cp));
1564 }
1565 
1566 /*
1567  * Return strand number within a single core.  cpuid_get_clogid numbers
1568  * all execution units (strands, or cores in unstranded models) sequentially
1569  * within a single chip.
1570  */
1571 uint_t
1572 cmi_ntv_hwstrandid(cpu_t *cp)
1573 {
1574 	int strands_per_core = cpuid_get_ncpu_per_chip(cp) /
1575 	    cpuid_get_ncore_per_chip(cp);
1576 
1577 	return (cpuid_get_clogid(cp) % strands_per_core);
1578 }
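
/*
 * For example, on a hypothetical chip with 4 cores and 8 logical cpus,
 * cpuid_get_ncpu_per_chip() is 8 and cpuid_get_ncore_per_chip() is 4,
 * giving strands_per_core == 2; a cpu whose clogid is 5 is therefore
 * strand 5 % 2 == 1 within its core.
 */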
1579 #endif	/* __xpv */
1580 
1581 void
1582 cmi_hdlconf_rdmsr_nohw(cmi_hdl_t ophdl)
1583 {
1584 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1585 
1586 	hdl->cmih_msrsrc &= ~CMI_MSR_FLAG_RD_HWOK;
1587 }
1588 
1589 void
1590 cmi_hdlconf_wrmsr_nohw(cmi_hdl_t ophdl)
1591 {
1592 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1593 
1594 	hdl->cmih_msrsrc &= ~CMI_MSR_FLAG_WR_HWOK;
1595 }
1596 
1597 cmi_errno_t
1598 cmi_hdl_rdmsr(cmi_hdl_t ophdl, uint_t msr, uint64_t *valp)
1599 {
1600 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1601 
1602 	/*
1603 	 * Regardless of the handle class, we first check for an
1604 	 * interposed value.  In the xVM case you probably want to
1605 	 * place interposed values within the hypervisor itself, but
1606 	 * we still allow interposing them in dom0 for test and bringup
1607 	 * purposes.
1608 	 */
1609 	if ((hdl->cmih_msrsrc & CMI_MSR_FLAG_RD_INTERPOSEOK) &&
1610 	    msri_lookup(hdl, msr, valp))
1611 		return (CMI_SUCCESS);
1612 
1613 	if (HDLOPS(hdl)->cmio_rdmsr == NULL)
1614 		return (CMIERR_NOTSUP);
1615 
1616 	return (HDLOPS(hdl)->cmio_rdmsr(hdl, msr, valp));
1617 }
1618 
1619 cmi_errno_t
1620 cmi_hdl_wrmsr(cmi_hdl_t ophdl, uint_t msr, uint64_t val)
1621 {
1622 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1623 
1624 	/* Invalidate any interposed value */
1625 	msri_rment(hdl, msr);
1626 
1627 	if (HDLOPS(hdl)->cmio_wrmsr == NULL)
1628 		return (CMI_SUCCESS);	/* pretend all is ok */
1629 
1630 	return (HDLOPS(hdl)->cmio_wrmsr(hdl, msr, val));
1631 }
1632 
1633 void
1634 cmi_hdl_enable_mce(cmi_hdl_t ophdl)
1635 {
1636 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1637 	ulong_t cr4;
1638 
1639 	if (HDLOPS(hdl)->cmio_getcr4 == NULL ||
1640 	    HDLOPS(hdl)->cmio_setcr4 == NULL)
1641 		return;
1642 
1643 	cr4 = HDLOPS(hdl)->cmio_getcr4(hdl);
1644 
1645 	HDLOPS(hdl)->cmio_setcr4(hdl, cr4 | CR4_MCE);
1646 }
1647 
1648 void
1649 cmi_hdl_msrinterpose(cmi_hdl_t ophdl, cmi_mca_regs_t *regs, uint_t nregs)
1650 {
1651 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1652 	int i;
1653 
1654 	if (HDLOPS(hdl)->cmio_msrinterpose == NULL)
1655 		return;
1656 
1657 	cmi_hdl_inj_begin(ophdl);
1658 
1659 	for (i = 0; i < nregs; i++, regs++)
1660 		HDLOPS(hdl)->cmio_msrinterpose(hdl, regs->cmr_msrnum,
1661 		    regs->cmr_msrval);
1662 
1663 	cmi_hdl_inj_end(ophdl);
1664 }
1665 
1666 /*ARGSUSED*/
1667 void
1668 cmi_hdl_msrforward(cmi_hdl_t ophdl, cmi_mca_regs_t *regs, uint_t nregs)
1669 {
1670 #ifdef __xpv
1671 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1672 	int i;
1673 
1674 	for (i = 0; i < nregs; i++, regs++)
1675 		msri_addent(hdl, regs->cmr_msrnum, regs->cmr_msrval);
1676 #endif
1677 }
1678 
1679 
1680 void
1681 cmi_pcird_nohw(void)
1682 {
1683 	cmi_pcicfg_flags &= ~CMI_PCICFG_FLAG_RD_HWOK;
1684 }
1685 
1686 void
1687 cmi_pciwr_nohw(void)
1688 {
1689 	cmi_pcicfg_flags &= ~CMI_PCICFG_FLAG_WR_HWOK;
1690 }
1691 
1692 static uint32_t
1693 cmi_pci_get_cmn(int bus, int dev, int func, int reg, int asz,
1694     int *interpose, ddi_acc_handle_t hdl)
1695 {
1696 	uint32_t val;
1697 
1698 	if (cmi_pcicfg_flags & CMI_PCICFG_FLAG_RD_INTERPOSEOK &&
1699 	    pcii_lookup(bus, dev, func, reg, asz, &val)) {
1700 		if (interpose)
1701 			*interpose = 1;
1702 		return (val);
1703 	}
1704 	if (interpose)
1705 		*interpose = 0;
1706 
1707 	if (!(cmi_pcicfg_flags & CMI_PCICFG_FLAG_RD_HWOK))
1708 		return (0);
1709 
1710 	switch (asz) {
1711 	case 1:
1712 		if (hdl)
1713 			val = pci_config_get8(hdl, (off_t)reg);
1714 		else
1715 			val = (*pci_getb_func)(bus, dev, func, reg);
1716 		break;
1717 	case 2:
1718 		if (hdl)
1719 			val = pci_config_get16(hdl, (off_t)reg);
1720 		else
1721 			val = (*pci_getw_func)(bus, dev, func, reg);
1722 		break;
1723 	case 4:
1724 		if (hdl)
1725 			val = pci_config_get32(hdl, (off_t)reg);
1726 		else
1727 			val = (*pci_getl_func)(bus, dev, func, reg);
1728 		break;
1729 	default:
1730 		val = 0;
1731 	}
1732 	return (val);
1733 }
1734 
1735 uint8_t
1736 cmi_pci_getb(int bus, int dev, int func, int reg, int *interpose,
1737     ddi_acc_handle_t hdl)
1738 {
1739 	return ((uint8_t)cmi_pci_get_cmn(bus, dev, func, reg, 1, interpose,
1740 	    hdl));
1741 }
1742 
1743 uint16_t
1744 cmi_pci_getw(int bus, int dev, int func, int reg, int *interpose,
1745     ddi_acc_handle_t hdl)
1746 {
1747 	return ((uint16_t)cmi_pci_get_cmn(bus, dev, func, reg, 2, interpose,
1748 	    hdl));
1749 }
1750 
1751 uint32_t
1752 cmi_pci_getl(int bus, int dev, int func, int reg, int *interpose,
1753     ddi_acc_handle_t hdl)
1754 {
1755 	return (cmi_pci_get_cmn(bus, dev, func, reg, 4, interpose, hdl));
1756 }
1757 
1758 void
1759 cmi_pci_interposeb(int bus, int dev, int func, int reg, uint8_t val)
1760 {
1761 	pcii_addent(bus, dev, func, reg, val, 1);
1762 }
1763 
1764 void
1765 cmi_pci_interposew(int bus, int dev, int func, int reg, uint16_t val)
1766 {
1767 	pcii_addent(bus, dev, func, reg, val, 2);
1768 }
1769 
1770 void
1771 cmi_pci_interposel(int bus, int dev, int func, int reg, uint32_t val)
1772 {
1773 	pcii_addent(bus, dev, func, reg, val, 4);
1774 }
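
/*
 * A typical injection round trip (hypothetical bus/dev/func/reg values):
 * cmi_pci_interposel(0, 24, 3, 0x44, 0x12345678) stashes a doubleword,
 * after which cmi_pci_getl(0, 24, 3, 0x44, &interpose, NULL) returns
 * 0x12345678 with interpose set to 1 and never touches the hardware.
 * A later cmi_pci_putl() to the same register invalidates the interposed
 * value via pcii_rment().
 */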
1775 
1776 static void
1777 cmi_pci_put_cmn(int bus, int dev, int func, int reg, int asz,
1778     ddi_acc_handle_t hdl, uint32_t val)
1779 {
1780 	/*
1781 	 * If there is an interposed value for this register invalidate it.
1782 	 */
1783 	pcii_rment(bus, dev, func, reg, asz);
1784 
1785 	if (!(cmi_pcicfg_flags & CMI_PCICFG_FLAG_WR_HWOK))
1786 		return;
1787 
1788 	switch (asz) {
1789 	case 1:
1790 		if (hdl)
1791 			pci_config_put8(hdl, (off_t)reg, (uint8_t)val);
1792 		else
1793 			(*pci_putb_func)(bus, dev, func, reg, (uint8_t)val);
1794 		break;
1795 
1796 	case 2:
1797 		if (hdl)
1798 			pci_config_put16(hdl, (off_t)reg, (uint16_t)val);
1799 		else
1800 			(*pci_putw_func)(bus, dev, func, reg, (uint16_t)val);
1801 		break;
1802 
1803 	case 4:
1804 		if (hdl)
1805 			pci_config_put32(hdl, (off_t)reg, val);
1806 		else
1807 			(*pci_putl_func)(bus, dev, func, reg, val);
1808 		break;
1809 
1810 	default:
1811 		break;
1812 	}
1813 }
1814 
1815 void
1816 cmi_pci_putb(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
1817     uint8_t val)
1818 {
1819 	cmi_pci_put_cmn(bus, dev, func, reg, 1, hdl, val);
1820 }
1821 
1822 void
1823 cmi_pci_putw(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
1824     uint16_t val)
1825 {
1826 	cmi_pci_put_cmn(bus, dev, func, reg, 2, hdl, val);
1827 }
1828 
1829 void
1830 cmi_pci_putl(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
1831     uint32_t val)
1832 {
1833 	cmi_pci_put_cmn(bus, dev, func, reg, 4, hdl, val);
1834 }
1835 
1836 static const struct cmi_hdl_ops cmi_hdl_ops = {
1837 #ifdef __xpv
1838 	/*
1839 	 * CMI_HDL_SOLARIS_xVM_MCA - ops when we are an xVM dom0
1840 	 */
1841 	xpv_vendor,		/* cmio_vendor */
1842 	xpv_vendorstr,		/* cmio_vendorstr */
1843 	xpv_family,		/* cmio_family */
1844 	xpv_model,		/* cmio_model */
1845 	xpv_stepping,		/* cmio_stepping */
1846 	xpv_chipid,		/* cmio_chipid */
1847 	xpv_coreid,		/* cmio_coreid */
1848 	xpv_strandid,		/* cmio_strandid */
1849 	xpv_strand_apicid,	/* cmio_strand_apicid */
1850 	xpv_chiprev,		/* cmio_chiprev */
1851 	xpv_chiprevstr,		/* cmio_chiprevstr */
1852 	xpv_getsockettype,	/* cmio_getsockettype */
1853 	xpv_getsocketstr,	/* cmio_getsocketstr */
1854 	xpv_logical_id,		/* cmio_logical_id */
1855 	NULL,			/* cmio_getcr4 */
1856 	NULL,			/* cmio_setcr4 */
1857 	xpv_rdmsr,		/* cmio_rdmsr */
1858 	xpv_wrmsr,		/* cmio_wrmsr */
1859 	xpv_msrinterpose,	/* cmio_msrinterpose */
1860 	xpv_int,		/* cmio_int */
1861 	xpv_online,		/* cmio_online */
1862 	xpv_smbiosid,		/* cmio_smbiosid */
1863 	xpv_smb_chipid,		/* cmio_smb_chipid */
1864 	xpv_smb_bboard		/* cmio_smb_bboard */
1865 
1866 #else	/* __xpv */
1867 
1868 	/*
1869 	 * CMI_HDL_NATIVE - ops when apparently running on bare-metal
1870 	 */
1871 	ntv_vendor,		/* cmio_vendor */
1872 	ntv_vendorstr,		/* cmio_vendorstr */
1873 	ntv_family,		/* cmio_family */
1874 	ntv_model,		/* cmio_model */
1875 	ntv_stepping,		/* cmio_stepping */
1876 	ntv_chipid,		/* cmio_chipid */
1877 	ntv_coreid,		/* cmio_coreid */
1878 	ntv_strandid,		/* cmio_strandid */
1879 	ntv_strand_apicid,	/* cmio_strand_apicid */
1880 	ntv_chiprev,		/* cmio_chiprev */
1881 	ntv_chiprevstr,		/* cmio_chiprevstr */
1882 	ntv_getsockettype,	/* cmio_getsockettype */
1883 	ntv_getsocketstr,	/* cmio_getsocketstr */
1884 	ntv_logical_id,		/* cmio_logical_id */
1885 	ntv_getcr4,		/* cmio_getcr4 */
1886 	ntv_setcr4,		/* cmio_setcr4 */
1887 	ntv_rdmsr,		/* cmio_rdmsr */
1888 	ntv_wrmsr,		/* cmio_wrmsr */
1889 	ntv_msrinterpose,	/* cmio_msrinterpose */
1890 	ntv_int,		/* cmio_int */
1891 	ntv_online,		/* cmio_online */
1892 	ntv_smbiosid,		/* cmio_smbiosid */
1893 	ntv_smb_chipid,		/* cmio_smb_chipid */
1894 	ntv_smb_bboard		/* cmio_smb_bboard */
1895 #endif
1896 };
1897