xref: /illumos-gate/usr/src/uts/i86pc/os/cmi_hw.c (revision 9c2acf00e275b6b2125a306f33cdddcc58393220)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright (c) 2019, Joyent, Inc.
25  */
26 /*
27  * Copyright (c) 2010, Intel Corporation.
28  * All rights reserved.
29  */
30 
31 /*
32  * CPU Module Interface - hardware abstraction.
33  */
34 
35 #ifdef __xpv
36 #include <sys/xpv_user.h>
37 #endif
38 
39 #include <sys/types.h>
40 #include <sys/cpu_module.h>
41 #include <sys/kmem.h>
42 #include <sys/x86_archext.h>
43 #include <sys/cpuvar.h>
44 #include <sys/ksynch.h>
45 #include <sys/x_call.h>
46 #include <sys/pghw.h>
47 #include <sys/pci_cfgacc.h>
48 #include <sys/pci_cfgspace.h>
49 #include <sys/archsystm.h>
50 #include <sys/ontrap.h>
51 #include <sys/controlregs.h>
52 #include <sys/sunddi.h>
53 #include <sys/trap.h>
54 #include <sys/mca_x86.h>
55 #include <sys/processor.h>
56 #include <sys/cmn_err.h>
57 #include <sys/nvpair.h>
58 #include <sys/fm/util.h>
59 #include <sys/fm/protocol.h>
60 #include <sys/fm/smb/fmsmb.h>
61 #include <sys/cpu_module_impl.h>
62 
63 /*
64  * Variable which determines if the SMBIOS supports x86 generic topology; or
65  * if legacy topolgy enumeration will occur.
66  */
67 extern int x86gentopo_legacy;
68 
69 /*
70  * Outside of this file consumers use the opaque cmi_hdl_t.  This
71  * definition is duplicated in the generic_cpu mdb module, so keep
72  * them in-sync when making changes.
73  */
typedef struct cmi_hdl_impl {
	enum cmi_hdl_class cmih_class;		/* Handle class (native/xpv) */
	const struct cmi_hdl_ops *cmih_ops;	/* Operations vector */
	uint_t cmih_chipid;			/* Chipid of cpu resource */
	uint_t cmih_procnodeid;			/* Nodeid of cpu resource */
	uint_t cmih_coreid;			/* Core within die */
	uint_t cmih_strandid;			/* Thread within core */
	uint_t cmih_procnodes_per_pkg;		/* Nodes in a processor */
	boolean_t cmih_mstrand;			/* cores are multithreaded */
	volatile uint32_t *cmih_refcntp;	/* Reference count pointer */
	uint64_t cmih_msrsrc;			/* MSR data source flags */
	void *cmih_hdlpriv;			/* cmi_hw.c private data */
	void *cmih_spec;			/* cmi_hdl_{set,get}_specific */
	void *cmih_cmi;				/* cpu mod control structure */
	void *cmih_cmidata;			/* cpu mod private data */
	const struct cmi_mc_ops *cmih_mcops;	/* Memory-controller ops */
	void *cmih_mcdata;			/* Memory-controller data */
	uint64_t cmih_flags;			/* See CMIH_F_* below */
	uint16_t cmih_smbiosid;			/* SMBIOS Type 4 struct ID */
	uint_t cmih_smb_chipid;			/* SMBIOS factored chipid */
	nvlist_t *cmih_smb_bboard;		/* SMBIOS bboard nvlist */
} cmi_hdl_impl_t;
96 
97 #define	IMPLHDL(ophdl)	((cmi_hdl_impl_t *)ophdl)
98 #define	HDLOPS(hdl)	((hdl)->cmih_ops)
99 
100 #define	CMIH_F_INJACTV		0x1ULL
101 #define	CMIH_F_DEAD		0x2ULL
102 
103 /*
104  * Ops structure for handle operations.
105  */
struct cmi_hdl_ops {
	/*
	 * These ops are required in an implementation; every handle
	 * class (native, xpv) must supply all of them.
	 */
	uint_t (*cmio_vendor)(cmi_hdl_impl_t *);
	const char *(*cmio_vendorstr)(cmi_hdl_impl_t *);
	uint_t (*cmio_family)(cmi_hdl_impl_t *);
	uint_t (*cmio_model)(cmi_hdl_impl_t *);
	uint_t (*cmio_stepping)(cmi_hdl_impl_t *);
	uint_t (*cmio_chipid)(cmi_hdl_impl_t *);
	uint_t (*cmio_procnodeid)(cmi_hdl_impl_t *);
	uint_t (*cmio_coreid)(cmi_hdl_impl_t *);
	uint_t (*cmio_strandid)(cmi_hdl_impl_t *);
	uint_t (*cmio_procnodes_per_pkg)(cmi_hdl_impl_t *);
	uint_t (*cmio_strand_apicid)(cmi_hdl_impl_t *);
	uint32_t (*cmio_chiprev)(cmi_hdl_impl_t *);
	const char *(*cmio_chiprevstr)(cmi_hdl_impl_t *);
	uint32_t (*cmio_getsockettype)(cmi_hdl_impl_t *);
	const char *(*cmio_getsocketstr)(cmi_hdl_impl_t *);
	uint_t (*cmio_chipsig)(cmi_hdl_impl_t *);

	id_t (*cmio_logical_id)(cmi_hdl_impl_t *);
	/*
	 * These ops are optional in an implementation; callers must
	 * check for a NULL pointer before invoking them.
	 */
	ulong_t (*cmio_getcr4)(cmi_hdl_impl_t *);
	void (*cmio_setcr4)(cmi_hdl_impl_t *, ulong_t);
	cmi_errno_t (*cmio_rdmsr)(cmi_hdl_impl_t *, uint_t, uint64_t *);
	cmi_errno_t (*cmio_wrmsr)(cmi_hdl_impl_t *, uint_t, uint64_t);
	cmi_errno_t (*cmio_msrinterpose)(cmi_hdl_impl_t *, uint_t, uint64_t);
	void (*cmio_int)(cmi_hdl_impl_t *, int);
	int (*cmio_online)(cmi_hdl_impl_t *, int, int *);
	uint16_t (*cmio_smbiosid) (cmi_hdl_impl_t *);
	uint_t (*cmio_smb_chipid)(cmi_hdl_impl_t *);
	nvlist_t *(*cmio_smb_bboard)(cmi_hdl_impl_t *);
};
142 
143 static const struct cmi_hdl_ops cmi_hdl_ops;
144 
145 /*
146  * Handles are looked up from contexts such as polling, injection etc
147  * where the context is reasonably well defined (although a poller could
148  * interrupt any old thread holding any old lock).  They are also looked
149  * up by machine check handlers, which may strike at inconvenient times
150  * such as during handle initialization or destruction or during handle
151  * lookup (which the #MC handler itself will also have to perform).
152  *
153  * So keeping handles in a linked list makes locking difficult when we
154  * consider #MC handlers.  Our solution is to have a look-up table indexed
155  * by that which uniquely identifies a handle - chip/core/strand id -
156  * with each entry a structure including a pointer to a handle
157  * structure for the resource, and a reference count for the handle.
158  * Reference counts are modified atomically.  The public cmi_hdl_hold
159  * always succeeds because this can only be used after handle creation
160  * and before the call to destruct, so the hold count is already at least one.
161  * In other functions that lookup a handle (cmi_hdl_lookup, cmi_hdl_any)
 * we must be certain that the count has not already been decremented to zero
163  * before applying our hold.
164  *
165  * The table is an array of maximum number of chips defined in
166  * CMI_CHIPID_ARR_SZ indexed by the chip id. If the chip is not present, the
167  * entry is NULL. Each entry is a pointer to another array which contains a
168  * list of all strands of the chip. This first level table is allocated when
169  * first we want to populate an entry. The size of the latter (per chip) table
170  * is CMI_MAX_STRANDS_PER_CHIP and it is populated when one of its cpus starts.
171  *
172  * Ideally we should only allocate to the actual number of chips, cores per
173  * chip and strand per core. The number of chips is not available until all
174  * of them are passed. The number of cores and strands are partially available.
175  * For now we stick with the above approach.
176  */
177 #define	CMI_MAX_CHIPID_NBITS		6	/* max chipid of 63 */
178 #define	CMI_MAX_CORES_PER_CHIP_NBITS	4	/* 16 cores per chip max */
179 #define	CMI_MAX_STRANDS_PER_CORE_NBITS	3	/* 8 strands per core max */
180 
181 #define	CMI_MAX_CHIPID			((1 << (CMI_MAX_CHIPID_NBITS)) - 1)
182 #define	CMI_MAX_CORES_PER_CHIP(cbits)	(1 << (cbits))
183 #define	CMI_MAX_COREID(cbits)		((1 << (cbits)) - 1)
184 #define	CMI_MAX_STRANDS_PER_CORE(sbits)	(1 << (sbits))
185 #define	CMI_MAX_STRANDID(sbits)		((1 << (sbits)) - 1)
186 #define	CMI_MAX_STRANDS_PER_CHIP(cbits, sbits)	\
187 	(CMI_MAX_CORES_PER_CHIP(cbits) * CMI_MAX_STRANDS_PER_CORE(sbits))
188 
189 #define	CMI_CHIPID_ARR_SZ		(1 << CMI_MAX_CHIPID_NBITS)
190 
/*
 * Per-strand entry in the handle lookup table; the refcnt is updated
 * atomically (see block comment above).
 */
typedef struct cmi_hdl_ent {
	volatile uint32_t cmae_refcnt;	/* Reference count for cmae_hdlp */
	cmi_hdl_impl_t *cmae_hdlp;	/* Handle for this strand, or NULL */
} cmi_hdl_ent_t;
195 
196 static cmi_hdl_ent_t *cmi_chip_tab[CMI_CHIPID_ARR_SZ];
197 
198 /*
199  * Default values for the number of core and strand bits.
200  */
201 uint_t cmi_core_nbits = CMI_MAX_CORES_PER_CHIP_NBITS;
202 uint_t cmi_strand_nbits = CMI_MAX_STRANDS_PER_CORE_NBITS;
203 static int cmi_ext_topo_check = 0;
204 
205 /*
206  * Controls where we will source PCI config space data.
207  */
/*
 * Flags controlling whether PCI config reads/writes may go to real
 * hardware and/or consult the interposition hash.  Hex constants
 * normalized to lowercase 0x for consistency with the rest of the file.
 */
#define	CMI_PCICFG_FLAG_RD_HWOK		0x0001
#define	CMI_PCICFG_FLAG_RD_INTERPOSEOK	0x0002
#define	CMI_PCICFG_FLAG_WR_HWOK		0x0004
#define	CMI_PCICFG_FLAG_WR_INTERPOSEOK	0x0008
212 
213 static uint64_t cmi_pcicfg_flags =
214     CMI_PCICFG_FLAG_RD_HWOK | CMI_PCICFG_FLAG_RD_INTERPOSEOK |
215     CMI_PCICFG_FLAG_WR_HWOK | CMI_PCICFG_FLAG_WR_INTERPOSEOK;
216 
217 /*
218  * The flags for individual cpus are kept in their per-cpu handle cmih_msrsrc
219  */
220 #define	CMI_MSR_FLAG_RD_HWOK		0x0001
221 #define	CMI_MSR_FLAG_RD_INTERPOSEOK	0x0002
222 #define	CMI_MSR_FLAG_WR_HWOK		0x0004
223 #define	CMI_MSR_FLAG_WR_INTERPOSEOK	0x0008
224 
225 int cmi_call_func_ntv_tries = 3;
226 
/*
 * Run 'func' on the cpu identified by cpuid.  If the target is the
 * current cpu the function is invoked directly (preemption is disabled
 * so we cannot migrate mid-call); otherwise a priority cross-call is
 * attempted up to cmi_call_func_ntv_tries times.  Returns the
 * cmi_errno_t that 'func' stored through its third argument, or
 * CMIERR_DEADLOCK if the cross-call never ran.
 */
static cmi_errno_t
call_func_ntv(int cpuid, xc_func_t func, xc_arg_t arg1, xc_arg_t arg2)
{
	cmi_errno_t rc = -1;	/* sentinel: func has not run yet */
	int i;

	kpreempt_disable();

	if (CPU->cpu_id == cpuid) {
		(*func)(arg1, arg2, (xc_arg_t)&rc);
	} else {
		/*
		 * This should not happen for a #MC trap or a poll, so
		 * this is likely an error injection or similar.
		 * We will try to cross call with xc_trycall - we
		 * can't guarantee success with xc_call because
		 * the interrupt code in the case of a #MC may
		 * already hold the xc mutex.
		 */
		for (i = 0; i < cmi_call_func_ntv_tries; i++) {
			cpuset_t cpus;

			CPUSET_ONLY(cpus, cpuid);
			xc_priority(arg1, arg2, (xc_arg_t)&rc,
			    CPUSET2BV(cpus), func);
			if (rc != -1)	/* func ran and set a status */
				break;

			DELAY(1);
		}
	}

	kpreempt_enable();

	return (rc != -1 ? rc : CMIERR_DEADLOCK);
}
263 
264 static uint64_t injcnt;
265 
266 void
267 cmi_hdl_inj_begin(cmi_hdl_t ophdl)
268 {
269 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
270 
271 	if (hdl != NULL)
272 		hdl->cmih_flags |= CMIH_F_INJACTV;
273 	if (injcnt++ == 0) {
274 		cmn_err(CE_NOTE, "Hardware error injection/simulation "
275 		    "activity noted");
276 	}
277 }
278 
279 void
280 cmi_hdl_inj_end(cmi_hdl_t ophdl)
281 {
282 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
283 
284 	ASSERT(hdl == NULL || hdl->cmih_flags & CMIH_F_INJACTV);
285 	if (hdl != NULL)
286 		hdl->cmih_flags &= ~CMIH_F_INJACTV;
287 }
288 
289 boolean_t
290 cmi_inj_tainted(void)
291 {
292 	return (injcnt != 0 ? B_TRUE : B_FALSE);
293 }
294 
295 /*
296  *	 =======================================================
297  *	|	MSR Interposition				|
298  *	|	-----------------				|
299  *	|							|
300  *	 -------------------------------------------------------
301  */
302 
#define	CMI_MSRI_HASHSZ		16
/*
 * Hash on the handle address (low bits dropped, handles being
 * pointer-aligned) plus the MSR number.  The shift amount must be
 * parenthesized separately from the MSR addend: the original form
 * "(uintptr_t)(hdl) >> 3 + (msr)" parses as ">> (3 + (msr))" because
 * '+' binds tighter than '>>', and shifting by >= the word width is
 * undefined behavior for typical MSR numbers.  Use the full table
 * size as the modulus so no bucket is wasted.
 */
#define	CMI_MSRI_HASHIDX(hdl, msr) \
	((((uintptr_t)(hdl) >> 3) + (msr)) % CMI_MSRI_HASHSZ)
306 
/* One hash bucket: a lock protecting a doubly-linked chain of entries. */
struct cmi_msri_bkt {
	kmutex_t msrib_lock;
	struct cmi_msri_hashent *msrib_head;
};
311 
/* An interposed MSR value, keyed by (handle, MSR number). */
struct cmi_msri_hashent {
	struct cmi_msri_hashent *msrie_next;
	struct cmi_msri_hashent *msrie_prev;
	cmi_hdl_impl_t *msrie_hdl;	/* handle the value applies to */
	uint_t msrie_msrnum;		/* MSR number */
	uint64_t msrie_msrval;		/* interposed value */
};
319 
320 #define	CMI_MSRI_MATCH(ent, hdl, req_msr) \
321 	((ent)->msrie_hdl == (hdl) && (ent)->msrie_msrnum == (req_msr))
322 
323 static struct cmi_msri_bkt msrihash[CMI_MSRI_HASHSZ];
324 
325 static void
326 msri_addent(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
327 {
328 	int idx = CMI_MSRI_HASHIDX(hdl, msr);
329 	struct cmi_msri_bkt *hbp = &msrihash[idx];
330 	struct cmi_msri_hashent *hep;
331 
332 	mutex_enter(&hbp->msrib_lock);
333 
334 	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
335 		if (CMI_MSRI_MATCH(hep, hdl, msr))
336 			break;
337 	}
338 
339 	if (hep != NULL) {
340 		hep->msrie_msrval = val;
341 	} else {
342 		hep = kmem_alloc(sizeof (*hep), KM_SLEEP);
343 		hep->msrie_hdl = hdl;
344 		hep->msrie_msrnum = msr;
345 		hep->msrie_msrval = val;
346 
347 		if (hbp->msrib_head != NULL)
348 			hbp->msrib_head->msrie_prev = hep;
349 		hep->msrie_next = hbp->msrib_head;
350 		hep->msrie_prev = NULL;
351 		hbp->msrib_head = hep;
352 	}
353 
354 	mutex_exit(&hbp->msrib_lock);
355 }
356 
357 /*
358  * Look for a match for the given hanlde and msr.  Return 1 with valp
359  * filled if a match is found, otherwise return 0 with valp untouched.
360  */
361 static int
362 msri_lookup(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
363 {
364 	int idx = CMI_MSRI_HASHIDX(hdl, msr);
365 	struct cmi_msri_bkt *hbp = &msrihash[idx];
366 	struct cmi_msri_hashent *hep;
367 
368 	/*
369 	 * This function is called during #MC trap handling, so we should
370 	 * consider the possibility that the hash mutex is held by the
371 	 * interrupted thread.  This should not happen because interposition
372 	 * is an artificial injection mechanism and the #MC is requested
373 	 * after adding entries, but just in case of a real #MC at an
374 	 * unlucky moment we'll use mutex_tryenter here.
375 	 */
376 	if (!mutex_tryenter(&hbp->msrib_lock))
377 		return (0);
378 
379 	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
380 		if (CMI_MSRI_MATCH(hep, hdl, msr)) {
381 			*valp = hep->msrie_msrval;
382 			break;
383 		}
384 	}
385 
386 	mutex_exit(&hbp->msrib_lock);
387 
388 	return (hep != NULL);
389 }
390 
391 /*
392  * Remove any interposed value that matches.
393  */
394 static void
395 msri_rment(cmi_hdl_impl_t *hdl, uint_t msr)
396 {
397 
398 	int idx = CMI_MSRI_HASHIDX(hdl, msr);
399 	struct cmi_msri_bkt *hbp = &msrihash[idx];
400 	struct cmi_msri_hashent *hep;
401 
402 	if (!mutex_tryenter(&hbp->msrib_lock))
403 		return;
404 
405 	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
406 		if (CMI_MSRI_MATCH(hep, hdl, msr)) {
407 			if (hep->msrie_prev != NULL)
408 				hep->msrie_prev->msrie_next = hep->msrie_next;
409 
410 			if (hep->msrie_next != NULL)
411 				hep->msrie_next->msrie_prev = hep->msrie_prev;
412 
413 			if (hbp->msrib_head == hep)
414 				hbp->msrib_head = hep->msrie_next;
415 
416 			kmem_free(hep, sizeof (*hep));
417 			break;
418 		}
419 	}
420 
421 	mutex_exit(&hbp->msrib_lock);
422 }
423 
424 /*
425  *	 =======================================================
426  *	|	PCI Config Space Interposition			|
427  *	|	------------------------------			|
428  *	|							|
429  *	 -------------------------------------------------------
430  */
431 
432 /*
433  * Hash for interposed PCI config space values.  We lookup on bus/dev/fun/offset
434  * and then record whether the value stashed was made with a byte, word or
435  * doubleword access;  we will only return a hit for an access of the
436  * same size.  If you access say a 32-bit register using byte accesses
437  * and then attempt to read the full 32-bit value back you will not obtain
438  * any sort of merged result - you get a lookup miss.
439  */
440 
#define	CMI_PCII_HASHSZ		16
/*
 * Hash bus/dev/func/offset into a bucket index.  Use the full table
 * size as the modulus - the previous "% (CMI_PCII_HASHSZ - 1)" left
 * the final bucket permanently unused.
 */
#define	CMI_PCII_HASHIDX(b, d, f, o) \
	(((b) + (d) + (f) + (o)) % CMI_PCII_HASHSZ)
444 
/* One hash bucket: a lock protecting a doubly-linked chain of entries. */
struct cmi_pcii_bkt {
	kmutex_t pciib_lock;
	struct cmi_pcii_hashent *pciib_head;
};
449 
/*
 * An interposed PCI config value, keyed by bus/dev/func/offset and the
 * access size used to stash it (1, 2 or 4 bytes).
 */
struct cmi_pcii_hashent {
	struct cmi_pcii_hashent *pcii_next;
	struct cmi_pcii_hashent *pcii_prev;
	int pcii_bus;
	int pcii_dev;
	int pcii_func;
	int pcii_reg;
	int pcii_asize;		/* access size the value was stashed with */
	uint32_t pcii_val;
};
460 
461 #define	CMI_PCII_MATCH(ent, b, d, f, r, asz) \
462 	((ent)->pcii_bus == (b) && (ent)->pcii_dev == (d) && \
463 	(ent)->pcii_func == (f) && (ent)->pcii_reg == (r) && \
464 	(ent)->pcii_asize == (asz))
465 
466 static struct cmi_pcii_bkt pciihash[CMI_PCII_HASHSZ];
467 
468 
469 /*
470  * Add a new entry to the PCI interpose hash, overwriting any existing
471  * entry that is found.
472  */
473 static void
474 pcii_addent(int bus, int dev, int func, int reg, uint32_t val, int asz)
475 {
476 	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
477 	struct cmi_pcii_bkt *hbp = &pciihash[idx];
478 	struct cmi_pcii_hashent *hep;
479 
480 	cmi_hdl_inj_begin(NULL);
481 
482 	mutex_enter(&hbp->pciib_lock);
483 
484 	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
485 		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz))
486 			break;
487 	}
488 
489 	if (hep != NULL) {
490 		hep->pcii_val = val;
491 	} else {
492 		hep = kmem_alloc(sizeof (*hep), KM_SLEEP);
493 		hep->pcii_bus = bus;
494 		hep->pcii_dev = dev;
495 		hep->pcii_func = func;
496 		hep->pcii_reg = reg;
497 		hep->pcii_asize = asz;
498 		hep->pcii_val = val;
499 
500 		if (hbp->pciib_head != NULL)
501 			hbp->pciib_head->pcii_prev = hep;
502 		hep->pcii_next = hbp->pciib_head;
503 		hep->pcii_prev = NULL;
504 		hbp->pciib_head = hep;
505 	}
506 
507 	mutex_exit(&hbp->pciib_lock);
508 
509 	cmi_hdl_inj_end(NULL);
510 }
511 
512 /*
513  * Look for a match for the given bus/dev/func/reg; return 1 with valp
514  * filled if a match is found, otherwise return 0 with valp untouched.
515  */
516 static int
517 pcii_lookup(int bus, int dev, int func, int reg, int asz, uint32_t *valp)
518 {
519 	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
520 	struct cmi_pcii_bkt *hbp = &pciihash[idx];
521 	struct cmi_pcii_hashent *hep;
522 
523 	if (!mutex_tryenter(&hbp->pciib_lock))
524 		return (0);
525 
526 	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
527 		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz)) {
528 			*valp = hep->pcii_val;
529 			break;
530 		}
531 	}
532 
533 	mutex_exit(&hbp->pciib_lock);
534 
535 	return (hep != NULL);
536 }
537 
538 static void
539 pcii_rment(int bus, int dev, int func, int reg, int asz)
540 {
541 	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
542 	struct cmi_pcii_bkt *hbp = &pciihash[idx];
543 	struct cmi_pcii_hashent *hep;
544 
545 	mutex_enter(&hbp->pciib_lock);
546 
547 	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
548 		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz)) {
549 			if (hep->pcii_prev != NULL)
550 				hep->pcii_prev->pcii_next = hep->pcii_next;
551 
552 			if (hep->pcii_next != NULL)
553 				hep->pcii_next->pcii_prev = hep->pcii_prev;
554 
555 			if (hbp->pciib_head == hep)
556 				hbp->pciib_head = hep->pcii_next;
557 
558 			kmem_free(hep, sizeof (*hep));
559 			break;
560 		}
561 	}
562 
563 	mutex_exit(&hbp->pciib_lock);
564 }
565 
566 #ifndef __xpv
567 
568 /*
569  *	 =======================================================
570  *	|	Native methods					|
571  *	|	--------------					|
572  *	|							|
573  *	| These are used when we are running native on bare-	|
574  *	| metal, or simply don't know any better.		|
575  *	---------------------------------------------------------
576  */
577 
578 #define	HDLPRIV(hdl)	((cpu_t *)(hdl)->cmih_hdlpriv)
579 
580 static uint_t
581 ntv_vendor(cmi_hdl_impl_t *hdl)
582 {
583 	return (cpuid_getvendor(HDLPRIV(hdl)));
584 }
585 
586 static const char *
587 ntv_vendorstr(cmi_hdl_impl_t *hdl)
588 {
589 	return (cpuid_getvendorstr(HDLPRIV(hdl)));
590 }
591 
592 static uint_t
593 ntv_family(cmi_hdl_impl_t *hdl)
594 {
595 	return (cpuid_getfamily(HDLPRIV(hdl)));
596 }
597 
598 static uint_t
599 ntv_model(cmi_hdl_impl_t *hdl)
600 {
601 	return (cpuid_getmodel(HDLPRIV(hdl)));
602 }
603 
604 static uint_t
605 ntv_stepping(cmi_hdl_impl_t *hdl)
606 {
607 	return (cpuid_getstep(HDLPRIV(hdl)));
608 }
609 
610 static uint_t
611 ntv_chipid(cmi_hdl_impl_t *hdl)
612 {
613 	return (hdl->cmih_chipid);
614 
615 }
616 
617 static uint_t
618 ntv_procnodeid(cmi_hdl_impl_t *hdl)
619 {
620 	return (hdl->cmih_procnodeid);
621 }
622 
623 static uint_t
624 ntv_procnodes_per_pkg(cmi_hdl_impl_t *hdl)
625 {
626 	return (hdl->cmih_procnodes_per_pkg);
627 }
628 
629 static uint_t
630 ntv_coreid(cmi_hdl_impl_t *hdl)
631 {
632 	return (hdl->cmih_coreid);
633 }
634 
635 static uint_t
636 ntv_strandid(cmi_hdl_impl_t *hdl)
637 {
638 	return (hdl->cmih_strandid);
639 }
640 
641 static uint_t
642 ntv_strand_apicid(cmi_hdl_impl_t *hdl)
643 {
644 	return (cpuid_get_apicid(HDLPRIV(hdl)));
645 }
646 
647 static uint16_t
648 ntv_smbiosid(cmi_hdl_impl_t *hdl)
649 {
650 	return (hdl->cmih_smbiosid);
651 }
652 
653 static uint_t
654 ntv_smb_chipid(cmi_hdl_impl_t *hdl)
655 {
656 	return (hdl->cmih_smb_chipid);
657 }
658 
659 static nvlist_t *
660 ntv_smb_bboard(cmi_hdl_impl_t *hdl)
661 {
662 	return (hdl->cmih_smb_bboard);
663 }
664 
665 static uint32_t
666 ntv_chiprev(cmi_hdl_impl_t *hdl)
667 {
668 	return (cpuid_getchiprev(HDLPRIV(hdl)));
669 }
670 
671 static const char *
672 ntv_chiprevstr(cmi_hdl_impl_t *hdl)
673 {
674 	return (cpuid_getchiprevstr(HDLPRIV(hdl)));
675 }
676 
677 static uint32_t
678 ntv_getsockettype(cmi_hdl_impl_t *hdl)
679 {
680 	return (cpuid_getsockettype(HDLPRIV(hdl)));
681 }
682 
683 static const char *
684 ntv_getsocketstr(cmi_hdl_impl_t *hdl)
685 {
686 	return (cpuid_getsocketstr(HDLPRIV(hdl)));
687 }
688 
689 static uint_t
690 ntv_chipsig(cmi_hdl_impl_t *hdl)
691 {
692 	return (cpuid_getsig(HDLPRIV(hdl)));
693 }
694 
695 static id_t
696 ntv_logical_id(cmi_hdl_impl_t *hdl)
697 {
698 	return (HDLPRIV(hdl)->cpu_id);
699 }
700 
701 /*ARGSUSED*/
702 static int
703 ntv_getcr4_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
704 {
705 	ulong_t *dest = (ulong_t *)arg1;
706 	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
707 
708 	*dest = getcr4();
709 	*rcp = CMI_SUCCESS;
710 
711 	return (0);
712 }
713 
714 static ulong_t
715 ntv_getcr4(cmi_hdl_impl_t *hdl)
716 {
717 	cpu_t *cp = HDLPRIV(hdl);
718 	ulong_t val;
719 
720 	(void) call_func_ntv(cp->cpu_id, ntv_getcr4_xc, (xc_arg_t)&val, NULL);
721 
722 	return (val);
723 }
724 
725 /*ARGSUSED*/
726 static int
727 ntv_setcr4_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
728 {
729 	ulong_t val = (ulong_t)arg1;
730 	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
731 
732 	setcr4(val);
733 	*rcp = CMI_SUCCESS;
734 
735 	return (0);
736 }
737 
738 static void
739 ntv_setcr4(cmi_hdl_impl_t *hdl, ulong_t val)
740 {
741 	cpu_t *cp = HDLPRIV(hdl);
742 
743 	(void) call_func_ntv(cp->cpu_id, ntv_setcr4_xc, (xc_arg_t)val, NULL);
744 }
745 
746 volatile uint32_t cmi_trapped_rdmsr;
747 
/*
 * Cross-call target: read the MSR in arg1 into the location in arg2 on
 * the current cpu.  The read is bracketed with on_trap so a #GP from an
 * unimplemented MSR is survived and reported as CMIERR_MSRGPF rather
 * than panicking; such traps are also counted in cmi_trapped_rdmsr.
 */
/*ARGSUSED*/
static int
ntv_rdmsr_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	uint_t msr = (uint_t)arg1;
	uint64_t *valp = (uint64_t *)arg2;
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;

	on_trap_data_t otd;

	if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
		if (checked_rdmsr(msr, valp) == 0)
			*rcp = CMI_SUCCESS;
		else
			*rcp = CMIERR_NOTSUP;
	} else {
		/* Trapped: the rdmsr raised #GP. */
		*rcp = CMIERR_MSRGPF;
		atomic_inc_32(&cmi_trapped_rdmsr);
	}
	no_trap();

	return (0);
}
771 
772 static cmi_errno_t
773 ntv_rdmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
774 {
775 	cpu_t *cp = HDLPRIV(hdl);
776 
777 	if (!(hdl->cmih_msrsrc & CMI_MSR_FLAG_RD_HWOK))
778 		return (CMIERR_INTERPOSE);
779 
780 	return (call_func_ntv(cp->cpu_id, ntv_rdmsr_xc,
781 	    (xc_arg_t)msr, (xc_arg_t)valp));
782 }
783 
784 volatile uint32_t cmi_trapped_wrmsr;
785 
/*
 * Cross-call target: write the 64-bit value pointed to by arg2 to the
 * MSR in arg1 on the current cpu.  Like ntv_rdmsr_xc, the write is
 * bracketed with on_trap so a #GP is survived and reported as
 * CMIERR_MSRGPF; such traps are counted in cmi_trapped_wrmsr.
 */
/*ARGSUSED*/
static int
ntv_wrmsr_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	uint_t msr = (uint_t)arg1;
	uint64_t val = *((uint64_t *)arg2);
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
	on_trap_data_t otd;

	if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
		if (checked_wrmsr(msr, val) == 0)
			*rcp = CMI_SUCCESS;
		else
			*rcp = CMIERR_NOTSUP;
	} else {
		/* Trapped: the wrmsr raised #GP. */
		*rcp = CMIERR_MSRGPF;
		atomic_inc_32(&cmi_trapped_wrmsr);
	}
	no_trap();

	return (0);

}
809 
810 static cmi_errno_t
811 ntv_wrmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
812 {
813 	cpu_t *cp = HDLPRIV(hdl);
814 
815 	if (!(hdl->cmih_msrsrc & CMI_MSR_FLAG_WR_HWOK))
816 		return (CMI_SUCCESS);
817 
818 	return (call_func_ntv(cp->cpu_id, ntv_wrmsr_xc,
819 	    (xc_arg_t)msr, (xc_arg_t)&val));
820 }
821 
822 static cmi_errno_t
823 ntv_msrinterpose(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
824 {
825 	msri_addent(hdl, msr, val);
826 	return (CMI_SUCCESS);
827 }
828 
829 /*ARGSUSED*/
830 static int
831 ntv_int_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
832 {
833 	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
834 	int int_no = (int)arg1;
835 
836 	if (int_no == T_MCE)
837 		int18();
838 	else
839 		int_cmci();
840 	*rcp = CMI_SUCCESS;
841 
842 	return (0);
843 }
844 
845 static void
846 ntv_int(cmi_hdl_impl_t *hdl, int int_no)
847 {
848 	cpu_t *cp = HDLPRIV(hdl);
849 
850 	(void) call_func_ntv(cp->cpu_id, ntv_int_xc, (xc_arg_t)int_no, NULL);
851 }
852 
/*
 * Change the online/offline status of the cpu backing this handle.
 * cpu_lock is acquired with a tryenter loop rather than a blocking
 * mutex_enter: if the handle is torn down (CMIH_F_DEAD set) while we
 * are spinning we bail out with EBUSY instead of deadlocking against
 * the teardown path.
 */
static int
ntv_online(cmi_hdl_impl_t *hdl, int new_status, int *old_status)
{
	int rc;
	processorid_t cpuid = HDLPRIV(hdl)->cpu_id;

	while (mutex_tryenter(&cpu_lock) == 0) {
		if (hdl->cmih_flags & CMIH_F_DEAD)
			return (EBUSY);
		delay(1);
	}
	rc = p_online_internal_locked(cpuid, new_status, old_status);
	mutex_exit(&cpu_lock);

	return (rc);
}
869 
870 #else	/* __xpv */
871 
872 /*
873  *	 =======================================================
874  *	|	xVM dom0 methods				|
875  *	|	----------------				|
876  *	|							|
877  *	| These are used when we are running as dom0 in		|
878  *	| a Solaris xVM context.				|
879  *	---------------------------------------------------------
880  */
881 
882 #define	HDLPRIV(hdl)	((xen_mc_lcpu_cookie_t)(hdl)->cmih_hdlpriv)
883 
884 extern uint_t _cpuid_vendorstr_to_vendorcode(char *);
885 
886 
887 static uint_t
888 xpv_vendor(cmi_hdl_impl_t *hdl)
889 {
890 	return (_cpuid_vendorstr_to_vendorcode((char *)xen_physcpu_vendorstr(
891 	    HDLPRIV(hdl))));
892 }
893 
894 static const char *
895 xpv_vendorstr(cmi_hdl_impl_t *hdl)
896 {
897 	return (xen_physcpu_vendorstr(HDLPRIV(hdl)));
898 }
899 
900 static uint_t
901 xpv_family(cmi_hdl_impl_t *hdl)
902 {
903 	return (xen_physcpu_family(HDLPRIV(hdl)));
904 }
905 
906 static uint_t
907 xpv_model(cmi_hdl_impl_t *hdl)
908 {
909 	return (xen_physcpu_model(HDLPRIV(hdl)));
910 }
911 
912 static uint_t
913 xpv_stepping(cmi_hdl_impl_t *hdl)
914 {
915 	return (xen_physcpu_stepping(HDLPRIV(hdl)));
916 }
917 
918 static uint_t
919 xpv_chipid(cmi_hdl_impl_t *hdl)
920 {
921 	return (hdl->cmih_chipid);
922 }
923 
924 static uint_t
925 xpv_procnodeid(cmi_hdl_impl_t *hdl)
926 {
927 	return (hdl->cmih_procnodeid);
928 }
929 
930 static uint_t
931 xpv_procnodes_per_pkg(cmi_hdl_impl_t *hdl)
932 {
933 	return (hdl->cmih_procnodes_per_pkg);
934 }
935 
936 static uint_t
937 xpv_coreid(cmi_hdl_impl_t *hdl)
938 {
939 	return (hdl->cmih_coreid);
940 }
941 
942 static uint_t
943 xpv_strandid(cmi_hdl_impl_t *hdl)
944 {
945 	return (hdl->cmih_strandid);
946 }
947 
948 static uint_t
949 xpv_strand_apicid(cmi_hdl_impl_t *hdl)
950 {
951 	return (xen_physcpu_initial_apicid(HDLPRIV(hdl)));
952 }
953 
954 static uint16_t
955 xpv_smbiosid(cmi_hdl_impl_t *hdl)
956 {
957 	return (hdl->cmih_smbiosid);
958 }
959 
960 static uint_t
961 xpv_smb_chipid(cmi_hdl_impl_t *hdl)
962 {
963 	return (hdl->cmih_smb_chipid);
964 }
965 
966 static nvlist_t *
967 xpv_smb_bboard(cmi_hdl_impl_t *hdl)
968 {
969 	return (hdl->cmih_smb_bboard);
970 }
971 
972 extern uint32_t _cpuid_chiprev(uint_t, uint_t, uint_t, uint_t);
973 
974 static uint32_t
975 xpv_chiprev(cmi_hdl_impl_t *hdl)
976 {
977 	return (_cpuid_chiprev(xpv_vendor(hdl), xpv_family(hdl),
978 	    xpv_model(hdl), xpv_stepping(hdl)));
979 }
980 
981 extern const char *_cpuid_chiprevstr(uint_t, uint_t, uint_t, uint_t);
982 
983 static const char *
984 xpv_chiprevstr(cmi_hdl_impl_t *hdl)
985 {
986 	return (_cpuid_chiprevstr(xpv_vendor(hdl), xpv_family(hdl),
987 	    xpv_model(hdl), xpv_stepping(hdl)));
988 }
989 
990 extern uint32_t _cpuid_skt(uint_t, uint_t, uint_t, uint_t);
991 
992 static uint32_t
993 xpv_getsockettype(cmi_hdl_impl_t *hdl)
994 {
995 	return (_cpuid_skt(xpv_vendor(hdl), xpv_family(hdl),
996 	    xpv_model(hdl), xpv_stepping(hdl)));
997 }
998 
999 extern const char *_cpuid_sktstr(uint_t, uint_t, uint_t, uint_t);
1000 
1001 static const char *
1002 xpv_getsocketstr(cmi_hdl_impl_t *hdl)
1003 {
1004 	return (_cpuid_sktstr(xpv_vendor(hdl), xpv_family(hdl),
1005 	    xpv_model(hdl), xpv_stepping(hdl)));
1006 }
1007 
1008 /* ARGSUSED */
1009 static uint_t
1010 xpv_chipsig(cmi_hdl_impl_t *hdl)
1011 {
1012 	return (0);
1013 }
1014 
1015 static id_t
1016 xpv_logical_id(cmi_hdl_impl_t *hdl)
1017 {
1018 	return (xen_physcpu_logical_id(HDLPRIV(hdl)));
1019 }
1020 
1021 static cmi_errno_t
1022 xpv_rdmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
1023 {
1024 	switch (msr) {
1025 	case IA32_MSR_MCG_CAP:
1026 		*valp = xen_physcpu_mcg_cap(HDLPRIV(hdl));
1027 		break;
1028 
1029 	default:
1030 		return (CMIERR_NOTSUP);
1031 	}
1032 
1033 	return (CMI_SUCCESS);
1034 }
1035 
1036 /*
1037  * Request the hypervisor to write an MSR for us.  The hypervisor
1038  * will only accept MCA-related MSRs, as this is for MCA error
1039  * simulation purposes alone.  We will pre-screen MSRs for injection
1040  * so we don't bother the HV with bogus requests.  We will permit
1041  * injection to any MCA bank register, and to MCG_STATUS.
1042  */
1043 
1044 #define	IS_MCA_INJ_MSR(msr) \
1045 	(((msr) >= IA32_MSR_MC(0, CTL) && (msr) <= IA32_MSR_MC(10, MISC)) || \
1046 	(msr) == IA32_MSR_MCG_STATUS)
1047 
/*
 * Common body for xpv_wrmsr and xpv_msrinterpose: build an
 * XEN_MC_msrinject hypercall for a single MSR.  Only permitted while
 * injection is active on the handle, only for MCA bank registers or
 * MCG_STATUS (pre-screened by IS_MCA_INJ_MSR above), and never while
 * panicking (the hypercall could block).
 */
static cmi_errno_t
xpv_wrmsr_cmn(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val, boolean_t intpose)
{
	xen_mc_t xmc;
	struct xen_mc_msrinject *mci = &xmc.u.mc_msrinject;

	if (!(hdl->cmih_flags & CMIH_F_INJACTV))
		return (CMIERR_NOTSUP);		/* for injection use only! */

	if (!IS_MCA_INJ_MSR(msr))
		return (CMIERR_API);

	if (panicstr)
		return (CMIERR_DEADLOCK);

	mci->mcinj_cpunr = xen_physcpu_logical_id(HDLPRIV(hdl));
	mci->mcinj_flags = intpose ? MC_MSRINJ_F_INTERPOSE : 0;
	mci->mcinj_count = 1;	/* learn to batch sometime */
	mci->mcinj_msr[0].reg = msr;
	mci->mcinj_msr[0].value = val;

	return (HYPERVISOR_mca(XEN_MC_msrinject, &xmc) ==
	    0 ?  CMI_SUCCESS : CMIERR_NOTSUP);
}
1072 
1073 static cmi_errno_t
1074 xpv_wrmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
1075 {
1076 	return (xpv_wrmsr_cmn(hdl, msr, val, B_FALSE));
1077 }
1078 
1079 
1080 static cmi_errno_t
1081 xpv_msrinterpose(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
1082 {
1083 	return (xpv_wrmsr_cmn(hdl, msr, val, B_TRUE));
1084 }
1085 
/*
 * Ask the hypervisor to inject a machine-check on the physical cpu
 * backing this handle.  Only permitted while injection is active.
 *
 * NOTE(review): for int_no != T_MCE we warn "unimplemented" but then
 * fall through and inject an MCE anyway - confirm whether an early
 * return was intended here.
 */
static void
xpv_int(cmi_hdl_impl_t *hdl, int int_no)
{
	xen_mc_t xmc;
	struct xen_mc_mceinject *mce = &xmc.u.mc_mceinject;

	if (!(hdl->cmih_flags & CMIH_F_INJACTV))
		return;

	if (int_no != T_MCE) {
		cmn_err(CE_WARN, "xpv_int: int_no %d unimplemented\n",
		    int_no);
	}

	mce->mceinj_cpunr = xen_physcpu_logical_id(HDLPRIV(hdl));

	(void) HYPERVISOR_mca(XEN_MC_mceinject, &xmc);
}
1104 
/*
 * Change (or query, via P_STATUS) the hotplug state of a physical cpu
 * through the hypervisor sysctl interface.  The Solaris P_* states are
 * translated to/from Xen hotplug ops and status codes; P_FAULTED and
 * P_OFFLINE both map to the Xen offline op.  A non-negative hypercall
 * result is the previous Xen status; it is translated back into
 * *old_status.  Returns 0 on success, -1 on an untranslatable state,
 * otherwise the negated hypercall error.
 */
static int
xpv_online(cmi_hdl_impl_t *hdl, int new_status, int *old_status)
{
	xen_sysctl_t xs;
	int op, rc, status;

	new_status &= ~P_FORCED;

	switch (new_status) {
	case P_STATUS:
		op = XEN_SYSCTL_CPU_HOTPLUG_STATUS;
		break;
	case P_FAULTED:
	case P_OFFLINE:
		op = XEN_SYSCTL_CPU_HOTPLUG_OFFLINE;
		break;
	case P_ONLINE:
		op = XEN_SYSCTL_CPU_HOTPLUG_ONLINE;
		break;
	default:
		return (-1);
	}

	xs.cmd = XEN_SYSCTL_cpu_hotplug;
	xs.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
	xs.u.cpu_hotplug.cpu = xen_physcpu_logical_id(HDLPRIV(hdl));
	xs.u.cpu_hotplug.op = op;

	if ((rc = HYPERVISOR_sysctl(&xs)) >= 0) {
		status = rc;
		rc = 0;
		switch (status) {
		case XEN_CPU_HOTPLUG_STATUS_NEW:
			*old_status = P_OFFLINE;
			break;
		case XEN_CPU_HOTPLUG_STATUS_OFFLINE:
			*old_status = P_FAULTED;
			break;
		case XEN_CPU_HOTPLUG_STATUS_ONLINE:
			*old_status = P_ONLINE;
			break;
		default:
			return (-1);
		}
	}

	return (-rc);
}
1153 
1154 #endif
1155 
/*ARGSUSED*/
/*
 * Locate the cpu identified by a <chipid, coreid, strandid> tuple and
 * return its class-specific private pointer: a physical cpu cookie
 * when running as an xVM dom0, or the cpu_t when native.  Returns
 * NULL if no matching cpu is found.
 */
static void *
cpu_search(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
    uint_t strandid)
{
#ifdef __xpv
	xen_mc_lcpu_cookie_t cpi;

	/* Walk the hypervisor's list of physical cpus. */
	for (cpi = xen_physcpu_next(NULL); cpi != NULL;
	    cpi = xen_physcpu_next(cpi)) {
		if (xen_physcpu_chipid(cpi) == chipid &&
		    xen_physcpu_coreid(cpi) == coreid &&
		    xen_physcpu_strandid(cpi) == strandid)
			return ((void *)cpi);
	}
	return (NULL);

#else	/* __xpv */

	cpu_t *cp, *startcp;

	/* Hold off preemption while traversing the cpu_next ring. */
	kpreempt_disable();
	cp = startcp = CPU;
	do {
		if (cmi_ntv_hwchipid(cp) == chipid &&
		    cmi_ntv_hwcoreid(cp) == coreid &&
		    cmi_ntv_hwstrandid(cp) == strandid) {
			kpreempt_enable();
			return ((void *)cp);
		}

		cp = cp->cpu_next;
	} while (cp != startcp);
	kpreempt_enable();
	return (NULL);
#endif	/* __xpv */
}
1193 
1194 static boolean_t
1195 cpu_is_cmt(void *priv)
1196 {
1197 #ifdef __xpv
1198 	return (xen_physcpu_is_cmt((xen_mc_lcpu_cookie_t)priv));
1199 #else /* __xpv */
1200 	cpu_t *cp = (cpu_t *)priv;
1201 
1202 	int strands_per_core = cpuid_get_ncpu_per_chip(cp) /
1203 	    cpuid_get_ncore_per_chip(cp);
1204 
1205 	return (strands_per_core > 1);
1206 #endif /* __xpv */
1207 }
1208 
1209 /*
1210  * Find the handle entry of a given cpu identified by a <chip,core,strand>
1211  * tuple.
1212  */
1213 static cmi_hdl_ent_t *
1214 cmi_hdl_ent_lookup(uint_t chipid, uint_t coreid, uint_t strandid)
1215 {
1216 	int max_strands = CMI_MAX_STRANDS_PER_CHIP(cmi_core_nbits,
1217 	    cmi_strand_nbits);
1218 
1219 	/*
1220 	 * Allocate per-chip table which contains a list of handle of
1221 	 * all strands of the chip.
1222 	 */
1223 	if (cmi_chip_tab[chipid] == NULL) {
1224 		size_t sz;
1225 		cmi_hdl_ent_t *pg;
1226 
1227 		sz = max_strands * sizeof (cmi_hdl_ent_t);
1228 		pg = kmem_zalloc(sz, KM_SLEEP);
1229 
1230 		/* test and set the per-chip table if it is not allocated */
1231 		if (atomic_cas_ptr(&cmi_chip_tab[chipid], NULL, pg) != NULL)
1232 			kmem_free(pg, sz); /* someone beats us */
1233 	}
1234 
1235 	return (cmi_chip_tab[chipid] +
1236 	    ((((coreid) & CMI_MAX_COREID(cmi_core_nbits)) << cmi_strand_nbits) |
1237 	    ((strandid) & CMI_MAX_STRANDID(cmi_strand_nbits))));
1238 }
1239 
1240 extern void cpuid_get_ext_topo(cpu_t *, uint_t *, uint_t *);
1241 
/*
 * Create and register a cmi handle for the cpu identified by the
 * <chipid, coreid, strandid> tuple.  Returns the new handle with an
 * initial reference held, or NULL if the cpu cannot be found, the ids
 * exceed the topology limits, or a handle for the tuple already
 * exists.  The handle's cmih_cmi/cmih_cmidata remain NULL until the
 * caller fills them in with cmi_hdl_setcmi().
 */
cmi_hdl_t
cmi_hdl_create(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
    uint_t strandid)
{
	cmi_hdl_impl_t *hdl;
	void *priv;
	cmi_hdl_ent_t *ent;
	uint_t vendor;

#ifdef __xpv
	ASSERT(class == CMI_HDL_SOLARIS_xVM_MCA);
#else
	ASSERT(class == CMI_HDL_NATIVE);
#endif

	if ((priv = cpu_search(class, chipid, coreid, strandid)) == NULL)
		return (NULL);

	/*
	 * Assume all chips in the system are the same type.
	 * For Intel, attempt to check if extended topology is available
	 * CPUID.EAX=0xB. If so, get the number of core and strand bits.
	 */
#ifdef __xpv
	vendor = _cpuid_vendorstr_to_vendorcode(
	    (char *)xen_physcpu_vendorstr((xen_mc_lcpu_cookie_t)priv));
#else
	vendor = cpuid_getvendor((cpu_t *)priv);
#endif

	switch (vendor) {
	case X86_VENDOR_Intel:
	case X86_VENDOR_AMD:
		/* Only probe the extended topology once, system-wide. */
		if (cmi_ext_topo_check == 0) {
			cpuid_get_ext_topo((cpu_t *)priv, &cmi_core_nbits,
			    &cmi_strand_nbits);
			cmi_ext_topo_check = 1;
		}
		/* FALLTHROUGH */
	default:
		break;
	}

	/* Validate the ids against the (possibly updated) bit widths. */
	if (chipid > CMI_MAX_CHIPID ||
	    coreid > CMI_MAX_COREID(cmi_core_nbits) ||
	    strandid > CMI_MAX_STRANDID(cmi_strand_nbits))
		return (NULL);

	hdl = kmem_zalloc(sizeof (*hdl), KM_SLEEP);

	hdl->cmih_class = class;
	HDLOPS(hdl) = &cmi_hdl_ops;
	hdl->cmih_chipid = chipid;
	hdl->cmih_coreid = coreid;
	hdl->cmih_strandid = strandid;
	hdl->cmih_mstrand = cpu_is_cmt(priv);
	hdl->cmih_hdlpriv = priv;
#ifdef __xpv
	hdl->cmih_msrsrc = CMI_MSR_FLAG_RD_INTERPOSEOK |
	    CMI_MSR_FLAG_WR_INTERPOSEOK;

	/*
	 * XXX: need hypervisor support for procnodeid, for now assume
	 * single-node processors (procnodeid = chipid)
	 */
	hdl->cmih_procnodeid = xen_physcpu_chipid((xen_mc_lcpu_cookie_t)priv);
	hdl->cmih_procnodes_per_pkg = 1;
#else   /* __xpv */
	hdl->cmih_msrsrc = CMI_MSR_FLAG_RD_HWOK | CMI_MSR_FLAG_RD_INTERPOSEOK |
	    CMI_MSR_FLAG_WR_HWOK | CMI_MSR_FLAG_WR_INTERPOSEOK;
	hdl->cmih_procnodeid = cpuid_get_procnodeid((cpu_t *)priv);
	hdl->cmih_procnodes_per_pkg =
	    cpuid_get_procnodes_per_pkg((cpu_t *)priv);
#endif  /* __xpv */

	ent = cmi_hdl_ent_lookup(chipid, coreid, strandid);
	if (ent->cmae_refcnt != 0 || ent->cmae_hdlp != NULL) {
		/*
		 * Somehow this (chipid, coreid, strandid) id tuple has
		 * already been assigned!  This indicates that the
		 * callers logic in determining these values is busted,
		 * or perhaps undermined by bad BIOS setup.  Complain,
		 * and refuse to initialize this tuple again as bad things
		 * will happen.
		 */
		cmn_err(CE_NOTE, "cmi_hdl_create: chipid %d coreid %d "
		    "strandid %d handle already allocated!",
		    chipid, coreid, strandid);
		kmem_free(hdl, sizeof (*hdl));
		return (NULL);
	}

	/*
	 * Once we store a nonzero reference count others can find this
	 * handle via cmi_hdl_lookup etc.  This initial hold on the handle
	 * is to be dropped only if some other part of cmi initialization
	 * fails or, if it succeeds, at later cpu deconfigure.  Note the
	 * the module private data we hold in cmih_cmi and cmih_cmidata
	 * is still NULL at this point (the caller will fill it with
	 * cmi_hdl_setcmi if it initializes) so consumers of handles
	 * should always be ready for that possibility.
	 */
	ent->cmae_hdlp = hdl;
	hdl->cmih_refcntp = &ent->cmae_refcnt;
	ent->cmae_refcnt = 1;

	return ((cmi_hdl_t)hdl);
}
1349 
/*
 * Annotate a handle with SMBIOS-derived topology data (chip instance,
 * smbios record id and base-board nvlist), looked up by the strand's
 * apic id.  If the smbios chip lookup fails, topo reverts to legacy
 * mode and the handle is left unannotated.
 */
void
cmi_read_smbios(cmi_hdl_t ophdl)
{

	uint_t strand_apicid = UINT_MAX;
	uint_t chip_inst = UINT_MAX;
	uint16_t smb_id = USHRT_MAX;
	int rc = 0;

	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	/* set x86gentopo compatibility */
	fm_smb_fmacompat();

#ifndef __xpv
	strand_apicid = ntv_strand_apicid(hdl);
#else
	strand_apicid = xpv_strand_apicid(hdl);
#endif

	if (!x86gentopo_legacy) {
		/*
		 * If fm_smb_chipinst() or fm_smb_bboard() fails,
		 * topo reverts to legacy mode
		 */
		rc = fm_smb_chipinst(strand_apicid, &chip_inst, &smb_id);
		if (rc == 0) {
			hdl->cmih_smb_chipid = chip_inst;
			hdl->cmih_smbiosid = smb_id;
		} else {
#ifdef DEBUG
			cmn_err(CE_NOTE, "!cmi reads smbios chip info failed");
#endif /* DEBUG */
			return;
		}

		hdl->cmih_smb_bboard  = fm_smb_bboard(strand_apicid);
#ifdef DEBUG
		if (hdl->cmih_smb_bboard == NULL)
			cmn_err(CE_NOTE,
			    "!cmi reads smbios base boards info failed");
#endif /* DEBUG */
	}
}
1394 
/*
 * Take an additional reference on a handle.  The caller must already
 * hold a reference -- the initial hold is taken in cmi_hdl_create().
 */
void
cmi_hdl_hold(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	ASSERT(*hdl->cmih_refcntp != 0); /* must not be the initial hold */

	atomic_inc_32(hdl->cmih_refcntp);
}
1404 
1405 static int
1406 cmi_hdl_canref(cmi_hdl_ent_t *ent)
1407 {
1408 	volatile uint32_t *refcntp;
1409 	uint32_t refcnt;
1410 
1411 	refcntp = &ent->cmae_refcnt;
1412 	refcnt = *refcntp;
1413 
1414 	if (refcnt == 0) {
1415 		/*
1416 		 * Associated object never existed, is being destroyed,
1417 		 * or has been destroyed.
1418 		 */
1419 		return (0);
1420 	}
1421 
1422 	/*
1423 	 * We cannot use atomic increment here because once the reference
1424 	 * count reaches zero it must never be bumped up again.
1425 	 */
1426 	while (refcnt != 0) {
1427 		if (atomic_cas_32(refcntp, refcnt, refcnt + 1) == refcnt)
1428 			return (1);
1429 		refcnt = *refcntp;
1430 	}
1431 
1432 	/*
1433 	 * Somebody dropped the reference count to 0 after our initial
1434 	 * check.
1435 	 */
1436 	return (0);
1437 }
1438 
1439 
/*
 * Drop one reference on a handle.
 */
void
cmi_hdl_rele(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	ASSERT(*hdl->cmih_refcntp > 0);
	atomic_dec_32(hdl->cmih_refcntp);
}
1448 
/*
 * Tear down a handle: drop the initial reference taken by
 * cmi_hdl_create(), mark it dead, wait for all other references to
 * drain, then detach it from its chip-table entry and free it.
 */
void
cmi_hdl_destroy(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
	cmi_hdl_ent_t *ent;

	/* Release the reference count held by cmi_hdl_create(). */
	ASSERT(*hdl->cmih_refcntp > 0);
	atomic_dec_32(hdl->cmih_refcntp);
	hdl->cmih_flags |= CMIH_F_DEAD;

	ent = cmi_hdl_ent_lookup(hdl->cmih_chipid, hdl->cmih_coreid,
	    hdl->cmih_strandid);
	/*
	 * Use busy polling instead of condition variable here because
	 * cmi_hdl_rele() may be called from #MC handler.
	 */
	while (cmi_hdl_canref(ent)) {
		cmi_hdl_rele(ophdl);
		delay(1);
	}
	ent->cmae_hdlp = NULL;

	kmem_free(hdl, sizeof (*hdl));
}
1474 
/*
 * Attach caller-private data to a handle (opaque to this layer).
 */
void
cmi_hdl_setspecific(cmi_hdl_t ophdl, void *arg)
{
	IMPLHDL(ophdl)->cmih_spec = arg;
}
1480 
/*
 * Retrieve the caller-private data attached with cmi_hdl_setspecific().
 */
void *
cmi_hdl_getspecific(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_spec);
}
1486 
/*
 * Register memory-controller ops and private data on a handle.  May
 * only be done once per handle (asserted).
 */
void
cmi_hdl_setmc(cmi_hdl_t ophdl, const struct cmi_mc_ops *mcops, void *mcdata)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	ASSERT(hdl->cmih_mcops == NULL && hdl->cmih_mcdata == NULL);
	hdl->cmih_mcops = mcops;
	hdl->cmih_mcdata = mcdata;
}
1496 
/*
 * Return the memory-controller ops registered on a handle, if any.
 */
const struct cmi_mc_ops *
cmi_hdl_getmcops(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_mcops);
}
1502 
/*
 * Return the memory-controller private data registered on a handle.
 */
void *
cmi_hdl_getmcdata(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_mcdata);
}
1508 
/*
 * Look up the handle for a <chipid, coreid, strandid> tuple, taking a
 * reference on it.  Returns NULL if the ids are out of range, no live
 * handle exists, or the handle's class does not match the (resolved)
 * requested class.  The caller must cmi_hdl_rele() the result.
 */
cmi_hdl_t
cmi_hdl_lookup(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
    uint_t strandid)
{
	cmi_hdl_ent_t *ent;

	if (chipid > CMI_MAX_CHIPID ||
	    coreid > CMI_MAX_COREID(cmi_core_nbits) ||
	    strandid > CMI_MAX_STRANDID(cmi_strand_nbits))
		return (NULL);

	ent = cmi_hdl_ent_lookup(chipid, coreid, strandid);

	/* A NEUTRAL request resolves to the platform's native class. */
	if (class == CMI_HDL_NEUTRAL)
#ifdef __xpv
		class = CMI_HDL_SOLARIS_xVM_MCA;
#else
		class = CMI_HDL_NATIVE;
#endif

	if (!cmi_hdl_canref(ent))
		return (NULL);

	/* Wrong class: drop the reference we just took. */
	if (ent->cmae_hdlp->cmih_class != class) {
		cmi_hdl_rele((cmi_hdl_t)ent->cmae_hdlp);
		return (NULL);
	}

	return ((cmi_hdl_t)ent->cmae_hdlp);
}
1539 
1540 cmi_hdl_t
1541 cmi_hdl_any(void)
1542 {
1543 	int i, j;
1544 	cmi_hdl_ent_t *ent;
1545 	int max_strands = CMI_MAX_STRANDS_PER_CHIP(cmi_core_nbits,
1546 	    cmi_strand_nbits);
1547 
1548 	for (i = 0; i < CMI_CHIPID_ARR_SZ; i++) {
1549 		if (cmi_chip_tab[i] == NULL)
1550 			continue;
1551 		for (j = 0, ent = cmi_chip_tab[i]; j < max_strands;
1552 		    j++, ent++) {
1553 			if (cmi_hdl_canref(ent))
1554 				return ((cmi_hdl_t)ent->cmae_hdlp);
1555 		}
1556 	}
1557 
1558 	return (NULL);
1559 }
1560 
/*
 * Invoke cbfunc on every live handle in the system, holding a
 * reference across each callback.  The walk stops early if the
 * callback returns CMI_HDL_WALK_DONE.
 */
void
cmi_hdl_walk(int (*cbfunc)(cmi_hdl_t, void *, void *, void *),
    void *arg1, void *arg2, void *arg3)
{
	int i, j;
	cmi_hdl_ent_t *ent;
	int max_strands = CMI_MAX_STRANDS_PER_CHIP(cmi_core_nbits,
	    cmi_strand_nbits);

	for (i = 0; i < CMI_CHIPID_ARR_SZ; i++) {
		if (cmi_chip_tab[i] == NULL)
			continue;
		for (j = 0, ent = cmi_chip_tab[i]; j < max_strands;
		    j++, ent++) {
			/* canref takes the reference we hold during cb */
			if (cmi_hdl_canref(ent)) {
				cmi_hdl_impl_t *hdl = ent->cmae_hdlp;
				if ((*cbfunc)((cmi_hdl_t)hdl, arg1, arg2, arg3)
				    == CMI_HDL_WALK_DONE) {
					cmi_hdl_rele((cmi_hdl_t)hdl);
					return;
				}
				cmi_hdl_rele((cmi_hdl_t)hdl);
			}
		}
	}
}
1587 
/*
 * Record the cpu module and its private data on a handle; set by the
 * cmi framework once module initialization succeeds.
 */
void
cmi_hdl_setcmi(cmi_hdl_t ophdl, void *cmi, void *cmidata)
{
	IMPLHDL(ophdl)->cmih_cmidata = cmidata;
	IMPLHDL(ophdl)->cmih_cmi = cmi;
}
1594 
/*
 * Return the cpu module recorded on a handle (NULL until setcmi).
 */
void *
cmi_hdl_getcmi(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_cmi);
}
1600 
/*
 * Return the cpu module private data recorded on a handle.
 */
void *
cmi_hdl_getcmidata(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_cmidata);
}
1606 
/*
 * Return the class (native or xVM) of a handle.
 */
enum cmi_hdl_class
cmi_hdl_class(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_class);
}
1612 
/*
 * Generate trivial public accessors, cmi_hdl_<what>(), that simply
 * dispatch to the handle's per-class cmio_<what> ops-vector entry.
 */
#define	CMI_HDL_OPFUNC(what, type)				\
	type							\
	cmi_hdl_##what(cmi_hdl_t ophdl)				\
	{							\
		return (HDLOPS(IMPLHDL(ophdl))->		\
		    cmio_##what(IMPLHDL(ophdl)));		\
	}

/* BEGIN CSTYLED */
CMI_HDL_OPFUNC(vendor, uint_t)
CMI_HDL_OPFUNC(vendorstr, const char *)
CMI_HDL_OPFUNC(family, uint_t)
CMI_HDL_OPFUNC(model, uint_t)
CMI_HDL_OPFUNC(stepping, uint_t)
CMI_HDL_OPFUNC(chipid, uint_t)
CMI_HDL_OPFUNC(procnodeid, uint_t)
CMI_HDL_OPFUNC(coreid, uint_t)
CMI_HDL_OPFUNC(strandid, uint_t)
CMI_HDL_OPFUNC(procnodes_per_pkg, uint_t)
CMI_HDL_OPFUNC(strand_apicid, uint_t)
CMI_HDL_OPFUNC(chiprev, uint32_t)
CMI_HDL_OPFUNC(chiprevstr, const char *)
CMI_HDL_OPFUNC(getsockettype, uint32_t)
CMI_HDL_OPFUNC(getsocketstr, const char *)
CMI_HDL_OPFUNC(logical_id, id_t)
CMI_HDL_OPFUNC(smbiosid, uint16_t)
CMI_HDL_OPFUNC(smb_chipid, uint_t)
CMI_HDL_OPFUNC(smb_bboard, nvlist_t *)
CMI_HDL_OPFUNC(chipsig, uint_t)
/* END CSTYLED */
1643 
/*
 * Return whether the handle's cpu is multi-stranded (cached at
 * handle-creation time by cpu_is_cmt()).
 */
boolean_t
cmi_hdl_is_cmt(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_mstrand);
}
1649 
/*
 * Inject interrupt 'num' on the cpu underlying the handle, bracketed
 * by injection begin/end.  No-op if the class has no cmio_int op.
 */
void
cmi_hdl_int(cmi_hdl_t ophdl, int num)
{
	if (HDLOPS(IMPLHDL(ophdl))->cmio_int == NULL)
		return;

	cmi_hdl_inj_begin(ophdl);
	HDLOPS(IMPLHDL(ophdl))->cmio_int(IMPLHDL(ophdl), num);
	/* NOTE(review): begin is passed ophdl but end NULL -- confirm */
	cmi_hdl_inj_end(NULL);
}
1660 
/*
 * Change or query the online status of the handle's cpu via the
 * class cmio_online op; prior status is returned in *old_status.
 */
int
cmi_hdl_online(cmi_hdl_t ophdl, int new_status, int *old_status)
{
	return (HDLOPS(IMPLHDL(ophdl))->cmio_online(IMPLHDL(ophdl),
	    new_status, old_status));
}
1667 
1668 #ifndef	__xpv
1669 /*
1670  * Return hardware chip instance; cpuid_get_chipid provides this directly.
1671  */
1672 uint_t
1673 cmi_ntv_hwchipid(cpu_t *cp)
1674 {
1675 	return (cpuid_get_chipid(cp));
1676 }
1677 
1678 /*
1679  * Return hardware node instance; cpuid_get_procnodeid provides this directly.
1680  */
1681 uint_t
1682 cmi_ntv_hwprocnodeid(cpu_t *cp)
1683 {
1684 	return (cpuid_get_procnodeid(cp));
1685 }
1686 
1687 /*
1688  * Return core instance within a single chip.
1689  */
1690 uint_t
1691 cmi_ntv_hwcoreid(cpu_t *cp)
1692 {
1693 	return (cpuid_get_pkgcoreid(cp));
1694 }
1695 
1696 /*
1697  * Return strand number within a single core.  cpuid_get_clogid numbers
1698  * all execution units (strands, or cores in unstranded models) sequentially
1699  * within a single chip.
1700  */
1701 uint_t
1702 cmi_ntv_hwstrandid(cpu_t *cp)
1703 {
1704 	int strands_per_core = cpuid_get_ncpu_per_chip(cp) /
1705 	    cpuid_get_ncore_per_chip(cp);
1706 
1707 	return (cpuid_get_clogid(cp) % strands_per_core);
1708 }
1709 
1710 static void
1711 cmi_ntv_hwdisable_mce_xc(void)
1712 {
1713 	ulong_t cr4;
1714 
1715 	cr4 = getcr4();
1716 	cr4 = cr4 & (~CR4_MCE);
1717 	setcr4(cr4);
1718 }
1719 
/*
 * Disable #MC delivery on the cpu underlying the handle: run the CR4
 * update in place if we are already on that cpu, otherwise cross-call
 * to it.
 */
void
cmi_ntv_hwdisable_mce(cmi_hdl_t hdl)
{
	cpuset_t	set;
	cmi_hdl_impl_t *thdl = IMPLHDL(hdl);
	cpu_t *cp = HDLPRIV(thdl);

	if (CPU->cpu_id == cp->cpu_id) {
		cmi_ntv_hwdisable_mce_xc();
	} else {
		CPUSET_ONLY(set, cp->cpu_id);
		/* xc_func_t cast: the xc target takes/returns nothing */
		xc_call(NULL, NULL, NULL, CPUSET2BV(set),
		    (xc_func_t)cmi_ntv_hwdisable_mce_xc);
	}
}
1735 
1736 #endif	/* __xpv */
1737 
/*
 * Forbid hardware MSR reads through this handle; only interposed
 * values will be returned by cmi_hdl_rdmsr() thereafter.
 */
void
cmi_hdlconf_rdmsr_nohw(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	hdl->cmih_msrsrc &= ~CMI_MSR_FLAG_RD_HWOK;
}
1745 
/*
 * Forbid hardware MSR writes through this handle.
 */
void
cmi_hdlconf_wrmsr_nohw(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	hdl->cmih_msrsrc &= ~CMI_MSR_FLAG_WR_HWOK;
}
1753 
/*
 * Read an MSR through a handle, returning an interposed value in
 * preference to the hardware value when one is present and permitted.
 */
cmi_errno_t
cmi_hdl_rdmsr(cmi_hdl_t ophdl, uint_t msr, uint64_t *valp)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	/*
	 * Regardless of the handle class, we first check for an
	 * interposed value.  In the xVM case you probably want to
	 * place interposed values within the hypervisor itself, but
	 * we still allow interposing them in dom0 for test and bringup
	 * purposes.
	 */
	if ((hdl->cmih_msrsrc & CMI_MSR_FLAG_RD_INTERPOSEOK) &&
	    msri_lookup(hdl, msr, valp))
		return (CMI_SUCCESS);

	if (HDLOPS(hdl)->cmio_rdmsr == NULL)
		return (CMIERR_NOTSUP);

	return (HDLOPS(hdl)->cmio_rdmsr(hdl, msr, valp));
}
1775 
/*
 * Write an MSR through a handle, first invalidating any interposed
 * value for it.  Classes without a cmio_wrmsr op silently succeed.
 */
cmi_errno_t
cmi_hdl_wrmsr(cmi_hdl_t ophdl, uint_t msr, uint64_t val)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	/* Invalidate any interposed value */
	msri_rment(hdl, msr);

	if (HDLOPS(hdl)->cmio_wrmsr == NULL)
		return (CMI_SUCCESS);	/* pretend all is ok */

	return (HDLOPS(hdl)->cmio_wrmsr(hdl, msr, val));
}
1789 
1790 void
1791 cmi_hdl_enable_mce(cmi_hdl_t ophdl)
1792 {
1793 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1794 	ulong_t cr4;
1795 
1796 	if (HDLOPS(hdl)->cmio_getcr4 == NULL ||
1797 	    HDLOPS(hdl)->cmio_setcr4 == NULL)
1798 		return;
1799 
1800 	cr4 = HDLOPS(hdl)->cmio_getcr4(hdl);
1801 
1802 	HDLOPS(hdl)->cmio_setcr4(hdl, cr4 | CR4_MCE);
1803 }
1804 
/*
 * Interpose a batch of MSR values through the class cmio_msrinterpose
 * op, bracketed by injection begin/end.  No-op if the class lacks the
 * op.
 */
void
cmi_hdl_msrinterpose(cmi_hdl_t ophdl, cmi_mca_regs_t *regs, uint_t nregs)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
	int i;

	if (HDLOPS(hdl)->cmio_msrinterpose == NULL)
		return;

	cmi_hdl_inj_begin(ophdl);

	for (i = 0; i < nregs; i++, regs++)
		HDLOPS(hdl)->cmio_msrinterpose(hdl, regs->cmr_msrnum,
		    regs->cmr_msrval);

	cmi_hdl_inj_end(ophdl);
}
1822 
/*ARGSUSED*/
/*
 * Record a batch of MSR values into the dom0 interpose cache.  Only
 * meaningful under xVM; a no-op on native (hence ARGSUSED).
 */
void
cmi_hdl_msrforward(cmi_hdl_t ophdl, cmi_mca_regs_t *regs, uint_t nregs)
{
#ifdef __xpv
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
	int i;

	for (i = 0; i < nregs; i++, regs++)
		msri_addent(hdl, regs->cmr_msrnum, regs->cmr_msrval);
#endif
}
1835 
1836 
/*
 * Globally forbid hardware PCI config space reads; only interposed
 * values (or zero) will be returned.
 */
void
cmi_pcird_nohw(void)
{
	cmi_pcicfg_flags &= ~CMI_PCICFG_FLAG_RD_HWOK;
}
1842 
/*
 * Globally forbid hardware PCI config space writes.
 */
void
cmi_pciwr_nohw(void)
{
	cmi_pcicfg_flags &= ~CMI_PCICFG_FLAG_WR_HWOK;
}
1848 
/*
 * Common PCI config read of 'asz' bytes at <bus,dev,func,reg>.  An
 * interposed value, when present and permitted, is returned first
 * (with *interpose set to 1 if the pointer is non-NULL).  Hardware is
 * accessed through the ddi config handle if supplied, otherwise via
 * pci_cfgacc.  Returns 0 if hardware reads are disabled or the
 * access size is unsupported.
 */
static uint32_t
cmi_pci_get_cmn(int bus, int dev, int func, int reg, int asz,
    int *interpose, ddi_acc_handle_t hdl)
{
	uint32_t val;

	if (cmi_pcicfg_flags & CMI_PCICFG_FLAG_RD_INTERPOSEOK &&
	    pcii_lookup(bus, dev, func, reg, asz, &val)) {
		if (interpose)
			*interpose = 1;
		return (val);
	}
	if (interpose)
		*interpose = 0;

	if (!(cmi_pcicfg_flags & CMI_PCICFG_FLAG_RD_HWOK))
		return (0);

	switch (asz) {
	case 1:
		if (hdl)
			val = pci_config_get8(hdl, (off_t)reg);
		else
			val = pci_cfgacc_get8(NULL, PCI_GETBDF(bus, dev, func),
			    reg);
		break;
	case 2:
		if (hdl)
			val = pci_config_get16(hdl, (off_t)reg);
		else
			val = pci_cfgacc_get16(NULL, PCI_GETBDF(bus, dev, func),
			    reg);
		break;
	case 4:
		if (hdl)
			val = pci_config_get32(hdl, (off_t)reg);
		else
			val = pci_cfgacc_get32(NULL, PCI_GETBDF(bus, dev, func),
			    reg);
		break;
	default:
		val = 0;
	}
	return (val);
}
1894 
/*
 * 8-bit PCI config read wrapper around cmi_pci_get_cmn().
 */
uint8_t
cmi_pci_getb(int bus, int dev, int func, int reg, int *interpose,
    ddi_acc_handle_t hdl)
{
	return ((uint8_t)cmi_pci_get_cmn(bus, dev, func, reg, 1, interpose,
	    hdl));
}
1902 
/*
 * 16-bit PCI config read wrapper around cmi_pci_get_cmn().
 */
uint16_t
cmi_pci_getw(int bus, int dev, int func, int reg, int *interpose,
    ddi_acc_handle_t hdl)
{
	return ((uint16_t)cmi_pci_get_cmn(bus, dev, func, reg, 2, interpose,
	    hdl));
}
1910 
/*
 * 32-bit PCI config read wrapper around cmi_pci_get_cmn().
 */
uint32_t
cmi_pci_getl(int bus, int dev, int func, int reg, int *interpose,
    ddi_acc_handle_t hdl)
{
	return (cmi_pci_get_cmn(bus, dev, func, reg, 4, interpose, hdl));
}
1917 
/*
 * Plant an interposed 8-bit PCI config value for later reads.
 */
void
cmi_pci_interposeb(int bus, int dev, int func, int reg, uint8_t val)
{
	pcii_addent(bus, dev, func, reg, val, 1);
}
1923 
/*
 * Plant an interposed 16-bit PCI config value for later reads.
 */
void
cmi_pci_interposew(int bus, int dev, int func, int reg, uint16_t val)
{
	pcii_addent(bus, dev, func, reg, val, 2);
}
1929 
/*
 * Plant an interposed 32-bit PCI config value for later reads.
 */
void
cmi_pci_interposel(int bus, int dev, int func, int reg, uint32_t val)
{
	pcii_addent(bus, dev, func, reg, val, 4);
}
1935 
/*
 * Common PCI config write of 'asz' bytes at <bus,dev,func,reg>.  Any
 * interposed value for the register is invalidated first.  Hardware
 * is written through the ddi config handle if supplied, otherwise via
 * pci_cfgacc.  Silently does nothing if hardware writes are disabled
 * or the access size is unsupported.
 */
static void
cmi_pci_put_cmn(int bus, int dev, int func, int reg, int asz,
    ddi_acc_handle_t hdl, uint32_t val)
{
	/*
	 * If there is an interposed value for this register invalidate it.
	 */
	pcii_rment(bus, dev, func, reg, asz);

	if (!(cmi_pcicfg_flags & CMI_PCICFG_FLAG_WR_HWOK))
		return;

	switch (asz) {
	case 1:
		if (hdl)
			pci_config_put8(hdl, (off_t)reg, (uint8_t)val);
		else
			pci_cfgacc_put8(NULL, PCI_GETBDF(bus, dev, func), reg,
			    (uint8_t)val);
		break;

	case 2:
		if (hdl)
			pci_config_put16(hdl, (off_t)reg, (uint16_t)val);
		else
			pci_cfgacc_put16(NULL, PCI_GETBDF(bus, dev, func), reg,
			    (uint16_t)val);
		break;

	case 4:
		if (hdl)
			pci_config_put32(hdl, (off_t)reg, val);
		else
			pci_cfgacc_put32(NULL, PCI_GETBDF(bus, dev, func), reg,
			    val);
		break;

	default:
		break;
	}
}
1977 
/*
 * 8-bit PCI config write wrapper around cmi_pci_put_cmn().
 */
void
cmi_pci_putb(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
    uint8_t val)
{
	cmi_pci_put_cmn(bus, dev, func, reg, 1, hdl, val);
}
1984 
/*
 * 16-bit PCI config write wrapper around cmi_pci_put_cmn().
 */
void
cmi_pci_putw(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
    uint16_t val)
{
	cmi_pci_put_cmn(bus, dev, func, reg, 2, hdl, val);
}
1991 
/*
 * 32-bit PCI config write wrapper around cmi_pci_put_cmn().
 */
void
cmi_pci_putl(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
    uint32_t val)
{
	cmi_pci_put_cmn(bus, dev, func, reg, 4, hdl, val);
}
1998 
/*
 * The single ops vector for this kernel, selected at compile time:
 * xVM dom0 ops when built with __xpv, otherwise native ops.
 */
static const struct cmi_hdl_ops cmi_hdl_ops = {
#ifdef __xpv
	/*
	 * CMI_HDL_SOLARIS_xVM_MCA - ops when we are an xVM dom0
	 */
	xpv_vendor,		/* cmio_vendor */
	xpv_vendorstr,		/* cmio_vendorstr */
	xpv_family,		/* cmio_family */
	xpv_model,		/* cmio_model */
	xpv_stepping,		/* cmio_stepping */
	xpv_chipid,		/* cmio_chipid */
	xpv_procnodeid,		/* cmio_procnodeid */
	xpv_coreid,		/* cmio_coreid */
	xpv_strandid,		/* cmio_strandid */
	xpv_procnodes_per_pkg,	/* cmio_procnodes_per_pkg */
	xpv_strand_apicid,	/* cmio_strand_apicid */
	xpv_chiprev,		/* cmio_chiprev */
	xpv_chiprevstr,		/* cmio_chiprevstr */
	xpv_getsockettype,	/* cmio_getsockettype */
	xpv_getsocketstr,	/* cmio_getsocketstr */
	xpv_chipsig,		/* cmio_chipsig */
	xpv_logical_id,		/* cmio_logical_id */
	NULL,			/* cmio_getcr4 */
	NULL,			/* cmio_setcr4 */
	xpv_rdmsr,		/* cmio_rdmsr */
	xpv_wrmsr,		/* cmio_wrmsr */
	xpv_msrinterpose,	/* cmio_msrinterpose */
	xpv_int,		/* cmio_int */
	xpv_online,		/* cmio_online */
	xpv_smbiosid,		/* cmio_smbiosid */
	xpv_smb_chipid,		/* cmio_smb_chipid */
	xpv_smb_bboard		/* cmio_smb_bboard */

#else	/* __xpv */

	/*
	 * CMI_HDL_NATIVE - ops when apparently running on bare-metal
	 */
	ntv_vendor,		/* cmio_vendor */
	ntv_vendorstr,		/* cmio_vendorstr */
	ntv_family,		/* cmio_family */
	ntv_model,		/* cmio_model */
	ntv_stepping,		/* cmio_stepping */
	ntv_chipid,		/* cmio_chipid */
	ntv_procnodeid,		/* cmio_procnodeid */
	ntv_coreid,		/* cmio_coreid */
	ntv_strandid,		/* cmio_strandid */
	ntv_procnodes_per_pkg,	/* cmio_procnodes_per_pkg */
	ntv_strand_apicid,	/* cmio_strand_apicid */
	ntv_chiprev,		/* cmio_chiprev */
	ntv_chiprevstr,		/* cmio_chiprevstr */
	ntv_getsockettype,	/* cmio_getsockettype */
	ntv_getsocketstr,	/* cmio_getsocketstr */
	ntv_chipsig,		/* cmio_chipsig */
	ntv_logical_id,		/* cmio_logical_id */
	ntv_getcr4,		/* cmio_getcr4 */
	ntv_setcr4,		/* cmio_setcr4 */
	ntv_rdmsr,		/* cmio_rdmsr */
	ntv_wrmsr,		/* cmio_wrmsr */
	ntv_msrinterpose,	/* cmio_msrinterpose */
	ntv_int,		/* cmio_int */
	ntv_online,		/* cmio_online */
	ntv_smbiosid,		/* cmio_smbiosid */
	ntv_smb_chipid,		/* cmio_smb_chipid */
	ntv_smb_bboard		/* cmio_smb_bboard */
#endif
};
2066