xref: /illumos-gate/usr/src/uts/i86pc/os/cmi_hw.c (revision 4764d912222e53f8386bae7bf491f5780fd102ec)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * CPU Module Interface - hardware abstraction.
29  */
30 
31 #include <sys/types.h>
32 #include <sys/cpu_module.h>
33 #include <sys/kmem.h>
34 #include <sys/x86_archext.h>
35 #include <sys/cpuvar.h>
36 #include <sys/ksynch.h>
37 #include <sys/x_call.h>
38 #include <sys/pghw.h>
39 #include <sys/pci_cfgspace.h>
40 #include <sys/archsystm.h>
41 #include <sys/ontrap.h>
42 #include <sys/controlregs.h>
43 #include <sys/sunddi.h>
44 #include <sys/trap.h>
45 
46 /*
47  * Outside of this file consumers use the opaque cmi_hdl_t.  This
48  * definition is duplicated in the generic_cpu mdb module, so keep
49  * them in-sync when making changes.
50  */
typedef struct cmi_hdl_impl {
	enum cmi_hdl_class cmih_class;		/* Handle nature */
	struct cmi_hdl_ops *cmih_ops;		/* Operations vector */
	uint_t cmih_chipid;			/* Chipid of cpu resource */
	uint_t cmih_coreid;			/* Core within die */
	uint_t cmih_strandid;			/* Thread within core */
	boolean_t cmih_mstrand;			/* chip multithreading */
	volatile uint32_t *cmih_refcntp;	/* Reference count pointer */
	uint64_t cmih_msrsrc;			/* MSR data source flags */
	void *cmih_hdlpriv;			/* cmi_hw.c private data */
	void *cmih_spec;			/* cmi_hdl_{set,get}_specific */
	void *cmih_cmi;				/* cpu mod control structure */
	void *cmih_cmidata;			/* cpu mod private data */
	const struct cmi_mc_ops *cmih_mcops;	/* Memory-controller ops */
	void *cmih_mcdata;			/* Memory-controller data */
} cmi_hdl_impl_t;

/* Convert the opaque public cmi_hdl_t to the private implementation type */
#define	IMPLHDL(ophdl)	((cmi_hdl_impl_t *)ophdl)
69 
70 /*
71  * Handles are looked up from contexts such as polling, injection etc
72  * where the context is reasonably well defined (although a poller could
73  * interrupt any old thread holding any old lock).  They are also looked
74  * up by machine check handlers, which may strike at inconvenient times
75  * such as during handle initialization or destruction or during handle
76  * lookup (which the #MC handler itself will also have to perform).
77  *
78  * So keeping handles in a linked list makes locking difficult when we
79  * consider #MC handlers.  Our solution is to have an array indexed
80  * by that which uniquely identifies a handle - chip/core/strand id -
81  * with each array member a structure including a pointer to a handle
82  * structure for the resource, and a reference count for the handle.
83  * Reference counts are modified atomically.  The public cmi_hdl_hold
84  * always succeeds because this can only be used after handle creation
 * and before the call to destruct, so the hold count is already at least one.
 * In other functions that look up a handle (cmi_hdl_lookup, cmi_hdl_any)
 * we must be certain that the count has not already decremented to zero
88  * before applying our hold.
89  *
90  * This array is allocated when first we want to populate an entry.
91  * When allocated it is maximal - ideally we should scale to the
92  * actual number of chips, cores per chip and strand per core but
93  * that info is not readily available if we are virtualized so
94  * for now we stick with the dumb approach.
95  */
#define	CMI_MAX_CHIPS_NBITS		4	/* 16 chip packages max */
#define	CMI_MAX_CORES_PER_CHIP_NBITS	3	/* 8 cores per chip max */
#define	CMI_MAX_STRANDS_PER_CORE_NBITS	1	/* 2 strands per core max */

#define	CMI_MAX_CHIPS			(1 << CMI_MAX_CHIPS_NBITS)
#define	CMI_MAX_CORES_PER_CHIP		(1 << CMI_MAX_CORES_PER_CHIP_NBITS)
#define	CMI_MAX_STRANDS_PER_CORE	(1 << CMI_MAX_STRANDS_PER_CORE_NBITS)

/*
 * Handle array indexing.  The three ids are packed into a single index:
 *	[7:4] = Chip package,
 *	[3:1] = Core in package,
 *	[0:0] = Strand in core.
 */
#define	CMI_HDL_ARR_IDX_CHIP(chipid) \
	(((chipid) & (CMI_MAX_CHIPS - 1)) << \
	(CMI_MAX_STRANDS_PER_CORE_NBITS + CMI_MAX_CORES_PER_CHIP_NBITS))

#define	CMI_HDL_ARR_IDX_CORE(coreid) \
	(((coreid) & (CMI_MAX_CORES_PER_CHIP - 1)) << \
	CMI_MAX_STRANDS_PER_CORE_NBITS)

#define	CMI_HDL_ARR_IDX_STRAND(strandid) \
	(((strandid) & (CMI_MAX_STRANDS_PER_CORE - 1)))

#define	CMI_HDL_ARR_IDX(chipid, coreid, strandid) \
	(CMI_HDL_ARR_IDX_CHIP(chipid) | CMI_HDL_ARR_IDX_CORE(coreid) | \
	CMI_HDL_ARR_IDX_STRAND(strandid))

/* Total number of entries in the (maximal) handle array */
#define	CMI_HDL_ARR_SZ (CMI_MAX_CHIPS * CMI_MAX_CORES_PER_CHIP * \
    CMI_MAX_STRANDS_PER_CORE)

/* One entry per possible resource; cmae_refcnt == 0 means entry unused */
struct cmi_hdl_arr_ent {
	volatile uint32_t cmae_refcnt;
	cmi_hdl_impl_t *cmae_hdlp;
};

/* Allocated lazily on first handle creation; see cmi_hdl_create */
static struct cmi_hdl_arr_ent *cmi_hdl_arr;
134 
/*
 * Controls where we will source PCI config space data.  Reads and writes
 * may each independently be satisfied from real hardware and/or from
 * interposed values stashed via pcii_addent.
 */
#define	CMI_PCICFG_FLAG_RD_HWOK		0x0001
#define	CMI_PCICFG_FLAG_RD_INTERPOSEOK	0X0002
#define	CMI_PCICFG_FLAG_WR_HWOK		0x0004
#define	CMI_PCICFG_FLAG_WR_INTERPOSEOK	0X0008

/* Default: allow both hardware and interposed sources for reads and writes */
static uint64_t cmi_pcicfg_flags =
    CMI_PCICFG_FLAG_RD_HWOK | CMI_PCICFG_FLAG_RD_INTERPOSEOK |
    CMI_PCICFG_FLAG_WR_HWOK | CMI_PCICFG_FLAG_WR_INTERPOSEOK;

/*
 * The flags for individual cpus are kept in their per-cpu handle cmih_msrsrc
 */
#define	CMI_MSR_FLAG_RD_HWOK		0x0001
#define	CMI_MSR_FLAG_RD_INTERPOSEOK	0x0002
#define	CMI_MSR_FLAG_WR_HWOK		0x0004
#define	CMI_MSR_FLAG_WR_INTERPOSEOK	0x0008

/* Number of xc_trycall attempts before giving up with CMIERR_DEADLOCK */
int cmi_call_func_ntv_tries = 3;
156 
/*
 * Execute (*func)(arg1, arg2, &rc) on the cpu with the given id, either
 * directly (if that cpu is the current one) or via cross call.  The
 * called function reports its cmi_errno_t result through the third
 * xc_arg_t; rc == -1 means "func never ran".  Returns CMIERR_DEADLOCK
 * if the cross call could not be delivered after
 * cmi_call_func_ntv_tries attempts.
 */
static cmi_errno_t
call_func_ntv(int cpuid, xc_func_t func, xc_arg_t arg1, xc_arg_t arg2)
{
	cmi_errno_t rc = -1;	/* sentinel: func has not run yet */
	int i;

	/* Pin ourselves so CPU->cpu_id cannot change under us */
	kpreempt_disable();

	if (CPU->cpu_id == cpuid) {
		(*func)(arg1, arg2, (xc_arg_t)&rc);
	} else {
		/*
		 * This should not happen for a #MC trap or a poll, so
		 * this is likely an error injection or similar.
		 * We will try to cross call with xc_trycall - we
		 * can't guarantee success with xc_call because
		 * the interrupt code in the case of a #MC may
		 * already hold the xc mutex.
		 */
		for (i = 0; i < cmi_call_func_ntv_tries; i++) {
			cpuset_t cpus;

			CPUSET_ONLY(cpus, cpuid);
			xc_trycall(arg1, arg2, (xc_arg_t)&rc, cpus, func);
			if (rc != -1)
				break;

			DELAY(1);
		}
	}

	kpreempt_enable();

	return (rc != -1 ? rc : CMIERR_DEADLOCK);
}
192 
193 /*
194  *	 =======================================================
195  *	|	MSR Interposition				|
196  *	|	-----------------				|
197  *	|							|
198  *	 -------------------------------------------------------
199  */
200 
#define	CMI_MSRI_HASHSZ		16

/*
 * Hash bucket index for an interposed MSR value.  Drop the low three
 * bits of the handle address (kmem allocations are at least 8-byte
 * aligned, so those bits carry no information) and mix in the MSR
 * number.  The shift count is parenthesized so it is a constant 3:
 * the unparenthesized ">> 3 + (msr)" form shifted by (3 + msr), which
 * is undefined behavior for shift counts >= the width of uintptr_t
 * (true for essentially every architectural MSR number).  The modulus
 * uses the full table size so that every bucket is reachable.
 */
#define	CMI_MSRI_HASHIDX(hdl, msr) \
	((((uintptr_t)(hdl) >> 3) + (msr)) % CMI_MSRI_HASHSZ)
204 
/* Hash bucket: a lock plus the head of a doubly-linked entry chain */
struct cmi_msri_bkt {
	kmutex_t msrib_lock;
	struct cmi_msri_hashent *msrib_head;
};

/* One interposed MSR value, keyed by (handle, msr number) */
struct cmi_msri_hashent {
	struct cmi_msri_hashent *msrie_next;
	struct cmi_msri_hashent *msrie_prev;
	cmi_hdl_impl_t *msrie_hdl;
	uint_t msrie_msrnum;
	uint64_t msrie_msrval;
};

/* Does this entry match the given handle and msr number? */
#define	CMI_MSRI_MATCH(ent, hdl, req_msr) \
	((ent)->msrie_hdl == (hdl) && (ent)->msrie_msrnum == (req_msr))

static struct cmi_msri_bkt msrihash[CMI_MSRI_HASHSZ];
222 
223 static void
224 msri_addent(cmi_hdl_impl_t *hdl, cmi_mca_regs_t *regp)
225 {
226 	int idx = CMI_MSRI_HASHIDX(hdl, regp->cmr_msrnum);
227 	struct cmi_msri_bkt *hbp = &msrihash[idx];
228 	struct cmi_msri_hashent *hep;
229 
230 	mutex_enter(&hbp->msrib_lock);
231 
232 	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
233 		if (CMI_MSRI_MATCH(hep, hdl, regp->cmr_msrnum))
234 			break;
235 	}
236 
237 	if (hep != NULL) {
238 		hep->msrie_msrval = regp->cmr_msrval;
239 	} else {
240 		hep = kmem_alloc(sizeof (*hep), KM_SLEEP);
241 		hep->msrie_hdl = hdl;
242 		hep->msrie_msrnum = regp->cmr_msrnum;
243 		hep->msrie_msrval = regp->cmr_msrval;
244 
245 		if (hbp->msrib_head != NULL)
246 			hbp->msrib_head->msrie_prev = hep;
247 		hep->msrie_next = hbp->msrib_head;
248 		hep->msrie_prev = NULL;
249 		hbp->msrib_head = hep;
250 	}
251 
252 	mutex_exit(&hbp->msrib_lock);
253 }
254 
255 /*
256  * Look for a match for the given hanlde and msr.  Return 1 with valp
257  * filled if a match is found, otherwise return 0 with valp untouched.
258  */
259 static int
260 msri_lookup(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
261 {
262 	int idx = CMI_MSRI_HASHIDX(hdl, msr);
263 	struct cmi_msri_bkt *hbp = &msrihash[idx];
264 	struct cmi_msri_hashent *hep;
265 
266 	/*
267 	 * This function is called during #MC trap handling, so we should
268 	 * consider the possibility that the hash mutex is held by the
269 	 * interrupted thread.  This should not happen because interposition
270 	 * is an artificial injection mechanism and the #MC is requested
271 	 * after adding entries, but just in case of a real #MC at an
272 	 * unlucky moment we'll use mutex_tryenter here.
273 	 */
274 	if (!mutex_tryenter(&hbp->msrib_lock))
275 		return (0);
276 
277 	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
278 		if (CMI_MSRI_MATCH(hep, hdl, msr)) {
279 			*valp = hep->msrie_msrval;
280 			break;
281 		}
282 	}
283 
284 	mutex_exit(&hbp->msrib_lock);
285 
286 	return (hep != NULL);
287 }
288 
289 /*
290  * Remove any interposed value that matches.
291  */
292 static void
293 msri_rment(cmi_hdl_impl_t *hdl, uint_t msr)
294 {
295 
296 	int idx = CMI_MSRI_HASHIDX(hdl, msr);
297 	struct cmi_msri_bkt *hbp = &msrihash[idx];
298 	struct cmi_msri_hashent *hep;
299 
300 	if (!mutex_tryenter(&hbp->msrib_lock))
301 		return;
302 
303 	for (hep = hbp->msrib_head; hep != NULL; hep = hep->msrie_next) {
304 		if (CMI_MSRI_MATCH(hep, hdl, msr)) {
305 			if (hep->msrie_prev != NULL)
306 				hep->msrie_prev->msrie_next = hep->msrie_next;
307 
308 			if (hep->msrie_next != NULL)
309 				hep->msrie_next->msrie_prev = hep->msrie_prev;
310 
311 			if (hbp->msrib_head == hep)
312 				hbp->msrib_head = hep->msrie_next;
313 
314 			kmem_free(hep, sizeof (*hep));
315 			break;
316 		}
317 	}
318 
319 	mutex_exit(&hbp->msrib_lock);
320 }
321 
322 /*
323  *	 =======================================================
324  *	|	PCI Config Space Interposition			|
325  *	|	------------------------------			|
326  *	|							|
327  *	 -------------------------------------------------------
328  */
329 
330 /*
331  * Hash for interposed PCI config space values.  We lookup on bus/dev/fun/offset
332  * and then record whether the value stashed was made with a byte, word or
333  * doubleword access;  we will only return a hit for an access of the
334  * same size.  If you access say a 32-bit register using byte accesses
335  * and then attempt to read the full 32-bit value back you will not obtain
336  * any sort of merged result - you get a lookup miss.
337  */
338 
#define	CMI_PCII_HASHSZ		16

/*
 * Hash bucket index for an interposed PCI config value: a simple sum
 * of the bus/dev/func/offset coordinates, reduced modulo the full
 * table size.  (The previous "% (CMI_PCII_HASHSZ - 1)" reduced modulo
 * 15, which left bucket 15 permanently unused.)
 */
#define	CMI_PCII_HASHIDX(b, d, f, o) \
	(((b) + (d) + (f) + (o)) % CMI_PCII_HASHSZ)
342 
/* Hash bucket: a lock plus the head of a doubly-linked entry chain */
struct cmi_pcii_bkt {
	kmutex_t pciib_lock;
	struct cmi_pcii_hashent *pciib_head;
};

/*
 * One interposed PCI config value, keyed by bus/dev/func/offset plus
 * the access size used to stash it (see block comment above).
 */
struct cmi_pcii_hashent {
	struct cmi_pcii_hashent *pcii_next;
	struct cmi_pcii_hashent *pcii_prev;
	int pcii_bus;
	int pcii_dev;
	int pcii_func;
	int pcii_reg;
	int pcii_asize;
	uint32_t pcii_val;
};

/* Exact match on all four coordinates and the access size */
#define	CMI_PCII_MATCH(ent, b, d, f, r, asz) \
	((ent)->pcii_bus == (b) && (ent)->pcii_dev == (d) && \
	(ent)->pcii_func == (f) && (ent)->pcii_reg == (r) && \
	(ent)->pcii_asize == (asz))

static struct cmi_pcii_bkt pciihash[CMI_PCII_HASHSZ];
365 
366 
367 /*
368  * Add a new entry to the PCI interpose hash, overwriting any existing
369  * entry that is found.
370  */
371 static void
372 pcii_addent(int bus, int dev, int func, int reg, uint32_t val, int asz)
373 {
374 	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
375 	struct cmi_pcii_bkt *hbp = &pciihash[idx];
376 	struct cmi_pcii_hashent *hep;
377 
378 	mutex_enter(&hbp->pciib_lock);
379 
380 	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
381 		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz))
382 			break;
383 	}
384 
385 	if (hep != NULL) {
386 		hep->pcii_val = val;
387 	} else {
388 		hep = kmem_alloc(sizeof (*hep), KM_SLEEP);
389 		hep->pcii_bus = bus;
390 		hep->pcii_dev = dev;
391 		hep->pcii_func = func;
392 		hep->pcii_reg = reg;
393 		hep->pcii_asize = asz;
394 		hep->pcii_val = val;
395 
396 		if (hbp->pciib_head != NULL)
397 			hbp->pciib_head->pcii_prev = hep;
398 		hep->pcii_next = hbp->pciib_head;
399 		hep->pcii_prev = NULL;
400 		hbp->pciib_head = hep;
401 	}
402 
403 	mutex_exit(&hbp->pciib_lock);
404 }
405 
406 /*
407  * Look for a match for the given bus/dev/func/reg; return 1 with valp
408  * filled if a match is found, otherwise return 0 with valp untouched.
409  */
410 static int
411 pcii_lookup(int bus, int dev, int func, int reg, int asz, uint32_t *valp)
412 {
413 	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
414 	struct cmi_pcii_bkt *hbp = &pciihash[idx];
415 	struct cmi_pcii_hashent *hep;
416 
417 	if (!mutex_tryenter(&hbp->pciib_lock))
418 		return (0);
419 
420 	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
421 		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz)) {
422 			*valp = hep->pcii_val;
423 			break;
424 		}
425 	}
426 
427 	mutex_exit(&hbp->pciib_lock);
428 
429 	return (hep != NULL);
430 }
431 
432 static void
433 pcii_rment(int bus, int dev, int func, int reg, int asz)
434 {
435 	int idx = CMI_PCII_HASHIDX(bus, dev, func, reg);
436 	struct cmi_pcii_bkt *hbp = &pciihash[idx];
437 	struct cmi_pcii_hashent *hep;
438 
439 	mutex_enter(&hbp->pciib_lock);
440 
441 	for (hep = hbp->pciib_head; hep != NULL; hep = hep->pcii_next) {
442 		if (CMI_PCII_MATCH(hep, bus, dev, func, reg, asz)) {
443 			if (hep->pcii_prev != NULL)
444 				hep->pcii_prev->pcii_next = hep->pcii_next;
445 
446 			if (hep->pcii_next != NULL)
447 				hep->pcii_next->pcii_prev = hep->pcii_prev;
448 
449 			if (hbp->pciib_head == hep)
450 				hbp->pciib_head = hep->pcii_next;
451 
452 			kmem_free(hep, sizeof (*hep));
453 			break;
454 		}
455 	}
456 
457 	mutex_exit(&hbp->pciib_lock);
458 }
459 
460 /*
461  *	 =======================================================
462  *	|	Native methods					|
463  *	|	--------------					|
464  *	|							|
465  *	| These are used when we are running native on bare-	|
466  *	| metal, or simply don't know any better.		|
467  *	---------------------------------------------------------
468  */
469 
/*
 * Identification getters for the native (bare-metal) handle class.
 * The cpuid-derived properties delegate to the cpuid subsystem via the
 * cpu_t stashed in cmih_hdlpriv; the id getters simply return the
 * values recorded at handle creation.
 */
static uint_t
ntv_vendor(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getvendor((cpu_t *)hdl->cmih_hdlpriv));
}

static const char *
ntv_vendorstr(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getvendorstr((cpu_t *)hdl->cmih_hdlpriv));
}

static uint_t
ntv_family(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getfamily((cpu_t *)hdl->cmih_hdlpriv));
}

static uint_t
ntv_model(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getmodel((cpu_t *)hdl->cmih_hdlpriv));
}

static uint_t
ntv_stepping(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getstep((cpu_t *)hdl->cmih_hdlpriv));
}

static uint_t
ntv_chipid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_chipid);

}

static uint_t
ntv_coreid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_coreid);
}

static uint_t
ntv_strandid(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_strandid);
}

static boolean_t
ntv_mstrand(cmi_hdl_impl_t *hdl)
{
	return (hdl->cmih_mstrand);
}

static uint32_t
ntv_chiprev(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getchiprev((cpu_t *)hdl->cmih_hdlpriv));
}

static const char *
ntv_chiprevstr(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getchiprevstr((cpu_t *)hdl->cmih_hdlpriv));
}

static uint32_t
ntv_getsockettype(cmi_hdl_impl_t *hdl)
{
	return (cpuid_getsockettype((cpu_t *)hdl->cmih_hdlpriv));
}
542 
/*
 * Cross-call target: store the current cpu's %cr4 in *arg1 and report
 * success through *arg3.
 */
/*ARGSUSED*/
static int
ntv_getcr4_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	ulong_t *dest = (ulong_t *)arg1;
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;

	*dest = getcr4();
	*rcp = CMI_SUCCESS;

	return (0);
}
555 
556 static ulong_t
557 ntv_getcr4(cmi_hdl_impl_t *hdl)
558 {
559 	cpu_t *cp = (cpu_t *)hdl->cmih_hdlpriv;
560 	ulong_t val;
561 
562 	(void) call_func_ntv(cp->cpu_id, ntv_getcr4_xc, (xc_arg_t)&val, NULL);
563 
564 	return (val);
565 }
566 
/*
 * Cross-call target: load arg1 into the current cpu's %cr4 and report
 * success through *arg3.
 */
/*ARGSUSED*/
static int
ntv_setcr4_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	ulong_t val = (ulong_t)arg1;
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;

	setcr4(val);
	*rcp = CMI_SUCCESS;

	return (0);
}
579 
580 static void
581 ntv_setcr4(cmi_hdl_impl_t *hdl, ulong_t val)
582 {
583 	cpu_t *cp = (cpu_t *)hdl->cmih_hdlpriv;
584 
585 	(void) call_func_ntv(cp->cpu_id, ntv_setcr4_xc, (xc_arg_t)val, NULL);
586 }
587 
/* Count of MSR reads that trapped; bumped in ntv_rdmsr_xc below */
volatile uint32_t cmi_trapped_rdmsr;

/*
 * Cross-call target: read MSR arg1 into *arg2 on the current cpu.
 * Result via *arg3: CMI_SUCCESS, CMIERR_NOTSUP (checked_rdmsr rejected
 * the access), or CMIERR_MSRGPF (the read trapped).
 */
/*ARGSUSED*/
static int
ntv_rdmsr_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	uint_t msr = (uint_t)arg1;
	uint64_t *valp = (uint64_t *)arg2;
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;

	on_trap_data_t otd;

	/* Catch any trap raised by the MSR access itself */
	if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
		if (checked_rdmsr(msr, valp) == 0)
			*rcp = CMI_SUCCESS;
		else
			*rcp = CMIERR_NOTSUP;
	} else {
		*rcp = CMIERR_MSRGPF;
		atomic_inc_32(&cmi_trapped_rdmsr);
	}
	no_trap();

	return (0);
}
613 
614 static cmi_errno_t
615 ntv_rdmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t *valp)
616 {
617 	cpu_t *cp = (cpu_t *)hdl->cmih_hdlpriv;
618 
619 	return (call_func_ntv(cp->cpu_id, ntv_rdmsr_xc,
620 	    (xc_arg_t)msr, (xc_arg_t)valp));
621 }
622 
/* Count of MSR writes that trapped; bumped in ntv_wrmsr_xc below */
volatile uint32_t cmi_trapped_wrmsr;

/*
 * Cross-call target: write *arg2 to MSR arg1 on the current cpu.
 * Result via *arg3: CMI_SUCCESS, CMIERR_NOTSUP, or CMIERR_MSRGPF
 * (the write trapped).
 */
/*ARGSUSED*/
static int
ntv_wrmsr_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	uint_t msr = (uint_t)arg1;
	uint64_t val = *((uint64_t *)arg2);
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
	on_trap_data_t otd;

	/* Catch any trap raised by the MSR access itself */
	if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
		if (checked_wrmsr(msr, val) == 0)
			*rcp = CMI_SUCCESS;
		else
			*rcp = CMIERR_NOTSUP;
	} else {
		*rcp = CMIERR_MSRGPF;
		atomic_inc_32(&cmi_trapped_wrmsr);
	}
	no_trap();

	return (0);

}
648 
649 static cmi_errno_t
650 ntv_wrmsr(cmi_hdl_impl_t *hdl, uint_t msr, uint64_t val)
651 {
652 	cpu_t *cp = (cpu_t *)hdl->cmih_hdlpriv;
653 
654 	return (call_func_ntv(cp->cpu_id, ntv_wrmsr_xc,
655 	    (xc_arg_t)msr, (xc_arg_t)&val));
656 }
657 
/*
 * Cross-call target: raise interrupt arg1 on the current cpu -
 * int18() for T_MCE (#MC), int_cmci() otherwise.
 */
/*ARGSUSED*/
static int
ntv_int_xc(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	cmi_errno_t *rcp = (cmi_errno_t *)arg3;
	int int_no = (int)arg1;

	if (int_no == T_MCE)
		int18();
	else
		int_cmci();
	*rcp = CMI_SUCCESS;

	return (0);
}
673 
674 static void
675 ntv_int(cmi_hdl_impl_t *hdl, int int_no)
676 {
677 	cpu_t *cp = (cpu_t *)hdl->cmih_hdlpriv;
678 
679 	(void) call_func_ntv(cp->cpu_id, ntv_int_xc, (xc_arg_t)int_no, NULL);
680 }
681 
/*
 * Ops structure for handle operations.  The per-class initializers
 * below are positional, so they must list functions in exactly the
 * order the members are declared here.  A handle's cmih_ops is set to
 * &cmi_hdl_ops[class] at creation time.
 */
struct cmi_hdl_ops {
	uint_t (*cmio_vendor)(cmi_hdl_impl_t *);
	const char *(*cmio_vendorstr)(cmi_hdl_impl_t *);
	uint_t (*cmio_family)(cmi_hdl_impl_t *);
	uint_t (*cmio_model)(cmi_hdl_impl_t *);
	uint_t (*cmio_stepping)(cmi_hdl_impl_t *);
	uint_t (*cmio_chipid)(cmi_hdl_impl_t *);
	uint_t (*cmio_coreid)(cmi_hdl_impl_t *);
	uint_t (*cmio_strandid)(cmi_hdl_impl_t *);
	boolean_t (*cmio_mstrand)(cmi_hdl_impl_t *);
	uint32_t (*cmio_chiprev)(cmi_hdl_impl_t *);
	const char *(*cmio_chiprevstr)(cmi_hdl_impl_t *);
	uint32_t (*cmio_getsockettype)(cmi_hdl_impl_t *);
	ulong_t (*cmio_getcr4)(cmi_hdl_impl_t *);
	void (*cmio_setcr4)(cmi_hdl_impl_t *, ulong_t);
	cmi_errno_t (*cmio_rdmsr)(cmi_hdl_impl_t *, uint_t, uint64_t *);
	cmi_errno_t (*cmio_wrmsr)(cmi_hdl_impl_t *, uint_t, uint64_t);
	void (*cmio_int)(cmi_hdl_impl_t *, int);
} cmi_hdl_ops[] = {
	/*
	 * CMI_HDL_NATIVE - ops when apparently running on bare-metal
	 */
	{
		ntv_vendor,
		ntv_vendorstr,
		ntv_family,
		ntv_model,
		ntv_stepping,
		ntv_chipid,
		ntv_coreid,
		ntv_strandid,
		ntv_mstrand,
		ntv_chiprev,
		ntv_chiprevstr,
		ntv_getsockettype,
		ntv_getcr4,
		ntv_setcr4,
		ntv_rdmsr,
		ntv_wrmsr,
		ntv_int
	},
};
727 
#ifndef __xpv
/*
 * Walk the cpu list looking for the cpu_t whose hardware chip, core
 * and strand ids match the requested tuple; return it, or NULL if no
 * cpu matches (or the class is not CMI_HDL_NATIVE).  Preemption is
 * disabled for the walk so CPU - our circular-list starting point -
 * remains stable.
 */
static void *
cpu_search(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
    uint_t strandid)
{
	switch (class) {
	case CMI_HDL_NATIVE: {
		cpu_t *cp, *startcp;

		kpreempt_disable();
		cp = startcp = CPU;
		do {
			if (cmi_ntv_hwchipid(cp) == chipid &&
			    cmi_ntv_hwcoreid(cp) == coreid &&
			    cmi_ntv_hwstrandid(cp) == strandid) {
				kpreempt_enable();
				return ((void *)cp);
			}

			cp = cp->cpu_next;
		} while (cp != startcp);
		kpreempt_enable();
		return (NULL);
	}

	default:
		return (NULL);
	}
}
#endif
758 
/*
 * Create and register a handle for the given (chipid, coreid, strandid)
 * resource.  Returns NULL if the ids exceed the array encoding limits,
 * if no matching cpu can be found (native case), or if a handle already
 * exists for the tuple.  On success the handle is installed in
 * cmi_hdl_arr with an initial reference count of 1.
 */
cmi_hdl_t
cmi_hdl_create(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
    uint_t strandid, boolean_t mstrand)
{
	cmi_hdl_impl_t *hdl;
	void *priv = NULL;
	int idx;

	/* Reject ids that won't fit in the handle array index encoding */
	if (chipid > CMI_MAX_CHIPS - 1 || coreid > CMI_MAX_CORES_PER_CHIP - 1 ||
	    strandid > CMI_MAX_STRANDS_PER_CORE - 1)
		return (NULL);

#ifndef __xpv
	if ((priv = cpu_search(class, chipid, coreid, strandid)) == NULL)
		return (NULL);
#endif

	hdl = kmem_zalloc(sizeof (*hdl), KM_SLEEP);

	hdl->cmih_class = class;
	hdl->cmih_ops = &cmi_hdl_ops[class];
	hdl->cmih_chipid = chipid;
	hdl->cmih_coreid = coreid;
	hdl->cmih_strandid = strandid;
	hdl->cmih_mstrand = mstrand;
	hdl->cmih_hdlpriv = priv;
	/* Both hardware and interposed MSR sources are enabled by default */
	hdl->cmih_msrsrc = CMI_MSR_FLAG_RD_HWOK | CMI_MSR_FLAG_RD_INTERPOSEOK |
	    CMI_MSR_FLAG_WR_HWOK | CMI_MSR_FLAG_WR_INTERPOSEOK;

	/* Lazily allocate the (maximal) handle array on first creation */
	if (cmi_hdl_arr == NULL) {
		size_t sz = CMI_HDL_ARR_SZ * sizeof (struct cmi_hdl_arr_ent);
		void *arr = kmem_zalloc(sz, KM_SLEEP);

		if (atomic_cas_ptr(&cmi_hdl_arr, NULL, arr) != NULL)
			kmem_free(arr, sz); /* someone beat us */
	}

	idx = CMI_HDL_ARR_IDX(chipid, coreid, strandid);
	if (cmi_hdl_arr[idx].cmae_refcnt != 0 ||
	    cmi_hdl_arr[idx].cmae_hdlp != NULL) {
		/*
		 * Somehow this (chipid, coreid, strandid) id tuple has
		 * already been assigned!  This indicates that the
		 * callers logic in determining these values is busted,
		 * or perhaps undermined by bad BIOS setup.  Complain,
		 * and refuse to initialize this tuple again as bad things
		 * will happen.
		 */
		cmn_err(CE_NOTE, "cmi_hdl_create: chipid %d coreid %d "
		    "strandid %d handle already allocated!",
		    chipid, coreid, strandid);
		kmem_free(hdl, sizeof (*hdl));
		return (NULL);
	}

	/*
	 * Once we store a nonzero reference count others can find this
	 * handle via cmi_hdl_lookup etc.  This initial hold on the handle
	 * is to be dropped only if some other part of cmi initialization
	 * fails or, if it succeeds, at later cpu deconfigure.  Note that
	 * the module private data we hold in cmih_cmi and cmih_cmidata
	 * is still NULL at this point (the caller will fill it with
	 * cmi_hdl_setcmi if it initializes) so consumers of handles
	 * should always be ready for that possibility.
	 */
	cmi_hdl_arr[idx].cmae_hdlp = hdl;
	hdl->cmih_refcntp = &cmi_hdl_arr[idx].cmae_refcnt;
	cmi_hdl_arr[idx].cmae_refcnt = 1;

	return ((cmi_hdl_t)hdl);
}
830 
/*
 * Apply an additional hold to an already-held handle.  Always succeeds:
 * this may only be used between handle creation and the final release,
 * so the reference count is known to be nonzero (asserted).
 */
void
cmi_hdl_hold(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	ASSERT(*hdl->cmih_refcntp != 0); /* must not be the initial hold */

	atomic_inc_32(hdl->cmih_refcntp);
}
840 
/*
 * Attempt to take a reference on the handle at the given array index.
 * Returns 1 with the reference applied, or 0 if the entry is empty or
 * the handle is being (or has been) destroyed.  A compare-and-swap
 * loop is used rather than a plain atomic increment because a count
 * that has dropped to zero must never be raised again.
 */
static int
cmi_hdl_canref(int arridx)
{
	volatile uint32_t *refcntp;
	uint32_t refcnt;

	if (cmi_hdl_arr == NULL)
		return (0);

	refcntp = &cmi_hdl_arr[arridx].cmae_refcnt;
	refcnt = *refcntp;

	if (refcnt == 0) {
		/*
		 * Associated object never existed, is being destroyed,
		 * or has been destroyed.
		 */
		return (0);
	}

	/*
	 * We cannot use atomic increment here because once the reference
	 * count reaches zero it must never be bumped up again.
	 */
	while (refcnt != 0) {
		if (atomic_cas_32(refcntp, refcnt, refcnt + 1) == refcnt)
			return (1);
		refcnt = *refcntp;
	}

	/*
	 * Somebody dropped the reference count to 0 after our initial
	 * check.
	 */
	return (0);
}
877 
878 
/*
 * Drop a reference on the handle.  When the count reaches zero no new
 * holds can be applied (see cmi_hdl_canref), so the array entry is
 * cleared and the handle freed.
 */
void
cmi_hdl_rele(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
	int idx;

	ASSERT(*hdl->cmih_refcntp > 0);

	if (atomic_dec_32_nv(hdl->cmih_refcntp) > 0)
		return;

	/* Last reference dropped - retire the array entry and free */
	idx = CMI_HDL_ARR_IDX(hdl->cmih_chipid, hdl->cmih_coreid,
	    hdl->cmih_strandid);
	cmi_hdl_arr[idx].cmae_hdlp = NULL;

	kmem_free(hdl, sizeof (*hdl));
}
896 
/* Attach consumer-private data to the handle */
void
cmi_hdl_setspecific(cmi_hdl_t ophdl, void *arg)
{
	IMPLHDL(ophdl)->cmih_spec = arg;
}

/* Retrieve data previously attached with cmi_hdl_setspecific */
void *
cmi_hdl_getspecific(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_spec);
}

/*
 * Register memory-controller ops and private data on the handle.
 * May only be done once per handle (asserted).
 */
void
cmi_hdl_setmc(cmi_hdl_t ophdl, const struct cmi_mc_ops *mcops, void *mcdata)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	ASSERT(hdl->cmih_mcops == NULL && hdl->cmih_mcdata == NULL);
	hdl->cmih_mcops = mcops;
	hdl->cmih_mcdata = mcdata;
}

/* Return the mc ops registered via cmi_hdl_setmc; NULL until then */
const struct cmi_mc_ops *
cmi_hdl_getmcops(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_mcops);
}

/* Return the mc private data registered via cmi_hdl_setmc; NULL until then */
void *
cmi_hdl_getmcdata(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_mcdata);
}
930 
/*
 * Look up the handle for the given class and id tuple.  On success a
 * reference is taken on the handle (the caller must drop it with
 * cmi_hdl_rele) and the handle is returned; on failure - ids out of
 * range, no live handle, or class mismatch - NULL is returned.
 */
cmi_hdl_t
cmi_hdl_lookup(enum cmi_hdl_class class, uint_t chipid, uint_t coreid,
    uint_t strandid)
{
	int idx;

	if (chipid > CMI_MAX_CHIPS - 1 || coreid > CMI_MAX_CORES_PER_CHIP - 1 ||
	    strandid > CMI_MAX_STRANDS_PER_CORE - 1)
		return (NULL);

	idx = CMI_HDL_ARR_IDX(chipid, coreid, strandid);

	/* Takes a reference on success; guards against teardown races */
	if (!cmi_hdl_canref(idx))
		return (NULL);

	if (cmi_hdl_arr[idx].cmae_hdlp->cmih_class != class) {
		/* Wrong class - drop the reference we just took */
		cmi_hdl_rele((cmi_hdl_t)cmi_hdl_arr[idx].cmae_hdlp);
		return (NULL);
	}

	return ((cmi_hdl_t)cmi_hdl_arr[idx].cmae_hdlp);
}
953 
954 cmi_hdl_t
955 cmi_hdl_any(void)
956 {
957 	int i;
958 
959 	for (i = 0; i < CMI_HDL_ARR_SZ; i++) {
960 		if (cmi_hdl_canref(i))
961 			return ((cmi_hdl_t)cmi_hdl_arr[i].cmae_hdlp);
962 	}
963 
964 	return (NULL);
965 }
966 
967 void
968 cmi_hdl_walk(int (*cbfunc)(cmi_hdl_t, void *, void *, void *),
969     void *arg1, void *arg2, void *arg3)
970 {
971 	int i;
972 
973 	for (i = 0; i < CMI_HDL_ARR_SZ; i++) {
974 		if (cmi_hdl_canref(i)) {
975 			cmi_hdl_impl_t *hdl = cmi_hdl_arr[i].cmae_hdlp;
976 
977 			if ((*cbfunc)((cmi_hdl_t)hdl, arg1, arg2, arg3) ==
978 			    CMI_HDL_WALK_DONE) {
979 				cmi_hdl_rele((cmi_hdl_t)hdl);
980 				break;
981 			}
982 			cmi_hdl_rele((cmi_hdl_t)hdl);
983 		}
984 	}
985 }
986 
/* Record the cpu module control structure and its private data */
void
cmi_hdl_setcmi(cmi_hdl_t ophdl, void *cmi, void *cmidata)
{
	IMPLHDL(ophdl)->cmih_cmidata = cmidata;
	IMPLHDL(ophdl)->cmih_cmi = cmi;
}

/* Return the cpu module set via cmi_hdl_setcmi; NULL until then */
void *
cmi_hdl_getcmi(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_cmi);
}

/* Return the cpu module private data set via cmi_hdl_setcmi; NULL until then */
void *
cmi_hdl_getcmidata(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_cmidata);
}

/* Return the class this handle was created with */
enum cmi_hdl_class
cmi_hdl_class(cmi_hdl_t ophdl)
{
	return (IMPLHDL(ophdl)->cmih_class);
}
1011 
/*
 * Generate the public accessors cmi_hdl_<what>(), each of which simply
 * delegates to the cmio_<what> member of the handle's ops vector.
 */
#define	CMI_HDL_OPFUNC(what, type)				\
	type							\
	cmi_hdl_##what(cmi_hdl_t ophdl)				\
	{							\
		return (IMPLHDL(ophdl)->cmih_ops->		\
		    cmio_##what(IMPLHDL(ophdl)));		\
	}

CMI_HDL_OPFUNC(vendor, uint_t)
CMI_HDL_OPFUNC(vendorstr, const char *)
CMI_HDL_OPFUNC(family, uint_t)
CMI_HDL_OPFUNC(model, uint_t)
CMI_HDL_OPFUNC(stepping, uint_t)
CMI_HDL_OPFUNC(chipid, uint_t)
CMI_HDL_OPFUNC(coreid, uint_t)
CMI_HDL_OPFUNC(strandid, uint_t)
CMI_HDL_OPFUNC(mstrand, boolean_t)
CMI_HDL_OPFUNC(chiprev, uint32_t)
CMI_HDL_OPFUNC(chiprevstr, const char *)
CMI_HDL_OPFUNC(getsockettype, uint32_t)
1032 
1033 void
1034 cmi_hdl_int(cmi_hdl_t ophdl, int num)
1035 {
1036 	IMPLHDL(ophdl)->cmih_ops->cmio_int(IMPLHDL(ophdl), num);
1037 }
1038 
#ifndef	__xpv
/*
 * Return hardware chip instance; cpuid_get_chipid provides this directly.
 */
uint_t
cmi_ntv_hwchipid(cpu_t *cp)
{
	return (cpuid_get_chipid(cp));
}

/*
 * Return core instance within a single chip.
 */
uint_t
cmi_ntv_hwcoreid(cpu_t *cp)
{
	return (cpuid_get_pkgcoreid(cp));
}

/*
 * Return strand number within a single core.  cpuid_get_clogid numbers
 * all execution units (strands, or cores in unstranded models) sequentially
 * within a single chip.
 */
uint_t
cmi_ntv_hwstrandid(cpu_t *cp)
{
	int strands_per_core = cpuid_get_ncpu_per_chip(cp) /
	    cpuid_get_ncore_per_chip(cp);

	return (cpuid_get_clogid(cp) % strands_per_core);
}

/*
 * Return nonzero if the chip exposes more than one strand per core
 * (chip multithreading).
 */
boolean_t
cmi_ntv_hwmstrand(cpu_t *cp)
{
	int strands_per_core = cpuid_get_ncpu_per_chip(cp) /
	    cpuid_get_ncore_per_chip(cp);

	return (strands_per_core > 1);
}
#endif	/* __xpv */
1081 
/* Disable reads of real MSR hardware for this handle (interpose-only) */
void
cmi_hdlconf_rdmsr_nohw(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	hdl->cmih_msrsrc &= ~CMI_MSR_FLAG_RD_HWOK;
}

/* Disable writes to real MSR hardware for this handle */
void
cmi_hdlconf_wrmsr_nohw(cmi_hdl_t ophdl)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	hdl->cmih_msrsrc &= ~CMI_MSR_FLAG_WR_HWOK;
}
1097 
/*
 * Read an MSR via the handle.  An enabled interposed value wins over
 * the hardware; if hardware reads are disabled and no interposed value
 * exists, CMIERR_INTERPOSE is returned and *valp is untouched.
 */
cmi_errno_t
cmi_hdl_rdmsr(cmi_hdl_t ophdl, uint_t msr, uint64_t *valp)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	/*
	 * Regardless of the handle class, we first check for an
	 * interposed value.  In the xVM case you probably want to
	 * place interposed values within the hypervisor itself, but
	 * we still allow interposing them in dom0 for test and bringup
	 * purposes.
	 */
	if ((hdl->cmih_msrsrc & CMI_MSR_FLAG_RD_INTERPOSEOK) &&
	    msri_lookup(hdl, msr, valp))
		return (CMI_SUCCESS);

	if (!(hdl->cmih_msrsrc & CMI_MSR_FLAG_RD_HWOK))
		return (CMIERR_INTERPOSE);

	return (hdl->cmih_ops->cmio_rdmsr(hdl, msr, valp));
}
1119 
/*
 * Write an MSR via the handle.  Any interposed value for this MSR is
 * invalidated first; if hardware writes are disabled the write is
 * silently discarded and CMI_SUCCESS returned.
 */
cmi_errno_t
cmi_hdl_wrmsr(cmi_hdl_t ophdl, uint_t msr, uint64_t val)
{
	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);

	/* Invalidate any interposed value */
	msri_rment(hdl, msr);

	if (!(hdl->cmih_msrsrc & CMI_MSR_FLAG_WR_HWOK))
		return (CMI_SUCCESS);

	return (hdl->cmih_ops->cmio_wrmsr(hdl, msr, val));
}
1133 
1134 void
1135 cmi_hdl_enable_mce(cmi_hdl_t ophdl)
1136 {
1137 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1138 	ulong_t cr4 = hdl->cmih_ops->cmio_getcr4(hdl);
1139 
1140 	hdl->cmih_ops->cmio_setcr4(hdl, cr4 | CR4_MCE);
1141 }
1142 
1143 void
1144 cmi_hdl_msrinterpose(cmi_hdl_t ophdl, cmi_mca_regs_t *regs, uint_t nregs)
1145 {
1146 	cmi_hdl_impl_t *hdl = IMPLHDL(ophdl);
1147 	int i;
1148 
1149 	for (i = 0; i < nregs; i++)
1150 		msri_addent(hdl, regs++);
1151 }
1152 
void
cmi_pcird_nohw(void)
{
	/* Disable PCI config space reads from actual hardware (global). */
	cmi_pcicfg_flags &= ~CMI_PCICFG_FLAG_RD_HWOK;
}
1158 
void
cmi_pciwr_nohw(void)
{
	/* Disable PCI config space writes to actual hardware (global). */
	cmi_pcicfg_flags &= ~CMI_PCICFG_FLAG_WR_HWOK;
}
1164 
1165 static uint32_t
1166 cmi_pci_get_cmn(int bus, int dev, int func, int reg, int asz,
1167     int *interpose, ddi_acc_handle_t hdl)
1168 {
1169 	uint32_t val;
1170 
1171 	if (cmi_pcicfg_flags & CMI_PCICFG_FLAG_RD_INTERPOSEOK &&
1172 	    pcii_lookup(bus, dev, func, reg, asz, &val)) {
1173 		if (interpose)
1174 			*interpose = 1;
1175 		return (val);
1176 	}
1177 	if (interpose)
1178 		*interpose = 0;
1179 
1180 	if (!(cmi_pcicfg_flags & CMI_PCICFG_FLAG_RD_HWOK))
1181 		return (0);
1182 
1183 	switch (asz) {
1184 	case 1:
1185 		if (hdl)
1186 			val = pci_config_get8(hdl, (off_t)reg);
1187 		else
1188 			val = (*pci_getb_func)(bus, dev, func, reg);
1189 		break;
1190 	case 2:
1191 		if (hdl)
1192 			val = pci_config_get16(hdl, (off_t)reg);
1193 		else
1194 			val = (*pci_getw_func)(bus, dev, func, reg);
1195 		break;
1196 	case 4:
1197 		if (hdl)
1198 			val = pci_config_get32(hdl, (off_t)reg);
1199 		else
1200 			val = (*pci_getl_func)(bus, dev, func, reg);
1201 		break;
1202 	default:
1203 		val = 0;
1204 	}
1205 	return (val);
1206 }
1207 
1208 uint8_t
1209 cmi_pci_getb(int bus, int dev, int func, int reg, int *interpose,
1210     ddi_acc_handle_t hdl)
1211 {
1212 	return ((uint8_t)cmi_pci_get_cmn(bus, dev, func, reg, 1, interpose,
1213 	    hdl));
1214 }
1215 
1216 uint16_t
1217 cmi_pci_getw(int bus, int dev, int func, int reg, int *interpose,
1218     ddi_acc_handle_t hdl)
1219 {
1220 	return ((uint16_t)cmi_pci_get_cmn(bus, dev, func, reg, 2, interpose,
1221 	    hdl));
1222 }
1223 
1224 uint32_t
1225 cmi_pci_getl(int bus, int dev, int func, int reg, int *interpose,
1226     ddi_acc_handle_t hdl)
1227 {
1228 	return (cmi_pci_get_cmn(bus, dev, func, reg, 4, interpose, hdl));
1229 }
1230 
void
cmi_pci_interposeb(int bus, int dev, int func, int reg, uint8_t val)
{
	/* Record a byte-sized interposed value for this config register */
	pcii_addent(bus, dev, func, reg, val, 1);
}
1236 
void
cmi_pci_interposew(int bus, int dev, int func, int reg, uint16_t val)
{
	/* Record a word-sized interposed value for this config register */
	pcii_addent(bus, dev, func, reg, val, 2);
}
1242 
void
cmi_pci_interposel(int bus, int dev, int func, int reg, uint32_t val)
{
	/* Record a longword-sized interposed value for this config register */
	pcii_addent(bus, dev, func, reg, val, 4);
}
1248 
1249 static void
1250 cmi_pci_put_cmn(int bus, int dev, int func, int reg, int asz,
1251     ddi_acc_handle_t hdl, uint32_t val)
1252 {
1253 	/*
1254 	 * If there is an interposed value for this register invalidate it.
1255 	 */
1256 	pcii_rment(bus, dev, func, reg, asz);
1257 
1258 	if (!(cmi_pcicfg_flags & CMI_PCICFG_FLAG_WR_HWOK))
1259 		return;
1260 
1261 	switch (asz) {
1262 	case 1:
1263 		if (hdl)
1264 			pci_config_put8(hdl, (off_t)reg, (uint8_t)val);
1265 		else
1266 			(*pci_putb_func)(bus, dev, func, reg, (uint8_t)val);
1267 		break;
1268 
1269 	case 2:
1270 		if (hdl)
1271 			pci_config_put16(hdl, (off_t)reg, (uint16_t)val);
1272 		else
1273 			(*pci_putw_func)(bus, dev, func, reg, (uint16_t)val);
1274 		break;
1275 
1276 	case 4:
1277 		if (hdl)
1278 			pci_config_put32(hdl, (off_t)reg, val);
1279 		else
1280 			(*pci_putl_func)(bus, dev, func, reg, val);
1281 		break;
1282 
1283 	default:
1284 		break;
1285 	}
1286 }
1287 
1288 extern void
1289 cmi_pci_putb(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
1290     uint8_t val)
1291 {
1292 	cmi_pci_put_cmn(bus, dev, func, reg, 1, hdl, val);
1293 }
1294 
1295 extern void
1296 cmi_pci_putw(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
1297     uint16_t val)
1298 {
1299 	cmi_pci_put_cmn(bus, dev, func, reg, 2, hdl, val);
1300 }
1301 
1302 extern void
1303 cmi_pci_putl(int bus, int dev, int func, int reg, ddi_acc_handle_t hdl,
1304     uint32_t val)
1305 {
1306 	cmi_pci_put_cmn(bus, dev, func, reg, 4, hdl, val);
1307 }
1308