xref: /titanic_50/usr/src/uts/sun4u/io/px/px_lib4u.c (revision bb121940c2fe627557326e0143391ace6e6b7372)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/types.h>
29 #include <sys/kmem.h>
30 #include <sys/conf.h>
31 #include <sys/ddi.h>
32 #include <sys/sunddi.h>
33 #include <sys/fm/protocol.h>
34 #include <sys/fm/util.h>
35 #include <sys/modctl.h>
36 #include <sys/disp.h>
37 #include <sys/stat.h>
38 #include <sys/ddi_impldefs.h>
39 #include <sys/vmem.h>
40 #include <sys/iommutsb.h>
41 #include <sys/cpuvar.h>
42 #include <sys/ivintr.h>
43 #include <sys/byteorder.h>
44 #include <sys/hotplug/pci/pciehpc.h>
45 #include <sys/spl.h>
46 #include <px_obj.h>
47 #include <pcie_pwr.h>
48 #include "px_tools_var.h"
49 #include <px_regs.h>
50 #include <px_csr.h>
51 #include <sys/machsystm.h>
52 #include "px_lib4u.h"
53 #include "px_err.h"
54 #include "oberon_regs.h"
55 
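/*
 * jbus_stst_order is provided only by certain CPU modules; it is declared
 * weak below so px_lib_dma_sync() can test its address at runtime and skip
 * the JBus store-order flush when the symbol is not present.
 */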
56 #pragma weak jbus_stst_order
57 
58 extern void jbus_stst_order();
59 
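/*
 * px_mmu_dvma_end marks the top of the 32-bit DVMA window advertised to
 * child devices via the "virtual-dma" property; px_paddr_mask is set at
 * attach time to the chip-specific physical address mask (Fire vs. Oberon).
 */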
60 ulong_t px_mmu_dvma_end = 0xfffffffful;
61 uint_t px_ranges_phi_mask = 0xfffffffful;
62 uint64_t *px_oberon_ubc_scratch_regs;
63 uint64_t px_paddr_mask;
64 
65 static int px_goto_l23ready(px_t *px_p);
66 static int px_goto_l0(px_t *px_p);
67 static int px_pre_pwron_check(px_t *px_p);
68 static uint32_t px_identity_init(px_t *px_p);
69 static boolean_t px_cpr_callb(void *arg, int code);
70 static uint_t px_cb_intr(caddr_t arg);
71 
72 /*
73  * px_lib_map_regs
74  *
75  * This function is called from the attach routine to map the registers
76  * accessed by this driver.
77  *
78  * used by: px_attach()
79  *
80  * return value: DDI_FAILURE on failure
81  */
82 int
83 px_lib_map_regs(pxu_t *pxu_p, dev_info_t *dip)
84 {
85 	ddi_device_acc_attr_t	attr;
86 	px_reg_bank_t		reg_bank = PX_REG_CSR;
87 
88 	DBG(DBG_ATTACH, dip, "px_lib_map_regs: pxu_p:0x%p, dip 0x%p\n",
89 		pxu_p, dip);
90 
91 	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
92 	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
93 	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
94 
95 	/*
96 	 * PCI CSR Base
97 	 */
98 	if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank],
99 	    0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) {
100 		goto fail;
101 	}
102 
103 	reg_bank++;
104 
105 	/*
106 	 * XBUS CSR Base
107 	 */
108 	if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank],
109 	    0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) {
110 		goto fail;
111 	}
112 
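	/*
	 * Bias the XBC base address so that the standard XBC register
	 * offsets (defined relative to FIRE_CONTROL_STATUS and above) can
	 * be applied to it directly.
	 */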
113 	pxu_p->px_address[reg_bank] -= FIRE_CONTROL_STATUS;
114 
115 done:
116 	for (; reg_bank >= PX_REG_CSR; reg_bank--) {
117 		DBG(DBG_ATTACH, dip, "reg_bank 0x%x address 0x%p\n",
118 		    reg_bank, pxu_p->px_address[reg_bank]);
119 	}
120 
121 	return (DDI_SUCCESS);
122 
123 fail:
124 	cmn_err(CE_WARN, "%s%d: unable to map reg entry %d\n",
125 	    ddi_driver_name(dip), ddi_get_instance(dip), reg_bank);
126 
127 	for (reg_bank--; reg_bank >= PX_REG_CSR; reg_bank--) {
128 		pxu_p->px_address[reg_bank] = NULL;
129 		ddi_regs_map_free(&pxu_p->px_ac[reg_bank]);
130 	}
131 
132 	return (DDI_FAILURE);
133 }
134 
135 /*
136  * px_lib_unmap_regs:
137  *
138  * This routine unmaps the registers mapped by px_lib_map_regs().
139  *
140  * used by: px_detach(), and error conditions in px_attach()
141  *
142  * return value: none
143  */
144 void
145 px_lib_unmap_regs(pxu_t *pxu_p)
146 {
147 	int i;
148 
149 	for (i = 0; i < PX_REG_MAX; i++) {
150 		if (pxu_p->px_ac[i])
151 			ddi_regs_map_free(&pxu_p->px_ac[i]);
152 	}
153 }
154 
155 int
156 px_lib_dev_init(dev_info_t *dip, devhandle_t *dev_hdl)
157 {
158 
159 	caddr_t			xbc_csr_base, csr_base;
160 	px_dvma_range_prop_t	px_dvma_range;
161 	pxu_t			*pxu_p;
162 	uint8_t			chip_mask;
163 	px_t			*px_p = DIP_TO_STATE(dip);
164 	px_chip_type_t		chip_type = px_identity_init(px_p);
165 
166 	DBG(DBG_ATTACH, dip, "px_lib_dev_init: dip 0x%p", dip);
167 
168 	if (chip_type == PX_CHIP_UNIDENTIFIED) {
169 		cmn_err(CE_WARN, "%s%d: Unrecognized Hardware Version\n",
170 		    NAMEINST(dip));
171 		return (DDI_FAILURE);
172 	}
173 
174 	chip_mask = BITMASK(chip_type);
175 	px_paddr_mask = (chip_type == PX_CHIP_FIRE) ? MMU_FIRE_PADDR_MASK :
176 	    MMU_OBERON_PADDR_MASK;
177 
178 	/*
179 	 * Allocate platform specific structure and link it to
180 	 * the px state structure.
181 	 */
182 	pxu_p = kmem_zalloc(sizeof (pxu_t), KM_SLEEP);
183 	pxu_p->chip_type = chip_type;
184 	pxu_p->portid  = ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
185 	    "portid", -1);
186 
187 	/* Map in the registers */
188 	if (px_lib_map_regs(pxu_p, dip) == DDI_FAILURE) {
189 		kmem_free(pxu_p, sizeof (pxu_t));
190 
191 		return (DDI_FAILURE);
192 	}
193 
194 	xbc_csr_base = (caddr_t)pxu_p->px_address[PX_REG_XBC];
195 	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
196 
197 	pxu_p->tsb_cookie = iommu_tsb_alloc(pxu_p->portid);
198 	pxu_p->tsb_size = iommu_tsb_cookie_to_size(pxu_p->tsb_cookie);
199 	pxu_p->tsb_vaddr = iommu_tsb_cookie_to_va(pxu_p->tsb_cookie);
200 
201 	pxu_p->tsb_paddr = va_to_pa(pxu_p->tsb_vaddr);
202 
203 	/*
204 	 * Create "virtual-dma" property to support child devices
205 	 * needing to know DVMA range.
206 	 */
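	/*
	 * The DVMA window sits at the top of the 32-bit space: each 8-byte
	 * TSB entry (tsb_size >> 3 of them) maps one MMU page, and the
	 * window ends at px_mmu_dvma_end.
	 */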
207 	px_dvma_range.dvma_base = (uint32_t)px_mmu_dvma_end + 1
208 	    - ((pxu_p->tsb_size >> 3) << MMU_PAGE_SHIFT);
209 	px_dvma_range.dvma_len = (uint32_t)
210 	    px_mmu_dvma_end - px_dvma_range.dvma_base + 1;
211 
212 	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
213 		"virtual-dma", (caddr_t)&px_dvma_range,
214 		sizeof (px_dvma_range_prop_t));
215 	/*
216 	 * Initialize all Fire hardware-specific blocks.
217 	 */
218 	hvio_cb_init(xbc_csr_base, pxu_p);
219 	hvio_ib_init(csr_base, pxu_p);
220 	hvio_pec_init(csr_base, pxu_p);
221 	hvio_mmu_init(csr_base, pxu_p);
222 
223 	px_p->px_plat_p = (void *)pxu_p;
224 
225 	/*
226 	 * Initialize all the interrupt handlers
227 	 */
228 	switch (PX_CHIP_TYPE(pxu_p)) {
229 	case PX_CHIP_OBERON:
230 		/*
231 		 * Oberon hotplug uses the SPARE3 field in the ILU Error Log
232 		 * Enable register to indicate the status of leaf reset.
233 		 * We need to preserve the value of this bit and keep it in
234 		 * px_ilu_log_mask to reflect the state of the bit.
235 		 */
236 		if (CSR_BR(csr_base, ILU_ERROR_LOG_ENABLE, SPARE3))
237 			px_ilu_log_mask |= (1ull <<
238 			    ILU_ERROR_LOG_ENABLE_SPARE3);
239 		else
240 			px_ilu_log_mask &= ~(1ull <<
241 			    ILU_ERROR_LOG_ENABLE_SPARE3);
242 
243 		px_err_reg_setup_pcie(chip_mask, csr_base, PX_ERR_ENABLE);
244 		px_fabric_die_rc_ue |= PCIE_AER_UCE_UC;
245 		break;
246 
247 	case PX_CHIP_FIRE:
248 		px_err_reg_setup_pcie(chip_mask, csr_base, PX_ERR_ENABLE);
249 		break;
250 
251 	default:
252 		cmn_err(CE_WARN, "%s%d: PX primary bus Unknown\n",
253 		    ddi_driver_name(dip), ddi_get_instance(dip));
254 		return (DDI_FAILURE);
255 	}
256 
257 	/* Initialize device handle */
258 	*dev_hdl = (devhandle_t)csr_base;
259 
260 	DBG(DBG_ATTACH, dip, "px_lib_dev_init: dev_hdl 0x%llx\n", *dev_hdl);
261 
262 	return (DDI_SUCCESS);
263 }
264 
265 int
266 px_lib_dev_fini(dev_info_t *dip)
267 {
268 	caddr_t			csr_base;
269 	uint8_t			chip_mask;
270 	px_t			*px_p = DIP_TO_STATE(dip);
271 	pxu_t			*pxu_p = (pxu_t *)px_p->px_plat_p;
272 
273 	DBG(DBG_DETACH, dip, "px_lib_dev_fini: dip 0x%p\n", dip);
274 
275 	/*
276 	 * Deinitialize all the interrupt handlers
277 	 */
278 	switch (PX_CHIP_TYPE(pxu_p)) {
279 	case PX_CHIP_OBERON:
280 	case PX_CHIP_FIRE:
281 		chip_mask = BITMASK(PX_CHIP_TYPE(pxu_p));
282 		csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
283 		px_err_reg_setup_pcie(chip_mask, csr_base, PX_ERR_DISABLE);
284 		break;
285 
286 	default:
287 		cmn_err(CE_WARN, "%s%d: PX primary bus Unknown\n",
288 		    ddi_driver_name(dip), ddi_get_instance(dip));
289 		return (DDI_FAILURE);
290 	}
291 
292 	iommu_tsb_free(pxu_p->tsb_cookie);
293 
294 	px_lib_unmap_regs((pxu_t *)px_p->px_plat_p);
295 	kmem_free(px_p->px_plat_p, sizeof (pxu_t));
296 	px_p->px_plat_p = NULL;
297 
298 	return (DDI_SUCCESS);
299 }
300 
301 /*ARGSUSED*/
302 int
303 px_lib_intr_devino_to_sysino(dev_info_t *dip, devino_t devino,
304     sysino_t *sysino)
305 {
306 	px_t	*px_p = DIP_TO_STATE(dip);
307 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
308 	uint64_t	ret;
309 
310 	DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: dip 0x%p "
311 	    "devino 0x%x\n", dip, devino);
312 
313 	if ((ret = hvio_intr_devino_to_sysino(DIP_TO_HANDLE(dip),
314 	    pxu_p, devino, sysino)) != H_EOK) {
315 		DBG(DBG_LIB_INT, dip,
316 		    "hvio_intr_devino_to_sysino failed, ret 0x%lx\n", ret);
317 		return (DDI_FAILURE);
318 	}
319 
320 	DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: sysino 0x%llx\n",
321 	    *sysino);
322 
323 	return (DDI_SUCCESS);
324 }
325 
326 /*ARGSUSED*/
327 int
328 px_lib_intr_getvalid(dev_info_t *dip, sysino_t sysino,
329     intr_valid_state_t *intr_valid_state)
330 {
331 	uint64_t	ret;
332 
333 	DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: dip 0x%p sysino 0x%llx\n",
334 	    dip, sysino);
335 
336 	if ((ret = hvio_intr_getvalid(DIP_TO_HANDLE(dip),
337 	    sysino, intr_valid_state)) != H_EOK) {
338 		DBG(DBG_LIB_INT, dip, "hvio_intr_getvalid failed, ret 0x%lx\n",
339 		    ret);
340 		return (DDI_FAILURE);
341 	}
342 
343 	DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: intr_valid_state 0x%x\n",
344 	    *intr_valid_state);
345 
346 	return (DDI_SUCCESS);
347 }
348 
349 /*ARGSUSED*/
350 int
351 px_lib_intr_setvalid(dev_info_t *dip, sysino_t sysino,
352     intr_valid_state_t intr_valid_state)
353 {
354 	uint64_t	ret;
355 
356 	DBG(DBG_LIB_INT, dip, "px_lib_intr_setvalid: dip 0x%p sysino 0x%llx "
357 	    "intr_valid_state 0x%x\n", dip, sysino, intr_valid_state);
358 
359 	if ((ret = hvio_intr_setvalid(DIP_TO_HANDLE(dip),
360 	    sysino, intr_valid_state)) != H_EOK) {
361 		DBG(DBG_LIB_INT, dip, "hvio_intr_setvalid failed, ret 0x%lx\n",
362 		    ret);
363 		return (DDI_FAILURE);
364 	}
365 
366 	return (DDI_SUCCESS);
367 }
368 
369 /*ARGSUSED*/
370 int
371 px_lib_intr_getstate(dev_info_t *dip, sysino_t sysino,
372     intr_state_t *intr_state)
373 {
374 	uint64_t	ret;
375 
376 	DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: dip 0x%p sysino 0x%llx\n",
377 	    dip, sysino);
378 
379 	if ((ret = hvio_intr_getstate(DIP_TO_HANDLE(dip),
380 	    sysino, intr_state)) != H_EOK) {
381 		DBG(DBG_LIB_INT, dip, "hvio_intr_getstate failed, ret 0x%lx\n",
382 		    ret);
383 		return (DDI_FAILURE);
384 	}
385 
386 	DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: intr_state 0x%x\n",
387 	    *intr_state);
388 
389 	return (DDI_SUCCESS);
390 }
391 
392 /*ARGSUSED*/
393 int
394 px_lib_intr_setstate(dev_info_t *dip, sysino_t sysino,
395     intr_state_t intr_state)
396 {
397 	uint64_t	ret;
398 
399 	DBG(DBG_LIB_INT, dip, "px_lib_intr_setstate: dip 0x%p sysino 0x%llx "
400 	    "intr_state 0x%x\n", dip, sysino, intr_state);
401 
402 	if ((ret = hvio_intr_setstate(DIP_TO_HANDLE(dip),
403 	    sysino, intr_state)) != H_EOK) {
404 		DBG(DBG_LIB_INT, dip, "hvio_intr_setstate failed, ret 0x%lx\n",
405 		    ret);
406 		return (DDI_FAILURE);
407 	}
408 
409 	return (DDI_SUCCESS);
410 }
411 
412 /*ARGSUSED*/
413 int
414 px_lib_intr_gettarget(dev_info_t *dip, sysino_t sysino, cpuid_t *cpuid)
415 {
416 	px_t		*px_p = DIP_TO_STATE(dip);
417 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
418 	uint64_t	ret;
419 
420 	DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: dip 0x%p sysino 0x%llx\n",
421 	    dip, sysino);
422 
423 	if ((ret = hvio_intr_gettarget(DIP_TO_HANDLE(dip), pxu_p,
424 	    sysino, cpuid)) != H_EOK) {
425 		DBG(DBG_LIB_INT, dip, "hvio_intr_gettarget failed, ret 0x%lx\n",
426 		    ret);
427 		return (DDI_FAILURE);
428 	}
429 
430 	DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: cpuid 0x%x\n", *cpuid);
431 
432 	return (DDI_SUCCESS);
433 }
434 
435 /*ARGSUSED*/
436 int
437 px_lib_intr_settarget(dev_info_t *dip, sysino_t sysino, cpuid_t cpuid)
438 {
439 	px_t		*px_p = DIP_TO_STATE(dip);
440 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
441 	uint64_t	ret;
442 
443 	DBG(DBG_LIB_INT, dip, "px_lib_intr_settarget: dip 0x%p sysino 0x%llx "
444 	    "cpuid 0x%x\n", dip, sysino, cpuid);
445 
446 	if ((ret = hvio_intr_settarget(DIP_TO_HANDLE(dip), pxu_p,
447 	    sysino, cpuid)) != H_EOK) {
448 		DBG(DBG_LIB_INT, dip, "hvio_intr_settarget failed, ret 0x%lx\n",
449 		    ret);
450 		return (DDI_FAILURE);
451 	}
452 
453 	return (DDI_SUCCESS);
454 }
455 
456 /*ARGSUSED*/
457 int
458 px_lib_intr_reset(dev_info_t *dip)
459 {
460 	devino_t	ino;
461 	sysino_t	sysino;
462 
463 	DBG(DBG_LIB_INT, dip, "px_lib_intr_reset: dip 0x%p\n", dip);
464 
465 	/* Reset all Interrupts */
466 	for (ino = 0; ino < INTERRUPT_MAPPING_ENTRIES; ino++) {
467 		if (px_lib_intr_devino_to_sysino(dip, ino,
468 		    &sysino) != DDI_SUCCESS)
469 			return (BF_FATAL);
470 
471 		if (px_lib_intr_setstate(dip, sysino,
472 		    INTR_IDLE_STATE) != DDI_SUCCESS)
473 			return (BF_FATAL);
474 	}
475 
476 	return (BF_NONE);
477 }
478 
479 /*ARGSUSED*/
480 int
481 px_lib_iommu_map(dev_info_t *dip, tsbid_t tsbid, pages_t pages,
482     io_attributes_t attr, void *addr, size_t pfn_index, int flags)
483 {
484 	px_t		*px_p = DIP_TO_STATE(dip);
485 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
486 	uint64_t	ret;
487 
488 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: dip 0x%p tsbid 0x%llx "
489 	    "pages 0x%x attr 0x%x addr 0x%p pfn_index 0x%llx flags 0x%x\n",
490 	    dip, tsbid, pages, attr, addr, pfn_index, flags);
491 
492 	if ((ret = hvio_iommu_map(px_p->px_dev_hdl, pxu_p, tsbid, pages,
493 	    attr, addr, pfn_index, flags)) != H_EOK) {
494 		DBG(DBG_LIB_DMA, dip,
495 		    "px_lib_iommu_map failed, ret 0x%lx\n", ret);
496 		return (DDI_FAILURE);
497 	}
498 
499 	return (DDI_SUCCESS);
500 }
501 
502 /*ARGSUSED*/
503 int
504 px_lib_iommu_demap(dev_info_t *dip, tsbid_t tsbid, pages_t pages)
505 {
506 	px_t		*px_p = DIP_TO_STATE(dip);
507 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
508 	uint64_t	ret;
509 
510 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_demap: dip 0x%p tsbid 0x%llx "
511 	    "pages 0x%x\n", dip, tsbid, pages);
512 
513 	if ((ret = hvio_iommu_demap(px_p->px_dev_hdl, pxu_p, tsbid, pages))
514 	    != H_EOK) {
515 		DBG(DBG_LIB_DMA, dip,
516 		    "px_lib_iommu_demap failed, ret 0x%lx\n", ret);
517 
518 		return (DDI_FAILURE);
519 	}
520 
521 	return (DDI_SUCCESS);
522 }
523 
524 /*ARGSUSED*/
525 int
526 px_lib_iommu_getmap(dev_info_t *dip, tsbid_t tsbid, io_attributes_t *attr_p,
527     r_addr_t *r_addr_p)
528 {
529 	px_t	*px_p = DIP_TO_STATE(dip);
530 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
531 	uint64_t	ret;
532 
533 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: dip 0x%p tsbid 0x%llx\n",
534 	    dip, tsbid);
535 
536 	if ((ret = hvio_iommu_getmap(DIP_TO_HANDLE(dip), pxu_p, tsbid,
537 	    attr_p, r_addr_p)) != H_EOK) {
538 		DBG(DBG_LIB_DMA, dip,
539 		    "hvio_iommu_getmap failed, ret 0x%lx\n", ret);
540 
541 		return ((ret == H_ENOMAP) ? DDI_DMA_NOMAPPING : DDI_FAILURE);
542 	}
543 
544 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: attr 0x%x r_addr 0x%llx\n",
545 	    *attr_p, *r_addr_p);
546 
547 	return (DDI_SUCCESS);
548 }
549 
550 
551 /*
552  * Checks DMA attributes against the system bypass ranges.
553  * The bypass range is determined by the hardware. Return them so the
554  * common code can do generic checking against them.
555  */
556 /*ARGSUSED*/
557 int
558 px_lib_dma_bypass_rngchk(dev_info_t *dip, ddi_dma_attr_t *attr_p,
559     uint64_t *lo_p, uint64_t *hi_p)
560 {
561 	px_t	*px_p = DIP_TO_STATE(dip);
562 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
563 
564 	*lo_p = hvio_get_bypass_base(pxu_p);
565 	*hi_p = hvio_get_bypass_end(pxu_p);
566 
567 	return (DDI_SUCCESS);
568 }
569 
570 
571 /*ARGSUSED*/
572 int
573 px_lib_iommu_getbypass(dev_info_t *dip, r_addr_t ra, io_attributes_t attr,
574     io_addr_t *io_addr_p)
575 {
576 	uint64_t	ret;
577 	px_t	*px_p = DIP_TO_STATE(dip);
578 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
579 
580 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: dip 0x%p ra 0x%llx "
581 	    "attr 0x%x\n", dip, ra, attr);
582 
583 	if ((ret = hvio_iommu_getbypass(DIP_TO_HANDLE(dip), pxu_p, ra,
584 	    attr, io_addr_p)) != H_EOK) {
585 		DBG(DBG_LIB_DMA, dip,
586 		    "hvio_iommu_getbypass failed, ret 0x%lx\n", ret);
587 		return (DDI_FAILURE);
588 	}
589 
590 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: io_addr 0x%llx\n",
591 	    *io_addr_p);
592 
593 	return (DDI_SUCCESS);
594 }
595 
596 /*
597  * bus dma sync entry point.
598  */
599 /*ARGSUSED*/
600 int
601 px_lib_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
602     off_t off, size_t len, uint_t cache_flags)
603 {
604 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
605 	px_t	*px_p = DIP_TO_STATE(dip);
606 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
607 
608 	DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: dip 0x%p rdip 0x%p "
609 	    "handle 0x%llx off 0x%x len 0x%x flags 0x%x\n",
610 	    dip, rdip, handle, off, len, cache_flags);
611 
612 	/*
613 	 * No flush needed for Oberon
614 	 */
615 	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON)
616 		return (DDI_SUCCESS);
617 
618 	/*
619 	 * jbus_stst_order is found only in certain cpu modules.
620 	 * Just return success if not present.
621 	 */
622 	if (&jbus_stst_order == NULL)
623 		return (DDI_SUCCESS);
624 
625 	if (!(mp->dmai_flags & PX_DMAI_FLAGS_INUSE)) {
626 		cmn_err(CE_WARN, "%s%d: Unbound dma handle %p.",
627 		    ddi_driver_name(rdip), ddi_get_instance(rdip), (void *)mp);
628 
629 		return (DDI_FAILURE);
630 	}
631 
632 	if (mp->dmai_flags & PX_DMAI_FLAGS_NOSYNC)
633 		return (DDI_SUCCESS);
634 
635 	/*
636 	 * No flush needed when sending data from memory to device.
637 	 * Nothing to do to "sync" memory to what device would already see.
638 	 */
639 	if (!(mp->dmai_rflags & DDI_DMA_READ) ||
640 	    ((cache_flags & PX_DMA_SYNC_DDI_FLAGS) == DDI_DMA_SYNC_FORDEV))
641 		return (DDI_SUCCESS);
642 
643 	/*
644 	 * Perform necessary cpu workaround to ensure jbus ordering.
645 	 * CPU's internal "invalidate FIFOs" are flushed.
646 	 */
647 
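	/* Keep the flush on the current CPU while it executes. */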
648 #if !defined(lint)
649 	kpreempt_disable();
650 #endif
651 	jbus_stst_order();
652 #if !defined(lint)
653 	kpreempt_enable();
654 #endif
655 	return (DDI_SUCCESS);
656 }
657 
658 /*
659  * MSIQ Functions:
660  */
661 /*ARGSUSED*/
662 int
663 px_lib_msiq_init(dev_info_t *dip)
664 {
665 	px_t		*px_p = DIP_TO_STATE(dip);
666 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
667 	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
668 	px_dvma_addr_t	pg_index;
669 	size_t		size;
670 	int		ret;
671 
672 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_init: dip 0x%p\n", dip);
673 
674 	/*
675 	 * Map the EQ memory into the Fire MMU (has to be 512KB aligned)
676 	 * and then initialize the base address register.
677 	 *
678 	 * Allocate entries from Fire IOMMU so that the resulting address
679 	 * is properly aligned.  Calculate the index of the first allocated
680 	 * entry.  Note: The size of the mapping is assumed to be a multiple
681 	 * of the page size.
682 	 */
683 	size = msiq_state_p->msiq_cnt *
684 	    msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);
685 
686 	pxu_p->msiq_mapped_p = vmem_xalloc(px_p->px_mmu_p->mmu_dvma_map,
687 	    size, (512 * 1024), 0, 0, NULL, NULL, VM_NOSLEEP | VM_BESTFIT);
688 
689 	if (pxu_p->msiq_mapped_p == NULL)
690 		return (DDI_FAILURE);
691 
692 	pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p,
693 	    MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p));
694 
695 	if ((ret = px_lib_iommu_map(px_p->px_dip, PCI_TSBID(0, pg_index),
696 	    MMU_BTOP(size), PCI_MAP_ATTR_WRITE, msiq_state_p->msiq_buf_p,
697 	    0, MMU_MAP_BUF)) != DDI_SUCCESS) {
698 		DBG(DBG_LIB_MSIQ, dip,
699 		    "px_lib_iommu_map failed, ret 0x%x\n", ret);
700 
701 		(void) px_lib_msiq_fini(dip);
702 		return (DDI_FAILURE);
703 	}
704 
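	/*
	 * Program the EQ base address register through the hypervisor
	 * interface layer, using the DVMA mapping established above.
	 */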
705 	(void) hvio_msiq_init(DIP_TO_HANDLE(dip), pxu_p);
706 
707 	return (DDI_SUCCESS);
708 }
709 
710 /*ARGSUSED*/
711 int
712 px_lib_msiq_fini(dev_info_t *dip)
713 {
714 	px_t		*px_p = DIP_TO_STATE(dip);
715 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
716 	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
717 	px_dvma_addr_t	pg_index;
718 	size_t		size;
719 
720 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_fini: dip 0x%p\n", dip);
721 
722 	/*
723 	 * Unmap and free the EQ memory that had been mapped
724 	 * into the Fire IOMMU.
725 	 */
726 	size = msiq_state_p->msiq_cnt *
727 	    msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);
728 
729 	pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p,
730 	    MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p));
731 
732 	(void) px_lib_iommu_demap(px_p->px_dip,
733 	    PCI_TSBID(0, pg_index), MMU_BTOP(size));
734 
735 	/* Free the entries from the Fire MMU */
736 	vmem_xfree(px_p->px_mmu_p->mmu_dvma_map,
737 	    (void *)pxu_p->msiq_mapped_p, size);
738 
739 	return (DDI_SUCCESS);
740 }
741 
742 /*ARGSUSED*/
743 int
744 px_lib_msiq_info(dev_info_t *dip, msiqid_t msiq_id, r_addr_t *ra_p,
745     uint_t *msiq_rec_cnt_p)
746 {
747 	px_t		*px_p = DIP_TO_STATE(dip);
748 	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
749 	size_t		msiq_size;
750 
751 	DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: dip 0x%p msiq_id 0x%x\n",
752 	    dip, msiq_id);
753 
754 	msiq_size = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);
755 	*ra_p = (r_addr_t)((caddr_t)msiq_state_p->msiq_buf_p +
756 	    (msiq_id * msiq_size));
757 
758 	*msiq_rec_cnt_p = msiq_state_p->msiq_rec_cnt;
759 
760 	DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: ra 0x%llx msiq_rec_cnt 0x%x\n",
761 	    *ra_p, *msiq_rec_cnt_p);
762 
763 	return (DDI_SUCCESS);
764 }
765 
766 /*ARGSUSED*/
767 int
768 px_lib_msiq_getvalid(dev_info_t *dip, msiqid_t msiq_id,
769     pci_msiq_valid_state_t *msiq_valid_state)
770 {
771 	uint64_t	ret;
772 
773 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: dip 0x%p msiq_id 0x%x\n",
774 	    dip, msiq_id);
775 
776 	if ((ret = hvio_msiq_getvalid(DIP_TO_HANDLE(dip),
777 	    msiq_id, msiq_valid_state)) != H_EOK) {
778 		DBG(DBG_LIB_MSIQ, dip,
779 		    "hvio_msiq_getvalid failed, ret 0x%lx\n", ret);
780 		return (DDI_FAILURE);
781 	}
782 
783 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: msiq_valid_state 0x%x\n",
784 	    *msiq_valid_state);
785 
786 	return (DDI_SUCCESS);
787 }
788 
789 /*ARGSUSED*/
790 int
791 px_lib_msiq_setvalid(dev_info_t *dip, msiqid_t msiq_id,
792     pci_msiq_valid_state_t msiq_valid_state)
793 {
794 	uint64_t	ret;
795 
796 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setvalid: dip 0x%p msiq_id 0x%x "
797 	    "msiq_valid_state 0x%x\n", dip, msiq_id, msiq_valid_state);
798 
799 	if ((ret = hvio_msiq_setvalid(DIP_TO_HANDLE(dip),
800 	    msiq_id, msiq_valid_state)) != H_EOK) {
801 		DBG(DBG_LIB_MSIQ, dip,
802 		    "hvio_msiq_setvalid failed, ret 0x%lx\n", ret);
803 		return (DDI_FAILURE);
804 	}
805 
806 	return (DDI_SUCCESS);
807 }
808 
809 /*ARGSUSED*/
810 int
811 px_lib_msiq_getstate(dev_info_t *dip, msiqid_t msiq_id,
812     pci_msiq_state_t *msiq_state)
813 {
814 	uint64_t	ret;
815 
816 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: dip 0x%p msiq_id 0x%x\n",
817 	    dip, msiq_id);
818 
819 	if ((ret = hvio_msiq_getstate(DIP_TO_HANDLE(dip),
820 	    msiq_id, msiq_state)) != H_EOK) {
821 		DBG(DBG_LIB_MSIQ, dip,
822 		    "hvio_msiq_getstate failed, ret 0x%lx\n", ret);
823 		return (DDI_FAILURE);
824 	}
825 
826 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: msiq_state 0x%x\n",
827 	    *msiq_state);
828 
829 	return (DDI_SUCCESS);
830 }
831 
832 /*ARGSUSED*/
833 int
834 px_lib_msiq_setstate(dev_info_t *dip, msiqid_t msiq_id,
835     pci_msiq_state_t msiq_state)
836 {
837 	uint64_t	ret;
838 
839 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setstate: dip 0x%p msiq_id 0x%x "
840 	    "msiq_state 0x%x\n", dip, msiq_id, msiq_state);
841 
842 	if ((ret = hvio_msiq_setstate(DIP_TO_HANDLE(dip),
843 	    msiq_id, msiq_state)) != H_EOK) {
844 		DBG(DBG_LIB_MSIQ, dip,
845 		    "hvio_msiq_setstate failed, ret 0x%lx\n", ret);
846 		return (DDI_FAILURE);
847 	}
848 
849 	return (DDI_SUCCESS);
850 }
851 
852 /*ARGSUSED*/
853 int
854 px_lib_msiq_gethead(dev_info_t *dip, msiqid_t msiq_id,
855     msiqhead_t *msiq_head)
856 {
857 	uint64_t	ret;
858 
859 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: dip 0x%p msiq_id 0x%x\n",
860 	    dip, msiq_id);
861 
862 	if ((ret = hvio_msiq_gethead(DIP_TO_HANDLE(dip),
863 	    msiq_id, msiq_head)) != H_EOK) {
864 		DBG(DBG_LIB_MSIQ, dip,
865 		    "hvio_msiq_gethead failed, ret 0x%lx\n", ret);
866 		return (DDI_FAILURE);
867 	}
868 
869 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: msiq_head 0x%x\n",
870 	    *msiq_head);
871 
872 	return (DDI_SUCCESS);
873 }
874 
875 /*ARGSUSED*/
876 int
877 px_lib_msiq_sethead(dev_info_t *dip, msiqid_t msiq_id,
878     msiqhead_t msiq_head)
879 {
880 	uint64_t	ret;
881 
882 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_sethead: dip 0x%p msiq_id 0x%x "
883 	    "msiq_head 0x%x\n", dip, msiq_id, msiq_head);
884 
885 	if ((ret = hvio_msiq_sethead(DIP_TO_HANDLE(dip),
886 	    msiq_id, msiq_head)) != H_EOK) {
887 		DBG(DBG_LIB_MSIQ, dip,
888 		    "hvio_msiq_sethead failed, ret 0x%lx\n", ret);
889 		return (DDI_FAILURE);
890 	}
891 
892 	return (DDI_SUCCESS);
893 }
894 
895 /*ARGSUSED*/
896 int
897 px_lib_msiq_gettail(dev_info_t *dip, msiqid_t msiq_id,
898     msiqtail_t *msiq_tail)
899 {
900 	uint64_t	ret;
901 
902 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: dip 0x%p msiq_id 0x%x\n",
903 	    dip, msiq_id);
904 
905 	if ((ret = hvio_msiq_gettail(DIP_TO_HANDLE(dip),
906 	    msiq_id, msiq_tail)) != H_EOK) {
907 		DBG(DBG_LIB_MSIQ, dip,
908 		    "hvio_msiq_gettail failed, ret 0x%lx\n", ret);
909 		return (DDI_FAILURE);
910 	}
911 
912 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: msiq_tail 0x%x\n",
913 	    *msiq_tail);
914 
915 	return (DDI_SUCCESS);
916 }
917 
918 /*ARGSUSED*/
919 void
920 px_lib_get_msiq_rec(dev_info_t *dip, msiqhead_t *msiq_head_p,
921     msiq_rec_t *msiq_rec_p)
922 {
923 	eq_rec_t	*eq_rec_p = (eq_rec_t *)msiq_head_p;
924 
925 	DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: dip 0x%p eq_rec_p 0x%p\n",
926 	    dip, eq_rec_p);
927 
928 	if (!eq_rec_p->eq_rec_fmt_type) {
929 		/* Set msiq_rec_type to zero */
930 		msiq_rec_p->msiq_rec_type = 0;
931 
932 		return;
933 	}
934 
935 	DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: EQ RECORD, "
936 	    "eq_rec_rid 0x%llx eq_rec_fmt_type 0x%llx "
937 	    "eq_rec_len 0x%llx eq_rec_addr0 0x%llx "
938 	    "eq_rec_addr1 0x%llx eq_rec_data0 0x%llx "
939 	    "eq_rec_data1 0x%llx\n", eq_rec_p->eq_rec_rid,
940 	    eq_rec_p->eq_rec_fmt_type, eq_rec_p->eq_rec_len,
941 	    eq_rec_p->eq_rec_addr0, eq_rec_p->eq_rec_addr1,
942 	    eq_rec_p->eq_rec_data0, eq_rec_p->eq_rec_data1);
943 
944 	/*
945 	 * Only the upper 4 bits of eq_rec_fmt_type are used
946 	 * to identify the EQ record type.
947 	 */
948 	switch (eq_rec_p->eq_rec_fmt_type >> 3) {
949 	case EQ_REC_MSI32:
950 		msiq_rec_p->msiq_rec_type = MSI32_REC;
951 
952 		msiq_rec_p->msiq_rec_data.msi.msi_data =
953 		    eq_rec_p->eq_rec_data0;
954 		break;
955 	case EQ_REC_MSI64:
956 		msiq_rec_p->msiq_rec_type = MSI64_REC;
957 
958 		msiq_rec_p->msiq_rec_data.msi.msi_data =
959 		    eq_rec_p->eq_rec_data0;
960 		break;
961 	case EQ_REC_MSG:
962 		msiq_rec_p->msiq_rec_type = MSG_REC;
963 
964 		msiq_rec_p->msiq_rec_data.msg.msg_route =
965 		    eq_rec_p->eq_rec_fmt_type & 7;
966 		msiq_rec_p->msiq_rec_data.msg.msg_targ = eq_rec_p->eq_rec_rid;
967 		msiq_rec_p->msiq_rec_data.msg.msg_code = eq_rec_p->eq_rec_data0;
968 		break;
969 	default:
970 		cmn_err(CE_WARN, "%s%d: px_lib_get_msiq_rec: "
971 		    "0x%x is an unknown EQ record type",
972 		    ddi_driver_name(dip), ddi_get_instance(dip),
973 		    (int)eq_rec_p->eq_rec_fmt_type);
974 		break;
975 	}
976 
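	/*
	 * Reassemble the MSI address from the EQ record: eq_rec_addr0
	 * carries address bits [15:2] and eq_rec_addr1 the bits above,
	 * per the shifts below.
	 */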
977 	msiq_rec_p->msiq_rec_rid = eq_rec_p->eq_rec_rid;
978 	msiq_rec_p->msiq_rec_msi_addr = ((eq_rec_p->eq_rec_addr1 << 16) |
979 	    (eq_rec_p->eq_rec_addr0 << 2));
980 }
981 
982 /*ARGSUSED*/
983 void
984 px_lib_clr_msiq_rec(dev_info_t *dip, msiqhead_t *msiq_head_p)
985 {
986 	eq_rec_t	*eq_rec_p = (eq_rec_t *)msiq_head_p;
987 
988 	DBG(DBG_LIB_MSIQ, dip, "px_lib_clr_msiq_rec: dip 0x%p eq_rec_p 0x%p\n",
989 	    dip, eq_rec_p);
990 
991 	if (eq_rec_p->eq_rec_fmt_type) {
992 		/* Zero out eq_rec_fmt_type field */
993 		eq_rec_p->eq_rec_fmt_type = 0;
994 	}
995 }
996 
997 /*
998  * MSI Functions:
999  */
1000 /*ARGSUSED*/
1001 int
1002 px_lib_msi_init(dev_info_t *dip)
1003 {
1004 	px_t		*px_p = DIP_TO_STATE(dip);
1005 	px_msi_state_t	*msi_state_p = &px_p->px_ib_p->ib_msi_state;
1006 	uint64_t	ret;
1007 
1008 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_init: dip 0x%p\n", dip);
1009 
1010 	if ((ret = hvio_msi_init(DIP_TO_HANDLE(dip),
1011 	    msi_state_p->msi_addr32, msi_state_p->msi_addr64)) != H_EOK) {
1012 		DBG(DBG_LIB_MSI, dip, "px_lib_msi_init failed, ret 0x%lx\n",
1013 		    ret);
1014 		return (DDI_FAILURE);
1015 	}
1016 
1017 	return (DDI_SUCCESS);
1018 }
1019 
1020 /*ARGSUSED*/
1021 int
1022 px_lib_msi_getmsiq(dev_info_t *dip, msinum_t msi_num,
1023     msiqid_t *msiq_id)
1024 {
1025 	uint64_t	ret;
1026 
1027 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: dip 0x%p msi_num 0x%x\n",
1028 	    dip, msi_num);
1029 
1030 	if ((ret = hvio_msi_getmsiq(DIP_TO_HANDLE(dip),
1031 	    msi_num, msiq_id)) != H_EOK) {
1032 		DBG(DBG_LIB_MSI, dip,
1033 		    "hvio_msi_getmsiq failed, ret 0x%lx\n", ret);
1034 		return (DDI_FAILURE);
1035 	}
1036 
1037 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: msiq_id 0x%x\n",
1038 	    *msiq_id);
1039 
1040 	return (DDI_SUCCESS);
1041 }
1042 
1043 /*ARGSUSED*/
1044 int
1045 px_lib_msi_setmsiq(dev_info_t *dip, msinum_t msi_num,
1046     msiqid_t msiq_id, msi_type_t msitype)
1047 {
1048 	uint64_t	ret;
1049 
1050 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setmsiq: dip 0x%p msi_num 0x%x "
1051 	    "msiq_id 0x%x\n", dip, msi_num, msiq_id);
1052 
1053 	if ((ret = hvio_msi_setmsiq(DIP_TO_HANDLE(dip),
1054 	    msi_num, msiq_id)) != H_EOK) {
1055 		DBG(DBG_LIB_MSI, dip,
1056 		    "hvio_msi_setmsiq failed, ret 0x%lx\n", ret);
1057 		return (DDI_FAILURE);
1058 	}
1059 
1060 	return (DDI_SUCCESS);
1061 }
1062 
1063 /*ARGSUSED*/
1064 int
1065 px_lib_msi_getvalid(dev_info_t *dip, msinum_t msi_num,
1066     pci_msi_valid_state_t *msi_valid_state)
1067 {
1068 	uint64_t	ret;
1069 
1070 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: dip 0x%p msi_num 0x%x\n",
1071 	    dip, msi_num);
1072 
1073 	if ((ret = hvio_msi_getvalid(DIP_TO_HANDLE(dip),
1074 	    msi_num, msi_valid_state)) != H_EOK) {
1075 		DBG(DBG_LIB_MSI, dip,
1076 		    "hvio_msi_getvalid failed, ret 0x%lx\n", ret);
1077 		return (DDI_FAILURE);
1078 	}
1079 
1080 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: msi_valid_state 0x%x\n",
1081 	    *msi_valid_state);
1082 
1083 	return (DDI_SUCCESS);
1084 }
1085 
1086 /*ARGSUSED*/
1087 int
1088 px_lib_msi_setvalid(dev_info_t *dip, msinum_t msi_num,
1089     pci_msi_valid_state_t msi_valid_state)
1090 {
1091 	uint64_t	ret;
1092 
1093 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setvalid: dip 0x%p msi_num 0x%x "
1094 	    "msi_valid_state 0x%x\n", dip, msi_num, msi_valid_state);
1095 
1096 	if ((ret = hvio_msi_setvalid(DIP_TO_HANDLE(dip),
1097 	    msi_num, msi_valid_state)) != H_EOK) {
1098 		DBG(DBG_LIB_MSI, dip,
1099 		    "hvio_msi_setvalid failed, ret 0x%lx\n", ret);
1100 		return (DDI_FAILURE);
1101 	}
1102 
1103 	return (DDI_SUCCESS);
1104 }
1105 
1106 /*ARGSUSED*/
1107 int
1108 px_lib_msi_getstate(dev_info_t *dip, msinum_t msi_num,
1109     pci_msi_state_t *msi_state)
1110 {
1111 	uint64_t	ret;
1112 
1113 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: dip 0x%p msi_num 0x%x\n",
1114 	    dip, msi_num);
1115 
1116 	if ((ret = hvio_msi_getstate(DIP_TO_HANDLE(dip),
1117 	    msi_num, msi_state)) != H_EOK) {
1118 		DBG(DBG_LIB_MSI, dip,
1119 		    "hvio_msi_getstate failed, ret 0x%lx\n", ret);
1120 		return (DDI_FAILURE);
1121 	}
1122 
1123 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: msi_state 0x%x\n",
1124 	    *msi_state);
1125 
1126 	return (DDI_SUCCESS);
1127 }
1128 
1129 /*ARGSUSED*/
1130 int
1131 px_lib_msi_setstate(dev_info_t *dip, msinum_t msi_num,
1132     pci_msi_state_t msi_state)
1133 {
1134 	uint64_t	ret;
1135 
1136 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setstate: dip 0x%p msi_num 0x%x "
1137 	    "msi_state 0x%x\n", dip, msi_num, msi_state);
1138 
1139 	if ((ret = hvio_msi_setstate(DIP_TO_HANDLE(dip),
1140 	    msi_num, msi_state)) != H_EOK) {
1141 		DBG(DBG_LIB_MSI, dip,
1142 		    "hvio_msi_setstate failed, ret 0x%lx\n", ret);
1143 		return (DDI_FAILURE);
1144 	}
1145 
1146 	return (DDI_SUCCESS);
1147 }
1148 
1149 /*
1150  * MSG Functions:
1151  */
1152 /*ARGSUSED*/
1153 int
1154 px_lib_msg_getmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
1155     msiqid_t *msiq_id)
1156 {
1157 	uint64_t	ret;
1158 
1159 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: dip 0x%p msg_type 0x%x\n",
1160 	    dip, msg_type);
1161 
1162 	if ((ret = hvio_msg_getmsiq(DIP_TO_HANDLE(dip),
1163 	    msg_type, msiq_id)) != H_EOK) {
1164 		DBG(DBG_LIB_MSG, dip,
1165 		    "hvio_msg_getmsiq failed, ret 0x%lx\n", ret);
1166 		return (DDI_FAILURE);
1167 	}
1168 
1169 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: msiq_id 0x%x\n",
1170 	    *msiq_id);
1171 
1172 	return (DDI_SUCCESS);
1173 }
1174 
1175 /*ARGSUSED*/
1176 int
1177 px_lib_msg_setmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
1178     msiqid_t msiq_id)
1179 {
1180 	uint64_t	ret;
1181 
1182 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_setmsiq: dip 0x%p msg_type 0x%x "
1183 	    "msiq_id 0x%x\n", dip, msg_type, msiq_id);
1184 
1185 	if ((ret = hvio_msg_setmsiq(DIP_TO_HANDLE(dip),
1186 	    msg_type, msiq_id)) != H_EOK) {
1187 		DBG(DBG_LIB_MSG, dip,
1188 		    "hvio_msg_setmsiq failed, ret 0x%lx\n", ret);
1189 		return (DDI_FAILURE);
1190 	}
1191 
1192 	return (DDI_SUCCESS);
1193 }
1194 
1195 /*ARGSUSED*/
1196 int
1197 px_lib_msg_getvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
1198     pcie_msg_valid_state_t *msg_valid_state)
1199 {
1200 	uint64_t	ret;
1201 
1202 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: dip 0x%p msg_type 0x%x\n",
1203 	    dip, msg_type);
1204 
1205 	if ((ret = hvio_msg_getvalid(DIP_TO_HANDLE(dip), msg_type,
1206 	    msg_valid_state)) != H_EOK) {
1207 		DBG(DBG_LIB_MSG, dip,
1208 		    "hvio_msg_getvalid failed, ret 0x%lx\n", ret);
1209 		return (DDI_FAILURE);
1210 	}
1211 
1212 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: msg_valid_state 0x%x\n",
1213 	    *msg_valid_state);
1214 
1215 	return (DDI_SUCCESS);
1216 }
1217 
1218 /*ARGSUSED*/
1219 int
1220 px_lib_msg_setvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
1221     pcie_msg_valid_state_t msg_valid_state)
1222 {
1223 	uint64_t	ret;
1224 
1225 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_setvalid: dip 0x%p msg_type 0x%x "
1226 	    "msg_valid_state 0x%x\n", dip, msg_type, msg_valid_state);
1227 
1228 	if ((ret = hvio_msg_setvalid(DIP_TO_HANDLE(dip), msg_type,
1229 	    msg_valid_state)) != H_EOK) {
1230 		DBG(DBG_LIB_MSG, dip,
1231 		    "hvio_msg_setvalid failed, ret 0x%lx\n", ret);
1232 		return (DDI_FAILURE);
1233 	}
1234 
1235 	return (DDI_SUCCESS);
1236 }
1237 
1238 /*
1239  * Suspend/Resume Functions:
1240  * Currently unsupported by hypervisor
1241  */
1242 int
1243 px_lib_suspend(dev_info_t *dip)
1244 {
1245 	px_t		*px_p = DIP_TO_STATE(dip);
1246 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
1247 	px_cb_t		*cb_p = PX2CB(px_p);
1248 	devhandle_t	dev_hdl, xbus_dev_hdl;
1249 	uint64_t	ret = H_EOK;
1250 
1251 	DBG(DBG_DETACH, dip, "px_lib_suspend: dip 0x%p\n", dip);
1252 
1253 	dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR];
1254 	xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC];
1255 
1256 	if ((ret = hvio_suspend(dev_hdl, pxu_p)) != H_EOK)
1257 		goto fail;
1258 
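	/*
	 * The common block (XBC) is shared by the leaves; suspend it only
	 * when the last attached leaf is suspended.
	 */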
1259 	if (--cb_p->attachcnt == 0) {
1260 		ret = hvio_cb_suspend(xbus_dev_hdl, pxu_p);
1261 		if (ret != H_EOK)
1262 			cb_p->attachcnt++;
1263 	}
1264 	pxu_p->cpr_flag = PX_ENTERED_CPR;
1265 
1266 fail:
1267 	return ((ret != H_EOK) ? DDI_FAILURE : DDI_SUCCESS);
1268 }
1269 
1270 void
1271 px_lib_resume(dev_info_t *dip)
1272 {
1273 	px_t		*px_p = DIP_TO_STATE(dip);
1274 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
1275 	px_cb_t		*cb_p = PX2CB(px_p);
1276 	devhandle_t	dev_hdl, xbus_dev_hdl;
1277 	devino_t	pec_ino = px_p->px_inos[PX_INTR_PEC];
1278 	devino_t	xbc_ino = px_p->px_inos[PX_INTR_XBC];
1279 
1280 	DBG(DBG_ATTACH, dip, "px_lib_resume: dip 0x%p\n", dip);
1281 
1282 	dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR];
1283 	xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC];
1284 
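	/* Resume the shared common block when the first leaf resumes. */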
1285 	if (++cb_p->attachcnt == 1)
1286 		hvio_cb_resume(dev_hdl, xbus_dev_hdl, xbc_ino, pxu_p);
1287 
1288 	hvio_resume(dev_hdl, pec_ino, pxu_p);
1289 }
1290 
1291 /*
1292  * Generate a unique Oberon UBC ID based on the Logical System Board and
1293  * the IO Channel from the portid property field.
1294  */
1295 static uint64_t
1296 oberon_get_ubc_id(dev_info_t *dip)
1297 {
1298 	px_t	*px_p = DIP_TO_STATE(dip);
1299 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
1300 	uint64_t	ubc_id;
1301 
1302 	/*
1303 	 * Generate a unique 6 bit UBC ID using the 2 IO_Channel#[1:0] bits and
1304 	 * the 4 LSB_ID[3:0] bits from the Oberon's portid property.
1305 	 */
1306 	ubc_id = (((pxu_p->portid >> OBERON_PORT_ID_IOC) &
1307 	    OBERON_PORT_ID_IOC_MASK) | (((pxu_p->portid >>
1308 	    OBERON_PORT_ID_LSB) & OBERON_PORT_ID_LSB_MASK)
1309 	    << OBERON_UBC_ID_LSB));
1310 
1311 	return (ubc_id);
1312 }
1313 
1314 /*
1315  * Oberon does not have a UBC scratch register, so allocate an array of
1316  * scratch registers when needed and use the unique UBC ID as an index.
1317  * This code could be simplified by using a pre-allocated array; the array
1318  * is allocated dynamically because it is only needed by the Oberon.
1319  */
1320 static void
1321 oberon_set_cb(dev_info_t *dip, uint64_t val)
1322 {
1323 	uint64_t	ubc_id;
1324 
1325 	if (px_oberon_ubc_scratch_regs == NULL)
1326 		px_oberon_ubc_scratch_regs =
1327 		    (uint64_t *)kmem_zalloc(sizeof (uint64_t)*
1328 		    OBERON_UBC_ID_MAX, KM_SLEEP);
1329 
1330 	ubc_id = oberon_get_ubc_id(dip);
1331 
1332 	px_oberon_ubc_scratch_regs[ubc_id] = val;
1333 
1334 	/*
1335 	 * Check if any scratch registers are still in use. If all scratch
1336 	 * registers are currently set to zero, then deallocate the scratch
1337 	 * register array.
1338 	 */
1339 	for (ubc_id = 0; ubc_id < OBERON_UBC_ID_MAX; ubc_id++) {
1340 		if (px_oberon_ubc_scratch_regs[ubc_id] != 0)
1341 			return;
1342 	}
1343 
1344 	/*
1345 	 * All scratch registers are set to zero so deallocate the scratch
1346 	 * register array and set the pointer to NULL.
1347 	 */
1348 	kmem_free(px_oberon_ubc_scratch_regs,
1349 	    (sizeof (uint64_t)*OBERON_UBC_ID_MAX));
1350 
1351 	px_oberon_ubc_scratch_regs = NULL;
1352 }
1353 
1354 /*
1355  * Oberon does not have a UBC scratch register, so use an allocated array of
1356  * scratch registers and use the unique UBC ID as an index into that array.
1357  */
1358 static uint64_t
1359 oberon_get_cb(dev_info_t *dip)
1360 {
1361 	uint64_t	ubc_id;
1362 
1363 	if (px_oberon_ubc_scratch_regs == NULL)
1364 		return (0);
1365 
1366 	ubc_id = oberon_get_ubc_id(dip);
1367 
1368 	return (px_oberon_ubc_scratch_regs[ubc_id]);
1369 }
1370 
1371 /*
1372  * Misc Functions:
1373  * Currently unsupported by hypervisor
1374  */
1375 static uint64_t
1376 px_get_cb(dev_info_t *dip)
1377 {
1378 	px_t	*px_p = DIP_TO_STATE(dip);
1379 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
1380 
1381 	/*
1382 	 * Oberon does not currently have Scratchpad registers.
1383 	 */
1384 	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON)
1385 		return (oberon_get_cb(dip));
1386 
1387 	return (CSR_XR((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1));
1388 }
1389 
1390 static void
1391 px_set_cb(dev_info_t *dip, uint64_t val)
1392 {
1393 	px_t	*px_p = DIP_TO_STATE(dip);
1394 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
1395 
1396 	/*
1397 	 * Oberon does not currently have Scratchpad registers.
1398 	 */
1399 	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
1400 		oberon_set_cb(dip, val);
1401 		return;
1402 	}
1403 
1404 	CSR_XS((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1, val);
1405 }
1406 
1407 /*ARGSUSED*/
1408 int
1409 px_lib_map_vconfig(dev_info_t *dip,
1410 	ddi_map_req_t *mp, pci_config_offset_t off,
1411 		pci_regspec_t *rp, caddr_t *addrp)
1412 {
1413 	/*
1414 	 * No special config space access services in this layer.
1415 	 */
1416 	return (DDI_FAILURE);
1417 }
1418 
1419 void
1420 px_lib_map_attr_check(ddi_map_req_t *mp)
1421 {
1422 	ddi_acc_hdl_t *hp = mp->map_handlep;
1423 
1424 	/* fire does not accept byte masks from PIO store merge */
1425 	if (hp->ah_acc.devacc_attr_dataorder == DDI_STORECACHING_OK_ACC)
1426 		hp->ah_acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1427 }
1428 
1429 /* This function is called only by poke, caut put and pxtool poke. */
1430 void
1431 px_lib_clr_errs(px_t *px_p, dev_info_t *rdip, uint64_t addr)
1432 {
1433 	px_pec_t	*pec_p = px_p->px_pec_p;
1434 	dev_info_t	*rpdip = px_p->px_dip;
1435 	int		rc_err, fab_err, i;
1436 	int		acctype = pec_p->pec_safeacc_type;
1437 	ddi_fm_error_t	derr;
1438 	px_ranges_t	*ranges_p;
1439 	int		range_len;
1440 	uint32_t	addr_high, addr_low;
1441 	pcie_req_id_t	bdf = 0;
1442 
1443 	/* Create the derr */
1444 	bzero(&derr, sizeof (ddi_fm_error_t));
1445 	derr.fme_version = DDI_FME_VERSION;
1446 	derr.fme_ena = fm_ena_generate(0, FM_ENA_FMT1);
1447 	derr.fme_flag = acctype;
1448 
1449 	if (acctype == DDI_FM_ERR_EXPECTED) {
1450 		derr.fme_status = DDI_FM_NONFATAL;
1451 		ndi_fm_acc_err_set(pec_p->pec_acc_hdl, &derr);
1452 	}
1453 
1454 	mutex_enter(&px_p->px_fm_mutex);
1455 
1456 	/* send ereport/handle/clear fire registers */
1457 	rc_err = px_err_cmn_intr(px_p, &derr, PX_LIB_CALL, PX_FM_BLOCK_ALL);
1458 
1459 	/* Figure out if this is a cfg or mem32 access */
1460 	addr_high = (uint32_t)(addr >> 32);
1461 	addr_low = (uint32_t)addr;
1462 	range_len = px_p->px_ranges_length / sizeof (px_ranges_t);
1463 	i = 0;
1464 	for (ranges_p = px_p->px_ranges_p; i < range_len; i++, ranges_p++) {
1465 		if (ranges_p->parent_high == addr_high) {
1466 			switch (ranges_p->child_high & PCI_ADDR_MASK) {
1467 			case PCI_ADDR_CONFIG:
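				/*
				 * For config space the BDF is encoded in
				 * the address bits above the per-function
				 * 4K config window.
				 */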
1468 				bdf = (pcie_req_id_t)(addr_low >> 12);
1469 				addr_low = 0;
1470 				break;
1471 			case PCI_ADDR_MEM32:
1472 				if (rdip)
1473 					(void) pcie_get_bdf_from_dip(rdip,
1474 					    &bdf);
1475 				else
1476 					bdf = 0;
1477 				break;
1478 			}
1479 			break;
1480 		}
1481 	}
1482 
1483 	px_rp_en_q(px_p, bdf, addr_low, NULL);
1484 
1485 	/*
1486 	 * XXX - Current code scans the fabric for all px_tool accesses.
1487 	 * In future, do not scan fabric for px_tool access to IO Root Nexus
1488 	 */
1489 	fab_err = pf_scan_fabric(rpdip, &derr, px_p->px_dq_p,
1490 	    &px_p->px_dq_tail);
1491 
1492 	mutex_exit(&px_p->px_fm_mutex);
1493 
1494 	px_err_panic(rc_err, PX_RC, fab_err);
1495 }
1496 
1497 #ifdef  DEBUG
1498 int	px_peekfault_cnt = 0;
1499 int	px_pokefault_cnt = 0;
1500 #endif  /* DEBUG */
1501 
1502 /*ARGSUSED*/
1503 static int
1504 px_lib_do_poke(dev_info_t *dip, dev_info_t *rdip,
1505     peekpoke_ctlops_t *in_args)
1506 {
1507 	px_t *px_p = DIP_TO_STATE(dip);
1508 	px_pec_t *pec_p = px_p->px_pec_p;
1509 	int err = DDI_SUCCESS;
1510 	on_trap_data_t otd;
1511 
1512 	mutex_enter(&pec_p->pec_pokefault_mutex);
1513 	pec_p->pec_ontrap_data = &otd;
1514 	pec_p->pec_safeacc_type = DDI_FM_ERR_POKE;
1515 
1516 	/* Set up protected environment. */
1517 	if (!on_trap(&otd, OT_DATA_ACCESS)) {
1518 		uintptr_t tramp = otd.ot_trampoline;
1519 
1520 		otd.ot_trampoline = (uintptr_t)&poke_fault;
1521 		err = do_poke(in_args->size, (void *)in_args->dev_addr,
1522 		    (void *)in_args->host_addr);
1523 		otd.ot_trampoline = tramp;
1524 	} else
1525 		err = DDI_FAILURE;
1526 
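	/*
	 * Clear and scan for any errors the poke may have triggered before
	 * checking whether the access itself trapped.
	 */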
1527 	px_lib_clr_errs(px_p, rdip, in_args->dev_addr);
1528 
1529 	if (otd.ot_trap & OT_DATA_ACCESS)
1530 		err = DDI_FAILURE;
1531 
1532 	/* Take down protected environment. */
1533 	no_trap();
1534 
1535 	pec_p->pec_ontrap_data = NULL;
1536 	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
1537 	mutex_exit(&pec_p->pec_pokefault_mutex);
1538 
1539 #ifdef  DEBUG
1540 	if (err == DDI_FAILURE)
1541 		px_pokefault_cnt++;
1542 #endif
1543 	return (err);
1544 }
1545 
1546 /*ARGSUSED*/
1547 static int
1548 px_lib_do_caut_put(dev_info_t *dip, dev_info_t *rdip,
1549     peekpoke_ctlops_t *cautacc_ctlops_arg)
1550 {
1551 	size_t size = cautacc_ctlops_arg->size;
1552 	uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
1553 	uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
1554 	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
1555 	size_t repcount = cautacc_ctlops_arg->repcount;
1556 	uint_t flags = cautacc_ctlops_arg->flags;
1557 
1558 	px_t *px_p = DIP_TO_STATE(dip);
1559 	px_pec_t *pec_p = px_p->px_pec_p;
1560 	int err = DDI_SUCCESS;
1561 
1562 	/*
1563 	 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault
1564 	 * mutex.
1565 	 */
1566 	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
1567 
1568 	pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap;
1569 	pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED;
1570 	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;
1571 
1572 	if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
1573 		for (; repcount; repcount--) {
1574 			switch (size) {
1575 
1576 			case sizeof (uint8_t):
1577 				i_ddi_put8(hp, (uint8_t *)dev_addr,
1578 				    *(uint8_t *)host_addr);
1579 				break;
1580 
1581 			case sizeof (uint16_t):
1582 				i_ddi_put16(hp, (uint16_t *)dev_addr,
1583 				    *(uint16_t *)host_addr);
1584 				break;
1585 
1586 			case sizeof (uint32_t):
1587 				i_ddi_put32(hp, (uint32_t *)dev_addr,
1588 				    *(uint32_t *)host_addr);
1589 				break;
1590 
1591 			case sizeof (uint64_t):
1592 				i_ddi_put64(hp, (uint64_t *)dev_addr,
1593 				    *(uint64_t *)host_addr);
1594 				break;
1595 			}
1596 
1597 			host_addr += size;
1598 
1599 			if (flags == DDI_DEV_AUTOINCR)
1600 				dev_addr += size;
1601 
1602 			px_lib_clr_errs(px_p, rdip, dev_addr);
1603 
1604 			if (pec_p->pec_ontrap_data->ot_trap & OT_DATA_ACCESS) {
1605 				err = DDI_FAILURE;
1606 #ifdef  DEBUG
1607 				px_pokefault_cnt++;
1608 #endif
1609 				break;
1610 			}
1611 		}
1612 	}
1613 
1614 	i_ddi_notrap((ddi_acc_handle_t)hp);
1615 	pec_p->pec_ontrap_data = NULL;
1616 	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
1617 	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
1618 	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;
1619 
1620 	return (err);
1621 }
1622 
1623 
1624 int
1625 px_lib_ctlops_poke(dev_info_t *dip, dev_info_t *rdip,
1626     peekpoke_ctlops_t *in_args)
1627 {
1628 	return (in_args->handle ? px_lib_do_caut_put(dip, rdip, in_args) :
1629 	    px_lib_do_poke(dip, rdip, in_args));
1630 }
1631 
1632 
1633 /*ARGSUSED*/
1634 static int
1635 px_lib_do_peek(dev_info_t *dip, peekpoke_ctlops_t *in_args)
1636 {
1637 	px_t *px_p = DIP_TO_STATE(dip);
1638 	px_pec_t *pec_p = px_p->px_pec_p;
1639 	int err = DDI_SUCCESS;
1640 	on_trap_data_t otd;
1641 
1642 	mutex_enter(&pec_p->pec_pokefault_mutex);
1643 	mutex_enter(&px_p->px_fm_mutex);
1644 	pec_p->pec_safeacc_type = DDI_FM_ERR_PEEK;
1645 	mutex_exit(&px_p->px_fm_mutex);
1646 
1647 	if (!on_trap(&otd, OT_DATA_ACCESS)) {
1648 		uintptr_t tramp = otd.ot_trampoline;
1649 
1650 		otd.ot_trampoline = (uintptr_t)&peek_fault;
1651 		err = do_peek(in_args->size, (void *)in_args->dev_addr,
1652 		    (void *)in_args->host_addr);
1653 		otd.ot_trampoline = tramp;
1654 	} else
1655 		err = DDI_FAILURE;
1656 
1657 	no_trap();
1658 	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
1659 	mutex_exit(&pec_p->pec_pokefault_mutex);
1660 
1661 #ifdef  DEBUG
1662 	if (err == DDI_FAILURE)
1663 		px_peekfault_cnt++;
1664 #endif
1665 	return (err);
1666 }
1667 
1668 
1669 static int
1670 px_lib_do_caut_get(dev_info_t *dip, peekpoke_ctlops_t *cautacc_ctlops_arg)
1671 {
1672 	size_t size = cautacc_ctlops_arg->size;
1673 	uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
1674 	uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
1675 	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
1676 	size_t repcount = cautacc_ctlops_arg->repcount;
1677 	uint_t flags = cautacc_ctlops_arg->flags;
1678 
1679 	px_t *px_p = DIP_TO_STATE(dip);
1680 	px_pec_t *pec_p = px_p->px_pec_p;
1681 	int err = DDI_SUCCESS;
1682 
1683 	/*
1684 	 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault
1685 	 * mutex.
1686 	 */
1687 	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
1688 
1689 	pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap;
1690 	pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED;
1691 	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;
1692 
1693 	if (repcount == 1) {
1694 		if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
1695 			i_ddi_caut_get(size, (void *)dev_addr,
1696 			    (void *)host_addr);
1697 		} else {
1698 			int i;
1699 			uint8_t *ff_addr = (uint8_t *)host_addr;
1700 			for (i = 0; i < size; i++)
1701 				*ff_addr++ = 0xff;
1702 
1703 			err = DDI_FAILURE;
1704 #ifdef  DEBUG
1705 			px_peekfault_cnt++;
1706 #endif
1707 		}
1708 	} else {
1709 		if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
1710 			for (; repcount; repcount--) {
1711 				i_ddi_caut_get(size, (void *)dev_addr,
1712 				    (void *)host_addr);
1713 
1714 				host_addr += size;
1715 
1716 				if (flags == DDI_DEV_AUTOINCR)
1717 					dev_addr += size;
1718 			}
1719 		} else {
1720 			err = DDI_FAILURE;
1721 #ifdef  DEBUG
1722 			px_peekfault_cnt++;
1723 #endif
1724 		}
1725 	}
1726 
1727 	i_ddi_notrap((ddi_acc_handle_t)hp);
1728 	pec_p->pec_ontrap_data = NULL;
1729 	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
1730 	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
1731 	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;
1732 
1733 	return (err);
1734 }
1735 
1736 /*ARGSUSED*/
1737 int
1738 px_lib_ctlops_peek(dev_info_t *dip, dev_info_t *rdip,
1739     peekpoke_ctlops_t *in_args, void *result)
1740 {
1741 	result = (void *)in_args->host_addr;
1742 	return (in_args->handle ? px_lib_do_caut_get(dip, in_args) :
1743 	    px_lib_do_peek(dip, in_args));
1744 }
1745 
1746 /*
1747  * implements PPM interface
1748  */
1749 int
1750 px_lib_pmctl(int cmd, px_t *px_p)
1751 {
1752 	ASSERT((cmd & ~PPMREQ_MASK) == PPMREQ);
1753 	switch (cmd) {
1754 	case PPMREQ_PRE_PWR_OFF:
1755 		/*
1756 		 * Currently there is no device power management for
1757 		 * the root complex (Fire). When there is, we need to make
1758 		 * sure that it is at full power before trying to send the
1759 		 * PME_Turn_Off message.
1760 		 */
1761 		DBG(DBG_PWR, px_p->px_dip,
1762 		    "ioctl: request to send PME_Turn_Off\n");
1763 		return (px_goto_l23ready(px_p));
1764 
1765 	case PPMREQ_PRE_PWR_ON:
1766 		DBG(DBG_PWR, px_p->px_dip, "ioctl: PRE_PWR_ON request\n");
1767 		return (px_pre_pwron_check(px_p));
1768 
1769 	case PPMREQ_POST_PWR_ON:
1770 		DBG(DBG_PWR, px_p->px_dip, "ioctl: POST_PWR_ON request\n");
1771 		return (px_goto_l0(px_p));
1772 
1773 	default:
1774 		return (DDI_FAILURE);
1775 	}
1776 }
1777 
1778 /*
1779  * sends PME_Turn_Off message to put the link in L2/L3 ready state.
1780  * called by px_ioctl.
1781  * returns DDI_SUCCESS or DDI_FAILURE
1782  * 1. Wait for link to be in L1 state (link status reg)
1783  * 2. write to PME_Turn_Off reg to broadcast
1784  * 3. set timeout
1785  * 4. If timeout, return failure.
1786  * 5. If PM_TO_Ack, wait till link is in L2/L3 ready
1787  */
1788 static int
1789 px_goto_l23ready(px_t *px_p)
1790 {
1791 	pcie_pwr_t	*pwr_p;
1792 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
1793 	caddr_t	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
1794 	int		ret = DDI_SUCCESS;
1795 	clock_t		end, timeleft;
1796 	int		mutex_held = 1;
1797 
1798 	/* If no PM info, return failure */
1799 	if (!PCIE_PMINFO(px_p->px_dip) ||
1800 	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
1801 		return (DDI_FAILURE);
1802 
1803 	mutex_enter(&pwr_p->pwr_lock);
1804 	mutex_enter(&px_p->px_l23ready_lock);
1805 	/* Clear the PME_To_ACK received flag */
1806 	px_p->px_pm_flags &= ~PX_PMETOACK_RECVD;
1807 	/*
1808 	 * When P25 is the downstream device, after receiving
1809 	 * PME_To_ACK, fire will go to Detect state, which causes
1810 	 * the link down event. Inform FMA that this is expected.
1811 	 * For all other cards compliant with the PCI Express
1812 	 * spec, this will happen when the power is re-applied. FMA
1813 	 * code will clear this flag after one instance of LDN. Since
1814 	 * there will not be a LDN event for the spec compliant cards,
1815 	 * we need to clear the flag after receiving PME_To_ACK.
1816 	 */
1817 	px_p->px_pm_flags |= PX_LDN_EXPECTED;
1818 	if (px_send_pme_turnoff(csr_base) != DDI_SUCCESS) {
1819 		ret = DDI_FAILURE;
1820 		goto l23ready_done;
1821 	}
1822 	px_p->px_pm_flags |= PX_PME_TURNOFF_PENDING;
1823 
1824 	end = ddi_get_lbolt() + drv_usectohz(px_pme_to_ack_timeout);
1825 	while (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
1826 		timeleft = cv_timedwait(&px_p->px_l23ready_cv,
1827 		    &px_p->px_l23ready_lock, end);
1828 		/*
1829 		 * If cv_timedwait returns -1, then either
1830 		 * 1) it timed out,
1831 		 * 2) there was a premature wakeup but by the time
1832 		 * cv_timedwait is called again end < lbolt, i.e.
1833 		 * end is in the past, or
1834 		 * 3) end < lbolt was already true by the time we made
1835 		 * the first cv_timedwait call.
1836 		 */
1837 		if (timeleft == -1)
1838 			break;
1839 	}
1840 	if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
1841 		/*
1842 		 * Either we timed out or the interrupt didn't get a
1843 		 * chance to grab the mutex and set the flag.
1844 		 * Release the mutex and delay for some time.
1845 		 * This will 1) give the interrupt a chance to
1846 		 * set the flag and 2) create a delay between two
1847 		 * consecutive requests.
1848 		 */
1849 		mutex_exit(&px_p->px_l23ready_lock);
1850 		delay(drv_usectohz(50 * PX_MSEC_TO_USEC));
1851 		mutex_held = 0;
1852 		if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
1853 			ret = DDI_FAILURE;
1854 			DBG(DBG_PWR, px_p->px_dip, " Timed out while waiting"
1855 			    " for PME_TO_ACK\n");
1856 		}
1857 	}
1858 	px_p->px_pm_flags &=
1859 	    ~(PX_PME_TURNOFF_PENDING | PX_PMETOACK_RECVD | PX_LDN_EXPECTED);
1860 
1861 l23ready_done:
1862 	if (mutex_held)
1863 		mutex_exit(&px_p->px_l23ready_lock);
1864 	/*
1865 	 * Wait till the link is in L1 idle, if sending PME_Turn_Off
1866 	 * was successful.
1867 	 */
1868 	if (ret == DDI_SUCCESS) {
1869 		if (px_link_wait4l1idle(csr_base) != DDI_SUCCESS) {
1870 			DBG(DBG_PWR, px_p->px_dip, " Link is not at L1"
1871 			    " even though we received PME_To_ACK.\n");
1872 			/*
1873 			 * Workaround for hardware bug with P25.
1874 			 * Due to a hardware bug with P25, the link state
1875 			 * will be Detect rather than L1 after the link
1876 			 * has transitioned to L23Ready.  Since we cannot
1877 			 * tell whether the link is in L23Ready without
1878 			 * Fire's state being L1_idle, we delay here just
1879 			 * to make sure that we wait till the link has
1880 			 * transitioned to L23Ready.
1881 			 */
1882 			delay(drv_usectohz(100 * PX_MSEC_TO_USEC));
1883 		}
1884 		pwr_p->pwr_link_lvl = PM_LEVEL_L3;
1885 
1886 	}
1887 	mutex_exit(&pwr_p->pwr_lock);
1888 	return (ret);
1889 }
1890 
1891 /*
1892  * Message interrupt handler intended to be shared for both
1893  * PME and PME_TO_ACK msg handling, currently only handles
1894  * PME_To_ACK message.
1895  */
1896 uint_t
1897 px_pmeq_intr(caddr_t arg)
1898 {
1899 	px_t	*px_p = (px_t *)arg;
1900 
1901 	DBG(DBG_PWR, px_p->px_dip, " PME_To_ACK received \n");
1902 	mutex_enter(&px_p->px_l23ready_lock);
1903 	cv_broadcast(&px_p->px_l23ready_cv);
1904 	if (px_p->px_pm_flags & PX_PME_TURNOFF_PENDING) {
1905 		px_p->px_pm_flags |= PX_PMETOACK_RECVD;
1906 	} else {
1907 		/*
1908 		 * This may be the second ACK received. If so, then
1909 		 * we should be receiving it during the wait4L1 stage.
1910 		 */
1911 		px_p->px_pmetoack_ignored++;
1912 	}
1913 	mutex_exit(&px_p->px_l23ready_lock);
1914 	return (DDI_INTR_CLAIMED);
1915 }
1916 
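/*
 * px_pre_pwron_check() - Verify that PM info exists and that the link is
 * currently at L3 before a downstream slot is powered back on.  A link-down
 * event is expected for spec-compliant cards, so flag it as such.
 */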
1917 static int
1918 px_pre_pwron_check(px_t *px_p)
1919 {
1920 	pcie_pwr_t	*pwr_p;
1921 
1922 	/* If no PM info, return failure */
1923 	if (!PCIE_PMINFO(px_p->px_dip) ||
1924 	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
1925 		return (DDI_FAILURE);
1926 
1927 	/*
1928 	 * For spec-compliant downstream cards, link down
1929 	 * is expected when the device is powered on.
1930 	 */
1931 	px_p->px_pm_flags |= PX_LDN_EXPECTED;
1932 	return (pwr_p->pwr_link_lvl == PM_LEVEL_L3 ? DDI_SUCCESS : DDI_FAILURE);
1933 }
1934 
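/*
 * px_goto_l0() - Retrain the link and poll for the expected LUP event so
 * that the link returns to L0 before the downstream device is powered up.
 */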
1935 static int
1936 px_goto_l0(px_t *px_p)
1937 {
1938 	pcie_pwr_t	*pwr_p;
1939 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
1940 	caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
1941 	int		ret = DDI_SUCCESS;
1942 	uint64_t	time_spent = 0;
1943 
1944 	/* If no PM info, return failure */
1945 	if (!PCIE_PMINFO(px_p->px_dip) ||
1946 	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
1947 		return (DDI_FAILURE);
1948 
1949 	mutex_enter(&pwr_p->pwr_lock);
1950 	/*
1951 	 * The following link retrain activity will cause LDN and LUP events.
1952 	 * Receiving LDN prior to receiving LUP is expected and is not an error
1953 	 * in this case.  Receiving LUP indicates the link is fully up and can
1954 	 * support powering up the downstream device; any further LDN or LUP
1955 	 * outside this context is an error.
1956 	 */
1957 	px_p->px_lup_pending = 1;
1958 	if (px_link_retrain(csr_base) != DDI_SUCCESS) {
1959 		ret = DDI_FAILURE;
1960 		goto l0_done;
1961 	}
1962 
1963 	/* The LUP event takes on the order of 15ms to occur */
1964 	for (; px_p->px_lup_pending && (time_spent < px_lup_poll_to);
1965 	    time_spent += px_lup_poll_interval)
1966 		drv_usecwait(px_lup_poll_interval);
1967 	if (px_p->px_lup_pending)
1968 		ret = DDI_FAILURE;
1969 l0_done:
1970 	px_enable_detect_quiet(csr_base);
1971 	if (ret == DDI_SUCCESS)
1972 		pwr_p->pwr_link_lvl = PM_LEVEL_L0;
1973 	mutex_exit(&pwr_p->pwr_lock);
1974 	return (ret);
1975 }
1976 
1977 /*
1978  * Extract the driver's binding name to identify which chip we're binding to.
1979  * Whenever a new bus bridge is created, the driver alias entry should be
1980  * added here to identify the device if needed.  If a device isn't added,
1981  * the identity defaults to PX_CHIP_UNIDENTIFIED.
1982  */
1983 static uint32_t
1984 px_identity_init(px_t *px_p)
1985 {
1986 	dev_info_t	*dip = px_p->px_dip;
1987 	char		*name = ddi_binding_name(dip);
1988 	uint32_t	revision = 0;
1989 
1990 	revision = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1991 	    "module-revision#", 0);
1992 
1993 	/* Check for Fire driver binding name */
1994 	if (strcmp(name, "pciex108e,80f0") == 0) {
1995 		DBG(DBG_ATTACH, dip, "px_identity_init: %s%d: "
1996 		    "(FIRE), module-revision %d\n", NAMEINST(dip),
1997 		    revision);
1998 
1999 		return ((revision >= FIRE_MOD_REV_20) ?
2000 		    PX_CHIP_FIRE : PX_CHIP_UNIDENTIFIED);
2001 	}
2002 
2003 	/* Check for Oberon driver binding name */
2004 	if (strcmp(name, "pciex108e,80f8") == 0) {
2005 		DBG(DBG_ATTACH, dip, "px_identity_init: %s%d: "
2006 		    "(OBERON), module-revision %d\n", NAMEINST(dip),
2007 		    revision);
2008 
2009 		return (PX_CHIP_OBERON);
2010 	}
2011 
2012 	DBG(DBG_ATTACH, dip, "%s%d: Unknown PCI Express Host bridge %s %x\n",
2013 	    ddi_driver_name(dip), ddi_get_instance(dip), name, revision);
2014 
2015 	return (PX_CHIP_UNIDENTIFIED);
2016 }
2017 
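/*
 * px_err_add_intr() - Register the block's error handler as an interrupt
 * vector at PX_ERR_PIL and enable it on the CPU chosen by the interrupt
 * distribution framework.
 */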
2018 int
2019 px_err_add_intr(px_fault_t *px_fault_p)
2020 {
2021 	dev_info_t	*dip = px_fault_p->px_fh_dip;
2022 	px_t		*px_p = DIP_TO_STATE(dip);
2023 
2024 	VERIFY(add_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL,
2025 	    (intrfunc)px_fault_p->px_err_func, (caddr_t)px_fault_p,
2026 	    NULL, NULL) == 0);
2027 
2028 	px_ib_intr_enable(px_p, intr_dist_cpuid(), px_fault_p->px_intr_ino);
2029 
2030 	return (DDI_SUCCESS);
2031 }
2032 
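/*
 * px_err_rem_intr() - Disable the error interrupt mapping, waiting for any
 * pending interrupt to complete, then remove its interrupt vector.
 */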
2033 void
2034 px_err_rem_intr(px_fault_t *px_fault_p)
2035 {
2036 	dev_info_t	*dip = px_fault_p->px_fh_dip;
2037 	px_t		*px_p = DIP_TO_STATE(dip);
2038 
2039 	px_ib_intr_disable(px_p->px_ib_p, px_fault_p->px_intr_ino,
2040 	    IB_INTR_WAIT);
2041 
2042 	VERIFY(rem_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL) == 0);
2043 }
2044 
2045 /*
2046  * px_cb_intr_redist() - sun4u only, CB interrupt redistribution
2047  */
2048 void
2049 px_cb_intr_redist(void *arg)
2050 {
2051 	px_cb_t		*cb_p = (px_cb_t *)arg;
2052 	px_cb_list_t	*pxl;
2053 	px_t		*pxp = NULL;
2054 	px_fault_t	*f_p = NULL;
2055 	uint32_t	new_cpuid;
2056 	intr_valid_state_t	enabled = 0;
2057 
2058 	mutex_enter(&cb_p->cb_mutex);
2059 
2060 	pxl = cb_p->pxl;
2061 	if (!pxl)
2062 		goto cb_done;
2063 
2064 	/* find the px whose CB fault handler owns the current CB sysino */
2065 	for (; pxl != NULL; pxl = pxl->next) {
2066 		pxp = pxl->pxp;
2067 		f_p = &pxp->px_cb_fault;
2068 		if (f_p->px_fh_sysino == cb_p->sysino)
2069 			break;
2070 	}
2071 	if (pxl == NULL)
2072 		goto cb_done;
2073 
2074 	new_cpuid = intr_dist_cpuid();
2075 	if (new_cpuid == cb_p->cpuid)
2076 		goto cb_done;
2077 
2078 	if ((px_lib_intr_getvalid(pxp->px_dip, f_p->px_fh_sysino, &enabled)
2079 	    != DDI_SUCCESS) || !enabled) {
2080 		DBG(DBG_IB, pxp->px_dip, "px_cb_intr_redist: CB not enabled, "
2081 		    "sysino(0x%x)\n", f_p->px_fh_sysino);
2082 		goto cb_done;
2083 	}
2084 
2085 	PX_INTR_DISABLE(pxp->px_dip, f_p->px_fh_sysino);
2086 
2087 	cb_p->cpuid = new_cpuid;
2088 	cb_p->sysino = f_p->px_fh_sysino;
2089 	PX_INTR_ENABLE(pxp->px_dip, cb_p->sysino, cb_p->cpuid);
2090 
2091 cb_done:
2092 	mutex_exit(&cb_p->cb_mutex);
2093 }
2094 
2095 /*
2096  * px_cb_add_intr() - Called from attach(9E) to create the CB if not yet
2097  * created; always adds the CB interrupt vector, but enables it only once.
2098  */
2099 int
2100 px_cb_add_intr(px_fault_t *fault_p)
2101 {
2102 	px_t		*px_p = DIP_TO_STATE(fault_p->px_fh_dip);
2103 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
2104 	px_cb_t		*cb_p = (px_cb_t *)px_get_cb(fault_p->px_fh_dip);
2105 	px_cb_list_t	*pxl, *pxl_new;
2106 	boolean_t	is_proxy = B_FALSE;
2107 
2108 	/* create cb */
2109 	if (cb_p == NULL) {
2110 		cb_p = kmem_zalloc(sizeof (px_cb_t), KM_SLEEP);
2111 
2112 		mutex_init(&cb_p->cb_mutex, NULL, MUTEX_DRIVER,
2113 		    (void *) ipltospl(FM_ERR_PIL));
2114 
2115 		cb_p->px_cb_func = px_cb_intr;
2116 		pxu_p->px_cb_p = cb_p;
2117 		px_set_cb(fault_p->px_fh_dip, (uint64_t)cb_p);
2118 
2119 		/* px_lib_dev_init allows only FIRE and OBERON */
2120 		px_err_reg_enable(
2121 		    (pxu_p->chip_type == PX_CHIP_FIRE) ?
2122 			PX_ERR_JBC : PX_ERR_UBC,
2123 		    pxu_p->px_address[PX_REG_XBC]);
2124 	} else
2125 		pxu_p->px_cb_p = cb_p;
2126 
2127 	/* register cb interrupt */
2128 	VERIFY(add_ivintr(fault_p->px_fh_sysino, PX_ERR_PIL,
2129 	    (intrfunc)cb_p->px_cb_func, (caddr_t)cb_p, NULL, NULL) == 0);
2130 
2131 
2132 	/* update cb list */
2133 	mutex_enter(&cb_p->cb_mutex);
2134 	if (cb_p->pxl == NULL) {
2135 		is_proxy = B_TRUE;
2136 		pxl = kmem_zalloc(sizeof (px_cb_list_t), KM_SLEEP);
2137 		pxl->pxp = px_p;
2138 		cb_p->pxl = pxl;
2139 		cb_p->sysino = fault_p->px_fh_sysino;
2140 		cb_p->cpuid = intr_dist_cpuid();
2141 	} else {
2142 		/*
2143 		 * Find the last pxl, or stop short on
2144 		 * encountering a redundant entry, or
2145 		 * both.
2146 		 */
2147 		pxl = cb_p->pxl;
2148 		for (; (pxl->pxp != px_p) && pxl->next; pxl = pxl->next);
2149 		ASSERT(pxl->pxp != px_p);
2150 
2151 		/* add to linked list */
2152 		pxl_new = kmem_zalloc(sizeof (px_cb_list_t), KM_SLEEP);
2153 		pxl_new->pxp = px_p;
2154 		pxl->next = pxl_new;
2155 	}
2156 	cb_p->attachcnt++;
2157 	mutex_exit(&cb_p->cb_mutex);
2158 
2159 	if (is_proxy) {
2160 		/* add to interrupt redistribution list */
2161 		intr_dist_add(px_cb_intr_redist, cb_p);
2162 
2163 		/* enable cb hw interrupt */
2164 		px_ib_intr_enable(px_p, cb_p->cpuid, fault_p->px_intr_ino);
2165 	}
2166 
2167 	return (DDI_SUCCESS);
2168 }
2169 
2170 /*
2171  * px_cb_rem_intr() - Called from detach(9E) to remove this px's CB
2172  * interrupt vector, to shift the proxy role to the next available px,
2173  * or to disable the CB interrupt when this px is the last one.
2174  */
2175 void
2176 px_cb_rem_intr(px_fault_t *fault_p)
2177 {
2178 	px_t		*px_p = DIP_TO_STATE(fault_p->px_fh_dip), *pxp;
2179 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
2180 	px_cb_t		*cb_p = PX2CB(px_p);
2181 	px_cb_list_t	*pxl, *prev;
2182 	px_fault_t	*f_p;
2183 
2184 	ASSERT(cb_p->pxl);
2185 
2186 	/* find and remove this px, and update cb list */
2187 	mutex_enter(&cb_p->cb_mutex);
2188 
2189 	pxl = cb_p->pxl;
2190 	if (pxl->pxp == px_p) {
2191 		cb_p->pxl = pxl->next;
2192 	} else {
2193 		prev = pxl;
2194 		pxl = pxl->next;
2195 		for (; pxl && (pxl->pxp != px_p); prev = pxl, pxl = pxl->next);
2196 		if (!pxl) {
2197 			cmn_err(CE_WARN, "px_cb_rem_intr: can't find px_p 0x%p "
2198 			    "in registered CB list.", (void *)px_p);
2199 			mutex_exit(&cb_p->cb_mutex);
2200 			return;
2201 		}
2202 		prev->next = pxl->next;
2203 	}
2204 	pxu_p->px_cb_p = NULL;
2205 	cb_p->attachcnt--;
2206 	kmem_free(pxl, sizeof (px_cb_list_t));
2207 	mutex_exit(&cb_p->cb_mutex);
2208 
2209 	/* disable cb hw interrupt */
2210 	if (fault_p->px_fh_sysino == cb_p->sysino)
2211 		px_ib_intr_disable(px_p->px_ib_p, fault_p->px_intr_ino,
2212 		    IB_INTR_WAIT);
2213 
2214 	/* if last px, remove from interrupt redistribution list */
2215 	if (cb_p->pxl == NULL)
2216 		intr_dist_rem(px_cb_intr_redist, cb_p);
2217 
2218 	/* de-register interrupt */
2219 	VERIFY(rem_ivintr(fault_p->px_fh_sysino, PX_ERR_PIL) == 0);
2220 
2221 	/* if not last px, assign next px to manage cb */
2222 	mutex_enter(&cb_p->cb_mutex);
2223 	if (cb_p->pxl) {
2224 		if (fault_p->px_fh_sysino == cb_p->sysino) {
2225 			pxp = cb_p->pxl->pxp;
2226 			f_p = &pxp->px_cb_fault;
2227 			cb_p->sysino = f_p->px_fh_sysino;
2228 
2229 			PX_INTR_ENABLE(pxp->px_dip, cb_p->sysino, cb_p->cpuid);
2230 			(void) px_lib_intr_setstate(pxp->px_dip, cb_p->sysino,
2231 			    INTR_IDLE_STATE);
2232 		}
2233 		mutex_exit(&cb_p->cb_mutex);
2234 		return;
2235 	}
2236 
2237 	/* clean up after the last px */
2238 	mutex_exit(&cb_p->cb_mutex);
2239 
2240 	/* px_lib_dev_init allows only FIRE and OBERON */
2241 	px_err_reg_disable(
2242 	    (pxu_p->chip_type == PX_CHIP_FIRE) ? PX_ERR_JBC : PX_ERR_UBC,
2243 	    pxu_p->px_address[PX_REG_XBC]);
2244 
2245 	mutex_destroy(&cb_p->cb_mutex);
2246 	px_set_cb(fault_p->px_fh_dip, 0ull);
2247 	kmem_free(cb_p, sizeof (px_cb_t));
2248 }
2249 
2250 /*
2251  * px_cb_intr() - sun4u only, CB interrupt dispatcher
2252  */
2253 uint_t
2254 px_cb_intr(caddr_t arg)
2255 {
2256 	px_cb_t		*cb_p = (px_cb_t *)arg;
2257 	px_t		*pxp;
2258 	px_fault_t	*f_p;
2259 	int		ret;
2260 
2261 	mutex_enter(&cb_p->cb_mutex);
2262 
2263 	if (!cb_p->pxl) {
2264 		mutex_exit(&cb_p->cb_mutex);
2265 		return (DDI_INTR_UNCLAIMED);
2266 	}
2267 
2268 	pxp = cb_p->pxl->pxp;
2269 	f_p = &pxp->px_cb_fault;
2270 
2271 	ret = f_p->px_err_func((caddr_t)f_p);
2272 
2273 	mutex_exit(&cb_p->cb_mutex);
2274 	return (ret);
2275 }
2276 
2277 #ifdef	FMA
2278 void
2279 px_fill_rc_status(px_fault_t *px_fault_p, pciex_rc_error_regs_t *rc_status)
2280 {
2281 	/* populate the rc_status by reading the registers - TBD */
2282 }
2283 #endif /* FMA */
2284 
2285 /*
2286  * Unprotected raw reads/writes of fabric device's config space.
2287  * Only used for temporary PCI-E Fabric Error Handling.
2288  */
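/*
 * For example (illustrative only): px_fab_get(px_p, 0x100, 0) would read
 * the first 32 bits of config space (the vendor/device ID pair) of the
 * device with requester ID 0x100.
 */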
2289 uint32_t
2290 px_fab_get(px_t *px_p, pcie_req_id_t bdf, uint16_t offset)
2291 {
2292 	px_ranges_t	*rp = px_p->px_ranges_p;
2293 	uint64_t	range_prop, base_addr;
2294 	int		bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG);
2295 	uint32_t	val;
2296 
2297 	/* Get Fire's Physical Base Address */
2298 	range_prop = px_get_range_prop(px_p, rp, bank);
2299 
2300 	/* Compute the config space address. */
2301 	base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset);
2302 
2303 	val = ldphysio(base_addr);
2304 
2305 	return (LE_32(val));
2306 }
2307 
2308 void
2309 px_fab_set(px_t *px_p, pcie_req_id_t bdf, uint16_t offset,
2310     uint32_t val)
{
2311 	px_ranges_t	*rp = px_p->px_ranges_p;
2312 	uint64_t	range_prop, base_addr;
2313 	int		bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG);
2314 
2315 	/* Get Fire's Physical Base Address */
2316 	range_prop = px_get_range_prop(px_p, rp, bank);
2317 
2318 	/* Compute the config space address. */
2319 	base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset);
2320 
2321 	stphysio(base_addr, LE_32(val));
2322 }
2323 
2324 /*
2325  * cpr callback
2326  *
2327  * Disable the fabric error message interrupts prior to suspending
2328  * all device drivers; re-enable them after all devices have
2329  * resumed.
2330  */
2331 static boolean_t
2332 px_cpr_callb(void *arg, int code)
2333 {
2334 	px_t		*px_p = (px_t *)arg;
2335 	px_ib_t		*ib_p = px_p->px_ib_p;
2336 	px_pec_t	*pec_p = px_p->px_pec_p;
2337 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
2338 	caddr_t		csr_base;
2339 	devino_t	ce_ino, nf_ino, f_ino;
2340 	px_ino_t	*ce_ino_p, *nf_ino_p, *f_ino_p;
2341 	uint64_t	imu_log_enable, imu_intr_enable;
2342 	uint64_t	imu_log_mask, imu_intr_mask;
2343 
2344 	ce_ino = px_msiqid_to_devino(px_p, pec_p->pec_corr_msg_msiq_id);
2345 	nf_ino = px_msiqid_to_devino(px_p, pec_p->pec_non_fatal_msg_msiq_id);
2346 	f_ino = px_msiqid_to_devino(px_p, pec_p->pec_fatal_msg_msiq_id);
2347 	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
2348 
2349 	imu_log_enable = CSR_XR(csr_base, IMU_ERROR_LOG_ENABLE);
2350 	imu_intr_enable = CSR_XR(csr_base, IMU_INTERRUPT_ENABLE);
2351 
2352 	imu_log_mask = BITMASK(IMU_ERROR_LOG_ENABLE_FATAL_MES_NOT_EN_LOG_EN) |
2353 	    BITMASK(IMU_ERROR_LOG_ENABLE_NONFATAL_MES_NOT_EN_LOG_EN) |
2354 	    BITMASK(IMU_ERROR_LOG_ENABLE_COR_MES_NOT_EN_LOG_EN);
2355 
2356 	imu_intr_mask =
2357 	    BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_S_INT_EN) |
2358 	    BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_S_INT_EN) |
2359 	    BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_S_INT_EN) |
2360 	    BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_P_INT_EN) |
2361 	    BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_P_INT_EN) |
2362 	    BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_P_INT_EN);
2363 
2364 	switch (code) {
2365 	case CB_CODE_CPR_CHKPT:
2366 		/* disable imu rbne on corr/nonfatal/fatal errors */
2367 		CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE,
2368 		    imu_log_enable & (~imu_log_mask));
2369 
2370 		CSR_XS(csr_base, IMU_INTERRUPT_ENABLE,
2371 		    imu_intr_enable & (~imu_intr_mask));
2372 
2373 		/* disable CORR intr mapping */
2374 		px_ib_intr_disable(ib_p, ce_ino, IB_INTR_NOWAIT);
2375 
2376 		/* disable NON FATAL intr mapping */
2377 		px_ib_intr_disable(ib_p, nf_ino, IB_INTR_NOWAIT);
2378 
2379 		/* disable FATAL intr mapping */
2380 		px_ib_intr_disable(ib_p, f_ino, IB_INTR_NOWAIT);
2381 
2382 		break;
2383 
2384 	case CB_CODE_CPR_RESUME:
2385 		pxu_p->cpr_flag = PX_NOT_CPR;
2386 		mutex_enter(&ib_p->ib_ino_lst_mutex);
2387 
2388 		ce_ino_p = px_ib_locate_ino(ib_p, ce_ino);
2389 		nf_ino_p = px_ib_locate_ino(ib_p, nf_ino);
2390 		f_ino_p = px_ib_locate_ino(ib_p, f_ino);
2391 
2392 		/* enable CORR intr mapping */
2393 		if (ce_ino_p)
2394 			px_ib_intr_enable(px_p, ce_ino_p->ino_cpuid, ce_ino);
2395 		else
2396 			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
2397 			    "reenable PCIe Correctable msg intr.\n");
2398 
2399 		/* enable NON FATAL intr mapping */
2400 		if (nf_ino_p)
2401 			px_ib_intr_enable(px_p, nf_ino_p->ino_cpuid, nf_ino);
2402 		else
2403 			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
2404 			    "reenable PCIe Non Fatal msg intr.\n");
2405 
2406 		/* enable FATAL intr mapping */
2407 		if (f_ino_p)
2408 			px_ib_intr_enable(px_p, f_ino_p->ino_cpuid, f_ino);
2409 		else
2410 			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
2411 			    "reenable PCIe Fatal msg intr.\n");
2412 
2413 		mutex_exit(&ib_p->ib_ino_lst_mutex);
2414 
2415 		/* re-enable corr/nonfatal/fatal "msg not enabled" errors */
2416 		CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE, (imu_log_enable |
2417 		    (imu_log_mask & px_imu_log_mask)));
2418 		CSR_XS(csr_base, IMU_INTERRUPT_ENABLE, (imu_intr_enable |
2419 		    (imu_intr_mask & px_imu_intr_mask)));
2420 
2421 		break;
2422 	}
2423 
2424 	return (B_TRUE);
2425 }
2426 
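/*
 * px_get_rng_parent_hi_mask() - Return the mask to apply to the parent-high
 * cell of a ranges entry; the mask differs between Oberon and Fire chips.
 */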
2427 uint64_t
2428 px_get_rng_parent_hi_mask(px_t *px_p)
2429 {
2430 	pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
2431 	uint64_t mask;
2432 
2433 	switch (PX_CHIP_TYPE(pxu_p)) {
2434 	case PX_CHIP_OBERON:
2435 		mask = OBERON_RANGE_PROP_MASK;
2436 		break;
2437 	case PX_CHIP_FIRE:
2438 		mask = PX_RANGE_PROP_MASK;
2439 		break;
2440 	default:
2441 		mask = PX_RANGE_PROP_MASK;
2442 	}
2443 
2444 	return (mask);
2445 }
2446 
2447 /*
2448  * Fetch the chip's ranges property value for the given bank.
2449  */
2450 uint64_t
2451 px_get_range_prop(px_t *px_p, px_ranges_t *rp, int bank)
2452 {
2453 	uint64_t mask, range_prop;
2454 
2455 	mask = px_get_rng_parent_hi_mask(px_p);
2456 	range_prop = (((uint64_t)(rp[bank].parent_high & mask)) << 32) |
2457 	    rp[bank].parent_low;
2458 
2459 	return (range_prop);
2460 }
2461 
2462 /*
2463  * add cpr callback
2464  */
2465 void
2466 px_cpr_add_callb(px_t *px_p)
2467 {
2468 	px_p->px_cprcb_id = callb_add(px_cpr_callb, (void *)px_p,
2469 	    CB_CL_CPR_POST_USER, "px_cpr");
2470 }
2471 
2472 /*
2473  * remove cpr callback
2474  */
2475 void
2476 px_cpr_rem_callb(px_t *px_p)
2477 {
2478 	(void) callb_delete(px_p->px_cprcb_id);
2479 }
2480 
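/*
 * px_hp_intr() - PCIe hotplug interrupt handler; dispatches the interrupt
 * to the common pciehpc framework.
 */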
2481 /*ARGSUSED*/
2482 static uint_t
2483 px_hp_intr(caddr_t arg1, caddr_t arg2)
2484 {
2485 	px_t *px_p = (px_t *)arg1;
2486 	int rval;
2487 
2488 	rval = pciehpc_intr(px_p->px_dip);
2489 
2490 #ifdef  DEBUG
2491 	if (rval == DDI_INTR_UNCLAIMED)
2492 		cmn_err(CE_WARN, "%s%d: UNCLAIMED intr\n",
2493 		    ddi_driver_name(px_p->px_dip),
2494 		    ddi_get_instance(px_p->px_dip));
2495 #endif
2496 
2497 	return (rval);
2498 }
2499 
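/*
 * px_lib_hotplug_init() - Initialize hotplug support and, on success,
 * register the hotplug interrupt handler at PX_PCIEHP_PIL.
 */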
2500 int
2501 px_lib_hotplug_init(dev_info_t *dip, void *arg)
2502 {
2503 	px_t	*px_p = DIP_TO_STATE(dip);
2504 	uint64_t ret;
2505 
2506 	if ((ret = hvio_hotplug_init(dip, arg)) == DDI_SUCCESS) {
2507 		sysino_t sysino;
2508 
2509 		if (px_lib_intr_devino_to_sysino(px_p->px_dip,
2510 		    px_p->px_inos[PX_INTR_HOTPLUG], &sysino) !=
2511 		    DDI_SUCCESS) {
2512 #ifdef	DEBUG
2513 			cmn_err(CE_WARN, "%s%d: devino_to_sysino fails\n",
2514 			    ddi_driver_name(px_p->px_dip),
2515 			    ddi_get_instance(px_p->px_dip));
2516 #endif
2517 			return (DDI_FAILURE);
2518 		}
2519 
2520 		VERIFY(add_ivintr(sysino, PX_PCIEHP_PIL,
2521 		    (intrfunc)px_hp_intr, (caddr_t)px_p, NULL, NULL) == 0);
2522 	}
2523 
2524 	return (ret);
2525 }
2526 
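/*
 * px_lib_hotplug_uninit() - Tear down hotplug support and remove the
 * hotplug interrupt vector.
 */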
2527 void
2528 px_lib_hotplug_uninit(dev_info_t *dip)
2529 {
2530 	if (hvio_hotplug_uninit(dip) == DDI_SUCCESS) {
2531 		px_t	*px_p = DIP_TO_STATE(dip);
2532 		sysino_t sysino;
2533 
2534 		if (px_lib_intr_devino_to_sysino(px_p->px_dip,
2535 		    px_p->px_inos[PX_INTR_HOTPLUG], &sysino) !=
2536 		    DDI_SUCCESS) {
2537 #ifdef	DEBUG
2538 			cmn_err(CE_WARN, "%s%d: devino_to_sysino fails\n",
2539 			    ddi_driver_name(px_p->px_dip),
2540 			    ddi_get_instance(px_p->px_dip));
2541 #endif
2542 			return;
2543 		}
2544 
2545 		VERIFY(rem_ivintr(sysino, PX_PCIEHP_PIL) == 0);
2546 	}
2547 }
2548 
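/*
 * px_lib_is_in_drain_state() - Return whether the chip is in drain state;
 * Oberon reports this in DRAIN_CONTROL_STATUS, Fire in TLU_STATUS.
 */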
2549 boolean_t
2550 px_lib_is_in_drain_state(px_t *px_p)
2551 {
2552 	pxu_t 	*pxu_p = (pxu_t *)px_p->px_plat_p;
2553 	caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
2554 	uint64_t drain_status;
2555 
2556 	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
2557 		drain_status = CSR_BR(csr_base, DRAIN_CONTROL_STATUS, DRAIN);
2558 	} else {
2559 		drain_status = CSR_BR(csr_base, TLU_STATUS, DRAIN);
2560 	}
2561 
2562 	return (drain_status);
2563 }
2564 
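/*
 * px_lib_get_bdf() - Return the root port's requester ID (bus/dev/func)
 * as reported by the DMC PCI Express configuration register.
 */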
2565 pcie_req_id_t
2566 px_lib_get_bdf(px_t *px_p)
2567 {
2568 	pxu_t 	*pxu_p = (pxu_t *)px_p->px_plat_p;
2569 	caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
2570 	pcie_req_id_t bdf;
2571 
2572 	bdf = CSR_BR(csr_base, DMC_PCI_EXPRESS_CONFIGURATION, REQ_ID);
2573 
2574 	return (bdf);
2575 }
2576