xref: /illumos-gate/usr/src/uts/sun4u/io/px/px_lib4u.c (revision 753d2d2e8e7fd0c9bcf736d9bf2f2faf4d6234cc)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/modctl.h>
#include <sys/disp.h>
#include <sys/stat.h>
#include <sys/ddi_impldefs.h>
#include <sys/vmem.h>
#include <sys/iommutsb.h>
#include <sys/cpuvar.h>
#include <sys/ivintr.h>
#include <sys/byteorder.h>
#include <sys/hotplug/pci/pciehpc.h>
#include <px_obj.h>
#include <pcie_pwr.h>
#include "px_tools_var.h"
#include <px_regs.h>
#include <px_csr.h>
#include <sys/machsystm.h>
#include "px_lib4u.h"
#include "px_err.h"
#include "oberon_regs.h"

#pragma weak jbus_stst_order

extern void jbus_stst_order();
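
/*
 * jbus_stst_order is declared weak above so that this driver loads even
 * on cpu modules that do not provide the routine; when it is absent, the
 * symbol's address resolves to NULL, which px_lib_dma_sync() checks
 * before calling it.
 */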

ulong_t px_mmu_dvma_end = 0xfffffffful;
uint_t px_ranges_phi_mask = 0xfffffffful;
uint64_t *px_oberon_ubc_scratch_regs;
uint64_t px_paddr_mask;

static int px_goto_l23ready(px_t *px_p);
static int px_goto_l0(px_t *px_p);
static int px_pre_pwron_check(px_t *px_p);
static uint32_t px_identity_init(px_t *px_p);
static boolean_t px_cpr_callb(void *arg, int code);
static uint_t px_cb_intr(caddr_t arg);

/*
 * px_lib_map_regs
 *
 * This function is called from the attach routine to map the registers
 * accessed by this driver.
 *
 * used by: px_attach()
 *
 * return value: DDI_FAILURE on failure
 */
int
px_lib_map_regs(pxu_t *pxu_p, dev_info_t *dip)
{
	ddi_device_acc_attr_t	attr;
	px_reg_bank_t		reg_bank = PX_REG_CSR;

	DBG(DBG_ATTACH, dip, "px_lib_map_regs: pxu_p:0x%p, dip 0x%p\n",
	    pxu_p, dip);

	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;

	/*
	 * PCI CSR Base
	 */
	if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank],
	    0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) {
		goto fail;
	}

	reg_bank++;

	/*
	 * XBUS CSR Base
	 */
	if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank],
	    0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) {
		goto fail;
	}

	pxu_p->px_address[reg_bank] -= FIRE_CONTROL_STATUS;

done:
	for (; reg_bank >= PX_REG_CSR; reg_bank--) {
		DBG(DBG_ATTACH, dip, "reg_bank 0x%x address 0x%p\n",
		    reg_bank, pxu_p->px_address[reg_bank]);
	}

	return (DDI_SUCCESS);

fail:
	cmn_err(CE_WARN, "%s%d: unable to map reg entry %d\n",
	    ddi_driver_name(dip), ddi_get_instance(dip), reg_bank);

	for (reg_bank--; reg_bank >= PX_REG_CSR; reg_bank--) {
		pxu_p->px_address[reg_bank] = NULL;
		ddi_regs_map_free(&pxu_p->px_ac[reg_bank]);
	}

	return (DDI_FAILURE);
}

/*
 * px_lib_unmap_regs:
 *
 * This routine unmaps the registers mapped by px_lib_map_regs.
 *
 * used by: px_detach(), and error conditions in px_attach()
 *
 * return value: none
 */
void
px_lib_unmap_regs(pxu_t *pxu_p)
{
	int i;

	for (i = 0; i < PX_REG_MAX; i++) {
		if (pxu_p->px_ac[i])
			ddi_regs_map_free(&pxu_p->px_ac[i]);
	}
}

int
px_lib_dev_init(dev_info_t *dip, devhandle_t *dev_hdl)
{
	caddr_t			xbc_csr_base, csr_base;
	px_dvma_range_prop_t	px_dvma_range;
	pxu_t			*pxu_p;
	uint8_t			chip_mask;
	px_t			*px_p = DIP_TO_STATE(dip);
	px_chip_type_t		chip_type = px_identity_init(px_p);

	DBG(DBG_ATTACH, dip, "px_lib_dev_init: dip 0x%p", dip);

	if (chip_type == PX_CHIP_UNIDENTIFIED) {
		cmn_err(CE_WARN, "%s%d: Unrecognized Hardware Version\n",
		    NAMEINST(dip));
		return (DDI_FAILURE);
	}

	chip_mask = BITMASK(chip_type);
	px_paddr_mask = (chip_type == PX_CHIP_FIRE) ? MMU_FIRE_PADDR_MASK :
	    MMU_OBERON_PADDR_MASK;

	/*
	 * Allocate platform specific structure and link it to
	 * the px state structure.
	 */
	pxu_p = kmem_zalloc(sizeof (pxu_t), KM_SLEEP);
	pxu_p->chip_type = chip_type;
	pxu_p->portid  = ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "portid", -1);

	/* Map in the registers */
	if (px_lib_map_regs(pxu_p, dip) == DDI_FAILURE) {
		kmem_free(pxu_p, sizeof (pxu_t));

		return (DDI_FAILURE);
	}

	xbc_csr_base = (caddr_t)pxu_p->px_address[PX_REG_XBC];
	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];

	pxu_p->tsb_cookie = iommu_tsb_alloc(pxu_p->portid);
	pxu_p->tsb_size = iommu_tsb_cookie_to_size(pxu_p->tsb_cookie);
	pxu_p->tsb_vaddr = iommu_tsb_cookie_to_va(pxu_p->tsb_cookie);

	pxu_p->tsb_paddr = va_to_pa(pxu_p->tsb_vaddr);

	/*
	 * Create "virtual-dma" property to support child devices
	 * needing to know DVMA range.
	 */
	px_dvma_range.dvma_base = (uint32_t)px_mmu_dvma_end + 1
	    - ((pxu_p->tsb_size >> 3) << MMU_PAGE_SHIFT);
	px_dvma_range.dvma_len = (uint32_t)
	    px_mmu_dvma_end - px_dvma_range.dvma_base + 1;
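	/*
	 * Worked example (values are illustrative assumptions): each
	 * 8-byte TSB entry maps one MMU page, so a 128KB TSB holds
	 * 128KB >> 3 = 16K entries.  With 8K pages (MMU_PAGE_SHIFT == 13)
	 * that covers 16K << 13 = 128MB of DVMA space, giving
	 * dvma_base = 0xFFFFFFFF + 1 - 0x08000000 = 0xF8000000 and
	 * dvma_len = 0x08000000.  The range always ends at
	 * px_mmu_dvma_end and grows downward with the TSB size.
	 */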

	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    "virtual-dma", (caddr_t)&px_dvma_range,
	    sizeof (px_dvma_range_prop_t));
	/*
	 * Initialize all fire hardware specific blocks.
	 */
	hvio_cb_init(xbc_csr_base, pxu_p);
	hvio_ib_init(csr_base, pxu_p);
	hvio_pec_init(csr_base, pxu_p);
	hvio_mmu_init(csr_base, pxu_p);

	px_p->px_plat_p = (void *)pxu_p;

	/*
	 * Initialize all the interrupt handlers
	 */
	switch (PX_CHIP_TYPE(pxu_p)) {
	case PX_CHIP_OBERON:
		/*
		 * Oberon hotplug uses the SPARE3 field in the ILU Error Log
		 * Enable register to indicate the status of leaf reset.  We
		 * need to preserve the value of this bit, and keep it in
		 * px_ilu_log_mask to reflect the state of the bit.
		 */
		if (CSR_BR(csr_base, ILU_ERROR_LOG_ENABLE, SPARE3))
			px_ilu_log_mask |= (1ull <<
			    ILU_ERROR_LOG_ENABLE_SPARE3);
		else
			px_ilu_log_mask &= ~(1ull <<
			    ILU_ERROR_LOG_ENABLE_SPARE3);

		px_err_reg_setup_pcie(chip_mask, csr_base, PX_ERR_ENABLE);
		px_fabric_die_rc_ue |= PCIE_AER_UCE_UC;
		break;

	case PX_CHIP_FIRE:
		px_err_reg_setup_pcie(chip_mask, csr_base, PX_ERR_ENABLE);
		break;

	default:
		cmn_err(CE_WARN, "%s%d: PX primary bus Unknown\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	/* Initialize device handle */
	*dev_hdl = (devhandle_t)csr_base;

	DBG(DBG_ATTACH, dip, "px_lib_dev_init: dev_hdl 0x%llx\n", *dev_hdl);

	return (DDI_SUCCESS);
}

int
px_lib_dev_fini(dev_info_t *dip)
{
	caddr_t			csr_base;
	uint8_t			chip_mask;
	px_t			*px_p = DIP_TO_STATE(dip);
	pxu_t			*pxu_p = (pxu_t *)px_p->px_plat_p;

	DBG(DBG_DETACH, dip, "px_lib_dev_fini: dip 0x%p\n", dip);

	/*
	 * Deinitialize all the interrupt handlers
	 */
	switch (PX_CHIP_TYPE(pxu_p)) {
	case PX_CHIP_OBERON:
	case PX_CHIP_FIRE:
		chip_mask = BITMASK(PX_CHIP_TYPE(pxu_p));
		csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
		px_err_reg_setup_pcie(chip_mask, csr_base, PX_ERR_DISABLE);
		break;

	default:
		cmn_err(CE_WARN, "%s%d: PX primary bus Unknown\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	iommu_tsb_free(pxu_p->tsb_cookie);

	px_lib_unmap_regs((pxu_t *)px_p->px_plat_p);
	kmem_free(px_p->px_plat_p, sizeof (pxu_t));
	px_p->px_plat_p = NULL;

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_devino_to_sysino(dev_info_t *dip, devino_t devino,
    sysino_t *sysino)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: dip 0x%p "
	    "devino 0x%x\n", dip, devino);

	if ((ret = hvio_intr_devino_to_sysino(DIP_TO_HANDLE(dip),
	    pxu_p, devino, sysino)) != H_EOK) {
		DBG(DBG_LIB_INT, dip,
		    "hvio_intr_devino_to_sysino failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: sysino 0x%llx\n",
	    *sysino);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_getvalid(dev_info_t *dip, sysino_t sysino,
    intr_valid_state_t *intr_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: dip 0x%p sysino 0x%llx\n",
	    dip, sysino);

	if ((ret = hvio_intr_getvalid(DIP_TO_HANDLE(dip),
	    sysino, intr_valid_state)) != H_EOK) {
		DBG(DBG_LIB_INT, dip, "hvio_intr_getvalid failed, ret 0x%lx\n",
		    ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: intr_valid_state 0x%x\n",
	    *intr_valid_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_setvalid(dev_info_t *dip, sysino_t sysino,
    intr_valid_state_t intr_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_setvalid: dip 0x%p sysino 0x%llx "
	    "intr_valid_state 0x%x\n", dip, sysino, intr_valid_state);

	if ((ret = hvio_intr_setvalid(DIP_TO_HANDLE(dip),
	    sysino, intr_valid_state)) != H_EOK) {
		DBG(DBG_LIB_INT, dip, "hvio_intr_setvalid failed, ret 0x%lx\n",
		    ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_getstate(dev_info_t *dip, sysino_t sysino,
    intr_state_t *intr_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: dip 0x%p sysino 0x%llx\n",
	    dip, sysino);

	if ((ret = hvio_intr_getstate(DIP_TO_HANDLE(dip),
	    sysino, intr_state)) != H_EOK) {
		DBG(DBG_LIB_INT, dip, "hvio_intr_getstate failed, ret 0x%lx\n",
		    ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: intr_state 0x%x\n",
	    *intr_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_setstate(dev_info_t *dip, sysino_t sysino,
    intr_state_t intr_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_setstate: dip 0x%p sysino 0x%llx "
	    "intr_state 0x%x\n", dip, sysino, intr_state);

	if ((ret = hvio_intr_setstate(DIP_TO_HANDLE(dip),
	    sysino, intr_state)) != H_EOK) {
		DBG(DBG_LIB_INT, dip, "hvio_intr_setstate failed, ret 0x%lx\n",
		    ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_gettarget(dev_info_t *dip, sysino_t sysino, cpuid_t *cpuid)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: dip 0x%p sysino 0x%llx\n",
	    dip, sysino);

	if ((ret = hvio_intr_gettarget(DIP_TO_HANDLE(dip), pxu_p,
	    sysino, cpuid)) != H_EOK) {
		DBG(DBG_LIB_INT, dip, "hvio_intr_gettarget failed, ret 0x%lx\n",
		    ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: cpuid 0x%x\n", *cpuid);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_settarget(dev_info_t *dip, sysino_t sysino, cpuid_t cpuid)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_settarget: dip 0x%p sysino 0x%llx "
	    "cpuid 0x%x\n", dip, sysino, cpuid);

	if ((ret = hvio_intr_settarget(DIP_TO_HANDLE(dip), pxu_p,
	    sysino, cpuid)) != H_EOK) {
		DBG(DBG_LIB_INT, dip, "hvio_intr_settarget failed, ret 0x%lx\n",
		    ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_reset(dev_info_t *dip)
{
	devino_t	ino;
	sysino_t	sysino;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_reset: dip 0x%p\n", dip);

	/* Reset all Interrupts */
	for (ino = 0; ino < INTERRUPT_MAPPING_ENTRIES; ino++) {
		if (px_lib_intr_devino_to_sysino(dip, ino,
		    &sysino) != DDI_SUCCESS)
			return (BF_FATAL);

		if (px_lib_intr_setstate(dip, sysino,
		    INTR_IDLE_STATE) != DDI_SUCCESS)
			return (BF_FATAL);
	}

	return (BF_NONE);
}

/*ARGSUSED*/
int
px_lib_iommu_map(dev_info_t *dip, tsbid_t tsbid, pages_t pages,
    io_attributes_t attr, void *addr, size_t pfn_index, int flags)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	uint64_t	ret;

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: dip 0x%p tsbid 0x%llx "
	    "pages 0x%x attr 0x%x addr 0x%p pfn_index 0x%llx flags 0x%x\n",
	    dip, tsbid, pages, attr, addr, pfn_index, flags);

	if ((ret = hvio_iommu_map(px_p->px_dev_hdl, pxu_p, tsbid, pages,
	    attr, addr, pfn_index, flags)) != H_EOK) {
		DBG(DBG_LIB_DMA, dip,
		    "px_lib_iommu_map failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_iommu_demap(dev_info_t *dip, tsbid_t tsbid, pages_t pages)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	uint64_t	ret;

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_demap: dip 0x%p tsbid 0x%llx "
	    "pages 0x%x\n", dip, tsbid, pages);

	if ((ret = hvio_iommu_demap(px_p->px_dev_hdl, pxu_p, tsbid, pages))
	    != H_EOK) {
		DBG(DBG_LIB_DMA, dip,
		    "px_lib_iommu_demap failed, ret 0x%lx\n", ret);

		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_iommu_getmap(dev_info_t *dip, tsbid_t tsbid, io_attributes_t *attr_p,
    r_addr_t *r_addr_p)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
	uint64_t	ret;

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: dip 0x%p tsbid 0x%llx\n",
	    dip, tsbid);

	if ((ret = hvio_iommu_getmap(DIP_TO_HANDLE(dip), pxu_p, tsbid,
	    attr_p, r_addr_p)) != H_EOK) {
		DBG(DBG_LIB_DMA, dip,
		    "hvio_iommu_getmap failed, ret 0x%lx\n", ret);

		return ((ret == H_ENOMAP) ? DDI_DMA_NOMAPPING : DDI_FAILURE);
	}

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: attr 0x%x r_addr 0x%llx\n",
	    *attr_p, *r_addr_p);

	return (DDI_SUCCESS);
}


/*
 * Checks DMA attributes against the system bypass range.
 * The bypass range is determined by the hardware.  Return it so the
 * common code can do generic checking against it.
 */
/*ARGSUSED*/
int
px_lib_dma_bypass_rngchk(dev_info_t *dip, ddi_dma_attr_t *attr_p,
    uint64_t *lo_p, uint64_t *hi_p)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;

	*lo_p = hvio_get_bypass_base(pxu_p);
	*hi_p = hvio_get_bypass_end(pxu_p);

	return (DDI_SUCCESS);
}


/*ARGSUSED*/
int
px_lib_iommu_getbypass(dev_info_t *dip, r_addr_t ra, io_attributes_t attr,
    io_addr_t *io_addr_p)
{
	uint64_t	ret;
	px_t	*px_p = DIP_TO_STATE(dip);
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: dip 0x%p ra 0x%llx "
	    "attr 0x%x\n", dip, ra, attr);

	if ((ret = hvio_iommu_getbypass(DIP_TO_HANDLE(dip), pxu_p, ra,
	    attr, io_addr_p)) != H_EOK) {
		DBG(DBG_LIB_DMA, dip,
		    "hvio_iommu_getbypass failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: io_addr 0x%llx\n",
	    *io_addr_p);

	return (DDI_SUCCESS);
}

/*
 * bus dma sync entry point.
 */
/*ARGSUSED*/
int
px_lib_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    off_t off, size_t len, uint_t cache_flags)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	px_t	*px_p = DIP_TO_STATE(dip);
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;

	DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: dip 0x%p rdip 0x%p "
	    "handle 0x%llx off 0x%x len 0x%x flags 0x%x\n",
	    dip, rdip, handle, off, len, cache_flags);

	/*
	 * No flush needed for Oberon
	 */
	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON)
		return (DDI_SUCCESS);

	/*
	 * jbus_stst_order is found only in certain cpu modules.
	 * Just return success if not present.
	 */
	if (&jbus_stst_order == NULL)
		return (DDI_SUCCESS);

	if (!(mp->dmai_flags & PX_DMAI_FLAGS_INUSE)) {
		cmn_err(CE_WARN, "%s%d: Unbound dma handle %p.",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), (void *)mp);

		return (DDI_FAILURE);
	}

	if (mp->dmai_flags & PX_DMAI_FLAGS_NOSYNC)
		return (DDI_SUCCESS);

	/*
	 * No flush needed when sending data from memory to device.
	 * Nothing to do to "sync" memory to what device would already see.
	 */
	if (!(mp->dmai_rflags & DDI_DMA_READ) ||
	    ((cache_flags & PX_DMA_SYNC_DDI_FLAGS) == DDI_DMA_SYNC_FORDEV))
		return (DDI_SUCCESS);

	/*
	 * Perform necessary cpu workaround to ensure jbus ordering.
	 * CPU's internal "invalidate FIFOs" are flushed.
	 */

#if !defined(lint)
	kpreempt_disable();
#endif
	jbus_stst_order();
#if !defined(lint)
	kpreempt_enable();
#endif
	return (DDI_SUCCESS);
}

/*
 * MSIQ Functions:
 */
/*ARGSUSED*/
int
px_lib_msiq_init(dev_info_t *dip)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
	px_dvma_addr_t	pg_index;
	size_t		size;
	int		ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_init: dip 0x%p\n", dip);

	/*
	 * Map the EQ memory into the Fire MMU (has to be 512KB aligned)
	 * and then initialize the base address register.
	 *
	 * Allocate entries from Fire IOMMU so that the resulting address
	 * is properly aligned.  Calculate the index of the first allocated
	 * entry.  Note: The size of the mapping is assumed to be a multiple
	 * of the page size.
	 */
	size = msiq_state_p->msiq_cnt *
	    msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);
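	/*
	 * Sizing sketch (counts are illustrative assumptions, not the
	 * actual tunables): with msiq_cnt == 36 event queues of
	 * msiq_rec_cnt == 128 records each and a 64-byte msiq_rec_t,
	 * size would be 36 * 128 * 64 = 288KB, which MMU_BTOP() below
	 * converts to whole MMU pages for the IOMMU mapping.
	 */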

	pxu_p->msiq_mapped_p = vmem_xalloc(px_p->px_mmu_p->mmu_dvma_map,
	    size, (512 * 1024), 0, 0, NULL, NULL, VM_NOSLEEP | VM_BESTFIT);

	if (pxu_p->msiq_mapped_p == NULL)
		return (DDI_FAILURE);

	pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p,
	    MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p));

	if ((ret = px_lib_iommu_map(px_p->px_dip, PCI_TSBID(0, pg_index),
	    MMU_BTOP(size), PCI_MAP_ATTR_WRITE, msiq_state_p->msiq_buf_p,
	    0, MMU_MAP_BUF)) != DDI_SUCCESS) {
		DBG(DBG_LIB_MSIQ, dip,
		    "px_lib_iommu_map failed, ret 0x%x\n", ret);

		(void) px_lib_msiq_fini(dip);
		return (DDI_FAILURE);
	}

	(void) hvio_msiq_init(DIP_TO_HANDLE(dip), pxu_p);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_fini(dev_info_t *dip)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
	px_dvma_addr_t	pg_index;
	size_t		size;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_fini: dip 0x%p\n", dip);

	/*
	 * Unmap and free the EQ memory that had been mapped
	 * into the Fire IOMMU.
	 */
	size = msiq_state_p->msiq_cnt *
	    msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);

	pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p,
	    MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p));

	(void) px_lib_iommu_demap(px_p->px_dip,
	    PCI_TSBID(0, pg_index), MMU_BTOP(size));

	/* Free the entries from the Fire MMU */
	vmem_xfree(px_p->px_mmu_p->mmu_dvma_map,
	    (void *)pxu_p->msiq_mapped_p, size);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_info(dev_info_t *dip, msiqid_t msiq_id, r_addr_t *ra_p,
    uint_t *msiq_rec_cnt_p)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
	size_t		msiq_size;

	DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: dip 0x%p msiq_id 0x%x\n",
	    dip, msiq_id);

	msiq_size = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);
	*ra_p = (r_addr_t)((caddr_t)msiq_state_p->msiq_buf_p +
	    (msiq_id * msiq_size));

	*msiq_rec_cnt_p = msiq_state_p->msiq_rec_cnt;

	DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: ra_p 0x%llx msiq_rec_cnt 0x%x\n",
	    *ra_p, *msiq_rec_cnt_p);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_getvalid(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_valid_state_t *msiq_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: dip 0x%p msiq_id 0x%x\n",
	    dip, msiq_id);

	if ((ret = hvio_msiq_getvalid(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_getvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: msiq_valid_state 0x%x\n",
	    *msiq_valid_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_setvalid(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_valid_state_t msiq_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setvalid: dip 0x%p msiq_id 0x%x "
	    "msiq_valid_state 0x%x\n", dip, msiq_id, msiq_valid_state);

	if ((ret = hvio_msiq_setvalid(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_setvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_getstate(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_state_t *msiq_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: dip 0x%p msiq_id 0x%x\n",
	    dip, msiq_id);

	if ((ret = hvio_msiq_getstate(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_state)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_getstate failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: msiq_state 0x%x\n",
	    *msiq_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_setstate(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_state_t msiq_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setstate: dip 0x%p msiq_id 0x%x "
	    "msiq_state 0x%x\n", dip, msiq_id, msiq_state);

	if ((ret = hvio_msiq_setstate(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_state)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_setstate failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_gethead(dev_info_t *dip, msiqid_t msiq_id,
    msiqhead_t *msiq_head)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: dip 0x%p msiq_id 0x%x\n",
	    dip, msiq_id);

	if ((ret = hvio_msiq_gethead(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_head)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_gethead failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: msiq_head 0x%x\n",
	    *msiq_head);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_sethead(dev_info_t *dip, msiqid_t msiq_id,
    msiqhead_t msiq_head)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_sethead: dip 0x%p msiq_id 0x%x "
	    "msiq_head 0x%x\n", dip, msiq_id, msiq_head);

	if ((ret = hvio_msiq_sethead(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_head)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_sethead failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_gettail(dev_info_t *dip, msiqid_t msiq_id,
    msiqtail_t *msiq_tail)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: dip 0x%p msiq_id 0x%x\n",
	    dip, msiq_id);

	if ((ret = hvio_msiq_gettail(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_tail)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_gettail failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: msiq_tail 0x%x\n",
	    *msiq_tail);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
void
px_lib_get_msiq_rec(dev_info_t *dip, msiqhead_t *msiq_head_p,
    msiq_rec_t *msiq_rec_p)
{
	eq_rec_t	*eq_rec_p = (eq_rec_t *)msiq_head_p;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: dip 0x%p eq_rec_p 0x%p\n",
	    dip, eq_rec_p);

	if (!eq_rec_p->eq_rec_fmt_type) {
		/* Set msiq_rec_type to zero */
		msiq_rec_p->msiq_rec_type = 0;

		return;
	}

	DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: EQ RECORD, "
	    "eq_rec_rid 0x%llx eq_rec_fmt_type 0x%llx "
	    "eq_rec_len 0x%llx eq_rec_addr0 0x%llx "
	    "eq_rec_addr1 0x%llx eq_rec_data0 0x%llx "
	    "eq_rec_data1 0x%llx\n", eq_rec_p->eq_rec_rid,
	    eq_rec_p->eq_rec_fmt_type, eq_rec_p->eq_rec_len,
	    eq_rec_p->eq_rec_addr0, eq_rec_p->eq_rec_addr1,
	    eq_rec_p->eq_rec_data0, eq_rec_p->eq_rec_data1);

	/*
	 * Only the upper 4 bits of eq_rec_fmt_type are used to identify
	 * the EQ record type; the low 3 bits carry the routing code for
	 * PCIe message records.
	 */
	switch (eq_rec_p->eq_rec_fmt_type >> 3) {
	case EQ_REC_MSI32:
		msiq_rec_p->msiq_rec_type = MSI32_REC;

		msiq_rec_p->msiq_rec_data.msi.msi_data =
		    eq_rec_p->eq_rec_data0;
		break;
	case EQ_REC_MSI64:
		msiq_rec_p->msiq_rec_type = MSI64_REC;

		msiq_rec_p->msiq_rec_data.msi.msi_data =
		    eq_rec_p->eq_rec_data0;
		break;
	case EQ_REC_MSG:
		msiq_rec_p->msiq_rec_type = MSG_REC;

		msiq_rec_p->msiq_rec_data.msg.msg_route =
		    eq_rec_p->eq_rec_fmt_type & 7;
		msiq_rec_p->msiq_rec_data.msg.msg_targ = eq_rec_p->eq_rec_rid;
		msiq_rec_p->msiq_rec_data.msg.msg_code = eq_rec_p->eq_rec_data0;
		break;
	default:
		cmn_err(CE_WARN, "%s%d: px_lib_get_msiq_rec: "
		    "0x%x is an unknown EQ record type",
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    (int)eq_rec_p->eq_rec_fmt_type);
		break;
	}

	msiq_rec_p->msiq_rec_rid = eq_rec_p->eq_rec_rid;
	msiq_rec_p->msiq_rec_msi_addr = ((eq_rec_p->eq_rec_addr1 << 16) |
	    (eq_rec_p->eq_rec_addr0 << 2));
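	/*
	 * Reassembly sketch: the EQ record splits the MSI address into
	 * two fields, eq_rec_addr0 holding the word-aligned low bits
	 * (hence the << 2) and eq_rec_addr1 the bits above bit 15.  For
	 * example, address 0xFEE01004 would arrive as eq_rec_addr1 ==
	 * 0xFEE0 and eq_rec_addr0 == 0x401 (0x1004 >> 2).  The exact
	 * field widths are defined by the eq_rec_t layout, not by this
	 * illustration.
	 */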

	/* Zero out eq_rec_fmt_type field */
	eq_rec_p->eq_rec_fmt_type = 0;
}

/*
 * MSI Functions:
 */
/*ARGSUSED*/
int
px_lib_msi_init(dev_info_t *dip)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	px_msi_state_t	*msi_state_p = &px_p->px_ib_p->ib_msi_state;
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_init: dip 0x%p\n", dip);

	if ((ret = hvio_msi_init(DIP_TO_HANDLE(dip),
	    msi_state_p->msi_addr32, msi_state_p->msi_addr64)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip, "px_lib_msi_init failed, ret 0x%lx\n",
		    ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_getmsiq(dev_info_t *dip, msinum_t msi_num,
    msiqid_t *msiq_id)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: dip 0x%p msi_num 0x%x\n",
	    dip, msi_num);

	if ((ret = hvio_msi_getmsiq(DIP_TO_HANDLE(dip),
	    msi_num, msiq_id)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_getmsiq failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: msiq_id 0x%x\n",
	    *msiq_id);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_setmsiq(dev_info_t *dip, msinum_t msi_num,
    msiqid_t msiq_id, msi_type_t msitype)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setmsiq: dip 0x%p msi_num 0x%x "
	    "msiq_id 0x%x\n", dip, msi_num, msiq_id);

	if ((ret = hvio_msi_setmsiq(DIP_TO_HANDLE(dip),
	    msi_num, msiq_id)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_setmsiq failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_getvalid(dev_info_t *dip, msinum_t msi_num,
    pci_msi_valid_state_t *msi_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: dip 0x%p msi_num 0x%x\n",
	    dip, msi_num);

	if ((ret = hvio_msi_getvalid(DIP_TO_HANDLE(dip),
	    msi_num, msi_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_getvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: msi_valid_state 0x%x\n",
	    *msi_valid_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_setvalid(dev_info_t *dip, msinum_t msi_num,
    pci_msi_valid_state_t msi_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setvalid: dip 0x%p msi_num 0x%x "
	    "msi_valid_state 0x%x\n", dip, msi_num, msi_valid_state);

	if ((ret = hvio_msi_setvalid(DIP_TO_HANDLE(dip),
	    msi_num, msi_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_setvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_getstate(dev_info_t *dip, msinum_t msi_num,
    pci_msi_state_t *msi_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: dip 0x%p msi_num 0x%x\n",
	    dip, msi_num);

	if ((ret = hvio_msi_getstate(DIP_TO_HANDLE(dip),
	    msi_num, msi_state)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_getstate failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: msi_state 0x%x\n",
	    *msi_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_setstate(dev_info_t *dip, msinum_t msi_num,
    pci_msi_state_t msi_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setstate: dip 0x%p msi_num 0x%x "
	    "msi_state 0x%x\n", dip, msi_num, msi_state);

	if ((ret = hvio_msi_setstate(DIP_TO_HANDLE(dip),
	    msi_num, msi_state)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_setstate failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * MSG Functions:
 */
/*ARGSUSED*/
int
px_lib_msg_getmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
    msiqid_t *msiq_id)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: dip 0x%p msg_type 0x%x\n",
	    dip, msg_type);

	if ((ret = hvio_msg_getmsiq(DIP_TO_HANDLE(dip),
	    msg_type, msiq_id)) != H_EOK) {
		DBG(DBG_LIB_MSG, dip,
		    "hvio_msg_getmsiq failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: msiq_id 0x%x\n",
	    *msiq_id);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msg_setmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
    msiqid_t msiq_id)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_setmsiq: dip 0x%p msg_type 0x%x "
	    "msiq_id 0x%x\n", dip, msg_type, msiq_id);

	if ((ret = hvio_msg_setmsiq(DIP_TO_HANDLE(dip),
	    msg_type, msiq_id)) != H_EOK) {
		DBG(DBG_LIB_MSG, dip,
		    "hvio_msg_setmsiq failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msg_getvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
    pcie_msg_valid_state_t *msg_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: dip 0x%p msg_type 0x%x\n",
	    dip, msg_type);

	if ((ret = hvio_msg_getvalid(DIP_TO_HANDLE(dip), msg_type,
	    msg_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSG, dip,
		    "hvio_msg_getvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: msg_valid_state 0x%x\n",
	    *msg_valid_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msg_setvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
    pcie_msg_valid_state_t msg_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_setvalid: dip 0x%p msg_type 0x%x "
	    "msg_valid_state 0x%x\n", dip, msg_type, msg_valid_state);

	if ((ret = hvio_msg_setvalid(DIP_TO_HANDLE(dip), msg_type,
	    msg_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSG, dip,
		    "hvio_msg_setvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * Suspend/Resume Functions:
 * Currently unsupported by hypervisor
 */
int
px_lib_suspend(dev_info_t *dip)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	px_cb_t		*cb_p = PX2CB(px_p);
	devhandle_t	dev_hdl, xbus_dev_hdl;
	uint64_t	ret = H_EOK;

	DBG(DBG_DETACH, dip, "px_lib_suspend: dip 0x%p\n", dip);

	dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR];
	xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC];

	if ((ret = hvio_suspend(dev_hdl, pxu_p)) != H_EOK)
		goto fail;

	if (--cb_p->attachcnt == 0) {
		ret = hvio_cb_suspend(xbus_dev_hdl, pxu_p);
		if (ret != H_EOK)
			cb_p->attachcnt++;
	}

fail:
	return ((ret != H_EOK) ? DDI_FAILURE : DDI_SUCCESS);
}

void
px_lib_resume(dev_info_t *dip)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	px_cb_t		*cb_p = PX2CB(px_p);
	devhandle_t	dev_hdl, xbus_dev_hdl;
	devino_t	pec_ino = px_p->px_inos[PX_INTR_PEC];
	devino_t	xbc_ino = px_p->px_inos[PX_INTR_XBC];

	DBG(DBG_ATTACH, dip, "px_lib_resume: dip 0x%p\n", dip);

	dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR];
	xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC];

	if (++cb_p->attachcnt == 1)
		hvio_cb_resume(dev_hdl, xbus_dev_hdl, xbc_ino, pxu_p);

	hvio_resume(dev_hdl, pec_ino, pxu_p);
}

/*
 * Generate a unique Oberon UBC ID based on the Logical System Board and
 * the IO Channel from the portid property field.
 */
static uint64_t
oberon_get_ubc_id(dev_info_t *dip)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
	uint64_t	ubc_id;

	/*
	 * Generate a unique 6 bit UBC ID using the 2 IO_Channel#[1:0] bits and
	 * the 4 LSB_ID[3:0] bits from the Oberon's portid property.
	 */
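	/*
	 * Layout sketch (the shift value is an assumption for
	 * illustration, not a verified constant): the expression below
	 * forms ubc_id = (LSB_ID[3:0] << OBERON_UBC_ID_LSB) |
	 * IO_Channel#[1:0].  E.g., with LSB_ID == 5, IO_Channel# == 2,
	 * and OBERON_UBC_ID_LSB == 2, ubc_id would be (5 << 2) | 2 = 0x16.
	 */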
	ubc_id = (((pxu_p->portid >> OBERON_PORT_ID_IOC) &
	    OBERON_PORT_ID_IOC_MASK) | (((pxu_p->portid >>
	    OBERON_PORT_ID_LSB) & OBERON_PORT_ID_LSB_MASK)
	    << OBERON_UBC_ID_LSB));

	return (ubc_id);
}

/*
 * Oberon does not have a UBC scratch register, so allocate an array of
 * scratch registers when needed and use a unique UBC ID as an index.
 * This code could be simplified with a pre-allocated array; the array is
 * currently allocated dynamically because it is only needed on Oberon.
 */
static void
oberon_set_cb(dev_info_t *dip, uint64_t val)
{
	uint64_t	ubc_id;

	if (px_oberon_ubc_scratch_regs == NULL)
		px_oberon_ubc_scratch_regs =
		    (uint64_t *)kmem_zalloc(sizeof (uint64_t)*
		    OBERON_UBC_ID_MAX, KM_SLEEP);

	ubc_id = oberon_get_ubc_id(dip);

	px_oberon_ubc_scratch_regs[ubc_id] = val;

	/*
	 * Check if any scratch registers are still in use. If all scratch
	 * registers are currently set to zero, then deallocate the scratch
	 * register array.
	 */
	for (ubc_id = 0; ubc_id < OBERON_UBC_ID_MAX; ubc_id++) {
		if (px_oberon_ubc_scratch_regs[ubc_id] != 0)
			return;
	}

	/*
	 * All scratch registers are set to zero so deallocate the scratch
	 * register array and set the pointer to NULL.
	 */
	kmem_free(px_oberon_ubc_scratch_regs,
	    (sizeof (uint64_t)*OBERON_UBC_ID_MAX));

	px_oberon_ubc_scratch_regs = NULL;
}

/*
 * Oberon does not have a UBC scratch register, so use an allocated array of
 * scratch registers and use the unique UBC ID as an index into that array.
 */
static uint64_t
oberon_get_cb(dev_info_t *dip)
{
	uint64_t	ubc_id;

	if (px_oberon_ubc_scratch_regs == NULL)
		return (0);

	ubc_id = oberon_get_ubc_id(dip);

	return (px_oberon_ubc_scratch_regs[ubc_id]);
}

/*
 * Misc Functions:
 * Currently unsupported by hypervisor
 */
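/*
 * px_get_cb()/px_set_cb() stash a 64-bit software value (in practice the
 * px_cb_t pointer used by px_cb_add_intr() below) in per-ASIC storage:
 * the JBUS_SCRATCH_1 register on Fire, or the software scratch array on
 * Oberon.  This is how the leaves sharing one ASIC locate their common
 * control block.
 */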
static uint64_t
px_get_cb(dev_info_t *dip)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;

	/*
	 * Oberon does not currently have Scratchpad registers.
	 */
	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON)
		return (oberon_get_cb(dip));

	return (CSR_XR((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1));
}

static void
px_set_cb(dev_info_t *dip, uint64_t val)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;

	/*
	 * Oberon does not currently have Scratchpad registers.
	 */
	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
		oberon_set_cb(dip, val);
		return;
	}

	CSR_XS((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1, val);
}

/*ARGSUSED*/
int
px_lib_map_vconfig(dev_info_t *dip,
    ddi_map_req_t *mp, pci_config_offset_t off,
    pci_regspec_t *rp, caddr_t *addrp)
{
	/*
	 * No special config space access services in this layer.
	 */
	return (DDI_FAILURE);
}

void
px_lib_map_attr_check(ddi_map_req_t *mp)
{
	ddi_acc_hdl_t *hp = mp->map_handlep;

	/* fire does not accept byte masks from PIO store merge */
	if (hp->ah_acc.devacc_attr_dataorder == DDI_STORECACHING_OK_ACC)
		hp->ah_acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
}

void
px_lib_clr_errs(px_t *px_p)
{
	px_pec_t	*pec_p = px_p->px_pec_p;
	dev_info_t	*rpdip = px_p->px_dip;
	int		err = PX_OK, ret;
	int		acctype = pec_p->pec_safeacc_type;
	ddi_fm_error_t	derr;

	/* Create the derr */
	bzero(&derr, sizeof (ddi_fm_error_t));
	derr.fme_version = DDI_FME_VERSION;
	derr.fme_ena = fm_ena_generate(0, FM_ENA_FMT1);
	derr.fme_flag = acctype;

	if (acctype == DDI_FM_ERR_EXPECTED) {
		derr.fme_status = DDI_FM_NONFATAL;
		ndi_fm_acc_err_set(pec_p->pec_acc_hdl, &derr);
	}

	mutex_enter(&px_p->px_fm_mutex);

	/* send ereport/handle/clear fire registers */
	err = px_err_handle(px_p, &derr, PX_LIB_CALL, B_TRUE);

	/* Check all child devices for errors */
	ret = ndi_fm_handler_dispatch(rpdip, NULL, &derr);

	mutex_exit(&px_p->px_fm_mutex);

	/*
	 * PX_FATAL_HW indicates a condition recovered from a fatal reset
	 * and therefore does not cause a panic.
	 */
	if ((err & (PX_FATAL_GOS | PX_FATAL_SW)) || (ret == DDI_FM_FATAL))
		PX_FM_PANIC("Fatal System Port Error has occurred\n");
}

#ifdef  DEBUG
int	px_peekfault_cnt = 0;
int	px_pokefault_cnt = 0;
#endif  /* DEBUG */
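
/*
 * The do_poke/do_peek paths below bracket a single access with
 * on_trap(OT_DATA_ACCESS), temporarily pointing the trap trampoline at
 * poke_fault/peek_fault so that a faulting PIO access returns
 * DDI_FAILURE instead of panicking.  The poke path additionally calls
 * px_lib_clr_errs() to clear any error state latched by the fabric.
 */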

/*ARGSUSED*/
static int
px_lib_do_poke(dev_info_t *dip, dev_info_t *rdip,
    peekpoke_ctlops_t *in_args)
{
	px_t *px_p = DIP_TO_STATE(dip);
	px_pec_t *pec_p = px_p->px_pec_p;
	int err = DDI_SUCCESS;
	on_trap_data_t otd;

	mutex_enter(&pec_p->pec_pokefault_mutex);
	pec_p->pec_ontrap_data = &otd;
	pec_p->pec_safeacc_type = DDI_FM_ERR_POKE;

	/* Set up protected environment. */
	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		uintptr_t tramp = otd.ot_trampoline;

		otd.ot_trampoline = (uintptr_t)&poke_fault;
		err = do_poke(in_args->size, (void *)in_args->dev_addr,
		    (void *)in_args->host_addr);
		otd.ot_trampoline = tramp;
	} else
		err = DDI_FAILURE;

	px_lib_clr_errs(px_p);

	if (otd.ot_trap & OT_DATA_ACCESS)
		err = DDI_FAILURE;

	/* Take down protected environment. */
	no_trap();

	pec_p->pec_ontrap_data = NULL;
	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
	mutex_exit(&pec_p->pec_pokefault_mutex);

#ifdef  DEBUG
	if (err == DDI_FAILURE)
		px_pokefault_cnt++;
#endif
	return (err);
}

/*ARGSUSED*/
static int
px_lib_do_caut_put(dev_info_t *dip, dev_info_t *rdip,
    peekpoke_ctlops_t *cautacc_ctlops_arg)
{
	size_t size = cautacc_ctlops_arg->size;
	uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
	uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
	size_t repcount = cautacc_ctlops_arg->repcount;
	uint_t flags = cautacc_ctlops_arg->flags;

	px_t *px_p = DIP_TO_STATE(dip);
	px_pec_t *pec_p = px_p->px_pec_p;
	int err = DDI_SUCCESS;

	/*
	 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault
	 * mutex.
	 */
	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);

	pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap;
	pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED;
	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;

	if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
		for (; repcount; repcount--) {
			switch (size) {

			case sizeof (uint8_t):
				i_ddi_put8(hp, (uint8_t *)dev_addr,
				    *(uint8_t *)host_addr);
				break;

			case sizeof (uint16_t):
				i_ddi_put16(hp, (uint16_t *)dev_addr,
				    *(uint16_t *)host_addr);
				break;

			case sizeof (uint32_t):
				i_ddi_put32(hp, (uint32_t *)dev_addr,
				    *(uint32_t *)host_addr);
				break;

			case sizeof (uint64_t):
				i_ddi_put64(hp, (uint64_t *)dev_addr,
				    *(uint64_t *)host_addr);
				break;
			}

			host_addr += size;

			if (flags == DDI_DEV_AUTOINCR)
				dev_addr += size;

			px_lib_clr_errs(px_p);

			if (pec_p->pec_ontrap_data->ot_trap & OT_DATA_ACCESS) {
				err = DDI_FAILURE;
#ifdef  DEBUG
				px_pokefault_cnt++;
#endif
				break;
			}
		}
	}

	i_ddi_notrap((ddi_acc_handle_t)hp);
	pec_p->pec_ontrap_data = NULL;
	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;

	return (err);
}


int
px_lib_ctlops_poke(dev_info_t *dip, dev_info_t *rdip,
    peekpoke_ctlops_t *in_args)
{
	return (in_args->handle ? px_lib_do_caut_put(dip, rdip, in_args) :
	    px_lib_do_poke(dip, rdip, in_args));
}


/*ARGSUSED*/
static int
px_lib_do_peek(dev_info_t *dip, peekpoke_ctlops_t *in_args)
{
	px_t *px_p = DIP_TO_STATE(dip);
	px_pec_t *pec_p = px_p->px_pec_p;
	int err = DDI_SUCCESS;
	on_trap_data_t otd;

	mutex_enter(&pec_p->pec_pokefault_mutex);
	pec_p->pec_safeacc_type = DDI_FM_ERR_PEEK;

	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		uintptr_t tramp = otd.ot_trampoline;

		otd.ot_trampoline = (uintptr_t)&peek_fault;
		err = do_peek(in_args->size, (void *)in_args->dev_addr,
		    (void *)in_args->host_addr);
		otd.ot_trampoline = tramp;
	} else
		err = DDI_FAILURE;

	no_trap();
	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
	mutex_exit(&pec_p->pec_pokefault_mutex);

#ifdef  DEBUG
	if (err == DDI_FAILURE)
		px_peekfault_cnt++;
#endif
	return (err);
}


static int
px_lib_do_caut_get(dev_info_t *dip, peekpoke_ctlops_t *cautacc_ctlops_arg)
{
	size_t size = cautacc_ctlops_arg->size;
	uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
	uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
	size_t repcount = cautacc_ctlops_arg->repcount;
	uint_t flags = cautacc_ctlops_arg->flags;

	px_t *px_p = DIP_TO_STATE(dip);
	px_pec_t *pec_p = px_p->px_pec_p;
	int err = DDI_SUCCESS;

	/*
	 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault
	 * mutex.
	 */
	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);

	pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap;
	pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED;
	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;

	if (repcount == 1) {
		if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
			i_ddi_caut_get(size, (void *)dev_addr,
			    (void *)host_addr);
		} else {
			int i;
			uint8_t *ff_addr = (uint8_t *)host_addr;
			for (i = 0; i < size; i++)
				*ff_addr++ = 0xff;

			err = DDI_FAILURE;
#ifdef  DEBUG
			px_peekfault_cnt++;
#endif
		}
	} else {
		if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
			for (; repcount; repcount--) {
				i_ddi_caut_get(size, (void *)dev_addr,
				    (void *)host_addr);

				host_addr += size;

				if (flags == DDI_DEV_AUTOINCR)
					dev_addr += size;
			}
		} else {
			err = DDI_FAILURE;
#ifdef  DEBUG
			px_peekfault_cnt++;
#endif
		}
	}

	i_ddi_notrap((ddi_acc_handle_t)hp);
	pec_p->pec_ontrap_data = NULL;
	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;

	return (err);
}

/*ARGSUSED*/
int
px_lib_ctlops_peek(dev_info_t *dip, dev_info_t *rdip,
    peekpoke_ctlops_t *in_args, void *result)
{
	result = (void *)in_args->host_addr;
	return (in_args->handle ? px_lib_do_caut_get(dip, in_args) :
	    px_lib_do_peek(dip, in_args));
}

/*
 * implements PPM interface
 */
int
px_lib_pmctl(int cmd, px_t *px_p)
{
	ASSERT((cmd & ~PPMREQ_MASK) == PPMREQ);
	switch (cmd) {
	case PPMREQ_PRE_PWR_OFF:
		/*
		 * Currently there is no device power management for
		 * the root complex (fire). When there is we need to make
		 * sure that it is at full power before trying to send the
		 * PME_Turn_Off message.
		 */
		DBG(DBG_PWR, px_p->px_dip,
		    "ioctl: request to send PME_Turn_Off\n");
		return (px_goto_l23ready(px_p));

	case PPMREQ_PRE_PWR_ON:
		DBG(DBG_PWR, px_p->px_dip, "ioctl: PRE_PWR_ON request\n");
		return (px_pre_pwron_check(px_p));

	case PPMREQ_POST_PWR_ON:
		DBG(DBG_PWR, px_p->px_dip, "ioctl: POST_PWR_ON request\n");
		return (px_goto_l0(px_p));

	default:
		return (DDI_FAILURE);
	}
}

/*
 * Sends a PME_Turn_Off message to put the link in L2/L3 ready state.
 * Called by px_ioctl.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 * 1. Wait for link to be in L1 state (link status reg)
 * 2. Write to PME_Turn_Off reg to broadcast
 * 3. Set timeout
 * 4. If timeout, return failure.
 * 5. If PM_TO_Ack, wait till link is in L2/L3 ready
 */
static int
px_goto_l23ready(px_t *px_p)
{
	pcie_pwr_t	*pwr_p;
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	caddr_t	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
	int		ret = DDI_SUCCESS;
	clock_t		end, timeleft;
	int		mutex_held = 1;

	/* If no PM info, return failure */
	if (!PCIE_PMINFO(px_p->px_dip) ||
	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
		return (DDI_FAILURE);

	mutex_enter(&pwr_p->pwr_lock);
	mutex_enter(&px_p->px_l23ready_lock);
	/* Clear the PME_To_ACK received flag */
	px_p->px_pm_flags &= ~PX_PMETOACK_RECVD;
	/*
	 * When P25 is the downstream device, after receiving
	 * PME_To_ACK, fire will go to Detect state, which causes
	 * the link down event. Inform FMA that this is expected.
	 * In the case of all other cards compliant with the PCI Express
	 * spec, this will happen when the power is re-applied. FMA
	 * code will clear this flag after one instance of LDN. Since
	 * there will not be an LDN event for the spec-compliant cards,
	 * we need to clear the flag after receiving PME_To_ACK.
	 */
	px_p->px_pm_flags |= PX_LDN_EXPECTED;
	if (px_send_pme_turnoff(csr_base) != DDI_SUCCESS) {
		ret = DDI_FAILURE;
		goto l23ready_done;
	}
	px_p->px_pm_flags |= PX_PME_TURNOFF_PENDING;

	end = ddi_get_lbolt() + drv_usectohz(px_pme_to_ack_timeout);
	while (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
		timeleft = cv_timedwait(&px_p->px_l23ready_cv,
		    &px_p->px_l23ready_lock, end);
		/*
		 * cv_timedwait returns -1 when either
		 * 1) it timed out, or
		 * 2) there was a premature wakeup but by the time
		 * cv_timedwait is called again end < lbolt, i.e.
		 * end is in the past, or
		 * 3) end < lbolt was already true by the time we made
		 * the first cv_timedwait call.
		 */
		if (timeleft == -1)
			break;
	}
	if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
		/*
		 * Either we timed out or the interrupt didn't get a
		 * chance to grab the mutex and set the flag.
		 * Release the mutex and delay for some time.
		 * This will 1) give the interrupt a chance to set the
		 * flag, and 2) create a delay between two consecutive
		 * requests.
		 */
		mutex_exit(&px_p->px_l23ready_lock);
		delay(drv_usectohz(50 * PX_MSEC_TO_USEC));
		mutex_held = 0;
		if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
			ret = DDI_FAILURE;
			DBG(DBG_PWR, px_p->px_dip, " Timed out while waiting"
			    " for PME_TO_ACK\n");
		}
	}
	px_p->px_pm_flags &=
	    ~(PX_PME_TURNOFF_PENDING | PX_PMETOACK_RECVD | PX_LDN_EXPECTED);

l23ready_done:
	if (mutex_held)
		mutex_exit(&px_p->px_l23ready_lock);
	/*
	 * Wait till link is in L1 idle, if sending PME_Turn_Off
	 * was successful.
	 */
	if (ret == DDI_SUCCESS) {
		if (px_link_wait4l1idle(csr_base) != DDI_SUCCESS) {
			DBG(DBG_PWR, px_p->px_dip, " Link is not at L1"
			    " even though we received PME_To_ACK.\n");
			/*
			 * Workaround for a hardware bug with P25.
			 * Due to this bug, the link state will be
			 * Detect rather than L1 after the link is
			 * transitioned to L23Ready.  Since we cannot
			 * tell that the link is in L23Ready without
			 * Fire's state being L1_idle, we delay here
			 * just to make sure the link has transitioned
			 * to L23Ready.
			 */
			delay(drv_usectohz(100 * PX_MSEC_TO_USEC));
		}
		pwr_p->pwr_link_lvl = PM_LEVEL_L3;
	}
	mutex_exit(&pwr_p->pwr_lock);
	return (ret);
}

/*
 * Message interrupt handler intended to be shared for both
 * PME and PME_TO_ACK msg handling; currently it only handles
 * the PME_To_ACK message.
 */
uint_t
px_pmeq_intr(caddr_t arg)
{
	px_t	*px_p = (px_t *)arg;

	DBG(DBG_PWR, px_p->px_dip, " PME_To_ACK received \n");
	mutex_enter(&px_p->px_l23ready_lock);
	cv_broadcast(&px_p->px_l23ready_cv);
	if (px_p->px_pm_flags & PX_PME_TURNOFF_PENDING) {
		px_p->px_pm_flags |= PX_PMETOACK_RECVD;
	} else {
		/*
		 * This may be the second ack received. If so,
		 * we should be receiving it during the wait4L1 stage.
		 */
		px_p->px_pmetoack_ignored++;
	}
	mutex_exit(&px_p->px_l23ready_lock);
	return (DDI_INTR_CLAIMED);
}

static int
px_pre_pwron_check(px_t *px_p)
{
	pcie_pwr_t	*pwr_p;

	/* If no PM info, return failure */
	if (!PCIE_PMINFO(px_p->px_dip) ||
	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
		return (DDI_FAILURE);

	/*
	 * For spec-compliant downstream cards, link down
	 * is expected when the device is powered on.
	 */
	px_p->px_pm_flags |= PX_LDN_EXPECTED;
	return (pwr_p->pwr_link_lvl == PM_LEVEL_L3 ? DDI_SUCCESS : DDI_FAILURE);
}

static int
px_goto_l0(px_t *px_p)
{
	pcie_pwr_t	*pwr_p;
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
	int		ret = DDI_SUCCESS;
	uint64_t	time_spent = 0;

	/* If no PM info, return failure */
	if (!PCIE_PMINFO(px_p->px_dip) ||
	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
		return (DDI_FAILURE);

	mutex_enter(&pwr_p->pwr_lock);
	/*
	 * The following link retrain activity will cause LDN and LUP events.
	 * Receiving an LDN prior to receiving an LUP is expected, not an
	 * error, in this case.  Receiving an LUP indicates the link is fully
	 * up and able to support powering up the downstream device; any
	 * further LDN or LUP outside this context is an error.
	 */
	px_p->px_lup_pending = 1;
	if (px_link_retrain(csr_base) != DDI_SUCCESS) {
		ret = DDI_FAILURE;
		goto l0_done;
	}

1917 	/* An LUP event takes on the order of 15ms to occur */
1918 	for (; px_p->px_lup_pending && (time_spent < px_lup_poll_to);
1919 	    time_spent += px_lup_poll_interval)
1920 		drv_usecwait(px_lup_poll_interval);
1921 	if (px_p->px_lup_pending)
1922 		ret = DDI_FAILURE;
1923 l0_done:
1924 	px_enable_detect_quiet(csr_base);
1925 	if (ret == DDI_SUCCESS)
1926 		pwr_p->pwr_link_lvl = PM_LEVEL_L0;
1927 	mutex_exit(&pwr_p->pwr_lock);
1928 	return (ret);
1929 }
1930 
1931 /*
1932  * Extract the driver's binding name to identify which chip we're binding to.
1933  * Whenever a new bus bridge is created, the driver alias entry should be
1934  * added here to identify the device if needed.  If a device isn't added,
1935  * the identity defaults to PX_CHIP_UNIDENTIFIED.
1936  */
1937 static uint32_t
1938 px_identity_init(px_t *px_p)
1939 {
1940 	dev_info_t	*dip = px_p->px_dip;
1941 	char		*name = ddi_binding_name(dip);
1942 	uint32_t	revision = 0;
1943 
1944 	revision = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1945 	    "module-revision#", 0);
1946 
1947 	/* Check for Fire driver binding name */
1948 	if (strcmp(name, "pciex108e,80f0") == 0) {
1949 		DBG(DBG_ATTACH, dip, "px_identity_init: %s%d: "
1950 		    "(FIRE), module-revision %d\n", NAMEINST(dip),
1951 		    revision);
1952 
1953 		return ((revision >= FIRE_MOD_REV_20) ?
1954 		    PX_CHIP_FIRE : PX_CHIP_UNIDENTIFIED);
1955 	}
1956 
1957 	/* Check for Oberon driver binding name */
1958 	if (strcmp(name, "pciex108e,80f8") == 0) {
1959 		DBG(DBG_ATTACH, dip, "px_identity_init: %s%d: "
1960 		    "(OBERON), module-revision %d\n", NAMEINST(dip),
1961 		    revision);
1962 
1963 		return (PX_CHIP_OBERON);
1964 	}
1965 
1966 	DBG(DBG_ATTACH, dip, "%s%d: Unknown PCI Express Host bridge %s %x\n",
1967 	    ddi_driver_name(dip), ddi_get_instance(dip), name, revision);
1968 
1969 	return (PX_CHIP_UNIDENTIFIED);
1970 }
1971 
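/*
 * px_err_add_intr() - Register the error interrupt handler for this
 * fault and enable its ino, targeting the CPU chosen by the interrupt
 * distribution framework.
 */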
1972 int
1973 px_err_add_intr(px_fault_t *px_fault_p)
1974 {
1975 	dev_info_t	*dip = px_fault_p->px_fh_dip;
1976 	px_t		*px_p = DIP_TO_STATE(dip);
1977 
1978 	VERIFY(add_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL,
1979 	    px_fault_p->px_err_func, (caddr_t)px_fault_p, NULL) == 0);
1980 
1981 	px_ib_intr_enable(px_p, intr_dist_cpuid(), px_fault_p->px_intr_ino);
1982 
1983 	return (DDI_SUCCESS);
1984 }
1985 
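/*
 * px_err_rem_intr() - Disable the error ino, waiting for any pending
 * interrupt to complete, then remove the interrupt vector.
 */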
1986 void
1987 px_err_rem_intr(px_fault_t *px_fault_p)
1988 {
1989 	dev_info_t	*dip = px_fault_p->px_fh_dip;
1990 	px_t		*px_p = DIP_TO_STATE(dip);
1991 
1992 	px_ib_intr_disable(px_p->px_ib_p, px_fault_p->px_intr_ino,
1993 	    IB_INTR_WAIT);
1994 
1995 	rem_ivintr(px_fault_p->px_fh_sysino, NULL);
1996 }
1997 
1998 /*
1999  * px_cb_add_intr() - Called from attach(9E) to create the CB if it does not
2000  * yet exist, always add the CB interrupt vector, but enable it only once.
2001  */
2002 int
2003 px_cb_add_intr(px_fault_t *fault_p)
2004 {
2005 	px_t		*px_p = DIP_TO_STATE(fault_p->px_fh_dip);
2006 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
2007 	px_cb_t		*cb_p = (px_cb_t *)px_get_cb(fault_p->px_fh_dip);
2008 	px_cb_list_t	*pxl, *pxl_new;
2009 	cpuid_t		cpuid;
2010 
2011 
2012 	if (cb_p == NULL) {
2013 		cb_p = kmem_zalloc(sizeof (px_cb_t), KM_SLEEP);
2014 		mutex_init(&cb_p->cb_mutex, NULL, MUTEX_DRIVER, NULL);
2015 		cb_p->px_cb_func = px_cb_intr;
2016 		pxu_p->px_cb_p = cb_p;
2017 		px_set_cb(fault_p->px_fh_dip, (uint64_t)cb_p);
2018 
2019 		/* px_lib_dev_init allows only FIRE and OBERON */
2020 		px_err_reg_enable(
2021 		    (pxu_p->chip_type == PX_CHIP_FIRE) ?
2022 		    PX_ERR_JBC : PX_ERR_UBC,
2023 		    pxu_p->px_address[PX_REG_XBC]);
2024 	} else
2025 		pxu_p->px_cb_p = cb_p;
2026 
2027 	mutex_enter(&cb_p->cb_mutex);
2028 
2029 	VERIFY(add_ivintr(fault_p->px_fh_sysino, PX_ERR_PIL,
2030 	    cb_p->px_cb_func, (caddr_t)cb_p, NULL) == 0);
2031 
2032 	if (cb_p->pxl == NULL) {
2033 
2034 		cpuid = intr_dist_cpuid();
2035 		px_ib_intr_enable(px_p, cpuid, fault_p->px_intr_ino);
2036 
2037 		pxl = kmem_zalloc(sizeof (px_cb_list_t), KM_SLEEP);
2038 		pxl->pxp = px_p;
2039 
2040 		cb_p->pxl = pxl;
2041 		cb_p->sysino = fault_p->px_fh_sysino;
2042 		cb_p->cpuid = cpuid;
2043 
2044 	} else {
2045 		/*
2046 		 * Find the last pxl, or stop short on
2047 		 * encountering a duplicate registration,
2048 		 * or both.
2049 		 */
2050 		pxl = cb_p->pxl;
2051 		for (; (pxl->pxp != px_p) && pxl->next; pxl = pxl->next);
2052 		if (pxl->pxp == px_p) {
2053 			cmn_err(CE_WARN, "px_cb_add_intr: reregister sysino "
2054 			    "%lx by px_p 0x%p\n", cb_p->sysino, (void *)px_p);
			/* unwind: drop the lock and the vector added above */
			mutex_exit(&cb_p->cb_mutex);
			rem_ivintr(fault_p->px_fh_sysino, NULL);
2055 			return (DDI_FAILURE);
2056 		}
2057 
2058 		/* add to linked list */
2059 		pxl_new = kmem_zalloc(sizeof (px_cb_list_t), KM_SLEEP);
2060 		pxl_new->pxp = px_p;
2061 		pxl->next = pxl_new;
2062 	}
2063 	cb_p->attachcnt++;
2064 
2065 	mutex_exit(&cb_p->cb_mutex);
2066 
2067 	return (DDI_SUCCESS);
2068 }
2069 
2070 /*
2071  * px_cb_rem_intr() - Called from detach(9E) to remove its CB
2072  * interrupt vector, shift the interrupt proxy role to the next
2073  * available px, or disable the CB interrupt when it is the last one.
2074  */
2075 void
2076 px_cb_rem_intr(px_fault_t *fault_p)
2077 {
2078 	px_t		*px_p = DIP_TO_STATE(fault_p->px_fh_dip), *pxp;
2079 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
2080 	px_cb_t		*cb_p = PX2CB(px_p);
2081 	px_cb_list_t	*pxl, *prev;
2082 	px_fault_t	*f_p;
2083 
2084 	ASSERT(cb_p->pxl);
2085 
2086 	/* De-list the target px, move the next px up */
2087 
2088 	mutex_enter(&cb_p->cb_mutex);
2089 
2090 	pxl = cb_p->pxl;
2091 	if (pxl->pxp == px_p) {
2092 		cb_p->pxl = pxl->next;
2093 	} else {
2094 		prev = pxl;
2095 		pxl = pxl->next;
2096 		for (; pxl && (pxl->pxp != px_p); prev = pxl, pxl = pxl->next);
2097 		if (!pxl) {
2098 			cmn_err(CE_WARN, "px_cb_rem_intr: can't find px_p 0x%p "
2099 			    "in registered CB list.", (void *)px_p);
			/* unwind: drop the lock before bailing out */
			mutex_exit(&cb_p->cb_mutex);
2100 			return;
2101 		}
2102 		prev->next = pxl->next;
2103 	}
2104 	kmem_free(pxl, sizeof (px_cb_list_t));
2105 
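	/*
	 * If the de-listed px was serving as the CB interrupt proxy,
	 * disable its ino and hand the proxy role (and sysino) to the
	 * next px remaining on the list.
	 */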
2106 	if (fault_p->px_fh_sysino == cb_p->sysino) {
2107 		px_ib_intr_disable(px_p->px_ib_p, fault_p->px_intr_ino,
2108 		    IB_INTR_WAIT);
2109 
2110 		if (cb_p->pxl) {
2111 			pxp = cb_p->pxl->pxp;
2112 			f_p = &pxp->px_cb_fault;
2113 			cb_p->sysino = f_p->px_fh_sysino;
2114 
2115 			PX_INTR_ENABLE(pxp->px_dip, cb_p->sysino, cb_p->cpuid);
2116 			(void) px_lib_intr_setstate(pxp->px_dip, cb_p->sysino,
2117 			    INTR_IDLE_STATE);
2118 		}
2119 	}
2120 
2121 	rem_ivintr(fault_p->px_fh_sysino, NULL);
2122 	pxu_p->px_cb_p = NULL;
2123 	cb_p->attachcnt--;
2124 	if (cb_p->pxl) {
2125 		mutex_exit(&cb_p->cb_mutex);
2126 		return;
2127 	}
2128 	mutex_exit(&cb_p->cb_mutex);
2129 
2130 	/* px_lib_dev_init allows only FIRE and OBERON */
2131 	px_err_reg_disable(
2132 	    (pxu_p->chip_type == PX_CHIP_FIRE) ? PX_ERR_JBC : PX_ERR_UBC,
2133 	    pxu_p->px_address[PX_REG_XBC]);
2134 
2135 	mutex_destroy(&cb_p->cb_mutex);
2136 	px_set_cb(fault_p->px_fh_dip, 0ull);
2137 	kmem_free(cb_p, sizeof (px_cb_t));
2138 }
2139 
2140 /*
2141  * px_cb_intr() - sun4u only, CB interrupt dispatcher
2142  */
2143 static uint_t
2144 px_cb_intr(caddr_t arg)
2145 {
2146 	px_cb_t		*cb_p = (px_cb_t *)arg;
2147 	px_cb_list_t	*pxl = cb_p->pxl;
2148 	px_t		*pxp = pxl ? pxl->pxp : NULL;
2149 	px_fault_t	*fault_p;
2150 
2151 	while (pxl && pxp && (pxp->px_state != PX_ATTACHED)) {
2152 		pxl = pxl->next;
2153 		pxp = (pxl) ? pxl->pxp : NULL;
2154 	}
2155 
2156 	if (pxp) {
2157 		fault_p = &pxp->px_cb_fault;
2158 		return (fault_p->px_err_func((caddr_t)fault_p));
2159 	}
2160 	return (DDI_INTR_UNCLAIMED);
2161 }
2162 
2163 /*
2164  * px_cb_intr_redist() - sun4u only, CB interrupt redistribution
2165  */
2166 void
2167 px_cb_intr_redist(px_t	*px_p)
2168 {
2169 	px_fault_t	*f_p = &px_p->px_cb_fault;
2170 	px_cb_t		*cb_p = PX2CB(px_p);
2171 	devino_t	ino = px_p->px_inos[PX_INTR_XBC];
2172 	cpuid_t		cpuid;
2173 
2174 	mutex_enter(&cb_p->cb_mutex);
2175 
2176 	if (cb_p->sysino != f_p->px_fh_sysino) {
2177 		mutex_exit(&cb_p->cb_mutex);
2178 		return;
2179 	}
2180 
2181 	cb_p->cpuid = cpuid = intr_dist_cpuid();
2182 	px_ib_intr_dist_en(px_p->px_dip, cpuid, ino, B_FALSE);
2183 
2184 	mutex_exit(&cb_p->cb_mutex);
2185 }
2186 
2187 #ifdef FMA
2188 void
2189 px_fill_rc_status(px_fault_t *px_fault_p, pciex_rc_error_regs_t *rc_status)
2190 {
2191 	/* populate the rc_status by reading the registers - TBD */
2192 }
2193 #endif /* FMA */
2194 
2195 /*
2196  * Unprotected raw reads/writes of fabric device's config space.
2197  * Only used for temporary PCI-E Fabric Error Handling.
2198  */
2199 uint32_t
2200 px_fab_get(px_t *px_p, pcie_req_id_t bdf, uint16_t offset)
2201 {
2202 	px_ranges_t	*rp = px_p->px_ranges_p;
2203 	uint64_t	range_prop, base_addr;
2204 	int		bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG);
2205 	uint32_t	val;
2206 
2207 	/* Get Fire's Physical Base Address */
2208 	range_prop = px_get_range_prop(px_p, rp, bank);
2209 
2210 	/* Compute the config space address for this bdf and offset */
2211 	base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset);
2212 
2213 	val = ldphysio(base_addr);
2214 
2215 	return (LE_32(val));
2216 }
2217 
2218 void
2219 px_fab_set(px_t *px_p, pcie_req_id_t bdf, uint16_t offset,
2220     uint32_t val)
{
2221 	px_ranges_t	*rp = px_p->px_ranges_p;
2222 	uint64_t	range_prop, base_addr;
2223 	int		bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG);
2224 
2225 	/* Get Fire's Physical Base Address */
2226 	range_prop = px_get_range_prop(px_p, rp, bank);
2227 
2228 	/* Compute the config space address for this bdf and offset */
2229 	base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset);
2230 
2231 	stphysio(base_addr, LE_32(val));
2232 }
2233 
2234 /*
2235  * cpr callback
2236  *
2237  * disable fabric error msg interrupt prior to suspending
2238  * all device drivers; re-enable fabric error msg interrupt
2239  * after all devices are resumed.
2240  */
2241 static boolean_t
2242 px_cpr_callb(void *arg, int code)
2243 {
2244 	px_t		*px_p = (px_t *)arg;
2245 	px_ib_t		*ib_p = px_p->px_ib_p;
2246 	px_pec_t	*pec_p = px_p->px_pec_p;
2247 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
2248 	caddr_t		csr_base;
2249 	devino_t	ce_ino, nf_ino, f_ino;
2250 	px_ib_ino_info_t	*ce_ino_p, *nf_ino_p, *f_ino_p;
2251 	uint64_t	imu_log_enable, imu_intr_enable;
2252 	uint64_t	imu_log_mask, imu_intr_mask;
2253 
2254 	ce_ino = px_msiqid_to_devino(px_p, pec_p->pec_corr_msg_msiq_id);
2255 	nf_ino = px_msiqid_to_devino(px_p, pec_p->pec_non_fatal_msg_msiq_id);
2256 	f_ino = px_msiqid_to_devino(px_p, pec_p->pec_fatal_msg_msiq_id);
2257 	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
2258 
2259 	imu_log_enable = CSR_XR(csr_base, IMU_ERROR_LOG_ENABLE);
2260 	imu_intr_enable = CSR_XR(csr_base, IMU_INTERRUPT_ENABLE);
2261 
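	/*
	 * Masks covering the "message not enabled" log and interrupt
	 * enable bits for fatal, nonfatal and correctable messages.
	 */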
2262 	imu_log_mask = BITMASK(IMU_ERROR_LOG_ENABLE_FATAL_MES_NOT_EN_LOG_EN) |
2263 	    BITMASK(IMU_ERROR_LOG_ENABLE_NONFATAL_MES_NOT_EN_LOG_EN) |
2264 	    BITMASK(IMU_ERROR_LOG_ENABLE_COR_MES_NOT_EN_LOG_EN);
2265 
2266 	imu_intr_mask =
2267 	    BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_S_INT_EN) |
2268 	    BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_S_INT_EN) |
2269 	    BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_S_INT_EN) |
2270 	    BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_P_INT_EN) |
2271 	    BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_P_INT_EN) |
2272 	    BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_P_INT_EN);
2273 
2274 	switch (code) {
2275 	case CB_CODE_CPR_CHKPT:
2276 		/* disable imu rbne on corr/nonfatal/fatal errors */
2277 		CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE,
2278 		    imu_log_enable & (~imu_log_mask));
2279 
2280 		CSR_XS(csr_base, IMU_INTERRUPT_ENABLE,
2281 		    imu_intr_enable & (~imu_intr_mask));
2282 
2283 		/* disable CORR intr mapping */
2284 		px_ib_intr_disable(ib_p, ce_ino, IB_INTR_NOWAIT);
2285 
2286 		/* disable NON FATAL intr mapping */
2287 		px_ib_intr_disable(ib_p, nf_ino, IB_INTR_NOWAIT);
2288 
2289 		/* disable FATAL intr mapping */
2290 		px_ib_intr_disable(ib_p, f_ino, IB_INTR_NOWAIT);
2291 
2292 		break;
2293 
2294 	case CB_CODE_CPR_RESUME:
2295 		mutex_enter(&ib_p->ib_ino_lst_mutex);
2296 
2297 		ce_ino_p = px_ib_locate_ino(ib_p, ce_ino);
2298 		nf_ino_p = px_ib_locate_ino(ib_p, nf_ino);
2299 		f_ino_p = px_ib_locate_ino(ib_p, f_ino);
2300 
2301 		/* enable CORR intr mapping */
2302 		if (ce_ino_p)
2303 			px_ib_intr_enable(px_p, ce_ino_p->ino_cpuid, ce_ino);
2304 		else
2305 			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
2306 			    "reenable PCIe Correctable msg intr.\n");
2307 
2308 		/* enable NON FATAL intr mapping */
2309 		if (nf_ino_p)
2310 			px_ib_intr_enable(px_p, nf_ino_p->ino_cpuid, nf_ino);
2311 		else
2312 			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
2313 			    "reenable PCIe Non Fatal msg intr.\n");
2314 
2315 		/* enable FATAL intr mapping */
2316 		if (f_ino_p)
2317 			px_ib_intr_enable(px_p, f_ino_p->ino_cpuid, f_ino);
2318 		else
2319 			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
2320 			    "reenable PCIe Fatal msg intr.\n");
2321 
2322 		mutex_exit(&ib_p->ib_ino_lst_mutex);
2323 
2324 		/* re-enable corr/nonfatal/fatal "message not enabled" errors */
2325 		CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE, (imu_log_enable |
2326 		    (imu_log_mask & px_imu_log_mask)));
2327 		CSR_XS(csr_base, IMU_INTERRUPT_ENABLE, (imu_intr_enable |
2328 		    (imu_intr_mask & px_imu_intr_mask)));
2329 
2330 		break;
2331 	}
2332 
2333 	return (B_TRUE);
2334 }
2335 
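/*
 * px_get_rng_parent_hi_mask() - Return the mask to apply to the
 * parent-high cell of a ranges entry; the mask is chip dependent.
 */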
2336 uint64_t
2337 px_get_rng_parent_hi_mask(px_t *px_p)
2338 {
2339 	pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
2340 	uint64_t mask;
2341 
2342 	switch (PX_CHIP_TYPE(pxu_p)) {
2343 	case PX_CHIP_OBERON:
2344 		mask = OBERON_RANGE_PROP_MASK;
2345 		break;
2346 	case PX_CHIP_FIRE:
2347 		mask = PX_RANGE_PROP_MASK;
2348 		break;
2349 	default:
2350 		mask = PX_RANGE_PROP_MASK;
2351 	}
2352 
2353 	return (mask);
2354 }
2355 
2356 /*
2357  * Fetch the chip's range property value.
2358  */
2359 uint64_t
2360 px_get_range_prop(px_t *px_p, px_ranges_t *rp, int bank)
2361 {
2362 	uint64_t mask, range_prop;
2363 
2364 	mask = px_get_rng_parent_hi_mask(px_p);
2365 	range_prop = (((uint64_t)(rp[bank].parent_high & mask)) << 32) |
2366 	    rp[bank].parent_low;
2367 
2368 	return (range_prop);
2369 }
2370 
2371 /*
2372  * add cpr callback
2373  */
2374 void
2375 px_cpr_add_callb(px_t *px_p)
2376 {
2377 	px_p->px_cprcb_id = callb_add(px_cpr_callb, (void *)px_p,
2378 	    CB_CL_CPR_POST_USER, "px_cpr");
2379 }
2380 
2381 /*
2382  * remove cpr callback
2383  */
2384 void
2385 px_cpr_rem_callb(px_t *px_p)
2386 {
2387 	(void) callb_delete(px_p->px_cprcb_id);
2388 }
2389 
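/*
 * px_hp_intr() - PCIe hotplug interrupt handler; passes the interrupt
 * on to the PCIe hotplug controller framework.
 */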
2390 /*ARGSUSED*/
2391 static uint_t
2392 px_hp_intr(caddr_t arg1, caddr_t arg2)
2393 {
2394 	px_t *px_p = (px_t *)arg1;
2395 	int rval;
2396 
2397 	rval = pciehpc_intr(px_p->px_dip);
2398 
2399 #ifdef  DEBUG
2400 	if (rval == DDI_INTR_UNCLAIMED)
2401 		cmn_err(CE_WARN, "%s%d: UNCLAIMED intr\n",
2402 		    ddi_driver_name(px_p->px_dip),
2403 		    ddi_get_instance(px_p->px_dip));
2404 #endif
2405 
2406 	return (rval);
2407 }
2408 
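/*
 * px_lib_hotplug_init() - Initialize hotplug support via hvio and, on
 * success, register px_hp_intr() as the handler for the hotplug ino.
 */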
2409 int
2410 px_lib_hotplug_init(dev_info_t *dip, void *arg)
2411 {
2412 	px_t	*px_p = DIP_TO_STATE(dip);
2413 	uint64_t ret;
2414 
2415 	if ((ret = hvio_hotplug_init(dip, arg)) == DDI_SUCCESS) {
2416 		sysino_t sysino;
2417 
2418 		if (px_lib_intr_devino_to_sysino(px_p->px_dip,
2419 		    px_p->px_inos[PX_INTR_HOTPLUG], &sysino) !=
2420 		    DDI_SUCCESS) {
2421 #ifdef	DEBUG
2422 			cmn_err(CE_WARN, "%s%d: devino_to_sysino fails\n",
2423 			    ddi_driver_name(px_p->px_dip),
2424 			    ddi_get_instance(px_p->px_dip));
2425 #endif
2426 			return (DDI_FAILURE);
2427 		}
2428 
2429 		VERIFY(add_ivintr(sysino, PX_PCIEHP_PIL,
2430 		    (intrfunc)px_hp_intr, (caddr_t)px_p, NULL) == 0);
2431 	}
2432 
2433 	return (ret);
2434 }
2435 
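/*
 * px_lib_hotplug_uninit() - Tear down hotplug support and remove the
 * hotplug interrupt vector registered by px_lib_hotplug_init().
 */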
2436 void
2437 px_lib_hotplug_uninit(dev_info_t *dip)
2438 {
2439 	if (hvio_hotplug_uninit(dip) == DDI_SUCCESS) {
2440 		px_t	*px_p = DIP_TO_STATE(dip);
2441 		sysino_t sysino;
2442 
2443 		if (px_lib_intr_devino_to_sysino(px_p->px_dip,
2444 		    px_p->px_inos[PX_INTR_HOTPLUG], &sysino) !=
2445 		    DDI_SUCCESS) {
2446 #ifdef	DEBUG
2447 			cmn_err(CE_WARN, "%s%d: devino_to_sysino fails\n",
2448 			    ddi_driver_name(px_p->px_dip),
2449 			    ddi_get_instance(px_p->px_dip));
2450 #endif
2451 			return;
2452 		}
2453 
2454 		rem_ivintr(sysino, NULL);
2455 	}
2456 }
2457 
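/*
 * px_lib_is_in_drain_state() - Report whether the chip is draining,
 * reading the chip-specific drain status bit (Oberon and Fire keep it
 * in different registers).
 */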
2458 boolean_t
2459 px_lib_is_in_drain_state(px_t *px_p)
2460 {
2461 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
2462 	caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
2463 	uint64_t drain_status;
2464 
2465 	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
2466 		drain_status = CSR_BR(csr_base, DRAIN_CONTROL_STATUS, DRAIN);
2467 	} else {
2468 		drain_status = CSR_BR(csr_base, TLU_STATUS, DRAIN);
2469 	}
2470 
2471 	return (drain_status);
2472 }
2473