xref: /titanic_51/usr/src/uts/sun4u/io/px/px_lib4u.c (revision 45916cd2fec6e79bca5dee0421bd39e3c2910d1e)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/types.h>
29 #include <sys/kmem.h>
30 #include <sys/conf.h>
31 #include <sys/ddi.h>
32 #include <sys/sunddi.h>
33 #include <sys/fm/protocol.h>
34 #include <sys/fm/util.h>
35 #include <sys/modctl.h>
36 #include <sys/disp.h>
37 #include <sys/stat.h>
38 #include <sys/ddi_impldefs.h>
39 #include <sys/vmem.h>
40 #include <sys/iommutsb.h>
41 #include <sys/cpuvar.h>
42 #include <sys/ivintr.h>
43 #include <sys/byteorder.h>
44 #include <sys/hotplug/pci/pciehpc.h>
45 #include <px_obj.h>
46 #include <pcie_pwr.h>
47 #include <px_regs.h>
48 #include <px_csr.h>
49 #include <sys/machsystm.h>
50 #include "px_lib4u.h"
51 #include "px_err.h"
52 
53 #pragma weak jbus_stst_order
54 
55 extern void jbus_stst_order();
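/*
 * jbus_stst_order() is declared weak (see the pragma above) so that this
 * module still loads on CPU modules that do not provide the routine; its
 * address then resolves to NULL, which px_lib_dma_sync() checks before
 * calling it.
 */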
56 
57 ulong_t px_mmu_dvma_end = 0xfffffffful;
58 uint_t px_ranges_phi_mask = 0xfffffffful;
59 
60 static int px_goto_l23ready(px_t *px_p);
61 static int px_goto_l0(px_t *px_p);
62 static int px_pre_pwron_check(px_t *px_p);
63 static uint32_t px_identity_chip(px_t *px_p);
64 static boolean_t px_cpr_callb(void *arg, int code);
65 static uint_t px_cb_intr(caddr_t arg);
66 
67 /*
68  * px_lib_map_regs
69  *
70  * This function is called from the attach routine to map the registers
71  * accessed by this driver.
72  *
73  * used by: px_attach()
74  *
75  * return value: DDI_FAILURE on failure
76  */
77 int
78 px_lib_map_regs(pxu_t *pxu_p, dev_info_t *dip)
79 {
80 	ddi_device_acc_attr_t	attr;
81 	px_reg_bank_t		reg_bank = PX_REG_CSR;
82 
83 	DBG(DBG_ATTACH, dip, "px_lib_map_regs: pxu_p:0x%p, dip 0x%p\n",
84 		pxu_p, dip);
85 
86 	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
87 	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
88 	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
89 
90 	/*
91 	 * PCI CSR Base
92 	 */
93 	if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank],
94 	    0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) {
95 		goto fail;
96 	}
97 
98 	reg_bank++;
99 
100 	/*
101 	 * XBUS CSR Base
102 	 */
103 	if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank],
104 	    0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) {
105 		goto fail;
106 	}
107 
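	/*
	 * Bias the mapped XBC base address back by FIRE_CONTROL_STATUS so
	 * that register offsets defined from the start of the XBC block (as
	 * used by the CSR_XR/CSR_XS accessors elsewhere in this file) can be
	 * added directly to px_address[PX_REG_XBC].  This assumes the
	 * underlying "reg" entry for this bank begins at the Control/Status
	 * register.
	 */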
108 	pxu_p->px_address[reg_bank] -= FIRE_CONTROL_STATUS;
109 
110 done:
111 	for (; reg_bank >= PX_REG_CSR; reg_bank--) {
112 		DBG(DBG_ATTACH, dip, "reg_bank 0x%x address 0x%p\n",
113 		    reg_bank, pxu_p->px_address[reg_bank]);
114 	}
115 
116 	return (DDI_SUCCESS);
117 
118 fail:
119 	cmn_err(CE_WARN, "%s%d: unable to map reg entry %d\n",
120 	    ddi_driver_name(dip), ddi_get_instance(dip), reg_bank);
121 
122 	for (reg_bank--; reg_bank >= PX_REG_CSR; reg_bank--) {
123 		pxu_p->px_address[reg_bank] = NULL;
124 		ddi_regs_map_free(&pxu_p->px_ac[reg_bank]);
125 	}
126 
127 	return (DDI_FAILURE);
128 }
129 
130 /*
131  * px_lib_unmap_regs:
132  *
133  * This routine unmaps the registers mapped by px_lib_map_regs().
134  *
135  * used by: px_detach(), and error conditions in px_attach()
136  *
137  * return value: none
138  */
139 void
140 px_lib_unmap_regs(pxu_t *pxu_p)
141 {
142 	int i;
143 
144 	for (i = 0; i < PX_REG_MAX; i++) {
145 		if (pxu_p->px_ac[i])
146 			ddi_regs_map_free(&pxu_p->px_ac[i]);
147 	}
148 }
149 
150 int
151 px_lib_dev_init(dev_info_t *dip, devhandle_t *dev_hdl)
152 {
153 	px_t		*px_p = DIP_TO_STATE(dip);
154 	caddr_t		xbc_csr_base, csr_base;
155 	px_dvma_range_prop_t	px_dvma_range;
156 	uint32_t	chip_id;
157 	pxu_t		*pxu_p;
158 
159 	DBG(DBG_ATTACH, dip, "px_lib_dev_init: dip 0x%p\n", dip);
160 
161 	if ((chip_id = px_identity_chip(px_p)) == PX_CHIP_UNIDENTIFIED)
162 		return (DDI_FAILURE);
163 
164 	switch (chip_id) {
165 	case FIRE_VER_10:
166 		cmn_err(CE_WARN, "FIRE Hardware Version 1.0 is not supported");
167 		return (DDI_FAILURE);
168 	case FIRE_VER_20:
169 		DBG(DBG_ATTACH, dip, "FIRE Hardware Version 2.0\n");
170 		break;
171 	default:
172 		cmn_err(CE_WARN, "%s%d: FIRE Hardware Version Unknown\n",
173 		    ddi_driver_name(dip), ddi_get_instance(dip));
174 		return (DDI_FAILURE);
175 	}
176 
177 	/*
178 	 * Allocate platform specific structure and link it to
179 	 * the px state structure.
180 	 */
181 	pxu_p = kmem_zalloc(sizeof (pxu_t), KM_SLEEP);
182 	pxu_p->chip_id = chip_id;
183 	pxu_p->portid  = ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
184 	    "portid", -1);
185 
186 	/* Map in the registers */
187 	if (px_lib_map_regs(pxu_p, dip) == DDI_FAILURE) {
188 		kmem_free(pxu_p, sizeof (pxu_t));
189 
190 		return (DDI_FAILURE);
191 	}
192 
193 	xbc_csr_base = (caddr_t)pxu_p->px_address[PX_REG_XBC];
194 	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
195 
196 	pxu_p->tsb_cookie = iommu_tsb_alloc(pxu_p->portid);
197 	pxu_p->tsb_size = iommu_tsb_cookie_to_size(pxu_p->tsb_cookie);
198 	pxu_p->tsb_vaddr = iommu_tsb_cookie_to_va(pxu_p->tsb_cookie);
199 
200 	/*
201 	 * Create "virtual-dma" property to support child devices
202 	 * needing to know DVMA range.
203 	 */
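	/*
	 * A sketch of the arithmetic below, assuming 8-byte IOMMU TSB
	 * entries that each map one MMU page: (tsb_size >> 3) is the number
	 * of TSB entries, so the DVMA window covers that many pages and
	 * ends at px_mmu_dvma_end.  For example, a 128KB TSB with 8KB pages
	 * would give 16384 entries and a 128MB window.
	 */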
204 	px_dvma_range.dvma_base = (uint32_t)px_mmu_dvma_end + 1
205 	    - ((pxu_p->tsb_size >> 3) << MMU_PAGE_SHIFT);
206 	px_dvma_range.dvma_len = (uint32_t)
207 	    px_mmu_dvma_end - px_dvma_range.dvma_base + 1;
208 
209 	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
210 		"virtual-dma", (caddr_t)&px_dvma_range,
211 		sizeof (px_dvma_range_prop_t));
212 	/*
213 	 * Initialize all Fire hardware-specific blocks.
214 	 */
215 	hvio_cb_init(xbc_csr_base, pxu_p);
216 	hvio_ib_init(csr_base, pxu_p);
217 	hvio_pec_init(csr_base, pxu_p);
218 	hvio_mmu_init(csr_base, pxu_p);
219 
220 	px_p->px_plat_p = (void *)pxu_p;
221 
222 	/*
223 	 * Initialize all the interrupt handlers
224 	 */
225 	px_err_reg_enable(px_p, PX_ERR_JBC);
226 	px_err_reg_enable(px_p, PX_ERR_MMU);
227 	px_err_reg_enable(px_p, PX_ERR_IMU);
228 	px_err_reg_enable(px_p, PX_ERR_TLU_UE);
229 	px_err_reg_enable(px_p, PX_ERR_TLU_CE);
230 	px_err_reg_enable(px_p, PX_ERR_TLU_OE);
231 	px_err_reg_enable(px_p, PX_ERR_ILU);
232 	px_err_reg_enable(px_p, PX_ERR_LPU_LINK);
233 	px_err_reg_enable(px_p, PX_ERR_LPU_PHY);
234 	px_err_reg_enable(px_p, PX_ERR_LPU_RX);
235 	px_err_reg_enable(px_p, PX_ERR_LPU_TX);
236 	px_err_reg_enable(px_p, PX_ERR_LPU_LTSSM);
237 	px_err_reg_enable(px_p, PX_ERR_LPU_GIGABLZ);
238 
239 	/* Initialize device handle */
240 	*dev_hdl = (devhandle_t)csr_base;
241 
242 	DBG(DBG_ATTACH, dip, "px_lib_dev_init: dev_hdl 0x%llx\n", *dev_hdl);
243 
244 	return (DDI_SUCCESS);
245 }
246 
247 int
248 px_lib_dev_fini(dev_info_t *dip)
249 {
250 	px_t	*px_p = DIP_TO_STATE(dip);
251 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
252 
253 	DBG(DBG_DETACH, dip, "px_lib_dev_fini: dip 0x%p\n", dip);
254 
255 	/*
256 	 * Deinitialize all the interrupt handlers
257 	 */
258 	px_err_reg_disable(px_p, PX_ERR_JBC);
259 	px_err_reg_disable(px_p, PX_ERR_MMU);
260 	px_err_reg_disable(px_p, PX_ERR_IMU);
261 	px_err_reg_disable(px_p, PX_ERR_TLU_UE);
262 	px_err_reg_disable(px_p, PX_ERR_TLU_CE);
263 	px_err_reg_disable(px_p, PX_ERR_TLU_OE);
264 	px_err_reg_disable(px_p, PX_ERR_ILU);
265 	px_err_reg_disable(px_p, PX_ERR_LPU_LINK);
266 	px_err_reg_disable(px_p, PX_ERR_LPU_PHY);
267 	px_err_reg_disable(px_p, PX_ERR_LPU_RX);
268 	px_err_reg_disable(px_p, PX_ERR_LPU_TX);
269 	px_err_reg_disable(px_p, PX_ERR_LPU_LTSSM);
270 	px_err_reg_disable(px_p, PX_ERR_LPU_GIGABLZ);
271 
272 	iommu_tsb_free(pxu_p->tsb_cookie);
273 
274 	px_lib_unmap_regs((pxu_t *)px_p->px_plat_p);
275 	kmem_free(px_p->px_plat_p, sizeof (pxu_t));
276 	px_p->px_plat_p = NULL;
277 
278 	return (DDI_SUCCESS);
279 }
280 
281 /*ARGSUSED*/
282 int
283 px_lib_intr_devino_to_sysino(dev_info_t *dip, devino_t devino,
284     sysino_t *sysino)
285 {
286 	px_t	*px_p = DIP_TO_STATE(dip);
287 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
288 	uint64_t	ret;
289 
290 	DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: dip 0x%p "
291 	    "devino 0x%x\n", dip, devino);
292 
293 	if ((ret = hvio_intr_devino_to_sysino(DIP_TO_HANDLE(dip),
294 	    pxu_p, devino, sysino)) != H_EOK) {
295 		DBG(DBG_LIB_INT, dip,
296 		    "hvio_intr_devino_to_sysino failed, ret 0x%lx\n", ret);
297 		return (DDI_FAILURE);
298 	}
299 
300 	DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: sysino 0x%llx\n",
301 	    *sysino);
302 
303 	return (DDI_SUCCESS);
304 }
305 
306 /*ARGSUSED*/
307 int
308 px_lib_intr_getvalid(dev_info_t *dip, sysino_t sysino,
309     intr_valid_state_t *intr_valid_state)
310 {
311 	uint64_t	ret;
312 
313 	DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: dip 0x%p sysino 0x%llx\n",
314 	    dip, sysino);
315 
316 	if ((ret = hvio_intr_getvalid(DIP_TO_HANDLE(dip),
317 	    sysino, intr_valid_state)) != H_EOK) {
318 		DBG(DBG_LIB_INT, dip, "hvio_intr_getvalid failed, ret 0x%lx\n",
319 		    ret);
320 		return (DDI_FAILURE);
321 	}
322 
323 	DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: intr_valid_state 0x%x\n",
324 	    *intr_valid_state);
325 
326 	return (DDI_SUCCESS);
327 }
328 
329 /*ARGSUSED*/
330 int
331 px_lib_intr_setvalid(dev_info_t *dip, sysino_t sysino,
332     intr_valid_state_t intr_valid_state)
333 {
334 	uint64_t	ret;
335 
336 	DBG(DBG_LIB_INT, dip, "px_lib_intr_setvalid: dip 0x%p sysino 0x%llx "
337 	    "intr_valid_state 0x%x\n", dip, sysino, intr_valid_state);
338 
339 	if ((ret = hvio_intr_setvalid(DIP_TO_HANDLE(dip),
340 	    sysino, intr_valid_state)) != H_EOK) {
341 		DBG(DBG_LIB_INT, dip, "hvio_intr_setvalid failed, ret 0x%lx\n",
342 		    ret);
343 		return (DDI_FAILURE);
344 	}
345 
346 	return (DDI_SUCCESS);
347 }
348 
349 /*ARGSUSED*/
350 int
351 px_lib_intr_getstate(dev_info_t *dip, sysino_t sysino,
352     intr_state_t *intr_state)
353 {
354 	uint64_t	ret;
355 
356 	DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: dip 0x%p sysino 0x%llx\n",
357 	    dip, sysino);
358 
359 	if ((ret = hvio_intr_getstate(DIP_TO_HANDLE(dip),
360 	    sysino, intr_state)) != H_EOK) {
361 		DBG(DBG_LIB_INT, dip, "hvio_intr_getstate failed, ret 0x%lx\n",
362 		    ret);
363 		return (DDI_FAILURE);
364 	}
365 
366 	DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: intr_state 0x%x\n",
367 	    *intr_state);
368 
369 	return (DDI_SUCCESS);
370 }
371 
372 /*ARGSUSED*/
373 int
374 px_lib_intr_setstate(dev_info_t *dip, sysino_t sysino,
375     intr_state_t intr_state)
376 {
377 	uint64_t	ret;
378 
379 	DBG(DBG_LIB_INT, dip, "px_lib_intr_setstate: dip 0x%p sysino 0x%llx "
380 	    "intr_state 0x%x\n", dip, sysino, intr_state);
381 
382 	if ((ret = hvio_intr_setstate(DIP_TO_HANDLE(dip),
383 	    sysino, intr_state)) != H_EOK) {
384 		DBG(DBG_LIB_INT, dip, "hvio_intr_setstate failed, ret 0x%lx\n",
385 		    ret);
386 		return (DDI_FAILURE);
387 	}
388 
389 	return (DDI_SUCCESS);
390 }
391 
392 /*ARGSUSED*/
393 int
394 px_lib_intr_gettarget(dev_info_t *dip, sysino_t sysino, cpuid_t *cpuid)
395 {
396 	uint64_t	ret;
397 
398 	DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: dip 0x%p sysino 0x%llx\n",
399 	    dip, sysino);
400 
401 	if ((ret = hvio_intr_gettarget(DIP_TO_HANDLE(dip),
402 	    sysino, cpuid)) != H_EOK) {
403 		DBG(DBG_LIB_INT, dip, "hvio_intr_gettarget failed, ret 0x%lx\n",
404 		    ret);
405 		return (DDI_FAILURE);
406 	}
407 
408 	DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: cpuid 0x%x\n", *cpuid);
409 
410 	return (DDI_SUCCESS);
411 }
412 
413 /*ARGSUSED*/
414 int
415 px_lib_intr_settarget(dev_info_t *dip, sysino_t sysino, cpuid_t cpuid)
416 {
417 	uint64_t	ret;
418 
419 	DBG(DBG_LIB_INT, dip, "px_lib_intr_settarget: dip 0x%p sysino 0x%llx "
420 	    "cpuid 0x%x\n", dip, sysino, cpuid);
421 
422 	if ((ret = hvio_intr_settarget(DIP_TO_HANDLE(dip),
423 	    sysino, cpuid)) != H_EOK) {
424 		DBG(DBG_LIB_INT, dip, "hvio_intr_settarget failed, ret 0x%lx\n",
425 		    ret);
426 		return (DDI_FAILURE);
427 	}
428 
429 	return (DDI_SUCCESS);
430 }
431 
432 /*ARGSUSED*/
433 int
434 px_lib_intr_reset(dev_info_t *dip)
435 {
436 	devino_t	ino;
437 	sysino_t	sysino;
438 
439 	DBG(DBG_LIB_INT, dip, "px_lib_intr_reset: dip 0x%p\n", dip);
440 
441 	/* Reset all Interrupts */
442 	for (ino = 0; ino < INTERRUPT_MAPPING_ENTRIES; ino++) {
443 		if (px_lib_intr_devino_to_sysino(dip, ino,
444 		    &sysino) != DDI_SUCCESS)
445 			return (BF_FATAL);
446 
447 		if (px_lib_intr_setstate(dip, sysino,
448 		    INTR_IDLE_STATE) != DDI_SUCCESS)
449 			return (BF_FATAL);
450 	}
451 
452 	return (BF_NONE);
453 }
454 
455 /*ARGSUSED*/
456 int
457 px_lib_iommu_map(dev_info_t *dip, tsbid_t tsbid, pages_t pages,
458     io_attributes_t attr, void *addr, size_t pfn_index, int flags)
459 {
460 	px_t		*px_p = DIP_TO_STATE(dip);
461 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
462 	uint64_t	ret;
463 
464 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: dip 0x%p tsbid 0x%llx "
465 	    "pages 0x%x attr 0x%x addr 0x%p pfn_index 0x%llx flags 0x%x\n",
466 	    dip, tsbid, pages, attr, addr, pfn_index, flags);
467 
468 	if ((ret = hvio_iommu_map(px_p->px_dev_hdl, pxu_p, tsbid, pages,
469 	    attr, addr, pfn_index, flags)) != H_EOK) {
470 		DBG(DBG_LIB_DMA, dip,
471 		    "px_lib_iommu_map failed, ret 0x%lx\n", ret);
472 		return (DDI_FAILURE);
473 	}
474 
475 	return (DDI_SUCCESS);
476 }
477 
478 /*ARGSUSED*/
479 int
480 px_lib_iommu_demap(dev_info_t *dip, tsbid_t tsbid, pages_t pages)
481 {
482 	px_t		*px_p = DIP_TO_STATE(dip);
483 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
484 	uint64_t	ret;
485 
486 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_demap: dip 0x%p tsbid 0x%llx "
487 	    "pages 0x%x\n", dip, tsbid, pages);
488 
489 	if ((ret = hvio_iommu_demap(px_p->px_dev_hdl, pxu_p, tsbid, pages))
490 	    != H_EOK) {
491 		DBG(DBG_LIB_DMA, dip,
492 		    "px_lib_iommu_demap failed, ret 0x%lx\n", ret);
493 
494 		return (DDI_FAILURE);
495 	}
496 
497 	return (DDI_SUCCESS);
498 }
499 
500 /*ARGSUSED*/
501 int
502 px_lib_iommu_getmap(dev_info_t *dip, tsbid_t tsbid, io_attributes_t *attr_p,
503     r_addr_t *r_addr_p)
504 {
505 	px_t	*px_p = DIP_TO_STATE(dip);
506 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
507 	uint64_t	ret;
508 
509 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: dip 0x%p tsbid 0x%llx\n",
510 	    dip, tsbid);
511 
512 	if ((ret = hvio_iommu_getmap(DIP_TO_HANDLE(dip), pxu_p, tsbid,
513 	    attr_p, r_addr_p)) != H_EOK) {
514 		DBG(DBG_LIB_DMA, dip,
515 		    "hvio_iommu_getmap failed, ret 0x%lx\n", ret);
516 
517 		return ((ret == H_ENOMAP) ? DDI_DMA_NOMAPPING : DDI_FAILURE);
518 	}
519 
520 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: attr 0x%x r_addr 0x%llx\n",
521 	    *attr_p, *r_addr_p);
522 
523 	return (DDI_SUCCESS);
524 }
525 
526 
527 /*
528  * Checks DMA attributes against the system bypass range.
529  * The bypass range is determined by the hardware; return it so the
530  * common code can do generic checking against it.
531  */
532 /*ARGSUSED*/
533 int
534 px_lib_dma_bypass_rngchk(ddi_dma_attr_t *attr_p, uint64_t *lo_p, uint64_t *hi_p)
535 {
536 	*lo_p = MMU_BYPASS_BASE;
537 	*hi_p = MMU_BYPASS_END;
538 
539 	return (DDI_SUCCESS);
540 }
541 
542 
543 /*ARGSUSED*/
544 int
545 px_lib_iommu_getbypass(dev_info_t *dip, r_addr_t ra, io_attributes_t attr,
546     io_addr_t *io_addr_p)
547 {
548 	uint64_t	ret;
549 
550 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: dip 0x%p ra 0x%llx "
551 	    "attr 0x%x\n", dip, ra, attr);
552 
553 	if ((ret = hvio_iommu_getbypass(DIP_TO_HANDLE(dip), ra, attr,
554 	    io_addr_p)) != H_EOK) {
555 		DBG(DBG_LIB_DMA, dip,
556 		    "hvio_iommu_getbypass failed, ret 0x%lx\n", ret);
557 		return (DDI_FAILURE);
558 	}
559 
560 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: io_addr 0x%llx\n",
561 	    *io_addr_p);
562 
563 	return (DDI_SUCCESS);
564 }
565 
566 /*
567  * bus dma sync entry point.
568  */
569 /*ARGSUSED*/
570 int
571 px_lib_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
572     off_t off, size_t len, uint_t cache_flags)
573 {
574 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
575 
576 	DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: dip 0x%p rdip 0x%p "
577 	    "handle 0x%llx off 0x%x len 0x%x flags 0x%x\n",
578 	    dip, rdip, handle, off, len, cache_flags);
579 
580 	/*
581 	 * jbus_stst_order is found only in certain cpu modules.
582 	 * Just return success if not present.
583 	 */
584 	if (&jbus_stst_order == NULL)
585 		return (DDI_SUCCESS);
586 
587 	if (!(mp->dmai_flags & PX_DMAI_FLAGS_INUSE)) {
588 		cmn_err(CE_WARN, "%s%d: Unbound dma handle %p.",
589 		    ddi_driver_name(rdip), ddi_get_instance(rdip), (void *)mp);
590 
591 		return (DDI_FAILURE);
592 	}
593 
594 	if (mp->dmai_flags & PX_DMAI_FLAGS_NOSYNC)
595 		return (DDI_SUCCESS);
596 
597 	/*
598 	 * No flush needed when sending data from memory to device.
599 	 * Nothing to do to "sync" memory to what device would already see.
600 	 */
601 	if (!(mp->dmai_rflags & DDI_DMA_READ) ||
602 	    ((cache_flags & PX_DMA_SYNC_DDI_FLAGS) == DDI_DMA_SYNC_FORDEV))
603 		return (DDI_SUCCESS);
604 
605 	/*
606 	 * Perform the necessary CPU workaround to ensure JBus ordering:
607 	 * the CPU's internal "invalidate FIFOs" are flushed.
608 	 */
609 
610 #if !defined(lint)
611 	kpreempt_disable();
612 #endif
613 	jbus_stst_order();
614 #if !defined(lint)
615 	kpreempt_enable();
616 #endif
617 	return (DDI_SUCCESS);
618 }
619 
620 /*
621  * MSIQ Functions:
622  */
623 /*ARGSUSED*/
624 int
625 px_lib_msiq_init(dev_info_t *dip)
626 {
627 	px_t		*px_p = DIP_TO_STATE(dip);
628 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
629 	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
630 	caddr_t		msiq_addr;
631 	px_dvma_addr_t	pg_index;
632 	size_t		size;
633 	int		ret;
634 
635 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_init: dip 0x%p\n", dip);
636 
637 	/*
638 	 * Map the EQ memory into the Fire MMU (has to be 512KB aligned)
639 	 * and then initialize the base address register.
640 	 *
641 	 * Allocate entries from Fire IOMMU so that the resulting address
642 	 * is properly aligned.  Calculate the index of the first allocated
643 	 * entry.  Note: The size of the mapping is assumed to be a multiple
644 	 * of the page size.
645 	 */
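	/*
	 * A sketch of the steps below, under the assumptions stated above:
	 * round msiq_buf_p up to the next MMU page boundary, carve a
	 * 512KB-aligned chunk of DVMA space out of the IOMMU map, and map
	 * the page-aligned buffer at that DVMA address so that the EQ base
	 * address register can be programmed with a properly aligned value.
	 */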
646 	msiq_addr = (caddr_t)(((uint64_t)msiq_state_p->msiq_buf_p +
647 	    (MMU_PAGE_SIZE - 1)) >> MMU_PAGE_SHIFT << MMU_PAGE_SHIFT);
648 
649 	size = msiq_state_p->msiq_cnt *
650 	    msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);
651 
652 	pxu_p->msiq_mapped_p = vmem_xalloc(px_p->px_mmu_p->mmu_dvma_map,
653 	    size, (512 * 1024), 0, 0, NULL, NULL, VM_NOSLEEP | VM_BESTFIT);
654 
655 	if (pxu_p->msiq_mapped_p == NULL)
656 		return (DDI_FAILURE);
657 
658 	pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p,
659 	    MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p));
660 
661 	if ((ret = px_lib_iommu_map(px_p->px_dip, PCI_TSBID(0, pg_index),
662 	    MMU_BTOP(size), PCI_MAP_ATTR_WRITE, (void *)msiq_addr, 0,
663 	    MMU_MAP_BUF)) != DDI_SUCCESS) {
664 		DBG(DBG_LIB_MSIQ, dip,
665 		    "px_lib_iommu_map failed, ret 0x%x\n", ret);
666 
667 		(void) px_lib_msiq_fini(dip);
668 		return (DDI_FAILURE);
669 	}
670 
671 	(void) hvio_msiq_init(DIP_TO_HANDLE(dip), pxu_p);
672 
673 	return (DDI_SUCCESS);
674 }
675 
676 /*ARGSUSED*/
677 int
678 px_lib_msiq_fini(dev_info_t *dip)
679 {
680 	px_t		*px_p = DIP_TO_STATE(dip);
681 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
682 	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
683 	px_dvma_addr_t	pg_index;
684 	size_t		size;
685 
686 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_fini: dip 0x%p\n", dip);
687 
688 	/*
689 	 * Unmap and free the EQ memory that had been mapped
690 	 * into the Fire IOMMU.
691 	 */
692 	size = msiq_state_p->msiq_cnt *
693 	    msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);
694 
695 	pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p,
696 	    MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p));
697 
698 	(void) px_lib_iommu_demap(px_p->px_dip,
699 	    PCI_TSBID(0, pg_index), MMU_BTOP(size));
700 
701 	/* Free the entries from the Fire MMU */
702 	vmem_xfree(px_p->px_mmu_p->mmu_dvma_map,
703 	    (void *)pxu_p->msiq_mapped_p, size);
704 
705 	return (DDI_SUCCESS);
706 }
707 
708 /*ARGSUSED*/
709 int
710 px_lib_msiq_info(dev_info_t *dip, msiqid_t msiq_id, r_addr_t *ra_p,
711     uint_t *msiq_rec_cnt_p)
712 {
713 	px_t		*px_p = DIP_TO_STATE(dip);
714 	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
715 	uint64_t	*msiq_addr;
716 	size_t		msiq_size;
717 
718 	DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: dip 0x%p msiq_id 0x%x\n",
719 	    dip, msiq_id);
720 
721 	msiq_addr = (uint64_t *)(((uint64_t)msiq_state_p->msiq_buf_p +
722 	    (MMU_PAGE_SIZE - 1)) >> MMU_PAGE_SHIFT << MMU_PAGE_SHIFT);
723 	msiq_size = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);
724 	*ra_p = (r_addr_t)((caddr_t)msiq_addr + (msiq_id * msiq_size));
725 
726 	*msiq_rec_cnt_p = msiq_state_p->msiq_rec_cnt;
727 
728 	DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: ra_p 0x%llx msiq_rec_cnt 0x%x\n",
729 	    *ra_p, *msiq_rec_cnt_p);
730 
731 	return (DDI_SUCCESS);
732 }
733 
734 /*ARGSUSED*/
735 int
736 px_lib_msiq_getvalid(dev_info_t *dip, msiqid_t msiq_id,
737     pci_msiq_valid_state_t *msiq_valid_state)
738 {
739 	uint64_t	ret;
740 
741 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: dip 0x%p msiq_id 0x%x\n",
742 	    dip, msiq_id);
743 
744 	if ((ret = hvio_msiq_getvalid(DIP_TO_HANDLE(dip),
745 	    msiq_id, msiq_valid_state)) != H_EOK) {
746 		DBG(DBG_LIB_MSIQ, dip,
747 		    "hvio_msiq_getvalid failed, ret 0x%lx\n", ret);
748 		return (DDI_FAILURE);
749 	}
750 
751 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: msiq_valid_state 0x%x\n",
752 	    *msiq_valid_state);
753 
754 	return (DDI_SUCCESS);
755 }
756 
757 /*ARGSUSED*/
758 int
759 px_lib_msiq_setvalid(dev_info_t *dip, msiqid_t msiq_id,
760     pci_msiq_valid_state_t msiq_valid_state)
761 {
762 	uint64_t	ret;
763 
764 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setvalid: dip 0x%p msiq_id 0x%x "
765 	    "msiq_valid_state 0x%x\n", dip, msiq_id, msiq_valid_state);
766 
767 	if ((ret = hvio_msiq_setvalid(DIP_TO_HANDLE(dip),
768 	    msiq_id, msiq_valid_state)) != H_EOK) {
769 		DBG(DBG_LIB_MSIQ, dip,
770 		    "hvio_msiq_setvalid failed, ret 0x%lx\n", ret);
771 		return (DDI_FAILURE);
772 	}
773 
774 	return (DDI_SUCCESS);
775 }
776 
777 /*ARGSUSED*/
778 int
779 px_lib_msiq_getstate(dev_info_t *dip, msiqid_t msiq_id,
780     pci_msiq_state_t *msiq_state)
781 {
782 	uint64_t	ret;
783 
784 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: dip 0x%p msiq_id 0x%x\n",
785 	    dip, msiq_id);
786 
787 	if ((ret = hvio_msiq_getstate(DIP_TO_HANDLE(dip),
788 	    msiq_id, msiq_state)) != H_EOK) {
789 		DBG(DBG_LIB_MSIQ, dip,
790 		    "hvio_msiq_getstate failed, ret 0x%lx\n", ret);
791 		return (DDI_FAILURE);
792 	}
793 
794 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: msiq_state 0x%x\n",
795 	    *msiq_state);
796 
797 	return (DDI_SUCCESS);
798 }
799 
800 /*ARGSUSED*/
801 int
802 px_lib_msiq_setstate(dev_info_t *dip, msiqid_t msiq_id,
803     pci_msiq_state_t msiq_state)
804 {
805 	uint64_t	ret;
806 
807 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setstate: dip 0x%p msiq_id 0x%x "
808 	    "msiq_state 0x%x\n", dip, msiq_id, msiq_state);
809 
810 	if ((ret = hvio_msiq_setstate(DIP_TO_HANDLE(dip),
811 	    msiq_id, msiq_state)) != H_EOK) {
812 		DBG(DBG_LIB_MSIQ, dip,
813 		    "hvio_msiq_setstate failed, ret 0x%lx\n", ret);
814 		return (DDI_FAILURE);
815 	}
816 
817 	return (DDI_SUCCESS);
818 }
819 
820 /*ARGSUSED*/
821 int
822 px_lib_msiq_gethead(dev_info_t *dip, msiqid_t msiq_id,
823     msiqhead_t *msiq_head)
824 {
825 	uint64_t	ret;
826 
827 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: dip 0x%p msiq_id 0x%x\n",
828 	    dip, msiq_id);
829 
830 	if ((ret = hvio_msiq_gethead(DIP_TO_HANDLE(dip),
831 	    msiq_id, msiq_head)) != H_EOK) {
832 		DBG(DBG_LIB_MSIQ, dip,
833 		    "hvio_msiq_gethead failed, ret 0x%lx\n", ret);
834 		return (DDI_FAILURE);
835 	}
836 
837 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: msiq_head 0x%x\n",
838 	    *msiq_head);
839 
840 	return (DDI_SUCCESS);
841 }
842 
843 /*ARGSUSED*/
844 int
845 px_lib_msiq_sethead(dev_info_t *dip, msiqid_t msiq_id,
846     msiqhead_t msiq_head)
847 {
848 	uint64_t	ret;
849 
850 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_sethead: dip 0x%p msiq_id 0x%x "
851 	    "msiq_head 0x%x\n", dip, msiq_id, msiq_head);
852 
853 	if ((ret = hvio_msiq_sethead(DIP_TO_HANDLE(dip),
854 	    msiq_id, msiq_head)) != H_EOK) {
855 		DBG(DBG_LIB_MSIQ, dip,
856 		    "hvio_msiq_sethead failed, ret 0x%lx\n", ret);
857 		return (DDI_FAILURE);
858 	}
859 
860 	return (DDI_SUCCESS);
861 }
862 
863 /*ARGSUSED*/
864 int
865 px_lib_msiq_gettail(dev_info_t *dip, msiqid_t msiq_id,
866     msiqtail_t *msiq_tail)
867 {
868 	uint64_t	ret;
869 
870 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: dip 0x%p msiq_id 0x%x\n",
871 	    dip, msiq_id);
872 
873 	if ((ret = hvio_msiq_gettail(DIP_TO_HANDLE(dip),
874 	    msiq_id, msiq_tail)) != H_EOK) {
875 		DBG(DBG_LIB_MSIQ, dip,
876 		    "hvio_msiq_gettail failed, ret 0x%lx\n", ret);
877 		return (DDI_FAILURE);
878 	}
879 
880 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: msiq_tail 0x%x\n",
881 	    *msiq_tail);
882 
883 	return (DDI_SUCCESS);
884 }
885 
886 /*ARGSUSED*/
887 void
888 px_lib_get_msiq_rec(dev_info_t *dip, px_msiq_t *msiq_p, msiq_rec_t *msiq_rec_p)
889 {
890 	eq_rec_t	*eq_rec_p = (eq_rec_t *)msiq_p->msiq_curr;
891 
892 	DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: dip 0x%p eq_rec_p 0x%p\n",
893 	    dip, eq_rec_p);
894 
895 	if (!eq_rec_p->eq_rec_fmt_type) {
896 		/* Set msiq_rec_type to zero */
897 		msiq_rec_p->msiq_rec_type = 0;
898 
899 		return;
900 	}
901 
902 	DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: EQ RECORD, "
903 	    "eq_rec_rid 0x%llx eq_rec_fmt_type 0x%llx "
904 	    "eq_rec_len 0x%llx eq_rec_addr0 0x%llx "
905 	    "eq_rec_addr1 0x%llx eq_rec_data0 0x%llx "
906 	    "eq_rec_data1 0x%llx\n", eq_rec_p->eq_rec_rid,
907 	    eq_rec_p->eq_rec_fmt_type, eq_rec_p->eq_rec_len,
908 	    eq_rec_p->eq_rec_addr0, eq_rec_p->eq_rec_addr1,
909 	    eq_rec_p->eq_rec_data0, eq_rec_p->eq_rec_data1);
910 
911 	/*
912 	 * Only the upper 4 bits of eq_rec_fmt_type are used
913 	 * to identify the EQ record type.
914 	 */
915 	switch (eq_rec_p->eq_rec_fmt_type >> 3) {
916 	case EQ_REC_MSI32:
917 		msiq_rec_p->msiq_rec_type = MSI32_REC;
918 
919 		msiq_rec_p->msiq_rec_data.msi.msi_data =
920 		    eq_rec_p->eq_rec_data0;
921 		break;
922 	case EQ_REC_MSI64:
923 		msiq_rec_p->msiq_rec_type = MSI64_REC;
924 
925 		msiq_rec_p->msiq_rec_data.msi.msi_data =
926 		    eq_rec_p->eq_rec_data0;
927 		break;
928 	case EQ_REC_MSG:
929 		msiq_rec_p->msiq_rec_type = MSG_REC;
930 
931 		msiq_rec_p->msiq_rec_data.msg.msg_route =
932 		    eq_rec_p->eq_rec_fmt_type & 7;
933 		msiq_rec_p->msiq_rec_data.msg.msg_targ = eq_rec_p->eq_rec_rid;
934 		msiq_rec_p->msiq_rec_data.msg.msg_code = eq_rec_p->eq_rec_data0;
935 		break;
936 	default:
937 		cmn_err(CE_WARN, "%s%d: px_lib_get_msiq_rec: "
938 		    "0x%x is an unknown EQ record type",
939 		    ddi_driver_name(dip), ddi_get_instance(dip),
940 		    (int)eq_rec_p->eq_rec_fmt_type);
941 		break;
942 	}
943 
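	/*
	 * Reassemble the MSI doorbell address from the packed EQ record
	 * fields: eq_rec_addr1 supplies the upper address bits (shifted up
	 * by 16) and eq_rec_addr0 the word-aligned low bits (shifted up by
	 * 2), as the shifts below show.
	 */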
944 	msiq_rec_p->msiq_rec_rid = eq_rec_p->eq_rec_rid;
945 	msiq_rec_p->msiq_rec_msi_addr = ((eq_rec_p->eq_rec_addr1 << 16) |
946 	    (eq_rec_p->eq_rec_addr0 << 2));
947 
948 	/* Zero out eq_rec_fmt_type field */
949 	eq_rec_p->eq_rec_fmt_type = 0;
950 }
951 
952 /*
953  * MSI Functions:
954  */
955 /*ARGSUSED*/
956 int
957 px_lib_msi_init(dev_info_t *dip)
958 {
959 	px_t		*px_p = DIP_TO_STATE(dip);
960 	px_msi_state_t	*msi_state_p = &px_p->px_ib_p->ib_msi_state;
961 	uint64_t	ret;
962 
963 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_init: dip 0x%p\n", dip);
964 
965 	if ((ret = hvio_msi_init(DIP_TO_HANDLE(dip),
966 	    msi_state_p->msi_addr32, msi_state_p->msi_addr64)) != H_EOK) {
967 		DBG(DBG_LIB_MSIQ, dip, "px_lib_msi_init failed, ret 0x%lx\n",
968 		    ret);
969 		return (DDI_FAILURE);
970 	}
971 
972 	return (DDI_SUCCESS);
973 }
974 
975 /*ARGSUSED*/
976 int
977 px_lib_msi_getmsiq(dev_info_t *dip, msinum_t msi_num,
978     msiqid_t *msiq_id)
979 {
980 	uint64_t	ret;
981 
982 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: dip 0x%p msi_num 0x%x\n",
983 	    dip, msi_num);
984 
985 	if ((ret = hvio_msi_getmsiq(DIP_TO_HANDLE(dip),
986 	    msi_num, msiq_id)) != H_EOK) {
987 		DBG(DBG_LIB_MSI, dip,
988 		    "hvio_msi_getmsiq failed, ret 0x%lx\n", ret);
989 		return (DDI_FAILURE);
990 	}
991 
992 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: msiq_id 0x%x\n",
993 	    *msiq_id);
994 
995 	return (DDI_SUCCESS);
996 }
997 
998 /*ARGSUSED*/
999 int
1000 px_lib_msi_setmsiq(dev_info_t *dip, msinum_t msi_num,
1001     msiqid_t msiq_id, msi_type_t msitype)
1002 {
1003 	uint64_t	ret;
1004 
1005 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setmsiq: dip 0x%p msi_num 0x%x "
1006 	    "msiq_id 0x%x\n", dip, msi_num, msiq_id);
1007 
1008 	if ((ret = hvio_msi_setmsiq(DIP_TO_HANDLE(dip),
1009 	    msi_num, msiq_id)) != H_EOK) {
1010 		DBG(DBG_LIB_MSI, dip,
1011 		    "hvio_msi_setmsiq failed, ret 0x%lx\n", ret);
1012 		return (DDI_FAILURE);
1013 	}
1014 
1015 	return (DDI_SUCCESS);
1016 }
1017 
1018 /*ARGSUSED*/
1019 int
1020 px_lib_msi_getvalid(dev_info_t *dip, msinum_t msi_num,
1021     pci_msi_valid_state_t *msi_valid_state)
1022 {
1023 	uint64_t	ret;
1024 
1025 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: dip 0x%p msi_num 0x%x\n",
1026 	    dip, msi_num);
1027 
1028 	if ((ret = hvio_msi_getvalid(DIP_TO_HANDLE(dip),
1029 	    msi_num, msi_valid_state)) != H_EOK) {
1030 		DBG(DBG_LIB_MSI, dip,
1031 		    "hvio_msi_getvalid failed, ret 0x%lx\n", ret);
1032 		return (DDI_FAILURE);
1033 	}
1034 
1035 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: msi_valid_state 0x%x\n",
1036 	    *msi_valid_state);
1037 
1038 	return (DDI_SUCCESS);
1039 }
1040 
1041 /*ARGSUSED*/
1042 int
1043 px_lib_msi_setvalid(dev_info_t *dip, msinum_t msi_num,
1044     pci_msi_valid_state_t msi_valid_state)
1045 {
1046 	uint64_t	ret;
1047 
1048 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setvalid: dip 0x%p msi_num 0x%x "
1049 	    "msi_valid_state 0x%x\n", dip, msi_num, msi_valid_state);
1050 
1051 	if ((ret = hvio_msi_setvalid(DIP_TO_HANDLE(dip),
1052 	    msi_num, msi_valid_state)) != H_EOK) {
1053 		DBG(DBG_LIB_MSI, dip,
1054 		    "hvio_msi_setvalid failed, ret 0x%lx\n", ret);
1055 		return (DDI_FAILURE);
1056 	}
1057 
1058 	return (DDI_SUCCESS);
1059 }
1060 
1061 /*ARGSUSED*/
1062 int
1063 px_lib_msi_getstate(dev_info_t *dip, msinum_t msi_num,
1064     pci_msi_state_t *msi_state)
1065 {
1066 	uint64_t	ret;
1067 
1068 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: dip 0x%p msi_num 0x%x\n",
1069 	    dip, msi_num);
1070 
1071 	if ((ret = hvio_msi_getstate(DIP_TO_HANDLE(dip),
1072 	    msi_num, msi_state)) != H_EOK) {
1073 		DBG(DBG_LIB_MSI, dip,
1074 		    "hvio_msi_getstate failed, ret 0x%lx\n", ret);
1075 		return (DDI_FAILURE);
1076 	}
1077 
1078 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: msi_state 0x%x\n",
1079 	    *msi_state);
1080 
1081 	return (DDI_SUCCESS);
1082 }
1083 
1084 /*ARGSUSED*/
1085 int
1086 px_lib_msi_setstate(dev_info_t *dip, msinum_t msi_num,
1087     pci_msi_state_t msi_state)
1088 {
1089 	uint64_t	ret;
1090 
1091 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setstate: dip 0x%p msi_num 0x%x "
1092 	    "msi_state 0x%x\n", dip, msi_num, msi_state);
1093 
1094 	if ((ret = hvio_msi_setstate(DIP_TO_HANDLE(dip),
1095 	    msi_num, msi_state)) != H_EOK) {
1096 		DBG(DBG_LIB_MSI, dip,
1097 		    "hvio_msi_setstate failed, ret 0x%lx\n", ret);
1098 		return (DDI_FAILURE);
1099 	}
1100 
1101 	return (DDI_SUCCESS);
1102 }
1103 
1104 /*
1105  * MSG Functions:
1106  */
1107 /*ARGSUSED*/
1108 int
1109 px_lib_msg_getmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
1110     msiqid_t *msiq_id)
1111 {
1112 	uint64_t	ret;
1113 
1114 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: dip 0x%p msg_type 0x%x\n",
1115 	    dip, msg_type);
1116 
1117 	if ((ret = hvio_msg_getmsiq(DIP_TO_HANDLE(dip),
1118 	    msg_type, msiq_id)) != H_EOK) {
1119 		DBG(DBG_LIB_MSG, dip,
1120 		    "hvio_msg_getmsiq failed, ret 0x%lx\n", ret);
1121 		return (DDI_FAILURE);
1122 	}
1123 
1124 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: msiq_id 0x%x\n",
1125 	    *msiq_id);
1126 
1127 	return (DDI_SUCCESS);
1128 }
1129 
1130 /*ARGSUSED*/
1131 int
1132 px_lib_msg_setmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
1133     msiqid_t msiq_id)
1134 {
1135 	uint64_t	ret;
1136 
1137 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_setmsiq: dip 0x%p msg_type 0x%x "
1138 	    "msiq_id 0x%x\n", dip, msg_type, msiq_id);
1139 
1140 	if ((ret = hvio_msg_setmsiq(DIP_TO_HANDLE(dip),
1141 	    msg_type, msiq_id)) != H_EOK) {
1142 		DBG(DBG_LIB_MSG, dip,
1143 		    "hvio_msg_setmsiq failed, ret 0x%lx\n", ret);
1144 		return (DDI_FAILURE);
1145 	}
1146 
1147 	return (DDI_SUCCESS);
1148 }
1149 
1150 /*ARGSUSED*/
1151 int
1152 px_lib_msg_getvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
1153     pcie_msg_valid_state_t *msg_valid_state)
1154 {
1155 	uint64_t	ret;
1156 
1157 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: dip 0x%p msg_type 0x%x\n",
1158 	    dip, msg_type);
1159 
1160 	if ((ret = hvio_msg_getvalid(DIP_TO_HANDLE(dip), msg_type,
1161 	    msg_valid_state)) != H_EOK) {
1162 		DBG(DBG_LIB_MSG, dip,
1163 		    "hvio_msg_getvalid failed, ret 0x%lx\n", ret);
1164 		return (DDI_FAILURE);
1165 	}
1166 
1167 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: msg_valid_state 0x%x\n",
1168 	    *msg_valid_state);
1169 
1170 	return (DDI_SUCCESS);
1171 }
1172 
1173 /*ARGSUSED*/
1174 int
1175 px_lib_msg_setvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
1176     pcie_msg_valid_state_t msg_valid_state)
1177 {
1178 	uint64_t	ret;
1179 
1180 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_setvalid: dip 0x%p msg_type 0x%x "
1181 	    "msg_valid_state 0x%x\n", dip, msg_type, msg_valid_state);
1182 
1183 	if ((ret = hvio_msg_setvalid(DIP_TO_HANDLE(dip), msg_type,
1184 	    msg_valid_state)) != H_EOK) {
1185 		DBG(DBG_LIB_MSG, dip,
1186 		    "hvio_msg_setvalid failed, ret 0x%lx\n", ret);
1187 		return (DDI_FAILURE);
1188 	}
1189 
1190 	return (DDI_SUCCESS);
1191 }
1192 
1193 /*
1194  * Suspend/Resume Functions:
1195  * Currently unsupported by hypervisor
1196  */
1197 int
1198 px_lib_suspend(dev_info_t *dip)
1199 {
1200 	px_t		*px_p = DIP_TO_STATE(dip);
1201 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
1202 	px_cb_t		*cb_p = PX2CB(px_p);
1203 	devhandle_t	dev_hdl, xbus_dev_hdl;
1204 	uint64_t	ret = H_EOK;
1205 
1206 	DBG(DBG_DETACH, dip, "px_lib_suspend: dip 0x%p\n", dip);
1207 
1208 	dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR];
1209 	xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC];
1210 
1211 	if ((ret = hvio_suspend(dev_hdl, pxu_p)) != H_EOK)
1212 		goto fail;
1213 
1214 	if (--cb_p->attachcnt == 0) {
1215 		ret = hvio_cb_suspend(xbus_dev_hdl, pxu_p);
1216 		if (ret != H_EOK)
1217 			cb_p->attachcnt++;
1218 	}
1219 
1220 fail:
1221 	return ((ret != H_EOK) ? DDI_FAILURE : DDI_SUCCESS);
1222 }
1223 
1224 void
1225 px_lib_resume(dev_info_t *dip)
1226 {
1227 	px_t		*px_p = DIP_TO_STATE(dip);
1228 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
1229 	px_cb_t		*cb_p = PX2CB(px_p);
1230 	devhandle_t	dev_hdl, xbus_dev_hdl;
1231 	devino_t	pec_ino = px_p->px_inos[PX_INTR_PEC];
1232 	devino_t	xbc_ino = px_p->px_inos[PX_INTR_XBC];
1233 
1234 	DBG(DBG_ATTACH, dip, "px_lib_resume: dip 0x%p\n", dip);
1235 
1236 	dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR];
1237 	xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC];
1238 
1239 	if (++cb_p->attachcnt == 1)
1240 		hvio_cb_resume(dev_hdl, xbus_dev_hdl, xbc_ino, pxu_p);
1241 
1242 	hvio_resume(dev_hdl, pec_ino, pxu_p);
1243 }
1244 
1245 /*ARGSUSED*/
1246 int
1247 px_lib_map_vconfig(dev_info_t *dip,
1248     ddi_map_req_t *mp, pci_config_offset_t off,
1249     pci_regspec_t *rp, caddr_t *addrp)
1250 {
1251 	/*
1252 	 * No special config space access services in this layer.
1253 	 */
1254 	return (DDI_FAILURE);
1255 }
1256 
1257 void
1258 px_lib_map_attr_check(ddi_map_req_t *mp)
1259 {
1260 	ddi_acc_hdl_t *hp = mp->map_handlep;
1261 
1262 	/* fire does not accept byte masks from PIO store merge */
1263 	if (hp->ah_acc.devacc_attr_dataorder == DDI_STORECACHING_OK_ACC)
1264 		hp->ah_acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1265 }
1266 
1267 void
1268 px_lib_clr_errs(px_t *px_p)
1269 {
1270 	px_pec_t	*pec_p = px_p->px_pec_p;
1271 	dev_info_t	*rpdip = px_p->px_dip;
1272 	int		err = PX_OK, ret;
1273 	int		acctype = pec_p->pec_safeacc_type;
1274 	ddi_fm_error_t	derr;
1275 
1276 	/* Create the derr */
1277 	bzero(&derr, sizeof (ddi_fm_error_t));
1278 	derr.fme_version = DDI_FME_VERSION;
1279 	derr.fme_ena = fm_ena_generate(0, FM_ENA_FMT1);
1280 	derr.fme_flag = acctype;
1281 
1282 	if (acctype == DDI_FM_ERR_EXPECTED) {
1283 		derr.fme_status = DDI_FM_NONFATAL;
1284 		ndi_fm_acc_err_set(pec_p->pec_acc_hdl, &derr);
1285 	}
1286 
1287 	mutex_enter(&px_p->px_fm_mutex);
1288 
1289 	/* send ereport/handle/clear fire registers */
1290 	err = px_err_handle(px_p, &derr, PX_LIB_CALL, B_TRUE);
1291 
1292 	/* Check all child devices for errors */
1293 	ret = ndi_fm_handler_dispatch(rpdip, NULL, &derr);
1294 
1295 	mutex_exit(&px_p->px_fm_mutex);
1296 
1297 	/*
1298 	 * PX_FATAL_HW indicates a condition recovered from Fatal-Reset,
1299 	 * therefore it does not cause panic.
1300 	 */
1301 	if ((err & (PX_FATAL_GOS | PX_FATAL_SW)) || (ret == DDI_FM_FATAL))
1302 		PX_FM_PANIC("Fatal System Port Error has occurred\n");
1303 }
1304 
1305 #ifdef  DEBUG
1306 int	px_peekfault_cnt = 0;
1307 int	px_pokefault_cnt = 0;
1308 #endif  /* DEBUG */
1309 
1310 /*ARGSUSED*/
1311 static int
1312 px_lib_do_poke(dev_info_t *dip, dev_info_t *rdip,
1313     peekpoke_ctlops_t *in_args)
1314 {
1315 	px_t *px_p = DIP_TO_STATE(dip);
1316 	px_pec_t *pec_p = px_p->px_pec_p;
1317 	int err = DDI_SUCCESS;
1318 	on_trap_data_t otd;
1319 
1320 	mutex_enter(&pec_p->pec_pokefault_mutex);
1321 	pec_p->pec_ontrap_data = &otd;
1322 	pec_p->pec_safeacc_type = DDI_FM_ERR_POKE;
1323 
1324 	/* Set up protected environment. */
1325 	if (!on_trap(&otd, OT_DATA_ACCESS)) {
1326 		uintptr_t tramp = otd.ot_trampoline;
1327 
1328 		otd.ot_trampoline = (uintptr_t)&poke_fault;
1329 		err = do_poke(in_args->size, (void *)in_args->dev_addr,
1330 		    (void *)in_args->host_addr);
1331 		otd.ot_trampoline = tramp;
1332 	} else
1333 		err = DDI_FAILURE;
1334 
1335 	px_lib_clr_errs(px_p);
1336 
1337 	if (otd.ot_trap & OT_DATA_ACCESS)
1338 		err = DDI_FAILURE;
1339 
1340 	/* Take down protected environment. */
1341 	no_trap();
1342 
1343 	pec_p->pec_ontrap_data = NULL;
1344 	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
1345 	mutex_exit(&pec_p->pec_pokefault_mutex);
1346 
1347 #ifdef  DEBUG
1348 	if (err == DDI_FAILURE)
1349 		px_pokefault_cnt++;
1350 #endif
1351 	return (err);
1352 }
1353 
1354 /*ARGSUSED*/
1355 static int
1356 px_lib_do_caut_put(dev_info_t *dip, dev_info_t *rdip,
1357     peekpoke_ctlops_t *cautacc_ctlops_arg)
1358 {
1359 	size_t size = cautacc_ctlops_arg->size;
1360 	uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
1361 	uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
1362 	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
1363 	size_t repcount = cautacc_ctlops_arg->repcount;
1364 	uint_t flags = cautacc_ctlops_arg->flags;
1365 
1366 	px_t *px_p = DIP_TO_STATE(dip);
1367 	px_pec_t *pec_p = px_p->px_pec_p;
1368 	int err = DDI_SUCCESS;
1369 
1370 	/*
1371 	 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault
1372 	 * mutex.
1373 	 */
1374 	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
1375 
1376 	pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap;
1377 	pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED;
1378 	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;
1379 
1380 	if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
1381 		for (; repcount; repcount--) {
1382 			switch (size) {
1383 
1384 			case sizeof (uint8_t):
1385 				i_ddi_put8(hp, (uint8_t *)dev_addr,
1386 				    *(uint8_t *)host_addr);
1387 				break;
1388 
1389 			case sizeof (uint16_t):
1390 				i_ddi_put16(hp, (uint16_t *)dev_addr,
1391 				    *(uint16_t *)host_addr);
1392 				break;
1393 
1394 			case sizeof (uint32_t):
1395 				i_ddi_put32(hp, (uint32_t *)dev_addr,
1396 				    *(uint32_t *)host_addr);
1397 				break;
1398 
1399 			case sizeof (uint64_t):
1400 				i_ddi_put64(hp, (uint64_t *)dev_addr,
1401 				    *(uint64_t *)host_addr);
1402 				break;
1403 			}
1404 
1405 			host_addr += size;
1406 
1407 			if (flags == DDI_DEV_AUTOINCR)
1408 				dev_addr += size;
1409 
1410 			px_lib_clr_errs(px_p);
1411 
1412 			if (pec_p->pec_ontrap_data->ot_trap & OT_DATA_ACCESS) {
1413 				err = DDI_FAILURE;
1414 #ifdef  DEBUG
1415 				px_pokefault_cnt++;
1416 #endif
1417 				break;
1418 			}
1419 		}
1420 	}
1421 
1422 	i_ddi_notrap((ddi_acc_handle_t)hp);
1423 	pec_p->pec_ontrap_data = NULL;
1424 	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
1425 	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
1426 	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;
1427 
1428 	return (err);
1429 }
1430 
1431 
1432 int
1433 px_lib_ctlops_poke(dev_info_t *dip, dev_info_t *rdip,
1434     peekpoke_ctlops_t *in_args)
1435 {
1436 	return (in_args->handle ? px_lib_do_caut_put(dip, rdip, in_args) :
1437 	    px_lib_do_poke(dip, rdip, in_args));
1438 }
1439 
1440 
1441 /*ARGSUSED*/
1442 static int
1443 px_lib_do_peek(dev_info_t *dip, peekpoke_ctlops_t *in_args)
1444 {
1445 	px_t *px_p = DIP_TO_STATE(dip);
1446 	px_pec_t *pec_p = px_p->px_pec_p;
1447 	int err = DDI_SUCCESS;
1448 	on_trap_data_t otd;
1449 
1450 	mutex_enter(&pec_p->pec_pokefault_mutex);
1451 	pec_p->pec_safeacc_type = DDI_FM_ERR_PEEK;
1452 
1453 	if (!on_trap(&otd, OT_DATA_ACCESS)) {
1454 		uintptr_t tramp = otd.ot_trampoline;
1455 
1456 		otd.ot_trampoline = (uintptr_t)&peek_fault;
1457 		err = do_peek(in_args->size, (void *)in_args->dev_addr,
1458 		    (void *)in_args->host_addr);
1459 		otd.ot_trampoline = tramp;
1460 	} else
1461 		err = DDI_FAILURE;
1462 
1463 	no_trap();
1464 	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
1465 	mutex_exit(&pec_p->pec_pokefault_mutex);
1466 
1467 #ifdef  DEBUG
1468 	if (err == DDI_FAILURE)
1469 		px_peekfault_cnt++;
1470 #endif
1471 	return (err);
1472 }
1473 
1474 
1475 static int
1476 px_lib_do_caut_get(dev_info_t *dip, peekpoke_ctlops_t *cautacc_ctlops_arg)
1477 {
1478 	size_t size = cautacc_ctlops_arg->size;
1479 	uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
1480 	uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
1481 	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
1482 	size_t repcount = cautacc_ctlops_arg->repcount;
1483 	uint_t flags = cautacc_ctlops_arg->flags;
1484 
1485 	px_t *px_p = DIP_TO_STATE(dip);
1486 	px_pec_t *pec_p = px_p->px_pec_p;
1487 	int err = DDI_SUCCESS;
1488 
1489 	/*
1490 	 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault
1491 	 * mutex.
1492 	 */
1493 	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
1494 
1495 	pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap;
1496 	pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED;
1497 	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;
1498 
1499 	if (repcount == 1) {
1500 		if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
1501 			i_ddi_caut_get(size, (void *)dev_addr,
1502 			    (void *)host_addr);
1503 		} else {
1504 			int i;
1505 			uint8_t *ff_addr = (uint8_t *)host_addr;
1506 			for (i = 0; i < size; i++)
1507 				*ff_addr++ = 0xff;
1508 
1509 			err = DDI_FAILURE;
1510 #ifdef  DEBUG
1511 			px_peekfault_cnt++;
1512 #endif
1513 		}
1514 	} else {
1515 		if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
1516 			for (; repcount; repcount--) {
1517 				i_ddi_caut_get(size, (void *)dev_addr,
1518 				    (void *)host_addr);
1519 
1520 				host_addr += size;
1521 
1522 				if (flags == DDI_DEV_AUTOINCR)
1523 					dev_addr += size;
1524 			}
1525 		} else {
1526 			err = DDI_FAILURE;
1527 #ifdef  DEBUG
1528 			px_peekfault_cnt++;
1529 #endif
1530 		}
1531 	}
1532 
1533 	i_ddi_notrap((ddi_acc_handle_t)hp);
1534 	pec_p->pec_ontrap_data = NULL;
1535 	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
1536 	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
1537 	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;
1538 
1539 	return (err);
1540 }
1541 
1542 /*ARGSUSED*/
1543 int
1544 px_lib_ctlops_peek(dev_info_t *dip, dev_info_t *rdip,
1545     peekpoke_ctlops_t *in_args, void *result)
1546 {
1547 	result = (void *)in_args->host_addr;
1548 	return (in_args->handle ? px_lib_do_caut_get(dip, in_args) :
1549 	    px_lib_do_peek(dip, in_args));
1550 }
1551 
1552 /*
1553  * implements PPM interface
1554  */
1555 int
1556 px_lib_pmctl(int cmd, px_t *px_p)
1557 {
1558 	ASSERT((cmd & ~PPMREQ_MASK) == PPMREQ);
1559 	switch (cmd) {
1560 	case PPMREQ_PRE_PWR_OFF:
1561 		/*
1562 		 * Currently there is no device power management for
1563 		 * the root complex (Fire). When there is, we need to make
1564 		 * sure that it is at full power before trying to send the
1565 		 * PME_Turn_Off message.
1566 		 */
1567 		DBG(DBG_PWR, px_p->px_dip,
1568 		    "ioctl: request to send PME_Turn_Off\n");
1569 		return (px_goto_l23ready(px_p));
1570 
1571 	case PPMREQ_PRE_PWR_ON:
1572 		DBG(DBG_PWR, px_p->px_dip, "ioctl: PRE_PWR_ON request\n");
1573 		return (px_pre_pwron_check(px_p));
1574 
1575 	case PPMREQ_POST_PWR_ON:
1576 		DBG(DBG_PWR, px_p->px_dip, "ioctl: POST_PWR_ON request\n");
1577 		return (px_goto_l0(px_p));
1578 
1579 	default:
1580 		return (DDI_FAILURE);
1581 	}
1582 }
1583 
1584 /*
1585  * Sends the PME_Turn_Off message to put the link in the L2/L3 Ready state.
1586  * Called by px_ioctl.
1587  * Returns DDI_SUCCESS or DDI_FAILURE.
1588  * 1. Wait for the link to be in the L1 state (link status reg).
1589  * 2. Write to the PME_Turn_Off reg to broadcast the message.
1590  * 3. Set a timeout.
1591  * 4. If it times out, return failure.
1592  * 5. If PME_To_Ack is received, wait till the link is in L2/L3 Ready.
1593  */
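/*
 * The handshake below is implemented with the px_pm_flags bits
 * (PX_PME_TURNOFF_PENDING, PX_PMETOACK_RECVD, PX_LDN_EXPECTED) and the
 * px_l23ready_cv condition variable, which px_pmeq_intr() broadcasts when
 * the PME_To_ACK message arrives.
 */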
1594 static int
1595 px_goto_l23ready(px_t *px_p)
1596 {
1597 	pcie_pwr_t	*pwr_p;
1598 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
1599 	caddr_t	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
1600 	int		ret = DDI_SUCCESS;
1601 	clock_t		end, timeleft;
1602 	int		mutex_held = 1;
1603 
1604 	/* If no PM info, return failure */
1605 	if (!PCIE_PMINFO(px_p->px_dip) ||
1606 	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
1607 		return (DDI_FAILURE);
1608 
1609 	mutex_enter(&pwr_p->pwr_lock);
1610 	mutex_enter(&px_p->px_l23ready_lock);
1611 	/* Clear the PME_To_ACK received flag */
1612 	px_p->px_pm_flags &= ~PX_PMETOACK_RECVD;
1613 	/*
1614 	 * When P25 is the downstream device, after receiving
1615 	 * PME_To_ACK, Fire will go to the Detect state, which causes
1616 	 * a link down event. Inform FMA that this is expected.
1617 	 * For all other cards compliant with the PCI Express
1618 	 * spec, this will happen when the power is re-applied. FMA
1619 	 * code will clear this flag after one instance of LDN. Since
1620 	 * there will not be an LDN event for the spec-compliant cards,
1621 	 * we need to clear the flag after receiving PME_To_ACK.
1622 	 */
1623 	px_p->px_pm_flags |= PX_LDN_EXPECTED;
1624 	if (px_send_pme_turnoff(csr_base) != DDI_SUCCESS) {
1625 		ret = DDI_FAILURE;
1626 		goto l23ready_done;
1627 	}
1628 	px_p->px_pm_flags |= PX_PME_TURNOFF_PENDING;
1629 
1630 	end = ddi_get_lbolt() + drv_usectohz(px_pme_to_ack_timeout);
1631 	while (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
1632 		timeleft = cv_timedwait(&px_p->px_l23ready_cv,
1633 		    &px_p->px_l23ready_lock, end);
1634 		/*
1635 		 * cv_timedwait returns -1 when one of the following holds:
1636 		 * 1) it timed out,
1637 		 * 2) there was a premature wakeup, but by the time
1638 		 * cv_timedwait is called again end < lbolt, i.e.
1639 		 * end is in the past, or
1640 		 * 3) by the time we make the first cv_timedwait call,
1641 		 * end < lbolt is already true.
1642 		 */
1643 		if (timeleft == -1)
1644 			break;
1645 	}
1646 	if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
1647 		/*
1648 		 * Either we timed out or the interrupt didn't get a
1649 		 * chance to grab the mutex and set the flag.
1650 		 * Release the mutex and delay for some time.
1651 		 * This will 1) give the interrupt a chance to
1652 		 * set the flag and 2) create a delay between two
1653 		 * consecutive requests.
1654 		 */
1655 		mutex_exit(&px_p->px_l23ready_lock);
1656 		delay(drv_usectohz(50 * PX_MSEC_TO_USEC));
1657 		mutex_held = 0;
1658 		if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
1659 			ret = DDI_FAILURE;
1660 			DBG(DBG_PWR, px_p->px_dip, " Timed out while waiting"
1661 			    " for PME_TO_ACK\n");
1662 		}
1663 	}
1664 	px_p->px_pm_flags &=
1665 	    ~(PX_PME_TURNOFF_PENDING | PX_PMETOACK_RECVD | PX_LDN_EXPECTED);
1666 
1667 l23ready_done:
1668 	if (mutex_held)
1669 		mutex_exit(&px_p->px_l23ready_lock);
1670 	/*
1671 	 * Wait till the link is in L1 idle, if sending PME_Turn_Off
1672 	 * was successful.
1673 	 */
1674 	if (ret == DDI_SUCCESS) {
1675 		if (px_link_wait4l1idle(csr_base) != DDI_SUCCESS) {
1676 			DBG(DBG_PWR, px_p->px_dip, " Link is not at L1"
1677 			    " even though we received PME_To_ACK.\n");
1678 			/*
1679 			 * Workaround for hardware bug with P25.
1680 			 * Due to a hardware bug with P25, the link state
1681 			 * will be Detect rather than L1 after the
1682 			 * link transitions to the L23Ready state. Since
1683 			 * we can't tell whether the link is in L23Ready
1684 			 * without Fire's state being L1_idle, we delay
1685 			 * here just to make sure that we wait till the link
1686 			 * has transitioned to the L23Ready state.
1687 			 */
1688 			delay(drv_usectohz(100 * PX_MSEC_TO_USEC));
1689 		}
1690 		pwr_p->pwr_link_lvl = PM_LEVEL_L3;
1691 
1692 	}
1693 	mutex_exit(&pwr_p->pwr_lock);
1694 	return (ret);
1695 }
1696 
1697 /*
1698  * Message interrupt handler intended to be shared for both
1699  * PME and PME_TO_ACK msg handling, currently only handles
1700  * PME_To_ACK message.
1701  */
1702 uint_t
1703 px_pmeq_intr(caddr_t arg)
1704 {
1705 	px_t	*px_p = (px_t *)arg;
1706 
1707 	DBG(DBG_PWR, px_p->px_dip, " PME_To_ACK received \n");
1708 	mutex_enter(&px_p->px_l23ready_lock);
1709 	cv_broadcast(&px_p->px_l23ready_cv);
1710 	if (px_p->px_pm_flags & PX_PME_TURNOFF_PENDING) {
1711 		px_p->px_pm_flags |= PX_PMETOACK_RECVD;
1712 	} else {
1713 		/*
1714 		 * This may be the second ACK received. If so,
1715 		 * we should be receiving it during the wait4L1 stage.
1716 		 */
1717 		px_p->px_pmetoack_ignored++;
1718 	}
1719 	mutex_exit(&px_p->px_l23ready_lock);
1720 	return (DDI_INTR_CLAIMED);
1721 }
1722 
1723 static int
1724 px_pre_pwron_check(px_t *px_p)
1725 {
1726 	pcie_pwr_t	*pwr_p;
1727 
1728 	/* If no PM info, return failure */
1729 	if (!PCIE_PMINFO(px_p->px_dip) ||
1730 	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
1731 		return (DDI_FAILURE);
1732 
1733 	/*
1734 	 * For spec-compliant downstream cards, a link down event
1735 	 * is expected when the device is powered on.
1736 	 */
1737 	px_p->px_pm_flags |= PX_LDN_EXPECTED;
1738 	return (pwr_p->pwr_link_lvl == PM_LEVEL_L3 ? DDI_SUCCESS : DDI_FAILURE);
1739 }
1740 
1741 static int
1742 px_goto_l0(px_t *px_p)
1743 {
1744 	pcie_pwr_t	*pwr_p;
1745 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
1746 	caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
1747 	int		ret = DDI_SUCCESS;
1748 	uint64_t	time_spent = 0;
1749 
1750 	/* If no PM info, return failure */
1751 	if (!PCIE_PMINFO(px_p->px_dip) ||
1752 	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
1753 		return (DDI_FAILURE);
1754 
1755 	mutex_enter(&pwr_p->pwr_lock);
1756 	/*
1757 	 * The following link retrain activity will cause LDN and LUP events.
1758 	 * Receiving LDN prior to receiving LUP is expected, not an error, in
1759 	 * this case.  Receiving LUP indicates the link is fully up and can
1760 	 * support powering up the downstream device; any further LDN or
1761 	 * LUP outside this context is an error.
1762 	 */
1763 	px_p->px_lup_pending = 1;
1764 	if (px_link_retrain(csr_base) != DDI_SUCCESS) {
1765 		ret = DDI_FAILURE;
1766 		goto l0_done;
1767 	}
1768 
1769 	/* The LUP event takes on the order of 15 ms to occur */
1770 	for (; px_p->px_lup_pending && (time_spent < px_lup_poll_to);
1771 	    time_spent += px_lup_poll_interval)
1772 		drv_usecwait(px_lup_poll_interval);
1773 	if (px_p->px_lup_pending)
1774 		ret = DDI_FAILURE;
1775 l0_done:
1776 	px_enable_detect_quiet(csr_base);
1777 	if (ret == DDI_SUCCESS)
1778 		pwr_p->pwr_link_lvl = PM_LEVEL_L0;
1779 	mutex_exit(&pwr_p->pwr_lock);
1780 	return (ret);
1781 }
1782 
1783 /*
1784  * Extract the driver's binding name to identify which chip we're binding to.
1785  * Whenever a new bus bridge is created, the driver alias entry should be
1786  * added here to identify the device if needed.  If a device isn't added,
1787  * the identity defaults to PX_CHIP_UNIDENTIFIED.
1788  */
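/*
 * The returned identity packs the chip type together with the
 * "module-revision#" property value (via PX_CHIP_ID); px_lib_dev_init()
 * then compares it against the FIRE_VER_* constants.
 */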
1789 static uint32_t
1790 px_identity_chip(px_t *px_p)
1791 {
1792 	dev_info_t	*dip = px_p->px_dip;
1793 	char		*name = ddi_binding_name(dip);
1794 	uint32_t	revision = 0;
1795 
1796 	revision = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1797 	    "module-revision#", 0);
1798 
1799 	/* Check for Fire driver binding name */
1800 	if ((strcmp(name, "pci108e,80f0") == 0) ||
1801 	    (strcmp(name, "pciex108e,80f0") == 0)) {
1802 		DBG(DBG_ATTACH, dip, "px_identity_chip: %s%d: "
1803 		    "name %s module-revision %d\n", ddi_driver_name(dip),
1804 		    ddi_get_instance(dip), name, revision);
1805 
1806 		return (PX_CHIP_ID(PX_CHIP_FIRE, revision, 0x00));
1807 	}
1808 
1809 	DBG(DBG_ATTACH, dip, "%s%d: Unknown PCI Express Host bridge %s %x\n",
1810 	    ddi_driver_name(dip), ddi_get_instance(dip), name, revision);
1811 
1812 	return (PX_CHIP_UNIDENTIFIED);
1813 }
1814 
1815 int
1816 px_err_add_intr(px_fault_t *px_fault_p)
1817 {
1818 	dev_info_t	*dip = px_fault_p->px_fh_dip;
1819 	px_t		*px_p = DIP_TO_STATE(dip);
1820 
1821 	VERIFY(add_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL,
1822 		px_fault_p->px_err_func, (caddr_t)px_fault_p, NULL) == 0);
1823 
1824 	px_ib_intr_enable(px_p, intr_dist_cpuid(), px_fault_p->px_intr_ino);
1825 
1826 	return (DDI_SUCCESS);
1827 }
1828 
1829 void
1830 px_err_rem_intr(px_fault_t *px_fault_p)
1831 {
1832 	dev_info_t	*dip = px_fault_p->px_fh_dip;
1833 	px_t		*px_p = DIP_TO_STATE(dip);
1834 
1835 	px_ib_intr_disable(px_p->px_ib_p, px_fault_p->px_intr_ino,
1836 		IB_INTR_WAIT);
1837 
1838 	rem_ivintr(px_fault_p->px_fh_sysino, NULL);
1839 }
1840 
1841 /*
1842  * px_cb_add_intr() - Called from attach(9E) to create the CB if not yet
1843  * created, always add the CB interrupt vector, but enable it only once.
1844  */
1845 int
1846 px_cb_add_intr(px_fault_t *fault_p)
1847 {
1848 	px_t		*px_p = DIP_TO_STATE(fault_p->px_fh_dip);
1849 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
1850 	px_cb_t		*cb_p = (px_cb_t *)CSR_XR((caddr_t)
1851 	    pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1);
1852 	px_cb_list_t	*pxl, *pxl_new;
1853 	cpuid_t		cpuid;
1854 
1855 
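	/*
	 * The common block (CB) state is shared by the px leaves of the same
	 * Fire chip.  The pointer to the shared px_cb_t is parked in the
	 * JBUS_SCRATCH_1 register (read via CSR_XR above), so whichever leaf
	 * attaches first allocates the structure and later leaves pick up
	 * the same pointer.
	 */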
1856 	if (cb_p == NULL) {
1857 		cb_p = kmem_zalloc(sizeof (px_cb_t), KM_SLEEP);
1858 		mutex_init(&cb_p->cb_mutex, NULL, MUTEX_DRIVER, NULL);
1859 		cb_p->px_cb_func = px_cb_intr;
1860 		pxu_p->px_cb_p = cb_p;
1861 		CSR_XS((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1,
1862 		    (uint64_t)cb_p);
1863 	} else
1864 		pxu_p->px_cb_p = cb_p;
1865 
1866 	mutex_enter(&cb_p->cb_mutex);
1867 
1868 	VERIFY(add_ivintr(fault_p->px_fh_sysino, PX_ERR_PIL,
1869 	    cb_p->px_cb_func, (caddr_t)cb_p, NULL) == 0);
1870 
1871 	if (cb_p->pxl == NULL) {
1872 
1873 		cpuid = intr_dist_cpuid();
1874 		px_ib_intr_enable(px_p, cpuid, fault_p->px_intr_ino);
1875 
1876 		pxl = kmem_zalloc(sizeof (px_cb_list_t), KM_SLEEP);
1877 		pxl->pxp = px_p;
1878 
1879 		cb_p->pxl = pxl;
1880 		cb_p->sysino = fault_p->px_fh_sysino;
1881 		cb_p->cpuid = cpuid;
1882 
1883 	} else {
1884 		/*
1885 		 * Find the last pxl, or
1886 		 * stop short on encountering a redundant entry, or
1887 		 * both.
1888 		 */
1889 		pxl = cb_p->pxl;
1890 		for (; !(pxl->pxp == px_p) && pxl->next; pxl = pxl->next);
1891 		if (pxl->pxp == px_p) {
1892 			cmn_err(CE_WARN, "px_cb_add_intr: reregister sysino "
1893 			    "%lx by px_p 0x%p\n", cb_p->sysino, (void *)px_p);
			mutex_exit(&cb_p->cb_mutex);
1894 			return (DDI_FAILURE);
1895 		}
1896 
1897 		/* add to linked list */
1898 		pxl_new = kmem_zalloc(sizeof (px_cb_list_t), KM_SLEEP);
1899 		pxl_new->pxp = px_p;
1900 		pxl->next = pxl_new;
1901 	}
1902 	cb_p->attachcnt++;
1903 
1904 	mutex_exit(&cb_p->cb_mutex);
1905 
1906 	return (DDI_SUCCESS);
1907 }
1908 
1909 /*
1910  * px_cb_rem_intr() - Called from detach(9E) to remove this leaf's CB
1911  * interrupt vector, shift the interrupt proxy role to the next available
1912  * px, or disable the CB interrupt when this leaf is the last one.
1913  */
1914 void
1915 px_cb_rem_intr(px_fault_t *fault_p)
1916 {
1917 	px_t		*px_p = DIP_TO_STATE(fault_p->px_fh_dip), *pxp;
1918 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
1919 	px_cb_t		*cb_p = PX2CB(px_p);
1920 	px_cb_list_t	*pxl, *prev;
1921 	px_fault_t	*f_p;
1922 
1923 	ASSERT(cb_p->pxl);
1924 
1925 	/* De-list the target px, move the next px up */
1926 
1927 	mutex_enter(&cb_p->cb_mutex);
1928 
1929 	pxl = cb_p->pxl;
1930 	if (pxl->pxp == px_p) {
1931 		cb_p->pxl = pxl->next;
1932 	} else {
1933 		prev = pxl;
1934 		pxl = pxl->next;
1935 		for (; pxl && (pxl->pxp != px_p); prev = pxl, pxl = pxl->next);
1936 		if (!pxl) {
1937 			cmn_err(CE_WARN, "px_cb_rem_intr: can't find px_p 0x%p "
1938 			    "in registered CB list.", (void *)px_p);
			mutex_exit(&cb_p->cb_mutex);
1939 			return;
1940 		}
1941 		prev->next = pxl->next;
1942 	}
1943 	kmem_free(pxl, sizeof (px_cb_list_t));
1944 
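	/*
	 * If the departing px was the interrupt proxy, disable its CB
	 * interrupt and, if any px remains registered, re-target the
	 * CB interrupt at the next leaf's sysino.
	 */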
1945 	if (fault_p->px_fh_sysino == cb_p->sysino) {
1946 		px_ib_intr_disable(px_p->px_ib_p, fault_p->px_intr_ino,
1947 		    IB_INTR_WAIT);
1948 
1949 		if (cb_p->pxl) {
1950 			pxp = cb_p->pxl->pxp;
1951 			f_p = &pxp->px_cb_fault;
1952 			cb_p->sysino = f_p->px_fh_sysino;
1953 
1954 			PX_INTR_ENABLE(pxp->px_dip, cb_p->sysino, cb_p->cpuid);
1955 			(void) px_lib_intr_setstate(pxp->px_dip, cb_p->sysino,
1956 			    INTR_IDLE_STATE);
1957 		}
1958 	}
1959 
1960 	rem_ivintr(fault_p->px_fh_sysino, NULL);
1961 	pxu_p->px_cb_p = NULL;
1962 	cb_p->attachcnt--;
1963 	if (cb_p->pxl) {
1964 		mutex_exit(&cb_p->cb_mutex);
1965 		return;
1966 	}
1967 	mutex_exit(&cb_p->cb_mutex);
1968 
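	/* No px leaves remain: tear down the shared CB state entirely. */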
1969 	mutex_destroy(&cb_p->cb_mutex);
1970 	CSR_XS((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1, 0ull);
1971 	kmem_free(cb_p, sizeof (px_cb_t));
1972 }
1973 
1974 /*
1975  * px_cb_intr() - sun4u only, CB interrupt dispatcher
1976  */
1977 uint_t
1978 px_cb_intr(caddr_t arg)
1979 {
1980 	px_cb_t		*cb_p = (px_cb_t *)arg;
1981 	px_cb_list_t	*pxl = cb_p->pxl;
1982 	px_t		*pxp = pxl ? pxl->pxp : NULL;
1983 	px_fault_t	*fault_p;
1984 
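	/* Skip leaves not yet attached; dispatch to the first attached px. */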
1985 	while (pxl && pxp && (pxp->px_state != PX_ATTACHED)) {
1986 		pxl = pxl->next;
1987 		pxp = (pxl) ? pxl->pxp : NULL;
1988 	}
1989 
1990 	if (pxp) {
1991 		fault_p = &pxp->px_cb_fault;
1992 		return (fault_p->px_err_func((caddr_t)fault_p));
1993 	}
1994 	return (DDI_INTR_UNCLAIMED);
1995 }
1996 
1997 /*
1998  * px_cb_intr_redist() - sun4u only, CB interrupt redistribution
1999  */
2000 void
2001 px_cb_intr_redist(px_t	*px_p)
2002 {
2003 	px_fault_t	*f_p = &px_p->px_cb_fault;
2004 	px_cb_t		*cb_p = PX2CB(px_p);
2005 	devino_t	ino = px_p->px_inos[PX_INTR_XBC];
2006 	cpuid_t		cpuid;
2007 
2008 	mutex_enter(&cb_p->cb_mutex);
2009 
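	/* Only the interrupt proxy (owner of cb_p->sysino) re-targets. */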
2010 	if (cb_p->sysino != f_p->px_fh_sysino) {
2011 		mutex_exit(&cb_p->cb_mutex);
2012 		return;
2013 	}
2014 
2015 	cb_p->cpuid = cpuid = intr_dist_cpuid();
2016 	px_ib_intr_dist_en(px_p->px_dip, cpuid, ino, B_FALSE);
2017 
2018 	mutex_exit(&cb_p->cb_mutex);
2019 }
2020 
2021 #ifdef FMA
2022 void
2023 px_fill_rc_status(px_fault_t *px_fault_p, pciex_rc_error_regs_t *rc_status)
2024 {
2025 	/* populate the rc_status by reading the registers - TBD */
2026 }
2027 #endif /* FMA */
2028 
2029 /*
2030  * Unprotected raw reads/writes of fabric device's config space.
2031  * Only used for temporary PCI-E Fabric Error Handling.
2032  */
2033 uint32_t
2034 px_fab_get(px_t *px_p, pcie_req_id_t bdf, uint16_t offset)
2035 {
2036 	px_ranges_t	*rp = px_p->px_ranges_p;
2037 	uint64_t	range_prop, base_addr;
2038 	int		bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG);
2039 	uint32_t	val;
2040 
2041 	/* Get Fire's Physical Base Address */
2042 	range_prop = (((uint64_t)(rp[bank].parent_high & 0x7ff)) << 32) |
2043 	    rp[bank].parent_low;
2044 
2045 	/* Compute the config space address for this bdf/offset. */
2046 	base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset);
2047 
2048 	val = ldphysio(base_addr);
2049 
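	/* Config data is little-endian; LE_32 converts to host order. */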
2050 	return (LE_32(val));
2051 }
2052 
2053 void
2054 px_fab_set(px_t *px_p, pcie_req_id_t bdf, uint16_t offset,
2055     uint32_t val)
{
2056 	px_ranges_t	*rp = px_p->px_ranges_p;
2057 	uint64_t	range_prop, base_addr;
2058 	int		bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG);
2059 
2060 	/* Get Fire's Physical Base Address */
2061 	range_prop = (((uint64_t)(rp[bank].parent_high & 0x7ff)) << 32) |
2062 	    rp[bank].parent_low;
2063 
2064 	/* Get config space first. */
2065 	/* Compute the config space address for this bdf/offset. */
2066 
2067 	stphysio(base_addr, LE_32(val));
2068 }
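
/*
 * Illustrative sketch only, not part of the driver: a hypothetical helper
 * (px_fab_rmw) showing how the raw accessors above could be combined for
 * a read-modify-write of a 32-bit config register during error handling.
 */
static void
px_fab_rmw(px_t *px_p, pcie_req_id_t bdf, uint16_t offset,
    uint32_t mask, uint32_t bits)
{
	/* read current value, clear the masked field, then merge new bits */
	uint32_t	val = px_fab_get(px_p, bdf, offset);

	val = (val & ~mask) | (bits & mask);
	px_fab_set(px_p, bdf, offset, val);
}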
2069 
2070 /*
2071  * cpr callback
2072  *
2073  * disable fabric error msg interrupt prior to suspending
2074  * all device drivers; re-enable fabric error msg interrupt
2075  * after all devices are resumed.
2076  */
2077 static boolean_t
2078 px_cpr_callb(void *arg, int code)
2079 {
2080 	px_t		*px_p = (px_t *)arg;
2081 	px_ib_t		*ib_p = px_p->px_ib_p;
2082 	px_pec_t	*pec_p = px_p->px_pec_p;
2083 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
2084 	caddr_t		csr_base;
2085 	devino_t	ce_ino, nf_ino, f_ino;
2086 	px_ib_ino_info_t	*ce_ino_p, *nf_ino_p, *f_ino_p;
2087 	uint64_t	imu_log_enable, imu_intr_enable;
2088 	uint64_t	imu_log_mask, imu_intr_mask;
2089 
2090 	ce_ino = px_msiqid_to_devino(px_p, pec_p->pec_corr_msg_msiq_id);
2091 	nf_ino = px_msiqid_to_devino(px_p, pec_p->pec_non_fatal_msg_msiq_id);
2092 	f_ino = px_msiqid_to_devino(px_p, pec_p->pec_fatal_msg_msiq_id);
2093 	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
2094 
2095 	imu_log_enable = CSR_XR(csr_base, IMU_ERROR_LOG_ENABLE);
2096 	imu_intr_enable = CSR_XR(csr_base, IMU_INTERRUPT_ENABLE);
2097 
2098 	imu_log_mask = BITMASK(IMU_ERROR_LOG_ENABLE_FATAL_MES_NOT_EN_LOG_EN) |
2099 	    BITMASK(IMU_ERROR_LOG_ENABLE_NONFATAL_MES_NOT_EN_LOG_EN) |
2100 	    BITMASK(IMU_ERROR_LOG_ENABLE_COR_MES_NOT_EN_LOG_EN);
2101 
2102 	imu_intr_mask =
2103 	    BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_S_INT_EN) |
2104 	    BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_S_INT_EN) |
2105 	    BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_S_INT_EN) |
2106 	    BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_P_INT_EN) |
2107 	    BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_P_INT_EN) |
2108 	    BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_P_INT_EN);
2109 
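	/*
	 * The masks above select the "message not enabled" error classes
	 * (correctable, non-fatal, fatal) that are toggled across the
	 * checkpoint/resume transition below.
	 */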
2110 	switch (code) {
2111 	case CB_CODE_CPR_CHKPT:
2112 		/* disable imu rbne on corr/nonfatal/fatal errors */
2113 		CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE,
2114 		    imu_log_enable & (~imu_log_mask));
2115 
2116 		CSR_XS(csr_base, IMU_INTERRUPT_ENABLE,
2117 		    imu_intr_enable & (~imu_intr_mask));
2118 
2119 		/* disable CORR intr mapping */
2120 		px_ib_intr_disable(ib_p, ce_ino, IB_INTR_NOWAIT);
2121 
2122 		/* disable NON FATAL intr mapping */
2123 		px_ib_intr_disable(ib_p, nf_ino, IB_INTR_NOWAIT);
2124 
2125 		/* disable FATAL intr mapping */
2126 		px_ib_intr_disable(ib_p, f_ino, IB_INTR_NOWAIT);
2127 
2128 		break;
2129 
2130 	case CB_CODE_CPR_RESUME:
2131 		mutex_enter(&ib_p->ib_ino_lst_mutex);
2132 
2133 		ce_ino_p = px_ib_locate_ino(ib_p, ce_ino);
2134 		nf_ino_p = px_ib_locate_ino(ib_p, nf_ino);
2135 		f_ino_p = px_ib_locate_ino(ib_p, f_ino);
2136 
2137 		/* enable CORR intr mapping */
2138 		if (ce_ino_p)
2139 			px_ib_intr_enable(px_p, ce_ino_p->ino_cpuid, ce_ino);
2140 		else
2141 			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
2142 			    "reenable PCIe Correctable msg intr.\n");
2143 
2144 		/* enable NON FATAL intr mapping */
2145 		if (nf_ino_p)
2146 			px_ib_intr_enable(px_p, nf_ino_p->ino_cpuid, nf_ino);
2147 		else
2148 			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
2149 			    "reenable PCIe Non Fatal msg intr.\n");
2150 
2151 		/* enable FATAL intr mapping */
2152 		if (f_ino_p)
2153 			px_ib_intr_enable(px_p, f_ino_p->ino_cpuid, f_ino);
2154 		else
2155 			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
2156 			    "reenable PCIe Fatal msg intr.\n");
2157 
2158 		mutex_exit(&ib_p->ib_ino_lst_mutex);
2159 
2160 		/* re-enable corr/nonfatal/fatal "msg not enabled" errors */
2161 		CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE, (imu_log_enable |
2162 		    (imu_log_mask & px_imu_log_mask)));
2163 		CSR_XS(csr_base, IMU_INTERRUPT_ENABLE, (imu_intr_enable |
2164 		    (imu_intr_mask & px_imu_intr_mask)));
2165 
2166 		break;
2167 	}
2168 
2169 	return (B_TRUE);
2170 }
2171 
2172 /*
2173  * add cpr callback
2174  */
2175 void
2176 px_cpr_add_callb(px_t *px_p)
2177 {
2178 	px_p->px_cprcb_id = callb_add(px_cpr_callb, (void *)px_p,
2179 	    CB_CL_CPR_POST_USER, "px_cpr");
2180 }
2181 
2182 /*
2183  * remove cpr callback
2184  */
2185 void
2186 px_cpr_rem_callb(px_t *px_p)
2187 {
2188 	(void) callb_delete(px_p->px_cprcb_id);
2189 }
2190 
2191 /*ARGSUSED*/
2192 int
2193 px_lib_hotplug_init(dev_info_t *dip, void *arg)
2194 {
2195 	return (DDI_ENOTSUP);
2196 }
2197 
2198 /*ARGSUSED*/
2199 void
2200 px_lib_hotplug_uninit(dev_info_t *dip)
2201 {
2202 }
2203