/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/modctl.h>
#include <sys/disp.h>
#include <sys/stat.h>
#include <sys/ddi_impldefs.h>
#include <sys/vmem.h>
#include <sys/iommutsb.h>
#include <sys/cpuvar.h>
#include <sys/ivintr.h>
#include <sys/byteorder.h>
#include <sys/hotplug/pci/pciehpc.h>
#include <px_obj.h>
#include <pcie_pwr.h>
#include <px_regs.h>
#include <px_csr.h>
#include <sys/machsystm.h>
#include "px_lib4u.h"
#include "px_err.h"

#pragma weak jbus_stst_order

extern void jbus_stst_order();

ulong_t px_mmu_dvma_end = 0xfffffffful;
uint_t px_ranges_phi_mask = 0xfffffffful;

static int px_goto_l23ready(px_t *px_p);
static int px_goto_l0(px_t *px_p);
static int px_pre_pwron_check(px_t *px_p);
static uint32_t px_identity_chip(px_t *px_p);
static boolean_t px_cpr_callb(void *arg, int code);

/*
 * px_lib_map_regs
 *
 * This function is called from the attach routine to map the registers
 * accessed by this driver.
 *
 * used by: px_attach()
 *
 * return value: DDI_FAILURE on failure
 */
int
px_lib_map_regs(pxu_t *pxu_p, dev_info_t *dip)
{
	ddi_device_acc_attr_t	attr;
	px_reg_bank_t		reg_bank = PX_REG_CSR;

	DBG(DBG_ATTACH, dip, "px_lib_map_regs: pxu_p:0x%p, dip 0x%p\n",
		pxu_p, dip);

	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;

	/*
	 * PCI CSR Base
	 */
	if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank],
	    0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) {
		goto fail;
	}

	reg_bank++;

	/*
	 * XBUS CSR Base
	 */
	if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank],
	    0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) {
		goto fail;
	}

	pxu_p->px_address[reg_bank] -= FIRE_CONTROL_STATUS;

done:
	for (; reg_bank >= PX_REG_CSR; reg_bank--) {
		DBG(DBG_ATTACH, dip, "reg_bank 0x%x address 0x%p\n",
		    reg_bank, pxu_p->px_address[reg_bank]);
	}

	return (DDI_SUCCESS);

fail:
	cmn_err(CE_WARN, "%s%d: unable to map reg entry %d\n",
	    ddi_driver_name(dip), ddi_get_instance(dip), reg_bank);

	for (reg_bank--; reg_bank >= PX_REG_CSR; reg_bank--) {
		pxu_p->px_address[reg_bank] = NULL;
		ddi_regs_map_free(&pxu_p->px_ac[reg_bank]);
	}

	return (DDI_FAILURE);
}

/*
 * px_lib_unmap_regs:
 *
 * This routine unmaps the registers mapped by px_lib_map_regs.
 *
 * used by: px_detach(), and error conditions in px_attach()
 *
 * return value: none
 */
void
px_lib_unmap_regs(pxu_t *pxu_p)
{
	int i;

	for (i = 0; i < PX_REG_MAX; i++) {
		if (pxu_p->px_ac[i])
			ddi_regs_map_free(&pxu_p->px_ac[i]);
	}
}

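/*
 * px_lib_dev_init:
 *
 * Platform specific initialization performed at attach time:
 * identify the chip revision, allocate the pxu_t soft state and
 * link it to the px state structure, map the CSR banks, set up
 * the IOMMU TSB, publish the "virtual-dma" property, initialize
 * the Fire hardware blocks and enable the per-block error
 * registers.
 *
 * used by: px_attach()
 *
 * return value: DDI_FAILURE on failure
 */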
int
px_lib_dev_init(dev_info_t *dip, devhandle_t *dev_hdl)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	caddr_t		xbc_csr_base, csr_base;
	px_dvma_range_prop_t	px_dvma_range;
	uint32_t	chip_id;
	pxu_t		*pxu_p;

	DBG(DBG_ATTACH, dip, "px_lib_dev_init: dip 0x%p\n", dip);

	if ((chip_id = px_identity_chip(px_p)) == PX_CHIP_UNIDENTIFIED)
		return (DDI_FAILURE);

	switch (chip_id) {
	case FIRE_VER_10:
		cmn_err(CE_WARN, "FIRE Hardware Version 1.0 is not supported");
		return (DDI_FAILURE);
	case FIRE_VER_20:
		DBG(DBG_ATTACH, dip, "FIRE Hardware Version 2.0\n");
		break;
	default:
		cmn_err(CE_WARN, "%s%d: FIRE Hardware Version Unknown\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	/*
	 * Allocate platform specific structure and link it to
	 * the px state structure.
	 */
	pxu_p = kmem_zalloc(sizeof (pxu_t), KM_SLEEP);
	pxu_p->chip_id = chip_id;
	pxu_p->portid  = ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "portid", -1);

	/* Map in the registers */
	if (px_lib_map_regs(pxu_p, dip) == DDI_FAILURE) {
		kmem_free(pxu_p, sizeof (pxu_t));

		return (DDI_FAILURE);
	}

	xbc_csr_base = (caddr_t)pxu_p->px_address[PX_REG_XBC];
	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];

	pxu_p->tsb_cookie = iommu_tsb_alloc(pxu_p->portid);
	pxu_p->tsb_size = iommu_tsb_cookie_to_size(pxu_p->tsb_cookie);
	pxu_p->tsb_vaddr = iommu_tsb_cookie_to_va(pxu_p->tsb_cookie);

	/*
	 * Create "virtual-dma" property to support child devices
	 * needing to know DVMA range.
	 */
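	/*
	 * Each 8-byte TSB entry maps one MMU page, so (tsb_size >> 3)
	 * is the number of TSB entries, and the DVMA window spans that
	 * many pages ending at px_mmu_dvma_end.
	 */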
	px_dvma_range.dvma_base = (uint32_t)px_mmu_dvma_end + 1
	    - ((pxu_p->tsb_size >> 3) << MMU_PAGE_SHIFT);
	px_dvma_range.dvma_len = (uint32_t)
	    px_mmu_dvma_end - px_dvma_range.dvma_base + 1;

	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
		"virtual-dma", (caddr_t)&px_dvma_range,
		sizeof (px_dvma_range_prop_t));
	/*
	 * Initialize all Fire hardware specific blocks.
	 */
	hvio_cb_init(xbc_csr_base, pxu_p);
	hvio_ib_init(csr_base, pxu_p);
	hvio_pec_init(csr_base, pxu_p);
	hvio_mmu_init(csr_base, pxu_p);

	px_p->px_plat_p = (void *)pxu_p;

	/*
	 * Initialize all the interrupt handlers
	 */
	px_err_reg_enable(px_p, PX_ERR_JBC);
	px_err_reg_enable(px_p, PX_ERR_MMU);
	px_err_reg_enable(px_p, PX_ERR_IMU);
	px_err_reg_enable(px_p, PX_ERR_TLU_UE);
	px_err_reg_enable(px_p, PX_ERR_TLU_CE);
	px_err_reg_enable(px_p, PX_ERR_TLU_OE);
	px_err_reg_enable(px_p, PX_ERR_ILU);
	px_err_reg_enable(px_p, PX_ERR_LPU_LINK);
	px_err_reg_enable(px_p, PX_ERR_LPU_PHY);
	px_err_reg_enable(px_p, PX_ERR_LPU_RX);
	px_err_reg_enable(px_p, PX_ERR_LPU_TX);
	px_err_reg_enable(px_p, PX_ERR_LPU_LTSSM);
	px_err_reg_enable(px_p, PX_ERR_LPU_GIGABLZ);

	/* Initialize the device handle */
	*dev_hdl = (devhandle_t)csr_base;

	DBG(DBG_ATTACH, dip, "px_lib_dev_init: dev_hdl 0x%llx\n", *dev_hdl);

	return (DDI_SUCCESS);
}

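/*
 * px_lib_dev_fini:
 *
 * Undo px_lib_dev_init: disable the per-block error registers,
 * free the IOMMU TSB, unmap the CSR banks and free the pxu_t
 * soft state.
 *
 * used by: px_detach()
 */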
int
px_lib_dev_fini(dev_info_t *dip)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;

	DBG(DBG_DETACH, dip, "px_lib_dev_fini: dip 0x%p\n", dip);

	/*
	 * Deinitialize all the interrupt handlers
	 */
	px_err_reg_disable(px_p, PX_ERR_JBC);
	px_err_reg_disable(px_p, PX_ERR_MMU);
	px_err_reg_disable(px_p, PX_ERR_IMU);
	px_err_reg_disable(px_p, PX_ERR_TLU_UE);
	px_err_reg_disable(px_p, PX_ERR_TLU_CE);
	px_err_reg_disable(px_p, PX_ERR_TLU_OE);
	px_err_reg_disable(px_p, PX_ERR_ILU);
	px_err_reg_disable(px_p, PX_ERR_LPU_LINK);
	px_err_reg_disable(px_p, PX_ERR_LPU_PHY);
	px_err_reg_disable(px_p, PX_ERR_LPU_RX);
	px_err_reg_disable(px_p, PX_ERR_LPU_TX);
	px_err_reg_disable(px_p, PX_ERR_LPU_LTSSM);
	px_err_reg_disable(px_p, PX_ERR_LPU_GIGABLZ);

	iommu_tsb_free(pxu_p->tsb_cookie);

	px_lib_unmap_regs((pxu_t *)px_p->px_plat_p);
	kmem_free(px_p->px_plat_p, sizeof (pxu_t));
	px_p->px_plat_p = NULL;

	return (DDI_SUCCESS);
}

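/*
 * Interrupt management functions:
 *
 * Thin wrappers that forward each request to the corresponding
 * hvio_intr_*() routine and translate its H_EOK/error return code
 * into DDI_SUCCESS/DDI_FAILURE for the common px code.
 */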
/*ARGSUSED*/
int
px_lib_intr_devino_to_sysino(dev_info_t *dip, devino_t devino,
    sysino_t *sysino)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: dip 0x%p "
	    "devino 0x%x\n", dip, devino);

	if ((ret = hvio_intr_devino_to_sysino(DIP_TO_HANDLE(dip),
	    pxu_p, devino, sysino)) != H_EOK) {
		DBG(DBG_LIB_INT, dip,
		    "hvio_intr_devino_to_sysino failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: sysino 0x%llx\n",
	    *sysino);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_getvalid(dev_info_t *dip, sysino_t sysino,
    intr_valid_state_t *intr_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: dip 0x%p sysino 0x%llx\n",
	    dip, sysino);

	if ((ret = hvio_intr_getvalid(DIP_TO_HANDLE(dip),
	    sysino, intr_valid_state)) != H_EOK) {
		DBG(DBG_LIB_INT, dip, "hvio_intr_getvalid failed, ret 0x%lx\n",
		    ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: intr_valid_state 0x%x\n",
	    *intr_valid_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_setvalid(dev_info_t *dip, sysino_t sysino,
    intr_valid_state_t intr_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_setvalid: dip 0x%p sysino 0x%llx "
	    "intr_valid_state 0x%x\n", dip, sysino, intr_valid_state);

	if ((ret = hvio_intr_setvalid(DIP_TO_HANDLE(dip),
	    sysino, intr_valid_state)) != H_EOK) {
		DBG(DBG_LIB_INT, dip, "hvio_intr_setvalid failed, ret 0x%lx\n",
		    ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_getstate(dev_info_t *dip, sysino_t sysino,
    intr_state_t *intr_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: dip 0x%p sysino 0x%llx\n",
	    dip, sysino);

	if ((ret = hvio_intr_getstate(DIP_TO_HANDLE(dip),
	    sysino, intr_state)) != H_EOK) {
		DBG(DBG_LIB_INT, dip, "hvio_intr_getstate failed, ret 0x%lx\n",
		    ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: intr_state 0x%x\n",
	    *intr_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_setstate(dev_info_t *dip, sysino_t sysino,
    intr_state_t intr_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_setstate: dip 0x%p sysino 0x%llx "
	    "intr_state 0x%x\n", dip, sysino, intr_state);

	if ((ret = hvio_intr_setstate(DIP_TO_HANDLE(dip),
	    sysino, intr_state)) != H_EOK) {
		DBG(DBG_LIB_INT, dip, "hvio_intr_setstate failed, ret 0x%lx\n",
		    ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_gettarget(dev_info_t *dip, sysino_t sysino, cpuid_t *cpuid)
{
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: dip 0x%p sysino 0x%llx\n",
	    dip, sysino);

	if ((ret = hvio_intr_gettarget(DIP_TO_HANDLE(dip),
	    sysino, cpuid)) != H_EOK) {
		DBG(DBG_LIB_INT, dip, "hvio_intr_gettarget failed, ret 0x%lx\n",
		    ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: cpuid 0x%x\n", *cpuid);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_settarget(dev_info_t *dip, sysino_t sysino, cpuid_t cpuid)
{
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_settarget: dip 0x%p sysino 0x%llx "
	    "cpuid 0x%x\n", dip, sysino, cpuid);

	if ((ret = hvio_intr_settarget(DIP_TO_HANDLE(dip),
	    sysino, cpuid)) != H_EOK) {
		DBG(DBG_LIB_INT, dip, "hvio_intr_settarget failed, ret 0x%lx\n",
		    ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_reset(dev_info_t *dip)
{
	devino_t	ino;
	sysino_t	sysino;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_reset: dip 0x%p\n", dip);

	/* Reset all Interrupts */
	for (ino = 0; ino < INTERRUPT_MAPPING_ENTRIES; ino++) {
		if (px_lib_intr_devino_to_sysino(dip, ino,
		    &sysino) != DDI_SUCCESS)
			return (BF_FATAL);

		if (px_lib_intr_setstate(dip, sysino,
		    INTR_IDLE_STATE) != DDI_SUCCESS)
			return (BF_FATAL);
	}

	return (BF_NONE);
}

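/*
 * DMA/IOMMU functions:
 *
 * Wrappers around the hvio_iommu_*() routines that load, unload
 * and query DVMA mappings in the Fire IOMMU on behalf of the
 * common px DMA code.
 */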
/*ARGSUSED*/
int
px_lib_iommu_map(dev_info_t *dip, tsbid_t tsbid, pages_t pages,
    io_attributes_t io_attributes, void *addr, size_t pfn_index,
    int flag)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	uint64_t	ret;

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: dip 0x%p tsbid 0x%llx "
	    "pages 0x%x attr 0x%x addr 0x%p pfn_index 0x%llx, flag 0x%x\n",
	    dip, tsbid, pages, io_attributes, addr, pfn_index, flag);

	if ((ret = hvio_iommu_map(px_p->px_dev_hdl, pxu_p, tsbid, pages,
	    io_attributes, addr, pfn_index, flag)) != H_EOK) {
		DBG(DBG_LIB_DMA, dip,
		    "px_lib_iommu_map failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_iommu_demap(dev_info_t *dip, tsbid_t tsbid, pages_t pages)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	uint64_t	ret;

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_demap: dip 0x%p tsbid 0x%llx "
	    "pages 0x%x\n", dip, tsbid, pages);

	if ((ret = hvio_iommu_demap(px_p->px_dev_hdl, pxu_p, tsbid, pages))
	    != H_EOK) {
		DBG(DBG_LIB_DMA, dip,
		    "px_lib_iommu_demap failed, ret 0x%lx\n", ret);

		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_iommu_getmap(dev_info_t *dip, tsbid_t tsbid,
    io_attributes_t *attributes_p, r_addr_t *r_addr_p)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
	uint64_t	ret;

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: dip 0x%p tsbid 0x%llx\n",
	    dip, tsbid);

	if ((ret = hvio_iommu_getmap(DIP_TO_HANDLE(dip), pxu_p, tsbid,
	    attributes_p, r_addr_p)) != H_EOK) {
		DBG(DBG_LIB_DMA, dip,
		    "hvio_iommu_getmap failed, ret 0x%lx\n", ret);

		return ((ret == H_ENOMAP) ? DDI_DMA_NOMAPPING : DDI_FAILURE);
	}

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: attr 0x%x r_addr 0x%llx\n",
	    *attributes_p, *r_addr_p);

	return (DDI_SUCCESS);
}


/*
 * Checks DMA attributes against the system bypass ranges.
 * The bypass range is determined by the hardware; return it so the
 * common code can do generic checking against it.
 */
/*ARGSUSED*/
int
px_lib_dma_bypass_rngchk(ddi_dma_attr_t *attrp, uint64_t *lo_p, uint64_t *hi_p)
{
	*lo_p = MMU_BYPASS_BASE;
	*hi_p = MMU_BYPASS_END;

	return (DDI_SUCCESS);
}


/*ARGSUSED*/
int
px_lib_iommu_getbypass(dev_info_t *dip, r_addr_t ra,
    io_attributes_t io_attributes, io_addr_t *io_addr_p)
{
	uint64_t	ret;

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: dip 0x%p ra 0x%llx "
	    "attr 0x%x\n", dip, ra, io_attributes);

	if ((ret = hvio_iommu_getbypass(DIP_TO_HANDLE(dip), ra,
	    io_attributes, io_addr_p)) != H_EOK) {
		DBG(DBG_LIB_DMA, dip,
		    "hvio_iommu_getbypass failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: io_addr 0x%llx\n",
	    *io_addr_p);

	return (DDI_SUCCESS);
}

/*
 * bus dma sync entry point.
 */
/*ARGSUSED*/
int
px_lib_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
	off_t off, size_t len, uint_t cache_flags)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;

	DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: dip 0x%p rdip 0x%p "
	    "handle 0x%llx off 0x%x len 0x%x flags 0x%x\n",
	    dip, rdip, handle, off, len, cache_flags);

	/*
	 * jbus_stst_order is found only in certain cpu modules.
	 * Just return success if not present.
	 */
	if (&jbus_stst_order == NULL)
		return (DDI_SUCCESS);

	if (!(mp->dmai_flags & PX_DMAI_FLAGS_INUSE)) {
		cmn_err(CE_WARN, "%s%d: Unbound dma handle %p.",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), (void *)mp);

		return (DDI_FAILURE);
	}

	if (mp->dmai_flags & PX_DMAI_FLAGS_NOSYNC)
		return (DDI_SUCCESS);

	/*
	 * No flush is needed when sending data from memory to device.
	 * There is nothing to do to "sync" memory to what the device
	 * would already see.
	 */
	if (!(mp->dmai_rflags & DDI_DMA_READ) ||
	    ((cache_flags & PX_DMA_SYNC_DDI_FLAGS) == DDI_DMA_SYNC_FORDEV))
		return (DDI_SUCCESS);

	/*
	 * Perform the necessary cpu workaround to ensure jbus ordering.
	 * The CPU's internal "invalidate FIFOs" are flushed.
	 */

#if !defined(lint)
	kpreempt_disable();
#endif
	jbus_stst_order();
#if !defined(lint)
	kpreempt_enable();
#endif
	return (DDI_SUCCESS);
}

/*
 * MSIQ Functions:
 */
/*ARGSUSED*/
int
px_lib_msiq_init(dev_info_t *dip)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
	caddr_t		msiq_addr;
	px_dvma_addr_t	pg_index;
	size_t		size;
	int		ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_init: dip 0x%p\n", dip);

	/*
	 * Map the EQ memory into the Fire MMU (has to be 512KB aligned)
	 * and then initialize the base address register.
	 *
	 * Allocate entries from the Fire IOMMU so that the resulting
	 * address is properly aligned.  Calculate the index of the first
	 * allocated entry.  Note: The size of the mapping is assumed to
	 * be a multiple of the page size.
	 */
	msiq_addr = (caddr_t)(((uint64_t)msiq_state_p->msiq_buf_p +
	    (MMU_PAGE_SIZE - 1)) >> MMU_PAGE_SHIFT << MMU_PAGE_SHIFT);

	size = msiq_state_p->msiq_cnt *
	    msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);

	pxu_p->msiq_mapped_p = vmem_xalloc(px_p->px_mmu_p->mmu_dvma_map,
	    size, (512 * 1024), 0, 0, NULL, NULL, VM_NOSLEEP | VM_BESTFIT);

	if (pxu_p->msiq_mapped_p == NULL)
		return (DDI_FAILURE);

	pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p,
	    MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p));

	if ((ret = px_lib_iommu_map(px_p->px_dip, PCI_TSBID(0, pg_index),
	    MMU_BTOP(size), PCI_MAP_ATTR_WRITE, (void *)msiq_addr, 0,
	    MMU_MAP_BUF)) != DDI_SUCCESS) {
		DBG(DBG_LIB_MSIQ, dip,
		    "px_lib_iommu_map failed, ret 0x%lx\n", ret);

		(void) px_lib_msiq_fini(dip);
		return (DDI_FAILURE);
	}

	(void) hvio_msiq_init(DIP_TO_HANDLE(dip), pxu_p);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_fini(dev_info_t *dip)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
	px_dvma_addr_t	pg_index;
	size_t		size;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_fini: dip 0x%p\n", dip);

	/*
	 * Unmap and free the EQ memory that had been mapped
	 * into the Fire IOMMU.
	 */
	size = msiq_state_p->msiq_cnt *
	    msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);

	pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p,
	    MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p));

	(void) px_lib_iommu_demap(px_p->px_dip,
	    PCI_TSBID(0, pg_index), MMU_BTOP(size));

	/* Free the entries from the Fire MMU */
	vmem_xfree(px_p->px_mmu_p->mmu_dvma_map,
	    (void *)pxu_p->msiq_mapped_p, size);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_info(dev_info_t *dip, msiqid_t msiq_id, r_addr_t *ra_p,
    uint_t *msiq_rec_cnt_p)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
	uint64_t	*msiq_addr;
	size_t		msiq_size;

	DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: dip 0x%p msiq_id 0x%x\n",
	    dip, msiq_id);

	msiq_addr = (uint64_t *)(((uint64_t)msiq_state_p->msiq_buf_p +
	    (MMU_PAGE_SIZE - 1)) >> MMU_PAGE_SHIFT << MMU_PAGE_SHIFT);
	msiq_size = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);
	*ra_p = (r_addr_t)((caddr_t)msiq_addr + (msiq_id * msiq_size));

	*msiq_rec_cnt_p = msiq_state_p->msiq_rec_cnt;

	DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: ra_p 0x%llx msiq_rec_cnt 0x%x\n",
	    *ra_p, *msiq_rec_cnt_p);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_getvalid(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_valid_state_t *msiq_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: dip 0x%p msiq_id 0x%x\n",
	    dip, msiq_id);

	if ((ret = hvio_msiq_getvalid(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_getvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: msiq_valid_state 0x%x\n",
	    *msiq_valid_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_setvalid(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_valid_state_t msiq_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setvalid: dip 0x%p msiq_id 0x%x "
	    "msiq_valid_state 0x%x\n", dip, msiq_id, msiq_valid_state);

	if ((ret = hvio_msiq_setvalid(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_setvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_getstate(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_state_t *msiq_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: dip 0x%p msiq_id 0x%x\n",
	    dip, msiq_id);

	if ((ret = hvio_msiq_getstate(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_state)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_getstate failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: msiq_state 0x%x\n",
	    *msiq_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_setstate(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_state_t msiq_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setstate: dip 0x%p msiq_id 0x%x "
	    "msiq_state 0x%x\n", dip, msiq_id, msiq_state);

	if ((ret = hvio_msiq_setstate(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_state)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_setstate failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_gethead(dev_info_t *dip, msiqid_t msiq_id,
    msiqhead_t *msiq_head)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: dip 0x%p msiq_id 0x%x\n",
	    dip, msiq_id);

	if ((ret = hvio_msiq_gethead(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_head)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_gethead failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: msiq_head 0x%x\n",
	    *msiq_head);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_sethead(dev_info_t *dip, msiqid_t msiq_id,
    msiqhead_t msiq_head)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_sethead: dip 0x%p msiq_id 0x%x "
	    "msiq_head 0x%x\n", dip, msiq_id, msiq_head);

	if ((ret = hvio_msiq_sethead(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_head)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_sethead failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_gettail(dev_info_t *dip, msiqid_t msiq_id,
    msiqtail_t *msiq_tail)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: dip 0x%p msiq_id 0x%x\n",
	    dip, msiq_id);

	if ((ret = hvio_msiq_gettail(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_tail)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_gettail failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: msiq_tail 0x%x\n",
	    *msiq_tail);

	return (DDI_SUCCESS);
}

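/*
 * px_lib_get_msiq_rec:
 *
 * Translate the Fire-format EQ record at the current queue head
 * into the generic msiq_rec_t consumed by the common MSIQ code.
 * The 64-bit MSI address is reassembled from the two hardware
 * fields as (eq_rec_addr1 << 16) | (eq_rec_addr0 << 2).
 */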
/*ARGSUSED*/
void
px_lib_get_msiq_rec(dev_info_t *dip, px_msiq_t *msiq_p, msiq_rec_t *msiq_rec_p)
{
	eq_rec_t	*eq_rec_p = (eq_rec_t *)msiq_p->msiq_curr;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: dip 0x%p eq_rec_p 0x%p\n",
	    dip, eq_rec_p);

	if (!eq_rec_p->eq_rec_fmt_type) {
		/* Set msiq_rec_type to zero */
		msiq_rec_p->msiq_rec_type = 0;

		return;
	}

	DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: EQ RECORD, "
	    "eq_rec_rid 0x%llx eq_rec_fmt_type 0x%llx "
	    "eq_rec_len 0x%llx eq_rec_addr0 0x%llx "
	    "eq_rec_addr1 0x%llx eq_rec_data0 0x%llx "
	    "eq_rec_data1 0x%llx\n", eq_rec_p->eq_rec_rid,
	    eq_rec_p->eq_rec_fmt_type, eq_rec_p->eq_rec_len,
	    eq_rec_p->eq_rec_addr0, eq_rec_p->eq_rec_addr1,
	    eq_rec_p->eq_rec_data0, eq_rec_p->eq_rec_data1);

	/*
	 * Only the upper 4 bits of eq_rec_fmt_type are used
	 * to identify the EQ record type.
	 */
	switch (eq_rec_p->eq_rec_fmt_type >> 3) {
	case EQ_REC_MSI32:
		msiq_rec_p->msiq_rec_type = MSI32_REC;

		msiq_rec_p->msiq_rec_data.msi.msi_data =
		    eq_rec_p->eq_rec_data0;
		break;
	case EQ_REC_MSI64:
		msiq_rec_p->msiq_rec_type = MSI64_REC;

		msiq_rec_p->msiq_rec_data.msi.msi_data =
		    eq_rec_p->eq_rec_data0;
		break;
	case EQ_REC_MSG:
		msiq_rec_p->msiq_rec_type = MSG_REC;

		msiq_rec_p->msiq_rec_data.msg.msg_route =
		    eq_rec_p->eq_rec_fmt_type & 7;
		msiq_rec_p->msiq_rec_data.msg.msg_targ = eq_rec_p->eq_rec_rid;
		msiq_rec_p->msiq_rec_data.msg.msg_code = eq_rec_p->eq_rec_data0;
		break;
	default:
		cmn_err(CE_WARN, "%s%d: px_lib_get_msiq_rec: "
		    "0x%x is an unknown EQ record type",
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    (int)eq_rec_p->eq_rec_fmt_type);
		break;
	}

	msiq_rec_p->msiq_rec_rid = eq_rec_p->eq_rec_rid;
	msiq_rec_p->msiq_rec_msi_addr = ((eq_rec_p->eq_rec_addr1 << 16) |
	    (eq_rec_p->eq_rec_addr0 << 2));

	/* Zero out the eq_rec_fmt_type field */
	eq_rec_p->eq_rec_fmt_type = 0;
}

/*
 * MSI Functions:
 */
/*ARGSUSED*/
int
px_lib_msi_init(dev_info_t *dip)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	px_msi_state_t	*msi_state_p = &px_p->px_ib_p->ib_msi_state;
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_init: dip 0x%p\n", dip);

	if ((ret = hvio_msi_init(DIP_TO_HANDLE(dip),
	    msi_state_p->msi_addr32, msi_state_p->msi_addr64)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip, "hvio_msi_init failed, ret 0x%lx\n",
		    ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_getmsiq(dev_info_t *dip, msinum_t msi_num,
    msiqid_t *msiq_id)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: dip 0x%p msi_num 0x%x\n",
	    dip, msi_num);

	if ((ret = hvio_msi_getmsiq(DIP_TO_HANDLE(dip),
	    msi_num, msiq_id)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_getmsiq failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: msiq_id 0x%x\n",
	    *msiq_id);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_setmsiq(dev_info_t *dip, msinum_t msi_num,
    msiqid_t msiq_id, msi_type_t msitype)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setmsiq: dip 0x%p msi_num 0x%x "
	    "msiq_id 0x%x\n", dip, msi_num, msiq_id);

	if ((ret = hvio_msi_setmsiq(DIP_TO_HANDLE(dip),
	    msi_num, msiq_id)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_setmsiq failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_getvalid(dev_info_t *dip, msinum_t msi_num,
    pci_msi_valid_state_t *msi_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: dip 0x%p msi_num 0x%x\n",
	    dip, msi_num);

	if ((ret = hvio_msi_getvalid(DIP_TO_HANDLE(dip),
	    msi_num, msi_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_getvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: msi_valid_state 0x%x\n",
	    *msi_valid_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_setvalid(dev_info_t *dip, msinum_t msi_num,
    pci_msi_valid_state_t msi_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setvalid: dip 0x%p msi_num 0x%x "
	    "msi_valid_state 0x%x\n", dip, msi_num, msi_valid_state);

	if ((ret = hvio_msi_setvalid(DIP_TO_HANDLE(dip),
	    msi_num, msi_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_setvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_getstate(dev_info_t *dip, msinum_t msi_num,
    pci_msi_state_t *msi_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: dip 0x%p msi_num 0x%x\n",
	    dip, msi_num);

	if ((ret = hvio_msi_getstate(DIP_TO_HANDLE(dip),
	    msi_num, msi_state)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_getstate failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: msi_state 0x%x\n",
	    *msi_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_setstate(dev_info_t *dip, msinum_t msi_num,
    pci_msi_state_t msi_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setstate: dip 0x%p msi_num 0x%x "
	    "msi_state 0x%x\n", dip, msi_num, msi_state);

	if ((ret = hvio_msi_setstate(DIP_TO_HANDLE(dip),
	    msi_num, msi_state)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_setstate failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * MSG Functions:
 */
/*ARGSUSED*/
int
px_lib_msg_getmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
    msiqid_t *msiq_id)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: dip 0x%p msg_type 0x%x\n",
	    dip, msg_type);

	if ((ret = hvio_msg_getmsiq(DIP_TO_HANDLE(dip),
	    msg_type, msiq_id)) != H_EOK) {
		DBG(DBG_LIB_MSG, dip,
		    "hvio_msg_getmsiq failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: msiq_id 0x%x\n",
	    *msiq_id);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msg_setmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
    msiqid_t msiq_id)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_setmsiq: dip 0x%p msg_type 0x%x "
	    "msiq_id 0x%x\n", dip, msg_type, msiq_id);

	if ((ret = hvio_msg_setmsiq(DIP_TO_HANDLE(dip),
	    msg_type, msiq_id)) != H_EOK) {
		DBG(DBG_LIB_MSG, dip,
		    "hvio_msg_setmsiq failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msg_getvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
    pcie_msg_valid_state_t *msg_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: dip 0x%p msg_type 0x%x\n",
	    dip, msg_type);

	if ((ret = hvio_msg_getvalid(DIP_TO_HANDLE(dip), msg_type,
	    msg_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSG, dip,
		    "hvio_msg_getvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: msg_valid_state 0x%x\n",
	    *msg_valid_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msg_setvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
    pcie_msg_valid_state_t msg_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_setvalid: dip 0x%p msg_type 0x%x "
	    "msg_valid_state 0x%x\n", dip, msg_type, msg_valid_state);

	if ((ret = hvio_msg_setvalid(DIP_TO_HANDLE(dip), msg_type,
	    msg_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSG, dip,
		    "hvio_msg_setvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * Suspend/Resume Functions:
 * Currently unsupported by hypervisor
 */
int
px_lib_suspend(dev_info_t *dip)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	devhandle_t	dev_hdl, xbus_dev_hdl;
	uint64_t	ret;

	DBG(DBG_DETACH, dip, "px_lib_suspend: dip 0x%p\n", dip);

	dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR];
	xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC];

	if ((ret = hvio_suspend(dev_hdl, pxu_p)) == H_EOK) {
		px_p->px_cb_p->xbc_attachcnt--;
		if (px_p->px_cb_p->xbc_attachcnt == 0)
			if ((ret = hvio_cb_suspend(xbus_dev_hdl, pxu_p))
			    != H_EOK)
				px_p->px_cb_p->xbc_attachcnt++;
	}

	return ((ret != H_EOK) ? DDI_FAILURE : DDI_SUCCESS);
}

void
px_lib_resume(dev_info_t *dip)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	devhandle_t	dev_hdl, xbus_dev_hdl;
	devino_t	pec_ino = px_p->px_inos[PX_INTR_PEC];
	devino_t	xbc_ino = px_p->px_inos[PX_INTR_XBC];

	DBG(DBG_ATTACH, dip, "px_lib_resume: dip 0x%p\n", dip);

	dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR];
	xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC];

	px_p->px_cb_p->xbc_attachcnt++;
	if (px_p->px_cb_p->xbc_attachcnt == 1)
		hvio_cb_resume(dev_hdl, xbus_dev_hdl, xbc_ino, pxu_p);
	hvio_resume(dev_hdl, pec_ino, pxu_p);
}

/*
 * Misc Functions:
 * Currently unsupported by hypervisor
 */
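/*
 * px_lib_get_cb()/px_lib_set_cb() keep a 64-bit value in the JBus
 * scratch register.  The "cb" is, as the names suggest, the common
 * block shared by both leaves of the chip, so either driver instance
 * can recover the pointer stashed by the other.
 */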
uint64_t
px_lib_get_cb(dev_info_t *dip)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;

	return (CSR_XR((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1));
}

void
px_lib_set_cb(dev_info_t *dip, uint64_t val)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;

	CSR_XS((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1, val);
}

/*ARGSUSED*/
int
px_lib_map_vconfig(dev_info_t *dip,
	ddi_map_req_t *mp, pci_config_offset_t off,
		pci_regspec_t *rp, caddr_t *addrp)
{
	/*
	 * No special config space access services in this layer.
	 */
	return (DDI_FAILURE);
}

void
px_lib_map_attr_check(ddi_map_req_t *mp)
{
	ddi_acc_hdl_t *hp = mp->map_handlep;

	/* Fire does not accept byte masks from PIO store merge */
	if (hp->ah_acc.devacc_attr_dataorder == DDI_STORECACHING_OK_ACC)
		hp->ah_acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
}

void
px_lib_clr_errs(px_t *px_p)
{
	px_pec_t	*pec_p = px_p->px_pec_p;
	dev_info_t	*rpdip = px_p->px_dip;
	px_cb_t		*cb_p = px_p->px_cb_p;
	int		err = PX_OK, ret;
	int		acctype = pec_p->pec_safeacc_type;
	ddi_fm_error_t	derr;

	/* Create the derr */
	bzero(&derr, sizeof (ddi_fm_error_t));
	derr.fme_version = DDI_FME_VERSION;
	derr.fme_ena = fm_ena_generate(0, FM_ENA_FMT1);
	derr.fme_flag = acctype;

	if (acctype == DDI_FM_ERR_EXPECTED) {
		derr.fme_status = DDI_FM_NONFATAL;
		ndi_fm_acc_err_set(pec_p->pec_acc_hdl, &derr);
	}

	mutex_enter(&cb_p->xbc_fm_mutex);

	/* send ereport/handle/clear fire registers */
	err = px_err_handle(px_p, &derr, PX_LIB_CALL, B_TRUE);

	/* Check all child devices for errors */
	ret = ndi_fm_handler_dispatch(rpdip, NULL, &derr);

	mutex_exit(&cb_p->xbc_fm_mutex);

	/*
	 * PX_FATAL_HW indicates a condition recovered from Fatal-Reset,
	 * therefore it does not cause a panic.
	 */
	if ((err & (PX_FATAL_GOS | PX_FATAL_SW)) || (ret == DDI_FM_FATAL))
		PX_FM_PANIC("Fatal System Port Error has occurred\n");
}

#ifdef  DEBUG
int	px_peekfault_cnt = 0;
int	px_pokefault_cnt = 0;
#endif  /* DEBUG */

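/*
 * px_lib_do_poke:
 *
 * Perform a single protected write: arm an on_trap() region with
 * the poke fault trampoline, issue the store, then clear and check
 * the Fire error registers so that a faulting access is reported
 * as DDI_FAILURE instead of taking the system down.
 */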
/*ARGSUSED*/
static int
px_lib_do_poke(dev_info_t *dip, dev_info_t *rdip,
    peekpoke_ctlops_t *in_args)
{
	px_t *px_p = DIP_TO_STATE(dip);
	px_pec_t *pec_p = px_p->px_pec_p;
	int err = DDI_SUCCESS;
	on_trap_data_t otd;

	mutex_enter(&pec_p->pec_pokefault_mutex);
	pec_p->pec_ontrap_data = &otd;
	pec_p->pec_safeacc_type = DDI_FM_ERR_POKE;

	/* Set up protected environment. */
	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		uintptr_t tramp = otd.ot_trampoline;

		otd.ot_trampoline = (uintptr_t)&poke_fault;
		err = do_poke(in_args->size, (void *)in_args->dev_addr,
		    (void *)in_args->host_addr);
		otd.ot_trampoline = tramp;
	} else
		err = DDI_FAILURE;

	px_lib_clr_errs(px_p);

	if (otd.ot_trap & OT_DATA_ACCESS)
		err = DDI_FAILURE;

	/* Take down protected environment. */
	no_trap();

	pec_p->pec_ontrap_data = NULL;
	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
	mutex_exit(&pec_p->pec_pokefault_mutex);

#ifdef  DEBUG
	if (err == DDI_FAILURE)
		px_pokefault_cnt++;
#endif
	return (err);
}

/*ARGSUSED*/
static int
px_lib_do_caut_put(dev_info_t *dip, dev_info_t *rdip,
    peekpoke_ctlops_t *cautacc_ctlops_arg)
{
	size_t size = cautacc_ctlops_arg->size;
	uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
	uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
	size_t repcount = cautacc_ctlops_arg->repcount;
	uint_t flags = cautacc_ctlops_arg->flags;

	px_t *px_p = DIP_TO_STATE(dip);
	px_pec_t *pec_p = px_p->px_pec_p;
	int err = DDI_SUCCESS;

	/*
	 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault
	 * mutex.
	 */
	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);

	pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap;
	pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED;
	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;

	if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
		for (; repcount; repcount--) {
			switch (size) {

			case sizeof (uint8_t):
				i_ddi_put8(hp, (uint8_t *)dev_addr,
				    *(uint8_t *)host_addr);
				break;

			case sizeof (uint16_t):
				i_ddi_put16(hp, (uint16_t *)dev_addr,
				    *(uint16_t *)host_addr);
				break;

			case sizeof (uint32_t):
				i_ddi_put32(hp, (uint32_t *)dev_addr,
				    *(uint32_t *)host_addr);
				break;

			case sizeof (uint64_t):
				i_ddi_put64(hp, (uint64_t *)dev_addr,
				    *(uint64_t *)host_addr);
				break;
			}

			host_addr += size;

			if (flags == DDI_DEV_AUTOINCR)
				dev_addr += size;

			px_lib_clr_errs(px_p);

			if (pec_p->pec_ontrap_data->ot_trap & OT_DATA_ACCESS) {
				err = DDI_FAILURE;
#ifdef  DEBUG
				px_pokefault_cnt++;
#endif
				break;
			}
		}
	}

	i_ddi_notrap((ddi_acc_handle_t)hp);
	pec_p->pec_ontrap_data = NULL;
	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;

	return (err);
}

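/*
 * px_lib_ctlops_poke:
 *
 * Dispatch a poke request: accesses made through a cautious access
 * handle take the ddi_caut path; all others use the one-shot
 * protected poke.
 */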
int
px_lib_ctlops_poke(dev_info_t *dip, dev_info_t *rdip,
    peekpoke_ctlops_t *in_args)
{
	return (in_args->handle ? px_lib_do_caut_put(dip, rdip, in_args) :
	    px_lib_do_poke(dip, rdip, in_args));
}


/*ARGSUSED*/
static int
px_lib_do_peek(dev_info_t *dip, peekpoke_ctlops_t *in_args)
{
	px_t *px_p = DIP_TO_STATE(dip);
	px_pec_t *pec_p = px_p->px_pec_p;
	int err = DDI_SUCCESS;
	on_trap_data_t otd;

	mutex_enter(&pec_p->pec_pokefault_mutex);
	pec_p->pec_safeacc_type = DDI_FM_ERR_PEEK;

	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		uintptr_t tramp = otd.ot_trampoline;

		otd.ot_trampoline = (uintptr_t)&peek_fault;
		err = do_peek(in_args->size, (void *)in_args->dev_addr,
		    (void *)in_args->host_addr);
		otd.ot_trampoline = tramp;
	} else
		err = DDI_FAILURE;

	no_trap();
	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
	mutex_exit(&pec_p->pec_pokefault_mutex);

#ifdef  DEBUG
	if (err == DDI_FAILURE)
		px_peekfault_cnt++;
#endif
	return (err);
}


static int
px_lib_do_caut_get(dev_info_t *dip, peekpoke_ctlops_t *cautacc_ctlops_arg)
{
	size_t size = cautacc_ctlops_arg->size;
	uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
	uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
	size_t repcount = cautacc_ctlops_arg->repcount;
	uint_t flags = cautacc_ctlops_arg->flags;

	px_t *px_p = DIP_TO_STATE(dip);
	px_pec_t *pec_p = px_p->px_pec_p;
	int err = DDI_SUCCESS;

	/*
	 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault
	 * mutex.
	 */
	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);

	pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap;
	pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED;
	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;

	if (repcount == 1) {
		if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
			i_ddi_caut_get(size, (void *)dev_addr,
			    (void *)host_addr);
		} else {
			int i;
			uint8_t *ff_addr = (uint8_t *)host_addr;
			for (i = 0; i < size; i++)
				*ff_addr++ = 0xff;

			err = DDI_FAILURE;
#ifdef  DEBUG
			px_peekfault_cnt++;
#endif
		}
	} else {
		if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
			for (; repcount; repcount--) {
				i_ddi_caut_get(size, (void *)dev_addr,
				    (void *)host_addr);

				host_addr += size;

				if (flags == DDI_DEV_AUTOINCR)
					dev_addr += size;
			}
		} else {
			err = DDI_FAILURE;
#ifdef  DEBUG
			px_peekfault_cnt++;
#endif
		}
	}

	i_ddi_notrap((ddi_acc_handle_t)hp);
	pec_p->pec_ontrap_data = NULL;
	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;

	return (err);
}

/*ARGSUSED*/
int
px_lib_ctlops_peek(dev_info_t *dip, dev_info_t *rdip,
    peekpoke_ctlops_t *in_args, void *result)
{
	result = (void *)in_args->host_addr;
	return (in_args->handle ? px_lib_do_caut_get(dip, in_args) :
	    px_lib_do_peek(dip, in_args));
}

/*
 * implements PPM interface
 */
int
px_lib_pmctl(int cmd, px_t *px_p)
{
	ASSERT((cmd & ~PPMREQ_MASK) == PPMREQ);
	switch (cmd) {
	case PPMREQ_PRE_PWR_OFF:
		/*
		 * Currently there is no device power management for
		 * the root complex (fire). When there is, we need to
		 * make sure that it is at full power before trying to
		 * send the PME_Turn_Off message.
		 */
		DBG(DBG_PWR, px_p->px_dip,
		    "ioctl: request to send PME_Turn_Off\n");
		return (px_goto_l23ready(px_p));

	case PPMREQ_PRE_PWR_ON:
		DBG(DBG_PWR, px_p->px_dip, "ioctl: PRE_PWR_ON request\n");
		return (px_pre_pwron_check(px_p));

	case PPMREQ_POST_PWR_ON:
		DBG(DBG_PWR, px_p->px_dip, "ioctl: POST_PWR_ON request\n");
		return (px_goto_l0(px_p));

	default:
		return (DDI_FAILURE);
	}
}

/*
 * Sends the PME_Turn_Off message to put the link in L2/L3 ready state.
 * called by px_ioctl.
 * returns DDI_SUCCESS or DDI_FAILURE
 * 1. Wait for link to be in L1 state (link status reg)
 * 2. write to PME_Turn_Off reg to broadcast
 * 3. set timeout
 * 4. If timeout, return failure.
 * 5. If PME_To_ACK, wait till link is in L2/L3 ready
 */
static int
px_goto_l23ready(px_t *px_p)
{
	pcie_pwr_t	*pwr_p;
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	caddr_t	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
	int		ret = DDI_SUCCESS;
	clock_t		end, timeleft;
	int		mutex_held = 1;

	/* If no PM info, return failure */
	if (!PCIE_PMINFO(px_p->px_dip) ||
	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
		return (DDI_FAILURE);

	mutex_enter(&pwr_p->pwr_lock);
	mutex_enter(&px_p->px_l23ready_lock);
	/* Clear the PME_To_ACK received flag */
	px_p->px_pm_flags &= ~PX_PMETOACK_RECVD;
	/*
	 * When P25 is the downstream device, after receiving
	 * PME_To_ACK, fire will go to Detect state, which causes
	 * the link down event. Inform FMA that this is expected.
	 * In case of all other cards compliant with the PCI Express
	 * spec, this will happen when the power is re-applied. FMA
	 * code will clear this flag after one instance of LDN. Since
	 * there will not be a LDN event for the spec compliant cards,
	 * we need to clear the flag after receiving PME_To_ACK.
	 */
	px_p->px_pm_flags |= PX_LDN_EXPECTED;
	if (px_send_pme_turnoff(csr_base) != DDI_SUCCESS) {
		ret = DDI_FAILURE;
		goto l23ready_done;
	}
	px_p->px_pm_flags |= PX_PME_TURNOFF_PENDING;

	end = ddi_get_lbolt() + drv_usectohz(px_pme_to_ack_timeout);
	while (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
		timeleft = cv_timedwait(&px_p->px_l23ready_cv,
		    &px_p->px_l23ready_lock, end);
		/*
		 * cv_timedwait returns -1 when either
		 * 1) it timed out,
		 * 2) there was a premature wakeup but by the time
		 * cv_timedwait is called again end < lbolt, i.e.
		 * end is in the past, or
		 * 3) end < lbolt was already true by the time we
		 * made the first cv_timedwait call.
		 */
		if (timeleft == -1)
			break;
	}
	if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
		/*
		 * Either timed out or the interrupt didn't get a
		 * chance to grab the mutex and set the flag.
		 * Release the mutex and delay for some time.
		 * This will 1) give the interrupt a chance to
		 * set the flag and 2) create a delay between two
		 * consecutive requests.
		 */
		mutex_exit(&px_p->px_l23ready_lock);
		delay(drv_usectohz(50 * PX_MSEC_TO_USEC));
		mutex_held = 0;
		if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
			ret = DDI_FAILURE;
			DBG(DBG_PWR, px_p->px_dip, " Timed out while waiting"
			    " for PME_TO_ACK\n");
		}
	}
	px_p->px_pm_flags &=
	    ~(PX_PME_TURNOFF_PENDING | PX_PMETOACK_RECVD | PX_LDN_EXPECTED);

l23ready_done:
	if (mutex_held)
		mutex_exit(&px_p->px_l23ready_lock);
	/*
	 * Wait till the link is in L1 idle, if sending PME_Turn_Off
	 * was successful.
	 */
	if (ret == DDI_SUCCESS) {
		if (px_link_wait4l1idle(csr_base) != DDI_SUCCESS) {
			DBG(DBG_PWR, px_p->px_dip, " Link is not at L1"
			    " even though we received PME_To_ACK.\n");
			/*
			 * Workaround for hardware bug with P25.
			 * Due to a hardware bug with P25, the link state
			 * will be Detect state rather than L1 after the
			 * link is transitioned to L23Ready state. Since
			 * we don't know whether the link is in L23Ready
			 * state without Fire's state being L1_idle, we
			 * delay here just to make sure that we wait till
			 * the link is transitioned to L23Ready state.
			 */
			delay(drv_usectohz(100 * PX_MSEC_TO_USEC));
		}
		pwr_p->pwr_link_lvl = PM_LEVEL_L3;

	}
	mutex_exit(&pwr_p->pwr_lock);
	return (ret);
}

/*
 * Message interrupt handler intended to be shared for both
 * PME and PME_TO_ACK msg handling; currently it only handles
 * the PME_To_ACK message.
 */
uint_t
px_pmeq_intr(caddr_t arg)
{
	px_t	*px_p = (px_t *)arg;

	DBG(DBG_PWR, px_p->px_dip, " PME_To_ACK received \n");
	mutex_enter(&px_p->px_l23ready_lock);
	cv_broadcast(&px_p->px_l23ready_cv);
	if (px_p->px_pm_flags & PX_PME_TURNOFF_PENDING) {
		px_p->px_pm_flags |= PX_PMETOACK_RECVD;
	} else {
		/*
		 * This may be the second ack received. If so,
		 * we should be receiving it during the wait4L1 stage.
		 */
		px_p->px_pmetoack_ignored++;
	}
	mutex_exit(&px_p->px_l23ready_lock);
	return (DDI_INTR_CLAIMED);
}

static int
px_pre_pwron_check(px_t *px_p)
{
	pcie_pwr_t	*pwr_p;

	/* If no PM info, return failure */
	if (!PCIE_PMINFO(px_p->px_dip) ||
	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
		return (DDI_FAILURE);

	/*
	 * For the spec compliant downstream cards, link down
	 * is expected when the device is powered on.
	 */
	px_p->px_pm_flags |= PX_LDN_EXPECTED;
	return (pwr_p->pwr_link_lvl == PM_LEVEL_L3 ? DDI_SUCCESS : DDI_FAILURE);
}

static int
px_goto_l0(px_t *px_p)
{
	pcie_pwr_t	*pwr_p;
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
	int		ret = DDI_SUCCESS;
	uint64_t	time_spent = 0;

	/* If no PM info, return failure */
	if (!PCIE_PMINFO(px_p->px_dip) ||
	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
		return (DDI_FAILURE);

	mutex_enter(&pwr_p->pwr_lock);
	/*
	 * The following link retrain activity will cause LDN and LUP
	 * events. Receiving LDN prior to receiving LUP is expected, not
	 * an error in this case.  Receiving LUP indicates the link is
	 * fully up to support powering up the downstream device; any
	 * further LDN or LUP outside this context will be an error.
	 */
	px_p->px_lup_pending = 1;
	if (px_link_retrain(csr_base) != DDI_SUCCESS) {
		ret = DDI_FAILURE;
		goto l0_done;
	}

	/* A LUP event takes on the order of 15ms to occur */
	for (; px_p->px_lup_pending && (time_spent < px_lup_poll_to);
	    time_spent += px_lup_poll_interval)
		drv_usecwait(px_lup_poll_interval);
	if (px_p->px_lup_pending)
		ret = DDI_FAILURE;
l0_done:
	px_enable_detect_quiet(csr_base);
	if (ret == DDI_SUCCESS)
		pwr_p->pwr_link_lvl = PM_LEVEL_L0;
	mutex_exit(&pwr_p->pwr_lock);
	return (ret);
}

/*
 * Extract the driver's binding name to identify which chip we're binding to.
 * Whenever a new bus bridge is created, the driver alias entry should be
 * added here to identify the device if needed.  If a device isn't added,
 * the identity defaults to PX_CHIP_UNIDENTIFIED.
 */
static uint32_t
px_identity_chip(px_t *px_p)
{
	dev_info_t	*dip = px_p->px_dip;
	char		*name = ddi_binding_name(dip);
	uint32_t	revision = 0;

	revision = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "module-revision#", 0);

	/* Check for Fire driver binding name */
	if ((strcmp(name, "pci108e,80f0") == 0) ||
	    (strcmp(name, "pciex108e,80f0") == 0)) {
		DBG(DBG_ATTACH, dip, "px_identity_chip: %s%d: "
		    "name %s module-revision %d\n", ddi_driver_name(dip),
		    ddi_get_instance(dip), name, revision);

		return (PX_CHIP_ID(PX_CHIP_FIRE, revision, 0x00));
	}

	DBG(DBG_ATTACH, dip, "%s%d: Unknown PCI Express Host bridge %s %x\n",
	    ddi_driver_name(dip), ddi_get_instance(dip), name, revision);

	return (PX_CHIP_UNIDENTIFIED);
}

int
px_err_add_intr(px_fault_t *px_fault_p)
{
	dev_info_t	*dip = px_fault_p->px_fh_dip;
	px_t		*px_p = DIP_TO_STATE(dip);

	VERIFY(add_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL,
		px_fault_p->px_err_func, (caddr_t)px_fault_p, NULL) == 0);

	px_ib_intr_enable(px_p, intr_dist_cpuid(), px_fault_p->px_intr_ino);

	return (DDI_SUCCESS);
}

void
px_err_rem_intr(px_fault_t *px_fault_p)
{
	dev_info_t	*dip = px_fault_p->px_fh_dip;
	px_t		*px_p = DIP_TO_STATE(dip);

	px_ib_intr_disable(px_p->px_ib_p, px_fault_p->px_intr_ino,
		IB_INTR_WAIT);

	rem_ivintr(px_fault_p->px_fh_sysino, NULL);
}

#ifdef FMA
void
px_fill_rc_status(px_fault_t *px_fault_p, pciex_rc_error_regs_t *rc_status)
{
	/* populate the rc_status by reading the registers - TBD */
}
#endif /* FMA */

/*
 * Unprotected raw reads/writes of a fabric device's config space.
 * Only used for temporary PCI-E Fabric Error Handling.
 */
uint32_t
px_fab_get(px_t *px_p, pcie_req_id_t bdf, uint16_t offset) {
	px_ranges_t	*rp = px_p->px_ranges_p;
	uint64_t	range_prop, base_addr;
	int		bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG);
	uint32_t	val;

	/* Get Fire's Physical Base Address */
	range_prop = (((uint64_t)(rp[bank].parent_high & 0x7ff)) << 32) |
	    rp[bank].parent_low;

	/* Get config space first. */
	base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset);

	val = ldphysio(base_addr);

	return (LE_32(val));
}

void
px_fab_set(px_t *px_p, pcie_req_id_t bdf, uint16_t offset,
    uint32_t val) {
	px_ranges_t	*rp = px_p->px_ranges_p;
	uint64_t	range_prop, base_addr;
	int		bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG);

	/* Get Fire's Physical Base Address */
	range_prop = (((uint64_t)(rp[bank].parent_high & 0x7ff)) << 32) |
	    rp[bank].parent_low;

	/* Get config space first. */
	base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset);

	stphysio(base_addr, LE_32(val));
}
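
/*
 * For instance, the fabric error handling code could fetch a device's
 * vendor/device ID word with px_fab_get(px_p, bdf, PCI_CONF_VENID)
 * and write a config register back with px_fab_set(); the offset is
 * a standard PCI config space offset.
 */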

/*
 * cpr callback
 *
 * Disable the fabric error msg interrupt prior to suspending
 * all device drivers; re-enable the fabric error msg interrupt
 * after all devices are resumed.
 */
static boolean_t
px_cpr_callb(void *arg, int code)
{
	px_t		*px_p = (px_t *)arg;
	px_ib_t		*ib_p = px_p->px_ib_p;
	px_pec_t	*pec_p = px_p->px_pec_p;
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	caddr_t		csr_base;
	devino_t	ce_ino, nf_ino, f_ino;
	px_ib_ino_info_t	*ce_ino_p, *nf_ino_p, *f_ino_p;
	uint64_t	imu_log_enable, imu_intr_enable;
	uint64_t	imu_log_mask, imu_intr_mask;

	ce_ino = px_msiqid_to_devino(px_p, pec_p->pec_corr_msg_msiq_id);
	nf_ino = px_msiqid_to_devino(px_p, pec_p->pec_non_fatal_msg_msiq_id);
	f_ino = px_msiqid_to_devino(px_p, pec_p->pec_fatal_msg_msiq_id);
	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];

	imu_log_enable = CSR_XR(csr_base, IMU_ERROR_LOG_ENABLE);
	imu_intr_enable = CSR_XR(csr_base, IMU_INTERRUPT_ENABLE);

	imu_log_mask = BITMASK(IMU_ERROR_LOG_ENABLE_FATAL_MES_NOT_EN_LOG_EN) |
	    BITMASK(IMU_ERROR_LOG_ENABLE_NONFATAL_MES_NOT_EN_LOG_EN) |
	    BITMASK(IMU_ERROR_LOG_ENABLE_COR_MES_NOT_EN_LOG_EN);

	imu_intr_mask =
	    BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_S_INT_EN) |
	    BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_S_INT_EN) |
	    BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_S_INT_EN) |
	    BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_P_INT_EN) |
	    BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_P_INT_EN) |
	    BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_P_INT_EN);

	switch (code) {
	case CB_CODE_CPR_CHKPT:
		/* disable imu rbne on corr/nonfatal/fatal errors */
		CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE,
		    imu_log_enable & (~imu_log_mask));

		CSR_XS(csr_base, IMU_INTERRUPT_ENABLE,
		    imu_intr_enable & (~imu_intr_mask));

		/* disable CORR intr mapping */
		px_ib_intr_disable(ib_p, ce_ino, IB_INTR_NOWAIT);

		/* disable NON FATAL intr mapping */
		px_ib_intr_disable(ib_p, nf_ino, IB_INTR_NOWAIT);

		/* disable FATAL intr mapping */
		px_ib_intr_disable(ib_p, f_ino, IB_INTR_NOWAIT);

		break;

	case CB_CODE_CPR_RESUME:
		mutex_enter(&ib_p->ib_ino_lst_mutex);

		ce_ino_p = px_ib_locate_ino(ib_p, ce_ino);
		nf_ino_p = px_ib_locate_ino(ib_p, nf_ino);
		f_ino_p = px_ib_locate_ino(ib_p, f_ino);

		/* enable CORR intr mapping */
		if (ce_ino_p)
			px_ib_intr_enable(px_p, ce_ino_p->ino_cpuid, ce_ino);
		else
			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
			    "reenable PCIe Correctable msg intr.\n");

		/* enable NON FATAL intr mapping */
		if (nf_ino_p)
			px_ib_intr_enable(px_p, nf_ino_p->ino_cpuid, nf_ino);
		else
			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
			    "reenable PCIe Non Fatal msg intr.\n");

		/* enable FATAL intr mapping */
		if (f_ino_p)
			px_ib_intr_enable(px_p, f_ino_p->ino_cpuid, f_ino);
		else
			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
			    "reenable PCIe Fatal msg intr.\n");

		mutex_exit(&ib_p->ib_ino_lst_mutex);

		/*
		 * Re-enable logging and interrupts for the
		 * corr/nonfatal/fatal message-not-enabled errors.
		 */
		CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE, (imu_log_enable |
		    (imu_log_mask & px_imu_log_mask)));
		CSR_XS(csr_base, IMU_INTERRUPT_ENABLE, (imu_intr_enable |
		    (imu_intr_mask & px_imu_intr_mask)));

		break;
	}

	return (B_TRUE);
}

/*
 * add cpr callback
 */
void
px_cpr_add_callb(px_t *px_p)
{
	px_p->px_cprcb_id = callb_add(px_cpr_callb, (void *)px_p,
	    CB_CL_CPR_POST_USER, "px_cpr");
}

/*
 * remove cpr callback
 */
void
px_cpr_rem_callb(px_t *px_p)
{
	(void) callb_delete(px_p->px_cprcb_id);
}

/*ARGSUSED*/
int
px_lib_hotplug_init(dev_info_t *dip, void *arg)
{
	return (DDI_ENOTSUP);
}

/*ARGSUSED*/
void
px_lib_hotplug_uninit(dev_info_t *dip)
{
}