xref: /titanic_44/usr/src/uts/sun4u/io/px/px_lib4u.c (revision 72ca8cc954e6ca79da549736925d4569dc23b239)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/types.h>
27 #include <sys/kmem.h>
28 #include <sys/conf.h>
29 #include <sys/ddi.h>
30 #include <sys/sunddi.h>
31 #include <sys/sunndi.h>
32 #include <sys/fm/protocol.h>
33 #include <sys/fm/util.h>
34 #include <sys/modctl.h>
35 #include <sys/disp.h>
36 #include <sys/stat.h>
37 #include <sys/ddi_impldefs.h>
38 #include <sys/vmem.h>
39 #include <sys/iommutsb.h>
40 #include <sys/cpuvar.h>
41 #include <sys/ivintr.h>
42 #include <sys/byteorder.h>
43 #include <sys/hotplug/pci/pciehpc.h>
44 #include <sys/spl.h>
45 #include <px_obj.h>
46 #include <pcie_pwr.h>
47 #include "px_tools_var.h"
48 #include <px_regs.h>
49 #include <px_csr.h>
50 #include <sys/machsystm.h>
51 #include "px_lib4u.h"
52 #include "px_err.h"
53 #include "oberon_regs.h"
54 
55 #pragma weak jbus_stst_order
56 
57 extern void jbus_stst_order();
58 
59 ulong_t px_mmu_dvma_end = 0xfffffffful;
60 uint_t px_ranges_phi_mask = 0xfffffffful;
61 uint64_t *px_oberon_ubc_scratch_regs;
62 uint64_t px_paddr_mask;
63 
64 static int px_goto_l23ready(px_t *px_p);
65 static int px_goto_l0(px_t *px_p);
66 static int px_pre_pwron_check(px_t *px_p);
67 static uint32_t px_identity_init(px_t *px_p);
68 static boolean_t px_cpr_callb(void *arg, int code);
69 static uint_t px_cb_intr(caddr_t arg);
70 
71 /*
72  * ACKNAK Latency Threshold Table.
73  * See Fire PRM 2.0 section 1.2.12.2, table 1-17.
74  */
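/*
 * Note: per the array dimensions below, rows are indexed by maximum
 * packet size (LINK_MAX_PKT_ARR_SIZE) and columns by link width
 * (LINK_WIDTH_ARR_SIZE); the entries are presumed to follow the
 * encodings of the PRM tables cited above and below.
 */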
75 int px_acknak_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE] = {
76 	{0xED,   0x49,  0x43,  0x30},
77 	{0x1A0,  0x76,  0x6B,  0x48},
78 	{0x22F,  0x9A,  0x56,  0x56},
79 	{0x42F,  0x11A, 0x96,  0x96},
80 	{0x82F,  0x21A, 0x116, 0x116},
81 	{0x102F, 0x41A, 0x216, 0x216}
82 };
83 
84 /*
85  * TxLink Replay Timer Latency Table
86  * See Fire PRM 2.0 section 1.2.12.3, table 1-18.
87  */
88 int px_replay_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE] = {
89 	{0x379,  0x112, 0xFC,  0xB4},
90 	{0x618,  0x1BA, 0x192, 0x10E},
91 	{0x831,  0x242, 0x143, 0x143},
92 	{0xFB1,  0x422, 0x233, 0x233},
93 	{0x1EB0, 0x7E1, 0x412, 0x412},
94 	{0x3CB0, 0xF61, 0x7D2, 0x7D2}
95 };
96 /*
97  * px_lib_map_regs
98  *
99  * This function is called from the attach routine to map the registers
100  * accessed by this driver.
101  *
102  * used by: px_attach()
103  *
104  * return value: DDI_FAILURE on failure
105  */
106 int
107 px_lib_map_regs(pxu_t *pxu_p, dev_info_t *dip)
108 {
109 	ddi_device_acc_attr_t	attr;
110 	px_reg_bank_t		reg_bank = PX_REG_CSR;
111 
112 	DBG(DBG_ATTACH, dip, "px_lib_map_regs: pxu_p:0x%p, dip 0x%p\n",
113 	    pxu_p, dip);
114 
115 	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
116 	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
117 	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
118 
119 	/*
120 	 * PCI CSR Base
121 	 */
122 	if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank],
123 	    0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) {
124 		goto fail;
125 	}
126 
127 	reg_bank++;
128 
129 	/*
130 	 * XBUS CSR Base
131 	 */
132 	if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank],
133 	    0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) {
134 		goto fail;
135 	}
136 
137 	pxu_p->px_address[reg_bank] -= FIRE_CONTROL_STATUS;
138 
139 done:
140 	for (; reg_bank >= PX_REG_CSR; reg_bank--) {
141 		DBG(DBG_ATTACH, dip, "reg_bank 0x%x address 0x%p\n",
142 		    reg_bank, pxu_p->px_address[reg_bank]);
143 	}
144 
145 	return (DDI_SUCCESS);
146 
147 fail:
148 	cmn_err(CE_WARN, "%s%d: unable to map reg entry %d\n",
149 	    ddi_driver_name(dip), ddi_get_instance(dip), reg_bank);
150 
151 	for (reg_bank--; reg_bank >= PX_REG_CSR; reg_bank--) {
152 		pxu_p->px_address[reg_bank] = NULL;
153 		ddi_regs_map_free(&pxu_p->px_ac[reg_bank]);
154 	}
155 
156 	return (DDI_FAILURE);
157 }
158 
159 /*
160  * px_lib_unmap_regs:
161  *
162  * This routine unmaps the registers mapped by px_lib_map_regs.
163  *
164  * used by: px_detach(), and error conditions in px_attach()
165  *
166  * return value: none
167  */
168 void
169 px_lib_unmap_regs(pxu_t *pxu_p)
170 {
171 	int i;
172 
173 	for (i = 0; i < PX_REG_MAX; i++) {
174 		if (pxu_p->px_ac[i])
175 			ddi_regs_map_free(&pxu_p->px_ac[i]);
176 	}
177 }
178 
179 int
180 px_lib_dev_init(dev_info_t *dip, devhandle_t *dev_hdl)
181 {
182 
183 	caddr_t			xbc_csr_base, csr_base;
184 	px_dvma_range_prop_t	px_dvma_range;
185 	pxu_t			*pxu_p;
186 	uint8_t			chip_mask;
187 	px_t			*px_p = DIP_TO_STATE(dip);
188 	px_chip_type_t		chip_type = px_identity_init(px_p);
189 
190 	DBG(DBG_ATTACH, dip, "px_lib_dev_init: dip 0x%p", dip);
191 
192 	if (chip_type == PX_CHIP_UNIDENTIFIED) {
193 		cmn_err(CE_WARN, "%s%d: Unrecognized Hardware Version\n",
194 		    NAMEINST(dip));
195 		return (DDI_FAILURE);
196 	}
197 
198 	chip_mask = BITMASK(chip_type);
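	/* Fire and Oberon support different physical address widths, hence the two masks */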
199 	px_paddr_mask = (chip_type == PX_CHIP_FIRE) ? MMU_FIRE_PADDR_MASK :
200 	    MMU_OBERON_PADDR_MASK;
201 
202 	/*
203 	 * Allocate platform specific structure and link it to
204 	 * the px state structure.
205 	 */
206 	pxu_p = kmem_zalloc(sizeof (pxu_t), KM_SLEEP);
207 	pxu_p->chip_type = chip_type;
208 	pxu_p->portid  = ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
209 	    "portid", -1);
210 
211 	/* Map in the registers */
212 	if (px_lib_map_regs(pxu_p, dip) == DDI_FAILURE) {
213 		kmem_free(pxu_p, sizeof (pxu_t));
214 
215 		return (DDI_FAILURE);
216 	}
217 
218 	xbc_csr_base = (caddr_t)pxu_p->px_address[PX_REG_XBC];
219 	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
220 
221 	pxu_p->tsb_cookie = iommu_tsb_alloc(pxu_p->portid);
222 	pxu_p->tsb_size = iommu_tsb_cookie_to_size(pxu_p->tsb_cookie);
223 	pxu_p->tsb_vaddr = iommu_tsb_cookie_to_va(pxu_p->tsb_cookie);
224 
225 	pxu_p->tsb_paddr = va_to_pa(pxu_p->tsb_vaddr);
226 
227 	/*
228 	 * Create "virtual-dma" property to support child devices
229 	 * needing to know the DVMA range.
230 	 */
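	/*
	 * Each 8-byte TSB entry maps one MMU page, so the DVMA window below
	 * spans (tsb_size >> 3) pages ending at px_mmu_dvma_end.
	 */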
231 	px_dvma_range.dvma_base = (uint32_t)px_mmu_dvma_end + 1
232 	    - ((pxu_p->tsb_size >> 3) << MMU_PAGE_SHIFT);
233 	px_dvma_range.dvma_len = (uint32_t)
234 	    px_mmu_dvma_end - px_dvma_range.dvma_base + 1;
235 
236 	(void) ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
237 	    "virtual-dma", (int *)&px_dvma_range,
238 	    sizeof (px_dvma_range_prop_t) / sizeof (int));
239 	/*
240 	 * Initialize all Fire hardware-specific blocks.
241 	 */
242 	hvio_cb_init(xbc_csr_base, pxu_p);
243 	hvio_ib_init(csr_base, pxu_p);
244 	hvio_pec_init(csr_base, pxu_p);
245 	hvio_mmu_init(csr_base, pxu_p);
246 
247 	px_p->px_plat_p = (void *)pxu_p;
248 
249 	/*
250 	 * Initialize all the interrupt handlers
251 	 */
252 	switch (PX_CHIP_TYPE(pxu_p)) {
253 	case PX_CHIP_OBERON:
254 		/*
255 		 * Oberon hotplug uses the SPARE3 field in the ILU Error Log
256 		 * Enable register to indicate the status of leaf reset.
257 		 * We need to preserve the value of this bit and keep it in
258 		 * px_ilu_log_mask to reflect the state of the bit.
259 		 */
260 		if (CSR_BR(csr_base, ILU_ERROR_LOG_ENABLE, SPARE3))
261 			px_ilu_log_mask |= (1ull <<
262 			    ILU_ERROR_LOG_ENABLE_SPARE3);
263 		else
264 			px_ilu_log_mask &= ~(1ull <<
265 			    ILU_ERROR_LOG_ENABLE_SPARE3);
266 
267 		px_err_reg_setup_pcie(chip_mask, csr_base, PX_ERR_ENABLE);
268 		break;
269 
270 	case PX_CHIP_FIRE:
271 		px_err_reg_setup_pcie(chip_mask, csr_base, PX_ERR_ENABLE);
272 		break;
273 
274 	default:
275 		cmn_err(CE_WARN, "%s%d: PX primary bus Unknown\n",
276 		    ddi_driver_name(dip), ddi_get_instance(dip));
277 		return (DDI_FAILURE);
278 	}
279 
280 	/* Initialize device handle */
281 	*dev_hdl = (devhandle_t)csr_base;
282 
283 	DBG(DBG_ATTACH, dip, "px_lib_dev_init: dev_hdl 0x%llx\n", *dev_hdl);
284 
285 	return (DDI_SUCCESS);
286 }
287 
288 int
289 px_lib_dev_fini(dev_info_t *dip)
290 {
291 	caddr_t			csr_base;
292 	uint8_t			chip_mask;
293 	px_t			*px_p = DIP_TO_STATE(dip);
294 	pxu_t			*pxu_p = (pxu_t *)px_p->px_plat_p;
295 
296 	DBG(DBG_DETACH, dip, "px_lib_dev_fini: dip 0x%p\n", dip);
297 
298 	/*
299 	 * Deinitialize all the interrupt handlers
300 	 */
301 	switch (PX_CHIP_TYPE(pxu_p)) {
302 	case PX_CHIP_OBERON:
303 	case PX_CHIP_FIRE:
304 		chip_mask = BITMASK(PX_CHIP_TYPE(pxu_p));
305 		csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
306 		px_err_reg_setup_pcie(chip_mask, csr_base, PX_ERR_DISABLE);
307 		break;
308 
309 	default:
310 		cmn_err(CE_WARN, "%s%d: PX primary bus Unknown\n",
311 		    ddi_driver_name(dip), ddi_get_instance(dip));
312 		return (DDI_FAILURE);
313 	}
314 
315 	iommu_tsb_free(pxu_p->tsb_cookie);
316 
317 	px_lib_unmap_regs((pxu_t *)px_p->px_plat_p);
318 	kmem_free(px_p->px_plat_p, sizeof (pxu_t));
319 	px_p->px_plat_p = NULL;
320 	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "virtual-dma");
321 
322 	return (DDI_SUCCESS);
323 }
324 
325 /*ARGSUSED*/
326 int
327 px_lib_intr_devino_to_sysino(dev_info_t *dip, devino_t devino,
328     sysino_t *sysino)
329 {
330 	px_t	*px_p = DIP_TO_STATE(dip);
331 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
332 	uint64_t	ret;
333 
334 	DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: dip 0x%p "
335 	    "devino 0x%x\n", dip, devino);
336 
337 	if ((ret = hvio_intr_devino_to_sysino(DIP_TO_HANDLE(dip),
338 	    pxu_p, devino, sysino)) != H_EOK) {
339 		DBG(DBG_LIB_INT, dip,
340 		    "hvio_intr_devino_to_sysino failed, ret 0x%lx\n", ret);
341 		return (DDI_FAILURE);
342 	}
343 
344 	DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: sysino 0x%llx\n",
345 	    *sysino);
346 
347 	return (DDI_SUCCESS);
348 }
349 
350 /*ARGSUSED*/
351 int
352 px_lib_intr_getvalid(dev_info_t *dip, sysino_t sysino,
353     intr_valid_state_t *intr_valid_state)
354 {
355 	uint64_t	ret;
356 
357 	DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: dip 0x%p sysino 0x%llx\n",
358 	    dip, sysino);
359 
360 	if ((ret = hvio_intr_getvalid(DIP_TO_HANDLE(dip),
361 	    sysino, intr_valid_state)) != H_EOK) {
362 		DBG(DBG_LIB_INT, dip, "hvio_intr_getvalid failed, ret 0x%lx\n",
363 		    ret);
364 		return (DDI_FAILURE);
365 	}
366 
367 	DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: intr_valid_state 0x%x\n",
368 	    *intr_valid_state);
369 
370 	return (DDI_SUCCESS);
371 }
372 
373 /*ARGSUSED*/
374 int
375 px_lib_intr_setvalid(dev_info_t *dip, sysino_t sysino,
376     intr_valid_state_t intr_valid_state)
377 {
378 	uint64_t	ret;
379 
380 	DBG(DBG_LIB_INT, dip, "px_lib_intr_setvalid: dip 0x%p sysino 0x%llx "
381 	    "intr_valid_state 0x%x\n", dip, sysino, intr_valid_state);
382 
383 	if ((ret = hvio_intr_setvalid(DIP_TO_HANDLE(dip),
384 	    sysino, intr_valid_state)) != H_EOK) {
385 		DBG(DBG_LIB_INT, dip, "hvio_intr_setvalid failed, ret 0x%lx\n",
386 		    ret);
387 		return (DDI_FAILURE);
388 	}
389 
390 	return (DDI_SUCCESS);
391 }
392 
393 /*ARGSUSED*/
394 int
395 px_lib_intr_getstate(dev_info_t *dip, sysino_t sysino,
396     intr_state_t *intr_state)
397 {
398 	uint64_t	ret;
399 
400 	DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: dip 0x%p sysino 0x%llx\n",
401 	    dip, sysino);
402 
403 	if ((ret = hvio_intr_getstate(DIP_TO_HANDLE(dip),
404 	    sysino, intr_state)) != H_EOK) {
405 		DBG(DBG_LIB_INT, dip, "hvio_intr_getstate failed, ret 0x%lx\n",
406 		    ret);
407 		return (DDI_FAILURE);
408 	}
409 
410 	DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: intr_state 0x%x\n",
411 	    *intr_state);
412 
413 	return (DDI_SUCCESS);
414 }
415 
416 /*ARGSUSED*/
417 int
418 px_lib_intr_setstate(dev_info_t *dip, sysino_t sysino,
419     intr_state_t intr_state)
420 {
421 	uint64_t	ret;
422 
423 	DBG(DBG_LIB_INT, dip, "px_lib_intr_setstate: dip 0x%p sysino 0x%llx "
424 	    "intr_state 0x%x\n", dip, sysino, intr_state);
425 
426 	if ((ret = hvio_intr_setstate(DIP_TO_HANDLE(dip),
427 	    sysino, intr_state)) != H_EOK) {
428 		DBG(DBG_LIB_INT, dip, "hvio_intr_setstate failed, ret 0x%lx\n",
429 		    ret);
430 		return (DDI_FAILURE);
431 	}
432 
433 	return (DDI_SUCCESS);
434 }
435 
436 /*ARGSUSED*/
437 int
438 px_lib_intr_gettarget(dev_info_t *dip, sysino_t sysino, cpuid_t *cpuid)
439 {
440 	px_t		*px_p = DIP_TO_STATE(dip);
441 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
442 	uint64_t	ret;
443 
444 	DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: dip 0x%p sysino 0x%llx\n",
445 	    dip, sysino);
446 
447 	if ((ret = hvio_intr_gettarget(DIP_TO_HANDLE(dip), pxu_p,
448 	    sysino, cpuid)) != H_EOK) {
449 		DBG(DBG_LIB_INT, dip, "hvio_intr_gettarget failed, ret 0x%lx\n",
450 		    ret);
451 		return (DDI_FAILURE);
452 	}
453 
454 	DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: cpuid 0x%x\n", *cpuid);
455 
456 	return (DDI_SUCCESS);
457 }
458 
459 /*ARGSUSED*/
460 int
461 px_lib_intr_settarget(dev_info_t *dip, sysino_t sysino, cpuid_t cpuid)
462 {
463 	px_t		*px_p = DIP_TO_STATE(dip);
464 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
465 	uint64_t	ret;
466 
467 	DBG(DBG_LIB_INT, dip, "px_lib_intr_settarget: dip 0x%p sysino 0x%llx "
468 	    "cpuid 0x%x\n", dip, sysino, cpuid);
469 
470 	if ((ret = hvio_intr_settarget(DIP_TO_HANDLE(dip), pxu_p,
471 	    sysino, cpuid)) != H_EOK) {
472 		DBG(DBG_LIB_INT, dip, "hvio_intr_settarget failed, ret 0x%lx\n",
473 		    ret);
474 		return (DDI_FAILURE);
475 	}
476 
477 	return (DDI_SUCCESS);
478 }
479 
480 /*ARGSUSED*/
481 int
482 px_lib_intr_reset(dev_info_t *dip)
483 {
484 	devino_t	ino;
485 	sysino_t	sysino;
486 
487 	DBG(DBG_LIB_INT, dip, "px_lib_intr_reset: dip 0x%p\n", dip);
488 
489 	/* Reset all Interrupts */
490 	for (ino = 0; ino < INTERRUPT_MAPPING_ENTRIES; ino++) {
491 		if (px_lib_intr_devino_to_sysino(dip, ino,
492 		    &sysino) != DDI_SUCCESS)
493 			return (BF_FATAL);
494 
495 		if (px_lib_intr_setstate(dip, sysino,
496 		    INTR_IDLE_STATE) != DDI_SUCCESS)
497 			return (BF_FATAL);
498 	}
499 
500 	return (BF_NONE);
501 }
502 
503 /*ARGSUSED*/
504 int
505 px_lib_iommu_map(dev_info_t *dip, tsbid_t tsbid, pages_t pages,
506     io_attributes_t attr, void *addr, size_t pfn_index, int flags)
507 {
508 	px_t		*px_p = DIP_TO_STATE(dip);
509 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
510 	uint64_t	ret;
511 
512 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: dip 0x%p tsbid 0x%llx "
513 	    "pages 0x%x attr 0x%x addr 0x%p pfn_index 0x%llx flags 0x%x\n",
514 	    dip, tsbid, pages, attr, addr, pfn_index, flags);
515 
516 	if ((ret = hvio_iommu_map(px_p->px_dev_hdl, pxu_p, tsbid, pages,
517 	    attr, addr, pfn_index, flags)) != H_EOK) {
518 		DBG(DBG_LIB_DMA, dip,
519 		    "px_lib_iommu_map failed, ret 0x%lx\n", ret);
520 		return (DDI_FAILURE);
521 	}
522 
523 	return (DDI_SUCCESS);
524 }
525 
526 /*ARGSUSED*/
527 int
528 px_lib_iommu_demap(dev_info_t *dip, tsbid_t tsbid, pages_t pages)
529 {
530 	px_t		*px_p = DIP_TO_STATE(dip);
531 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
532 	uint64_t	ret;
533 
534 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_demap: dip 0x%p tsbid 0x%llx "
535 	    "pages 0x%x\n", dip, tsbid, pages);
536 
537 	if ((ret = hvio_iommu_demap(px_p->px_dev_hdl, pxu_p, tsbid, pages))
538 	    != H_EOK) {
539 		DBG(DBG_LIB_DMA, dip,
540 		    "px_lib_iommu_demap failed, ret 0x%lx\n", ret);
541 
542 		return (DDI_FAILURE);
543 	}
544 
545 	return (DDI_SUCCESS);
546 }
547 
548 /*ARGSUSED*/
549 int
550 px_lib_iommu_getmap(dev_info_t *dip, tsbid_t tsbid, io_attributes_t *attr_p,
551     r_addr_t *r_addr_p)
552 {
553 	px_t	*px_p = DIP_TO_STATE(dip);
554 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
555 	uint64_t	ret;
556 
557 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: dip 0x%p tsbid 0x%llx\n",
558 	    dip, tsbid);
559 
560 	if ((ret = hvio_iommu_getmap(DIP_TO_HANDLE(dip), pxu_p, tsbid,
561 	    attr_p, r_addr_p)) != H_EOK) {
562 		DBG(DBG_LIB_DMA, dip,
563 		    "hvio_iommu_getmap failed, ret 0x%lx\n", ret);
564 
565 		return ((ret == H_ENOMAP) ? DDI_DMA_NOMAPPING : DDI_FAILURE);
566 	}
567 
568 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: attr 0x%x r_addr 0x%llx\n",
569 	    *attr_p, *r_addr_p);
570 
571 	return (DDI_SUCCESS);
572 }
573 
574 
575 /*
576  * Checks DMA attributes against the system bypass ranges.
577  * The bypass range is determined by the hardware. Return its bounds so
578  * the common code can do generic checking against them.
579  */
580 /*ARGSUSED*/
581 int
582 px_lib_dma_bypass_rngchk(dev_info_t *dip, ddi_dma_attr_t *attr_p,
583     uint64_t *lo_p, uint64_t *hi_p)
584 {
585 	px_t	*px_p = DIP_TO_STATE(dip);
586 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
587 
588 	*lo_p = hvio_get_bypass_base(pxu_p);
589 	*hi_p = hvio_get_bypass_end(pxu_p);
590 
591 	return (DDI_SUCCESS);
592 }
593 
594 
595 /*ARGSUSED*/
596 int
597 px_lib_iommu_getbypass(dev_info_t *dip, r_addr_t ra, io_attributes_t attr,
598     io_addr_t *io_addr_p)
599 {
600 	uint64_t	ret;
601 	px_t	*px_p = DIP_TO_STATE(dip);
602 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
603 
604 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: dip 0x%p ra 0x%llx "
605 	    "attr 0x%x\n", dip, ra, attr);
606 
607 	if ((ret = hvio_iommu_getbypass(DIP_TO_HANDLE(dip), pxu_p, ra,
608 	    attr, io_addr_p)) != H_EOK) {
609 		DBG(DBG_LIB_DMA, dip,
610 		    "hvio_iommu_getbypass failed, ret 0x%lx\n", ret);
611 		return (DDI_FAILURE);
612 	}
613 
614 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: io_addr 0x%llx\n",
615 	    *io_addr_p);
616 
617 	return (DDI_SUCCESS);
618 }
619 
620 /*
621  * bus dma sync entry point.
622  */
623 /*ARGSUSED*/
624 int
625 px_lib_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
626     off_t off, size_t len, uint_t cache_flags)
627 {
628 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
629 	px_t	*px_p = DIP_TO_STATE(dip);
630 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
631 
632 	DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: dip 0x%p rdip 0x%p "
633 	    "handle 0x%llx off 0x%x len 0x%x flags 0x%x\n",
634 	    dip, rdip, handle, off, len, cache_flags);
635 
636 	/*
637 	 * No flush needed for Oberon
638 	 */
639 	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON)
640 		return (DDI_SUCCESS);
641 
642 	/*
643 	 * jbus_stst_order is found only in certain cpu modules.
644 	 * Just return success if not present.
645 	 */
646 	if (&jbus_stst_order == NULL)
647 		return (DDI_SUCCESS);
648 
649 	if (!(mp->dmai_flags & PX_DMAI_FLAGS_INUSE)) {
650 		cmn_err(CE_WARN, "%s%d: Unbound dma handle %p.",
651 		    ddi_driver_name(rdip), ddi_get_instance(rdip), (void *)mp);
652 
653 		return (DDI_FAILURE);
654 	}
655 
656 	if (mp->dmai_flags & PX_DMAI_FLAGS_NOSYNC)
657 		return (DDI_SUCCESS);
658 
659 	/*
660 	 * No flush needed when sending data from memory to device.
661 	 * Nothing to do to "sync" memory to what the device would already see.
662 	 */
663 	if (!(mp->dmai_rflags & DDI_DMA_READ) ||
664 	    ((cache_flags & PX_DMA_SYNC_DDI_FLAGS) == DDI_DMA_SYNC_FORDEV))
665 		return (DDI_SUCCESS);
666 
667 	/*
668 	 * Perform the necessary CPU workaround to ensure JBus ordering:
669 	 * the CPU's internal "invalidate FIFOs" are flushed.
670 	 */
671 
672 #if !defined(lint)
673 	kpreempt_disable();
674 #endif
675 	jbus_stst_order();
676 #if !defined(lint)
677 	kpreempt_enable();
678 #endif
679 	return (DDI_SUCCESS);
680 }
681 
682 /*
683  * MSIQ Functions:
684  */
685 /*ARGSUSED*/
686 int
687 px_lib_msiq_init(dev_info_t *dip)
688 {
689 	px_t		*px_p = DIP_TO_STATE(dip);
690 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
691 	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
692 	px_dvma_addr_t	pg_index;
693 	size_t		q_sz = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);
694 	size_t		size;
695 	int		i, ret;
696 
697 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_init: dip 0x%p\n", dip);
698 
699 	/* must be aligned on a q_sz boundary (q_sz happens to be one 8K page) */
700 	ASSERT(q_sz == 8 * 1024);
701 
702 	/*
703 	 * Map the EQ memory into the Fire MMU (has to be 512KB aligned)
704 	 * and then initialize the base address register.
705 	 *
706 	 * Allocate entries from Fire IOMMU so that the resulting address
707 	 * is properly aligned.  Calculate the index of the first allocated
708 	 * entry.  Note: The size of the mapping is assumed to be a multiple
709 	 * of the page size.
710 	 */
711 	size = msiq_state_p->msiq_cnt * q_sz;
712 
713 	msiq_state_p->msiq_buf_p = kmem_zalloc(size, KM_SLEEP);
714 
715 	for (i = 0; i < msiq_state_p->msiq_cnt; i++)
716 		msiq_state_p->msiq_p[i].msiq_base_p = (msiqhead_t *)
717 		    ((caddr_t)msiq_state_p->msiq_buf_p + (i * q_sz));
718 
719 	pxu_p->msiq_mapped_p = vmem_xalloc(px_p->px_mmu_p->mmu_dvma_map,
720 	    size, (512 * 1024), 0, 0, NULL, NULL, VM_NOSLEEP | VM_BESTFIT);
721 
722 	if (pxu_p->msiq_mapped_p == NULL)
723 		return (DDI_FAILURE);
724 
725 	pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p,
726 	    MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p));
727 
728 	if ((ret = px_lib_iommu_map(px_p->px_dip, PCI_TSBID(0, pg_index),
729 	    MMU_BTOP(size), PCI_MAP_ATTR_WRITE, msiq_state_p->msiq_buf_p,
730 	    0, MMU_MAP_BUF)) != DDI_SUCCESS) {
731 		DBG(DBG_LIB_MSIQ, dip,
732 		    "px_lib_msiq_init: px_lib_iommu_map failed, "
733 		    "ret 0x%lx\n", ret);
734 
735 		(void) px_lib_msiq_fini(dip);
736 		return (DDI_FAILURE);
737 	}
738 
739 	if ((ret = hvio_msiq_init(DIP_TO_HANDLE(dip),
740 	    pxu_p)) != H_EOK) {
741 		DBG(DBG_LIB_MSIQ, dip,
742 		    "hvio_msiq_init failed, ret 0x%lx\n", ret);
743 
744 		(void) px_lib_msiq_fini(dip);
745 		return (DDI_FAILURE);
746 	}
747 
748 	return (DDI_SUCCESS);
749 }
750 
751 /*ARGSUSED*/
752 int
753 px_lib_msiq_fini(dev_info_t *dip)
754 {
755 	px_t		*px_p = DIP_TO_STATE(dip);
756 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
757 	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
758 	px_dvma_addr_t	pg_index;
759 	size_t		size;
760 
761 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_fini: dip 0x%p\n", dip);
762 
763 	/*
764 	 * Unmap and free the EQ memory that had been mapped
765 	 * into the Fire IOMMU.
766 	 */
767 	size = msiq_state_p->msiq_cnt *
768 	    msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);
769 
770 	pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p,
771 	    MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p));
772 
773 	(void) px_lib_iommu_demap(px_p->px_dip,
774 	    PCI_TSBID(0, pg_index), MMU_BTOP(size));
775 
776 	/* Free the entries from the Fire MMU */
777 	vmem_xfree(px_p->px_mmu_p->mmu_dvma_map,
778 	    (void *)pxu_p->msiq_mapped_p, size);
779 
780 	kmem_free(msiq_state_p->msiq_buf_p, msiq_state_p->msiq_cnt *
781 	    msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t));
782 
783 	return (DDI_SUCCESS);
784 }
785 
786 /*ARGSUSED*/
787 int
788 px_lib_msiq_info(dev_info_t *dip, msiqid_t msiq_id, r_addr_t *ra_p,
789     uint_t *msiq_rec_cnt_p)
790 {
791 	px_t		*px_p = DIP_TO_STATE(dip);
792 	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
793 	size_t		msiq_size;
794 
795 	DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: dip 0x%p msiq_id 0x%x\n",
796 	    dip, msiq_id);
797 
798 	msiq_size = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);
799 	ra_p = (r_addr_t *)((caddr_t)msiq_state_p->msiq_buf_p +
800 	    (msiq_id * msiq_size));
801 
802 	*msiq_rec_cnt_p = msiq_state_p->msiq_rec_cnt;
803 
804 	DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: ra_p 0x%p msiq_rec_cnt 0x%x\n",
805 	    ra_p, *msiq_rec_cnt_p);
806 
807 	return (DDI_SUCCESS);
808 }
809 
810 /*ARGSUSED*/
811 int
812 px_lib_msiq_getvalid(dev_info_t *dip, msiqid_t msiq_id,
813     pci_msiq_valid_state_t *msiq_valid_state)
814 {
815 	uint64_t	ret;
816 
817 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: dip 0x%p msiq_id 0x%x\n",
818 	    dip, msiq_id);
819 
820 	if ((ret = hvio_msiq_getvalid(DIP_TO_HANDLE(dip),
821 	    msiq_id, msiq_valid_state)) != H_EOK) {
822 		DBG(DBG_LIB_MSIQ, dip,
823 		    "hvio_msiq_getvalid failed, ret 0x%lx\n", ret);
824 		return (DDI_FAILURE);
825 	}
826 
827 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: msiq_valid_state 0x%x\n",
828 	    *msiq_valid_state);
829 
830 	return (DDI_SUCCESS);
831 }
832 
833 /*ARGSUSED*/
834 int
835 px_lib_msiq_setvalid(dev_info_t *dip, msiqid_t msiq_id,
836     pci_msiq_valid_state_t msiq_valid_state)
837 {
838 	uint64_t	ret;
839 
840 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setvalid: dip 0x%p msiq_id 0x%x "
841 	    "msiq_valid_state 0x%x\n", dip, msiq_id, msiq_valid_state);
842 
843 	if ((ret = hvio_msiq_setvalid(DIP_TO_HANDLE(dip),
844 	    msiq_id, msiq_valid_state)) != H_EOK) {
845 		DBG(DBG_LIB_MSIQ, dip,
846 		    "hvio_msiq_setvalid failed, ret 0x%lx\n", ret);
847 		return (DDI_FAILURE);
848 	}
849 
850 	return (DDI_SUCCESS);
851 }
852 
853 /*ARGSUSED*/
854 int
855 px_lib_msiq_getstate(dev_info_t *dip, msiqid_t msiq_id,
856     pci_msiq_state_t *msiq_state)
857 {
858 	uint64_t	ret;
859 
860 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: dip 0x%p msiq_id 0x%x\n",
861 	    dip, msiq_id);
862 
863 	if ((ret = hvio_msiq_getstate(DIP_TO_HANDLE(dip),
864 	    msiq_id, msiq_state)) != H_EOK) {
865 		DBG(DBG_LIB_MSIQ, dip,
866 		    "hvio_msiq_getstate failed, ret 0x%lx\n", ret);
867 		return (DDI_FAILURE);
868 	}
869 
870 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: msiq_state 0x%x\n",
871 	    *msiq_state);
872 
873 	return (DDI_SUCCESS);
874 }
875 
876 /*ARGSUSED*/
877 int
878 px_lib_msiq_setstate(dev_info_t *dip, msiqid_t msiq_id,
879     pci_msiq_state_t msiq_state)
880 {
881 	uint64_t	ret;
882 
883 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setstate: dip 0x%p msiq_id 0x%x "
884 	    "msiq_state 0x%x\n", dip, msiq_id, msiq_state);
885 
886 	if ((ret = hvio_msiq_setstate(DIP_TO_HANDLE(dip),
887 	    msiq_id, msiq_state)) != H_EOK) {
888 		DBG(DBG_LIB_MSIQ, dip,
889 		    "hvio_msiq_setstate failed, ret 0x%lx\n", ret);
890 		return (DDI_FAILURE);
891 	}
892 
893 	return (DDI_SUCCESS);
894 }
895 
896 /*ARGSUSED*/
897 int
898 px_lib_msiq_gethead(dev_info_t *dip, msiqid_t msiq_id,
899     msiqhead_t *msiq_head)
900 {
901 	uint64_t	ret;
902 
903 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: dip 0x%p msiq_id 0x%x\n",
904 	    dip, msiq_id);
905 
906 	if ((ret = hvio_msiq_gethead(DIP_TO_HANDLE(dip),
907 	    msiq_id, msiq_head)) != H_EOK) {
908 		DBG(DBG_LIB_MSIQ, dip,
909 		    "hvio_msiq_gethead failed, ret 0x%lx\n", ret);
910 		return (DDI_FAILURE);
911 	}
912 
913 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: msiq_head 0x%x\n",
914 	    *msiq_head);
915 
916 	return (DDI_SUCCESS);
917 }
918 
919 /*ARGSUSED*/
920 int
921 px_lib_msiq_sethead(dev_info_t *dip, msiqid_t msiq_id,
922     msiqhead_t msiq_head)
923 {
924 	uint64_t	ret;
925 
926 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_sethead: dip 0x%p msiq_id 0x%x "
927 	    "msiq_head 0x%x\n", dip, msiq_id, msiq_head);
928 
929 	if ((ret = hvio_msiq_sethead(DIP_TO_HANDLE(dip),
930 	    msiq_id, msiq_head)) != H_EOK) {
931 		DBG(DBG_LIB_MSIQ, dip,
932 		    "hvio_msiq_sethead failed, ret 0x%lx\n", ret);
933 		return (DDI_FAILURE);
934 	}
935 
936 	return (DDI_SUCCESS);
937 }
938 
939 /*ARGSUSED*/
940 int
941 px_lib_msiq_gettail(dev_info_t *dip, msiqid_t msiq_id,
942     msiqtail_t *msiq_tail)
943 {
944 	uint64_t	ret;
945 
946 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: dip 0x%p msiq_id 0x%x\n",
947 	    dip, msiq_id);
948 
949 	if ((ret = hvio_msiq_gettail(DIP_TO_HANDLE(dip),
950 	    msiq_id, msiq_tail)) != H_EOK) {
951 		DBG(DBG_LIB_MSIQ, dip,
952 		    "hvio_msiq_gettail failed, ret 0x%lx\n", ret);
953 		return (DDI_FAILURE);
954 	}
955 
956 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: msiq_tail 0x%x\n",
957 	    *msiq_tail);
958 
959 	return (DDI_SUCCESS);
960 }
961 
962 /*ARGSUSED*/
963 void
964 px_lib_get_msiq_rec(dev_info_t *dip, msiqhead_t *msiq_head_p,
965     msiq_rec_t *msiq_rec_p)
966 {
967 	eq_rec_t	*eq_rec_p = (eq_rec_t *)msiq_head_p;
968 
969 	DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: dip 0x%p eq_rec_p 0x%p\n",
970 	    dip, eq_rec_p);
971 
972 	if (!eq_rec_p->eq_rec_fmt_type) {
973 		/* Set msiq_rec_type to zero */
974 		msiq_rec_p->msiq_rec_type = 0;
975 
976 		return;
977 	}
978 
979 	DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: EQ RECORD, "
980 	    "eq_rec_rid 0x%llx eq_rec_fmt_type 0x%llx "
981 	    "eq_rec_len 0x%llx eq_rec_addr0 0x%llx "
982 	    "eq_rec_addr1 0x%llx eq_rec_data0 0x%llx "
983 	    "eq_rec_data1 0x%llx\n", eq_rec_p->eq_rec_rid,
984 	    eq_rec_p->eq_rec_fmt_type, eq_rec_p->eq_rec_len,
985 	    eq_rec_p->eq_rec_addr0, eq_rec_p->eq_rec_addr1,
986 	    eq_rec_p->eq_rec_data0, eq_rec_p->eq_rec_data1);
987 
988 	/*
989 	 * Only the upper 4 bits of eq_rec_fmt_type are used
990 	 * to identify the EQ record type.
991 	 */
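	/*
	 * The low 3 bits of eq_rec_fmt_type carry the message routing code,
	 * extracted as msg_route for MSG records below.
	 */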
992 	switch (eq_rec_p->eq_rec_fmt_type >> 3) {
993 	case EQ_REC_MSI32:
994 		msiq_rec_p->msiq_rec_type = MSI32_REC;
995 
996 		msiq_rec_p->msiq_rec_data.msi.msi_data =
997 		    eq_rec_p->eq_rec_data0;
998 		break;
999 	case EQ_REC_MSI64:
1000 		msiq_rec_p->msiq_rec_type = MSI64_REC;
1001 
1002 		msiq_rec_p->msiq_rec_data.msi.msi_data =
1003 		    eq_rec_p->eq_rec_data0;
1004 		break;
1005 	case EQ_REC_MSG:
1006 		msiq_rec_p->msiq_rec_type = MSG_REC;
1007 
1008 		msiq_rec_p->msiq_rec_data.msg.msg_route =
1009 		    eq_rec_p->eq_rec_fmt_type & 7;
1010 		msiq_rec_p->msiq_rec_data.msg.msg_targ = eq_rec_p->eq_rec_rid;
1011 		msiq_rec_p->msiq_rec_data.msg.msg_code = eq_rec_p->eq_rec_data0;
1012 		break;
1013 	default:
1014 		cmn_err(CE_WARN, "%s%d: px_lib_get_msiq_rec: "
1015 		    "0x%x is an unknown EQ record type",
1016 		    ddi_driver_name(dip), ddi_get_instance(dip),
1017 		    (int)eq_rec_p->eq_rec_fmt_type);
1018 		break;
1019 	}
1020 
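	/*
	 * Reassemble the MSI address from the EQ record: eq_rec_addr1
	 * supplies the address bits from bit 16 up, eq_rec_addr0 the
	 * word-aligned low bits (shifted left by 2).
	 */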
1021 	msiq_rec_p->msiq_rec_rid = eq_rec_p->eq_rec_rid;
1022 	msiq_rec_p->msiq_rec_msi_addr = ((eq_rec_p->eq_rec_addr1 << 16) |
1023 	    (eq_rec_p->eq_rec_addr0 << 2));
1024 }
1025 
1026 /*ARGSUSED*/
1027 void
1028 px_lib_clr_msiq_rec(dev_info_t *dip, msiqhead_t *msiq_head_p)
1029 {
1030 	eq_rec_t	*eq_rec_p = (eq_rec_t *)msiq_head_p;
1031 
1032 	DBG(DBG_LIB_MSIQ, dip, "px_lib_clr_msiq_rec: dip 0x%p eq_rec_p 0x%p\n",
1033 	    dip, eq_rec_p);
1034 
1035 	if (eq_rec_p->eq_rec_fmt_type) {
1036 		/* Zero out eq_rec_fmt_type field */
1037 		eq_rec_p->eq_rec_fmt_type = 0;
1038 	}
1039 }
1040 
1041 /*
1042  * MSI Functions:
1043  */
1044 /*ARGSUSED*/
1045 int
1046 px_lib_msi_init(dev_info_t *dip)
1047 {
1048 	px_t		*px_p = DIP_TO_STATE(dip);
1049 	px_msi_state_t	*msi_state_p = &px_p->px_ib_p->ib_msi_state;
1050 	uint64_t	ret;
1051 
1052 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_init: dip 0x%p\n", dip);
1053 
1054 	if ((ret = hvio_msi_init(DIP_TO_HANDLE(dip),
1055 	    msi_state_p->msi_addr32, msi_state_p->msi_addr64)) != H_EOK) {
1056 		DBG(DBG_LIB_MSI, dip, "px_lib_msi_init failed, ret 0x%lx\n",
1057 		    ret);
1058 		return (DDI_FAILURE);
1059 	}
1060 
1061 	return (DDI_SUCCESS);
1062 }
1063 
1064 /*ARGSUSED*/
1065 int
1066 px_lib_msi_getmsiq(dev_info_t *dip, msinum_t msi_num,
1067     msiqid_t *msiq_id)
1068 {
1069 	uint64_t	ret;
1070 
1071 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: dip 0x%p msi_num 0x%x\n",
1072 	    dip, msi_num);
1073 
1074 	if ((ret = hvio_msi_getmsiq(DIP_TO_HANDLE(dip),
1075 	    msi_num, msiq_id)) != H_EOK) {
1076 		DBG(DBG_LIB_MSI, dip,
1077 		    "hvio_msi_getmsiq failed, ret 0x%lx\n", ret);
1078 		return (DDI_FAILURE);
1079 	}
1080 
1081 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: msiq_id 0x%x\n",
1082 	    *msiq_id);
1083 
1084 	return (DDI_SUCCESS);
1085 }
1086 
1087 /*ARGSUSED*/
1088 int
1089 px_lib_msi_setmsiq(dev_info_t *dip, msinum_t msi_num,
1090     msiqid_t msiq_id, msi_type_t msitype)
1091 {
1092 	uint64_t	ret;
1093 
1094 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setmsiq: dip 0x%p msi_num 0x%x "
1095 	    "msq_id 0x%x\n", dip, msi_num, msiq_id);
1096 
1097 	if ((ret = hvio_msi_setmsiq(DIP_TO_HANDLE(dip),
1098 	    msi_num, msiq_id)) != H_EOK) {
1099 		DBG(DBG_LIB_MSI, dip,
1100 		    "hvio_msi_setmsiq failed, ret 0x%lx\n", ret);
1101 		return (DDI_FAILURE);
1102 	}
1103 
1104 	return (DDI_SUCCESS);
1105 }
1106 
1107 /*ARGSUSED*/
1108 int
1109 px_lib_msi_getvalid(dev_info_t *dip, msinum_t msi_num,
1110     pci_msi_valid_state_t *msi_valid_state)
1111 {
1112 	uint64_t	ret;
1113 
1114 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: dip 0x%p msi_num 0x%x\n",
1115 	    dip, msi_num);
1116 
1117 	if ((ret = hvio_msi_getvalid(DIP_TO_HANDLE(dip),
1118 	    msi_num, msi_valid_state)) != H_EOK) {
1119 		DBG(DBG_LIB_MSI, dip,
1120 		    "hvio_msi_getvalid failed, ret 0x%lx\n", ret);
1121 		return (DDI_FAILURE);
1122 	}
1123 
1124 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: msi_valid_state 0x%x\n",
1125 	    *msi_valid_state);
1126 
1127 	return (DDI_SUCCESS);
1128 }
1129 
1130 /*ARGSUSED*/
1131 int
1132 px_lib_msi_setvalid(dev_info_t *dip, msinum_t msi_num,
1133     pci_msi_valid_state_t msi_valid_state)
1134 {
1135 	uint64_t	ret;
1136 
1137 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setvalid: dip 0x%p msi_num 0x%x "
1138 	    "msi_valid_state 0x%x\n", dip, msi_num, msi_valid_state);
1139 
1140 	if ((ret = hvio_msi_setvalid(DIP_TO_HANDLE(dip),
1141 	    msi_num, msi_valid_state)) != H_EOK) {
1142 		DBG(DBG_LIB_MSI, dip,
1143 		    "hvio_msi_setvalid failed, ret 0x%lx\n", ret);
1144 		return (DDI_FAILURE);
1145 	}
1146 
1147 	return (DDI_SUCCESS);
1148 }
1149 
1150 /*ARGSUSED*/
1151 int
1152 px_lib_msi_getstate(dev_info_t *dip, msinum_t msi_num,
1153     pci_msi_state_t *msi_state)
1154 {
1155 	uint64_t	ret;
1156 
1157 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: dip 0x%p msi_num 0x%x\n",
1158 	    dip, msi_num);
1159 
1160 	if ((ret = hvio_msi_getstate(DIP_TO_HANDLE(dip),
1161 	    msi_num, msi_state)) != H_EOK) {
1162 		DBG(DBG_LIB_MSI, dip,
1163 		    "hvio_msi_getstate failed, ret 0x%lx\n", ret);
1164 		return (DDI_FAILURE);
1165 	}
1166 
1167 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: msi_state 0x%x\n",
1168 	    *msi_state);
1169 
1170 	return (DDI_SUCCESS);
1171 }
1172 
1173 /*ARGSUSED*/
1174 int
1175 px_lib_msi_setstate(dev_info_t *dip, msinum_t msi_num,
1176     pci_msi_state_t msi_state)
1177 {
1178 	uint64_t	ret;
1179 
1180 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setstate: dip 0x%p msi_num 0x%x "
1181 	    "msi_state 0x%x\n", dip, msi_num, msi_state);
1182 
1183 	if ((ret = hvio_msi_setstate(DIP_TO_HANDLE(dip),
1184 	    msi_num, msi_state)) != H_EOK) {
1185 		DBG(DBG_LIB_MSI, dip,
1186 		    "hvio_msi_setstate failed, ret 0x%lx\n", ret);
1187 		return (DDI_FAILURE);
1188 	}
1189 
1190 	return (DDI_SUCCESS);
1191 }
1192 
1193 /*
1194  * MSG Functions:
1195  */
1196 /*ARGSUSED*/
1197 int
1198 px_lib_msg_getmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
1199     msiqid_t *msiq_id)
1200 {
1201 	uint64_t	ret;
1202 
1203 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: dip 0x%p msg_type 0x%x\n",
1204 	    dip, msg_type);
1205 
1206 	if ((ret = hvio_msg_getmsiq(DIP_TO_HANDLE(dip),
1207 	    msg_type, msiq_id)) != H_EOK) {
1208 		DBG(DBG_LIB_MSG, dip,
1209 		    "hvio_msg_getmsiq failed, ret 0x%lx\n", ret);
1210 		return (DDI_FAILURE);
1211 	}
1212 
1213 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: msiq_id 0x%x\n",
1214 	    *msiq_id);
1215 
1216 	return (DDI_SUCCESS);
1217 }
1218 
1219 /*ARGSUSED*/
1220 int
1221 px_lib_msg_setmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
1222     msiqid_t msiq_id)
1223 {
1224 	uint64_t	ret;
1225 
1226 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_setmsiq: dip 0x%p msg_type 0x%x "
1227 	    "msiq_id 0x%x\n", dip, msg_type, msiq_id);
1228 
1229 	if ((ret = hvio_msg_setmsiq(DIP_TO_HANDLE(dip),
1230 	    msg_type, msiq_id)) != H_EOK) {
1231 		DBG(DBG_LIB_MSG, dip,
1232 		    "hvio_msg_setmsiq failed, ret 0x%lx\n", ret);
1233 		return (DDI_FAILURE);
1234 	}
1235 
1236 	return (DDI_SUCCESS);
1237 }
1238 
1239 /*ARGSUSED*/
1240 int
1241 px_lib_msg_getvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
1242     pcie_msg_valid_state_t *msg_valid_state)
1243 {
1244 	uint64_t	ret;
1245 
1246 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: dip 0x%p msg_type 0x%x\n",
1247 	    dip, msg_type);
1248 
1249 	if ((ret = hvio_msg_getvalid(DIP_TO_HANDLE(dip), msg_type,
1250 	    msg_valid_state)) != H_EOK) {
1251 		DBG(DBG_LIB_MSG, dip,
1252 		    "hvio_msg_getvalid failed, ret 0x%lx\n", ret);
1253 		return (DDI_FAILURE);
1254 	}
1255 
1256 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: msg_valid_state 0x%x\n",
1257 	    *msg_valid_state);
1258 
1259 	return (DDI_SUCCESS);
1260 }
1261 
1262 /*ARGSUSED*/
1263 int
1264 px_lib_msg_setvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
1265     pcie_msg_valid_state_t msg_valid_state)
1266 {
1267 	uint64_t	ret;
1268 
1269 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_setvalid: dip 0x%p msg_type 0x%x "
1270 	    "msg_valid_state 0x%x\n", dip, msg_type, msg_valid_state);
1271 
1272 	if ((ret = hvio_msg_setvalid(DIP_TO_HANDLE(dip), msg_type,
1273 	    msg_valid_state)) != H_EOK) {
1274 		DBG(DBG_LIB_MSG, dip,
1275 		    "hvio_msg_setvalid failed, ret 0x%lx\n", ret);
1276 		return (DDI_FAILURE);
1277 	}
1278 
1279 	return (DDI_SUCCESS);
1280 }
1281 
1282 /*
1283  * Suspend/Resume Functions:
1284  * Currently unsupported by hypervisor
1285  */
1286 int
1287 px_lib_suspend(dev_info_t *dip)
1288 {
1289 	px_t		*px_p = DIP_TO_STATE(dip);
1290 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
1291 	px_cb_t		*cb_p = PX2CB(px_p);
1292 	devhandle_t	dev_hdl, xbus_dev_hdl;
1293 	uint64_t	ret = H_EOK;
1294 
1295 	DBG(DBG_DETACH, dip, "px_lib_suspend: dip 0x%p\n", dip);
1296 
1297 	dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR];
1298 	xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC];
1299 
1300 	if ((ret = hvio_suspend(dev_hdl, pxu_p)) != H_EOK)
1301 		goto fail;
1302 
1303 	if (--cb_p->attachcnt == 0) {
1304 		ret = hvio_cb_suspend(xbus_dev_hdl, pxu_p);
1305 		if (ret != H_EOK)
1306 			cb_p->attachcnt++;
1307 	}
1308 	pxu_p->cpr_flag = PX_ENTERED_CPR;
1309 
1310 fail:
1311 	return ((ret != H_EOK) ? DDI_FAILURE: DDI_SUCCESS);
1312 }
1313 
1314 void
1315 px_lib_resume(dev_info_t *dip)
1316 {
1317 	px_t		*px_p = DIP_TO_STATE(dip);
1318 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
1319 	px_cb_t		*cb_p = PX2CB(px_p);
1320 	devhandle_t	dev_hdl, xbus_dev_hdl;
1321 	devino_t	pec_ino = px_p->px_inos[PX_INTR_PEC];
1322 	devino_t	xbc_ino = px_p->px_inos[PX_INTR_XBC];
1323 
1324 	DBG(DBG_ATTACH, dip, "px_lib_resume: dip 0x%p\n", dip);
1325 
1326 	dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR];
1327 	xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC];
1328 
1329 	if (++cb_p->attachcnt == 1)
1330 		hvio_cb_resume(dev_hdl, xbus_dev_hdl, xbc_ino, pxu_p);
1331 
1332 	hvio_resume(dev_hdl, pec_ino, pxu_p);
1333 }
1334 
1335 /*
1336  * Generate a unique Oberon UBC ID based on the Logical System Board and
1337  * the IO Channel from the portid property field.
1338  */
1339 static uint64_t
1340 oberon_get_ubc_id(dev_info_t *dip)
1341 {
1342 	px_t	*px_p = DIP_TO_STATE(dip);
1343 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
1344 	uint64_t	ubc_id;
1345 
1346 	/*
1347 	 * Generate a unique 6 bit UBC ID using the 2 IO_Channel#[1:0] bits and
1348 	 * the 4 LSB_ID[3:0] bits from the Oberon's portid property.
1349 	 */
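	/* i.e. ubc_id = (LSB_ID << OBERON_UBC_ID_LSB) | IO_Channel */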
1350 	ubc_id = (((pxu_p->portid >> OBERON_PORT_ID_IOC) &
1351 	    OBERON_PORT_ID_IOC_MASK) | (((pxu_p->portid >>
1352 	    OBERON_PORT_ID_LSB) & OBERON_PORT_ID_LSB_MASK)
1353 	    << OBERON_UBC_ID_LSB));
1354 
1355 	return (ubc_id);
1356 }
1357 
1358 /*
1359  * Oberon does not have a UBC scratch register, so allocate an array of
1360  * scratch registers when needed and use a unique UBC ID as an index.
1361  * This code could be simplified by using a pre-allocated array; the
1362  * array is allocated dynamically because it is only needed by the Oberon.
1363  */
1364 static void
1365 oberon_set_cb(dev_info_t *dip, uint64_t val)
1366 {
1367 	uint64_t	ubc_id;
1368 
1369 	if (px_oberon_ubc_scratch_regs == NULL)
1370 		px_oberon_ubc_scratch_regs =
1371 		    (uint64_t *)kmem_zalloc(sizeof (uint64_t)*
1372 		    OBERON_UBC_ID_MAX, KM_SLEEP);
1373 
1374 	ubc_id = oberon_get_ubc_id(dip);
1375 
1376 	px_oberon_ubc_scratch_regs[ubc_id] = val;
1377 
1378 	/*
1379 	 * Check if any scratch registers are still in use. If all scratch
1380 	 * registers are currently set to zero, then deallocate the scratch
1381 	 * register array.
1382 	 */
1383 	for (ubc_id = 0; ubc_id < OBERON_UBC_ID_MAX; ubc_id++) {
1384 		if (px_oberon_ubc_scratch_regs[ubc_id] != 0)
1385 			return;
1386 	}
1387 
1388 	/*
1389 	 * All scratch registers are set to zero so deallocate the scratch
1390 	 * register array and set the pointer to NULL.
1391 	 */
1392 	kmem_free(px_oberon_ubc_scratch_regs,
1393 	    (sizeof (uint64_t)*OBERON_UBC_ID_MAX));
1394 
1395 	px_oberon_ubc_scratch_regs = NULL;
1396 }
1397 
1398 /*
1399  * Oberon does not have a UBC scratch register, so use an allocated array of
1400  * scratch registers and use the unique UBC ID as an index into that array.
1401  */
1402 static uint64_t
1403 oberon_get_cb(dev_info_t *dip)
1404 {
1405 	uint64_t	ubc_id;
1406 
1407 	if (px_oberon_ubc_scratch_regs == NULL)
1408 		return (0);
1409 
1410 	ubc_id = oberon_get_ubc_id(dip);
1411 
1412 	return (px_oberon_ubc_scratch_regs[ubc_id]);
1413 }
1414 
1415 /*
1416  * Misc Functions:
1417  * Currently unsupported by hypervisor
1418  */
1419 static uint64_t
1420 px_get_cb(dev_info_t *dip)
1421 {
1422 	px_t	*px_p = DIP_TO_STATE(dip);
1423 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
1424 
1425 	/*
1426 	 * Oberon does not currently have Scratchpad registers.
1427 	 */
1428 	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON)
1429 		return (oberon_get_cb(dip));
1430 
1431 	return (CSR_XR((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1));
1432 }
1433 
1434 static void
1435 px_set_cb(dev_info_t *dip, uint64_t val)
1436 {
1437 	px_t	*px_p = DIP_TO_STATE(dip);
1438 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
1439 
1440 	/*
1441 	 * Oberon does not currently have Scratchpad registers.
1442 	 */
1443 	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
1444 		oberon_set_cb(dip, val);
1445 		return;
1446 	}
1447 
1448 	CSR_XS((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1, val);
1449 }
1450 
1451 /*ARGSUSED*/
1452 int
1453 px_lib_map_vconfig(dev_info_t *dip,
1454 	ddi_map_req_t *mp, pci_config_offset_t off,
1455 		pci_regspec_t *rp, caddr_t *addrp)
1456 {
1457 	/*
1458 	 * No special config space access services in this layer.
1459 	 */
1460 	return (DDI_FAILURE);
1461 }
1462 
1463 void
1464 px_lib_map_attr_check(ddi_map_req_t *mp)
1465 {
1466 	ddi_acc_hdl_t *hp = mp->map_handlep;
1467 
1468 	/* fire does not accept byte masks from PIO store merge */
1469 	if (hp->ah_acc.devacc_attr_dataorder == DDI_STORECACHING_OK_ACC)
1470 		hp->ah_acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1471 }
1472 
1473 /* This function is called only by poke, caut put and pxtool poke. */
1474 void
1475 px_lib_clr_errs(px_t *px_p, dev_info_t *rdip, uint64_t addr)
1476 {
1477 	px_pec_t	*pec_p = px_p->px_pec_p;
1478 	dev_info_t	*rpdip = px_p->px_dip;
1479 	int		rc_err, fab_err, i;
1480 	int		acctype = pec_p->pec_safeacc_type;
1481 	ddi_fm_error_t	derr;
1482 	px_ranges_t	*ranges_p;
1483 	int		range_len;
1484 	uint32_t	addr_high, addr_low;
1485 	pcie_req_id_t	bdf = 0;
1486 
1487 	/* Create the derr */
1488 	bzero(&derr, sizeof (ddi_fm_error_t));
1489 	derr.fme_version = DDI_FME_VERSION;
1490 	derr.fme_ena = fm_ena_generate(0, FM_ENA_FMT1);
1491 	derr.fme_flag = acctype;
1492 
1493 	if (acctype == DDI_FM_ERR_EXPECTED) {
1494 		derr.fme_status = DDI_FM_NONFATAL;
1495 		ndi_fm_acc_err_set(pec_p->pec_acc_hdl, &derr);
1496 	}
1497 
1498 	if (px_fm_enter(px_p) != DDI_SUCCESS)
1499 		return;
1500 
1501 	/* send ereport/handle/clear fire registers */
1502 	rc_err = px_err_cmn_intr(px_p, &derr, PX_LIB_CALL, PX_FM_BLOCK_ALL);
1503 
1504 	/* Figure out if this is a cfg or mem32 access */
1505 	addr_high = (uint32_t)(addr >> 32);
1506 	addr_low = (uint32_t)addr;
1507 	range_len = px_p->px_ranges_length / sizeof (px_ranges_t);
1508 	i = 0;
1509 	for (ranges_p = px_p->px_ranges_p; i < range_len; i++, ranges_p++) {
1510 		if (ranges_p->parent_high == addr_high) {
1511 			switch (ranges_p->child_high & PCI_ADDR_MASK) {
1512 			case PCI_ADDR_CONFIG:
1513 				bdf = (pcie_req_id_t)(addr_low >> 12);
1514 				addr_low = 0;
1515 				break;
1516 			case PCI_ADDR_MEM32:
1517 				if (rdip)
1518 					bdf = PCI_GET_BDF(rdip);
1519 				else
1520 					bdf = 0;
1521 				break;
1522 			}
1523 			break;
1524 		}
1525 	}
1526 
1527 	px_rp_en_q(px_p, bdf, addr_low, NULL);
1528 
1529 	/*
1530 	 * XXX - Current code scans the fabric for all px_tool accesses.
1531 	 * In the future, do not scan fabric for px_tool access to IO Root Nexus
1532 	 */
1533 	fab_err = px_scan_fabric(px_p, rpdip, &derr);
1534 
1535 	px_err_panic(rc_err, PX_RC, fab_err, B_TRUE);
1536 	px_fm_exit(px_p);
1537 	px_err_panic(rc_err, PX_RC, fab_err, B_FALSE);
1538 }
1539 
1540 #ifdef  DEBUG
1541 int	px_peekfault_cnt = 0;
1542 int	px_pokefault_cnt = 0;
1543 #endif  /* DEBUG */
1544 
1545 /*ARGSUSED*/
1546 static int
1547 px_lib_do_poke(dev_info_t *dip, dev_info_t *rdip,
1548     peekpoke_ctlops_t *in_args)
1549 {
1550 	px_t *px_p = DIP_TO_STATE(dip);
1551 	px_pec_t *pec_p = px_p->px_pec_p;
1552 	int err = DDI_SUCCESS;
1553 	on_trap_data_t otd;
1554 
1555 	mutex_enter(&pec_p->pec_pokefault_mutex);
1556 	pec_p->pec_ontrap_data = &otd;
1557 	pec_p->pec_safeacc_type = DDI_FM_ERR_POKE;
1558 
1559 	/* Set up protected environment. */
1560 	if (!on_trap(&otd, OT_DATA_ACCESS)) {
1561 		uintptr_t tramp = otd.ot_trampoline;
1562 
1563 		otd.ot_trampoline = (uintptr_t)&poke_fault;
1564 		err = do_poke(in_args->size, (void *)in_args->dev_addr,
1565 		    (void *)in_args->host_addr);
1566 		otd.ot_trampoline = tramp;
1567 	} else
1568 		err = DDI_FAILURE;
1569 
1570 	px_lib_clr_errs(px_p, rdip, in_args->dev_addr);
1571 
1572 	if (otd.ot_trap & OT_DATA_ACCESS)
1573 		err = DDI_FAILURE;
1574 
1575 	/* Take down protected environment. */
1576 	no_trap();
1577 
1578 	pec_p->pec_ontrap_data = NULL;
1579 	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
1580 	mutex_exit(&pec_p->pec_pokefault_mutex);
1581 
1582 #ifdef  DEBUG
1583 	if (err == DDI_FAILURE)
1584 		px_pokefault_cnt++;
1585 #endif
1586 	return (err);
1587 }
1588 
1589 /*ARGSUSED*/
1590 static int
1591 px_lib_do_caut_put(dev_info_t *dip, dev_info_t *rdip,
1592     peekpoke_ctlops_t *cautacc_ctlops_arg)
1593 {
1594 	size_t size = cautacc_ctlops_arg->size;
1595 	uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
1596 	uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
1597 	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
1598 	size_t repcount = cautacc_ctlops_arg->repcount;
1599 	uint_t flags = cautacc_ctlops_arg->flags;
1600 
1601 	px_t *px_p = DIP_TO_STATE(dip);
1602 	px_pec_t *pec_p = px_p->px_pec_p;
1603 	int err = DDI_SUCCESS;
1604 
1605 	/*
1606 	 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault
1607 	 * mutex.
1608 	 */
1609 	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
1610 
1611 	pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap;
1612 	pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED;
1613 	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;
1614 
1615 	if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
1616 		for (; repcount; repcount--) {
1617 			switch (size) {
1618 
1619 			case sizeof (uint8_t):
1620 				i_ddi_put8(hp, (uint8_t *)dev_addr,
1621 				    *(uint8_t *)host_addr);
1622 				break;
1623 
1624 			case sizeof (uint16_t):
1625 				i_ddi_put16(hp, (uint16_t *)dev_addr,
1626 				    *(uint16_t *)host_addr);
1627 				break;
1628 
1629 			case sizeof (uint32_t):
1630 				i_ddi_put32(hp, (uint32_t *)dev_addr,
1631 				    *(uint32_t *)host_addr);
1632 				break;
1633 
1634 			case sizeof (uint64_t):
1635 				i_ddi_put64(hp, (uint64_t *)dev_addr,
1636 				    *(uint64_t *)host_addr);
1637 				break;
1638 			}
1639 
1640 			host_addr += size;
1641 
1642 			if (flags == DDI_DEV_AUTOINCR)
1643 				dev_addr += size;
1644 
1645 			px_lib_clr_errs(px_p, rdip, dev_addr);
1646 
1647 			if (pec_p->pec_ontrap_data->ot_trap & OT_DATA_ACCESS) {
1648 				err = DDI_FAILURE;
1649 #ifdef  DEBUG
1650 				px_pokefault_cnt++;
1651 #endif
1652 				break;
1653 			}
1654 		}
1655 	}
1656 
1657 	i_ddi_notrap((ddi_acc_handle_t)hp);
1658 	pec_p->pec_ontrap_data = NULL;
1659 	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
1660 	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
1661 	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;
1662 
1663 	return (err);
1664 }
1665 
1666 
1667 int
1668 px_lib_ctlops_poke(dev_info_t *dip, dev_info_t *rdip,
1669     peekpoke_ctlops_t *in_args)
1670 {
1671 	return (in_args->handle ? px_lib_do_caut_put(dip, rdip, in_args) :
1672 	    px_lib_do_poke(dip, rdip, in_args));
1673 }
1674 
1675 
1676 /*ARGSUSED*/
1677 static int
1678 px_lib_do_peek(dev_info_t *dip, peekpoke_ctlops_t *in_args)
1679 {
1680 	px_t *px_p = DIP_TO_STATE(dip);
1681 	px_pec_t *pec_p = px_p->px_pec_p;
1682 	int err = DDI_SUCCESS;
1683 	on_trap_data_t otd;
1684 
1685 	mutex_enter(&pec_p->pec_pokefault_mutex);
1686 	if (px_fm_enter(px_p) != DDI_SUCCESS)
1687 		return (DDI_FAILURE);
1688 	pec_p->pec_safeacc_type = DDI_FM_ERR_PEEK;
1689 	px_fm_exit(px_p);
1690 
1691 	if (!on_trap(&otd, OT_DATA_ACCESS)) {
1692 		uintptr_t tramp = otd.ot_trampoline;
1693 
1694 		otd.ot_trampoline = (uintptr_t)&peek_fault;
1695 		err = do_peek(in_args->size, (void *)in_args->dev_addr,
1696 		    (void *)in_args->host_addr);
1697 		otd.ot_trampoline = tramp;
1698 	} else
1699 		err = DDI_FAILURE;
1700 
1701 	no_trap();
1702 	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
1703 	mutex_exit(&pec_p->pec_pokefault_mutex);
1704 
1705 #ifdef  DEBUG
1706 	if (err == DDI_FAILURE)
1707 		px_peekfault_cnt++;
1708 #endif
1709 	return (err);
1710 }
1711 
1712 
1713 static int
1714 px_lib_do_caut_get(dev_info_t *dip, peekpoke_ctlops_t *cautacc_ctlops_arg)
1715 {
1716 	size_t size = cautacc_ctlops_arg->size;
1717 	uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
1718 	uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
1719 	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
1720 	size_t repcount = cautacc_ctlops_arg->repcount;
1721 	uint_t flags = cautacc_ctlops_arg->flags;
1722 
1723 	px_t *px_p = DIP_TO_STATE(dip);
1724 	px_pec_t *pec_p = px_p->px_pec_p;
1725 	int err = DDI_SUCCESS;
1726 
1727 	/*
1728 	 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault
1729 	 * mutex.
1730 	 */
1731 	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
1732 
1733 	pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap;
1734 	pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED;
1735 	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;
1736 
1737 	if (repcount == 1) {
1738 		if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
1739 			i_ddi_caut_get(size, (void *)dev_addr,
1740 			    (void *)host_addr);
1741 		} else {
1742 			int i;
1743 			uint8_t *ff_addr = (uint8_t *)host_addr;
1744 			for (i = 0; i < size; i++)
1745 				*ff_addr++ = 0xff;
1746 
1747 			err = DDI_FAILURE;
1748 #ifdef  DEBUG
1749 			px_peekfault_cnt++;
1750 #endif
1751 		}
1752 	} else {
1753 		if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
1754 			for (; repcount; repcount--) {
1755 				i_ddi_caut_get(size, (void *)dev_addr,
1756 				    (void *)host_addr);
1757 
1758 				host_addr += size;
1759 
1760 				if (flags == DDI_DEV_AUTOINCR)
1761 					dev_addr += size;
1762 			}
1763 		} else {
1764 			err = DDI_FAILURE;
1765 #ifdef  DEBUG
1766 			px_peekfault_cnt++;
1767 #endif
1768 		}
1769 	}
1770 
1771 	i_ddi_notrap((ddi_acc_handle_t)hp);
1772 	pec_p->pec_ontrap_data = NULL;
1773 	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
1774 	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
1775 	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;
1776 
1777 	return (err);
1778 }
1779 
1780 /*ARGSUSED*/
1781 int
1782 px_lib_ctlops_peek(dev_info_t *dip, dev_info_t *rdip,
1783     peekpoke_ctlops_t *in_args, void *result)
1784 {
1785 	result = (void *)in_args->host_addr;
1786 	return (in_args->handle ? px_lib_do_caut_get(dip, in_args) :
1787 	    px_lib_do_peek(dip, in_args));
1788 }
1789 
1790 /*
1791  * implements PPM interface
1792  */
1793 int
1794 px_lib_pmctl(int cmd, px_t *px_p)
1795 {
1796 	ASSERT((cmd & ~PPMREQ_MASK) == PPMREQ);
1797 	switch (cmd) {
1798 	case PPMREQ_PRE_PWR_OFF:
1799 		/*
1800 		 * Currently there is no device power management for
1801 		 * the root complex (fire). When there is, we need to make
1802 		 * sure that it is at full power before trying to send the
1803 		 * PME_Turn_Off message.
1804 		 */
1805 		DBG(DBG_PWR, px_p->px_dip,
1806 		    "ioctl: request to send PME_Turn_Off\n");
1807 		return (px_goto_l23ready(px_p));
1808 
1809 	case PPMREQ_PRE_PWR_ON:
1810 		DBG(DBG_PWR, px_p->px_dip, "ioctl: PRE_PWR_ON request\n");
1811 		return (px_pre_pwron_check(px_p));
1812 
1813 	case PPMREQ_POST_PWR_ON:
1814 		DBG(DBG_PWR, px_p->px_dip, "ioctl: POST_PWR_ON request\n");
1815 		return (px_goto_l0(px_p));
1816 
1817 	default:
1818 		return (DDI_FAILURE);
1819 	}
1820 }
1821 
1822 /*
1823  * Sends a PME_Turn_Off message to put the link in the L2/L3 Ready state.
1824  * Called by px_ioctl.
1825  * Returns DDI_SUCCESS or DDI_FAILURE.
1826  * 1. Wait for the link to be in the L1 state (link status reg).
1827  * 2. Write to the PME_Turn_Off reg to broadcast the message.
1828  * 3. Set a timeout.
1829  * 4. If the timeout expires, return failure.
1830  * 5. If PME_To_Ack is received, wait till the link is in L2/L3 Ready.
1831  */
1832 static int
1833 px_goto_l23ready(px_t *px_p)
1834 {
1835 	pcie_pwr_t	*pwr_p;
1836 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
1837 	caddr_t	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
1838 	int		ret = DDI_SUCCESS;
1839 	clock_t		end, timeleft;
1840 	int		mutex_held = 1;
1841 
1842 	/* If no PM info, return failure */
1843 	if (!PCIE_PMINFO(px_p->px_dip) ||
1844 	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
1845 		return (DDI_FAILURE);
1846 
1847 	mutex_enter(&pwr_p->pwr_lock);
1848 	mutex_enter(&px_p->px_l23ready_lock);
1849 	/* Clear the PME_To_ACK received flag */
1850 	px_p->px_pm_flags &= ~PX_PMETOACK_RECVD;
1851 	/*
1852 	 * When P25 is the downstream device, after receiving
1853 	 * PME_To_ACK, fire will go to Detect state, which causes
1854 	 * the link down event. Inform FMA that this is expected.
1855 	 * For all other cards compliant with the PCI Express
1856 	 * spec, this will happen when the power is re-applied. FMA
1857 	 * code will clear this flag after one instance of LDN. Since
1858 	 * there will not be a LDN event for the spec compliant cards,
1859 	 * we need to clear the flag after receiving PME_To_ACK.
1860 	 */
1861 	px_p->px_pm_flags |= PX_LDN_EXPECTED;
1862 	if (px_send_pme_turnoff(csr_base) != DDI_SUCCESS) {
1863 		ret = DDI_FAILURE;
1864 		goto l23ready_done;
1865 	}
1866 	px_p->px_pm_flags |= PX_PME_TURNOFF_PENDING;
1867 
1868 	end = ddi_get_lbolt() + drv_usectohz(px_pme_to_ack_timeout);
1869 	while (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
1870 		timeleft = cv_timedwait(&px_p->px_l23ready_cv,
1871 		    &px_p->px_l23ready_lock, end);
1872 		/*
1873 		 * If cv_timedwait returns -1, one of the following is true:
1874 		 * 1) it timed out, or
1875 		 * 2) there was a premature wakeup, but by the time
1876 		 * cv_timedwait is called again end < lbolt, i.e.
1877 		 * end is in the past, or
1878 		 * 3) by the time we make the first cv_timedwait call,
1879 		 * end < lbolt is already true.
1880 		 */
1881 		if (timeleft == -1)
1882 			break;
1883 	}
1884 	if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
1885 		/*
1886 		 * Either we timed out or the interrupt didn't get a
1887 		 * chance to grab the mutex and set the flag.
1888 		 * Release the mutex and delay for some time.
1889 		 * This will 1) give the interrupt a chance to
1890 		 * set the flag and 2) create a delay between two
1891 		 * consecutive requests.
1892 		 */
1893 		mutex_exit(&px_p->px_l23ready_lock);
1894 		delay(drv_usectohz(50 * PX_MSEC_TO_USEC));
1895 		mutex_held = 0;
1896 		if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
1897 			ret = DDI_FAILURE;
1898 			DBG(DBG_PWR, px_p->px_dip, " Timed out while waiting"
1899 			    " for PME_TO_ACK\n");
1900 		}
1901 	}
1902 	px_p->px_pm_flags &=
1903 	    ~(PX_PME_TURNOFF_PENDING | PX_PMETOACK_RECVD | PX_LDN_EXPECTED);
1904 
1905 l23ready_done:
1906 	if (mutex_held)
1907 		mutex_exit(&px_p->px_l23ready_lock);
1908 	/*
1909 	 * Wait till the link is in L1 idle, if sending PME_Turn_Off
1910 	 * was successful.
1911 	 */
1912 	if (ret == DDI_SUCCESS) {
1913 		if (px_link_wait4l1idle(csr_base) != DDI_SUCCESS) {
1914 			DBG(DBG_PWR, px_p->px_dip, " Link is not at L1"
1915 			    " even though we received PME_To_ACK.\n");
1916 			/*
1917 			 * Workaround for a hardware bug with P25.
1918 			 * Due to this bug, the link state will be
1919 			 * Detect rather than L1 after the link has
1920 			 * transitioned to the L23Ready state. Since
1921 			 * we can't tell whether the link is in L23Ready
1922 			 * without Fire's state being L1_idle, we delay
1923 			 * here just to make sure that we wait till the
1924 			 * link has transitioned to L23Ready.
1925 			 */
1926 			delay(drv_usectohz(100 * PX_MSEC_TO_USEC));
1927 		}
1928 		pwr_p->pwr_link_lvl = PM_LEVEL_L3;
1930 	}
1931 	mutex_exit(&pwr_p->pwr_lock);
1932 	return (ret);
1933 }
1934 
1935 /*
1936  * Message interrupt handler intended to be shared for both
1937  * PME and PME_TO_ACK msg handling; currently it only handles the
1938  * PME_To_ACK message.
1939  */
1940 uint_t
1941 px_pmeq_intr(caddr_t arg)
1942 {
1943 	px_t	*px_p = (px_t *)arg;
1944 
1945 	DBG(DBG_PWR, px_p->px_dip, " PME_To_ACK received \n");
1946 	mutex_enter(&px_p->px_l23ready_lock);
1947 	cv_broadcast(&px_p->px_l23ready_cv);
1948 	if (px_p->px_pm_flags & PX_PME_TURNOFF_PENDING) {
1949 		px_p->px_pm_flags |= PX_PMETOACK_RECVD;
1950 	} else {
1951 		/*
1952 		 * This may be the second ack received. If so,
1953 		 * we should be receiving it during the wait4L1 stage.
1954 		 */
1955 		px_p->px_pmetoack_ignored++;
1956 	}
1957 	mutex_exit(&px_p->px_l23ready_lock);
1958 	return (DDI_INTR_CLAIMED);
1959 }
1960 
1961 static int
1962 px_pre_pwron_check(px_t *px_p)
1963 {
1964 	pcie_pwr_t	*pwr_p;
1965 
1966 	/* If no PM info, return failure */
1967 	if (!PCIE_PMINFO(px_p->px_dip) ||
1968 	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
1969 		return (DDI_FAILURE);
1970 
1971 	/*
1972 	 * For spec-compliant downstream cards, a link down event
1973 	 * is expected when the device is powered on.
1974 	 */
1975 	px_p->px_pm_flags |= PX_LDN_EXPECTED;
1976 	return (pwr_p->pwr_link_lvl == PM_LEVEL_L3 ? DDI_SUCCESS : DDI_FAILURE);
1977 }
1978 
1979 static int
1980 px_goto_l0(px_t *px_p)
1981 {
1982 	pcie_pwr_t	*pwr_p;
1983 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
1984 	caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
1985 	int		ret = DDI_SUCCESS;
1986 	uint64_t	time_spent = 0;
1987 
1988 	/* If no PM info, return failure */
1989 	if (!PCIE_PMINFO(px_p->px_dip) ||
1990 	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
1991 		return (DDI_FAILURE);
1992 
1993 	mutex_enter(&pwr_p->pwr_lock);
1994 	/*
1995 	 * The following link retrain activity will cause LDN and LUP events.
1996 	 * Receiving an LDN prior to receiving an LUP is expected, not an
1997 	 * error, in this case.  Receiving an LUP indicates the link is fully
1998 	 * up and able to support powering up the downstream device; any
1999 	 * further LDN or LUP outside this context is an error.
2000 	 */
2001 	px_p->px_lup_pending = 1;
2002 	if (px_link_retrain(csr_base) != DDI_SUCCESS) {
2003 		ret = DDI_FAILURE;
2004 		goto l0_done;
2005 	}
2006 
2007 	/* The LUP event takes on the order of 15 ms to occur */
2008 	for (; px_p->px_lup_pending && (time_spent < px_lup_poll_to);
2009 	    time_spent += px_lup_poll_interval)
2010 		drv_usecwait(px_lup_poll_interval);
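	/* If the LUP never arrived within the poll timeout, fail power-on. */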
2011 	if (px_p->px_lup_pending)
2012 		ret = DDI_FAILURE;
2013 l0_done:
2014 	px_enable_detect_quiet(csr_base);
2015 	if (ret == DDI_SUCCESS)
2016 		pwr_p->pwr_link_lvl = PM_LEVEL_L0;
2017 	mutex_exit(&pwr_p->pwr_lock);
2018 	return (ret);
2019 }
2020 
2021 /*
2022  * Extract the driver's binding name to identify which chip we're binding to.
2023  * Whenever a new bus bridge is created, the driver alias entry should be
2024  * added here to identify the device if needed.  If a device isn't added,
2025  * the identity defaults to PX_CHIP_UNIDENTIFIED.
2026  */
2027 static uint32_t
2028 px_identity_init(px_t *px_p)
2029 {
2030 	dev_info_t	*dip = px_p->px_dip;
2031 	char		*name = ddi_binding_name(dip);
2032 	uint32_t	revision = 0;
2033 
2034 	revision = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
2035 	    "module-revision#", 0);
2036 
2037 	/* Check for Fire driver binding name */
2038 	if (strcmp(name, "pciex108e,80f0") == 0) {
2039 		DBG(DBG_ATTACH, dip, "px_identity_init: %s%d: "
2040 		    "(FIRE), module-revision %d\n", NAMEINST(dip),
2041 		    revision);
2042 
2043 		return ((revision >= FIRE_MOD_REV_20) ?
2044 		    PX_CHIP_FIRE : PX_CHIP_UNIDENTIFIED);
2045 	}
2046 
2047 	/* Check for Oberon driver binding name */
2048 	if (strcmp(name, "pciex108e,80f8") == 0) {
2049 		DBG(DBG_ATTACH, dip, "px_identity_init: %s%d: "
2050 		    "(OBERON), module-revision %d\n", NAMEINST(dip),
2051 		    revision);
2052 
2053 		return (PX_CHIP_OBERON);
2054 	}
2055 
2056 	DBG(DBG_ATTACH, dip, "%s%d: Unknown PCI Express Host bridge %s %x\n",
2057 	    ddi_driver_name(dip), ddi_get_instance(dip), name, revision);
2058 
2059 	return (PX_CHIP_UNIDENTIFIED);
2060 }
2061 
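/*
 * px_err_add_intr() - register the px error interrupt handler for this
 * fault and enable its ino on the current interrupt-distribution CPU.
 */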
2062 int
2063 px_err_add_intr(px_fault_t *px_fault_p)
2064 {
2065 	dev_info_t	*dip = px_fault_p->px_fh_dip;
2066 	px_t		*px_p = DIP_TO_STATE(dip);
2067 
2068 	VERIFY(add_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL,
2069 	    (intrfunc)px_fault_p->px_err_func, (caddr_t)px_fault_p,
2070 	    NULL, NULL) == 0);
2071 
2072 	px_ib_intr_enable(px_p, intr_dist_cpuid(), px_fault_p->px_intr_ino);
2073 
2074 	return (DDI_SUCCESS);
2075 }
2076 
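/*
 * px_err_rem_intr() - disable the error ino, waiting for any pending
 * interrupt to complete, then unregister the handler.
 */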
2077 void
2078 px_err_rem_intr(px_fault_t *px_fault_p)
2079 {
2080 	dev_info_t	*dip = px_fault_p->px_fh_dip;
2081 	px_t		*px_p = DIP_TO_STATE(dip);
2082 
2083 	px_ib_intr_disable(px_p->px_ib_p, px_fault_p->px_intr_ino,
2084 	    IB_INTR_WAIT);
2085 
2086 	VERIFY(rem_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL) == 0);
2087 }
2088 
2089 /*
2090  * px_cb_intr_redist() - sun4u only, CB interrupt redistribution
2091  */
2092 void
2093 px_cb_intr_redist(void *arg)
2094 {
2095 	px_cb_t		*cb_p = (px_cb_t *)arg;
2096 	px_cb_list_t	*pxl;
2097 	px_t		*pxp = NULL;
2098 	px_fault_t	*f_p = NULL;
2099 	uint32_t	new_cpuid;
2100 	intr_valid_state_t	enabled = 0;
2101 
2102 	mutex_enter(&cb_p->cb_mutex);
2103 
2104 	pxl = cb_p->pxl;
2105 	if (!pxl)
2106 		goto cb_done;
2107 
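	/*
	 * Walk the CB list for the px whose fault handle owns the
	 * currently enabled CB interrupt (i.e. matches cb_p->sysino).
	 */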
2108 	pxp = pxl->pxp;
2109 	f_p = &pxp->px_cb_fault;
2110 	for (; pxl && (f_p->px_fh_sysino != cb_p->sysino); ) {
2111 		pxl = pxl->next;
2112 		pxp = pxl->pxp;
2113 		f_p = &pxp->px_cb_fault;
2114 	}
2115 	if (pxl == NULL)
2116 		goto cb_done;
2117 
2118 	new_cpuid = intr_dist_cpuid();
2119 	if (new_cpuid == cb_p->cpuid)
2120 		goto cb_done;
2121 
2122 	if ((px_lib_intr_getvalid(pxp->px_dip, f_p->px_fh_sysino, &enabled)
2123 	    != DDI_SUCCESS) || !enabled) {
2124 		DBG(DBG_IB, pxp->px_dip, "px_cb_intr_redist: CB not enabled, "
2125 		    "sysino(0x%x)\n", f_p->px_fh_sysino);
2126 		goto cb_done;
2127 	}
2128 
2129 	PX_INTR_DISABLE(pxp->px_dip, f_p->px_fh_sysino);
2130 
2131 	cb_p->cpuid = new_cpuid;
2132 	cb_p->sysino = f_p->px_fh_sysino;
2133 	PX_INTR_ENABLE(pxp->px_dip, cb_p->sysino, cb_p->cpuid);
2134 
2135 cb_done:
2136 	mutex_exit(&cb_p->cb_mutex);
2137 }
2138 
2139 /*
2140  * px_cb_add_intr() - Called from attach(9E) to create the CB if not yet
2141  * created, always add the CB interrupt vector, but enable it only once.
2142  */
2143 int
2144 px_cb_add_intr(px_fault_t *fault_p)
2145 {
2146 	px_t		*px_p = DIP_TO_STATE(fault_p->px_fh_dip);
2147 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
2148 	px_cb_t		*cb_p = (px_cb_t *)px_get_cb(fault_p->px_fh_dip);
2149 	px_cb_list_t	*pxl, *pxl_new;
2150 	boolean_t	is_proxy = B_FALSE;
2151 
2152 	/* create cb */
2153 	if (cb_p == NULL) {
2154 		cb_p = kmem_zalloc(sizeof (px_cb_t), KM_SLEEP);
2155 
2156 		mutex_init(&cb_p->cb_mutex, NULL, MUTEX_DRIVER,
2157 		    (void *) ipltospl(FM_ERR_PIL));
2158 
2159 		cb_p->px_cb_func = px_cb_intr;
2160 		pxu_p->px_cb_p = cb_p;
2161 		px_set_cb(fault_p->px_fh_dip, (uint64_t)cb_p);
2162 
2163 		/* px_lib_dev_init allows only FIRE and OBERON */
2164 		px_err_reg_enable(
2165 		    (pxu_p->chip_type == PX_CHIP_FIRE) ?
2166 		    PX_ERR_JBC : PX_ERR_UBC,
2167 		    pxu_p->px_address[PX_REG_XBC]);
2168 	} else
2169 		pxu_p->px_cb_p = cb_p;
2170 
2171 	/* register cb interrupt */
2172 	VERIFY(add_ivintr(fault_p->px_fh_sysino, PX_ERR_PIL,
2173 	    (intrfunc)cb_p->px_cb_func, (caddr_t)cb_p, NULL, NULL) == 0);
2174 
2176 	/* update cb list */
2177 	mutex_enter(&cb_p->cb_mutex);
2178 	if (cb_p->pxl == NULL) {
2179 		is_proxy = B_TRUE;
2180 		pxl = kmem_zalloc(sizeof (px_cb_list_t), KM_SLEEP);
2181 		pxl->pxp = px_p;
2182 		cb_p->pxl = pxl;
2183 		cb_p->sysino = fault_p->px_fh_sysino;
2184 		cb_p->cpuid = intr_dist_cpuid();
2185 	} else {
2186 		/*
2187 		 * Find the last pxl or
2188 		 * stop short on encountering a redundant entry, or
2189 		 * both.
2190 		 */
2191 		pxl = cb_p->pxl;
2192 		for (; !(pxl->pxp == px_p) && pxl->next; pxl = pxl->next) {};
2193 		ASSERT(pxl->pxp != px_p);
2194 
2195 		/* add to linked list */
2196 		pxl_new = kmem_zalloc(sizeof (px_cb_list_t), KM_SLEEP);
2197 		pxl_new->pxp = px_p;
2198 		pxl->next = pxl_new;
2199 	}
2200 	cb_p->attachcnt++;
2201 	mutex_exit(&cb_p->cb_mutex);
2202 
2203 	if (is_proxy) {
2204 		/* add to interrupt redistribution list */
2205 		intr_dist_add(px_cb_intr_redist, cb_p);
2206 
2207 		/* enable cb hw interrupt */
2208 		px_ib_intr_enable(px_p, cb_p->cpuid, fault_p->px_intr_ino);
2209 	}
2210 
2211 	return (DDI_SUCCESS);
2212 }
2213 
2214 /*
2215  * px_cb_rem_intr() - Called from detach(9E) to remove this px's CB
2216  * interrupt vector, shift the proxy role to the next available px,
2217  * or disable the CB interrupt when this px is the last one.
2218  */
2219 void
2220 px_cb_rem_intr(px_fault_t *fault_p)
2221 {
2222 	px_t		*px_p = DIP_TO_STATE(fault_p->px_fh_dip), *pxp;
2223 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
2224 	px_cb_t		*cb_p = PX2CB(px_p);
2225 	px_cb_list_t	*pxl, *prev;
2226 	px_fault_t	*f_p;
2227 
2228 	ASSERT(cb_p->pxl);
2229 
2230 	/* find and remove this px, and update cb list */
2231 	mutex_enter(&cb_p->cb_mutex);
2232 
2233 	pxl = cb_p->pxl;
2234 	if (pxl->pxp == px_p) {
2235 		cb_p->pxl = pxl->next;
2236 	} else {
2237 		prev = pxl;
2238 		pxl = pxl->next;
2239 		for (; pxl && (pxl->pxp != px_p); prev = pxl, pxl = pxl->next) {
2240 		};
2241 		if (!pxl) {
2242 			cmn_err(CE_WARN, "px_cb_rem_intr: can't find px_p 0x%p "
2243 			    "in registered CB list.", (void *)px_p);
2244 			mutex_exit(&cb_p->cb_mutex);
2245 			return;
2246 		}
2247 		prev->next = pxl->next;
2248 	}
2249 	pxu_p->px_cb_p = NULL;
2250 	cb_p->attachcnt--;
2251 	kmem_free(pxl, sizeof (px_cb_list_t));
2252 	mutex_exit(&cb_p->cb_mutex);
2253 
2254 	/* disable cb hw interrupt */
2255 	if (fault_p->px_fh_sysino == cb_p->sysino)
2256 		px_ib_intr_disable(px_p->px_ib_p, fault_p->px_intr_ino,
2257 		    IB_INTR_WAIT);
2258 
2259 	/* if last px, remove from interrupt redistribution list */
2260 	if (cb_p->pxl == NULL)
2261 		intr_dist_rem(px_cb_intr_redist, cb_p);
2262 
2263 	/* de-register interrupt */
2264 	VERIFY(rem_ivintr(fault_p->px_fh_sysino, PX_ERR_PIL) == 0);
2265 
2266 	/* if not last px, assign next px to manage cb */
2267 	mutex_enter(&cb_p->cb_mutex);
2268 	if (cb_p->pxl) {
2269 		if (fault_p->px_fh_sysino == cb_p->sysino) {
2270 			pxp = cb_p->pxl->pxp;
2271 			f_p = &pxp->px_cb_fault;
2272 			cb_p->sysino = f_p->px_fh_sysino;
2273 
2274 			PX_INTR_ENABLE(pxp->px_dip, cb_p->sysino, cb_p->cpuid);
2275 			(void) px_lib_intr_setstate(pxp->px_dip, cb_p->sysino,
2276 			    INTR_IDLE_STATE);
2277 		}
2278 		mutex_exit(&cb_p->cb_mutex);
2279 		return;
2280 	}
2281 
2282 	/* clean up after the last px */
2283 	mutex_exit(&cb_p->cb_mutex);
2284 
2285 	/* px_lib_dev_init allows only FIRE and OBERON */
2286 	px_err_reg_disable(
2287 	    (pxu_p->chip_type == PX_CHIP_FIRE) ? PX_ERR_JBC : PX_ERR_UBC,
2288 	    pxu_p->px_address[PX_REG_XBC]);
2289 
2290 	mutex_destroy(&cb_p->cb_mutex);
2291 	px_set_cb(fault_p->px_fh_dip, 0ull);
2292 	kmem_free(cb_p, sizeof (px_cb_t));
2293 }
2294 
2295 /*
2296  * px_cb_intr() - sun4u only, CB interrupt dispatcher
2297  */
2298 uint_t
2299 px_cb_intr(caddr_t arg)
2300 {
2301 	px_cb_t		*cb_p = (px_cb_t *)arg;
2302 	px_t		*pxp;
2303 	px_fault_t	*f_p;
2304 	int		ret;
2305 
2306 	mutex_enter(&cb_p->cb_mutex);
2307 
2308 	if (!cb_p->pxl) {
2309 		mutex_exit(&cb_p->cb_mutex);
2310 		return (DDI_INTR_UNCLAIMED);
2311 	}
2312 
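	/* Dispatch to the fault handler of the proxy px at the list head. */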
2313 	pxp = cb_p->pxl->pxp;
2314 	f_p = &pxp->px_cb_fault;
2315 
2316 	ret = f_p->px_err_func((caddr_t)f_p);
2317 
2318 	mutex_exit(&cb_p->cb_mutex);
2319 	return (ret);
2320 }
2321 
2322 #ifdef	FMA
2323 void
2324 px_fill_rc_status(px_fault_t *px_fault_p, pciex_rc_error_regs_t *rc_status)
2325 {
2326 	/* populate the rc_status by reading the registers - TBD */
2327 }
2328 #endif /* FMA */
2329 
2330 /*
2331  * Unprotected raw reads/writes of fabric device's config space.
2332  * Only used for temporary PCI-E Fabric Error Handling.
2333  */
2334 uint32_t
2335 px_fab_get(px_t *px_p, pcie_req_id_t bdf, uint16_t offset)
2336 {
2337 	px_ranges_t	*rp = px_p->px_ranges_p;
2338 	uint64_t	range_prop, base_addr;
2339 	int		bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG);
2340 	uint32_t	val;
2341 
2342 	/* Get Fire's Physical Base Address */
2343 	range_prop = px_get_range_prop(px_p, rp, bank);
2344 
2345 	/* Compute the config space address for this bdf and offset. */
2346 	base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset);
2347 
2348 	val = ldphysio(base_addr);
2349 
2350 	return (LE_32(val));
2351 }
2352 
2353 void
2354 px_fab_set(px_t *px_p, pcie_req_id_t bdf, uint16_t offset,
2355     uint32_t val) {
2356 	px_ranges_t	*rp = px_p->px_ranges_p;
2357 	uint64_t	range_prop, base_addr;
2358 	int		bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG);
2359 
2360 	/* Get Fire's Physical Base Address */
2361 	range_prop = px_get_range_prop(px_p, rp, bank);
2362 
2363 	/* Compute the config space address for this bdf and offset. */
2364 	base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset);
2365 
2366 	stphysio(base_addr, LE_32(val));
2367 }
2368 
2369 /*
2370  * cpr callback
2371  *
2372  * disable fabric error msg interrupt prior to suspending
2373  * all device drivers; re-enable fabric error msg interrupt
2374  * after all devices are resumed.
2375  */
2376 static boolean_t
2377 px_cpr_callb(void *arg, int code)
2378 {
2379 	px_t		*px_p = (px_t *)arg;
2380 	px_ib_t		*ib_p = px_p->px_ib_p;
2381 	px_pec_t	*pec_p = px_p->px_pec_p;
2382 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
2383 	caddr_t		csr_base;
2384 	devino_t	ce_ino, nf_ino, f_ino;
2385 	px_ino_t	*ce_ino_p, *nf_ino_p, *f_ino_p;
2386 	uint64_t	imu_log_enable, imu_intr_enable;
2387 	uint64_t	imu_log_mask, imu_intr_mask;
2388 
2389 	ce_ino = px_msiqid_to_devino(px_p, pec_p->pec_corr_msg_msiq_id);
2390 	nf_ino = px_msiqid_to_devino(px_p, pec_p->pec_non_fatal_msg_msiq_id);
2391 	f_ino = px_msiqid_to_devino(px_p, pec_p->pec_fatal_msg_msiq_id);
2392 	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
2393 
2394 	imu_log_enable = CSR_XR(csr_base, IMU_ERROR_LOG_ENABLE);
2395 	imu_intr_enable = CSR_XR(csr_base, IMU_INTERRUPT_ENABLE);
2396 
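	/*
	 * Masks covering the "message not enabled" log and interrupt
	 * enable bits for fatal, non-fatal and correctable error messages.
	 */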
2397 	imu_log_mask = BITMASK(IMU_ERROR_LOG_ENABLE_FATAL_MES_NOT_EN_LOG_EN) |
2398 	    BITMASK(IMU_ERROR_LOG_ENABLE_NONFATAL_MES_NOT_EN_LOG_EN) |
2399 	    BITMASK(IMU_ERROR_LOG_ENABLE_COR_MES_NOT_EN_LOG_EN);
2400 
2401 	imu_intr_mask =
2402 	    BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_S_INT_EN) |
2403 	    BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_S_INT_EN) |
2404 	    BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_S_INT_EN) |
2405 	    BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_P_INT_EN) |
2406 	    BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_P_INT_EN) |
2407 	    BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_P_INT_EN);
2408 
2409 	switch (code) {
2410 	case CB_CODE_CPR_CHKPT:
2411 		/* disable imu rbne on corr/nonfatal/fatal errors */
2412 		CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE,
2413 		    imu_log_enable & (~imu_log_mask));
2414 
2415 		CSR_XS(csr_base, IMU_INTERRUPT_ENABLE,
2416 		    imu_intr_enable & (~imu_intr_mask));
2417 
2418 		/* disable CORR intr mapping */
2419 		px_ib_intr_disable(ib_p, ce_ino, IB_INTR_NOWAIT);
2420 
2421 		/* disable NON FATAL intr mapping */
2422 		px_ib_intr_disable(ib_p, nf_ino, IB_INTR_NOWAIT);
2423 
2424 		/* disable FATAL intr mapping */
2425 		px_ib_intr_disable(ib_p, f_ino, IB_INTR_NOWAIT);
2426 
2427 		break;
2428 
2429 	case CB_CODE_CPR_RESUME:
2430 		pxu_p->cpr_flag = PX_NOT_CPR;
2431 		mutex_enter(&ib_p->ib_ino_lst_mutex);
2432 
2433 		ce_ino_p = px_ib_locate_ino(ib_p, ce_ino);
2434 		nf_ino_p = px_ib_locate_ino(ib_p, nf_ino);
2435 		f_ino_p = px_ib_locate_ino(ib_p, f_ino);
2436 
2437 		/* enable CORR intr mapping */
2438 		if (ce_ino_p)
2439 			px_ib_intr_enable(px_p, ce_ino_p->ino_cpuid, ce_ino);
2440 		else
2441 			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
2442 			    "reenable PCIe Correctable msg intr.\n");
2443 
2444 		/* enable NON FATAL intr mapping */
2445 		if (nf_ino_p)
2446 			px_ib_intr_enable(px_p, nf_ino_p->ino_cpuid, nf_ino);
2447 		else
2448 			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
2449 			    "reenable PCIe Non Fatal msg intr.\n");
2450 
2451 		/* enable FATAL intr mapping */
2452 		if (f_ino_p)
2453 			px_ib_intr_enable(px_p, f_ino_p->ino_cpuid, f_ino);
2454 		else
2455 			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
2456 			    "reenable PCIe Fatal msg intr.\n");
2457 
2458 		mutex_exit(&ib_p->ib_ino_lst_mutex);
2459 
2460 		/* re-enable corr/nonfatal/fatal msg-not-enabled errors */
2461 		CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE, (imu_log_enable |
2462 		    (imu_log_mask & px_imu_log_mask)));
2463 		CSR_XS(csr_base, IMU_INTERRUPT_ENABLE, (imu_intr_enable |
2464 		    (imu_intr_mask & px_imu_intr_mask)));
2465 
2466 		break;
2467 	}
2468 
2469 	return (B_TRUE);
2470 }
2471 
2472 uint64_t
2473 px_get_rng_parent_hi_mask(px_t *px_p)
2474 {
2475 	pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
2476 	uint64_t mask;
2477 
2478 	switch (PX_CHIP_TYPE(pxu_p)) {
2479 	case PX_CHIP_OBERON:
2480 		mask = OBERON_RANGE_PROP_MASK;
2481 		break;
2482 	case PX_CHIP_FIRE:
2483 		mask = PX_RANGE_PROP_MASK;
2484 		break;
2485 	default:
2486 		mask = PX_RANGE_PROP_MASK;
2487 	}
2488 
2489 	return (mask);
2490 }
2491 
2492 /*
2493  * fetch the chip's ranges property value
2494  */
2495 uint64_t
2496 px_get_range_prop(px_t *px_p, px_ranges_t *rp, int bank)
2497 {
2498 	uint64_t mask, range_prop;
2499 
2500 	mask = px_get_rng_parent_hi_mask(px_p);
2501 	range_prop = (((uint64_t)(rp[bank].parent_high & mask)) << 32) |
2502 	    rp[bank].parent_low;
2503 
2504 	return (range_prop);
2505 }
2506 
2507 /*
2508  * add cpr callback
2509  */
2510 void
2511 px_cpr_add_callb(px_t *px_p)
2512 {
2513 	px_p->px_cprcb_id = callb_add(px_cpr_callb, (void *)px_p,
2514 	    CB_CL_CPR_POST_USER, "px_cpr");
2515 }
2516 
2517 /*
2518  * remove cpr callback
2519  */
2520 void
2521 px_cpr_rem_callb(px_t *px_p)
2522 {
2523 	(void) callb_delete(px_p->px_cprcb_id);
2524 }
2525 
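/*
 * px_hp_intr() - hotplug interrupt handler; hands the interrupt to
 * pciehpc and then returns the interrupt state to idle.
 */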
2526 /*ARGSUSED*/
2527 static uint_t
2528 px_hp_intr(caddr_t arg1, caddr_t arg2)
2529 {
2530 	px_t	*px_p = (px_t *)arg1;
2531 	pxu_t 	*pxu_p = (pxu_t *)px_p->px_plat_p;
2532 	int	rval;
2533 
2534 	rval = pciehpc_intr(px_p->px_dip);
2535 
2536 #ifdef  DEBUG
2537 	if (rval == DDI_INTR_UNCLAIMED)
2538 		cmn_err(CE_WARN, "%s%d: UNCLAIMED intr\n",
2539 		    ddi_driver_name(px_p->px_dip),
2540 		    ddi_get_instance(px_p->px_dip));
2541 #endif
2542 
2543 	/* Set the interrupt state to idle */
2544 	if (px_lib_intr_setstate(px_p->px_dip,
2545 	    pxu_p->hp_sysino, INTR_IDLE_STATE) != DDI_SUCCESS)
2546 		return (DDI_INTR_UNCLAIMED);
2547 
2548 	return (rval);
2549 }
2550 
2551 int
2552 px_lib_hotplug_init(dev_info_t *dip, void *arg)
2553 {
2554 	px_t	*px_p = DIP_TO_STATE(dip);
2555 	pxu_t 	*pxu_p = (pxu_t *)px_p->px_plat_p;
2556 	uint64_t ret;
2557 
2558 	if ((ret = hvio_hotplug_init(dip, arg)) == DDI_SUCCESS) {
2559 		if (px_lib_intr_devino_to_sysino(px_p->px_dip,
2560 		    px_p->px_inos[PX_INTR_HOTPLUG], &pxu_p->hp_sysino) !=
2561 		    DDI_SUCCESS) {
2562 #ifdef	DEBUG
2563 			cmn_err(CE_WARN, "%s%d: devino_to_sysino fails\n",
2564 			    ddi_driver_name(px_p->px_dip),
2565 			    ddi_get_instance(px_p->px_dip));
2566 #endif
2567 			return (DDI_FAILURE);
2568 		}
2569 
2570 		VERIFY(add_ivintr(pxu_p->hp_sysino, PX_PCIEHP_PIL,
2571 		    (intrfunc)px_hp_intr, (caddr_t)px_p, NULL, NULL) == 0);
2572 
2573 		px_ib_intr_enable(px_p, intr_dist_cpuid(),
2574 		    px_p->px_inos[PX_INTR_HOTPLUG]);
2575 	}
2576 
2577 	return (ret);
2578 }
2579 
2580 void
2581 px_lib_hotplug_uninit(dev_info_t *dip)
2582 {
2583 	if (hvio_hotplug_uninit(dip) == DDI_SUCCESS) {
2584 		px_t	*px_p = DIP_TO_STATE(dip);
2585 		pxu_t 	*pxu_p = (pxu_t *)px_p->px_plat_p;
2586 
2587 		px_ib_intr_disable(px_p->px_ib_p,
2588 		    px_p->px_inos[PX_INTR_HOTPLUG], IB_INTR_WAIT);
2589 
2590 		VERIFY(rem_ivintr(pxu_p->hp_sysino, PX_PCIEHP_PIL) == 0);
2591 	}
2592 }
2593 
2594 /*
2595  * px_hp_intr_redist() - sun4u only, HP interrupt redistribution
2596  */
2597 void
2598 px_hp_intr_redist(px_t *px_p)
2599 {
2600 	if (px_p && (px_p->px_dev_caps & PX_HOTPLUG_CAPABLE)) {
2601 		px_ib_intr_dist_en(px_p->px_dip, intr_dist_cpuid(),
2602 		    px_p->px_inos[PX_INTR_HOTPLUG], B_FALSE);
2603 	}
2604 }
2605 
2606 boolean_t
2607 px_lib_is_in_drain_state(px_t *px_p)
2608 {
2609 	pxu_t 	*pxu_p = (pxu_t *)px_p->px_plat_p;
2610 	caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
2611 	uint64_t drain_status;
2612 
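	/*
	 * Oberon reports drain status in DRAIN_CONTROL_STATUS; Fire
	 * reports it in TLU_STATUS.
	 */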
2613 	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
2614 		drain_status = CSR_BR(csr_base, DRAIN_CONTROL_STATUS, DRAIN);
2615 	} else {
2616 		drain_status = CSR_BR(csr_base, TLU_STATUS, DRAIN);
2617 	}
2618 
2619 	return (drain_status);
2620 }
2621 
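/*
 * px_lib_get_bdf() - return the root complex's requester ID (BDF) from
 * the DMC PCI Express configuration register.
 */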
2622 pcie_req_id_t
2623 px_lib_get_bdf(px_t *px_p)
2624 {
2625 	pxu_t 	*pxu_p = (pxu_t *)px_p->px_plat_p;
2626 	caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
2627 	pcie_req_id_t bdf;
2628 
2629 	bdf = CSR_BR(csr_base, DMC_PCI_EXPRESS_CONFIGURATION, REQ_ID);
2630 
2631 	return (bdf);
2632 }
2633 
2634 /*ARGSUSED*/
2635 int
2636 px_lib_get_root_complex_mps(px_t *px_p, dev_info_t *dip, int *mps)
2637 {
2638 	pxu_t	*pxu_p;
2639 	caddr_t csr_base;
2640 
2641 	pxu_p = (pxu_t *)px_p->px_plat_p;
2642 
2643 	if (pxu_p == NULL)
2644 		return (DDI_FAILURE);
2645 
2646 	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
2647 
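	/* Max Payload Size supported, from the TLU Device Capabilities reg. */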
2649 	*mps = CSR_XR(csr_base, TLU_DEVICE_CAPABILITIES) &
2650 	    TLU_DEVICE_CAPABILITIES_MPS_MASK;
2651 
2652 	return (DDI_SUCCESS);
2653 }
2654 
2655 /*ARGSUSED*/
2656 int
2657 px_lib_set_root_complex_mps(px_t *px_p,  dev_info_t *dip, int mps)
2658 {
2659 	pxu_t	*pxu_p;
2660 	caddr_t csr_base;
2661 	uint64_t dev_ctrl;
2662 	int link_width, val;
2663 	px_chip_type_t chip_type = px_identity_init(px_p);
2664 
2665 	pxu_p = (pxu_t *)px_p->px_plat_p;
2666 
2667 	if (pxu_p == NULL)
2668 		return (DDI_FAILURE);
2669 
2670 	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
2671 
2672 	dev_ctrl = CSR_XR(csr_base, TLU_DEVICE_CONTROL);
2673 	dev_ctrl |= (mps << TLU_DEVICE_CONTROL_MPS);
2674 
2675 	CSR_XS(csr_base, TLU_DEVICE_CONTROL, dev_ctrl);
2676 
2677 	link_width = CSR_FR(csr_base, TLU_LINK_STATUS, WIDTH);
2678 
2679 	/*
2680 	 * Convert link_width to match timer array configuration.
2681 	 */
2682 	switch (link_width) {
2683 	case 1:
2684 		link_width = 0;
2685 		break;
2686 	case 4:
2687 		link_width = 1;
2688 		break;
2689 	case 8:
2690 		link_width = 2;
2691 		break;
2692 	case 16:
2693 		link_width = 3;
2694 		break;
2695 	default:
2696 		link_width = 0;
2697 	}
2698 
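	/*
	 * Program the replay timer threshold (and, for Fire, the ACKNAK
	 * latency timer) from the PRM tables indexed by MPS and link width.
	 */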
2699 	val = px_replay_timer_table[mps][link_width];
2700 	CSR_XS(csr_base, LPU_TXLINK_REPLAY_TIMER_THRESHOLD, val);
2701 
2702 	if (chip_type == PX_CHIP_OBERON)
2703 		return (DDI_SUCCESS);
2704 
2705 	val = px_acknak_timer_table[mps][link_width];
2706 	CSR_XS(csr_base, LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD, val);
2707 
2708 	return (DDI_SUCCESS);
2709 }
2710