/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/ddi.h>
#include <sys/async.h>
#include <sys/sunddi.h>
#include <sys/ddifm.h>
#include <sys/fm/protocol.h>
#include <sys/vmem.h>
#include <sys/intr.h>
#include <sys/ivintr.h>
#include <sys/errno.h>
#include <sys/hypervisor_api.h>
#include <sys/hsvc.h>
#include <px_obj.h>
#include <sys/machsystm.h>
#include "px_lib4v.h"
#include "px_err.h"
#include <sys/pci_cfgacc.h>
#include <sys/pci_cfgacc_4v.h>


/* mask for the ranges property in calculating the real PFN range */
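/*
 * (1 << 28) - 1 evaluates to 0x0fffffff, i.e. only the low 28 bits of
 * the ranges parent-high cell are significant here.
 */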
uint_t px_ranges_phi_mask = ((1 << 28) - 1);

/*
 * Hypervisor VPCI services information for the px nexus driver.
 */
static	uint64_t	px_vpci_min_ver; /* Negotiated VPCI API minor version */
static	uint_t		px_vpci_users = 0; /* VPCI API users */

static hsvc_info_t px_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_VPCI, PX_VPCI_MAJOR_VER,
	PX_VPCI_MINOR_VER, "PX"
};

int
px_lib_dev_init(dev_info_t *dip, devhandle_t *dev_hdl)
{
	px_nexus_regspec_t	*rp;
	uint_t			reglen;
	int			ret;

	uint64_t mjrnum;
	uint64_t mnrnum;

	DBG(DBG_ATTACH, dip, "px_lib_dev_init: dip 0x%p\n", dip);

	/*
	 * Check HV intr group api versioning.
	 * This driver uses the old interrupt routines which are supported
	 * in old firmware in the CORE API group and in newer firmware in
	 * the INTR API group.  Support for these calls will be dropped
	 * once the INTR API group major goes to 2.
	 */
	if ((hsvc_version(HSVC_GROUP_INTR, &mjrnum, &mnrnum) == 0) &&
	    (mjrnum > 1)) {
		cmn_err(CE_WARN, "px: unsupported intr api group: "
		    "maj:0x%lx, min:0x%lx", mjrnum, mnrnum);
		return (ENOTSUP);
	}

	ret = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "reg", (uchar_t **)&rp, &reglen);
	if (ret != DDI_PROP_SUCCESS) {
		DBG(DBG_ATTACH, dip, "px_lib_dev_init failed ret=%d\n", ret);
		return (DDI_FAILURE);
	}

	/*
	 * Initialize the device handle. The device handle uniquely
	 * identifies a SUN4V device. It consists of the lower 28 bits of
	 * the hi-cell of the first entry of the SUN4V device's "reg"
	 * property as defined by the SUN4V Bus Binding to Open Firmware.
	 */
	*dev_hdl = (devhandle_t)((rp->phys_addr >> 32) & DEVHDLE_MASK);
	ddi_prop_free(rp);
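
	/*
	 * Worked example (hypothetical "reg" value): if the hi-cell of the
	 * first "reg" entry is 0x00000400, then phys_addr >> 32 yields
	 * 0x400, and masking with DEVHDLE_MASK (the low 28 bits) leaves a
	 * device handle of 0x400.
	 */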

	/*
	 * hotplug implementation requires this property to be associated with
	 * any indirect PCI config access services
	 */
	(void) ddi_prop_update_int(makedevice(ddi_driver_major(dip),
	    PCI_MINOR_NUM(ddi_get_instance(dip), PCI_DEVCTL_MINOR)), dip,
	    PCI_BUS_CONF_MAP_PROP, 1);

	DBG(DBG_ATTACH, dip, "px_lib_dev_init: dev_hdl 0x%llx\n", *dev_hdl);

	/*
	 * Negotiate the API version for VPCI hypervisor services.
	 */
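	/*
	 * Only the first px instance negotiates with the hypervisor; later
	 * instances just bump the px_vpci_users refcount and reuse the
	 * minor version already recorded in px_vpci_min_ver.
	 */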
	if (px_vpci_users++)
		return (DDI_SUCCESS);

	if ((ret = hsvc_register(&px_hsvc, &px_vpci_min_ver)) != 0) {
		cmn_err(CE_WARN, "%s: cannot negotiate hypervisor services "
		    "group: 0x%lx major: 0x%lx minor: 0x%lx errno: %d\n",
		    px_hsvc.hsvc_modname, px_hsvc.hsvc_group,
		    px_hsvc.hsvc_major, px_hsvc.hsvc_minor, ret);

		return (DDI_FAILURE);
	}

	DBG(DBG_ATTACH, dip, "px_lib_dev_init: negotiated VPCI API version, "
	    "major 0x%lx minor 0x%lx\n", px_hsvc.hsvc_major, px_vpci_min_ver);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_dev_fini(dev_info_t *dip)
{
	DBG(DBG_DETACH, dip, "px_lib_dev_fini: dip 0x%p\n", dip);

	(void) ddi_prop_remove(makedevice(ddi_driver_major(dip),
	    PCI_MINOR_NUM(ddi_get_instance(dip), PCI_DEVCTL_MINOR)), dip,
	    PCI_BUS_CONF_MAP_PROP);

	if (--px_vpci_users == 0)
		(void) hsvc_unregister(&px_hsvc);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_devino_to_sysino(dev_info_t *dip, devino_t devino,
    sysino_t *sysino)
{
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: dip 0x%p "
	    "devino 0x%x\n", dip, devino);

	if ((ret = hvio_intr_devino_to_sysino(DIP_TO_HANDLE(dip),
	    devino, sysino)) != H_EOK) {
		DBG(DBG_LIB_INT, dip,
		    "hvio_intr_devino_to_sysino failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: sysino 0x%llx\n",
	    *sysino);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_getvalid(dev_info_t *dip, sysino_t sysino,
    intr_valid_state_t *intr_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: dip 0x%p sysino 0x%llx\n",
	    dip, sysino);

	if ((ret = hvio_intr_getvalid(sysino,
	    (int *)intr_valid_state)) != H_EOK) {
		DBG(DBG_LIB_INT, dip, "hvio_intr_getvalid failed, ret 0x%lx\n",
		    ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: intr_valid_state 0x%x\n",
	    *intr_valid_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_setvalid(dev_info_t *dip, sysino_t sysino,
    intr_valid_state_t intr_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_setvalid: dip 0x%p sysino 0x%llx "
	    "intr_valid_state 0x%x\n", dip, sysino, intr_valid_state);

	if ((ret = hvio_intr_setvalid(sysino, intr_valid_state)) != H_EOK) {
		DBG(DBG_LIB_INT, dip, "hvio_intr_setvalid failed, ret 0x%lx\n",
		    ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_getstate(dev_info_t *dip, sysino_t sysino,
    intr_state_t *intr_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: dip 0x%p sysino 0x%llx\n",
	    dip, sysino);

	if ((ret = hvio_intr_getstate(sysino, (int *)intr_state)) != H_EOK) {
		DBG(DBG_LIB_INT, dip, "hvio_intr_getstate failed, ret 0x%lx\n",
		    ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: intr_state 0x%x\n",
	    *intr_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_setstate(dev_info_t *dip, sysino_t sysino,
    intr_state_t intr_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_setstate: dip 0x%p sysino 0x%llx "
	    "intr_state 0x%x\n", dip, sysino, intr_state);

	if ((ret = hvio_intr_setstate(sysino, intr_state)) != H_EOK) {
		DBG(DBG_LIB_INT, dip, "hvio_intr_setstate failed, ret 0x%lx\n",
		    ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_gettarget(dev_info_t *dip, sysino_t sysino, cpuid_t *cpuid)
{
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: dip 0x%p sysino 0x%llx\n",
	    dip, sysino);

	if ((ret = hvio_intr_gettarget(sysino, cpuid)) != H_EOK) {
		DBG(DBG_LIB_INT, dip,
		    "hvio_intr_gettarget failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: cpuid 0x%x\n", *cpuid);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_settarget(dev_info_t *dip, sysino_t sysino, cpuid_t cpuid)
{
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_settarget: dip 0x%p sysino 0x%llx "
	    "cpuid 0x%x\n", dip, sysino, cpuid);

	ret = hvio_intr_settarget(sysino, cpuid);
	if (ret == H_ECPUERROR) {
		cmn_err(CE_PANIC,
		    "px_lib_intr_settarget: hvio_intr_settarget failed, "
		    "ret = 0x%lx, cpuid = 0x%x, sysino = 0x%lx\n", ret,
		    cpuid, sysino);
	} else if (ret != H_EOK) {
		DBG(DBG_LIB_INT, dip,
		    "hvio_intr_settarget failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_reset(dev_info_t *dip)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	px_ib_t		*ib_p = px_p->px_ib_p;
	px_ino_t	*ino_p;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_reset: dip 0x%p\n", dip);

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	/* Reset all Interrupts */
	for (ino_p = ib_p->ib_ino_lst; ino_p; ino_p = ino_p->ino_next_p) {
		if (px_lib_intr_setstate(dip, ino_p->ino_sysino,
		    INTR_IDLE_STATE) != DDI_SUCCESS) {
			/* don't leak the ino list mutex on failure */
			mutex_exit(&ib_p->ib_ino_lst_mutex);
			return (BF_FATAL);
		}
	}

	mutex_exit(&ib_p->ib_ino_lst_mutex);

	return (BF_NONE);
}

/*ARGSUSED*/
int
px_lib_iommu_map(dev_info_t *dip, tsbid_t tsbid, pages_t pages,
    io_attributes_t attr, void *addr, size_t pfn_index, int flags)
{
	tsbnum_t	tsb_num = PCI_TSBID_TO_TSBNUM(tsbid);
	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
	io_page_list_t	*pfns, *pfn_p;
	pages_t		ttes_mapped = 0;
	int		i, err = DDI_SUCCESS;

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: dip 0x%p tsbid 0x%llx "
	    "pages 0x%x attr 0x%llx addr 0x%p pfn_index 0x%llx flags 0x%x\n",
	    dip, tsbid, pages, attr, addr, pfn_index, flags);

	if ((pfns = pfn_p = kmem_zalloc((pages * sizeof (io_page_list_t)),
	    KM_NOSLEEP)) == NULL) {
		DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: kmem_zalloc failed\n");
		return (DDI_FAILURE);
	}

	for (i = 0; i < pages; i++)
		pfns[i] = MMU_PTOB(PX_ADDR2PFN(addr, pfn_index, flags, i));

	/*
	 * If the HV VPCI version is 1.1 or higher, pass the BDF, phantom
	 * function, and relaxed ordering attributes. Otherwise, pass only
	 * the read or write attribute.
	 */
	if (px_vpci_min_ver == PX_VPCI_MINOR_VER_0)
		attr = attr & (PCI_MAP_ATTR_READ | PCI_MAP_ATTR_WRITE);
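
	/*
	 * Illustration (hypothetical attr value): under VPCI 1.0, an attr
	 * carrying write permission plus BDF/relaxed-ordering bits is
	 * reduced to just PCI_MAP_ATTR_WRITE before the hvio_iommu_map()
	 * calls below; only 1.1 and later firmware accepts the extra bits.
	 */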

	while ((ttes_mapped = pfn_p - pfns) < pages) {
		uintptr_t	ra = va_to_pa(pfn_p);
		pages_t		ttes2map;
		uint64_t	ret;

		ttes2map = (MMU_PAGE_SIZE - P2PHASE(ra, MMU_PAGE_SIZE)) >> 3;
		ra = MMU_PTOB(MMU_BTOP(ra));
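
		/*
		 * Example (hypothetical RA, 8K pages): if pfn_p sits at
		 * RA 0x10ff8, then (0x2000 - 0xff8) >> 3 == 0x201 list
		 * entries fit before the page boundary, bounding how many
		 * TTEs each hvio_iommu_map() call below may map at once.
		 */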

		for (ttes2map = MIN(ttes2map, pages - ttes_mapped); ttes2map;
		    ttes2map -= ttes_mapped, pfn_p += ttes_mapped) {

			ttes_mapped = 0;
			if ((ret = hvio_iommu_map(DIP_TO_HANDLE(dip),
			    PCI_TSBID(tsb_num, tsb_index + (pfn_p - pfns)),
			    ttes2map, attr, (io_page_list_t *)(ra |
			    ((uintptr_t)pfn_p & MMU_PAGE_OFFSET)),
			    &ttes_mapped)) != H_EOK) {
				DBG(DBG_LIB_DMA, dip, "hvio_iommu_map failed "
				    "ret 0x%lx\n", ret);

				ttes_mapped = pfn_p - pfns;
				err = DDI_FAILURE;
				goto cleanup;
			}

			DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: tsb_num 0x%x "
			    "tsb_index 0x%lx ttes_to_map 0x%lx attr 0x%llx "
			    "ra 0x%p ttes_mapped 0x%x\n", tsb_num,
			    tsb_index + (pfn_p - pfns), ttes2map, attr,
			    ra | ((uintptr_t)pfn_p & MMU_PAGE_OFFSET),
			    ttes_mapped);
		}
	}

cleanup:
	if ((err == DDI_FAILURE) && ttes_mapped)
		(void) px_lib_iommu_demap(dip, tsbid, ttes_mapped);

	kmem_free(pfns, pages * sizeof (io_page_list_t));
	return (err);
}

/*ARGSUSED*/
int
px_lib_iommu_demap(dev_info_t *dip, tsbid_t tsbid, pages_t pages)
{
	tsbnum_t	tsb_num = PCI_TSBID_TO_TSBNUM(tsbid);
	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
	pages_t		ttes2demap, ttes_demapped = 0;
	uint64_t	ret;

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_demap: dip 0x%p tsbid 0x%llx "
	    "pages 0x%x\n", dip, tsbid, pages);

	for (ttes2demap = pages; ttes2demap;
	    ttes2demap -= ttes_demapped, tsb_index += ttes_demapped) {
		if ((ret = hvio_iommu_demap(DIP_TO_HANDLE(dip),
		    PCI_TSBID(tsb_num, tsb_index), ttes2demap,
		    &ttes_demapped)) != H_EOK) {
			DBG(DBG_LIB_DMA, dip, "hvio_iommu_demap failed, "
			    "ret 0x%lx\n", ret);

			return (DDI_FAILURE);
		}

		DBG(DBG_LIB_DMA, dip, "px_lib_iommu_demap: tsb_num 0x%x "
		    "tsb_index 0x%lx ttes_to_demap 0x%lx ttes_demapped 0x%x\n",
		    tsb_num, tsb_index, ttes2demap, ttes_demapped);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_iommu_getmap(dev_info_t *dip, tsbid_t tsbid, io_attributes_t *attr_p,
    r_addr_t *r_addr_p)
{
	uint64_t	ret;

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: dip 0x%p tsbid 0x%llx\n",
	    dip, tsbid);

	if ((ret = hvio_iommu_getmap(DIP_TO_HANDLE(dip), tsbid,
	    attr_p, r_addr_p)) != H_EOK) {
		DBG(DBG_LIB_DMA, dip,
		    "hvio_iommu_getmap failed, ret 0x%lx\n", ret);

		return ((ret == H_ENOMAP) ? DDI_DMA_NOMAPPING : DDI_FAILURE);
	}

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: attr 0x%llx "
	    "r_addr 0x%llx\n", *attr_p, *r_addr_p);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
uint64_t
px_get_rng_parent_hi_mask(px_t *px_p)
{
	return (PX_RANGE_PROP_MASK);
}

/*
 * Check DMA attributes against the system bypass ranges.
 * A sun4v device must be capable of generating the entire 64-bit
 * address in order to perform bypass DMA.
 */
/*ARGSUSED*/
int
px_lib_dma_bypass_rngchk(dev_info_t *dip, ddi_dma_attr_t *attr_p,
    uint64_t *lo_p, uint64_t *hi_p)
{
	if ((attr_p->dma_attr_addr_lo != 0ull) ||
	    (attr_p->dma_attr_addr_hi != UINT64_MAX)) {

		return (DDI_DMA_BADATTR);
	}

	*lo_p = 0ull;
	*hi_p = UINT64_MAX;

	return (DDI_SUCCESS);
}


/*ARGSUSED*/
int
px_lib_iommu_getbypass(dev_info_t *dip, r_addr_t ra, io_attributes_t attr,
    io_addr_t *io_addr_p)
{
	uint64_t	ret;

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: dip 0x%p ra 0x%llx "
	    "attr 0x%llx\n", dip, ra, attr);

	if ((ret = hvio_iommu_getbypass(DIP_TO_HANDLE(dip), ra,
	    attr, io_addr_p)) != H_EOK) {
		DBG(DBG_LIB_DMA, dip,
		    "hvio_iommu_getbypass failed, ret 0x%lx\n", ret);
		return (ret == H_ENOTSUPPORTED ? DDI_ENOTSUP : DDI_FAILURE);
	}

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: io_addr 0x%llx\n",
	    *io_addr_p);

	return (DDI_SUCCESS);
}

/*
 * Returns any needed IO address bit(s) for relaxed ordering in IOMMU
 * bypass mode.
 */
/* ARGSUSED */
uint64_t
px_lib_ro_bypass(dev_info_t *dip, io_attributes_t attr, uint64_t ioaddr)
{
	return (ioaddr);
}

/*ARGSUSED*/
int
px_lib_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    off_t off, size_t len, uint_t cache_flags)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	uint64_t sync_dir;
	size_t bytes_synced;
	int end, idx;
	off_t pg_off;
	devhandle_t hdl = DIP_TO_HANDLE(dip); /* need to cache hdl */

	DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: dip 0x%p rdip 0x%p "
	    "handle 0x%llx off 0x%x len 0x%x flags 0x%x\n",
	    dip, rdip, handle, off, len, cache_flags);

	if (!(mp->dmai_flags & PX_DMAI_FLAGS_INUSE)) {
		cmn_err(CE_WARN, "%s%d: Unbound dma handle %p.",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), (void *)mp);
		return (DDI_FAILURE);
	}

	if (mp->dmai_flags & PX_DMAI_FLAGS_NOSYNC)
		return (DDI_SUCCESS);

	if (!len)
		len = mp->dmai_size;

	if (mp->dmai_rflags & DDI_DMA_READ)
		sync_dir = HVIO_DMA_SYNC_DIR_FROM_DEV;
	else
		sync_dir = HVIO_DMA_SYNC_DIR_TO_DEV;

	off += mp->dmai_offset;
	pg_off = off & MMU_PAGEOFFSET;

	DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: page offset %x size %x\n",
	    pg_off, len);

	/* sync on page basis */
	end = MMU_BTOPR(off + len - 1);
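
	/*
	 * Example (hypothetical values, 8K pages): for off 0x1f80 and
	 * len 0x100, idx starts at 0 and end is 2; the first pass below
	 * syncs 0x80 bytes up to the page boundary and the second pass
	 * syncs the remaining 0x80 bytes with pg_off reset to 0.
	 */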
	for (idx = MMU_BTOP(off); idx < end; idx++,
	    len -= bytes_synced, pg_off = 0) {
		size_t bytes_to_sync = MIN(len, MMU_PAGESIZE - pg_off);

		if (hvio_dma_sync(hdl, MMU_PTOB(PX_GET_MP_PFN(mp, idx)) +
		    pg_off, bytes_to_sync, sync_dir, &bytes_synced) != H_EOK)
			break;

		DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: Called hvio_dma_sync "
		    "ra = %p bytes to sync = %x bytes synced %x\n",
		    MMU_PTOB(PX_GET_MP_PFN(mp, idx)) + pg_off, bytes_to_sync,
		    bytes_synced);

		if (bytes_to_sync != bytes_synced)
			break;
	}

	return (len ? DDI_FAILURE : DDI_SUCCESS);
}


/*
 * MSIQ Functions:
 */

/*ARGSUSED*/
int
px_lib_msiq_init(dev_info_t *dip)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
	r_addr_t	ra;
	size_t		msiq_size;
	uint_t		rec_cnt;
	int		i, err = DDI_SUCCESS;
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_init: dip 0x%p\n", dip);

	msiq_size = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);

	/* sun4v requires each EQ allocation to be aligned on its queue size */
	if ((msiq_state_p->msiq_buf_p = contig_mem_alloc_align(
	    msiq_state_p->msiq_cnt * msiq_size, msiq_size)) == NULL) {
		DBG(DBG_LIB_MSIQ, dip,
		    "px_lib_msiq_init: Contig alloc failed\n");

		return (DDI_FAILURE);
	}
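
	/*
	 * Sizing sketch (hypothetical counts): with an msiq_rec_cnt of 128
	 * and a 64-byte msiq_rec_t, msiq_size is 8K, so each EQ buffer in
	 * the contiguous allocation above lands on an 8K boundary.
	 */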

	for (i = 0; i < msiq_state_p->msiq_cnt; i++) {
		msiq_state_p->msiq_p[i].msiq_base_p = (msiqhead_t *)
		    ((caddr_t)msiq_state_p->msiq_buf_p + (i * msiq_size));

		ra = (r_addr_t)va_to_pa((caddr_t)msiq_state_p->msiq_buf_p +
		    (i * msiq_size));

		if ((ret = hvio_msiq_conf(DIP_TO_HANDLE(dip),
		    (i + msiq_state_p->msiq_1st_msiq_id),
		    ra, msiq_state_p->msiq_rec_cnt)) != H_EOK) {
			DBG(DBG_LIB_MSIQ, dip,
			    "hvio_msiq_conf failed, ret 0x%lx\n", ret);
			err = DDI_FAILURE;
			break;
		}

		if ((err = px_lib_msiq_info(dip,
		    (i + msiq_state_p->msiq_1st_msiq_id),
		    &ra, &rec_cnt)) != DDI_SUCCESS) {
			DBG(DBG_LIB_MSIQ, dip,
			    "px_lib_msiq_info failed, ret 0x%x\n", err);
			err = DDI_FAILURE;
			break;
		}

		DBG(DBG_LIB_MSIQ, dip,
		    "px_lib_msiq_init: ra 0x%p rec_cnt 0x%x\n", ra, rec_cnt);
	}

	return (err);
}

/*ARGSUSED*/
int
px_lib_msiq_fini(dev_info_t *dip)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
	size_t		msiq_size;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_fini: dip 0x%p\n", dip);
	msiq_size = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);

	if (msiq_state_p->msiq_buf_p != NULL)
		contig_mem_free(msiq_state_p->msiq_buf_p,
		    msiq_state_p->msiq_cnt * msiq_size);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_info(dev_info_t *dip, msiqid_t msiq_id, r_addr_t *ra_p,
    uint_t *msiq_rec_cnt_p)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_info: dip 0x%p msiq_id 0x%x\n",
	    dip, msiq_id);

	if ((ret = hvio_msiq_info(DIP_TO_HANDLE(dip),
	    msiq_id, ra_p, msiq_rec_cnt_p)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_info failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_info: ra_p 0x%p msiq_rec_cnt 0x%x\n",
	    ra_p, *msiq_rec_cnt_p);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_getvalid(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_valid_state_t *msiq_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: dip 0x%p msiq_id 0x%x\n",
	    dip, msiq_id);

	if ((ret = hvio_msiq_getvalid(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_getvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: msiq_valid_state 0x%x\n",
	    *msiq_valid_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_setvalid(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_valid_state_t msiq_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setvalid: dip 0x%p msiq_id 0x%x "
	    "msiq_valid_state 0x%x\n", dip, msiq_id, msiq_valid_state);

	if ((ret = hvio_msiq_setvalid(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_setvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_getstate(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_state_t *msiq_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: dip 0x%p msiq_id 0x%x\n",
	    dip, msiq_id);

	if ((ret = hvio_msiq_getstate(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_state)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_getstate failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: msiq_state 0x%x\n",
	    *msiq_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_setstate(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_state_t msiq_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setstate: dip 0x%p msiq_id 0x%x "
	    "msiq_state 0x%x\n", dip, msiq_id, msiq_state);

	if ((ret = hvio_msiq_setstate(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_state)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_setstate failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_gethead(dev_info_t *dip, msiqid_t msiq_id,
    msiqhead_t *msiq_head_p)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: dip 0x%p msiq_id 0x%x\n",
	    dip, msiq_id);

	if ((ret = hvio_msiq_gethead(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_head_p)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_gethead failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

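	/*
	 * hvio_msiq_gethead() reports the head as a byte offset into the
	 * EQ buffer, while the px driver tracks queue positions as record
	 * indices; hence the division below. px_lib_msiq_gettail converts
	 * the same way, and px_lib_msiq_sethead multiplies the index back.
	 */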
	*msiq_head_p = (*msiq_head_p / sizeof (msiq_rec_t));

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: msiq_head 0x%x\n",
	    *msiq_head_p);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_sethead(dev_info_t *dip, msiqid_t msiq_id,
    msiqhead_t msiq_head)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_sethead: dip 0x%p msiq_id 0x%x "
	    "msiq_head 0x%x\n", dip, msiq_id, msiq_head);

	if ((ret = hvio_msiq_sethead(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_head * sizeof (msiq_rec_t))) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_sethead failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_gettail(dev_info_t *dip, msiqid_t msiq_id,
    msiqtail_t *msiq_tail_p)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: dip 0x%p msiq_id 0x%x\n",
	    dip, msiq_id);

	if ((ret = hvio_msiq_gettail(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_tail_p)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_gettail failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	*msiq_tail_p = (*msiq_tail_p / sizeof (msiq_rec_t));
	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: msiq_tail 0x%x\n",
	    *msiq_tail_p);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
void
px_lib_get_msiq_rec(dev_info_t *dip, msiqhead_t *msiq_head_p,
    msiq_rec_t *msiq_rec_p)
{
	msiq_rec_t	*curr_msiq_rec_p = (msiq_rec_t *)msiq_head_p;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: dip 0x%p\n", dip);

	if (!curr_msiq_rec_p->msiq_rec_type) {
		/* No new record; hand the caller a zeroed record type */
		msiq_rec_p->msiq_rec_type = 0;

		return;
	}

	*msiq_rec_p = *curr_msiq_rec_p;
}

/*ARGSUSED*/
void
px_lib_clr_msiq_rec(dev_info_t *dip, msiqhead_t *msiq_head_p)
{
	msiq_rec_t	*curr_msiq_rec_p = (msiq_rec_t *)msiq_head_p;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_clr_msiq_rec: dip 0x%p\n", dip);

	/* Zero out msiq_rec_type field */
	curr_msiq_rec_p->msiq_rec_type = 0;
}

/*
 * MSI Functions:
 */

/*ARGSUSED*/
int
px_lib_msi_init(dev_info_t *dip)
{
	DBG(DBG_LIB_MSI, dip, "px_lib_msi_init: dip 0x%p\n", dip);

	/* Noop */
	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_getmsiq(dev_info_t *dip, msinum_t msi_num,
    msiqid_t *msiq_id)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: dip 0x%p msi_num 0x%x\n",
	    dip, msi_num);

	if ((ret = hvio_msi_getmsiq(DIP_TO_HANDLE(dip),
	    msi_num, msiq_id)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_getmsiq failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: msiq_id 0x%x\n",
	    *msiq_id);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_setmsiq(dev_info_t *dip, msinum_t msi_num,
    msiqid_t msiq_id, msi_type_t msitype)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setmsiq: dip 0x%p msi_num 0x%x "
	    "msiq_id 0x%x\n", dip, msi_num, msiq_id);

	if ((ret = hvio_msi_setmsiq(DIP_TO_HANDLE(dip),
	    msi_num, msiq_id, msitype)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_setmsiq failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_getvalid(dev_info_t *dip, msinum_t msi_num,
    pci_msi_valid_state_t *msi_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: dip 0x%p msi_num 0x%x\n",
	    dip, msi_num);

	if ((ret = hvio_msi_getvalid(DIP_TO_HANDLE(dip),
	    msi_num, msi_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_getvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: msi_valid_state 0x%x\n",
	    *msi_valid_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_setvalid(dev_info_t *dip, msinum_t msi_num,
    pci_msi_valid_state_t msi_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setvalid: dip 0x%p msi_num 0x%x "
	    "msi_valid_state 0x%x\n", dip, msi_num, msi_valid_state);

	if ((ret = hvio_msi_setvalid(DIP_TO_HANDLE(dip),
	    msi_num, msi_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_setvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_getstate(dev_info_t *dip, msinum_t msi_num,
    pci_msi_state_t *msi_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: dip 0x%p msi_num 0x%x\n",
	    dip, msi_num);

	if ((ret = hvio_msi_getstate(DIP_TO_HANDLE(dip),
	    msi_num, msi_state)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_getstate failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: msi_state 0x%x\n",
	    *msi_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_setstate(dev_info_t *dip, msinum_t msi_num,
    pci_msi_state_t msi_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setstate: dip 0x%p msi_num 0x%x "
	    "msi_state 0x%x\n", dip, msi_num, msi_state);

	if ((ret = hvio_msi_setstate(DIP_TO_HANDLE(dip),
	    msi_num, msi_state)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_setstate failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * MSG Functions:
 */

/*ARGSUSED*/
int
px_lib_msg_getmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
    msiqid_t *msiq_id)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: dip 0x%p msg_type 0x%x\n",
	    dip, msg_type);

	if ((ret = hvio_msg_getmsiq(DIP_TO_HANDLE(dip),
	    msg_type, msiq_id)) != H_EOK) {
		DBG(DBG_LIB_MSG, dip,
		    "hvio_msg_getmsiq failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: msiq_id 0x%x\n",
	    *msiq_id);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msg_setmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
    msiqid_t msiq_id)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_setmsiq: dip 0x%p msg_type 0x%x "
	    "msiq_id 0x%x\n", dip, msg_type, msiq_id);

	if ((ret = hvio_msg_setmsiq(DIP_TO_HANDLE(dip),
	    msg_type, msiq_id)) != H_EOK) {
		DBG(DBG_LIB_MSG, dip,
		    "hvio_msg_setmsiq failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msg_getvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
    pcie_msg_valid_state_t *msg_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: dip 0x%p msg_type 0x%x\n",
	    dip, msg_type);

	if ((ret = hvio_msg_getvalid(DIP_TO_HANDLE(dip), msg_type,
	    msg_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSG, dip,
		    "hvio_msg_getvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: msg_valid_state 0x%x\n",
	    *msg_valid_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msg_setvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
    pcie_msg_valid_state_t msg_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_setvalid: dip 0x%p msg_type 0x%x "
	    "msg_valid_state 0x%x\n", dip, msg_type, msg_valid_state);

	if ((ret = hvio_msg_setvalid(DIP_TO_HANDLE(dip), msg_type,
	    msg_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSG, dip,
		    "hvio_msg_setvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * Suspend/Resume Functions:
 * Currently unsupported by hypervisor and all functions are noops.
 */
/*ARGSUSED*/
int
px_lib_suspend(dev_info_t *dip)
{
	DBG(DBG_ATTACH, dip, "px_lib_suspend: Not supported\n");

	/* Not supported */
	return (DDI_FAILURE);
}

/*ARGSUSED*/
void
px_lib_resume(dev_info_t *dip)
{
	DBG(DBG_ATTACH, dip, "px_lib_resume: Not supported\n");

	/* Noop */
}

/*
 * Config space access functions, routed through the hypervisor.
 */
/*ARGSUSED*/
static int
px_lib_config_get(dev_info_t *dip, pci_device_t bdf, pci_config_offset_t off,
    uint8_t size, pci_cfg_data_t *data_p)
{
	uint64_t	ret;

	DBG(DBG_LIB_CFG, dip, "px_lib_config_get: dip 0x%p, bdf 0x%llx "
	    "off 0x%x size 0x%x\n", dip, bdf, off, size);

	if ((ret = hvio_config_get(DIP_TO_HANDLE(dip), bdf, off,
	    size, data_p)) != H_EOK) {
		DBG(DBG_LIB_CFG, dip,
		    "hvio_config_get failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}
	DBG(DBG_LIB_CFG, dip, "px_lib_config_get: data 0x%x\n", data_p->dw);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
px_lib_config_put(dev_info_t *dip, pci_device_t bdf, pci_config_offset_t off,
    uint8_t size, pci_cfg_data_t data)
{
	uint64_t	ret;

	DBG(DBG_LIB_CFG, dip, "px_lib_config_put: dip 0x%p, bdf 0x%llx "
	    "off 0x%x size 0x%x data 0x%llx\n", dip, bdf, off, size, data.qw);

	if ((ret = hvio_config_put(DIP_TO_HANDLE(dip), bdf, off,
	    size, data)) != H_EOK) {
		DBG(DBG_LIB_CFG, dip,
		    "hvio_config_put failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

static uint32_t
px_pci_config_get(ddi_acc_impl_t *handle, uint32_t *addr, int size)
{
	px_config_acc_pvt_t *px_pvt = (px_config_acc_pvt_t *)
	    handle->ahi_common.ah_bus_private;
	uint32_t pci_dev_addr = px_pvt->raddr;
	uint32_t vaddr = px_pvt->vaddr;
	uint16_t off = (uint16_t)(uintptr_t)(addr - vaddr) & 0xfff;
	uint64_t rdata = 0;

	if (px_lib_config_get(px_pvt->dip, pci_dev_addr, off,
	    size, (pci_cfg_data_t *)&rdata) != DDI_SUCCESS)
		/* XXX update error kstats */
		return (0xffffffff);
	return ((uint32_t)rdata);
}

static void
px_pci_config_put(ddi_acc_impl_t *handle, uint32_t *addr,
    int size, pci_cfg_data_t wdata)
{
	px_config_acc_pvt_t *px_pvt = (px_config_acc_pvt_t *)
	    handle->ahi_common.ah_bus_private;
	uint32_t pci_dev_addr = px_pvt->raddr;
	uint32_t vaddr = px_pvt->vaddr;
	uint16_t off = (uint16_t)(uintptr_t)(addr - vaddr) & 0xfff;

	if (px_lib_config_put(px_pvt->dip, pci_dev_addr, off,
	    size, wdata) != DDI_SUCCESS) {
		/*EMPTY*/
		/* XXX update error kstats */
	}
}

static uint8_t
px_pci_config_get8(ddi_acc_impl_t *handle, uint8_t *addr)
{
	return ((uint8_t)px_pci_config_get(handle, (uint32_t *)addr, 1));
}

static uint16_t
px_pci_config_get16(ddi_acc_impl_t *handle, uint16_t *addr)
{
	return ((uint16_t)px_pci_config_get(handle, (uint32_t *)addr, 2));
}

static uint32_t
px_pci_config_get32(ddi_acc_impl_t *handle, uint32_t *addr)
{
	return ((uint32_t)px_pci_config_get(handle, (uint32_t *)addr, 4));
}

static uint64_t
px_pci_config_get64(ddi_acc_impl_t *handle, uint64_t *addr)
{
	uint32_t rdatah, rdatal;

	rdatal = (uint32_t)px_pci_config_get(handle, (uint32_t *)addr, 4);
	rdatah = (uint32_t)px_pci_config_get(handle,
	    (uint32_t *)((char *)addr+4), 4);
	return (((uint64_t)rdatah << 32) | rdatal);
}

static void
px_pci_config_put8(ddi_acc_impl_t *handle, uint8_t *addr, uint8_t data)
{
	pci_cfg_data_t wdata = { 0 };

	wdata.qw = (uint8_t)data;
	px_pci_config_put(handle, (uint32_t *)addr, 1, wdata);
}

static void
px_pci_config_put16(ddi_acc_impl_t *handle, uint16_t *addr, uint16_t data)
{
	pci_cfg_data_t wdata = { 0 };

	wdata.qw = (uint16_t)data;
	px_pci_config_put(handle, (uint32_t *)addr, 2, wdata);
}

static void
px_pci_config_put32(ddi_acc_impl_t *handle, uint32_t *addr, uint32_t data)
{
	pci_cfg_data_t wdata = { 0 };

	wdata.qw = (uint32_t)data;
	px_pci_config_put(handle, (uint32_t *)addr, 4, wdata);
}

static void
px_pci_config_put64(ddi_acc_impl_t *handle, uint64_t *addr, uint64_t data)
{
	pci_cfg_data_t wdata = { 0 };

	wdata.qw = (uint32_t)(data & 0xffffffff);
	px_pci_config_put(handle, (uint32_t *)addr, 4, wdata);
	wdata.qw = (uint32_t)((data >> 32) & 0xffffffff);
	px_pci_config_put(handle, (uint32_t *)((char *)addr+4), 4, wdata);
}

static void
px_pci_config_rep_get8(ddi_acc_impl_t *handle, uint8_t *host_addr,
    uint8_t *dev_addr, size_t repcount, uint_t flags)
{
	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			*host_addr++ = px_pci_config_get8(handle, dev_addr++);
	else
		for (; repcount; repcount--)
			*host_addr++ = px_pci_config_get8(handle, dev_addr);
}

/*
 * Function to rep read 16 bit data off the PCI configuration space
 * behind the px host bridge.
 */
static void
px_pci_config_rep_get16(ddi_acc_impl_t *handle, uint16_t *host_addr,
    uint16_t *dev_addr, size_t repcount, uint_t flags)
{
	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			*host_addr++ = px_pci_config_get16(handle, dev_addr++);
	else
		for (; repcount; repcount--)
			*host_addr++ = px_pci_config_get16(handle, dev_addr);
}

/*
 * Function to rep read 32 bit data off the PCI configuration space
 * behind the px host bridge.
 */
static void
px_pci_config_rep_get32(ddi_acc_impl_t *handle, uint32_t *host_addr,
    uint32_t *dev_addr, size_t repcount, uint_t flags)
{
	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			*host_addr++ = px_pci_config_get32(handle, dev_addr++);
	else
		for (; repcount; repcount--)
			*host_addr++ = px_pci_config_get32(handle, dev_addr);
}

/*
 * Function to rep read 64 bit data off the PCI configuration space
 * behind the px host bridge.
 */
static void
px_pci_config_rep_get64(ddi_acc_impl_t *handle, uint64_t *host_addr,
    uint64_t *dev_addr, size_t repcount, uint_t flags)
{
	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			*host_addr++ = px_pci_config_get64(handle, dev_addr++);
	else
		for (; repcount; repcount--)
			*host_addr++ = px_pci_config_get64(handle, dev_addr);
}

/*
 * Function to rep write 8 bit data into the PCI configuration space
 * behind the px host bridge.
 */
static void
px_pci_config_rep_put8(ddi_acc_impl_t *handle, uint8_t *host_addr,
    uint8_t *dev_addr, size_t repcount, uint_t flags)
{
	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			px_pci_config_put8(handle, dev_addr++, *host_addr++);
	else
		for (; repcount; repcount--)
			px_pci_config_put8(handle, dev_addr, *host_addr++);
}

/*
 * Function to rep write 16 bit data into the PCI configuration space
 * behind the px host bridge.
 */
static void
px_pci_config_rep_put16(ddi_acc_impl_t *handle, uint16_t *host_addr,
    uint16_t *dev_addr, size_t repcount, uint_t flags)
{
	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			px_pci_config_put16(handle, dev_addr++, *host_addr++);
	else
		for (; repcount; repcount--)
			px_pci_config_put16(handle, dev_addr, *host_addr++);
}

/*
 * Function to rep write 32 bit data into the PCI configuration space
 * behind the px host bridge.
 */
static void
px_pci_config_rep_put32(ddi_acc_impl_t *handle, uint32_t *host_addr,
    uint32_t *dev_addr, size_t repcount, uint_t flags)
{
	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			px_pci_config_put32(handle, dev_addr++, *host_addr++);
	else
		for (; repcount; repcount--)
			px_pci_config_put32(handle, dev_addr, *host_addr++);
}

/*
 * Function to rep write 64 bit data into the PCI configuration space
 * behind the px host bridge.
 */
static void
px_pci_config_rep_put64(ddi_acc_impl_t *handle, uint64_t *host_addr,
    uint64_t *dev_addr, size_t repcount, uint_t flags)
{
	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			px_pci_config_put64(handle, dev_addr++, *host_addr++);
	else
		for (; repcount; repcount--)
			px_pci_config_put64(handle, dev_addr, *host_addr++);
}

/*
 * Provide a private access handle to route config access calls to the
 * hypervisor. Beware: do all error checking for config space accesses
 * before calling this function, i.e. in the calling function.
 * Due to the lack of meaningful error codes in DDI, the guaranteed
 * return of DDI_SUCCESS from here keeps the generic code readable and
 * better organized.
 */
/*ARGSUSED*/
int
px_lib_map_vconfig(dev_info_t *dip,
    ddi_map_req_t *mp, pci_config_offset_t off,
    pci_regspec_t *rp, caddr_t *addrp)
{
	int fmcap;
	ndi_err_t *errp;
	on_trap_data_t *otp;
	ddi_acc_hdl_t *hp;
	ddi_acc_impl_t *ap;
	uchar_t busnum;	/* bus number */
	uchar_t devnum;	/* device number */
	uchar_t funcnum; /* function number */
	px_config_acc_pvt_t *px_pvt;

	hp = (ddi_acc_hdl_t *)mp->map_handlep;
	ap = (ddi_acc_impl_t *)hp->ah_platform_private;

	/* Check for mapping teardown operation */
	if ((mp->map_op == DDI_MO_UNMAP) ||
	    (mp->map_op == DDI_MO_UNLOCK)) {
		/* free up memory allocated for the private access handle. */
		px_pvt = (px_config_acc_pvt_t *)hp->ah_bus_private;
		kmem_free((void *)px_pvt, sizeof (px_config_acc_pvt_t));

		/* unmap operation of PCI IO/config space. */
		return (DDI_SUCCESS);
	}

	fmcap = ddi_fm_capable(dip);
	if (DDI_FM_ACC_ERR_CAP(fmcap)) {
		errp = ((ddi_acc_impl_t *)hp)->ahi_err;
		otp = (on_trap_data_t *)errp->err_ontrap;
		otp->ot_handle = (void *)(hp);
		otp->ot_prot = OT_DATA_ACCESS;
		errp->err_status = DDI_FM_OK;
		errp->err_expected = DDI_FM_ERR_UNEXPECTED;
		errp->err_cf = px_err_cfg_hdl_check;
	}

	ap->ahi_get8 = px_pci_config_get8;
	ap->ahi_get16 = px_pci_config_get16;
	ap->ahi_get32 = px_pci_config_get32;
	ap->ahi_get64 = px_pci_config_get64;
	ap->ahi_put8 = px_pci_config_put8;
	ap->ahi_put16 = px_pci_config_put16;
	ap->ahi_put32 = px_pci_config_put32;
	ap->ahi_put64 = px_pci_config_put64;
	ap->ahi_rep_get8 = px_pci_config_rep_get8;
	ap->ahi_rep_get16 = px_pci_config_rep_get16;
	ap->ahi_rep_get32 = px_pci_config_rep_get32;
	ap->ahi_rep_get64 = px_pci_config_rep_get64;
	ap->ahi_rep_put8 = px_pci_config_rep_put8;
	ap->ahi_rep_put16 = px_pci_config_rep_put16;
	ap->ahi_rep_put32 = px_pci_config_rep_put32;
	ap->ahi_rep_put64 = px_pci_config_rep_put64;

	/* Initialize to default check/notify functions */
	ap->ahi_fault = 0;
	ap->ahi_fault_check = i_ddi_acc_fault_check;
	ap->ahi_fault_notify = i_ddi_acc_fault_notify;

	/* allocate memory for our private handle */
	px_pvt = (px_config_acc_pvt_t *)
	    kmem_zalloc(sizeof (px_config_acc_pvt_t), KM_SLEEP);
	hp->ah_bus_private = (void *)px_pvt;

	busnum = PCI_REG_BUS_G(rp->pci_phys_hi);
	devnum = PCI_REG_DEV_G(rp->pci_phys_hi);
	funcnum = PCI_REG_FUNC_G(rp->pci_phys_hi);

	/* set up private data for use during IO routines */

	/* addr needed by the HV APIs */
	px_pvt->raddr = busnum << 16 | devnum << 11 | funcnum << 8;
	/*
	 * Address that specifies the actual offset into the 256MB
	 * memory-mapped configuration space, 4K per device.
	 * The first 12 bits form the offset into the 4K config space.
	 * This address is only used during the IO routines to calculate
	 * the offset at which the transaction must be performed.
	 * Drivers bypassing DDI functions to access PCI config space will
	 * panic the system since the following is a bogus virtual address.
	 */
	px_pvt->vaddr = busnum << 20 | devnum << 15 | funcnum << 12 | off;
	px_pvt->dip = dip;
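
	/*
	 * Encoding example (hypothetical b/d/f): bus 2, dev 1, func 0
	 * yields raddr 0x20800 (2 << 16 | 1 << 11) and, with off 0,
	 * vaddr 0x208000 (2 << 20 | 1 << 15).
	 */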

	DBG(DBG_LIB_CFG, dip, "px_lib_map_vconfig: raddr 0x%x, vaddr 0x%x\n",
	    px_pvt->raddr, px_pvt->vaddr);
	*addrp = (caddr_t)(uintptr_t)px_pvt->vaddr;
	return (DDI_SUCCESS);
}

/*ARGSUSED*/
void
px_lib_map_attr_check(ddi_map_req_t *mp)
{
}

/*
 * px_lib_log_safeacc_err:
 * Imitate a cpu/mem trap call when a peek/poke fails.
 * This will initiate something similar to px_fm_callback.
 */
static void
px_lib_log_safeacc_err(px_t *px_p, ddi_acc_handle_t handle, int fme_flag,
    r_addr_t addr)
{
	uint32_t	addr_high, addr_low;
	pcie_req_id_t	bdf = PCIE_INVALID_BDF;
	pci_ranges_t	*ranges_p;
	int		range_len, i;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)handle;
	ddi_fm_error_t derr;

	derr.fme_status = DDI_FM_NONFATAL;
	derr.fme_version = DDI_FME_VERSION;
	derr.fme_flag = fme_flag;
	derr.fme_ena = fm_ena_generate(0, FM_ENA_FMT1);
	derr.fme_acc_handle = handle;
	if (hp)
		hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;

	addr_high = (uint32_t)(addr >> 32);
	addr_low = (uint32_t)addr;

	/*
	 * Make sure this failed load came from this PCIe port.  Check by
	 * matching the upper 32 bits of the address with the ranges property.
	 */
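	/*
	 * Each function owns a 4K config window (cf. px_lib_map_vconfig),
	 * so for a config-space range (addr_low >> 12) recovers the
	 * requester BDF below; e.g. a hypothetical addr_low of 0x00801000
	 * decodes to BDF 0x801 (bus 8, dev 0, func 1).
	 */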
	range_len = px_p->px_ranges_length / sizeof (pci_ranges_t);
	i = 0;
	for (ranges_p = px_p->px_ranges_p; i < range_len; i++, ranges_p++) {
		if (ranges_p->parent_high == addr_high) {
			switch (ranges_p->child_high & PCI_ADDR_MASK) {
			case PCI_ADDR_CONFIG:
				bdf = (pcie_req_id_t)(addr_low >> 12);
				break;
			default:
				bdf = PCIE_INVALID_BDF;
				break;
			}
			break;
		}
	}

	px_rp_en_q(px_p, bdf, addr, NULL);

	if (px_fm_enter(px_p) == DDI_SUCCESS) {
		(void) px_scan_fabric(px_p, px_p->px_dip, &derr);
		px_fm_exit(px_p);
	}
}


#ifdef  DEBUG
int	px_peekfault_cnt = 0;
int	px_pokefault_cnt = 0;
#endif  /* DEBUG */

/*
 * Do a safe write to a device.
 *
 * When this function is given a handle (cautious access), all errors are
 * suppressed.
 *
 * When this function is not given a handle (poke), only Unsupported Request
 * and Completer Abort errors are suppressed.
 *
 * In all cases, all errors are returned in the function return status.
 */

int
px_lib_ctlops_poke(dev_info_t *dip, dev_info_t *rdip,
    peekpoke_ctlops_t *in_args)
{
	px_t *px_p = DIP_TO_STATE(dip);
	px_pec_t *pec_p = px_p->px_pec_p;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)in_args->handle;

	size_t repcount = in_args->repcount;
	size_t size = in_args->size;
	uintptr_t dev_addr = in_args->dev_addr;
	uintptr_t host_addr = in_args->host_addr;

	int err	= DDI_SUCCESS;
	uint64_t hvio_poke_status;
	uint32_t wrt_stat;

	r_addr_t ra;
	uint64_t pokeval;
	pcie_req_id_t bdf;

	ra = (r_addr_t)va_to_pa((void *)dev_addr);
	for (; repcount; repcount--) {

		switch (size) {
		case sizeof (uint8_t):
			pokeval = *(uint8_t *)host_addr;
			break;
		case sizeof (uint16_t):
			pokeval = *(uint16_t *)host_addr;
			break;
		case sizeof (uint32_t):
			pokeval = *(uint32_t *)host_addr;
			break;
		case sizeof (uint64_t):
			pokeval = *(uint64_t *)host_addr;
			break;
		default:
			DBG(DBG_MAP, px_p->px_dip,
			    "poke: invalid size %d passed\n", size);
			err = DDI_FAILURE;
			goto done;
		}

		/*
		 * Grab pokefault mutex since hypervisor does not guarantee
		 * poke serialization.
		 */
		if (hp) {
			i_ndi_busop_access_enter(hp->ahi_common.ah_dip,
			    (ddi_acc_handle_t)hp);
			pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED;
		} else {
			mutex_enter(&pec_p->pec_pokefault_mutex);
			pec_p->pec_safeacc_type = DDI_FM_ERR_POKE;
		}

		if (pcie_get_bdf_from_dip(rdip, &bdf) != DDI_SUCCESS) {
			err = DDI_FAILURE;
			goto done;
		}

		hvio_poke_status = hvio_poke(px_p->px_dev_hdl, ra, size,
		    pokeval, bdf << 8, &wrt_stat);

		if ((hvio_poke_status != H_EOK) || (wrt_stat != H_EOK)) {
			err = DDI_FAILURE;
#ifdef  DEBUG
			px_pokefault_cnt++;
#endif
			/*
			 * For CAUTIOUS and POKE access, notify FMA to
			 * cleanup.  Imitate a cpu/mem trap call like in sun4u.
			 */
			px_lib_log_safeacc_err(px_p, (ddi_acc_handle_t)hp,
			    (hp ? DDI_FM_ERR_EXPECTED :
			    DDI_FM_ERR_POKE), ra);

			pec_p->pec_ontrap_data = NULL;
			pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
			if (hp) {
				i_ndi_busop_access_exit(hp->ahi_common.ah_dip,
				    (ddi_acc_handle_t)hp);
			} else {
				mutex_exit(&pec_p->pec_pokefault_mutex);
			}
			goto done;
		}

		pec_p->pec_ontrap_data = NULL;
		pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
		if (hp) {
			i_ndi_busop_access_exit(hp->ahi_common.ah_dip,
			    (ddi_acc_handle_t)hp);
		} else {
			mutex_exit(&pec_p->pec_pokefault_mutex);
		}

		host_addr += size;

		if (in_args->flags == DDI_DEV_AUTOINCR) {
			dev_addr += size;
			ra = (r_addr_t)va_to_pa((void *)dev_addr);
		}
	}

done:
	return (err);
}


/*ARGSUSED*/
int
px_lib_ctlops_peek(dev_info_t *dip, dev_info_t *rdip,
    peekpoke_ctlops_t *in_args, void *result)
{
	px_t *px_p = DIP_TO_STATE(dip);
	px_pec_t *pec_p = px_p->px_pec_p;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)in_args->handle;

	size_t repcount = in_args->repcount;
	uintptr_t dev_addr = in_args->dev_addr;
	uintptr_t host_addr = in_args->host_addr;

	r_addr_t ra;
	uint32_t read_status;
	uint64_t hvio_peek_status;
	uint64_t peekval;
	int err = DDI_SUCCESS;

	result = (void *)in_args->host_addr;

	ra = (r_addr_t)va_to_pa((void *)dev_addr);
	for (; repcount; repcount--) {

		/* Lock pokefault mutex so read doesn't mask a poke fault. */
		if (hp) {
			i_ndi_busop_access_enter(hp->ahi_common.ah_dip,
			    (ddi_acc_handle_t)hp);
			pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED;
		} else {
			mutex_enter(&pec_p->pec_pokefault_mutex);
			pec_p->pec_safeacc_type = DDI_FM_ERR_PEEK;
		}

		hvio_peek_status = hvio_peek(px_p->px_dev_hdl, ra,
		    in_args->size, &read_status, &peekval);

		if ((hvio_peek_status != H_EOK) || (read_status != H_EOK)) {
			err = DDI_FAILURE;

			/*
			 * For CAUTIOUS and PEEK access, notify FMA to
			 * cleanup.  Imitate a cpu/mem trap call like in sun4u.
			 */
			px_lib_log_safeacc_err(px_p, (ddi_acc_handle_t)hp,
			    (hp ? DDI_FM_ERR_EXPECTED :
			    DDI_FM_ERR_PEEK), ra);

			/* Stuff FFs in host addr if peek. */
			if (hp == NULL) {
				int i;
				uint8_t *ff_addr = (uint8_t *)host_addr;
				for (i = 0; i < in_args->size; i++)
					*ff_addr++ = 0xff;
			}
#ifdef  DEBUG
			px_peekfault_cnt++;
#endif
			pec_p->pec_ontrap_data = NULL;
			pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
			if (hp) {
				i_ndi_busop_access_exit(hp->ahi_common.ah_dip,
				    (ddi_acc_handle_t)hp);
			} else {
				mutex_exit(&pec_p->pec_pokefault_mutex);
			}
			goto done;
		}
		pec_p->pec_ontrap_data = NULL;
		pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
		if (hp) {
			i_ndi_busop_access_exit(hp->ahi_common.ah_dip,
			    (ddi_acc_handle_t)hp);
		} else {
			mutex_exit(&pec_p->pec_pokefault_mutex);
		}

		switch (in_args->size) {
		case sizeof (uint8_t):
			*(uint8_t *)host_addr = (uint8_t)peekval;
			break;
		case sizeof (uint16_t):
			*(uint16_t *)host_addr = (uint16_t)peekval;
			break;
		case sizeof (uint32_t):
			*(uint32_t *)host_addr = (uint32_t)peekval;
			break;
		case sizeof (uint64_t):
			*(uint64_t *)host_addr = (uint64_t)peekval;
			break;
		default:
			DBG(DBG_MAP, px_p->px_dip,
			    "peek: invalid size %d passed\n",
			    in_args->size);
			err = DDI_FAILURE;
			goto done;
		}

		host_addr += in_args->size;

		if (in_args->flags == DDI_DEV_AUTOINCR) {
			dev_addr += in_args->size;
			ra = (r_addr_t)va_to_pa((void *)dev_addr);
		}
	}
done:
	return (err);
}


/* add interrupt vector */
int
px_err_add_intr(px_fault_t *px_fault_p)
{
	px_t	*px_p = DIP_TO_STATE(px_fault_p->px_fh_dip);

	DBG(DBG_LIB_INT, px_p->px_dip,
	    "px_err_add_intr: calling add_ivintr");

	VERIFY(add_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL,
	    (intrfunc)px_fault_p->px_err_func, (caddr_t)px_fault_p, NULL,
	    (caddr_t)&px_fault_p->px_intr_payload[0]) == 0);

	DBG(DBG_LIB_INT, px_p->px_dip,
	    "px_err_add_intr: ib_intr_enable");

	px_ib_intr_enable(px_p, intr_dist_cpuid(), px_fault_p->px_intr_ino);

	return (DDI_SUCCESS);
}

/* remove interrupt vector */
void
px_err_rem_intr(px_fault_t *px_fault_p)
{
	px_t	*px_p = DIP_TO_STATE(px_fault_p->px_fh_dip);

	px_ib_intr_disable(px_p->px_ib_p, px_fault_p->px_intr_ino,
	    IB_INTR_WAIT);

	VERIFY(rem_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL) == 0);
}

void
px_cb_intr_redist(void *arg)
{
	px_t	*px_p = (px_t *)arg;
	px_ib_intr_dist_en(px_p->px_dip, intr_dist_cpuid(),
	    px_p->px_inos[PX_INTR_XBC], B_FALSE);
}

int
px_cb_add_intr(px_fault_t *f_p)
{
	px_t	*px_p = DIP_TO_STATE(f_p->px_fh_dip);

	DBG(DBG_LIB_INT, px_p->px_dip,
	    "px_cb_add_intr: calling add_ivintr");

	VERIFY(add_ivintr(f_p->px_fh_sysino, PX_ERR_PIL,
	    (intrfunc)f_p->px_err_func, (caddr_t)f_p, NULL,
	    (caddr_t)&f_p->px_intr_payload[0]) == 0);

	intr_dist_add(px_cb_intr_redist, px_p);

	DBG(DBG_LIB_INT, px_p->px_dip,
	    "px_cb_add_intr: ib_intr_enable");

	px_ib_intr_enable(px_p, intr_dist_cpuid(), f_p->px_intr_ino);

	return (DDI_SUCCESS);
}

void
px_cb_rem_intr(px_fault_t *f_p)
{
	intr_dist_rem(px_cb_intr_redist, DIP_TO_STATE(f_p->px_fh_dip));
	px_err_rem_intr(f_p);
}

#ifdef FMA
void
px_fill_rc_status(px_fault_t *px_fault_p, pciex_rc_error_regs_t *rc_status)
{
	px_pec_err_t	*err_pkt;

	err_pkt = (px_pec_err_t *)px_fault_p->px_intr_payload;

	/* initialise all the structure members */
	rc_status->status_valid = 0;

	if (err_pkt->pec_descr.P) {
		/* PCI Status Register */
		rc_status->pci_err_status = err_pkt->pci_err_status;
		rc_status->status_valid |= PCI_ERR_STATUS_VALID;
	}

	if (err_pkt->pec_descr.E) {
		/* PCIe Status Register */
		rc_status->pcie_err_status = err_pkt->pcie_err_status;
		rc_status->status_valid |= PCIE_ERR_STATUS_VALID;
	}

	if (err_pkt->pec_descr.U) {
		rc_status->ue_status = err_pkt->ue_reg_status;
		rc_status->status_valid |= UE_STATUS_VALID;
	}

	if (err_pkt->pec_descr.H) {
		rc_status->ue_hdr1 = err_pkt->hdr[0];
		rc_status->status_valid |= UE_HDR1_VALID;
	}

	if (err_pkt->pec_descr.I) {
		rc_status->ue_hdr2 = err_pkt->hdr[1];
		rc_status->status_valid |= UE_HDR2_VALID;
	}

	/* ue_fst_err_ptr - not available for sun4v?? */

	if (err_pkt->pec_descr.S) {
		rc_status->source_id = err_pkt->err_src_reg;
		rc_status->status_valid |= SOURCE_ID_VALID;
	}

	if (err_pkt->pec_descr.R) {
		rc_status->root_err_status = err_pkt->root_err_status;
		rc_status->status_valid |= CE_STATUS_VALID;
	}
}
#endif

/*ARGSUSED*/
int
px_lib_pmctl(int cmd, px_t *px_p)
{
	return (DDI_FAILURE);
}

/*ARGSUSED*/
uint_t
px_pmeq_intr(caddr_t arg)
{
	return (DDI_INTR_CLAIMED);
}

/*
 * Fetch the config space base address of the root complex.
 * Note that this depends on the px structure being initialized.
 */
uint64_t
px_lib_get_cfgacc_base(dev_info_t *dip)
{
	int		instance = DIP_TO_INST(dip);
	px_t		*px_p = INST_TO_STATE(instance);

	return (px_p->px_dev_hdl);
}

/*ARGSUSED*/
int
px_lib_hotplug_init(dev_info_t *dip, void *arg)
{
	return (DDI_ENOTSUP);
}

/*ARGSUSED*/
void
px_lib_hotplug_uninit(dev_info_t *dip)
{
}

/*ARGSUSED*/
void
px_hp_intr_redist(px_t *px_p)
{
}

/* Dummy cpr add callback */
/*ARGSUSED*/
void
px_cpr_add_callb(px_t *px_p)
{
}

/* Dummy cpr rem callback */
/*ARGSUSED*/
void
px_cpr_rem_callb(px_t *px_p)
{
}

/*ARGSUSED*/
boolean_t
px_lib_is_in_drain_state(px_t *px_p)
{
	return (B_FALSE);
}

/*
 * There is no IOAPI to get the BDF of the pcie root port nexus at this moment.
 * Assume it is 0x0000, until otherwise noted.  For now, all sun4v platforms
 * have programmed the BDF to be 0x0000.
 */
/*ARGSUSED*/
pcie_req_id_t
px_lib_get_bdf(px_t *px_p)
{
	return (0x0000);
}

int
px_lib_get_root_complex_mps(px_t *px_p, dev_info_t *dip, int *mps)
{
	pci_device_t	bdf = px_lib_get_bdf(px_p);

	if (hvio_get_rp_mps_cap(DIP_TO_HANDLE(dip), bdf, mps) == H_EOK)
		return (DDI_SUCCESS);
	else
		return (DDI_FAILURE);
}

int
px_lib_set_root_complex_mps(px_t *px_p, dev_info_t *dip, int mps)
{
	pci_device_t	bdf = px_lib_get_bdf(px_p);

	if (hvio_set_rp_mps(DIP_TO_HANDLE(dip), bdf, mps) == H_EOK)
		return (DDI_SUCCESS);
	else
		return (DDI_FAILURE);
}