xref: /titanic_50/usr/src/uts/sun4v/io/px/px_lib4v.c (revision 8eea8e29cc4374d1ee24c25a07f45af132db3499)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/types.h>
30 #include <sys/sysmacros.h>
31 #include <sys/ddi.h>
32 #include <sys/sunddi.h>
33 #include <sys/ddifm.h>
34 #include <sys/fm/protocol.h>
35 #include <sys/vmem.h>
36 #include <sys/hypervisor_api.h>
37 #include <px_obj.h>
38 #include "px_lib4v.h"
39 
40 /* mask for the ranges property in calculating the real PFN range */
41 uint_t px_ranges_phi_mask = ((1 << 28) -1);
42 
43 int
44 px_lib_dev_init(dev_info_t *dip, devhandle_t *dev_hdl)
45 {
46 	px_nexus_regspec_t	*rp;
47 	int			reglen;
48 
49 	DBG(DBG_ATTACH, dip, "px_lib_dev_init: dip 0x%p\n", dip);
50 
51 	if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
52 	    "reg", (caddr_t)&rp, &reglen) != DDI_SUCCESS) {
53 		DBG(DBG_ATTACH, dip, "px_lib_dev_init failed\n");
54 		return (DDI_FAILURE);
55 	}
56 
57 	/*
58 	 * Initilize device handle. The device handle uniquely identifies
59 	 * a SUN4V device. It consists of the lower 28-bits of the hi-cell
60 	 * of the first entry of the SUN4V device's "reg" property as
61 	 * defined by the SUN4V Bus Binding to Open Firmware.
62 	 */
63 	*dev_hdl = (devhandle_t)((rp->phys_addr >> 32) & DEVHDLE_MASK);
64 
65 	DBG(DBG_ATTACH, dip, "px_lib_dev_init: dev_hdl 0x%llx\n", *dev_hdl);
66 
67 	return (DDI_SUCCESS);
68 }
69 
70 /*ARGSUSED*/
71 int
72 px_lib_dev_fini(dev_info_t *dip)
73 {
74 	DBG(DBG_DETACH, dip, "px_lib_dev_fini: dip 0x%p\n", dip);
75 
76 	return (DDI_SUCCESS);
77 }
78 
79 /*ARGSUSED*/
80 int
81 px_lib_intr_devino_to_sysino(dev_info_t *dip, devino_t devino,
82     sysino_t *sysino)
83 {
84 	uint64_t	ret;
85 
86 	DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: dip 0x%p "
87 	    "devino 0x%x\n", dip, devino);
88 
89 	if ((ret = hvio_intr_devino_to_sysino(DIP_TO_HANDLE(dip),
90 	    devino, sysino)) != H_EOK) {
91 		DBG(DBG_LIB_INT, dip,
92 		    "hvio_intr_devino_to_sysino failed, ret 0x%lx\n", ret);
93 		return (DDI_FAILURE);
94 	}
95 
96 	DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: sysino 0x%llx\n",
97 	    *sysino);
98 
99 	return (DDI_SUCCESS);
100 }
101 
102 /*ARGSUSED*/
103 int
104 px_lib_intr_getvalid(dev_info_t *dip, sysino_t sysino,
105     intr_valid_state_t *intr_valid_state)
106 {
107 	uint64_t	ret;
108 
109 	DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: dip 0x%p sysino 0x%llx\n",
110 	    dip, sysino);
111 
112 	if ((ret = hvio_intr_getvalid(sysino,
113 	    (int *)intr_valid_state)) != H_EOK) {
114 		DBG(DBG_LIB_INT, dip, "hvio_intr_getvalid failed, ret 0x%lx\n",
115 		    ret);
116 		return (DDI_FAILURE);
117 	}
118 
119 	DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: intr_valid_state 0x%x\n",
120 	    *intr_valid_state);
121 
122 	return (DDI_SUCCESS);
123 }
124 
125 /*ARGSUSED*/
126 int
127 px_lib_intr_setvalid(dev_info_t *dip, sysino_t sysino,
128     intr_valid_state_t intr_valid_state)
129 {
130 	uint64_t	ret;
131 
132 	DBG(DBG_LIB_INT, dip, "px_lib_intr_setvalid: dip 0x%p sysino 0x%llx "
133 	    "intr_valid_state 0x%x\n", dip, sysino, intr_valid_state);
134 
135 	if ((ret = hvio_intr_setvalid(sysino, intr_valid_state)) != H_EOK) {
136 		DBG(DBG_LIB_INT, dip, "hvio_intr_setvalid failed, ret 0x%lx\n",
137 		    ret);
138 		return (DDI_FAILURE);
139 	}
140 
141 	return (DDI_SUCCESS);
142 }
143 
144 /*ARGSUSED*/
145 int
146 px_lib_intr_getstate(dev_info_t *dip, sysino_t sysino,
147     intr_state_t *intr_state)
148 {
149 	uint64_t	ret;
150 
151 	DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: dip 0x%p sysino 0x%llx\n",
152 	    dip, sysino);
153 
154 	if ((ret = hvio_intr_getstate(sysino, (int *)intr_state)) != H_EOK) {
155 		DBG(DBG_LIB_INT, dip, "hvio_intr_getstate failed, ret 0x%lx\n",
156 		    ret);
157 		return (DDI_FAILURE);
158 	}
159 
160 	DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: intr_state 0x%x\n",
161 	    *intr_state);
162 
163 	return (DDI_SUCCESS);
164 }
165 
166 /*ARGSUSED*/
167 int
168 px_lib_intr_setstate(dev_info_t *dip, sysino_t sysino,
169     intr_state_t intr_state)
170 {
171 	uint64_t	ret;
172 
173 	DBG(DBG_LIB_INT, dip, "px_lib_intr_setstate: dip 0x%p sysino 0x%llx "
174 	    "intr_state 0x%x\n", dip, sysino, intr_state);
175 
176 	if ((ret = hvio_intr_setstate(sysino, intr_state)) != H_EOK) {
177 		DBG(DBG_LIB_INT, dip, "hvio_intr_setstate failed, ret 0x%lx\n",
178 		    ret);
179 		return (DDI_FAILURE);
180 	}
181 
182 	return (DDI_SUCCESS);
183 }
184 
185 /*ARGSUSED*/
186 int
187 px_lib_intr_gettarget(dev_info_t *dip, sysino_t sysino, cpuid_t *cpuid)
188 {
189 	uint64_t	ret;
190 
191 	DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: dip 0x%p sysino 0x%llx\n",
192 	    dip, sysino);
193 
194 	if ((ret = hvio_intr_gettarget(sysino, cpuid)) != H_EOK) {
195 		DBG(DBG_LIB_INT, dip,
196 		    "hvio_intr_gettarget failed, ret 0x%lx\n", ret);
197 		return (DDI_FAILURE);
198 	}
199 
200 	DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: cpuid 0x%x\n", cpuid);
201 
202 	return (DDI_SUCCESS);
203 }
204 
205 /*ARGSUSED*/
206 int
207 px_lib_intr_settarget(dev_info_t *dip, sysino_t sysino, cpuid_t cpuid)
208 {
209 	uint64_t	ret;
210 
211 	DBG(DBG_LIB_INT, dip, "px_lib_intr_settarget: dip 0x%p sysino 0x%llx "
212 	    "cpuid 0x%x\n", dip, sysino, cpuid);
213 
214 	if ((ret = hvio_intr_settarget(sysino, cpuid)) != H_EOK) {
215 		DBG(DBG_LIB_INT, dip,
216 		    "hvio_intr_settarget failed, ret 0x%lx\n", ret);
217 		return (DDI_FAILURE);
218 	}
219 
220 	return (DDI_SUCCESS);
221 }
222 
223 /*ARGSUSED*/
224 int
225 px_lib_intr_reset(dev_info_t *dip)
226 {
227 	DBG(DBG_LIB_INT, dip, "px_lib_intr_reset: dip 0x%p\n", dip);
228 
229 	return (DDI_SUCCESS);
230 }
231 
/*
 * Map `pages' I/O pages into the IOMMU TSB starting at `tsbid'.
 *
 * The page frame list is taken either from a DMA request structure
 * (flag == MMU_MAP_MP) or by translating a kernel virtual address range
 * through the HAT.  The hypervisor may map fewer pages than requested
 * per call, so the map is driven in a loop; on failure any pages that
 * were already mapped are torn down before returning.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE (allocation or hypervisor error).
 */
/*ARGSUSED*/
int
px_lib_iommu_map(dev_info_t *dip, tsbid_t tsbid, pages_t pages,
    io_attributes_t io_attributes, void *addr, size_t pfn_index,
    int flag)
{
	pages_t		pgs_mapped = 0, pgs_cnt = 0;
	pages_t		pgs = pages;
	tsbnum_t	tsb_num = PCI_TSBID_TO_TSBNUM(tsbid);
	tsbindex_t	tsbindex = PCI_TSBID_TO_TSBINDEX(tsbid);
	io_page_list_t	*io_page_list_p, *ptr;
	int		i, err = DDI_SUCCESS;
	uint64_t	ret;

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: dip 0x%p tsbid 0x%llx "
	    "pages 0x%x atrr 0x%x addr 0x%p pfn_index 0x%llx, flag 0x%x\n",
	    dip, tsbid, pages, io_attributes, addr, pfn_index, flag);

	/* Scratch buffer for the page list handed to the hypervisor. */
	if ((ptr = kmem_zalloc((pages * sizeof (io_page_list_t)),
	    KM_NOSLEEP)) == NULL) {
		DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: kmem_zalloc failed\n");
		return (DDI_FAILURE);
	}

	io_page_list_p = (io_page_list_t *)ptr;

	if (flag == MMU_MAP_MP) {
		/* PFNs come from the DMA request's pre-built pfn array. */
		ddi_dma_impl_t	*mp = (ddi_dma_impl_t *)addr;

		for (i = 0; i < pages; i++, pfn_index++) {
			px_iopfn_t pfn = PX_GET_MP_PFN(mp, pfn_index);
			io_page_list_p[i] = MMU_PTOB(pfn);
		}
	} else {
		/* PFNs come from translating a kernel VA range via the HAT. */
		caddr_t	a = (caddr_t)addr;

		for (i = 0; i < pages; i++, a += MMU_PAGE_SIZE) {
			px_iopfn_t pfn = hat_getpfnum(kas.a_hat, a);
			io_page_list_p[i] = MMU_PTOB(pfn);
		}
	}

	/* The hypervisor takes the real (physical) address of the list. */
	io_page_list_p = (io_page_list_t *)va_to_pa(ptr);
	pgs_mapped = 0;

	/*
	 * hvio_iommu_map() may complete only part of the request each
	 * call (pgs_cnt returns the number actually mapped); loop until
	 * everything is mapped or the hypervisor reports an error.
	 */
	while (pgs) {
		if ((ret = hvio_iommu_map(DIP_TO_HANDLE(dip),
		    PCI_TSBID(tsb_num, tsbindex), pgs, io_attributes,
		    io_page_list_p, &pgs_cnt)) != H_EOK) {
			DBG(DBG_LIB_DMA, dip,
			    "hvio_iommu_map failed, ret 0x%lx\n", ret);
			err = DDI_FAILURE;
			break;
		}

		pgs_mapped += pgs_cnt;
		pgs -= pgs_cnt;
		tsbindex += pgs_cnt;
		io_page_list_p += pgs_cnt;
		pgs_cnt = 0;
	}

	/* Roll back any partial mapping on failure. */
	if ((err == DDI_FAILURE) && (pgs_mapped))
		(void) px_lib_iommu_demap(dip, tsbid, pgs_mapped);

	kmem_free(ptr, (pages * sizeof (io_page_list_t)));

	return (err);
}
301 
/*
 * Unmap `pages' I/O pages from the IOMMU TSB starting at `tsbid'.
 * As with mapping, the hypervisor may process fewer pages per call
 * than requested, so the demap is driven in a loop.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE on hypervisor error.
 */
/*ARGSUSED*/
int
px_lib_iommu_demap(dev_info_t *dip, tsbid_t tsbid, pages_t pages)
{
	tsbnum_t	tsb_num = PCI_TSBID_TO_TSBNUM(tsbid);
	tsbindex_t	tsbindex = PCI_TSBID_TO_TSBINDEX(tsbid);
	pages_t		pgs_cnt = 0;
	uint64_t	ret;

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_demap: dip 0x%p tsbid 0x%llx "
	    "pages 0x%x\n", dip, tsbid, pages);

	while (pages) {
		if ((ret = hvio_iommu_demap(DIP_TO_HANDLE(dip),
		    PCI_TSBID(tsb_num, tsbindex), pages,
		    &pgs_cnt)) != H_EOK) {
			DBG(DBG_LIB_DMA, dip,
			    "hvio_iommu_demap failed, ret 0x%lx\n", ret);
			return (DDI_FAILURE);
		}

		/* pgs_cnt is how many entries the HV demapped this pass. */
		pages -= pgs_cnt;
		tsbindex += pgs_cnt;
		pgs_cnt = 0;
	}

	return (DDI_SUCCESS);
}
330 
331 /*ARGSUSED*/
332 int
333 px_lib_iommu_getmap(dev_info_t *dip, tsbid_t tsbid,
334     io_attributes_t *attributes_p, r_addr_t *r_addr_p)
335 {
336 	uint64_t	ret;
337 
338 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: dip 0x%p tsbid 0x%llx\n",
339 	    dip, tsbid);
340 
341 	if ((ret = hvio_iommu_getmap(DIP_TO_HANDLE(dip), tsbid,
342 	    attributes_p, r_addr_p)) != H_EOK) {
343 		DBG(DBG_LIB_DMA, dip,
344 		    "hvio_iommu_getmap failed, ret 0x%lx\n", ret);
345 
346 		return ((ret == H_ENOMAP) ? DDI_DMA_NOMAPPING:DDI_FAILURE);
347 	}
348 
349 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: attr 0x%x r_addr 0x%llx\n",
350 	    *attributes_p, *r_addr_p);
351 
352 	return (DDI_SUCCESS);
353 }
354 
355 
356 /*
357  * Checks dma attributes against system bypass ranges
358  * A sun4v device must be capable of generating the entire 64-bit
359  * address in order to perform bypass DMA.
360  */
361 /*ARGSUSED*/
362 int
363 px_lib_dma_bypass_rngchk(ddi_dma_attr_t *attrp, uint64_t *lo_p, uint64_t *hi_p)
364 {
365 	if ((attrp->dma_attr_addr_lo != 0ull) ||
366 	    (attrp->dma_attr_addr_hi != UINT64_MAX)) {
367 
368 		return (DDI_DMA_BADATTR);
369 	}
370 
371 	*lo_p = 0ull;
372 	*hi_p = UINT64_MAX;
373 
374 	return (DDI_SUCCESS);
375 }
376 
377 
378 /*ARGSUSED*/
379 int
380 px_lib_iommu_getbypass(dev_info_t *dip, r_addr_t ra,
381     io_attributes_t io_attributes, io_addr_t *io_addr_p)
382 {
383 	uint64_t	ret;
384 
385 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: dip 0x%p ra 0x%llx "
386 	    "attr 0x%x\n", dip, ra, io_attributes);
387 
388 	if ((ret = hvio_iommu_getbypass(DIP_TO_HANDLE(dip), ra,
389 	    io_attributes, io_addr_p)) != H_EOK) {
390 		DBG(DBG_LIB_DMA, dip,
391 		    "hvio_iommu_getbypass failed, ret 0x%lx\n", ret);
392 		return (ret == H_ENOTSUPPORTED ? DDI_ENOTSUP : DDI_FAILURE);
393 	}
394 
395 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: io_addr 0x%llx\n",
396 	    *io_addr_p);
397 
398 	return (DDI_SUCCESS);
399 }
400 
/*
 * Synchronize a DMA handle's memory with the device.
 *
 * The requested (off, len) window is clipped to the handle's bound
 * region [dmai_offset, dmai_offset + dmai_size); the sync direction is
 * chosen from the handle's read/write flags, and the hypervisor is
 * called repeatedly since each hvio_dma_sync() call may sync only part
 * of the range (num_sync reports the amount completed).
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE for an unbound handle, an
 * out-of-window request, or a hypervisor error.
 */
/*ARGSUSED*/
int
px_lib_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
	off_t off, size_t len, uint_t cache_flags)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	uint64_t sync_dir;
	px_dvma_addr_t dvma_addr, pg_off;
	size_t num_sync;
	uint64_t status = H_EOK;

	DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: dip 0x%p rdip 0x%p "
	    "handle 0x%llx off 0x%x len 0x%x flags 0x%x\n",
	    dip, rdip, handle, off, len, cache_flags);

	/* Syncing an unbound handle is a caller bug; warn and fail. */
	if (!(mp->dmai_flags & DMAI_FLAGS_INUSE)) {
		cmn_err(CE_WARN, "Unbound dma handle %p from %s%d", (void *)mp,
		    ddi_driver_name(rdip), ddi_get_instance(rdip));
		return (DDI_FAILURE);
	}

	/* Handle was bound with no-sync semantics: nothing to do. */
	if (mp->dmai_flags & DMAI_FLAGS_NOSYNC)
		return (DDI_SUCCESS);

	/* len == 0 means "the whole binding". */
	if (!len)
		len = mp->dmai_size;

	/* Clip [off, off + len) to the handle's bound window. */
	pg_off = mp->dmai_offset;			/* start min */
	dvma_addr = MAX(off, pg_off);			/* lo */
	pg_off += mp->dmai_size;			/* end max */
	pg_off = MIN(off + len, pg_off);		/* hi */
	if (dvma_addr >= pg_off) {			/* lo >= hi ? */
		cmn_err(CE_WARN, "%lx + %lx out of window [%lx,%lx]",
		    off, len, mp->dmai_offset,
		    mp->dmai_offset + mp->dmai_size);
		return (DDI_FAILURE);
	}

	len = pg_off - dvma_addr;			/* sz = hi - lo */
	dvma_addr += mp->dmai_mapping;			/* start addr */

	if (mp->dmai_rflags & DDI_DMA_READ)
		sync_dir = HVIO_DMA_SYNC_DIR_FROM_DEV;
	else
		sync_dir = HVIO_DMA_SYNC_DIR_TO_DEV;

	/* The HV may sync the range piecewise; loop until done or error. */
	for (; ((len > 0) && (status == H_EOK)); len -= num_sync) {
		status = hvio_dma_sync(DIP_TO_HANDLE(dip), dvma_addr, len,
		    sync_dir, &num_sync);
		dvma_addr += num_sync;
	}

	return ((status == H_EOK) ? DDI_SUCCESS : DDI_FAILURE);
}
455 
456 
457 /*
458  * MSIQ Functions:
459  */
460 
/*
 * Configure every MSI event queue with the hypervisor.
 *
 * The queue buffer area is page-aligned (rounded up to the next
 * MMU page boundary), then each queue's real address and record count
 * are handed to hvio_msiq_conf(); the configuration is read back via
 * px_lib_msiq_info() as a sanity check.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE on the first queue that fails.
 */
/*ARGSUSED*/
int
px_lib_msiq_init(dev_info_t *dip)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
	uint64_t	*msiq_addr, ra;
	size_t		msiq_size;
	uint_t		rec_cnt;
	int		i, err = DDI_SUCCESS;
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_init: dip 0x%p\n", dip);

	/* Round the queue buffer up to the next MMU page boundary. */
	msiq_addr = (uint64_t *)(((uint64_t)msiq_state_p->msiq_buf_p +
	    (MMU_PAGE_SIZE - 1)) >> MMU_PAGE_SHIFT << MMU_PAGE_SHIFT);

	msiq_size = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);

	for (i = 0; i < msiq_state_p->msiq_cnt; i++) {
		/* The HV takes the real address of this queue's records. */
		ra = (r_addr_t)va_to_pa((caddr_t)msiq_addr + (i * msiq_size));

		if ((ret = hvio_msiq_conf(DIP_TO_HANDLE(dip),
		    (i + msiq_state_p->msiq_1st_msiq_id),
		    ra, msiq_state_p->msiq_rec_cnt)) != H_EOK) {
			DBG(DBG_LIB_MSIQ, dip,
			    "hvio_msiq_conf failed, ret 0x%lx\n", ret);
			err = DDI_FAILURE;
			break;
		}

		/* Read the configuration back to verify it took effect. */
		if ((err = px_lib_msiq_info(dip,
		    (i + msiq_state_p->msiq_1st_msiq_id),
		    &ra, &rec_cnt)) != DDI_SUCCESS) {
			DBG(DBG_LIB_MSIQ, dip,
			    "px_lib_msiq_info failed, ret 0x%x\n", err);
			err = DDI_FAILURE;
			break;
		}

		DBG(DBG_LIB_MSIQ, dip,
		    "px_lib_msiq_init: ra 0x%p rec_cnt 0x%x\n", ra, rec_cnt);
	}

	return (err);
}
507 
508 /*ARGSUSED*/
509 int
510 px_lib_msiq_fini(dev_info_t *dip)
511 {
512 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_fini: dip 0x%p\n", dip);
513 
514 	return (DDI_SUCCESS);
515 }
516 
517 /*ARGSUSED*/
518 int
519 px_lib_msiq_info(dev_info_t *dip, msiqid_t msiq_id, r_addr_t *ra_p,
520     uint_t *msiq_rec_cnt_p)
521 {
522 	uint64_t	ret;
523 
524 	DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: dip 0x%p msiq_id 0x%x\n",
525 	    dip, msiq_id);
526 
527 	if ((ret = hvio_msiq_info(DIP_TO_HANDLE(dip),
528 	    msiq_id, ra_p, msiq_rec_cnt_p)) != H_EOK) {
529 		DBG(DBG_LIB_MSIQ, dip,
530 		    "hvio_msiq_info failed, ret 0x%lx\n", ret);
531 		return (DDI_FAILURE);
532 	}
533 
534 	DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: ra_p 0x%p msiq_rec_cnt 0x%x\n",
535 	    ra_p, *msiq_rec_cnt_p);
536 
537 	return (DDI_SUCCESS);
538 }
539 
540 /*ARGSUSED*/
541 int
542 px_lib_msiq_getvalid(dev_info_t *dip, msiqid_t msiq_id,
543     pci_msiq_valid_state_t *msiq_valid_state)
544 {
545 	uint64_t	ret;
546 
547 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: dip 0x%p msiq_id 0x%x\n",
548 	    dip, msiq_id);
549 
550 	if ((ret = hvio_msiq_getvalid(DIP_TO_HANDLE(dip),
551 	    msiq_id, msiq_valid_state)) != H_EOK) {
552 		DBG(DBG_LIB_MSIQ, dip,
553 		    "hvio_msiq_getvalid failed, ret 0x%lx\n", ret);
554 		return (DDI_FAILURE);
555 	}
556 
557 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: msiq_valid_state 0x%x\n",
558 	    *msiq_valid_state);
559 
560 	return (DDI_SUCCESS);
561 }
562 
563 /*ARGSUSED*/
564 int
565 px_lib_msiq_setvalid(dev_info_t *dip, msiqid_t msiq_id,
566     pci_msiq_valid_state_t msiq_valid_state)
567 {
568 	uint64_t	ret;
569 
570 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setvalid: dip 0x%p msiq_id 0x%x "
571 	    "msiq_valid_state 0x%x\n", dip, msiq_id, msiq_valid_state);
572 
573 	if ((ret = hvio_msiq_setvalid(DIP_TO_HANDLE(dip),
574 	    msiq_id, msiq_valid_state)) != H_EOK) {
575 		DBG(DBG_LIB_MSIQ, dip,
576 		    "hvio_msiq_setvalid failed, ret 0x%lx\n", ret);
577 		return (DDI_FAILURE);
578 	}
579 
580 	return (DDI_SUCCESS);
581 }
582 
583 /*ARGSUSED*/
584 int
585 px_lib_msiq_getstate(dev_info_t *dip, msiqid_t msiq_id,
586     pci_msiq_state_t *msiq_state)
587 {
588 	uint64_t	ret;
589 
590 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: dip 0x%p msiq_id 0x%x\n",
591 	    dip, msiq_id);
592 
593 	if ((ret = hvio_msiq_getstate(DIP_TO_HANDLE(dip),
594 	    msiq_id, msiq_state)) != H_EOK) {
595 		DBG(DBG_LIB_MSIQ, dip,
596 		    "hvio_msiq_getstate failed, ret 0x%lx\n", ret);
597 		return (DDI_FAILURE);
598 	}
599 
600 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: msiq_state 0x%x\n",
601 	    *msiq_state);
602 
603 	return (DDI_SUCCESS);
604 }
605 
606 /*ARGSUSED*/
607 int
608 px_lib_msiq_setstate(dev_info_t *dip, msiqid_t msiq_id,
609     pci_msiq_state_t msiq_state)
610 {
611 	uint64_t	ret;
612 
613 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setstate: dip 0x%p msiq_id 0x%x "
614 	    "msiq_state 0x%x\n", dip, msiq_id, msiq_state);
615 
616 	if ((ret = hvio_msiq_setstate(DIP_TO_HANDLE(dip),
617 	    msiq_id, msiq_state)) != H_EOK) {
618 		DBG(DBG_LIB_MSIQ, dip,
619 		    "hvio_msiq_setstate failed, ret 0x%lx\n", ret);
620 		return (DDI_FAILURE);
621 	}
622 
623 	return (DDI_SUCCESS);
624 }
625 
626 /*ARGSUSED*/
627 int
628 px_lib_msiq_gethead(dev_info_t *dip, msiqid_t msiq_id,
629     msiqhead_t *msiq_head_p)
630 {
631 	uint64_t	ret;
632 
633 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: dip 0x%p msiq_id 0x%x\n",
634 	    dip, msiq_id);
635 
636 	if ((ret = hvio_msiq_gethead(DIP_TO_HANDLE(dip),
637 	    msiq_id, msiq_head_p)) != H_EOK) {
638 		DBG(DBG_LIB_MSIQ, dip,
639 		    "hvio_msiq_gethead failed, ret 0x%lx\n", ret);
640 		return (DDI_FAILURE);
641 	}
642 
643 	*msiq_head_p =  (*msiq_head_p / sizeof (msiq_rec_t));
644 
645 	DBG(DBG_LIB_MSIQ, dip, "px_msiq_gethead: msiq_head 0x%x\n",
646 	    *msiq_head_p);
647 
648 	return (DDI_SUCCESS);
649 }
650 
651 /*ARGSUSED*/
652 int
653 px_lib_msiq_sethead(dev_info_t *dip, msiqid_t msiq_id,
654     msiqhead_t msiq_head)
655 {
656 	uint64_t	ret;
657 
658 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_sethead: dip 0x%p msiq_id 0x%x "
659 	    "msiq_head 0x%x\n", dip, msiq_id, msiq_head);
660 
661 	if ((ret = hvio_msiq_sethead(DIP_TO_HANDLE(dip),
662 	    msiq_id, msiq_head * sizeof (msiq_rec_t))) != H_EOK) {
663 		DBG(DBG_LIB_MSIQ, dip,
664 		    "hvio_msiq_sethead failed, ret 0x%lx\n", ret);
665 		return (DDI_FAILURE);
666 	}
667 
668 	return (DDI_SUCCESS);
669 }
670 
671 /*ARGSUSED*/
672 int
673 px_lib_msiq_gettail(dev_info_t *dip, msiqid_t msiq_id,
674     msiqtail_t *msiq_tail_p)
675 {
676 	uint64_t	ret;
677 
678 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: dip 0x%p msiq_id 0x%x\n",
679 	    dip, msiq_id);
680 
681 	if ((ret = hvio_msiq_gettail(DIP_TO_HANDLE(dip),
682 	    msiq_id, msiq_tail_p)) != H_EOK) {
683 		DBG(DBG_LIB_MSIQ, dip,
684 		    "hvio_msiq_gettail failed, ret 0x%lx\n", ret);
685 		return (DDI_FAILURE);
686 	}
687 
688 	*msiq_tail_p =  (*msiq_tail_p / sizeof (msiq_rec_t));
689 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: msiq_tail 0x%x\n",
690 	    *msiq_tail_p);
691 
692 	return (DDI_SUCCESS);
693 }
694 
/*
 * Copy the current MSIQ record out of the queue into msiq_rec_p.
 *
 * A zero msiq_rec_rid marks an empty/unwritten slot, so nothing is
 * copied in that case (msiq_rec_p is left untouched).  After copying,
 * the record's rid field is cleared so the slot reads as consumed on
 * the next pass.
 */
/*ARGSUSED*/
void
px_lib_get_msiq_rec(dev_info_t *dip, px_msiq_t *msiq_p, msiq_rec_t *msiq_rec_p)
{
	msiq_rec_t	*curr_msiq_rec_p = (msiq_rec_t *)msiq_p->msiq_curr;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: dip 0x%p\n", dip);

	/* rid == 0 means this slot holds no record yet. */
	if (!curr_msiq_rec_p->msiq_rec_rid)
		return;

	*msiq_rec_p = *curr_msiq_rec_p;

	/* Zero out msiq_rec_rid field */
	curr_msiq_rec_p->msiq_rec_rid  = 0;
}
711 
712 /*
713  * MSI Functions:
714  */
715 
716 /*ARGSUSED*/
717 int
718 px_lib_msi_init(dev_info_t *dip)
719 {
720 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_init: dip 0x%p\n", dip);
721 
722 	/* Noop */
723 	return (DDI_SUCCESS);
724 }
725 
726 /*ARGSUSED*/
727 int
728 px_lib_msi_getmsiq(dev_info_t *dip, msinum_t msi_num,
729     msiqid_t *msiq_id)
730 {
731 	uint64_t	ret;
732 
733 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: dip 0x%p msi_num 0x%x\n",
734 	    dip, msi_num);
735 
736 	if ((ret = hvio_msi_getmsiq(DIP_TO_HANDLE(dip),
737 	    msi_num, msiq_id)) != H_EOK) {
738 		DBG(DBG_LIB_MSI, dip,
739 		    "hvio_msi_getmsiq failed, ret 0x%lx\n", ret);
740 		return (DDI_FAILURE);
741 	}
742 
743 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: msiq_id 0x%x\n",
744 	    *msiq_id);
745 
746 	return (DDI_SUCCESS);
747 }
748 
749 /*ARGSUSED*/
750 int
751 px_lib_msi_setmsiq(dev_info_t *dip, msinum_t msi_num,
752     msiqid_t msiq_id, msi_type_t msitype)
753 {
754 	uint64_t	ret;
755 
756 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setmsiq: dip 0x%p msi_num 0x%x "
757 	    "msq_id 0x%x\n", dip, msi_num, msiq_id);
758 
759 	if ((ret = hvio_msi_setmsiq(DIP_TO_HANDLE(dip),
760 	    msi_num, msiq_id, msitype)) != H_EOK) {
761 		DBG(DBG_LIB_MSI, dip,
762 		    "hvio_msi_setmsiq failed, ret 0x%lx\n", ret);
763 		return (DDI_FAILURE);
764 	}
765 
766 	return (DDI_SUCCESS);
767 }
768 
769 /*ARGSUSED*/
770 int
771 px_lib_msi_getvalid(dev_info_t *dip, msinum_t msi_num,
772     pci_msi_valid_state_t *msi_valid_state)
773 {
774 	uint64_t	ret;
775 
776 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: dip 0x%p msi_num 0x%x\n",
777 	    dip, msi_num);
778 
779 	if ((ret = hvio_msi_getvalid(DIP_TO_HANDLE(dip),
780 	    msi_num, msi_valid_state)) != H_EOK) {
781 		DBG(DBG_LIB_MSI, dip,
782 		    "hvio_msi_getvalid failed, ret 0x%lx\n", ret);
783 		return (DDI_FAILURE);
784 	}
785 
786 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: msiq_id 0x%x\n",
787 	    *msi_valid_state);
788 
789 	return (DDI_SUCCESS);
790 }
791 
792 /*ARGSUSED*/
793 int
794 px_lib_msi_setvalid(dev_info_t *dip, msinum_t msi_num,
795     pci_msi_valid_state_t msi_valid_state)
796 {
797 	uint64_t	ret;
798 
799 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setvalid: dip 0x%p msi_num 0x%x "
800 	    "msi_valid_state 0x%x\n", dip, msi_num, msi_valid_state);
801 
802 	if ((ret = hvio_msi_setvalid(DIP_TO_HANDLE(dip),
803 	    msi_num, msi_valid_state)) != H_EOK) {
804 		DBG(DBG_LIB_MSI, dip,
805 		    "hvio_msi_setvalid failed, ret 0x%lx\n", ret);
806 		return (DDI_FAILURE);
807 	}
808 
809 	return (DDI_SUCCESS);
810 }
811 
812 /*ARGSUSED*/
813 int
814 px_lib_msi_getstate(dev_info_t *dip, msinum_t msi_num,
815     pci_msi_state_t *msi_state)
816 {
817 	uint64_t	ret;
818 
819 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: dip 0x%p msi_num 0x%x\n",
820 	    dip, msi_num);
821 
822 	if ((ret = hvio_msi_getstate(DIP_TO_HANDLE(dip),
823 	    msi_num, msi_state)) != H_EOK) {
824 		DBG(DBG_LIB_MSI, dip,
825 		    "hvio_msi_getstate failed, ret 0x%lx\n", ret);
826 		return (DDI_FAILURE);
827 	}
828 
829 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: msi_state 0x%x\n",
830 	    *msi_state);
831 
832 	return (DDI_SUCCESS);
833 }
834 
835 /*ARGSUSED*/
836 int
837 px_lib_msi_setstate(dev_info_t *dip, msinum_t msi_num,
838     pci_msi_state_t msi_state)
839 {
840 	uint64_t	ret;
841 
842 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setstate: dip 0x%p msi_num 0x%x "
843 	    "msi_state 0x%x\n", dip, msi_num, msi_state);
844 
845 	if ((ret = hvio_msi_setstate(DIP_TO_HANDLE(dip),
846 	    msi_num, msi_state)) != H_EOK) {
847 		DBG(DBG_LIB_MSI, dip,
848 		    "hvio_msi_setstate failed, ret 0x%lx\n", ret);
849 		return (DDI_FAILURE);
850 	}
851 
852 	return (DDI_SUCCESS);
853 }
854 
855 /*
856  * MSG Functions:
857  */
858 
859 /*ARGSUSED*/
860 int
861 px_lib_msg_getmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
862     msiqid_t *msiq_id)
863 {
864 	uint64_t	ret;
865 
866 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: dip 0x%p msg_type 0x%x\n",
867 	    dip, msg_type);
868 
869 	if ((ret = hvio_msg_getmsiq(DIP_TO_HANDLE(dip),
870 	    msg_type, msiq_id)) != H_EOK) {
871 		DBG(DBG_LIB_MSG, dip,
872 		    "hvio_msg_getmsiq failed, ret 0x%lx\n", ret);
873 		return (DDI_FAILURE);
874 	}
875 
876 	DBG(DBG_LIB_MSI, dip, "px_lib_msg_getmsiq: msiq_id 0x%x\n",
877 	    *msiq_id);
878 
879 	return (DDI_SUCCESS);
880 }
881 
882 /*ARGSUSED*/
883 int
884 px_lib_msg_setmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
885     msiqid_t msiq_id)
886 {
887 	uint64_t	ret;
888 
889 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_setmsiq: dip 0x%p msg_type 0x%x "
890 	    "msq_id 0x%x\n", dip, msg_type, msiq_id);
891 
892 	if ((ret = hvio_msg_setmsiq(DIP_TO_HANDLE(dip),
893 	    msg_type, msiq_id)) != H_EOK) {
894 		DBG(DBG_LIB_MSG, dip,
895 		    "hvio_msg_setmsiq failed, ret 0x%lx\n", ret);
896 		return (DDI_FAILURE);
897 	}
898 
899 	return (DDI_SUCCESS);
900 }
901 
902 /*ARGSUSED*/
903 int
904 px_lib_msg_getvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
905     pcie_msg_valid_state_t *msg_valid_state)
906 {
907 	uint64_t	ret;
908 
909 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: dip 0x%p msg_type 0x%x\n",
910 	    dip, msg_type);
911 
912 	if ((ret = hvio_msg_getvalid(DIP_TO_HANDLE(dip), msg_type,
913 	    msg_valid_state)) != H_EOK) {
914 		DBG(DBG_LIB_MSG, dip,
915 		    "hvio_msg_getvalid failed, ret 0x%lx\n", ret);
916 		return (DDI_FAILURE);
917 	}
918 
919 	DBG(DBG_LIB_MSI, dip, "px_lib_msg_getvalid: msg_valid_state 0x%x\n",
920 	    *msg_valid_state);
921 
922 	return (DDI_SUCCESS);
923 }
924 
925 /*ARGSUSED*/
926 int
927 px_lib_msg_setvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
928     pcie_msg_valid_state_t msg_valid_state)
929 {
930 	uint64_t	ret;
931 
932 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_setvalid: dip 0x%p msg_type 0x%x "
933 	    "msg_valid_state 0x%x\n", dip, msg_type, msg_valid_state);
934 
935 	if ((ret = hvio_msg_setvalid(DIP_TO_HANDLE(dip), msg_type,
936 	    msg_valid_state)) != H_EOK) {
937 		DBG(DBG_LIB_MSG, dip,
938 		    "hvio_msg_setvalid failed, ret 0x%lx\n", ret);
939 		return (DDI_FAILURE);
940 	}
941 
942 	return (DDI_SUCCESS);
943 }
944 
945 /*
946  * Suspend/Resume Functions:
947  * Currently unsupported by hypervisor and all functions are noops.
948  */
949 /*ARGSUSED*/
950 int
951 px_lib_suspend(dev_info_t *dip)
952 {
953 	DBG(DBG_ATTACH, dip, "px_lib_suspend: Not supported\n");
954 
955 	/* Not supported */
956 	return (DDI_FAILURE);
957 }
958 
959 /*ARGSUSED*/
960 void
961 px_lib_resume(dev_info_t *dip)
962 {
963 	DBG(DBG_ATTACH, dip, "px_lib_resume: Not supported\n");
964 
965 	/* Noop */
966 }
967 
968 /*
969  * PCI tool Functions:
970  * Currently unsupported by hypervisor and all functions are noops.
971  */
972 /*ARGSUSED*/
973 int
974 px_lib_tools_dev_reg_ops(dev_info_t *dip, void *arg, int cmd, int mode)
975 {
976 	DBG(DBG_TOOLS, dip, "px_lib_tools_dev_reg_ops: Not supported\n");
977 
978 	return (DDI_FAILURE);
979 }
980 
981 /*ARGSUSED*/
982 int
983 px_lib_tools_bus_reg_ops(dev_info_t *dip, void *arg, int cmd, int mode)
984 {
985 	DBG(DBG_TOOLS, dip, "px_lib_tools_bus_reg_ops: Not supported\n");
986 
987 	return (DDI_FAILURE);
988 }
989 
990 /*ARGSUSED*/
991 int
992 px_lib_tools_intr_admn(dev_info_t *dip, void *arg, int cmd, int mode)
993 {
994 	DBG(DBG_TOOLS, dip, "px_lib_tools_intr_admn: Not supported\n");
995 
996 	return (DDI_FAILURE);
997 }
998 
999 /*
1000  * Misc Functions:
1001  * Currently unsupported by hypervisor and all functions are noops.
1002  */
/*ARGSUSED*/
uint64_t
px_lib_get_cb(caddr_t csr)
{
	/* Control block reads are not supported under the hypervisor. */
	return (DDI_SUCCESS);
}
1009 
/*ARGSUSED*/
void
px_lib_set_cb(caddr_t csr, uint64_t val)
{
	/* Control block writes are not supported under the hypervisor. */
	/* Noop */
}
1016 
1017 /*ARGSUSED*/
1018 static int
1019 px_lib_config_get(dev_info_t *dip, pci_device_t bdf, pci_config_offset_t off,
1020     uint8_t size, pci_cfg_data_t *data_p)
1021 {
1022 	uint64_t	ret;
1023 
1024 	DBG(DBG_LIB_CFG, dip, "px_lib_config_get: dip 0x%p, bdf 0x%llx "
1025 	    "off 0x%x size 0x%x\n", dip, bdf, off, size);
1026 
1027 	if ((ret = hvio_config_get(DIP_TO_HANDLE(dip), bdf, off,
1028 	    size, data_p)) != H_EOK) {
1029 		DBG(DBG_LIB_CFG, dip,
1030 		    "hvio_config_get failed, ret 0x%lx\n", ret);
1031 		return (DDI_FAILURE);
1032 	}
1033 	DBG(DBG_LIB_CFG, dip, "px_config_get: data 0x%x\n", data_p->dw);
1034 
1035 	return (DDI_SUCCESS);
1036 }
1037 
1038 /*ARGSUSED*/
1039 static int
1040 px_lib_config_put(dev_info_t *dip, pci_device_t bdf, pci_config_offset_t off,
1041     uint8_t size, pci_cfg_data_t data)
1042 {
1043 	uint64_t	ret;
1044 
1045 	DBG(DBG_LIB_CFG, dip, "px_lib_config_put: dip 0x%p, bdf 0x%llx "
1046 	    "off 0x%x size 0x%x data 0x%llx\n", dip, bdf, off, size, data.qw);
1047 
1048 	if ((ret = hvio_config_put(DIP_TO_HANDLE(dip), bdf, off,
1049 	    size, data)) != H_EOK) {
1050 		DBG(DBG_LIB_CFG, dip,
1051 		    "hvio_config_put failed, ret 0x%lx\n", ret);
1052 		return (DDI_FAILURE);
1053 	}
1054 
1055 	return (DDI_SUCCESS);
1056 }
1057 
/*
 * Common access-handle entry point for config space reads: recover the
 * device address and register offset from the handle's private data and
 * funnel through px_lib_config_get().  Returns all-ones on failure,
 * mimicking master-abort semantics.
 *
 * NOTE(review): `addr - vaddr' subtracts an integer from a uint32_t *,
 * which scales by sizeof (uint32_t) before the cast to uint16_t.
 * Whether this yields the intended byte offset depends on how
 * px_pvt->vaddr is initialized — confirm against the handle setup code.
 */
static uint32_t
px_pci_config_get(ddi_acc_impl_t *handle, uint32_t *addr, int size)
{
	px_config_acc_pvt_t	*px_pvt = (px_config_acc_pvt_t *)
					handle->ahi_common.ah_bus_private;
	uint32_t pci_dev_addr = px_pvt->raddr;
	uint32_t vaddr = px_pvt->vaddr;
	/* Offset within the 4K config page. */
	uint16_t off = (uint16_t)(addr - vaddr) & 0xfff;
	uint32_t rdata = 0;

	if (px_lib_config_get(px_pvt->dip, pci_dev_addr, off,
				size, (pci_cfg_data_t *)&rdata) != DDI_SUCCESS)
		/* XXX update error kstats */
		return (0xffffffff);
	return (rdata);
}
1074 
/*
 * Common access-handle entry point for config space writes; failures
 * are silently dropped (config writes have no error path to the
 * caller here).
 *
 * NOTE(review): same pointer-arithmetic concern as px_pci_config_get()
 * — `addr - vaddr' scales by sizeof (uint32_t); verify against the
 * handle setup code.
 */
static void
px_pci_config_put(ddi_acc_impl_t *handle, uint32_t *addr,
		int size, pci_cfg_data_t wdata)
{
	px_config_acc_pvt_t	*px_pvt = (px_config_acc_pvt_t *)
					handle->ahi_common.ah_bus_private;
	uint32_t pci_dev_addr = px_pvt->raddr;
	uint32_t vaddr = px_pvt->vaddr;
	/* Offset within the 4K config page. */
	uint16_t off = (uint16_t)(addr - vaddr) & 0xfff;

	if (px_lib_config_put(px_pvt->dip, pci_dev_addr, off,
				size, wdata) != DDI_SUCCESS) {
		/*EMPTY*/
		/* XXX update error kstats */
	}
}
1091 
1092 static uint8_t
1093 px_pci_config_get8(ddi_acc_impl_t *handle, uint8_t *addr)
1094 {
1095 	return ((uint8_t)px_pci_config_get(handle, (uint32_t *)addr, 1));
1096 }
1097 
1098 static uint16_t
1099 px_pci_config_get16(ddi_acc_impl_t *handle, uint16_t *addr)
1100 {
1101 	return ((uint16_t)px_pci_config_get(handle, (uint32_t *)addr, 2));
1102 }
1103 
1104 static uint32_t
1105 px_pci_config_get32(ddi_acc_impl_t *handle, uint32_t *addr)
1106 {
1107 	return ((uint32_t)px_pci_config_get(handle, (uint32_t *)addr, 4));
1108 }
1109 
1110 static uint64_t
1111 px_pci_config_get64(ddi_acc_impl_t *handle, uint64_t *addr)
1112 {
1113 	uint32_t rdatah, rdatal;
1114 
1115 	rdatal = (uint32_t)px_pci_config_get(handle, (uint32_t *)addr, 4);
1116 	rdatah = (uint32_t)px_pci_config_get(handle,
1117 				(uint32_t *)((char *)addr+4), 4);
1118 	return (((uint64_t)rdatah << 32) | rdatal);
1119 }
1120 
1121 static void
1122 px_pci_config_put8(ddi_acc_impl_t *handle, uint8_t *addr, uint8_t data)
1123 {
1124 	pci_cfg_data_t wdata = { 0 };
1125 
1126 	wdata.qw = (uint8_t)data;
1127 	px_pci_config_put(handle, (uint32_t *)addr, 1, wdata);
1128 }
1129 
1130 static void
1131 px_pci_config_put16(ddi_acc_impl_t *handle, uint16_t *addr, uint16_t data)
1132 {
1133 	pci_cfg_data_t wdata = { 0 };
1134 
1135 	wdata.qw = (uint16_t)data;
1136 	px_pci_config_put(handle, (uint32_t *)addr, 2, wdata);
1137 }
1138 
1139 static void
1140 px_pci_config_put32(ddi_acc_impl_t *handle, uint32_t *addr, uint32_t data)
1141 {
1142 	pci_cfg_data_t wdata = { 0 };
1143 
1144 	wdata.qw = (uint32_t)data;
1145 	px_pci_config_put(handle, (uint32_t *)addr, 4, wdata);
1146 }
1147 
1148 static void
1149 px_pci_config_put64(ddi_acc_impl_t *handle, uint64_t *addr, uint64_t data)
1150 {
1151 	pci_cfg_data_t wdata = { 0 };
1152 
1153 	wdata.qw = (uint32_t)(data & 0xffffffff);
1154 	px_pci_config_put(handle, (uint32_t *)addr, 4, wdata);
1155 	wdata.qw = (uint32_t)((data >> 32) & 0xffffffff);
1156 	px_pci_config_put(handle, (uint32_t *)((char *)addr+4), 4, wdata);
1157 }
1158 
1159 static void
1160 px_pci_config_rep_get8(ddi_acc_impl_t *handle, uint8_t *host_addr,
1161 			uint8_t *dev_addr, size_t repcount, uint_t flags)
1162 {
1163 	if (flags == DDI_DEV_AUTOINCR)
1164 		for (; repcount; repcount--)
1165 			*host_addr++ = px_pci_config_get8(handle, dev_addr++);
1166 	else
1167 		for (; repcount; repcount--)
1168 			*host_addr++ = px_pci_config_get8(handle, dev_addr);
1169 }
1170 
1171 /*
1172  * Function to rep read 16 bit data off the PCI configuration space behind
1173  * the 21554's host interface.
1174  */
1175 static void
1176 px_pci_config_rep_get16(ddi_acc_impl_t *handle, uint16_t *host_addr,
1177 			uint16_t *dev_addr, size_t repcount, uint_t flags)
1178 {
1179 	if (flags == DDI_DEV_AUTOINCR)
1180 		for (; repcount; repcount--)
1181 			*host_addr++ = px_pci_config_get16(handle, dev_addr++);
1182 	else
1183 		for (; repcount; repcount--)
1184 			*host_addr++ = px_pci_config_get16(handle, dev_addr);
1185 }
1186 
1187 /*
1188  * Function to rep read 32 bit data off the PCI configuration space behind
1189  * the 21554's host interface.
1190  */
1191 static void
1192 px_pci_config_rep_get32(ddi_acc_impl_t *handle, uint32_t *host_addr,
1193 			uint32_t *dev_addr, size_t repcount, uint_t flags)
1194 {
1195 	if (flags == DDI_DEV_AUTOINCR)
1196 		for (; repcount; repcount--)
1197 			*host_addr++ = px_pci_config_get32(handle, dev_addr++);
1198 	else
1199 		for (; repcount; repcount--)
1200 			*host_addr++ = px_pci_config_get32(handle, dev_addr);
1201 }
1202 
1203 /*
1204  * Function to rep read 64 bit data off the PCI configuration space behind
1205  * the 21554's host interface.
1206  */
1207 static void
1208 px_pci_config_rep_get64(ddi_acc_impl_t *handle, uint64_t *host_addr,
1209 			uint64_t *dev_addr, size_t repcount, uint_t flags)
1210 {
1211 	if (flags == DDI_DEV_AUTOINCR)
1212 		for (; repcount; repcount--)
1213 			*host_addr++ = px_pci_config_get64(handle, dev_addr++);
1214 	else
1215 		for (; repcount; repcount--)
1216 			*host_addr++ = px_pci_config_get64(handle, dev_addr);
1217 }
1218 
1219 /*
1220  * Function to rep write 8 bit data into the PCI configuration space behind
1221  * the 21554's host interface.
1222  */
1223 static void
1224 px_pci_config_rep_put8(ddi_acc_impl_t *handle, uint8_t *host_addr,
1225 			uint8_t *dev_addr, size_t repcount, uint_t flags)
1226 {
1227 	if (flags == DDI_DEV_AUTOINCR)
1228 		for (; repcount; repcount--)
1229 			px_pci_config_put8(handle, dev_addr++, *host_addr++);
1230 	else
1231 		for (; repcount; repcount--)
1232 			px_pci_config_put8(handle, dev_addr, *host_addr++);
1233 }
1234 
1235 /*
1236  * Function to rep write 16 bit data into the PCI configuration space behind
1237  * the 21554's host interface.
1238  */
1239 static void
1240 px_pci_config_rep_put16(ddi_acc_impl_t *handle, uint16_t *host_addr,
1241 			uint16_t *dev_addr, size_t repcount, uint_t flags)
1242 {
1243 	if (flags == DDI_DEV_AUTOINCR)
1244 		for (; repcount; repcount--)
1245 			px_pci_config_put16(handle, dev_addr++, *host_addr++);
1246 	else
1247 		for (; repcount; repcount--)
1248 			px_pci_config_put16(handle, dev_addr, *host_addr++);
1249 }
1250 
1251 /*
1252  * Function to rep write 32 bit data into the PCI configuration space behind
1253  * the 21554's host interface.
1254  */
1255 static void
1256 px_pci_config_rep_put32(ddi_acc_impl_t *handle, uint32_t *host_addr,
1257 			uint32_t *dev_addr, size_t repcount, uint_t flags)
1258 {
1259 	if (flags == DDI_DEV_AUTOINCR)
1260 		for (; repcount; repcount--)
1261 			px_pci_config_put32(handle, dev_addr++, *host_addr++);
1262 	else
1263 		for (; repcount; repcount--)
1264 			px_pci_config_put32(handle, dev_addr, *host_addr++);
1265 }
1266 
1267 /*
1268  * Function to rep write 64 bit data into the PCI configuration space behind
1269  * the 21554's host interface.
1270  */
1271 static void
1272 px_pci_config_rep_put64(ddi_acc_impl_t *handle, uint64_t *host_addr,
1273 			uint64_t *dev_addr, size_t repcount, uint_t flags)
1274 {
1275 	if (flags == DDI_DEV_AUTOINCR)
1276 		for (; repcount; repcount--)
1277 			px_pci_config_put64(handle, dev_addr++, *host_addr++);
1278 	else
1279 		for (; repcount; repcount--)
1280 			px_pci_config_put64(handle, dev_addr, *host_addr++);
1281 }
1282 
1283 /*
1284  * Provide a private access handle to route config access calls to Hypervisor.
1285  * Beware: Do all error checking for config space accesses before calling
1286  * this function. ie. do error checking from the calling function.
1287  * Due to a lack of meaningful error code in DDI, the gauranteed return of
1288  * DDI_SUCCESS from here makes the code organization readable/easier from
1289  * the generic code.
1290  */
1291 /*ARGSUSED*/
1292 int
1293 px_lib_map_vconfig(dev_info_t *dip,
1294 	ddi_map_req_t *mp, pci_config_offset_t off,
1295 	pci_regspec_t *rp, caddr_t *addrp)
1296 {
1297 	ddi_acc_hdl_t *hp;
1298 	ddi_acc_impl_t *ap;
1299 	uchar_t busnum;	/* bus number */
1300 	uchar_t devnum;	/* device number */
1301 	uchar_t funcnum; /* function number */
1302 	px_config_acc_pvt_t *px_pvt;
1303 
1304 	hp = (ddi_acc_hdl_t *)mp->map_handlep;
1305 	ap = (ddi_acc_impl_t *)hp->ah_platform_private;
1306 
1307 	/* Check for mapping teardown operation */
1308 	if ((mp->map_op == DDI_MO_UNMAP) ||
1309 			(mp->map_op == DDI_MO_UNLOCK)) {
1310 		/* free up memory allocated for the private access handle. */
1311 		px_pvt = (px_config_acc_pvt_t *)hp->ah_bus_private;
1312 		kmem_free((void *)px_pvt, sizeof (px_config_acc_pvt_t));
1313 
1314 		/* unmap operation of PCI IO/config space. */
1315 		return (DDI_SUCCESS);
1316 	}
1317 
1318 	ap->ahi_get8 = px_pci_config_get8;
1319 	ap->ahi_get16 = px_pci_config_get16;
1320 	ap->ahi_get32 = px_pci_config_get32;
1321 	ap->ahi_get64 = px_pci_config_get64;
1322 	ap->ahi_put8 = px_pci_config_put8;
1323 	ap->ahi_put16 = px_pci_config_put16;
1324 	ap->ahi_put32 = px_pci_config_put32;
1325 	ap->ahi_put64 = px_pci_config_put64;
1326 	ap->ahi_rep_get8 = px_pci_config_rep_get8;
1327 	ap->ahi_rep_get16 = px_pci_config_rep_get16;
1328 	ap->ahi_rep_get32 = px_pci_config_rep_get32;
1329 	ap->ahi_rep_get64 = px_pci_config_rep_get64;
1330 	ap->ahi_rep_put8 = px_pci_config_rep_put8;
1331 	ap->ahi_rep_put16 = px_pci_config_rep_put16;
1332 	ap->ahi_rep_put32 = px_pci_config_rep_put32;
1333 	ap->ahi_rep_put64 = px_pci_config_rep_put64;
1334 
1335 	/* Initialize to default check/notify functions */
1336 	ap->ahi_fault = 0;
1337 	ap->ahi_fault_check = i_ddi_acc_fault_check;
1338 	ap->ahi_fault_notify = i_ddi_acc_fault_notify;
1339 
1340 	/* allocate memory for our private handle */
1341 	px_pvt = (px_config_acc_pvt_t *)
1342 			kmem_zalloc(sizeof (px_config_acc_pvt_t), KM_SLEEP);
1343 	hp->ah_bus_private = (void *)px_pvt;
1344 
1345 	busnum = PCI_REG_BUS_G(rp->pci_phys_hi);
1346 	devnum = PCI_REG_DEV_G(rp->pci_phys_hi);
1347 	funcnum = PCI_REG_FUNC_G(rp->pci_phys_hi);
1348 
1349 	/* set up private data for use during IO routines */
1350 
1351 	/* addr needed by the HV APIs */
1352 	px_pvt->raddr = busnum << 16 | devnum << 11 | funcnum << 8;
1353 	/*
1354 	 * Address that specifies the actual offset into the 256MB
1355 	 * memory mapped configuration space, 4K per device.
1356 	 * First 12bits form the offset into 4K config space.
1357 	 * This address is only used during the IO routines to calculate
1358 	 * the offset at which the transaction must be performed.
1359 	 * Drivers bypassing DDI functions to access PCI config space will
1360 	 * panic the system since the following is a bogus virtual address.
1361 	 */
1362 	px_pvt->vaddr = busnum << 20 | devnum << 15 | funcnum << 12 | off;
1363 	px_pvt->dip = dip;
1364 
1365 	DBG(DBG_LIB_CFG, dip, "px_config_setup: raddr 0x%x, vaddr 0x%x\n",
1366 				px_pvt->raddr, px_pvt->vaddr);
1367 	*addrp = (caddr_t)px_pvt->vaddr;
1368 	return (DDI_SUCCESS);
1369 }
1370 
/*
 * Build a ddi_fm_error_t describing a failed safe (peek/poke/cautious)
 * access and hand it to the px error handler for logging/cleanup.
 * 'handle' is non-NULL for handle-based (cautious) accesses and NULL for
 * plain ddi_peek/ddi_poke; fme_flag distinguishes the access type.
 */
static void
px_lib_log_safeacc_err(px_t *px_p, dev_info_t *rdip,
    ddi_acc_handle_t handle, int fme_flag)
{
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)handle;
	ddi_fm_error_t derr;

	/*
	 * NOTE(review): derr is only partially initialized; any field
	 * not assigned below is stack garbage.  Confirm that
	 * px_fm_err_handler() reads only the fields set here.
	 */
	derr.fme_status = DDI_FM_NONFATAL;
	derr.fme_version = DDI_FME_VERSION;
	derr.fme_flag = fme_flag;
	derr.fme_ena = fm_ena_generate(0, FM_ENA_FMT1);
	derr.fme_acc_handle = handle;
	if (hp)
		/* Tell FMA the error was anticipated by the driver. */
		hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;

	(void) px_fm_err_handler(rdip, &derr, (const void *)px_p);
}
1388 
1389 
#ifdef  DEBUG
/* Debug-only counters of failed safe accesses, bumped on peek/poke faults. */
int	px_peekfault_cnt = 0;
int	px_pokefault_cnt = 0;
#endif  /* DEBUG */
1394 
1395 static int
1396 px_lib_bdf_from_dip(dev_info_t *rdip, uint32_t *bdf)
1397 {
1398 	/* Start with an array of 8 reg spaces for now to cover most devices. */
1399 	pci_regspec_t regspec_array[8];
1400 	pci_regspec_t *regspec = regspec_array;
1401 	int buflen = sizeof (regspec_array);
1402 	boolean_t kmalloced = B_FALSE;
1403 	int status;
1404 
1405 	status = ddi_getlongprop_buf(DDI_DEV_T_NONE, rdip,
1406 	    DDI_PROP_DONTPASS, "reg", (caddr_t)regspec, &buflen);
1407 
1408 	/* If need more space, fallback to kmem_alloc. */
1409 	if (status == DDI_PROP_BUF_TOO_SMALL) {
1410 		regspec = kmem_alloc(buflen, KM_SLEEP);
1411 
1412 		status = ddi_getlongprop_buf(DDI_DEV_T_NONE, rdip,
1413 		    DDI_PROP_DONTPASS, "reg", (caddr_t)regspec, &buflen);
1414 
1415 		kmalloced = B_TRUE;
1416 	}
1417 
1418 	/* Get phys_hi from first element.  All have same bdf. */
1419 	if (status == DDI_PROP_SUCCESS)
1420 		*bdf = regspec->pci_phys_hi & (PCI_REG_BDFR_M ^ PCI_REG_REG_M);
1421 
1422 	if (kmalloced)
1423 		kmem_free(regspec, buflen);
1424 
1425 	return ((status == DDI_PROP_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
1426 }
1427 
1428 /*
1429  * Do a safe write to a device.
1430  *
1431  * When this function is given a handle (cautious access), all errors are
1432  * suppressed.
1433  *
1434  * When this function is not given a handle (poke), only Unsupported Request
1435  * and Completer Abort errors are suppressed.
1436  *
1437  * In all cases, all errors are returned in the function return status.
1438  */
1439 
1440 int
1441 px_lib_ctlops_poke(dev_info_t *dip, dev_info_t *rdip,
1442     peekpoke_ctlops_t *in_args)
1443 {
1444 	px_t *px_p = DIP_TO_STATE(dip);
1445 	px_pec_t *pec_p = px_p->px_pec_p;
1446 
1447 	size_t repcount = in_args->repcount;
1448 	size_t size = in_args->size;
1449 	uintptr_t dev_addr = in_args->dev_addr;
1450 	uintptr_t host_addr = in_args->host_addr;
1451 
1452 	int err	= DDI_SUCCESS;
1453 	uint64_t hvio_poke_status;
1454 	uint32_t bdf;
1455 	uint32_t wrt_stat;
1456 
1457 	r_addr_t ra;
1458 	uint64_t pokeval;
1459 
1460 	if (px_lib_bdf_from_dip(rdip, &bdf) != DDI_SUCCESS) {
1461 		DBG(DBG_LIB_DMA, px_p->px_dip,
1462 		    "poke: px_lib_bdf_from_dip failed\n");
1463 		err = DDI_FAILURE;
1464 		goto done;
1465 	}
1466 
1467 	ra = (r_addr_t)va_to_pa((void *)dev_addr);
1468 	for (; repcount; repcount--) {
1469 
1470 		switch (size) {
1471 		case sizeof (uint8_t):
1472 			pokeval = *(uint8_t *)host_addr;
1473 			break;
1474 		case sizeof (uint16_t):
1475 			pokeval = *(uint16_t *)host_addr;
1476 			break;
1477 		case sizeof (uint32_t):
1478 			pokeval = *(uint32_t *)host_addr;
1479 			break;
1480 		case sizeof (uint64_t):
1481 			pokeval = *(uint64_t *)host_addr;
1482 			break;
1483 		default:
1484 			DBG(DBG_MAP, px_p->px_dip,
1485 			    "poke: invalid size %d passed\n", size);
1486 			err = DDI_FAILURE;
1487 			goto done;
1488 		}
1489 
1490 		/* Hypervisor does not guarantee poke serialization. */
1491 		mutex_enter(&pec_p->pec_pokefault_mutex);
1492 
1493 		/*
1494 		 * XXX Fabric errors can come in via error interrupts, and
1495 		 * this code lacks the handshaking needed to process them.
1496 		 *
1497 		 * XXX what to do for ddi_poke since there is no handle?
1498 		 *
1499 		 * XXX Holes to be filled in by FMA putback.
1500 		 */
1501 		hvio_poke_status = hvio_poke(px_p->px_dev_hdl, ra, size,
1502 			    pokeval, bdf, &wrt_stat);
1503 
1504 		mutex_exit(&pec_p->pec_pokefault_mutex);
1505 
1506 		if ((hvio_poke_status != H_EOK) || (wrt_stat != H_EOK)) {
1507 			err = DDI_FAILURE;
1508 #ifdef  DEBUG
1509 			px_pokefault_cnt++;
1510 #endif
1511 			/*
1512 			 * For CAUTIOUS and POKE access, notify FMA to
1513 			 * cleanup.  Distinguish between them, as FMA
1514 			 * may need to take different actions for each.
1515 			 */
1516 			px_lib_log_safeacc_err(px_p, rdip, in_args->handle,
1517 			    (in_args->handle ? DDI_FM_ERR_EXPECTED :
1518 			    DDI_FM_ERR_POKE));
1519 
1520 			goto done;
1521 		}
1522 
1523 		host_addr += size;
1524 
1525 		if (in_args->flags == DDI_DEV_AUTOINCR) {
1526 			dev_addr += size;
1527 			ra = (r_addr_t)va_to_pa((void *)dev_addr);
1528 		}
1529 	}
1530 
1531 done:
1532 	return (err);
1533 }
1534 
1535 
/*
 * Do a safe read (peek / cautious get) from a device via the hypervisor.
 * Errors are suppressed and reflected only in the return status; for a
 * handle-less peek the destination buffer is additionally filled with 0xff.
 */
/*ARGSUSED*/
int
px_lib_ctlops_peek(dev_info_t *dip, dev_info_t *rdip,
    peekpoke_ctlops_t *in_args, void *result)
{
	px_t *px_p = DIP_TO_STATE(dip);
	px_pec_t *pec_p = px_p->px_pec_p;

	size_t repcount = in_args->repcount;
	uintptr_t dev_addr = in_args->dev_addr;
	uintptr_t host_addr = in_args->host_addr;

	r_addr_t ra;
	uint32_t read_status;
	uint64_t hvio_peek_status;
	uint64_t peekval;

	int err = DDI_SUCCESS;

	/*
	 * NOTE(review): this assigns only the local copy of the by-value
	 * 'result' parameter (marked ARGSUSED) and has no effect outside
	 * this function; results are actually delivered through host_addr.
	 */
	result = (void *)in_args->host_addr;

	/* Peeks go through the hypervisor by real (physical) address. */
	ra = (r_addr_t)va_to_pa((void *)dev_addr);
	for (; repcount; repcount--) {

		/* Lock pokefault mutex so read doesn't mask a poke fault. */
		mutex_enter(&pec_p->pec_pokefault_mutex);

		hvio_peek_status = hvio_peek(px_p->px_dev_hdl, ra,
		    in_args->size, &read_status, &peekval);

		mutex_exit(&pec_p->pec_pokefault_mutex);

		/* Fail on either an HV call error or a reported read fault. */
		if ((hvio_peek_status != H_EOK) || (read_status != H_EOK)) {
			err = DDI_FAILURE;

			/*
			 * For CAUTIOUS and PEEK access, notify FMA to
			 * cleanup.  Distinguish between them, as FMA
			 * may need to take different actions for each.
			 */
			px_lib_log_safeacc_err(px_p, rdip, in_args->handle,
			    (in_args->handle ? DDI_FM_ERR_EXPECTED :
			    DDI_FM_ERR_PEEK));

			/* Stuff FFs in host addr if peek. */
			if (in_args->handle == NULL) {
				int i;
				uint8_t *ff_addr = (uint8_t *)host_addr;
				for (i = 0; i < in_args->size; i++)
					*ff_addr++ = 0xff;
			}
#ifdef  DEBUG
			px_peekfault_cnt++;
#endif
			goto done;

		} else {

			/* Store the value read at the caller-requested width. */
			switch (in_args->size) {
			case sizeof (uint8_t):
				*(uint8_t *)host_addr = (uint8_t)peekval;
				break;
			case sizeof (uint16_t):
				*(uint16_t *)host_addr = (uint16_t)peekval;
				break;
			case sizeof (uint32_t):
				*(uint32_t *)host_addr = (uint32_t)peekval;
				break;
			case sizeof (uint64_t):
				*(uint64_t *)host_addr = (uint64_t)peekval;
				break;
			default:
				DBG(DBG_MAP, px_p->px_dip,
				    "peek: invalid size %d passed\n",
				    in_args->size);
				err = DDI_FAILURE;
				goto done;
			}
		}

		host_addr += in_args->size;

		/* Recompute the RA only when the device address advances. */
		if (in_args->flags == DDI_DEV_AUTOINCR) {
			dev_addr += in_args->size;
			ra = (r_addr_t)va_to_pa((void *)dev_addr);
		}
	}
done:
	return (err);
}
1626 
/*
 * Power management control entry point.  Not implemented in this
 * hypervisor-based nexus; every request fails.
 */
/*ARGSUSED*/
int
px_lib_pmctl(int cmd, px_t *px_p)
{
	return (DDI_FAILURE);
}
1633