xref: /titanic_41/usr/src/uts/sun4/io/px/px_dma.c (revision 0a44ef6d9afbfe052a7e975f55ea0d2954b62a82)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * PCI Express nexus DVMA and DMA core routines:
30  *	dma_map/dma_bind_handle implementation
31  *	bypass and peer-to-peer support
32  *	fast track DVMA space allocation
33  *	runtime DVMA debug
34  */
35 #include <sys/types.h>
36 #include <sys/kmem.h>
37 #include <sys/async.h>
38 #include <sys/sysmacros.h>
39 #include <sys/sunddi.h>
40 #include <sys/ddi_impldefs.h>
41 #include "px_obj.h"
42 
43 /*LINTLIBRARY*/
44 
45 /*
46  * px_dma_allocmp - Allocate a pci dma implementation structure
47  *
48  * An extra ddi_dma_attr structure is bundled with the usual ddi_dma_impl
49  * to hold unmodified device limits. The ddi_dma_attr inside the
50  * ddi_dma_impl structure is augmented with system limits to enhance
51  * DVMA performance at runtime. The unaugmented device limits saved
52  * right after (accessed through (ddi_dma_attr_t *)(mp + 1)) are used
53  * strictly for peer-to-peer transfers, which do not obey system limits.
54  *
55  * return: pointer to the new handle, or NULL (DDI_DMA_NORESOURCES)
56  */
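/*
 * Handle layout (a sketch; px_dma_hdl_t in the px headers may carry
 * additional nexus-private fields after these two members):
 *
 *	+-----------------+ <- mp, handed back as the ddi_dma_handle_t
 *	| ddi_dma_impl_t  |    mp->dmai_attr holds the augmented limits
 *	+-----------------+ <- (ddi_dma_attr_t *)(mp + 1), i.e. PX_DEV_ATTR(mp)
 *	| ddi_dma_attr_t  |    unmodified device limits
 *	+-----------------+
 */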
57 ddi_dma_impl_t *
58 px_dma_allocmp(dev_info_t *dip, dev_info_t *rdip, int (*waitfp)(caddr_t),
59 	caddr_t arg)
60 {
61 	register ddi_dma_impl_t *mp;
62 	int sleep = (waitfp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
63 
64 	/* Caution: kmem_zalloc is deliberately avoided here for performance! */
65 	if ((mp = kmem_alloc(sizeof (px_dma_hdl_t), sleep)) == 0) {
66 		DBG(DBG_DMA_MAP, dip, "can't alloc dma_handle\n");
67 		if (waitfp != DDI_DMA_DONTWAIT) {
68 			DBG(DBG_DMA_MAP, dip, "alloc_mp kmem cb\n");
69 			ddi_set_callback(waitfp, arg, &px_kmem_clid);
70 		}
71 		return (mp);
72 	}
73 
74 	mp->dmai_rdip = rdip;
75 	mp->dmai_flags = 0;
76 	mp->dmai_pfnlst = NULL;
77 	mp->dmai_winlst = NULL;
78 
79 	/*
80 	 * kmem_alloc debug: the following fields are not zero-ed
81 	 * mp->dmai_mapping = 0;
82 	 * mp->dmai_size = 0;
83 	 * mp->dmai_offset = 0;
84 	 * mp->dmai_minxfer = 0;
85 	 * mp->dmai_burstsizes = 0;
86 	 * mp->dmai_ndvmapages = 0;
87 	 * mp->dmai_pool/roffset = 0;
88 	 * mp->dmai_rflags = 0;
89 	 * mp->dmai_inuse/flags
90 	 * mp->dmai_nwin = 0;
91 	 * mp->dmai_winsize = 0;
92 	 * mp->dmai_nexus_private/tte = 0;
93 	 * mp->dmai_iopte/pfnlst
94 	 * mp->dmai_sbi/pfn0 = 0;
95 	 * mp->dmai_minfo/winlst/fdvma
96 	 * mp->dmai_rdip
97 	 * bzero(&mp->dmai_object, sizeof (ddi_dma_obj_t));
98 	 * bzero(&mp->dmai_attr, sizeof (ddi_dma_attr_t));
99 	 * mp->dmai_cookie = 0;
100 	 */
101 
102 	mp->dmai_attr.dma_attr_version = (uint_t)DMA_ATTR_VERSION;
103 	mp->dmai_attr.dma_attr_flags = (uint_t)0;
104 	mp->dmai_fault = 0;
105 	mp->dmai_fault_check = NULL;
106 	mp->dmai_fault_notify = NULL;
107 
108 	mp->dmai_error.err_ena = 0;
109 	mp->dmai_error.err_status = DDI_FM_OK;
110 	mp->dmai_error.err_expected = DDI_FM_ERR_UNEXPECTED;
111 	mp->dmai_error.err_ontrap = NULL;
112 	mp->dmai_error.err_fep = NULL;
113 	mp->dmai_error.err_cf = NULL;
114 
115 	return (mp);
116 }
117 
118 void
119 px_dma_freemp(ddi_dma_impl_t *mp)
120 {
121 	if (mp->dmai_ndvmapages > 1)
122 		px_dma_freepfn(mp);
123 	if (mp->dmai_winlst)
124 		px_dma_freewin(mp);
125 	kmem_free(mp, sizeof (px_dma_hdl_t));
126 }
127 
128 void
129 px_dma_freepfn(ddi_dma_impl_t *mp)
130 {
131 	void *addr = mp->dmai_pfnlst;
132 	if (addr) {
133 		size_t npages = mp->dmai_ndvmapages;
134 		if (npages > 1)
135 			kmem_free(addr, npages * sizeof (px_iopfn_t));
136 		mp->dmai_pfnlst = NULL;
137 	}
138 	mp->dmai_ndvmapages = 0;
139 }
140 
141 /*
142  * px_dma_lmts2hdl - allocate a ddi_dma_impl_t, validate practical limits
143  *			and convert dmareq->dmar_limits to mp->dmai_attr
144  *
145  * ddi_dma_impl_t member modified     input
146  * ------------------------------------------------------------------------
147  * mp->dmai_minxfer		    - dev
148  * mp->dmai_burstsizes		    - dev
149  * mp->dmai_flags		    - no limit? peer-to-peer only?
150  *
151  * ddi_dma_attr member modified       input
152  * ------------------------------------------------------------------------
153  * mp->dmai_attr.dma_attr_addr_lo   - dev lo, sys lo
154  * mp->dmai_attr.dma_attr_addr_hi   - dev hi, sys hi
155  * mp->dmai_attr.dma_attr_count_max - dev count max, dev/sys lo/hi delta
156  * mp->dmai_attr.dma_attr_seg       - -1        (no nocross   restriction)
157  * mp->dmai_attr.dma_attr_align     - 1         (no alignment restriction)
158  *
159  * The dlim_dmaspeed member of dmareq->dmar_limits is ignored.
160  */
161 ddi_dma_impl_t *
162 px_dma_lmts2hdl(dev_info_t *dip, dev_info_t *rdip, px_mmu_t *mmu_p,
163 	ddi_dma_req_t *dmareq)
164 {
165 	ddi_dma_impl_t *mp;
166 	ddi_dma_attr_t *attr_p;
167 	uint64_t syslo		= mmu_p->mmu_dvma_base;
168 	uint64_t syshi		= mmu_p->mmu_dvma_end;
169 	uint64_t fasthi		= mmu_p->mmu_dvma_fast_end;
170 	ddi_dma_lim_t *lim_p	= dmareq->dmar_limits;
171 	uint32_t count_max	= lim_p->dlim_cntr_max;
172 	uint64_t lo		= lim_p->dlim_addr_lo;
173 	uint64_t hi		= lim_p->dlim_addr_hi;
174 	if (hi <= lo) {
175 		DBG(DBG_DMA_MAP, dip, "Bad limits\n");
176 		return ((ddi_dma_impl_t *)DDI_DMA_NOMAPPING);
177 	}
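	/* a dlim_cntr_max of 0 means no limit; the decrement wraps it to UINT32_MAX */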
178 	if (!count_max)
179 		count_max--;
180 
181 	if (!(mp = px_dma_allocmp(dip, rdip, dmareq->dmar_fp,
182 		dmareq->dmar_arg)))
183 		return (NULL);
184 
185 	/* store original dev input at the 2nd ddi_dma_attr */
186 	attr_p = PX_DEV_ATTR(mp);
187 	SET_DMAATTR(attr_p, lo, hi, -1, count_max);
188 	SET_DMAALIGN(attr_p, 1);
189 
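	/*
	 * Intersect the device range with the system DVMA range; an empty
	 * intersection means the device can only reach peer PCI space.
	 */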
190 	lo = MAX(lo, syslo);
191 	hi = MIN(hi, syshi);
192 	if (hi <= lo)
193 		mp->dmai_flags |= PX_DMAI_FLAGS_PEER_ONLY;
194 	count_max = MIN(count_max, hi - lo);
195 
196 	if (PX_DEV_NOSYSLIMIT(lo, hi, syslo, fasthi, 1))
197 		mp->dmai_flags |= PX_DMAI_FLAGS_NOFASTLIMIT |
198 			PX_DMAI_FLAGS_NOSYSLIMIT;
199 	else {
200 		if (PX_DEV_NOFASTLIMIT(lo, hi, syslo, syshi, 1))
201 			mp->dmai_flags |= PX_DMAI_FLAGS_NOFASTLIMIT;
202 	}
203 	if (PX_DMA_NOCTX(rdip))
204 		mp->dmai_flags |= PX_DMAI_FLAGS_NOCTX;
205 
206 	/* store augmented dev input in mp->dmai_attr */
207 	mp->dmai_minxfer	= lim_p->dlim_minxfer;
208 	mp->dmai_burstsizes	= lim_p->dlim_burstsizes;
209 	attr_p = &mp->dmai_attr;
210 	SET_DMAATTR(attr_p, lo, hi, -1, count_max);
211 	SET_DMAALIGN(attr_p, 1);
212 	return (mp);
213 }
214 
215 /*
216  * Called from px_attach to check for bypass dma support and set
217  * flags accordingly.
218  */
219 int
220 px_dma_attach(px_t *px_p)
221 {
222 	uint64_t baddr;
223 
224 	if (px_lib_iommu_getbypass(px_p->px_dip, 0ull,
225 			PCI_MAP_ATTR_WRITE|PCI_MAP_ATTR_READ,
226 			&baddr) != DDI_ENOTSUP)
227 		/* ignore all other errors */
228 		px_p->px_dev_caps |= PX_BYPASS_DMA_ALLOWED;
229 
230 	px_p->px_dma_sync_opt = ddi_prop_get_int(DDI_DEV_T_ANY,
231 	    px_p->px_dip, DDI_PROP_DONTPASS, "dma-sync-options", 0);
232 
233 	if (px_p->px_dma_sync_opt != 0)
234 		px_p->px_dev_caps |= PX_DMA_SYNC_REQUIRED;
235 
236 	return (DDI_SUCCESS);
237 }
238 
239 /*
240  * px_dma_attr2hdl
241  *
242  * This routine is called from the alloc handle entry point to sanity check the
243  * dma attribute structure.
244  *
245  * used by: px_dma_allochdl()
246  *
247  * return value:
248  *
249  *	DDI_SUCCESS		- on success
250  *	DDI_DMA_BADATTR		- attribute has invalid version number
251  *				  or address limits exclude dvma space
252  */
253 int
254 px_dma_attr2hdl(px_t *px_p, ddi_dma_impl_t *mp)
255 {
256 	px_mmu_t *mmu_p = px_p->px_mmu_p;
257 	uint64_t syslo, syshi;
258 	int	ret;
259 	ddi_dma_attr_t *attrp		= PX_DEV_ATTR(mp);
260 	uint64_t hi			= attrp->dma_attr_addr_hi;
261 	uint64_t lo			= attrp->dma_attr_addr_lo;
262 	uint64_t align			= attrp->dma_attr_align;
263 	uint64_t nocross		= attrp->dma_attr_seg;
264 	uint64_t count_max		= attrp->dma_attr_count_max;
265 
266 	DBG(DBG_DMA_ALLOCH, px_p->px_dip, "attrp=%p cntr_max=%x.%08x\n",
267 		attrp, HI32(count_max), LO32(count_max));
268 	DBG(DBG_DMA_ALLOCH, px_p->px_dip, "hi=%x.%08x lo=%x.%08x\n",
269 		HI32(hi), LO32(hi), HI32(lo), LO32(lo));
270 	DBG(DBG_DMA_ALLOCH, px_p->px_dip, "seg=%x.%08x align=%x.%08x\n",
271 		HI32(nocross), LO32(nocross), HI32(align), LO32(align));
272 
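	/* a dma_attr_seg of 0 means no boundary; the decrement wraps it to UINT64_MAX */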
273 	if (!nocross)
274 		nocross--;
275 	if (attrp->dma_attr_flags & DDI_DMA_FORCE_PHYSICAL) { /* BYPASS */
276 
277 		DBG(DBG_DMA_ALLOCH, px_p->px_dip, "bypass mode\n");
278 		/*
279 		 * If bypass DMA is not supported, return an error so that
280 		 * the target driver can fall back to dvma mode of operation
281 		 */
282 		if (!(px_p->px_dev_caps & PX_BYPASS_DMA_ALLOWED))
283 			return (DDI_DMA_BADATTR);
284 		mp->dmai_flags |= PX_DMAI_FLAGS_BYPASSREQ;
285 		if (nocross != UINT64_MAX)
286 			return (DDI_DMA_BADATTR);
287 		if (align && (align > MMU_PAGE_SIZE))
288 			return (DDI_DMA_BADATTR);
289 		align = 1; /* align on 1 page boundary */
290 
291 		/* do a range check and get the limits */
292 		ret = px_lib_dma_bypass_rngchk(px_p->px_dip, attrp,
293 				&syslo, &syshi);
294 		if (ret != DDI_SUCCESS)
295 			return (ret);
296 	} else { /* MMU_XLATE or PEER_TO_PEER */
297 		align = MAX(align, MMU_PAGE_SIZE) - 1;
298 		if ((align & nocross) != align) {
299 			dev_info_t *rdip = mp->dmai_rdip;
300 			cmn_err(CE_WARN, "%s%d dma_attr_seg not aligned",
301 				NAMEINST(rdip));
302 			return (DDI_DMA_BADATTR);
303 		}
304 		align = MMU_BTOP(align + 1);
305 		syslo = mmu_p->mmu_dvma_base;
306 		syshi = mmu_p->mmu_dvma_end;
307 	}
308 	if (hi <= lo) {
309 		dev_info_t *rdip = mp->dmai_rdip;
310 		cmn_err(CE_WARN, "%s%d limits out of range", NAMEINST(rdip));
311 		return (DDI_DMA_BADATTR);
312 	}
313 	lo = MAX(lo, syslo);
314 	hi = MIN(hi, syshi);
315 	if (!count_max)
316 		count_max--;
317 
318 	DBG(DBG_DMA_ALLOCH, px_p->px_dip, "hi=%x.%08x, lo=%x.%08x\n",
319 		HI32(hi), LO32(hi), HI32(lo), LO32(lo));
320 	if (hi <= lo) {
321 		/*
322 		 * If this is an IOMMU bypass access, the caller can't use
323 		 * the required addresses, so fail it.  Otherwise, it's
324 		 * peer-to-peer; ensure that the caller has no alignment or
325 		 * segment size restrictions.
326 		 */
327 		if ((mp->dmai_flags & PX_DMAI_FLAGS_BYPASSREQ) ||
328 		    (nocross < UINT32_MAX) || (align > 1))
329 			return (DDI_DMA_BADATTR);
330 
331 		mp->dmai_flags |= PX_DMAI_FLAGS_PEER_ONLY;
332 	} else /* set practical count_max value */
333 		count_max = MIN(count_max, hi - lo);
334 
335 	if (PX_DEV_NOSYSLIMIT(lo, hi, syslo, syshi, align))
336 		mp->dmai_flags |= PX_DMAI_FLAGS_NOSYSLIMIT |
337 			PX_DMAI_FLAGS_NOFASTLIMIT;
338 	else {
339 		syshi = mmu_p->mmu_dvma_fast_end;
340 		if (PX_DEV_NOFASTLIMIT(lo, hi, syslo, syshi, align))
341 			mp->dmai_flags |= PX_DMAI_FLAGS_NOFASTLIMIT;
342 	}
343 	if (PX_DMA_NOCTX(mp->dmai_rdip))
344 		mp->dmai_flags |= PX_DMAI_FLAGS_NOCTX;
345 
346 	mp->dmai_minxfer	= attrp->dma_attr_minxfer;
347 	mp->dmai_burstsizes	= attrp->dma_attr_burstsizes;
348 	attrp = &mp->dmai_attr;
349 	SET_DMAATTR(attrp, lo, hi, nocross, count_max);
350 	return (DDI_SUCCESS);
351 }
352 
353 #define	TGT_PFN_INBETWEEN(pfn, bgn, end) ((pfn >= bgn) && (pfn <= end))
354 
355 /*
356  * px_dma_type - determine which of the three DMA types (peer-to-peer,
357  *		mmu bypass, or mmu translate) we are asked to perform.
358  *		Also checks pfn0 and rejects any non-peer-to-peer
359  *		requests for peer-only devices.
360  *
361  *	return values:
362  *		DDI_DMA_NOMAPPING - can't get valid pfn0, or bad dma type
363  *		DDI_SUCCESS
364  *
365  *	dma handle members affected (set on exit):
366  *	mp->dmai_object		- dmareq->dmar_object
367  *	mp->dmai_rflags		- consistent?, nosync?, dmareq->dmar_flags
368  *	mp->dmai_flags   	- DMA type
369  *	mp->dmai_pfn0   	- 1st page pfn (if va/size pair and not shadow)
370  *	mp->dmai_roffset 	- initialized to starting MMU page offset
371  *	mp->dmai_ndvmapages	- # of total MMU pages of entire object
372  */
373 int
374 px_dma_type(px_t *px_p, ddi_dma_req_t *dmareq, ddi_dma_impl_t *mp)
375 {
376 	dev_info_t *dip = px_p->px_dip;
377 	ddi_dma_obj_t *dobj_p = &dmareq->dmar_object;
378 	px_pec_t *pec_p = px_p->px_pec_p;
379 	uint32_t offset;
380 	pfn_t pfn0;
381 	uint_t redzone;
382 
383 	mp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS;
384 
385 	if (!(px_p->px_dev_caps & PX_DMA_SYNC_REQUIRED))
386 		mp->dmai_rflags |= DMP_NOSYNC;
387 
388 	switch (dobj_p->dmao_type) {
389 	case DMA_OTYP_BUFVADDR:
390 	case DMA_OTYP_VADDR: {
391 		page_t **pplist = dobj_p->dmao_obj.virt_obj.v_priv;
392 		caddr_t vaddr = dobj_p->dmao_obj.virt_obj.v_addr;
393 
394 		DBG(DBG_DMA_MAP, dip, "vaddr=%p pplist=%p\n", vaddr, pplist);
395 		offset = (ulong_t)vaddr & MMU_PAGE_OFFSET;
396 		if (pplist) {				/* shadow list */
397 			mp->dmai_flags |= PX_DMAI_FLAGS_PGPFN;
398 			pfn0 = page_pptonum(*pplist);
399 		} else {
400 			struct as *as_p = dobj_p->dmao_obj.virt_obj.v_as;
401 			struct hat *hat_p = as_p ? as_p->a_hat : kas.a_hat;
402 			pfn0 = hat_getpfnum(hat_p, vaddr);
403 		}
404 		}
405 		break;
406 
407 	case DMA_OTYP_PAGES:
408 		offset = dobj_p->dmao_obj.pp_obj.pp_offset;
409 		mp->dmai_flags |= PX_DMAI_FLAGS_PGPFN;
410 		pfn0 = page_pptonum(dobj_p->dmao_obj.pp_obj.pp_pp);
411 		break;
412 
413 	case DMA_OTYP_PADDR:
414 	default:
415 		cmn_err(CE_WARN, "%s%d requested unsupported dma type %x",
416 			NAMEINST(mp->dmai_rdip), dobj_p->dmao_type);
417 		return (DDI_DMA_NOMAPPING);
418 	}
419 	if (pfn0 == PFN_INVALID) {
420 		cmn_err(CE_WARN, "%s%d: invalid pfn0 for DMA object %p",
421 			NAMEINST(dip), dobj_p);
422 		return (DDI_DMA_NOMAPPING);
423 	}
424 	if (TGT_PFN_INBETWEEN(pfn0, pec_p->pec_base32_pfn,
425 			pec_p->pec_last32_pfn)) {
426 		mp->dmai_flags |= PX_DMAI_FLAGS_PTP|PX_DMAI_FLAGS_PTP32;
427 		goto done;	/* leave bypass and dvma flag as 0 */
428 	} else if (TGT_PFN_INBETWEEN(pfn0, pec_p->pec_base64_pfn,
429 			pec_p->pec_last64_pfn)) {
430 		mp->dmai_flags |= PX_DMAI_FLAGS_PTP|PX_DMAI_FLAGS_PTP64;
431 		goto done;	/* leave bypass and dvma flag as 0 */
432 	}
433 	if (PX_DMA_ISPEERONLY(mp)) {
434 		dev_info_t *rdip = mp->dmai_rdip;
435 		cmn_err(CE_WARN, "Bad peer-to-peer req %s%d", NAMEINST(rdip));
436 		return (DDI_DMA_NOMAPPING);
437 	}
438 
439 	redzone = (mp->dmai_rflags & DDI_DMA_REDZONE) ||
440 	    (mp->dmai_flags & PX_DMAI_FLAGS_MAP_BUFZONE) ?
441 	    PX_DMAI_FLAGS_REDZONE : 0;
442 
443 	mp->dmai_flags |= (mp->dmai_flags & PX_DMAI_FLAGS_BYPASSREQ) ?
444 	    PX_DMAI_FLAGS_BYPASS : (PX_DMAI_FLAGS_DVMA | redzone);
445 done:
446 	mp->dmai_object	 = *dobj_p;			/* whole object    */
447 	mp->dmai_pfn0	 = (void *)pfn0;		/* cache pfn0	   */
448 	mp->dmai_roffset = offset;			/* win0 pg0 offset */
449 	mp->dmai_ndvmapages = MMU_BTOPR(offset + mp->dmai_object.dmao_size);
450 	return (DDI_SUCCESS);
451 }
452 
453 /*
454  * px_dma_pgpfn - set up pfnlst array according to pages
455  *	VA/size pair: <shadow IO, bypass, peer-to-peer>, or OTYP_PAGES
456  */
457 /*ARGSUSED*/
458 static int
459 px_dma_pgpfn(px_t *px_p, ddi_dma_impl_t *mp, uint_t npages)
460 {
461 	int i;
462 	dev_info_t *dip = px_p->px_dip;
463 
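	/* pfnlst[0] already holds pfn0 (filled in px_dma_pfn); both cases start at 1 */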
464 	switch (mp->dmai_object.dmao_type) {
465 	case DMA_OTYP_BUFVADDR:
466 	case DMA_OTYP_VADDR: {
467 		page_t **pplist = mp->dmai_object.dmao_obj.virt_obj.v_priv;
468 		DBG(DBG_DMA_MAP, dip, "shadow pplist=%p, %x pages, pfns=",
469 			pplist, npages);
470 		for (i = 1; i < npages; i++) {
471 			px_iopfn_t pfn = page_pptonum(pplist[i]);
472 			PX_SET_MP_PFN1(mp, i, pfn);
473 			DBG(DBG_DMA_MAP|DBG_CONT, dip, "%x ", pfn);
474 		}
475 		DBG(DBG_DMA_MAP|DBG_CONT, dip, "\n");
476 		}
477 		break;
478 
479 	case DMA_OTYP_PAGES: {
480 		page_t *pp = mp->dmai_object.dmao_obj.pp_obj.pp_pp->p_next;
481 		DBG(DBG_DMA_MAP, dip, "pp=%p pfns=", pp);
482 		for (i = 1; i < npages; i++, pp = pp->p_next) {
483 			px_iopfn_t pfn = page_pptonum(pp);
484 			PX_SET_MP_PFN1(mp, i, pfn);
485 			DBG(DBG_DMA_MAP|DBG_CONT, dip, "%x ", pfn);
486 		}
487 		DBG(DBG_DMA_MAP|DBG_CONT, dip, "\n");
488 		}
489 		break;
490 
491 	default:	/* check is already done by px_dma_type */
492 		ASSERT(0);
493 		break;
494 	}
495 	return (DDI_SUCCESS);
496 }
497 
498 /*
499  * px_dma_vapfn - set up pfnlst array according to VA
500  *	VA/size pair: <normal, bypass, peer-to-peer>
501  *	pfn0 is skipped since pfnlst[0] has already been filled
502  *	with the cached pfn0 (see px_dma_pfn)
503  */
504 static int
505 px_dma_vapfn(px_t *px_p, ddi_dma_impl_t *mp, uint_t npages)
506 {
507 	dev_info_t *dip = px_p->px_dip;
508 	int i;
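	/* vaddr temporarily carries v_as so the correct hat can be chosen */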
509 	caddr_t vaddr = (caddr_t)mp->dmai_object.dmao_obj.virt_obj.v_as;
510 	struct hat *hat_p = vaddr ? ((struct as *)vaddr)->a_hat : kas.a_hat;
511 
512 	vaddr = mp->dmai_object.dmao_obj.virt_obj.v_addr + MMU_PAGE_SIZE;
513 	for (i = 1; i < npages; i++, vaddr += MMU_PAGE_SIZE) {
514 		px_iopfn_t pfn = hat_getpfnum(hat_p, vaddr);
515 		if (pfn == PFN_INVALID)
516 			goto err_badpfn;
517 		PX_SET_MP_PFN1(mp, i, pfn);
518 		DBG(DBG_DMA_BINDH, dip, "px_dma_vapfn: mp=%p pfnlst[%x]=%x\n",
519 			mp, i, pfn);
520 	}
521 	return (DDI_SUCCESS);
522 err_badpfn:
523 	cmn_err(CE_WARN, "%s%d: bad page frame vaddr=%p", NAMEINST(dip), vaddr);
524 	return (DDI_DMA_NOMAPPING);
525 }
526 
527 /*
528  * px_dma_pfn - Fills pfn list for all pages being DMA-ed.
529  *
530  * dependencies:
531  *	mp->dmai_ndvmapages	- set to total # of dma pages
532  *
533  * return value:
534  *	DDI_SUCCESS
535  *	DDI_DMA_NOMAPPING
536  */
537 int
538 px_dma_pfn(px_t *px_p, ddi_dma_req_t *dmareq, ddi_dma_impl_t *mp)
539 {
540 	uint32_t npages = mp->dmai_ndvmapages;
541 	int (*waitfp)(caddr_t) = dmareq->dmar_fp;
542 	int i, ret, peer = PX_DMA_ISPTP(mp);
543 	int peer32 = PX_DMA_ISPTP32(mp);
544 	dev_info_t *dip = px_p->px_dip;
545 
546 	px_pec_t *pec_p = px_p->px_pec_p;
547 	px_iopfn_t pfn_base = peer32 ? pec_p->pec_base32_pfn :
548 					pec_p->pec_base64_pfn;
549 	px_iopfn_t pfn_last = peer32 ? pec_p->pec_last32_pfn :
550 					pec_p->pec_last64_pfn;
551 	px_iopfn_t pfn_adj = peer ? pfn_base : 0;
552 
553 	DBG(DBG_DMA_BINDH, dip, "px_dma_pfn: mp=%p pfn0=%x\n",
554 		mp, PX_MP_PFN0(mp) - pfn_adj);
555 	/* 1 page: no array alloc/fill, no mixed mode check */
556 	if (npages == 1) {
557 		PX_SET_MP_PFN(mp, 0, PX_MP_PFN0(mp) - pfn_adj);
558 		return (DDI_SUCCESS);
559 	}
560 	/* allocate pfn array */
561 	if (!(mp->dmai_pfnlst = kmem_alloc(npages * sizeof (px_iopfn_t),
562 		waitfp == DDI_DMA_SLEEP ? KM_SLEEP : KM_NOSLEEP))) {
563 		if (waitfp != DDI_DMA_DONTWAIT)
564 			ddi_set_callback(waitfp, dmareq->dmar_arg,
565 				&px_kmem_clid);
566 		return (DDI_DMA_NORESOURCES);
567 	}
568 	/* fill pfn array */
569 	PX_SET_MP_PFN(mp, 0, PX_MP_PFN0(mp) - pfn_adj);	/* pfnlst[0] */
570 	if ((ret = PX_DMA_ISPGPFN(mp) ? px_dma_pgpfn(px_p, mp, npages) :
571 		px_dma_vapfn(px_p, mp, npages)) != DDI_SUCCESS)
572 		goto err;
573 
574 	/* skip pfn0, check mixed mode and adjust peer to peer pfn */
575 	for (i = 1; i < npages; i++) {
576 		px_iopfn_t pfn = PX_GET_MP_PFN1(mp, i);
577 		if (peer ^ TGT_PFN_INBETWEEN(pfn, pfn_base, pfn_last)) {
578 			cmn_err(CE_WARN, "%s%d mixed mode DMA %lx %lx",
579 				NAMEINST(mp->dmai_rdip), PX_MP_PFN0(mp), pfn);
580 			ret = DDI_DMA_NOMAPPING;	/* mixed mode */
581 			goto err;
582 		}
583 		DBG(DBG_DMA_MAP, dip,
584 			"px_dma_pfn: pfnlst[%x]=%x-%x\n", i, pfn, pfn_adj);
585 		if (pfn_adj)
586 			PX_SET_MP_PFN1(mp, i, pfn - pfn_adj);
587 	}
588 	return (DDI_SUCCESS);
589 err:
590 	px_dma_freepfn(mp);
591 	return (ret);
592 }
593 
594 /*
595  * px_dvma_win() - trim requested DVMA size down to window size
596  *	The 1st window starts from offset and ends at page-aligned boundary.
597  *	From the 2nd window on, each window starts and ends at page-aligned
598  *	boundaries, except that the last window ends wherever the request ends.
599  *
600  *	accesses the following mp-> members:
601  *	mp->dmai_attr.dma_attr_count_max
602  *	mp->dmai_attr.dma_attr_seg
603  *	mp->dmai_roffset   - start offset of 1st window
604  *	mp->dmai_rflags (redzone)
605  *	mp->dmai_ndvmapages (for 1 page fast path)
606  *
607  *	sets the following mp-> members:
608  *	mp->dmai_size	   - xfer size, != winsize if 1st/last win  (not fixed)
609  *	mp->dmai_winsize   - window size (no redzone), n * page size    (fixed)
610  *	mp->dmai_nwin	   - # of DMA windows of entire object		(fixed)
611  *	mp->dmai_rflags	   - remove partial flag if nwin == 1		(fixed)
612  *	mp->dmai_winlst	   - NULL, window objects not used for DVMA	(fixed)
613  *
614  *	fixed - not changed across different DMA windows
615  */
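/*
 * Worked example (a sketch, assuming an 8K MMU page size and no nocross
 * restriction): a 20K object at page offset 0x200, no redzone, and a
 * dma_attr_count_max of 0x3fff. xfer_sz starts at 0x5000, is trimmed by
 * count_max to 0x4000, and page-aligning (xfer_sz + pg_off) keeps it at
 * 0x4000. So dmai_winsize = 0x4000, the 1st window moves dmai_size =
 * 0x3e00 bytes, and dmai_nwin = (0x5000 + 0x200 + 0x3fff) / 0x4000 = 2
 * (the request must have DDI_DMA_PARTIAL set to get this far).
 */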
616 /*ARGSUSED*/
617 int
618 px_dvma_win(px_t *px_p, ddi_dma_req_t *dmareq, ddi_dma_impl_t *mp)
619 {
620 	uint32_t redzone_sz	= PX_HAS_REDZONE(mp) ? MMU_PAGE_SIZE : 0;
621 	size_t obj_sz		= mp->dmai_object.dmao_size;
622 	size_t xfer_sz;
623 	ulong_t pg_off;
624 
625 	if ((mp->dmai_ndvmapages == 1) && !redzone_sz) {
626 		mp->dmai_rflags &= ~DDI_DMA_PARTIAL;
627 		mp->dmai_size = obj_sz;
628 		mp->dmai_winsize = MMU_PAGE_SIZE;
629 		mp->dmai_nwin = 1;
630 		goto done;
631 	}
632 
633 	pg_off	= mp->dmai_roffset;
634 	xfer_sz	= obj_sz + redzone_sz;
635 
636 	/* include redzone in nocross check */ {
637 		uint64_t nocross = mp->dmai_attr.dma_attr_seg;
638 		if (xfer_sz + pg_off - 1 > nocross)
639 			xfer_sz = nocross - pg_off + 1;
640 		if (redzone_sz && (xfer_sz <= redzone_sz)) {
641 			DBG(DBG_DMA_MAP, px_p->px_dip,
642 			    "nocross too small: "
643 			    "%lx(%lx)+%lx+%lx < %llx\n",
644 			    xfer_sz, obj_sz, pg_off, redzone_sz, nocross);
645 			return (DDI_DMA_TOOBIG);
646 		}
647 	}
648 	xfer_sz -= redzone_sz;		/* restore transfer size  */
649 	/* check counter max */ {
650 		uint32_t count_max = mp->dmai_attr.dma_attr_count_max;
651 		if (xfer_sz - 1 > count_max)
652 			xfer_sz = count_max + 1;
653 	}
654 	if (xfer_sz >= obj_sz) {
655 		mp->dmai_rflags &= ~DDI_DMA_PARTIAL;
656 		mp->dmai_size = xfer_sz;
657 		mp->dmai_winsize = P2ROUNDUP(xfer_sz + pg_off, MMU_PAGE_SIZE);
658 		mp->dmai_nwin = 1;
659 		goto done;
660 	}
661 	if (!(dmareq->dmar_flags & DDI_DMA_PARTIAL)) {
662 		DBG(DBG_DMA_MAP, px_p->px_dip, "too big: %lx+%lx+%lx > %lx\n",
663 			obj_sz, pg_off, redzone_sz, xfer_sz);
664 		return (DDI_DMA_TOOBIG);
665 	}
666 
667 	xfer_sz = MMU_PTOB(MMU_BTOP(xfer_sz + pg_off)); /* page align */
668 	mp->dmai_size = xfer_sz - pg_off;	/* 1st window xferrable size */
669 	mp->dmai_winsize = xfer_sz;		/* redzone not in winsize */
670 	mp->dmai_nwin = (obj_sz + pg_off + xfer_sz - 1) / xfer_sz;
671 done:
672 	mp->dmai_winlst = NULL;
673 	px_dump_dma_handle(DBG_DMA_MAP, px_p->px_dip, mp);
674 	return (DDI_SUCCESS);
675 }
676 
677 /*
678  * fast track cache entry to mmu context: inserts three 1 bits (0x38)
679  * between the upper 6 bits and lower 3 bits of the 9-bit cache entry
680  */
681 #define	MMU_FCE_TO_CTX(i)	(((i) << 3) | ((i) & 0x7) | 0x38)
682 
683 /*
684  * px_dvma_map_fast - attempts to map fast trackable DVMA
685  */
686 /*ARGSUSED*/
687 int
688 px_dvma_map_fast(px_mmu_t *mmu_p, ddi_dma_impl_t *mp)
689 {
690 	uint_t clustsz = px_dvma_page_cache_clustsz;
691 	uint_t entries = px_dvma_page_cache_entries;
692 	io_attributes_t attr = PX_GET_TTE_ATTR(mp->dmai_rflags,
693 	    mp->dmai_attr.dma_attr_flags);
694 	int i = mmu_p->mmu_dvma_addr_scan_start;
695 	uint8_t *lock_addr = mmu_p->mmu_dvma_cache_locks + i;
696 	px_dvma_addr_t dvma_pg;
697 	size_t npages = MMU_BTOP(mp->dmai_winsize);
698 	dev_info_t *dip = mmu_p->mmu_px_p->px_dip;
699 
700 	extern uint8_t ldstub(uint8_t *);
701 	ASSERT(MMU_PTOB(npages) == mp->dmai_winsize);
702 	ASSERT(npages + PX_HAS_REDZONE(mp) <= clustsz);
703 
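	/*
	 * Scan the per-cluster lock bytes starting at the last scan position;
	 * ldstub atomically sets the byte to 0xff and returns its previous
	 * value, so a zero return means this cluster was free and is now
	 * claimed. If the scan runs off the end, retry once from the start.
	 */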
704 	for (; i < entries && ldstub(lock_addr); i++, lock_addr++);
705 	if (i >= entries) {
706 		lock_addr = mmu_p->mmu_dvma_cache_locks;
707 		i = 0;
708 		for (; i < entries && ldstub(lock_addr); i++, lock_addr++);
709 		if (i >= entries) {
710 #ifdef	PX_DMA_PROF
711 			px_dvmaft_exhaust++;
712 #endif	/* PX_DMA_PROF */
713 			return (DDI_DMA_NORESOURCES);
714 		}
715 	}
716 	mmu_p->mmu_dvma_addr_scan_start = (i + 1) & (entries - 1);
717 
718 	i *= clustsz;
719 	dvma_pg = mmu_p->dvma_base_pg + i;
720 
721 	if (px_lib_iommu_map(dip, PCI_TSBID(0, i), npages, attr,
722 	    (void *)mp, 0, MMU_MAP_PFN) != DDI_SUCCESS) {
723 		DBG(DBG_MAP_WIN, dip, "px_dvma_map_fast: "
724 		    "px_lib_iommu_map failed\n");
725 
726 		return (DDI_FAILURE);
727 	}
728 
729 	if (!PX_MAP_BUFZONE(mp))
730 		goto done;
731 
732 	DBG(DBG_MAP_WIN, dip, "px_dvma_map_fast: redzone pg=%x\n", i + npages);
733 
734 	ASSERT(PX_HAS_REDZONE(mp));
735 
736 	if (px_lib_iommu_map(dip, PCI_TSBID(0, i + npages), 1, attr,
737 	    (void *)mp, npages - 1, MMU_MAP_PFN) != DDI_SUCCESS) {
738 		DBG(DBG_MAP_WIN, dip, "px_dvma_map_fast: "
739 		    "mapping REDZONE page failed\n");
740 
741 		(void) px_lib_iommu_demap(dip, PCI_TSBID(0, i), npages);
742 		return (DDI_FAILURE);
743 	}
744 
745 done:
746 #ifdef PX_DMA_PROF
747 	px_dvmaft_success++;
748 #endif
749 	mp->dmai_mapping = mp->dmai_roffset | MMU_PTOB(dvma_pg);
750 	mp->dmai_offset = 0;
751 	mp->dmai_flags |= PX_DMAI_FLAGS_FASTTRACK;
752 	PX_SAVE_MP_TTE(mp, attr);	/* save TTE template for unmapping */
753 	if (PX_DVMA_DBG_ON(mmu_p))
754 		px_dvma_alloc_debug(mmu_p, (char *)mp->dmai_mapping,
755 			mp->dmai_size, mp);
756 	return (DDI_SUCCESS);
757 }
758 
759 /*
760  * px_dvma_map: map non-fasttrack DMA
761  *		Use quantum cache if single page DMA.
762  */
763 int
764 px_dvma_map(ddi_dma_impl_t *mp, ddi_dma_req_t *dmareq, px_mmu_t *mmu_p)
765 {
766 	uint_t npages = PX_DMA_WINNPGS(mp);
767 	px_dvma_addr_t dvma_pg, dvma_pg_index;
768 	void *dvma_addr;
769 	uint64_t tte = PX_GET_TTE_ATTR(mp->dmai_rflags,
770 	    mp->dmai_attr.dma_attr_flags);
771 	int sleep = dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP;
772 	dev_info_t *dip = mp->dmai_rdip;
773 	int	ret = DDI_SUCCESS;
774 
775 	/*
776 	 * allocate dvma space resource and map in the first window.
777 	 * (vmem_t *vmp, size_t size,
778 	 *	size_t align, size_t phase, size_t nocross,
779 	 *	void *minaddr, void *maxaddr, int vmflag)
780 	 */
781 	if ((npages == 1) && !PX_HAS_REDZONE(mp) && PX_HAS_NOSYSLIMIT(mp)) {
782 		dvma_addr = vmem_alloc(mmu_p->mmu_dvma_map,
783 			MMU_PAGE_SIZE, sleep);
784 		mp->dmai_flags |= PX_DMAI_FLAGS_VMEMCACHE;
785 #ifdef	PX_DMA_PROF
786 		px_dvma_vmem_alloc++;
787 #endif	/* PX_DMA_PROF */
788 	} else {
789 		dvma_addr = vmem_xalloc(mmu_p->mmu_dvma_map,
790 			MMU_PTOB(npages + PX_HAS_REDZONE(mp)),
791 			MAX(mp->dmai_attr.dma_attr_align, MMU_PAGE_SIZE),
792 			0,
793 			mp->dmai_attr.dma_attr_seg + 1,
794 			(void *)mp->dmai_attr.dma_attr_addr_lo,
795 			(void *)(mp->dmai_attr.dma_attr_addr_hi + 1),
796 			sleep);
797 #ifdef	PX_DMA_PROF
798 		px_dvma_vmem_xalloc++;
799 #endif	/* PX_DMA_PROF */
800 	}
801 	dvma_pg = MMU_BTOP((ulong_t)dvma_addr);
802 	dvma_pg_index = dvma_pg - mmu_p->dvma_base_pg;
803 	DBG(DBG_DMA_MAP, dip, "fallback dvma_pages: dvma_pg=%x index=%x\n",
804 		dvma_pg, dvma_pg_index);
805 	if (dvma_pg == 0)
806 		goto noresource;
807 
808 	mp->dmai_mapping = mp->dmai_roffset | MMU_PTOB(dvma_pg);
809 	mp->dmai_offset = 0;
810 	PX_SAVE_MP_TTE(mp, tte);	/* mp->dmai_tte = tte */
811 
812 	if ((ret = px_mmu_map_pages(mmu_p,
813 	    mp, dvma_pg, npages, 0)) != DDI_SUCCESS) {
814 		if (mp->dmai_flags & PX_DMAI_FLAGS_VMEMCACHE) {
815 			vmem_free(mmu_p->mmu_dvma_map, (void *)dvma_addr,
816 			    MMU_PAGE_SIZE);
817 #ifdef PX_DMA_PROF
818 			px_dvma_vmem_free++;
819 #endif /* PX_DMA_PROF */
820 		} else {
821 			vmem_xfree(mmu_p->mmu_dvma_map, (void *)dvma_addr,
822 			    MMU_PTOB(npages + PX_HAS_REDZONE(mp)));
823 #ifdef PX_DMA_PROF
824 			px_dvma_vmem_xfree++;
825 #endif /* PX_DMA_PROF */
826 		}
827 	}
828 
829 	return (ret);
830 noresource:
831 	if (dmareq->dmar_fp != DDI_DMA_DONTWAIT) {
832 		DBG(DBG_DMA_MAP, dip, "dvma_pg 0 - set callback\n");
833 		ddi_set_callback(dmareq->dmar_fp, dmareq->dmar_arg,
834 			&mmu_p->mmu_dvma_clid);
835 	}
836 	DBG(DBG_DMA_MAP, dip, "vmem_xalloc - DDI_DMA_NORESOURCES\n");
837 	return (DDI_DMA_NORESOURCES);
838 }
839 
840 void
841 px_dvma_unmap(px_mmu_t *mmu_p, ddi_dma_impl_t *mp)
842 {
843 	px_dvma_addr_t dvma_addr = (px_dvma_addr_t)mp->dmai_mapping;
844 	px_dvma_addr_t dvma_pg = MMU_BTOP(dvma_addr);
845 	dvma_addr = MMU_PTOB(dvma_pg);
846 
847 	if (mp->dmai_flags & PX_DMAI_FLAGS_FASTTRACK) {
848 		px_iopfn_t index = dvma_pg - mmu_p->dvma_base_pg;
849 		ASSERT(index % px_dvma_page_cache_clustsz == 0);
850 		index /= px_dvma_page_cache_clustsz;
851 		ASSERT(index < px_dvma_page_cache_entries);
852 		mmu_p->mmu_dvma_cache_locks[index] = 0;
853 #ifdef	PX_DMA_PROF
854 		px_dvmaft_free++;
855 #endif	/* PX_DMA_PROF */
856 		return;
857 	}
858 
859 	if (mp->dmai_flags & PX_DMAI_FLAGS_VMEMCACHE) {
860 		vmem_free(mmu_p->mmu_dvma_map, (void *)dvma_addr,
861 			MMU_PAGE_SIZE);
862 #ifdef PX_DMA_PROF
863 		px_dvma_vmem_free++;
864 #endif /* PX_DMA_PROF */
865 	} else {
866 		size_t npages = MMU_BTOP(mp->dmai_winsize) + PX_HAS_REDZONE(mp);
867 		vmem_xfree(mmu_p->mmu_dvma_map, (void *)dvma_addr,
868 			MMU_PTOB(npages));
869 #ifdef PX_DMA_PROF
870 		px_dvma_vmem_xfree++;
871 #endif /* PX_DMA_PROF */
872 	}
873 }
874 
875 /*
876  * DVMA mappings may have multiple windows, but each window always has
877  * one segment.
878  */
879 int
880 px_dvma_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_impl_t *mp,
881 	enum ddi_dma_ctlops cmd, off_t *offp, size_t *lenp, caddr_t *objp,
882 	uint_t cache_flags)
883 {
884 	switch (cmd) {
885 	case DDI_DMA_SYNC:
886 		return (px_lib_dma_sync(dip, rdip, (ddi_dma_handle_t)mp,
887 		    *offp, *lenp, cache_flags));
888 
889 	case DDI_DMA_HTOC: {
890 		int ret;
891 		off_t wo_off, off = *offp;	/* wo_off: wnd's obj offset */
892 		uint_t win_size = mp->dmai_winsize;
893 		ddi_dma_cookie_t *cp = (ddi_dma_cookie_t *)objp;
894 
895 		if (off >= mp->dmai_object.dmao_size) {
896 			cmn_err(CE_WARN, "%s%d invalid dma_htoc offset %lx",
897 				NAMEINST(mp->dmai_rdip), off);
898 			return (DDI_FAILURE);
899 		}
900 		off += mp->dmai_roffset;
901 		ret = px_dma_win(dip, rdip, (ddi_dma_handle_t)mp,
902 		    off / win_size, &wo_off, NULL, cp, NULL); /* lenp == NULL */
903 		if (ret)
904 			return (ret);
905 		DBG(DBG_DMA_CTL, dip, "HTOC:cookie=%x+%lx off=%lx,%lx\n",
906 			cp->dmac_address, cp->dmac_size, off, *offp);
907 
908 		/* adjust cookie addr/len if we are not on window boundary */
909 		ASSERT((off % win_size) == (off -
910 			(PX_DMA_CURWIN(mp) ? mp->dmai_roffset : 0) - wo_off));
911 		off = PX_DMA_CURWIN(mp) ? off % win_size : *offp;
912 		ASSERT(cp->dmac_size > off);
913 		cp->dmac_laddress += off;
914 		cp->dmac_size -= off;
915 		DBG(DBG_DMA_CTL, dip, "HTOC:mp=%p cookie=%x+%lx off=%lx,%lx\n",
916 			mp, cp->dmac_address, cp->dmac_size, off, wo_off);
917 		}
918 		return (DDI_SUCCESS);
919 
920 	case DDI_DMA_REPWIN:
921 		*offp = mp->dmai_offset;
922 		*lenp = mp->dmai_size;
923 		return (DDI_SUCCESS);
924 
925 	case DDI_DMA_MOVWIN: {
926 		off_t off = *offp;
927 		if (off >= mp->dmai_object.dmao_size)
928 			return (DDI_FAILURE);
929 		off += mp->dmai_roffset;
930 		return (px_dma_win(dip, rdip, (ddi_dma_handle_t)mp,
931 		    off / mp->dmai_winsize, offp, lenp,
932 		    (ddi_dma_cookie_t *)objp, NULL));
933 		}
934 
935 	case DDI_DMA_NEXTWIN: {
936 		px_window_t win = PX_DMA_CURWIN(mp);
937 		if (offp) {
938 			if (*(px_window_t *)offp != win) {
939 				/* window not active */
940 				*(px_window_t *)objp = win; /* return cur win */
941 				return (DDI_DMA_STALE);
942 			}
943 			win++;
944 		} else	/* map win 0 */
945 			win = 0;
946 		if (win >= mp->dmai_nwin) {
947 			*(px_window_t *)objp = win - 1;
948 			return (DDI_DMA_DONE);
949 		}
950 		if (px_dma_win(dip, rdip, (ddi_dma_handle_t)mp,
951 		    win, 0, 0, 0, 0)) {
952 			*(px_window_t *)objp = win - 1;
953 			return (DDI_FAILURE);
954 		}
955 		*(px_window_t *)objp = win;
956 		}
957 		return (DDI_SUCCESS);
958 
959 	case DDI_DMA_NEXTSEG:
960 		if (*(px_window_t *)offp != PX_DMA_CURWIN(mp))
961 			return (DDI_DMA_STALE);
962 		if (lenp)				/* only 1 seg allowed */
963 			return (DDI_DMA_DONE);
964 
965 		/* return mp as seg 0 */
966 		*(ddi_dma_seg_t *)objp = (ddi_dma_seg_t)mp;
967 		return (DDI_SUCCESS);
968 
969 	case DDI_DMA_SEGTOC:
970 		MAKE_DMA_COOKIE((ddi_dma_cookie_t *)objp, mp->dmai_mapping,
971 			mp->dmai_size);
972 		*offp = mp->dmai_offset;
973 		*lenp = mp->dmai_size;
974 		return (DDI_SUCCESS);
975 
976 	case DDI_DMA_COFF: {
977 		ddi_dma_cookie_t *cp = (ddi_dma_cookie_t *)offp;
978 		if (cp->dmac_address < mp->dmai_mapping ||
979 			(cp->dmac_address + cp->dmac_size) >
980 			(mp->dmai_mapping + mp->dmai_size))
981 			return (DDI_FAILURE);
982 		*objp = (caddr_t)(cp->dmac_address - mp->dmai_mapping +
983 			mp->dmai_offset);
984 		}
985 		return (DDI_SUCCESS);
986 	default:
987 		DBG(DBG_DMA_CTL, dip, "unknown command (%x): rdip=%s%d\n",
988 			cmd, ddi_driver_name(rdip), ddi_get_instance(rdip));
989 		break;
990 	}
991 	return (DDI_FAILURE);
992 }
993 
994 void
995 px_dma_freewin(ddi_dma_impl_t *mp)
996 {
997 	px_dma_win_t *win_p = mp->dmai_winlst, *win2_p;
998 	for (win2_p = win_p; win_p; win2_p = win_p) {
999 		win_p = win2_p->win_next;
1000 		kmem_free(win2_p, sizeof (px_dma_win_t) +
1001 			sizeof (ddi_dma_cookie_t) * win2_p->win_ncookies);
1002 	}
1003 	mp->dmai_nwin = 0;
1004 	mp->dmai_winlst = NULL;
1005 }
1006 
1007 /*
1008  * px_dma_newwin - create a dma window object and cookies
1009  *
1010  *	After the initial scan in px_dma_physwin(), which identifies
1011  *	a portion of the pfn array that belongs to a dma window,
1012  *	we are called to allocate and initialize the memory resources that
1013  *	represent it. We know from the 1st scan the number of cookies
1014  *	or dma segments in this window, so we can allocate a contiguous
1015  *	memory array for the dma cookies (The implementation of
1016  *	ddi_dma_nextcookie(9f) dictates dma cookies be contiguous).
1017  *
1018  *	A second round scan is done on the pfn array to identify
1019  *	each dma segment and initialize its corresponding dma cookie.
1020  *	We don't need to repeat all the safety checks since we know the
1021  *	pfns all belong to the same dma window.
1022  *
1023  *	Input:	cookie_no - # of cookies identified by the 1st scan
1024  *		start_idx - subscript of the pfn array for the starting pfn
1025  *		end_idx   - subscript of the last pfn in dma window
1026  *		win_pp    - pointer to win_next member of previous window
1027  *	Return:	DDI_SUCCESS - with **win_pp as newly created window object
1028  *		DDI_DMA_NORESOURCES - caller frees all previous window objs
1029  *	Note:	Each cookie and each window size are initialized to whole
1030  *		pages. This is not true for the 1st cookie of the 1st
1031  *		window and the last cookie of the last window.
1032  *		We fix that up later in px_dma_adjust(), which has access
1033  *		to the size and offset info.
1034  *
1035  */
1036 /*ARGSUSED*/
1037 static int
1038 px_dma_newwin(dev_info_t *dip, ddi_dma_req_t *dmareq, ddi_dma_impl_t *mp,
1039 	uint32_t cookie_no, uint32_t start_idx, uint32_t end_idx,
1040 	px_dma_win_t **win_pp, uint64_t count_max, uint64_t bypass)
1041 {
1042 	int (*waitfp)(caddr_t) = dmareq->dmar_fp;
1043 	ddi_dma_cookie_t *cookie_p;
1044 	uint32_t pfn_no = 1;
1045 	px_iopfn_t pfn = PX_GET_MP_PFN(mp, start_idx);
1046 	px_iopfn_t prev_pfn = pfn;
1047 	uint64_t baddr, seg_pfn0 = pfn;
1048 	size_t sz = cookie_no * sizeof (ddi_dma_cookie_t);
1049 	px_dma_win_t *win_p = kmem_zalloc(sizeof (px_dma_win_t) + sz,
1050 		waitfp == DDI_DMA_SLEEP ? KM_SLEEP : KM_NOSLEEP);
1051 	io_attributes_t	attr = PX_GET_TTE_ATTR(mp->dmai_rflags,
1052 	    mp->dmai_attr.dma_attr_flags);
1053 
1054 	if (!win_p)
1055 		goto noresource;
1056 
1057 	win_p->win_next = NULL;
1058 	win_p->win_ncookies = cookie_no;
1059 	win_p->win_curseg = 0;	/* start from segment 0 */
1060 	win_p->win_size = MMU_PTOB(end_idx - start_idx + 1);
1061 	/* win_p->win_offset is left uninitialized */
1062 
1063 	cookie_p = (ddi_dma_cookie_t *)(win_p + 1);
1064 	start_idx++;
1065 	for (; start_idx <= end_idx; start_idx++, prev_pfn = pfn, pfn_no++) {
1066 		pfn = PX_GET_MP_PFN1(mp, start_idx);
1067 		if ((pfn == prev_pfn + 1) &&
1068 			(MMU_PTOB(pfn_no + 1) - 1 <= count_max))
1069 			continue;
1070 
1071 		/* close the current cookie, covering up to (and including) prev_pfn */
1072 		baddr = MMU_PTOB(seg_pfn0);
1073 		if (bypass && (px_lib_iommu_getbypass(dip,
1074 				baddr, attr, &baddr) != DDI_SUCCESS))
1075 			return (DDI_FAILURE);
1076 
1077 		MAKE_DMA_COOKIE(cookie_p, baddr, MMU_PTOB(pfn_no));
1078 		DBG(DBG_BYPASS, mp->dmai_rdip, "cookie %p (%x pages)\n",
1079 			MMU_PTOB(seg_pfn0), pfn_no);
1080 
1081 		cookie_p++;	/* advance to next available cookie cell */
1082 		pfn_no = 0;
1083 		seg_pfn0 = pfn;	/* start a new segment from current pfn */
1084 	}
1085 
1086 	baddr = MMU_PTOB(seg_pfn0);
1087 	if (bypass && (px_lib_iommu_getbypass(dip,
1088 			baddr, attr, &baddr) != DDI_SUCCESS))
1089 		return (DDI_FAILURE);
1090 
1091 	MAKE_DMA_COOKIE(cookie_p, baddr, MMU_PTOB(pfn_no));
1092 	DBG(DBG_BYPASS, mp->dmai_rdip, "cookie %p (%x pages) of total %x\n",
1093 		MMU_PTOB(seg_pfn0), pfn_no, cookie_no);
1094 #ifdef	DEBUG
1095 	cookie_p++;
1096 	ASSERT((cookie_p - (ddi_dma_cookie_t *)(win_p + 1)) == cookie_no);
1097 #endif	/* DEBUG */
1098 	*win_pp = win_p;
1099 	return (DDI_SUCCESS);
1100 noresource:
1101 	if (waitfp != DDI_DMA_DONTWAIT)
1102 		ddi_set_callback(waitfp, dmareq->dmar_arg, &px_kmem_clid);
1103 	return (DDI_DMA_NORESOURCES);
1104 }
1105 
1106 /*
1107  * px_dma_adjust - adjust 1st and last cookie and window sizes
1108  *	remove initial dma page offset from 1st cookie and window size
1109  *	remove last dma page remainder from last cookie and window size
1110  *	fill win_offset of each dma window according to the just-fixed-up
1111  *		window sizes
1112  *	px_dma_win_t members modified:
1113  *	win_p->win_offset - this window's offset within entire DMA object
1114  *	win_p->win_size	  - xferrable size (in bytes) for this window
1115  *
1116  *	ddi_dma_impl_t members modified:
1117  *	mp->dmai_size	  - 1st window xferrable size
1118  *	mp->dmai_offset   - 0, which is the dma offset of the 1st window
1119  *
1120  *	ddi_dma_cookie_t members modified:
1121  *	cookie_p->dmac_size - 1st and last cookie remove offset or remainder
1122  *	cookie_p->dmac_laddress - 1st cookie add page offset
1123  */
1124 static void
1125 px_dma_adjust(ddi_dma_req_t *dmareq, ddi_dma_impl_t *mp, px_dma_win_t *win_p)
1126 {
1127 	ddi_dma_cookie_t *cookie_p = (ddi_dma_cookie_t *)(win_p + 1);
1128 	size_t pg_offset = mp->dmai_roffset;
1129 	size_t win_offset = 0;
1130 
1131 	cookie_p->dmac_size -= pg_offset;
1132 	cookie_p->dmac_laddress |= pg_offset;
1133 	win_p->win_size -= pg_offset;
1134 	DBG(DBG_BYPASS, mp->dmai_rdip, "pg0 adjust %lx\n", pg_offset);
1135 
1136 	mp->dmai_size = win_p->win_size;
1137 	mp->dmai_offset = 0;
1138 
1139 	pg_offset += mp->dmai_object.dmao_size;
1140 	pg_offset &= MMU_PAGE_OFFSET;
1141 	if (pg_offset)
1142 		pg_offset = MMU_PAGE_SIZE - pg_offset;
1143 	DBG(DBG_BYPASS, mp->dmai_rdip, "last pg adjust %lx\n", pg_offset);
1144 
1145 	for (; win_p->win_next; win_p = win_p->win_next) {
1146 		DBG(DBG_BYPASS, mp->dmai_rdip, "win off %p\n", win_offset);
1147 		win_p->win_offset = win_offset;
1148 		win_offset += win_p->win_size;
1149 	}
1150 	/* last window */
1151 	win_p->win_offset = win_offset;
1152 	cookie_p = (ddi_dma_cookie_t *)(win_p + 1);
1153 	cookie_p[win_p->win_ncookies - 1].dmac_size -= pg_offset;
1154 	win_p->win_size -= pg_offset;
1155 	ASSERT((win_offset + win_p->win_size) == mp->dmai_object.dmao_size);
1156 }
1157 
1158 /*
1159  * px_dma_physwin() - carve up dma windows using physical addresses.
1160  *	Called to handle mmu bypass and pci peer-to-peer transfers.
1161  *	Calls px_dma_newwin() to allocate window objects.
1162  *
1163  * Dependency: mp->dmai_pfnlst points to an array of pfns
1164  *
1165  * 1. Each dma window is represented by a px_dma_win_t object.
1166  *	The object will be cast to ddi_dma_win_t and returned
1167  *	to the leaf driver through the DDI interface.
1168  * 2. Each dma window can have several dma segments, with each
1169  *	segment representing a physically contiguous range of either
1170  *	memory space (if we are doing an mmu bypass transfer) or pci
1171  *	address space (if we are doing a peer-to-peer transfer).
1172  * 3. Each segment has a DMA cookie to program the DMA engine.
1173  *	The cookies within each DMA window must be located in a
1174  *	contiguous array per ddi_dma_nextcookie(9f).
1175  * 4. The number of DMA segments within each DMA window cannot exceed
1176  *	mp->dmai_attr.dma_attr_sgllen. If the transfer size is
1177  *	too large to fit in the sgllen, the rest needs to be
1178  *	relocated to the next dma window.
1179  * 5. Peer-to-peer DMA segments follow device hi, lo, count_max,
1180  *	and nocross restrictions while bypass DMA follows the set of
1181  *	restrictions with system limits factored in.
1182  *
1183  * Return:
1184  *	mp->dmai_winlst	 - points to a link list of px_dma_win_t objects.
1185  *		Each px_dma_win_t object on the link list contains
1186  *		information such as its window size (# of pages),
1187  *		starting offset (also see Restriction), an array of
1188  *		DMA cookies, and # of cookies in the array.
1189  *	mp->dmai_pfnlst	 - NULL, the pfn list is freed to conserve memory.
1190  *	mp->dmai_nwin	 - # of total DMA windows on mp->dmai_winlst.
1191  *	mp->dmai_mapping - starting cookie address
1192  *	mp->dmai_rflags	 - consistent, nosync, no redzone
1193  *	mp->dmai_cookie	 - start of cookie table of the 1st DMA window
1194  *
1195  * Restriction:
1196  *	Each px_dma_win_t object can theoretically start from any offset
1197  *	since the mmu is not involved. However, this implementation
1198  *	always makes windows start at a page-aligned offset (except
1199  *	the 1st window, which follows the requested offset) because
1200  *	we are handed a pfn list. This does require the device's
1201  *	count_max and attr_seg to be at least MMU_PAGE_SIZE aligned.
1202  */
1203 int
1204 px_dma_physwin(px_t *px_p, ddi_dma_req_t *dmareq, ddi_dma_impl_t *mp)
1205 {
1206 	uint_t npages = mp->dmai_ndvmapages;
1207 	int ret, sgllen = mp->dmai_attr.dma_attr_sgllen;
1208 	px_iopfn_t pfn_lo, pfn_hi, prev_pfn;
1209 	px_iopfn_t pfn = PX_GET_MP_PFN(mp, 0);
1210 	uint32_t i, win_no = 0, pfn_no = 1, win_pfn0_index = 0, cookie_no = 0;
1211 	uint64_t count_max, bypass_addr = 0;
1212 	px_dma_win_t **win_pp = (px_dma_win_t **)&mp->dmai_winlst;
1213 	ddi_dma_cookie_t *cookie0_p;
1214 	io_attributes_t attr = PX_GET_TTE_ATTR(mp->dmai_rflags,
1215 	    mp->dmai_attr.dma_attr_flags);
1216 	dev_info_t *dip = px_p->px_dip;
1217 
1218 	ASSERT(PX_DMA_ISPTP(mp) || PX_DMA_ISBYPASS(mp));
1219 	if (PX_DMA_ISPTP(mp)) { /* ignore sys limits for peer-to-peer */
1220 		ddi_dma_attr_t *dev_attr_p = PX_DEV_ATTR(mp);
1221 		uint64_t nocross = dev_attr_p->dma_attr_seg;
1222 		px_pec_t *pec_p = px_p->px_pec_p;
1223 		px_iopfn_t pfn_last = PX_DMA_ISPTP32(mp) ?
1224 				pec_p->pec_last32_pfn - pec_p->pec_base32_pfn :
1225 				pec_p->pec_last64_pfn - pec_p->pec_base64_pfn;
1226 
1227 		if (nocross && (nocross < UINT32_MAX))
1228 			return (DDI_DMA_NOMAPPING);
1229 		if (dev_attr_p->dma_attr_align > MMU_PAGE_SIZE)
1230 			return (DDI_DMA_NOMAPPING);
1231 		pfn_lo = MMU_BTOP(dev_attr_p->dma_attr_addr_lo);
1232 		pfn_hi = MMU_BTOP(dev_attr_p->dma_attr_addr_hi);
1233 		pfn_hi = MIN(pfn_hi, pfn_last);
1234 		if ((pfn_lo > pfn_hi) || (pfn < pfn_lo))
1235 			return (DDI_DMA_NOMAPPING);
1236 
1237 		count_max = dev_attr_p->dma_attr_count_max;
1238 		count_max = MIN(count_max, nocross);
1239 		/*
1240 		 * the following count_max trim is not done because we are
1241 		 * making sure pfn_lo <= pfn <= pfn_hi inside the loop
1242 		 * count_max=MIN(count_max, MMU_PTOB(pfn_hi - pfn_lo + 1)-1);
1243 		 */
1244 	} else { /* bypass hi/lo/count_max have been processed by attr2hdl() */
1245 		count_max = mp->dmai_attr.dma_attr_count_max;
1246 		pfn_lo = MMU_BTOP(mp->dmai_attr.dma_attr_addr_lo);
1247 		pfn_hi = MMU_BTOP(mp->dmai_attr.dma_attr_addr_hi);
1248 
1249 		if (px_lib_iommu_getbypass(dip, MMU_PTOB(pfn),
1250 				attr, &bypass_addr) != DDI_SUCCESS) {
1251 			cmn_err(CE_WARN, "bypass cookie failure %lx\n", pfn);
1252 			return (DDI_DMA_NOMAPPING);
1253 		}
1254 		pfn = MMU_BTOP(bypass_addr);
1255 	}
1256 
1257 	/* pfn: absolute (bypass mode) or relative (p2p mode) */
1258 	for (prev_pfn = pfn, i = 1; i < npages;
1259 	    i++, prev_pfn = pfn, pfn_no++) {
1260 		pfn = PX_GET_MP_PFN1(mp, i);
1261 		if (bypass_addr) {
1262 			if (px_lib_iommu_getbypass(dip, MMU_PTOB(pfn), attr,
1263 					&bypass_addr) != DDI_SUCCESS) {
1264 				ret = DDI_DMA_NOMAPPING;
1265 				goto err;
1266 			}
1267 			pfn = MMU_BTOP(bypass_addr);
1268 		}
1269 		if ((pfn == prev_pfn + 1) &&
1270 				(MMU_PTOB(pfn_no + 1) - 1 <= count_max))
1271 			continue;
1272 		if ((pfn < pfn_lo) || (prev_pfn > pfn_hi)) {
1273 			ret = DDI_DMA_NOMAPPING;
1274 			goto err;
1275 		}
1276 		cookie_no++;
1277 		pfn_no = 0;
1278 		if (cookie_no < sgllen)
1279 			continue;
1280 
1281 		DBG(DBG_BYPASS, mp->dmai_rdip, "newwin pfn[%x-%x] %x cks\n",
1282 			win_pfn0_index, i - 1, cookie_no);
1283 		if (ret = px_dma_newwin(dip, dmareq, mp, cookie_no,
1284 			win_pfn0_index, i - 1, win_pp, count_max, bypass_addr))
1285 			goto err;
1286 
1287 		win_pp = &(*win_pp)->win_next;	/* advance to the new window's win_next */
1288 		win_no++;
1289 		win_pfn0_index = i;
1290 		cookie_no = 0;
1291 	}
1292 	if (pfn > pfn_hi) {
1293 		ret = DDI_DMA_NOMAPPING;
1294 		goto err;
1295 	}
1296 	cookie_no++;
1297 	DBG(DBG_BYPASS, mp->dmai_rdip, "newwin pfn[%x-%x] %x cks\n",
1298 		win_pfn0_index, i - 1, cookie_no);
1299 	if (ret = px_dma_newwin(dip, dmareq, mp, cookie_no, win_pfn0_index,
1300 		i - 1, win_pp, count_max, bypass_addr))
1301 		goto err;
1302 	win_no++;
1303 	px_dma_adjust(dmareq, mp, mp->dmai_winlst);
1304 	mp->dmai_nwin = win_no;
1305 	mp->dmai_rflags |= DDI_DMA_CONSISTENT | DMP_NOSYNC;
1306 	mp->dmai_rflags &= ~DDI_DMA_REDZONE;
1307 	mp->dmai_flags |= PX_DMAI_FLAGS_NOSYNC;
1308 	cookie0_p = (ddi_dma_cookie_t *)(PX_WINLST(mp) + 1);
1309 	mp->dmai_cookie = PX_WINLST(mp)->win_ncookies > 1 ? cookie0_p + 1 : 0;
1310 	mp->dmai_mapping = cookie0_p->dmac_laddress;
1311 
1312 	px_dma_freepfn(mp);
1313 	return (DDI_DMA_MAPPED);
1314 err:
1315 	px_dma_freewin(mp);
1316 	return (ret);
1317 }
1318 
1319 int
1320 px_dma_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_impl_t *mp,
1321 	enum ddi_dma_ctlops cmd, off_t *offp, size_t *lenp, caddr_t *objp,
1322 	uint_t cache_flags)
1323 {
1324 	switch (cmd) {
1325 	case DDI_DMA_SYNC:
1326 		return (DDI_SUCCESS);
1327 
1328 	case DDI_DMA_HTOC: {
1329 		off_t off = *offp;
1330 		ddi_dma_cookie_t *loop_cp, *cp;
1331 		px_dma_win_t *win_p = mp->dmai_winlst;
1332 
1333 		if (off >= mp->dmai_object.dmao_size)
1334 			return (DDI_FAILURE);
1335 
1336 		/* locate window */
1337 		while (win_p->win_offset + win_p->win_size <= off)
1338 			win_p = win_p->win_next;
1339 
1340 		loop_cp = cp = (ddi_dma_cookie_t *)(win_p + 1);
1341 		mp->dmai_offset = win_p->win_offset;
1342 		mp->dmai_size   = win_p->win_size;
1343 		mp->dmai_mapping = cp->dmac_laddress; /* cookie0 start addr */
1344 
1345 		/* adjust cookie addr/len if we are not on cookie boundary */
1346 		off -= win_p->win_offset;	   /* offset within window */
1347 		for (; off >= loop_cp->dmac_size; loop_cp++)
1348 			off -= loop_cp->dmac_size; /* offset within cookie */
1349 
1350 		mp->dmai_cookie = loop_cp + 1;
1351 		win_p->win_curseg = loop_cp - cp;
1352 		cp = (ddi_dma_cookie_t *)objp;
1353 		MAKE_DMA_COOKIE(cp, loop_cp->dmac_laddress + off,
1354 			loop_cp->dmac_size - off);
1355 
1356 		DBG(DBG_DMA_CTL, dip,
1357 			"HTOC: cookie - dmac_laddress=%p dmac_size=%x\n",
1358 			cp->dmac_laddress, cp->dmac_size);
1359 		}
1360 		return (DDI_SUCCESS);
1361 
1362 	case DDI_DMA_REPWIN:
1363 		*offp = mp->dmai_offset;
1364 		*lenp = mp->dmai_size;
1365 		return (DDI_SUCCESS);
1366 
1367 	case DDI_DMA_MOVWIN: {
1368 		off_t off = *offp;
1369 		ddi_dma_cookie_t *cp;
1370 		px_dma_win_t *win_p = mp->dmai_winlst;
1371 
1372 		if (off >= mp->dmai_object.dmao_size)
1373 			return (DDI_FAILURE);
1374 
1375 		/* locate window */
1376 		while (win_p->win_offset + win_p->win_size <= off)
1377 			win_p = win_p->win_next;
1378 
1379 		cp = (ddi_dma_cookie_t *)(win_p + 1);
1380 		mp->dmai_offset = win_p->win_offset;
1381 		mp->dmai_size   = win_p->win_size;
1382 		mp->dmai_mapping = cp->dmac_laddress;	/* cookie0 start addr */
1383 		mp->dmai_cookie = cp + 1;
1384 		win_p->win_curseg = 0;
1385 
1386 		*(ddi_dma_cookie_t *)objp = *cp;
1387 		*offp = win_p->win_offset;
1388 		*lenp = win_p->win_size;
1389 		DBG(DBG_DMA_CTL, dip,
1390 			"MOVWIN: cookie - dmac_laddress=%p dmac_size=%x\n",
1391 			cp->dmac_laddress, cp->dmac_size);
1392 		}
1393 		return (DDI_SUCCESS);
1394 
1395 	case DDI_DMA_NEXTWIN: {
1396 		px_dma_win_t *win_p = *(px_dma_win_t **)offp;
1397 		px_dma_win_t **nw_pp = (px_dma_win_t **)objp;
1398 		ddi_dma_cookie_t *cp;
1399 		if (!win_p) {
1400 			*nw_pp = mp->dmai_winlst;
1401 			return (DDI_SUCCESS);
1402 		}
1403 
1404 		if (win_p->win_offset != mp->dmai_offset)
1405 			return (DDI_DMA_STALE);
1406 		if (!win_p->win_next)
1407 			return (DDI_DMA_DONE);
1408 		win_p = win_p->win_next;
1409 		cp = (ddi_dma_cookie_t *)(win_p + 1);
1410 		mp->dmai_offset = win_p->win_offset;
1411 		mp->dmai_size   = win_p->win_size;
1412 		mp->dmai_mapping = cp->dmac_laddress;   /* cookie0 start addr */
1413 		mp->dmai_cookie = cp + 1;
1414 		win_p->win_curseg = 0;
1415 		*nw_pp = win_p;
1416 		}
1417 		return (DDI_SUCCESS);
1418 
1419 	case DDI_DMA_NEXTSEG: {
1420 		px_dma_win_t *w_p = *(px_dma_win_t **)offp;
1421 		if (w_p->win_offset != mp->dmai_offset)
1422 			return (DDI_DMA_STALE);
1423 		if (w_p->win_curseg + 1 >= w_p->win_ncookies)
1424 			return (DDI_DMA_DONE);
1425 		w_p->win_curseg++;
1426 		}
1427 		*(ddi_dma_seg_t *)objp = (ddi_dma_seg_t)mp;
1428 		return (DDI_SUCCESS);
1429 
1430 	case DDI_DMA_SEGTOC: {
1431 		px_dma_win_t *win_p = mp->dmai_winlst;
1432 		off_t off = mp->dmai_offset;
1433 		ddi_dma_cookie_t *cp;
1434 		int i;
1435 
1436 		/* locate active window */
1437 		for (; win_p->win_offset != off; win_p = win_p->win_next);
1438 		cp = (ddi_dma_cookie_t *)(win_p + 1);
1439 		for (i = 0; i < win_p->win_curseg; i++, cp++)
1440 			off += cp->dmac_size;
1441 		*offp = off;
1442 		*lenp = cp->dmac_size;
1443 		*(ddi_dma_cookie_t *)objp = *cp;	/* copy cookie */
1444 		}
1445 		return (DDI_SUCCESS);
1446 
1447 	case DDI_DMA_COFF: {
1448 		px_dma_win_t *win_p;
1449 		ddi_dma_cookie_t *cp;
1450 		uint64_t addr, key = ((ddi_dma_cookie_t *)offp)->dmac_laddress;
1451 		size_t win_off;
1452 
1453 		for (win_p = mp->dmai_winlst; win_p; win_p = win_p->win_next) {
1454 			int i;
1455 			win_off = 0;
1456 			cp = (ddi_dma_cookie_t *)(win_p + 1);
1457 			for (i = 0; i < win_p->win_ncookies; i++, cp++) {
1458 				size_t sz = cp->dmac_size;
1459 
1460 				addr = cp->dmac_laddress;
1461 				if ((addr <= key) && (addr + sz >= key))
1462 					goto found;
1463 				win_off += sz;
1464 			}
1465 		}
1466 		return (DDI_FAILURE);
1467 found:
1468 		*objp = (caddr_t)(win_p->win_offset + win_off + (key - addr));
1469 		return (DDI_SUCCESS);
1470 		}
1471 	default:
1472 		DBG(DBG_DMA_CTL, dip, "unknown command (%x): rdip=%s%d\n",
1473 			cmd, ddi_driver_name(rdip), ddi_get_instance(rdip));
1474 		break;
1475 	}
1476 	return (DDI_FAILURE);
1477 }
1478 
1479 static void
1480 px_dvma_debug_init(px_mmu_t *mmu_p)
1481 {
1482 	size_t sz = sizeof (struct px_dvma_rec) * px_dvma_debug_rec;
1483 	ASSERT(MUTEX_HELD(&mmu_p->dvma_debug_lock));
1484 	cmn_err(CE_NOTE, "PCI Express DVMA %p stat ON", mmu_p);
1485 
1486 	mmu_p->dvma_alloc_rec = kmem_alloc(sz, KM_SLEEP);
1487 	mmu_p->dvma_free_rec = kmem_alloc(sz, KM_SLEEP);
1488 
1489 	mmu_p->dvma_active_list = NULL;
1490 	mmu_p->dvma_alloc_rec_index = 0;
1491 	mmu_p->dvma_free_rec_index = 0;
1492 	mmu_p->dvma_active_count = 0;
1493 }
1494 
1495 void
1496 px_dvma_debug_fini(px_mmu_t *mmu_p)
1497 {
1498 	struct px_dvma_rec *prev, *ptr;
1499 	size_t sz = sizeof (struct px_dvma_rec) * px_dvma_debug_rec;
1500 	uint64_t mask = ~(1ull << mmu_p->mmu_inst);
1501 	cmn_err(CE_NOTE, "PCI Express DVMA %p stat OFF", mmu_p);
1502 
1503 	kmem_free(mmu_p->dvma_alloc_rec, sz);
1504 	kmem_free(mmu_p->dvma_free_rec, sz);
1505 	mmu_p->dvma_alloc_rec = mmu_p->dvma_free_rec = NULL;
1506 
1507 	prev = mmu_p->dvma_active_list;
1508 	if (!prev)
1509 		return;
1510 	for (ptr = prev->next; ptr; prev = ptr, ptr = ptr->next)
1511 		kmem_free(prev, sizeof (struct px_dvma_rec));
1512 	kmem_free(prev, sizeof (struct px_dvma_rec));
1513 
1514 	mmu_p->dvma_active_list = NULL;
1515 	mmu_p->dvma_alloc_rec_index = 0;
1516 	mmu_p->dvma_free_rec_index = 0;
1517 	mmu_p->dvma_active_count = 0;
1518 
1519 	px_dvma_debug_off &= mask;
1520 	px_dvma_debug_on &= mask;
1521 }
1522 
1523 void
1524 px_dvma_alloc_debug(px_mmu_t *mmu_p, char *address, uint_t len,
1525 	ddi_dma_impl_t *mp)
1526 {
1527 	struct px_dvma_rec *ptr;
1528 	mutex_enter(&mmu_p->dvma_debug_lock);
1529 
1530 	if (!mmu_p->dvma_alloc_rec)
1531 		px_dvma_debug_init(mmu_p);
1532 	if (PX_DVMA_DBG_OFF(mmu_p)) {
1533 		px_dvma_debug_fini(mmu_p);
1534 		goto done;
1535 	}
1536 
1537 	ptr = &mmu_p->dvma_alloc_rec[mmu_p->dvma_alloc_rec_index];
1538 	ptr->dvma_addr = address;
1539 	ptr->len = len;
1540 	ptr->mp = mp;
1541 	if (++mmu_p->dvma_alloc_rec_index == px_dvma_debug_rec)
1542 		mmu_p->dvma_alloc_rec_index = 0;
1543 
1544 	ptr = kmem_alloc(sizeof (struct px_dvma_rec), KM_SLEEP);
1545 	ptr->dvma_addr = address;
1546 	ptr->len = len;
1547 	ptr->mp = mp;
1548 
1549 	ptr->next = mmu_p->dvma_active_list;
1550 	mmu_p->dvma_active_list = ptr;
1551 	mmu_p->dvma_active_count++;
1552 done:
1553 	mutex_exit(&mmu_p->dvma_debug_lock);
1554 }
1555 
1556 void
1557 px_dvma_free_debug(px_mmu_t *mmu_p, char *address, uint_t len,
1558     ddi_dma_impl_t *mp)
1559 {
1560 	struct px_dvma_rec *ptr, *ptr_save;
1561 	mutex_enter(&mmu_p->dvma_debug_lock);
1562 
1563 	if (!mmu_p->dvma_alloc_rec)
1564 		px_dvma_debug_init(mmu_p);
1565 	if (PX_DVMA_DBG_OFF(mmu_p)) {
1566 		px_dvma_debug_fini(mmu_p);
1567 		goto done;
1568 	}
1569 
1570 	ptr = &mmu_p->dvma_free_rec[mmu_p->dvma_free_rec_index];
1571 	ptr->dvma_addr = address;
1572 	ptr->len = len;
1573 	ptr->mp = mp;
1574 	if (++mmu_p->dvma_free_rec_index == px_dvma_debug_rec)
1575 		mmu_p->dvma_free_rec_index = 0;
1576 
1577 	ptr_save = mmu_p->dvma_active_list;
1578 	for (ptr = ptr_save; ptr; ptr = ptr->next) {
1579 		if ((ptr->dvma_addr == address) && (ptr->len == len))
1580 			break;
1581 		ptr_save = ptr;
1582 	}
1583 	if (!ptr) {
1584 		cmn_err(CE_WARN, "bad dvma free addr=%lx len=%x",
1585 			(long)address, len);
1586 		goto done;
1587 	}
1588 	if (ptr == mmu_p->dvma_active_list)
1589 		mmu_p->dvma_active_list = ptr->next;
1590 	else
1591 		ptr_save->next = ptr->next;
1592 	kmem_free(ptr, sizeof (struct px_dvma_rec));
1593 	mmu_p->dvma_active_count--;
1594 done:
1595 	mutex_exit(&mmu_p->dvma_debug_lock);
1596 }
1597 
1598 #ifdef	DEBUG
1599 void
1600 px_dump_dma_handle(uint64_t flag, dev_info_t *dip, ddi_dma_impl_t *hp)
1601 {
1602 	DBG(flag, dip, "mp(%p): flags=%x mapping=%lx xfer_size=%x\n",
1603 		hp, hp->dmai_inuse, hp->dmai_mapping, hp->dmai_size);
1604 	DBG(flag|DBG_CONT, dip, "\tnpages=%x roffset=%x rflags=%x nwin=%x\n",
1605 		hp->dmai_ndvmapages, hp->dmai_roffset, hp->dmai_rflags,
1606 		hp->dmai_nwin);
1607 	DBG(flag|DBG_CONT, dip, "\twinsize=%x tte=%p pfnlst=%p pfn0=%p\n",
1608 		hp->dmai_winsize, hp->dmai_tte, hp->dmai_pfnlst, hp->dmai_pfn0);
1609 	DBG(flag|DBG_CONT, dip, "\twinlst=%x obj=%p attr=%p ckp=%p\n",
1610 		hp->dmai_winlst, &hp->dmai_object, &hp->dmai_attr,
1611 		hp->dmai_cookie);
1612 }
1613 #endif	/* DEBUG */
1614