xref: /illumos-gate/usr/src/uts/sun4u/io/iommu.c (revision 88f8b78a88cbdc6d8c1af5c3e54bc49d25095c98)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/types.h>
30 #include <sys/param.h>
31 #include <sys/conf.h>
32 #include <sys/ddi.h>
33 #include <sys/sunddi.h>
34 #include <sys/ddi_impldefs.h>
35 #include <sys/cmn_err.h>
36 #include <sys/kmem.h>
37 #include <sys/vmem.h>
38 #include <sys/sysmacros.h>
39 
40 #include <sys/ddidmareq.h>
41 #include <sys/sysiosbus.h>
42 #include <sys/iommu.h>
43 #include <sys/iocache.h>
44 #include <sys/dvma.h>
45 
46 #include <vm/as.h>
47 #include <vm/hat.h>
48 #include <vm/page.h>
49 #include <vm/hat_sfmmu.h>
50 #include <sys/machparam.h>
51 #include <sys/machsystm.h>
52 #include <sys/vmsystm.h>
53 #include <sys/iommutsb.h>
54 
55 /* Useful debugging stuff */
56 #include <sys/nexusdebug.h>
57 #include <sys/debug.h>
58 /* Bitfield debugging definitions for this file */
59 #define	IOMMU_GETDVMAPAGES_DEBUG	0x1
60 #define	IOMMU_DMAMAP_DEBUG		0x2
61 #define	IOMMU_DMAMCTL_DEBUG		0x4
62 #define	IOMMU_DMAMCTL_SYNC_DEBUG	0x8
63 #define	IOMMU_DMAMCTL_HTOC_DEBUG	0x10
64 #define	IOMMU_DMAMCTL_KVADDR_DEBUG	0x20
65 #define	IOMMU_DMAMCTL_NEXTWIN_DEBUG	0x40
66 #define	IOMMU_DMAMCTL_NEXTSEG_DEBUG	0x80
67 #define	IOMMU_DMAMCTL_MOVWIN_DEBUG	0x100
68 #define	IOMMU_DMAMCTL_REPWIN_DEBUG	0x200
69 #define	IOMMU_DMAMCTL_GETERR_DEBUG	0x400
70 #define	IOMMU_DMAMCTL_COFF_DEBUG	0x800
71 #define	IOMMU_DMAMCTL_DMA_FREE_DEBUG	0x1000
72 #define	IOMMU_REGISTERS_DEBUG		0x2000
73 #define	IOMMU_DMA_SETUP_DEBUG		0x4000
74 #define	IOMMU_DMA_UNBINDHDL_DEBUG	0x8000
75 #define	IOMMU_DMA_BINDHDL_DEBUG		0x10000
76 #define	IOMMU_DMA_WIN_DEBUG		0x20000
77 #define	IOMMU_DMA_ALLOCHDL_DEBUG	0x40000
78 #define	IOMMU_DMA_LIM_SETUP_DEBUG	0x80000
79 #define	IOMMU_FASTDMA_RESERVE		0x100000
80 #define	IOMMU_FASTDMA_LOAD		0x200000
81 #define	IOMMU_INTER_INTRA_XFER		0x400000
82 #define	IOMMU_TTE			0x800000
83 #define	IOMMU_TLB			0x1000000
84 #define	IOMMU_FASTDMA_SYNC		0x2000000
85 
86 /* Turn on if you need to keep track of outstanding IOMMU usage */
87 /* #define	IO_MEMUSAGE */
88 /* Turn on to debug IOMMU unmapping code */
89 /* #define	IO_MEMDEBUG */
90 
91 static struct dvma_ops iommu_dvma_ops = {
92 	DVMAO_REV,
93 	iommu_dvma_kaddr_load,
94 	iommu_dvma_unload,
95 	iommu_dvma_sync
96 };
97 
98 extern void *sbusp;		/* sbus soft state hook */
99 
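/*
 * Largest DVMA allocation (in bytes) served from the vmem arena's quantum
 * caches; passed as qcache_max to vmem_create() below.
 */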
100 #define	DVMA_MAX_CACHE	65536
101 
102 /*
103  * This is the number of pages that a mapping request needs before we force
104  * the TLB flush code to use diagnostic registers.  This value was determined
105  * through a series of test runs measuring DMA mapping setup performance.
106  */
107 int tlb_flush_using_diag = 16;
108 
109 int sysio_iommu_tsb_sizes[] = {
110 	IOMMU_TSB_SIZE_8M,
111 	IOMMU_TSB_SIZE_16M,
112 	IOMMU_TSB_SIZE_32M,
113 	IOMMU_TSB_SIZE_64M,
114 	IOMMU_TSB_SIZE_128M,
115 	IOMMU_TSB_SIZE_256M,
116 	IOMMU_TSB_SIZE_512M,
117 	IOMMU_TSB_SIZE_1G
118 };
119 
120 static int iommu_map_window(ddi_dma_impl_t *, off_t, size_t);
121 
122 int
123 iommu_init(struct sbus_soft_state *softsp, caddr_t address)
124 {
125 	int i;
126 	char name[40];
127 
128 #ifdef DEBUG
129 	debug_info = 1;
130 #endif
131 
132 	/*
133 	 * Simply add each register's offset to the base address
134 	 * to calculate the already mapped virtual address of
135 	 * the device register...
136 	 *
137 	 * Define a macro for the pointer arithmetic; all registers
138 	 * are 64 bits wide and are defined as uint64_t's.
139 	 */
140 
141 #define	REG_ADDR(b, o)	(uint64_t *)((caddr_t)(b) + (o))
142 
143 	softsp->iommu_ctrl_reg = REG_ADDR(address, OFF_IOMMU_CTRL_REG);
144 	softsp->tsb_base_addr = REG_ADDR(address, OFF_TSB_BASE_ADDR);
145 	softsp->iommu_flush_reg = REG_ADDR(address, OFF_IOMMU_FLUSH_REG);
146 	softsp->iommu_tlb_tag = REG_ADDR(address, OFF_IOMMU_TLB_TAG);
147 	softsp->iommu_tlb_data = REG_ADDR(address, OFF_IOMMU_TLB_DATA);
148 
149 #undef REG_ADDR
150 
151 	mutex_init(&softsp->dma_pool_lock, NULL, MUTEX_DEFAULT, NULL);
152 	mutex_init(&softsp->intr_poll_list_lock, NULL, MUTEX_DEFAULT, NULL);
153 
154 	/* Set up the DVMA resource sizes */
155 	if ((softsp->iommu_tsb_cookie = iommu_tsb_alloc(softsp->upa_id)) ==
156 	    IOMMU_TSB_COOKIE_NONE) {
157 		cmn_err(CE_WARN, "%s%d: Unable to retrieve IOMMU array.",
158 		    ddi_driver_name(softsp->dip),
159 		    ddi_get_instance(softsp->dip));
160 		return (DDI_FAILURE);
161 	}
162 	softsp->soft_tsb_base_addr =
163 	    iommu_tsb_cookie_to_va(softsp->iommu_tsb_cookie);
164 	softsp->iommu_dvma_size =
165 	    iommu_tsb_cookie_to_size(softsp->iommu_tsb_cookie) <<
166 	    IOMMU_TSB_TO_RNG;
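	/*
	 * Place the DVMA arena at the top of the 32-bit IO address space;
	 * in 32-bit arithmetic, (0 - size) is (2^32 - size).
	 */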
167 	softsp->iommu_dvma_base = (ioaddr_t)
168 	    (0 - (ioaddr_t)softsp->iommu_dvma_size);
169 
170 	(void) snprintf(name, sizeof (name), "%s%d_dvma",
171 	    ddi_driver_name(softsp->dip), ddi_get_instance(softsp->dip));
172 
173 	/*
174 	 * Initialize the DVMA vmem arena.
175 	 */
176 	softsp->dvma_arena = vmem_create(name, (void *)softsp->iommu_dvma_base,
177 	    softsp->iommu_dvma_size, PAGESIZE, NULL, NULL, NULL,
178 	    DVMA_MAX_CACHE, VM_SLEEP);
179 
180 	/* Set the limit for dvma_reserve() to 1/2 of the total dvma space */
181 	softsp->dma_reserve = iommu_btop(softsp->iommu_dvma_size >> 1);
182 
183 #if defined(DEBUG) && defined(IO_MEMUSAGE)
184 	mutex_init(&softsp->iomemlock, NULL, MUTEX_DEFAULT, NULL);
185 	softsp->iomem = (struct io_mem_list *)0;
186 #endif /* DEBUG && IO_MEMUSAGE */
187 	/*
188 	 * Get the base address of the TSB table and store it in the hardware
189 	 */
190 
191 	/*
192 	 * We plan on the PROM flushing all TLB entries.  If this is not the
193 	 * case, this is where we should flush the hardware TLB.
194 	 */
195 
196 	/* Set the IOMMU registers */
197 	(void) iommu_resume_init(softsp);
198 
199 	/* check the convenient copy of TSB base, and flush write buffers */
200 	if (*softsp->tsb_base_addr !=
201 	    va_to_pa((caddr_t)softsp->soft_tsb_base_addr)) {
202 		iommu_tsb_free(softsp->iommu_tsb_cookie);
203 		return (DDI_FAILURE);
204 	}
205 
206 	softsp->sbus_io_lo_pfn = UINT32_MAX;
207 	softsp->sbus_io_hi_pfn = 0;
208 	for (i = 0; i < sysio_pd_getnrng(softsp->dip); i++) {
209 		struct rangespec *rangep;
210 		uint64_t addr;
211 		pfn_t hipfn, lopfn;
212 
213 		rangep = sysio_pd_getrng(softsp->dip, i);
214 		addr = (uint64_t)((uint64_t)rangep->rng_bustype << 32);
215 		addr |= (uint64_t)rangep->rng_offset;
216 		lopfn = (pfn_t)(addr >> MMU_PAGESHIFT);
217 		addr += (uint64_t)(rangep->rng_size - 1);
218 		hipfn = (pfn_t)(addr >> MMU_PAGESHIFT);
219 
220 		softsp->sbus_io_lo_pfn = (lopfn < softsp->sbus_io_lo_pfn) ?
221 		    lopfn : softsp->sbus_io_lo_pfn;
222 
223 		softsp->sbus_io_hi_pfn = (hipfn > softsp->sbus_io_hi_pfn) ?
224 		    hipfn : softsp->sbus_io_hi_pfn;
225 	}
226 
227 	DPRINTF(IOMMU_REGISTERS_DEBUG, ("IOMMU Control reg: %p IOMMU TSB "
228 	    "base reg: %p IOMMU flush reg: %p TSB base addr %p\n",
229 	    softsp->iommu_ctrl_reg, softsp->tsb_base_addr,
230 	    softsp->iommu_flush_reg, softsp->soft_tsb_base_addr));
231 
232 	return (DDI_SUCCESS);
233 }
234 
235 /*
236  * Uninitialize the IOMMU and release the TSB back to
237  * the spare pool.  See startup.c for TSB spare management.
238  */
239 
240 int
241 iommu_uninit(struct sbus_soft_state *softsp)
242 {
243 	vmem_destroy(softsp->dvma_arena);
244 
245 	/* flip off the IOMMU enable switch */
246 	*softsp->iommu_ctrl_reg &=
247 	    (TSB_SIZE << TSB_SIZE_SHIFT | IOMMU_DISABLE);
248 
249 	iommu_tsb_free(softsp->iommu_tsb_cookie);
250 
251 	return (DDI_SUCCESS);
252 }
253 
254 /*
255  * Initialize iommu hardware registers when the system is being resumed.
256  * (Subset of iommu_init())
257  */
258 int
259 iommu_resume_init(struct sbus_soft_state *softsp)
260 {
261 	int i;
262 	uint_t tsb_size;
263 	uint_t tsb_bytes;
264 
265 	/*
266 	 * Reset the base address of the TSB table in the hardware
267 	 */
268 	*softsp->tsb_base_addr = va_to_pa((caddr_t)softsp->soft_tsb_base_addr);
269 
270 	/*
271 	 * Figure out the correct size of the IOMMU TSB entries.  If we
272 	 * end up with a size smaller than that needed for 8M of IOMMU
273 	 * space, default the size to 8M.  XXX We could probably panic here
274 	 */
275 	i = sizeof (sysio_iommu_tsb_sizes) / sizeof (sysio_iommu_tsb_sizes[0])
276 	    - 1;
277 
278 	tsb_bytes = iommu_tsb_cookie_to_size(softsp->iommu_tsb_cookie);
279 
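	/*
	 * Walk down from the largest supported table size to the largest
	 * entry that fits in the allocated TSB; the resulting array index
	 * is what gets programmed into the TSB_SIZE field below.
	 */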
280 	while (i > 0) {
281 		if (tsb_bytes >= sysio_iommu_tsb_sizes[i])
282 			break;
283 		i--;
284 	}
285 
286 	tsb_size = i;
287 
288 	/* OK, let's flip the "on" switch of the IOMMU */
289 	*softsp->iommu_ctrl_reg = (uint64_t)(tsb_size << TSB_SIZE_SHIFT
290 	    | IOMMU_ENABLE | IOMMU_DIAG_ENABLE);
291 
292 	return (DDI_SUCCESS);
293 }
294 
295 void
296 iommu_tlb_flush(struct sbus_soft_state *softsp, ioaddr_t addr, pgcnt_t npages)
297 {
298 	volatile uint64_t tmpreg;
299 	volatile uint64_t *vaddr_reg, *valid_bit_reg;
300 	ioaddr_t hiaddr, ioaddr;
301 	int i, do_flush = 0;
302 
303 	if (npages == 1) {
304 		*softsp->iommu_flush_reg = (uint64_t)addr;
305 		tmpreg = *softsp->sbus_ctrl_reg;
306 		return;
307 	}
308 
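	/*
	 * For multi-page flushes, walk the TLB tag diagnostic registers and
	 * flush each valid entry whose VA falls within the requested range.
	 */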
309 	hiaddr = addr + (ioaddr_t)(npages * IOMMU_PAGESIZE);
310 	for (i = 0, vaddr_reg = softsp->iommu_tlb_tag,
311 	    valid_bit_reg = softsp->iommu_tlb_data;
312 	    i < IOMMU_TLB_ENTRIES; i++, vaddr_reg++, valid_bit_reg++) {
313 		tmpreg = *vaddr_reg;
314 		ioaddr = (ioaddr_t)((tmpreg & IOMMU_TLBTAG_VA_MASK) <<
315 		    IOMMU_TLBTAG_VA_SHIFT);
316 
317 		DPRINTF(IOMMU_TLB, ("Vaddr reg 0x%x, "
318 		    "TLB vaddr reg %llx, IO addr 0x%x "
319 		    "Base addr 0x%x, Hi addr 0x%x\n",
320 		    vaddr_reg, tmpreg, ioaddr, addr, hiaddr));
321 
322 		if (ioaddr >= addr && ioaddr <= hiaddr) {
323 			tmpreg = *valid_bit_reg;
324 
325 			DPRINTF(IOMMU_TLB, ("Valid reg addr 0x%x, "
326 			    "TLB valid reg %llx\n",
327 			    valid_bit_reg, tmpreg));
328 
329 			if (tmpreg & IOMMU_TLB_VALID) {
330 				*softsp->iommu_flush_reg = (uint64_t)ioaddr;
331 				do_flush = 1;
332 			}
333 		}
334 	}
335 
336 	if (do_flush)
337 		tmpreg = *softsp->sbus_ctrl_reg;
338 }
339 
340 
341 /*
342  * Shorthand defines
343  */
344 
345 #define	ALO		dma_lim->dlim_addr_lo
346 #define	AHI		dma_lim->dlim_addr_hi
347 #define	OBJSIZE		dmareq->dmar_object.dmao_size
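/*
 * IOTTE_NDX converts a DVMA address into a pointer to its IOTTE within the
 * software copy of the TSB: the TSB base plus the page index of
 * (vaddr - iommu_dvma_base).
 */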
348 #define	IOTTE_NDX(vaddr, base) (base + \
349 		(int)(iommu_btop((vaddr & ~IOMMU_PAGEMASK) - \
350 		softsp->iommu_dvma_base)))
351 /*
352  * If the DDI_DMA_PARTIAL flag is set and the request is for
353  * no more than MIN_DVMA_WIN_SIZE pages, it's not worth the hassle,
354  * so we turn off the DDI_DMA_PARTIAL flag.
355  */
356 #define	MIN_DVMA_WIN_SIZE	(128)
357 
358 /* ARGSUSED */
359 void
360 iommu_remove_mappings(ddi_dma_impl_t *mp)
361 {
362 #if defined(DEBUG) && defined(IO_MEMDEBUG)
363 	pgcnt_t npages;
364 	ioaddr_t ioaddr;
365 	volatile uint64_t *iotte_ptr;
368 	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
369 	struct sbus_soft_state *softsp = mppriv->softsp;
370 
371 #if defined(IO_MEMUSAGE)
372 	struct io_mem_list **prevp, *walk;
373 #endif /* IO_MEMUSAGE */
374 
375 	ASSERT(softsp != NULL);
376 	/*
377 	 * Run thru the mapped entries and free 'em
378 	 */
379 
380 	ioaddr = mp->dmai_mapping & ~IOMMU_PAGEOFFSET;
381 	npages = mp->dmai_ndvmapages;
382 
383 #if defined(IO_MEMUSAGE)
384 	mutex_enter(&softsp->iomemlock);
385 	prevp = &softsp->iomem;
386 	walk = softsp->iomem;
387 
388 	while (walk) {
389 		if (walk->ioaddr == ioaddr) {
390 			*prevp = walk->next;
391 			break;
392 		}
393 
394 		prevp = &walk->next;
395 		walk = walk->next;
396 	}
397 	mutex_exit(&softsp->iomemlock);
398 
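	/* the handle is expected to be on the iomem list, so walk is non-NULL */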
399 	kmem_free(walk->pfn, sizeof (pfn_t) * (npages + 1));
400 	kmem_free(walk, sizeof (struct io_mem_list));
401 #endif /* IO_MEMUSAGE */
402 
403 	iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
404 
405 	while (npages) {
406 		DPRINTF(IOMMU_DMAMCTL_DEBUG,
407 		    ("dma_mctl: freeing ioaddr %x iotte %p\n",
408 		    ioaddr, iotte_ptr));
409 		*iotte_ptr = (uint64_t)0;	/* unload tte */
410 		iommu_tlb_flush(softsp, ioaddr, 1);
411 		npages--;
412 		ioaddr += IOMMU_PAGESIZE;
413 		iotte_ptr++;
414 	}
415 #endif /* DEBUG && IO_MEMDEBUG */
416 }
417 
418 
419 int
420 iommu_create_vaddr_mappings(ddi_dma_impl_t *mp, uintptr_t addr)
421 {
422 	pfn_t pfn;
423 	struct as *as = NULL;
424 	pgcnt_t npages;
425 	ioaddr_t ioaddr;
426 	uint_t offset;
427 	volatile uint64_t *iotte_ptr;
428 	uint64_t tmp_iotte_flag;
429 	int rval = DDI_DMA_MAPPED;
430 	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
431 	struct sbus_soft_state *softsp = mppriv->softsp;
432 	int diag_tlb_flush;
433 #if defined(DEBUG) && defined(IO_MEMUSAGE)
434 	struct io_mem_list *iomemp;
435 	pfn_t *pfnp;
436 #endif /* DEBUG && IO_MEMUSAGE */
437 
438 	ASSERT(softsp != NULL);
439 
440 	/* Set Valid and Cache for mem xfer */
441 	tmp_iotte_flag = IOTTE_VALID | IOTTE_CACHE | IOTTE_WRITE | IOTTE_STREAM;
442 
443 	offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET);
444 	npages = iommu_btopr(mp->dmai_size + offset);
445 	ioaddr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
446 	iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
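	/* large requests are flushed in one pass via the diag registers */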
447 	diag_tlb_flush = npages > tlb_flush_using_diag ? 1 : 0;
448 
449 	as = mp->dmai_object.dmao_obj.virt_obj.v_as;
450 	if (as == NULL)
451 		as = &kas;
452 
453 	/*
454 	 * Set the per object bits of the TTE here. We optimize this for
455 	 * the memory case so that the while loop overhead is minimal.
456 	 */
457 	/* Turn on NOSYNC if we need consistent mem */
458 	if (mp->dmai_rflags & DDI_DMA_CONSISTENT) {
459 		mp->dmai_rflags |= DMP_NOSYNC;
460 		tmp_iotte_flag ^= IOTTE_STREAM;
461 	/* Set streaming mode if not consistent mem */
462 	} else if (softsp->stream_buf_off) {
463 		tmp_iotte_flag ^= IOTTE_STREAM;
464 	}
465 
466 #if defined(DEBUG) && defined(IO_MEMUSAGE)
467 	iomemp = kmem_alloc(sizeof (struct io_mem_list), KM_SLEEP);
468 	iomemp->rdip = mp->dmai_rdip;
469 	iomemp->ioaddr = ioaddr;
470 	iomemp->addr = addr;
471 	iomemp->npages = npages;
472 	pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) * (npages + 1),
473 	    KM_SLEEP);
474 #endif /* DEBUG && IO_MEMUSAGE */
475 	/*
476 	 * Grab the mappings from the dmmu and stick 'em into the
477 	 * iommu.
478 	 */
479 	ASSERT(npages != 0);
480 
481 	/* If we're going to flush the TLB using diag mode, do it now. */
482 	if (diag_tlb_flush)
483 		iommu_tlb_flush(softsp, ioaddr, npages);
484 
485 	do {
486 		uint64_t iotte_flag = tmp_iotte_flag;
487 
488 		/*
489 		 * Fetch the pfn for the DMA object
490 		 */
491 
492 		ASSERT(as);
493 		pfn = hat_getpfnum(as->a_hat, (caddr_t)addr);
494 		ASSERT(pfn != PFN_INVALID);
495 
496 		if (!pf_is_memory(pfn)) {
497 			/* DVMA'ing to IO space */
498 
499 			/* Turn off cache bit if set */
500 			if (iotte_flag & IOTTE_CACHE)
501 				iotte_flag ^= IOTTE_CACHE;
502 
503 			/* Turn off stream bit if set */
504 			if (iotte_flag & IOTTE_STREAM)
505 				iotte_flag ^= IOTTE_STREAM;
506 
507 			if (IS_INTRA_SBUS(softsp, pfn)) {
508 				/* Intra sbus transfer */
509 
510 				/* Turn on intra flag */
511 				iotte_flag |= IOTTE_INTRA;
512 
513 				DPRINTF(IOMMU_INTER_INTRA_XFER, (
514 				    "Intra xfer pfnum %x TTE %llx\n",
515 				    pfn, iotte_flag));
516 			} else {
517 				if (pf_is_dmacapable(pfn) == 1) {
518 					/*EMPTY*/
519 					DPRINTF(IOMMU_INTER_INTRA_XFER,
520 					    ("Inter xfer pfnum %lx "
521 					    "tte hi %llx\n",
522 					    pfn, iotte_flag));
523 				} else {
524 					rval = DDI_DMA_NOMAPPING;
525 #if defined(DEBUG) && defined(IO_MEMDEBUG)
526 					goto bad;
527 #endif /* DEBUG && IO_MEMDEBUG */
528 				}
529 			}
530 		}
531 		addr += IOMMU_PAGESIZE;
532 
533 		DPRINTF(IOMMU_TTE, ("vaddr mapping: tte index %x pfn %lx "
534 		    "tte flag %llx addr %p ioaddr %x\n",
535 		    iotte_ptr, pfn, iotte_flag, addr, ioaddr));
536 
537 		/* Flush the IOMMU TLB before loading a new mapping */
538 		if (!diag_tlb_flush)
539 			iommu_tlb_flush(softsp, ioaddr, 1);
540 
541 		/* Set the hardware IO TTE */
542 		*iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;
543 
544 		ioaddr += IOMMU_PAGESIZE;
545 		npages--;
546 		iotte_ptr++;
547 #if defined(DEBUG) && defined(IO_MEMUSAGE)
548 		*pfnp = pfn;
549 		pfnp++;
550 #endif /* DEBUG && IO_MEMUSAGE */
551 	} while (npages != 0);
552 
553 #if defined(DEBUG) && defined(IO_MEMUSAGE)
554 	mutex_enter(&softsp->iomemlock);
555 	iomemp->next = softsp->iomem;
556 	softsp->iomem = iomemp;
557 	mutex_exit(&softsp->iomemlock);
558 #endif /* DEBUG && IO_MEMUSAGE */
559 
560 	return (rval);
561 
562 #if defined(DEBUG) && defined(IO_MEMDEBUG)
563 bad:
564 	/* If we fail a mapping, free up any mapping resources used */
565 	iommu_remove_mappings(mp);
566 	return (rval);
567 #endif /* DEBUG && IO_MEMDEBUG */
568 }
569 
570 
571 int
572 iommu_create_pp_mappings(ddi_dma_impl_t *mp, page_t *pp, page_t **pplist)
573 {
574 	pfn_t pfn;
575 	pgcnt_t npages;
576 	ioaddr_t ioaddr;
577 	uint_t offset;
578 	volatile uint64_t *iotte_ptr;
579 	uint64_t tmp_iotte_flag;
580 	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
581 	struct sbus_soft_state *softsp = mppriv->softsp;
582 	int diag_tlb_flush;
583 #if defined(DEBUG) && defined(IO_MEMUSAGE)
584 	struct io_mem_list *iomemp;
585 	pfn_t *pfnp;
586 #endif /* DEBUG && IO_MEMUSAGE */
587 	int rval = DDI_DMA_MAPPED;
588 
589 	/* Set Valid and Cache for mem xfer */
590 	tmp_iotte_flag = IOTTE_VALID | IOTTE_CACHE | IOTTE_WRITE | IOTTE_STREAM;
591 
592 	ASSERT(softsp != NULL);
593 
594 	offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET);
595 	npages = iommu_btopr(mp->dmai_size + offset);
596 	ioaddr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
597 	iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
598 	diag_tlb_flush = npages > tlb_flush_using_diag ? 1 : 0;
599 
600 	/*
601 	 * Set the per object bits of the TTE here. We optimize this for
602 	 * the memory case so that the while loop overhead is minimal.
603 	 */
604 	if (mp->dmai_rflags & DDI_DMA_CONSISTENT) {
605 		/* Turn on NOSYNC if we need consistent mem */
606 		mp->dmai_rflags |= DMP_NOSYNC;
607 		tmp_iotte_flag ^= IOTTE_STREAM;
608 	} else if (softsp->stream_buf_off) {
609 		/* Set streaming mode if not consistent mem */
610 		tmp_iotte_flag ^= IOTTE_STREAM;
611 	}
612 
613 #if defined(DEBUG) && defined(IO_MEMUSAGE)
614 	iomemp = kmem_alloc(sizeof (struct io_mem_list), KM_SLEEP);
615 	iomemp->rdip = mp->dmai_rdip;
616 	iomemp->ioaddr = ioaddr;
617 	iomemp->npages = npages;
618 	pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) * (npages + 1),
619 	    KM_SLEEP);
620 #endif /* DEBUG && IO_MEMUSAGE */
621 	/*
622 	 * Grab the mappings from the dmmu and stick 'em into the
623 	 * iommu.
624 	 */
625 	ASSERT(npages != 0);
626 
627 	/* If we're going to flush the TLB using diag mode, do it now. */
628 	if (diag_tlb_flush)
629 		iommu_tlb_flush(softsp, ioaddr, npages);
630 
631 	do {
632 		uint64_t iotte_flag;
633 
634 		iotte_flag = tmp_iotte_flag;
635 
636 		if (pp != NULL) {
637 			pfn = pp->p_pagenum;
638 			pp = pp->p_next;
639 		} else {
640 			pfn = (*pplist)->p_pagenum;
641 			pplist++;
642 		}
643 
644 		DPRINTF(IOMMU_TTE, ("pp mapping TTE index %x pfn %lx "
645 		    "tte flag %llx ioaddr %x\n", iotte_ptr,
646 		    pfn, iotte_flag, ioaddr));
647 
648 		/* Flush the IOMMU TLB before loading a new mapping */
649 		if (!diag_tlb_flush)
650 			iommu_tlb_flush(softsp, ioaddr, 1);
651 
652 		/* Set the hardware IO TTE */
653 		*iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;
654 
655 		ioaddr += IOMMU_PAGESIZE;
656 		npages--;
657 		iotte_ptr++;
658 
659 #if defined(DEBUG) && defined(IO_MEMUSAGE)
660 		*pfnp = pfn;
661 		pfnp++;
662 #endif /* DEBUG && IO_MEMUSAGE */
663 
664 	} while (npages != 0);
665 
666 #if defined(DEBUG) && defined(IO_MEMUSAGE)
667 	mutex_enter(&softsp->iomemlock);
668 	iomemp->next = softsp->iomem;
669 	softsp->iomem = iomemp;
670 	mutex_exit(&softsp->iomemlock);
671 #endif /* DEBUG && IO_MEMUSAGE */
672 
673 	return (rval);
674 }
675 
676 
677 int
678 iommu_dma_lim_setup(dev_info_t *dip, dev_info_t *rdip,
679     struct sbus_soft_state *softsp, uint_t *burstsizep, uint_t burstsize64,
680     uint_t *minxferp, uint_t dma_flags)
681 {
682 	struct regspec *rp;
683 
684 	/* Take care of 64 bit limits. */
685 	if (!(dma_flags & DDI_DMA_SBUS_64BIT)) {
686 		/*
687 		 * return burst size for 32-bit mode
688 		 */
689 		*burstsizep &= softsp->sbus_burst_sizes;
690 		return (DDI_FAILURE);
691 	}
692 
693 	/*
694 	 * Check whether the SBus supports 64-bit transfers and whether the
695 	 * caller is a direct child of the SBus; bridges are not supported.
696 	 */
697 	if (!softsp->sbus64_burst_sizes || (ddi_get_parent(rdip) != dip)) {
698 		/*
699 		 * Either the SBus lacks 64-bit support or the caller sits
700 		 * behind a bridge; fall back to 32-bit transfers.
701 		 */
702 		*burstsizep &= softsp->sbus_burst_sizes;
703 		return (DDI_FAILURE);
704 	}
705 
706 	rp = ddi_rnumber_to_regspec(rdip, 0);
707 	if (rp == NULL) {
708 		*burstsizep &= softsp->sbus_burst_sizes;
709 		return (DDI_FAILURE);
710 	}
711 
712 	/* Check for old-style 64 bit burstsizes */
713 	if (burstsize64 & SYSIO64_BURST_MASK) {
714 		/* Scale back burstsizes if Necessary */
715 		*burstsizep &= (softsp->sbus64_burst_sizes |
716 		    softsp->sbus_burst_sizes);
717 	} else {
718 		/* Get the 64 bit burstsizes. */
719 		*burstsizep = burstsize64;
720 
721 		/* Scale back burstsizes if Necessary */
722 		*burstsizep &= (softsp->sbus64_burst_sizes >>
723 		    SYSIO64_BURST_SHIFT);
724 	}
725 
726 	/*
727 	 * Raise the minimum transfer size to at least the smallest
728 	 * 64-bit burstsize the bus can manage.
729 	 */
730 	*minxferp = MAX(*minxferp,
731 	    (1 << (ddi_ffs(softsp->sbus64_burst_sizes) - 1)));
732 
733 	return (DDI_SUCCESS);
734 }
735 
736 
737 int
738 iommu_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
739     ddi_dma_attr_t *dma_attr, int (*waitfp)(caddr_t), caddr_t arg,
740     ddi_dma_handle_t *handlep)
741 {
742 	ioaddr_t addrlow, addrhigh, segalign;
743 	ddi_dma_impl_t *mp;
744 	struct dma_impl_priv *mppriv;
745 	struct sbus_soft_state *softsp = (struct sbus_soft_state *)
746 	    ddi_get_soft_state(sbusp, ddi_get_instance(dip));
747 
748 	/*
749 	 * Set up DMA burstsizes and min-xfer counts.
750 	 */
751 	(void) iommu_dma_lim_setup(dip, rdip, softsp,
752 	    &dma_attr->dma_attr_burstsizes,
753 	    dma_attr->dma_attr_burstsizes, &dma_attr->dma_attr_minxfer,
754 	    dma_attr->dma_attr_flags);
755 
756 	if (dma_attr->dma_attr_burstsizes == 0)
757 		return (DDI_DMA_BADATTR);
758 
759 	addrlow = (ioaddr_t)dma_attr->dma_attr_addr_lo;
760 	addrhigh = (ioaddr_t)dma_attr->dma_attr_addr_hi;
761 	segalign = (ioaddr_t)dma_attr->dma_attr_seg;
762 
763 	/*
764 	 * Check sanity for hi and lo address limits
765 	 */
766 	if ((addrhigh <= addrlow) ||
767 	    (addrhigh < (ioaddr_t)softsp->iommu_dvma_base)) {
768 		return (DDI_DMA_BADATTR);
769 	}
770 	if (dma_attr->dma_attr_flags & DDI_DMA_FORCE_PHYSICAL)
771 		return (DDI_DMA_BADATTR);
772 
773 	mppriv = kmem_zalloc(sizeof (*mppriv),
774 	    (waitfp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP);
775 
776 	if (mppriv == NULL) {
777 		if (waitfp != DDI_DMA_DONTWAIT) {
778 		    ddi_set_callback(waitfp, arg, &softsp->dvma_call_list_id);
779 		}
780 		return (DDI_DMA_NORESOURCES);
781 	}
782 	mp = (ddi_dma_impl_t *)mppriv;
783 
784 	DPRINTF(IOMMU_DMA_ALLOCHDL_DEBUG, ("dma_allochdl: (%s) handle %x "
785 	    "hi %x lo %x min %x burst %x\n",
786 	    ddi_get_name(dip), mp, addrhigh, addrlow,
787 	    dma_attr->dma_attr_minxfer, dma_attr->dma_attr_burstsizes));
788 
789 	mp->dmai_rdip = rdip;
790 	mp->dmai_minxfer = (uint_t)dma_attr->dma_attr_minxfer;
791 	mp->dmai_burstsizes = (uint_t)dma_attr->dma_attr_burstsizes;
792 	mp->dmai_attr = *dma_attr;
793 	/* See if the DMA engine has any limit restrictions. */
794 	if (segalign == (ioaddr_t)UINT32_MAX &&
795 	    addrhigh == (ioaddr_t)UINT32_MAX &&
796 	    (dma_attr->dma_attr_align <= IOMMU_PAGESIZE) && addrlow == 0) {
797 		mp->dmai_rflags |= DMP_NOLIMIT;
798 	}
799 	mppriv->softsp = softsp;
800 	mppriv->phys_sync_flag = va_to_pa((caddr_t)&mppriv->sync_flag);
801 
802 	*handlep = (ddi_dma_handle_t)mp;
803 	return (DDI_SUCCESS);
804 }
805 
806 /*ARGSUSED*/
807 int
808 iommu_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
809 {
810 	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)handle;
811 	struct sbus_soft_state *softsp = mppriv->softsp;
812 	ASSERT(softsp != NULL);
813 
814 	kmem_free(mppriv, sizeof (*mppriv));
815 
816 	if (softsp->dvma_call_list_id != 0) {
817 		ddi_run_callback(&softsp->dvma_call_list_id);
818 	}
819 	return (DDI_SUCCESS);
820 }
821 
822 static int
823 check_dma_attr(struct ddi_dma_req *dmareq, ddi_dma_attr_t *dma_attr,
824     uint32_t *size)
825 {
826 	ioaddr_t addrlow;
827 	ioaddr_t addrhigh;
828 	uint32_t segalign;
829 	uint32_t smask;
830 
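	/*
	 * Clamp the transfer so a single window never exceeds the device's
	 * segment limit or its addressable range; if the caller did not
	 * allow partial mappings, the request is simply too big.
	 */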
831 	smask = *size - 1;
832 	segalign = dma_attr->dma_attr_seg;
833 	if (smask > segalign) {
834 		if ((dmareq->dmar_flags & DDI_DMA_PARTIAL) == 0)
835 			return (DDI_DMA_TOOBIG);
836 		*size = segalign + 1;
837 	}
838 	addrlow = (ioaddr_t)dma_attr->dma_attr_addr_lo;
839 	addrhigh = (ioaddr_t)dma_attr->dma_attr_addr_hi;
840 	if (addrlow + smask > addrhigh || addrlow + smask < addrlow) {
841 		if (!((addrlow + dmareq->dmar_object.dmao_size == 0) &&
842 		    (addrhigh == (ioaddr_t)-1))) {
843 			if ((dmareq->dmar_flags & DDI_DMA_PARTIAL) == 0)
844 				return (DDI_DMA_TOOBIG);
845 			*size = MIN(addrhigh - addrlow + 1, *size);
846 		}
847 	}
848 	return (DDI_DMA_MAPOK);
849 }
850 
851 int
852 iommu_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
853     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
854     ddi_dma_cookie_t *cp, uint_t *ccountp)
855 {
856 	page_t *pp;
857 	uint32_t size;
858 	ioaddr_t ioaddr;
859 	uint_t offset;
860 	uintptr_t addr = 0;
861 	pgcnt_t npages;
862 	int rval;
863 	ddi_dma_attr_t *dma_attr;
864 	struct sbus_soft_state *softsp;
865 	struct page **pplist = NULL;
866 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
867 	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
868 
869 #ifdef lint
870 	dip = dip;
871 	rdip = rdip;
872 #endif
873 
874 	if (mp->dmai_inuse)
875 		return (DDI_DMA_INUSE);
876 
877 	dma_attr = &mp->dmai_attr;
878 	size = (uint32_t)dmareq->dmar_object.dmao_size;
879 	if (!(mp->dmai_rflags & DMP_NOLIMIT)) {
880 		rval = check_dma_attr(dmareq, dma_attr, &size);
881 		if (rval != DDI_DMA_MAPOK)
882 			return (rval);
883 	}
884 	mp->dmai_inuse = 1;
885 	mp->dmai_offset = 0;
886 	mp->dmai_rflags = (dmareq->dmar_flags & DMP_DDIFLAGS) |
887 	    (mp->dmai_rflags & DMP_NOLIMIT);
888 
889 	switch (dmareq->dmar_object.dmao_type) {
890 	case DMA_OTYP_VADDR:
891 	case DMA_OTYP_BUFVADDR:
892 		addr = (uintptr_t)dmareq->dmar_object.dmao_obj.virt_obj.v_addr;
893 		offset = addr & IOMMU_PAGEOFFSET;
894 		pplist = dmareq->dmar_object.dmao_obj.virt_obj.v_priv;
895 		npages = iommu_btopr(OBJSIZE + offset);
896 
897 		DPRINTF(IOMMU_DMAMAP_DEBUG, ("dma_map vaddr: %x pages "
898 		    "req addr %lx off %x OBJSIZE %x\n",
899 		    npages, addr, offset, OBJSIZE));
900 
901 		/* We don't need the addr anymore if we have a shadow list */
902 		if (pplist != NULL)
903 			addr = 0;
904 		pp = NULL;
905 		break;
906 
907 	case DMA_OTYP_PAGES:
908 		pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp;
909 		offset = dmareq->dmar_object.dmao_obj.pp_obj.pp_offset;
910 		npages = iommu_btopr(OBJSIZE + offset);
911 		break;
912 
913 	case DMA_OTYP_PADDR:
914 	default:
915 		/*
916 		 * Not a supported type for this implementation
917 		 */
918 		rval = DDI_DMA_NOMAPPING;
919 		goto bad;
920 	}
921 
922 	/* Get our soft state once we know we're mapping an object. */
923 	softsp = mppriv->softsp;
924 	ASSERT(softsp != NULL);
925 
926 	if (mp->dmai_rflags & DDI_DMA_PARTIAL) {
927 		if (size != OBJSIZE) {
928 			/*
929 			 * If the request is for partial mapping arrangement,
930 			 * the device has to be able to address at least the
931 			 * size of the window we are establishing.
932 			 */
933 			if (size < iommu_ptob(MIN_DVMA_WIN_SIZE)) {
934 				rval = DDI_DMA_NOMAPPING;
935 				goto bad;
936 			}
937 			npages = iommu_btopr(size + offset);
938 		}
939 		/*
940 		 * If the size requested is less than a moderate amount,
941 		 * skip the partial mapping stuff; it's not worth the
942 		 * effort.
943 		 */
944 		if (npages > MIN_DVMA_WIN_SIZE) {
945 			npages = MIN_DVMA_WIN_SIZE + iommu_btopr(offset);
946 			size = iommu_ptob(MIN_DVMA_WIN_SIZE);
947 			DPRINTF(IOMMU_DMA_SETUP_DEBUG, ("dma_setup: SZ %x pg "
948 			    "%x sz %lx\n", OBJSIZE, npages, size));
949 			if (pplist != NULL) {
950 				mp->dmai_minfo = (void *)pplist;
951 				mp->dmai_rflags |= DMP_SHADOW;
952 			}
953 		} else {
954 			mp->dmai_rflags ^= DDI_DMA_PARTIAL;
955 		}
956 	} else {
957 		if (npages >= iommu_btop(softsp->iommu_dvma_size) -
958 		    MIN_DVMA_WIN_SIZE) {
959 			rval = DDI_DMA_TOOBIG;
960 			goto bad;
961 		}
962 	}
963 
964 	/*
965 	 * Save the dmareq object, size, and npages in mp.
966 	 */
967 	mp->dmai_object = dmareq->dmar_object;
968 	mp->dmai_size = size;
969 	mp->dmai_ndvmapages = npages;
970 
971 	if (mp->dmai_rflags & DMP_NOLIMIT) {
972 		ioaddr = (ioaddr_t)vmem_alloc(softsp->dvma_arena,
973 		    iommu_ptob(npages),
974 		    dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP);
975 		if (ioaddr == 0) {
976 			rval = DDI_DMA_NORESOURCES;
977 			goto bad;
978 		}
979 
980 		/*
981 		 * If we have a 1 page request and we're working with a page
982 		 * list, we're going to speed load an IOMMU entry.
983 		 */
984 		if (npages == 1 && !addr) {
985 			uint64_t iotte_flag = IOTTE_VALID | IOTTE_CACHE |
986 			    IOTTE_WRITE | IOTTE_STREAM;
987 			volatile uint64_t *iotte_ptr;
988 			pfn_t pfn;
989 #if defined(DEBUG) && defined(IO_MEMUSAGE)
990 			struct io_mem_list *iomemp;
991 			pfn_t *pfnp;
992 #endif /* DEBUG && IO_MEMUSAGE */
993 
994 			iotte_ptr = IOTTE_NDX(ioaddr,
995 			    softsp->soft_tsb_base_addr);
996 
997 			if (mp->dmai_rflags & DDI_DMA_CONSISTENT) {
998 				mp->dmai_rflags |= DMP_NOSYNC;
999 				iotte_flag ^= IOTTE_STREAM;
1000 			} else if (softsp->stream_buf_off)
1001 				iotte_flag ^= IOTTE_STREAM;
1002 
1003 			mp->dmai_rflags ^= DDI_DMA_PARTIAL;
1004 
1005 			if (pp != NULL)
1006 				pfn = pp->p_pagenum;
1007 			else
1008 				pfn = (*pplist)->p_pagenum;
1009 
1010 			iommu_tlb_flush(softsp, ioaddr, 1);
1011 
1012 			*iotte_ptr =
1013 			    ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;
1014 
1015 			mp->dmai_mapping = (ioaddr_t)(ioaddr + offset);
1016 			mp->dmai_nwin = 0;
1017 			if (cp != NULL) {
1018 				cp->dmac_notused = 0;
1019 				cp->dmac_address = (ioaddr_t)mp->dmai_mapping;
1020 				cp->dmac_size = mp->dmai_size;
1021 				cp->dmac_type = 0;
1022 				*ccountp = 1;
1023 			}
1024 
1025 			DPRINTF(IOMMU_TTE, ("speed loading: TTE index %x "
1026 			    "pfn %lx tte flag %llx addr %lx ioaddr %x\n",
1027 			    iotte_ptr, pfn, iotte_flag, addr, ioaddr));
1028 
1029 #if defined(DEBUG) && defined(IO_MEMUSAGE)
1030 			iomemp = kmem_alloc(sizeof (struct io_mem_list),
1031 			    KM_SLEEP);
1032 			iomemp->rdip = mp->dmai_rdip;
1033 			iomemp->ioaddr = ioaddr;
1034 			iomemp->addr = addr;
1035 			iomemp->npages = npages;
1036 			pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) *
1037 			    (npages + 1), KM_SLEEP);
1038 			*pfnp = pfn;
1039 			mutex_enter(&softsp->iomemlock);
1040 			iomemp->next = softsp->iomem;
1041 			softsp->iomem = iomemp;
1042 			mutex_exit(&softsp->iomemlock);
1043 #endif /* DEBUG && IO_MEMUSAGE */
1044 
1045 			return (DDI_DMA_MAPPED);
1046 		}
1047 	} else {
1048 		ioaddr = (ioaddr_t)vmem_xalloc(softsp->dvma_arena,
1049 		    iommu_ptob(npages),
1050 		    MAX((uint_t)dma_attr->dma_attr_align, IOMMU_PAGESIZE), 0,
1051 		    (uint_t)dma_attr->dma_attr_seg + 1,
1052 		    (void *)(ioaddr_t)dma_attr->dma_attr_addr_lo,
1053 		    (void *)((ioaddr_t)dma_attr->dma_attr_addr_hi + 1),
1054 		    dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP);
1055 	}
1056 
1057 	if (ioaddr == 0) {
1058 		if (dmareq->dmar_fp == DDI_DMA_SLEEP)
1059 			rval = DDI_DMA_NOMAPPING;
1060 		else
1061 			rval = DDI_DMA_NORESOURCES;
1062 		goto bad;
1063 	}
1064 
1065 	mp->dmai_mapping = ioaddr + offset;
1066 	ASSERT(mp->dmai_mapping >= softsp->iommu_dvma_base);
1067 
1068 	/*
1069 	 * At this point we have a range of DVMA addresses allocated,
1070 	 * which we now have to map to the requested object.
1071 	 */
1072 	if (addr) {
1073 		rval = iommu_create_vaddr_mappings(mp,
1074 		    addr & ~IOMMU_PAGEOFFSET);
1075 		if (rval == DDI_DMA_NOMAPPING)
1076 			goto bad_nomap;
1077 	} else {
1078 		rval = iommu_create_pp_mappings(mp, pp, pplist);
1079 		if (rval == DDI_DMA_NOMAPPING)
1080 			goto bad_nomap;
1081 	}
1082 
1083 	if (cp) {
1084 		cp->dmac_notused = 0;
1085 		cp->dmac_address = (ioaddr_t)mp->dmai_mapping;
1086 		cp->dmac_size = mp->dmai_size;
1087 		cp->dmac_type = 0;
1088 		*ccountp = 1;
1089 	}
1090 	if (mp->dmai_rflags & DDI_DMA_PARTIAL) {
1091 		size = iommu_ptob(mp->dmai_ndvmapages - iommu_btopr(offset));
1092 		mp->dmai_nwin =
1093 		    (dmareq->dmar_object.dmao_size + (size - 1)) / size;
1094 		return (DDI_DMA_PARTIAL_MAP);
1095 	} else {
1096 		mp->dmai_nwin = 0;
1097 		return (DDI_DMA_MAPPED);
1098 	}
1099 
1100 bad_nomap:
1101 	/*
1102 	 * Could not create mmu mappings.
1103 	 */
1104 	if (mp->dmai_rflags & DMP_NOLIMIT) {
1105 		vmem_free(softsp->dvma_arena, (void *)ioaddr,
1106 		    iommu_ptob(npages));
1107 	} else {
1108 		vmem_xfree(softsp->dvma_arena, (void *)ioaddr,
1109 		    iommu_ptob(npages));
1110 	}
1111 
1112 bad:
1113 	if (rval == DDI_DMA_NORESOURCES &&
1114 	    dmareq->dmar_fp != DDI_DMA_DONTWAIT) {
1115 		ddi_set_callback(dmareq->dmar_fp,
1116 		    dmareq->dmar_arg, &softsp->dvma_call_list_id);
1117 	}
1118 	mp->dmai_inuse = 0;
1119 	return (rval);
1120 }
1121 
1122 /* ARGSUSED */
1123 int
1124 iommu_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
1125     ddi_dma_handle_t handle)
1126 {
1127 	ioaddr_t addr;
1128 	uint_t npages;
1129 	size_t size;
1130 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
1131 	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
1132 	struct sbus_soft_state *softsp = mppriv->softsp;
1133 	ASSERT(softsp != NULL);
1134 
1135 	addr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
1136 	npages = mp->dmai_ndvmapages;
1137 	size = iommu_ptob(npages);
1138 
1139 	DPRINTF(IOMMU_DMA_UNBINDHDL_DEBUG, ("iommu_dma_unbindhdl: "
1140 	    "unbinding addr %x for %x pages\n", addr, mp->dmai_ndvmapages));
1141 
1142 	/* sync the entire object */
1143 	if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
1144 		/* flush stream write buffers */
1145 		sync_stream_buf(softsp, addr, npages, (int *)&mppriv->sync_flag,
1146 		    mppriv->phys_sync_flag);
1147 	}
1148 
1149 #if defined(DEBUG) && defined(IO_MEMDEBUG)
1150 	/*
1151 	 * 'Free' the dma mappings.
1152 	 */
1153 	iommu_remove_mappings(mp);
1154 #endif /* DEBUG && IO_MEMDEBUG */
1155 
1156 	ASSERT(npages > (uint_t)0);
1157 	if (mp->dmai_rflags & DMP_NOLIMIT)
1158 		vmem_free(softsp->dvma_arena, (void *)addr, size);
1159 	else
1160 		vmem_xfree(softsp->dvma_arena, (void *)addr, size);
1161 
1162 	mp->dmai_ndvmapages = 0;
1163 	mp->dmai_inuse = 0;
1164 	mp->dmai_minfo = NULL;
1165 
1166 	if (softsp->dvma_call_list_id != 0)
1167 		ddi_run_callback(&softsp->dvma_call_list_id);
1168 
1169 	return (DDI_SUCCESS);
1170 }
1171 
1172 /*ARGSUSED*/
1173 int
1174 iommu_dma_flush(dev_info_t *dip, dev_info_t *rdip,
1175     ddi_dma_handle_t handle, off_t off, size_t len,
1176     uint_t cache_flags)
1177 {
1178 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
1179 	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
1180 
1181 	if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
1182 		sync_stream_buf(mppriv->softsp, mp->dmai_mapping,
1183 		    mp->dmai_ndvmapages, (int *)&mppriv->sync_flag,
1184 		    mppriv->phys_sync_flag);
1185 	}
1186 	return (DDI_SUCCESS);
1187 }
1188 
1189 /*ARGSUSED*/
1190 int
1191 iommu_dma_win(dev_info_t *dip, dev_info_t *rdip,
1192     ddi_dma_handle_t handle, uint_t win, off_t *offp,
1193     size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
1194 {
1195 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
1196 	off_t offset;
1197 	uint_t winsize;
1198 	uint_t newoff;
1199 	int rval;
1200 
1201 	offset = mp->dmai_mapping & IOMMU_PAGEOFFSET;
1202 	winsize = iommu_ptob(mp->dmai_ndvmapages - iommu_btopr(offset));
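	/* the window covers the mapped DVMA pages minus the initial offset */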
1203 
1204 	DPRINTF(IOMMU_DMA_WIN_DEBUG, ("getwin win %d winsize %x\n", win,
1205 	    winsize));
1206 
1207 	/*
1208 	 * win is in the range [0 .. dmai_nwin-1]
1209 	 */
1210 	if (win >= mp->dmai_nwin)
1211 		return (DDI_FAILURE);
1212 
1213 	newoff = win * winsize;
1214 	if (newoff > mp->dmai_object.dmao_size - mp->dmai_minxfer)
1215 		return (DDI_FAILURE);
1216 
1217 	ASSERT(cookiep);
1218 	cookiep->dmac_notused = 0;
1219 	cookiep->dmac_type = 0;
1220 	cookiep->dmac_address = (ioaddr_t)mp->dmai_mapping;
1221 	cookiep->dmac_size = mp->dmai_size;
1222 	*ccountp = 1;
1223 	*offp = (off_t)newoff;
1224 	*lenp = (uint_t)winsize;
1225 
1226 	if (newoff == mp->dmai_offset) {
1227 		/*
1228 		 * Nothing to do...
1229 		 */
1230 		return (DDI_SUCCESS);
1231 	}
1232 
1233 	if ((rval = iommu_map_window(mp, newoff, winsize)) != DDI_SUCCESS)
1234 		return (rval);
1235 
1236 	/*
1237 	 * Set this again in case iommu_map_window() has changed it
1238 	 */
1239 	cookiep->dmac_size = mp->dmai_size;
1240 
1241 	return (DDI_SUCCESS);
1242 }
1243 
1244 static int
1245 iommu_map_window(ddi_dma_impl_t *mp, off_t newoff, size_t winsize)
1246 {
1247 	uintptr_t addr = 0;
1248 	page_t *pp;
1249 	uint_t flags;
1250 	struct page **pplist = NULL;
1251 
1252 #if defined(DEBUG) && defined(IO_MEMDEBUG)
1253 	/* Free mappings for current window */
1254 	iommu_remove_mappings(mp);
1255 #endif /* DEBUG && IO_MEMDEBUG */
1256 
1257 	mp->dmai_offset = newoff;
1258 	mp->dmai_size = mp->dmai_object.dmao_size - newoff;
1259 	mp->dmai_size = MIN(mp->dmai_size, winsize);
1260 
1261 	if (mp->dmai_object.dmao_type == DMA_OTYP_VADDR ||
1262 	    mp->dmai_object.dmao_type == DMA_OTYP_BUFVADDR) {
1263 		if (mp->dmai_rflags & DMP_SHADOW) {
1264 			pplist = (struct page **)mp->dmai_minfo;
1265 			ASSERT(pplist != NULL);
1266 			pplist = pplist + (newoff >> MMU_PAGESHIFT);
1267 		} else {
1268 			addr = (uintptr_t)
1269 			    mp->dmai_object.dmao_obj.virt_obj.v_addr;
1270 			addr = (addr + newoff) & ~IOMMU_PAGEOFFSET;
1271 		}
1272 		pp = NULL;
1273 	} else {
1274 		pp = mp->dmai_object.dmao_obj.pp_obj.pp_pp;
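		/* advance through the page list to the page containing newoff */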
1275 		flags = 0;
1276 		while (flags < newoff) {
1277 			pp = pp->p_next;
1278 			flags += MMU_PAGESIZE;
1279 		}
1280 	}
1281 
1282 	/* Set up mappings for next window */
1283 	if (addr) {
1284 		if (iommu_create_vaddr_mappings(mp, addr) < 0)
1285 			return (DDI_FAILURE);
1286 	} else {
1287 		if (iommu_create_pp_mappings(mp, pp, pplist) < 0)
1288 			return (DDI_FAILURE);
1289 	}
1290 
1291 	/*
1292 	 * also invalidate read stream buffer
1293 	 */
1294 	if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
1295 		struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
1296 
1297 		sync_stream_buf(mppriv->softsp, mp->dmai_mapping,
1298 		    mp->dmai_ndvmapages, (int *)&mppriv->sync_flag,
1299 		    mppriv->phys_sync_flag);
1300 	}
1301 
1302 	return (DDI_SUCCESS);
1304 }
1305 
1306 int
1307 iommu_dma_map(dev_info_t *dip, dev_info_t *rdip,
1308     struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep)
1309 {
1310 	ddi_dma_lim_t *dma_lim = dmareq->dmar_limits;
1311 	ddi_dma_impl_t *mp;
1312 	ddi_dma_attr_t *dma_attr;
1313 	struct dma_impl_priv *mppriv;
1314 	ioaddr_t addrlow, addrhigh;
1315 	ioaddr_t segalign;
1316 	int rval;
1317 	struct sbus_soft_state *softsp =
1318 	    (struct sbus_soft_state *)ddi_get_soft_state(sbusp,
1319 	    ddi_get_instance(dip));
1320 
1321 	addrlow = dma_lim->dlim_addr_lo;
1322 	addrhigh = dma_lim->dlim_addr_hi;
1323 	if ((addrhigh <= addrlow) ||
1324 	    (addrhigh < (ioaddr_t)softsp->iommu_dvma_base)) {
1325 		return (DDI_DMA_NOMAPPING);
1326 	}
1327 
1328 	/*
1329 	 * Set up DMA burstsizes and min-xfer counts.
1330 	 */
1331 	(void) iommu_dma_lim_setup(dip, rdip, softsp, &dma_lim->dlim_burstsizes,
1332 	    (uint_t)dma_lim->dlim_burstsizes, &dma_lim->dlim_minxfer,
1333 	    dmareq->dmar_flags);
1334 
1335 	if (dma_lim->dlim_burstsizes == 0)
1336 		return (DDI_DMA_NOMAPPING);
1337 	/*
1338 	 * If not an advisory call, get a DMA handle
1339 	 */
1340 	if (!handlep) {
1341 		return (DDI_DMA_MAPOK);
1342 	}
1343 
1344 	mppriv = kmem_zalloc(sizeof (*mppriv),
1345 	    (dmareq->dmar_fp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP);
1346 	if (mppriv == NULL) {
1347 		if (dmareq->dmar_fp != DDI_DMA_DONTWAIT) {
1348 			ddi_set_callback(dmareq->dmar_fp,
1349 			    dmareq->dmar_arg, &softsp->dvma_call_list_id);
1350 		}
1351 		return (DDI_DMA_NORESOURCES);
1352 	}
1353 	mp = (ddi_dma_impl_t *)mppriv;
1354 	mp->dmai_rdip = rdip;
1355 	mp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS;
1356 	mp->dmai_minxfer = dma_lim->dlim_minxfer;
1357 	mp->dmai_burstsizes = dma_lim->dlim_burstsizes;
1358 	mp->dmai_offset = 0;
1359 	mp->dmai_ndvmapages = 0;
1360 	mp->dmai_minfo = 0;
1361 	mp->dmai_inuse = 0;
1362 	segalign = dma_lim->dlim_cntr_max;
1363 	/* See if the DMA engine has any limit restrictions. */
1364 	if (segalign == UINT32_MAX && addrhigh == UINT32_MAX &&
1365 	    addrlow == 0) {
1366 		mp->dmai_rflags |= DMP_NOLIMIT;
1367 	}
1368 	mppriv->softsp = softsp;
1369 	mppriv->phys_sync_flag = va_to_pa((caddr_t)&mppriv->sync_flag);
1370 	dma_attr = &mp->dmai_attr;
1371 	dma_attr->dma_attr_align = 1;
1372 	dma_attr->dma_attr_addr_lo = addrlow;
1373 	dma_attr->dma_attr_addr_hi = addrhigh;
1374 	dma_attr->dma_attr_seg = segalign;
1375 	dma_attr->dma_attr_burstsizes = dma_lim->dlim_burstsizes;
1376 	rval = iommu_dma_bindhdl(dip, rdip, (ddi_dma_handle_t)mp,
1377 	    dmareq, NULL, NULL);
1378 	if (rval && (rval != DDI_DMA_PARTIAL_MAP)) {
1379 		kmem_free(mppriv, sizeof (*mppriv));
1380 	} else {
1381 		*handlep = (ddi_dma_handle_t)mp;
1382 	}
1383 	return (rval);
1384 }
1385 
1386 /*ARGSUSED*/
1387 int
1388 iommu_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
1389     ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
1390     off_t *offp, size_t *lenp, caddr_t *objp, uint_t cache_flags)
1391 {
1392 	ioaddr_t addr;
1393 	uint_t offset;
1394 	pgcnt_t npages;
1395 	size_t size;
1396 	ddi_dma_cookie_t *cp;
1397 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
1398 
1399 	DPRINTF(IOMMU_DMAMCTL_DEBUG, ("dma_mctl: handle %p ", mp));
1400 	switch (request) {
1401 	case DDI_DMA_FREE:
1402 	{
1403 		struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
1404 		struct sbus_soft_state *softsp = mppriv->softsp;
1405 		ASSERT(softsp != NULL);
1406 
1407 		/*
1408 		 * 'Free' the dma mappings.
1409 		 */
1410 		addr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
1411 		npages = mp->dmai_ndvmapages;
1412 		size = iommu_ptob(npages);
1413 
1414 		DPRINTF(IOMMU_DMAMCTL_DMA_FREE_DEBUG, ("iommu_dma_mctl dmafree:"
1415 		    "freeing vaddr %x for %x pages.\n", addr,
1416 		    mp->dmai_ndvmapages));
1417 		/* sync the entire object */
1418 		if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
1419 			/* flush stream write buffers */
1420 			sync_stream_buf(softsp, addr, npages,
1421 			    (int *)&mppriv->sync_flag, mppriv->phys_sync_flag);
1422 		}
1423 
1424 #if defined(DEBUG) && defined(IO_MEMDEBUG)
1425 		iommu_remove_mappings(mp);
1426 #endif /* DEBUG && IO_MEMDEBUG */
1427 
1428 		ASSERT(npages > (uint_t)0);
1429 		if (mp->dmai_rflags & DMP_NOLIMIT)
1430 			vmem_free(softsp->dvma_arena, (void *)addr, size);
1431 		else
1432 			vmem_xfree(softsp->dvma_arena, (void *)addr, size);
1433 
1434 		kmem_free(mppriv, sizeof (*mppriv));
1435 
1436 		if (softsp->dvma_call_list_id != 0)
1437 			ddi_run_callback(&softsp->dvma_call_list_id);
1438 
1439 		break;
1440 	}
1441 
1442 	case DDI_DMA_SET_SBUS64:
1443 	{
1444 		struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
1445 
1446 		return (iommu_dma_lim_setup(dip, rdip, mppriv->softsp,
1447 		    &mp->dmai_burstsizes, (uint_t)*lenp, &mp->dmai_minxfer,
1448 		    DDI_DMA_SBUS_64BIT));
1449 	}
1450 
1451 	case DDI_DMA_HTOC:
1452 		DPRINTF(IOMMU_DMAMCTL_HTOC_DEBUG, ("htoc off %lx mapping %lx "
1453 		    "size %lx\n", *offp, mp->dmai_mapping,
1454 		    mp->dmai_size));
1455 
1456 		if ((uint_t)(*offp) >= mp->dmai_size)
1457 			return (DDI_FAILURE);
1458 
1459 		cp = (ddi_dma_cookie_t *)objp;
1460 		cp->dmac_notused = 0;
1461 		cp->dmac_address = (mp->dmai_mapping + (uint_t)(*offp));
1462 		cp->dmac_size =
1463 		    mp->dmai_mapping + mp->dmai_size - cp->dmac_address;
1464 		cp->dmac_type = 0;
1465 
1466 		break;
1467 
1468 	case DDI_DMA_KVADDR:
1469 		/*
1470 		 * If a physical address mapping has percolated this high,
1471 		 * that is an error (maybe?).
1472 		 */
1473 		if (mp->dmai_rflags & DMP_PHYSADDR) {
1474 			DPRINTF(IOMMU_DMAMCTL_KVADDR_DEBUG, ("kvaddr of phys "
1475 			    "mapping\n"));
1476 			return (DDI_FAILURE);
1477 		}
1478 
1479 		return (DDI_FAILURE);
1480 
1481 	case DDI_DMA_NEXTWIN:
1482 	{
1483 		ddi_dma_win_t *owin, *nwin;
1484 		uint_t winsize, newoff;
1485 		int rval;
1486 
1487 		DPRINTF(IOMMU_DMAMCTL_NEXTWIN_DEBUG, ("nextwin\n"));
1488 
1489 		mp = (ddi_dma_impl_t *)handle;
1490 		owin = (ddi_dma_win_t *)offp;
1491 		nwin = (ddi_dma_win_t *)objp;
1492 		if (mp->dmai_rflags & DDI_DMA_PARTIAL) {
1493 			if (*owin == NULL) {
1494 				DPRINTF(IOMMU_DMAMCTL_NEXTWIN_DEBUG,
1495 				    ("nextwin: win == NULL\n"));
1496 				mp->dmai_offset = 0;
1497 				*nwin = (ddi_dma_win_t)mp;
1498 				return (DDI_SUCCESS);
1499 			}
1500 
1501 			offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET);
1502 			winsize = iommu_ptob(mp->dmai_ndvmapages -
1503 			    iommu_btopr(offset));
1504 
1505 			newoff = (uint_t)(mp->dmai_offset + winsize);
1506 			if (newoff > mp->dmai_object.dmao_size -
1507 			    mp->dmai_minxfer)
1508 				return (DDI_DMA_DONE);
1509 
1510 			if ((rval = iommu_map_window(mp, newoff, winsize))
1511 			    != DDI_SUCCESS)
1512 				return (rval);
1513 		} else {
1514 			DPRINTF(IOMMU_DMAMCTL_NEXTWIN_DEBUG, ("nextwin: no "
1515 			    "partial mapping\n"));
1516 			if (*owin != NULL)
1517 				return (DDI_DMA_DONE);
1518 			mp->dmai_offset = 0;
1519 			*nwin = (ddi_dma_win_t)mp;
1520 		}
1521 		break;
1522 	}
1523 
1524 	case DDI_DMA_NEXTSEG:
1525 	{
1526 		ddi_dma_seg_t *oseg, *nseg;
1527 
1528 		DPRINTF(IOMMU_DMAMCTL_NEXTSEG_DEBUG, ("nextseg:\n"));
1529 
1530 		oseg = (ddi_dma_seg_t *)lenp;
1531 		if (*oseg != NULL)
1532 			return (DDI_DMA_DONE);
1533 		nseg = (ddi_dma_seg_t *)objp;
1534 		*nseg = *((ddi_dma_seg_t *)offp);
1535 		break;
1536 	}
1537 
1538 	case DDI_DMA_SEGTOC:
1539 	{
1540 		ddi_dma_seg_impl_t *seg;
1541 
1542 		seg = (ddi_dma_seg_impl_t *)handle;
1543 		cp = (ddi_dma_cookie_t *)objp;
1544 		cp->dmac_notused = 0;
1545 		cp->dmac_address = (ioaddr_t)seg->dmai_mapping;
1546 		cp->dmac_size = *lenp = seg->dmai_size;
1547 		cp->dmac_type = 0;
1548 		*offp = seg->dmai_offset;
1549 		break;
1550 	}
1551 
1552 	case DDI_DMA_MOVWIN:
1553 	{
1554 		uint_t winsize;
1555 		uint_t newoff;
1556 		int rval;
1557 
1558 		offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET);
1559 		winsize = iommu_ptob(mp->dmai_ndvmapages - iommu_btopr(offset));
1560 
1561 		DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("movwin off %lx len %x "
1562 		    "winsize %x\n", *offp, *lenp, winsize));
1563 
1564 		if ((mp->dmai_rflags & DDI_DMA_PARTIAL) == 0)
1565 			return (DDI_FAILURE);
1566 
1567 		if (*lenp != (uint_t)-1 && *lenp != winsize) {
1568 			DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("bad length\n"));
1569 			return (DDI_FAILURE);
1570 		}
1571 		newoff = (uint_t)*offp;
1572 		if (newoff & (winsize - 1)) {
1573 			DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("bad off\n"));
1574 			return (DDI_FAILURE);
1575 		}
1576 
1577 		if (newoff == mp->dmai_offset) {
1578 			/*
1579 			 * Nothing to do...
1580 			 */
1581 			break;
1582 		}
1583 
1584 		/*
1585 		 * Check out new address...
1586 		 */
1587 		if (newoff > mp->dmai_object.dmao_size - mp->dmai_minxfer) {
1588 			DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("newoff out of "
1589 			    "range\n"));
1590 			return (DDI_FAILURE);
1591 		}
1592 
1593 		rval = iommu_map_window(mp, newoff, winsize);
1594 		if (rval != DDI_SUCCESS)
1595 			return (rval);
1596 
1597 		if ((cp = (ddi_dma_cookie_t *)objp) != 0) {
1598 			cp->dmac_notused = 0;
1599 			cp->dmac_address = (ioaddr_t)mp->dmai_mapping;
1600 			cp->dmac_size = mp->dmai_size;
1601 			cp->dmac_type = 0;
1602 		}
1603 		*offp = (off_t)newoff;
1604 		*lenp = (uint_t)winsize;
1605 		break;
1606 	}
1607 
1608 	case DDI_DMA_REPWIN:
1609 		if ((mp->dmai_rflags & DDI_DMA_PARTIAL) == 0) {
1610 			DPRINTF(IOMMU_DMAMCTL_REPWIN_DEBUG, ("repwin fail\n"));
1611 			return (DDI_FAILURE);
1612 		}
1613 
1614 		*offp = (off_t)mp->dmai_offset;
1615 
1616 		addr = mp->dmai_ndvmapages -
1617 		    iommu_btopr(mp->dmai_mapping & IOMMU_PAGEOFFSET);
1618 
1619 		*lenp = (uint_t)iommu_ptob(addr);
1620 
1621 		DPRINTF(IOMMU_DMAMCTL_REPWIN_DEBUG, ("repwin off %x len %x\n",
1622 		    mp->dmai_offset, mp->dmai_size));
1623 
1624 		break;
1625 
1626 	case DDI_DMA_GETERR:
1627 		DPRINTF(IOMMU_DMAMCTL_GETERR_DEBUG,
1628 		    ("iommu_dma_mctl: geterr\n"));
1629 
1630 		break;
1631 
1632 	case DDI_DMA_COFF:
1633 		cp = (ddi_dma_cookie_t *)offp;
1634 		addr = cp->dmac_address;
1635 
1636 		if (addr < mp->dmai_mapping ||
1637 		    addr >= mp->dmai_mapping + mp->dmai_size)
1638 			return (DDI_FAILURE);
1639 
1640 		*objp = (caddr_t)(addr - mp->dmai_mapping);
1641 
1642 		DPRINTF(IOMMU_DMAMCTL_COFF_DEBUG, ("coff off %lx mapping %x "
1643 		    "size %x\n", (ulong_t)*objp, mp->dmai_mapping,
1644 		    mp->dmai_size));
1645 
1646 		break;
1647 
1648 	case DDI_DMA_RESERVE:
1649 	{
1650 		struct ddi_dma_req *dmareq = (struct ddi_dma_req *)offp;
1651 		ddi_dma_lim_t *dma_lim;
1652 		ddi_dma_handle_t *handlep;
1653 		uint_t np;
1654 		ioaddr_t ioaddr;
1655 		int i;
1656 		struct fast_dvma *iommu_fast_dvma;
1657 		struct sbus_soft_state *softsp =
1658 		    (struct sbus_soft_state *)ddi_get_soft_state(sbusp,
1659 		    ddi_get_instance(dip));
1660 
1661 		/* Some simple sanity checks */
1662 		dma_lim = dmareq->dmar_limits;
1663 		if (dma_lim->dlim_burstsizes == 0) {
1664 			DPRINTF(IOMMU_FASTDMA_RESERVE,
1665 			    ("Reserve: bad burstsizes\n"));
1666 			return (DDI_DMA_BADLIMITS);
1667 		}
1668 		if ((AHI <= ALO) || (AHI < softsp->iommu_dvma_base)) {
1669 			DPRINTF(IOMMU_FASTDMA_RESERVE,
1670 			    ("Reserve: bad limits\n"));
1671 			return (DDI_DMA_BADLIMITS);
1672 		}
1673 
1674 		np = dmareq->dmar_object.dmao_size;
1675 		mutex_enter(&softsp->dma_pool_lock);
1676 		if (np > softsp->dma_reserve) {
1677 			mutex_exit(&softsp->dma_pool_lock);
1678 			DPRINTF(IOMMU_FASTDMA_RESERVE,
1679 			    ("Reserve: dma_reserve is exhausted\n"));
1680 			return (DDI_DMA_NORESOURCES);
1681 		}
1682 
1683 		softsp->dma_reserve -= np;
1684 		mutex_exit(&softsp->dma_pool_lock);
1685 		mp = kmem_zalloc(sizeof (*mp), KM_SLEEP);
1686 		mp->dmai_rflags = DMP_BYPASSNEXUS;
1687 		mp->dmai_rdip = rdip;
1688 		mp->dmai_minxfer = dma_lim->dlim_minxfer;
1689 		mp->dmai_burstsizes = dma_lim->dlim_burstsizes;
1690 
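		/*
		 * Carve the reserved range out of the DVMA arena, honoring
		 * the counter (segment) limit and address range from the
		 * caller's DMA limits.
		 */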
1691 		ioaddr = (ioaddr_t)vmem_xalloc(softsp->dvma_arena,
1692 		    iommu_ptob(np), IOMMU_PAGESIZE, 0,
1693 		    dma_lim->dlim_cntr_max + 1, (void *)ALO, (void *)(AHI + 1),
1694 		    dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP);
1695 
1696 		if (ioaddr == 0) {
1697 			mutex_enter(&softsp->dma_pool_lock);
1698 			softsp->dma_reserve += np;
1699 			mutex_exit(&softsp->dma_pool_lock);
1700 			kmem_free(mp, sizeof (*mp));
1701 			DPRINTF(IOMMU_FASTDMA_RESERVE,
1702 			    ("Reserve: No dvma resources available\n"));
1703 			return (DDI_DMA_NOMAPPING);
1704 		}
1705 
1706 		/* create a per request structure */
1707 		iommu_fast_dvma = kmem_alloc(sizeof (struct fast_dvma),
1708 		    KM_SLEEP);
1709 
1710 		/*
1711 		 * We need to remember the size of the transfer so that
1712 		 * we can figure the virtual pages to sync when the transfer
1713 		 * is complete.
1714 		 */
1715 		iommu_fast_dvma->pagecnt = kmem_zalloc(np *
1716 		    sizeof (uint_t), KM_SLEEP);
1717 
1718 		/* Allocate a streaming cache sync flag for each index */
1719 		iommu_fast_dvma->sync_flag = kmem_zalloc(np *
1720 		    sizeof (int), KM_SLEEP);
1721 
1722 		/* Allocate a physical sync flag for each index */
1723 		iommu_fast_dvma->phys_sync_flag =
1724 		    kmem_zalloc(np * sizeof (uint64_t), KM_SLEEP);
1725 
1726 		for (i = 0; i < np; i++)
1727 			iommu_fast_dvma->phys_sync_flag[i] = va_to_pa((caddr_t)
1728 			    &iommu_fast_dvma->sync_flag[i]);
1729 
1730 		mp->dmai_mapping = ioaddr;
1731 		mp->dmai_ndvmapages = np;
1732 		iommu_fast_dvma->ops = &iommu_dvma_ops;
1733 		iommu_fast_dvma->softsp = (caddr_t)softsp;
1734 		mp->dmai_nexus_private = (caddr_t)iommu_fast_dvma;
1735 		handlep = (ddi_dma_handle_t *)objp;
1736 		*handlep = (ddi_dma_handle_t)mp;
1737 
1738 		DPRINTF(IOMMU_FASTDMA_RESERVE,
1739 		    ("Reserve: mapping object %p base addr %lx size %x\n",
1740 		    mp, mp->dmai_mapping, mp->dmai_ndvmapages));
1741 
1742 		break;
1743 	}
1744 
1745 	case DDI_DMA_RELEASE:
1746 	{
1747 		ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
1748 		uint_t np = npages = mp->dmai_ndvmapages;
1749 		ioaddr_t ioaddr = mp->dmai_mapping;
1750 		volatile uint64_t *iotte_ptr;
1751 		struct fast_dvma *iommu_fast_dvma = (struct fast_dvma *)
1752 		    mp->dmai_nexus_private;
1753 		struct sbus_soft_state *softsp = (struct sbus_soft_state *)
1754 		    iommu_fast_dvma->softsp;
1755 
1756 		ASSERT(softsp != NULL);
1757 
1758 		/* Unload stale mappings and flush stale tlb's */
1759 		iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
1760 
1761 		while (npages > (uint_t)0) {
1762 			*iotte_ptr = (uint64_t)0;	/* unload tte */
1763 			iommu_tlb_flush(softsp, ioaddr, 1);
1764 
1765 			npages--;
1766 			iotte_ptr++;
1767 			ioaddr += IOMMU_PAGESIZE;
1768 		}
1769 
1770 		ioaddr = (ioaddr_t)mp->dmai_mapping;
1771 		mutex_enter(&softsp->dma_pool_lock);
1772 		softsp->dma_reserve += np;
1773 		mutex_exit(&softsp->dma_pool_lock);
1774 
1775 		if (mp->dmai_rflags & DMP_NOLIMIT)
1776 			vmem_free(softsp->dvma_arena, (void *)ioaddr,
1777 			    iommu_ptob(np));
1778 		else
1779 			vmem_xfree(softsp->dvma_arena, (void *)ioaddr,
1780 			    iommu_ptob(np));
1781 
1782 		kmem_free(mp, sizeof (*mp));
1783 		kmem_free(iommu_fast_dvma->pagecnt, np * sizeof (uint_t));
1784 		kmem_free(iommu_fast_dvma->sync_flag, np * sizeof (int));
1785 		kmem_free(iommu_fast_dvma->phys_sync_flag, np *
1786 		    sizeof (uint64_t));
1787 		kmem_free(iommu_fast_dvma, sizeof (struct fast_dvma));
1788 
1790 		DPRINTF(IOMMU_FASTDMA_RESERVE,
1791 		    ("Release: Base addr %x size %x\n", ioaddr, np));
1792 		/*
1793 		 * Now that we've freed some resource,
1794 		 * if there is anybody waiting for it
1795 		 * try and get them going.
1796 		 */
1797 		if (softsp->dvma_call_list_id != 0)
1798 			ddi_run_callback(&softsp->dvma_call_list_id);
1799 
1800 		break;
1801 	}
1802 
1803 	default:
1804 		DPRINTF(IOMMU_DMAMCTL_DEBUG, ("iommu_dma_mctl: unknown option "
1805 		    "0%x\n", request));
1806 
1807 		return (DDI_FAILURE);
1808 	}
1809 	return (DDI_SUCCESS);
1810 }
1811 
1812 /*ARGSUSED*/
1813 void
1814 iommu_dvma_kaddr_load(ddi_dma_handle_t h, caddr_t a, uint_t len, uint_t index,
1815     ddi_dma_cookie_t *cp)
1816 {
1817 	uintptr_t addr;
1818 	ioaddr_t ioaddr;
1819 	uint_t offset;
1820 	pfn_t pfn;
1821 	int npages;
1822 	volatile uint64_t *iotte_ptr;
1823 	uint64_t iotte_flag = 0;
1824 	struct as *as = NULL;
1825 	extern struct as kas;
1826 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
1827 	struct fast_dvma *iommu_fast_dvma =
1828 	    (struct fast_dvma *)mp->dmai_nexus_private;
1829 	struct sbus_soft_state *softsp = (struct sbus_soft_state *)
1830 	    iommu_fast_dvma->softsp;
1831 #if defined(DEBUG) && defined(IO_MEMUSAGE)
1832 	struct io_mem_list *iomemp;
1833 	pfn_t *pfnp;
1834 #endif /* DEBUG && IO_MEMUSAGE */
1835 
1836 	ASSERT(softsp != NULL);
1837 
1838 	addr = (uintptr_t)a;
1839 	ioaddr = (ioaddr_t)(mp->dmai_mapping + iommu_ptob(index));
1840 	offset = (uint_t)(addr & IOMMU_PAGEOFFSET);
1841 	iommu_fast_dvma->pagecnt[index] = iommu_btopr(len + offset);
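	/* remember the page count so unload and sync know how much to flush */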
1842 	as = &kas;
1843 	addr &= ~IOMMU_PAGEOFFSET;
1844 	npages = iommu_btopr(len + offset);
1845 
1846 #if defined(DEBUG) && defined(IO_MEMUSAGE)
1847 	iomemp = kmem_alloc(sizeof (struct io_mem_list), KM_SLEEP);
1848 	iomemp->rdip = mp->dmai_rdip;
1849 	iomemp->ioaddr = ioaddr;
1850 	iomemp->addr = addr;
1851 	iomemp->npages = npages;
1852 	pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) * (npages + 1),
1853 	    KM_SLEEP);
1854 #endif /* DEBUG && IO_MEMUSAGE */
1855 
1856 	cp->dmac_address = ioaddr | offset;
1857 	cp->dmac_size = len;
1858 
1859 	iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
1860 	/* read/write and streaming io on */
1861 	iotte_flag = IOTTE_VALID | IOTTE_WRITE | IOTTE_CACHE;
1862 
1863 	if (mp->dmai_rflags & DDI_DMA_CONSISTENT)
1864 		mp->dmai_rflags |= DMP_NOSYNC;
1865 	else if (!softsp->stream_buf_off)
1866 		iotte_flag |= IOTTE_STREAM;
1867 
1868 	DPRINTF(IOMMU_FASTDMA_LOAD, ("kaddr_load: ioaddr %x "
1869 	    "size %x offset %x index %x kaddr %p\n",
1870 	    ioaddr, len, offset, index, addr));
1871 	ASSERT(npages > 0);
1872 	do {
1873 		pfn = hat_getpfnum(as->a_hat, (caddr_t)addr);
1874 		if (pfn == PFN_INVALID) {
1875 			DPRINTF(IOMMU_FASTDMA_LOAD, ("kaddr_load: invalid pfn "
1876 			    "from hat_getpfnum()\n"));
1877 		}
1878 
1879 		iommu_tlb_flush(softsp, ioaddr, 1);
1880 
1881 		/* load tte */
1882 		*iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;
1883 
1884 		npages--;
1885 		iotte_ptr++;
1886 
1887 		addr += IOMMU_PAGESIZE;
1888 		ioaddr += IOMMU_PAGESIZE;
1889 
1890 #if defined(DEBUG) && defined(IO_MEMUSAGE)
1891 		*pfnp = pfn;
1892 		pfnp++;
1893 #endif /* DEBUG && IO_MEMUSAGE */
1894 
1895 	} while (npages > 0);
1896 
1897 #if defined(DEBUG) && defined(IO_MEMUSAGE)
1898 	mutex_enter(&softsp->iomemlock);
1899 	iomemp->next = softsp->iomem;
1900 	softsp->iomem = iomemp;
1901 	mutex_exit(&softsp->iomemlock);
1902 #endif /* DEBUG && IO_MEMUSAGE */
1903 }
1904 
1905 /*ARGSUSED*/
1906 void
1907 iommu_dvma_unload(ddi_dma_handle_t h, uint_t index, uint_t view)
1908 {
1909 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
1910 	ioaddr_t ioaddr;
1911 	pgcnt_t npages;
1912 	struct fast_dvma *iommu_fast_dvma =
1913 	    (struct fast_dvma *)mp->dmai_nexus_private;
1914 	struct sbus_soft_state *softsp = (struct sbus_soft_state *)
1915 	    iommu_fast_dvma->softsp;
1916 #if defined(DEBUG) && defined(IO_MEMUSAGE)
1917 	struct io_mem_list **prevp, *walk;
1918 #endif /* DEBUG && IO_MEMUSAGE */
1919 
1920 	ASSERT(softsp != NULL);
1921 
1922 	ioaddr = (ioaddr_t)(mp->dmai_mapping + iommu_ptob(index));
1923 	npages = iommu_fast_dvma->pagecnt[index];
1924 
1925 #if defined(DEBUG) && defined(IO_MEMUSAGE)
1926 	mutex_enter(&softsp->iomemlock);
1927 	prevp = &softsp->iomem;
1928 	walk = softsp->iomem;
1929 
1930 	while (walk != NULL) {
1931 		if (walk->ioaddr == ioaddr) {
1932 			*prevp = walk->next;
1933 			break;
1934 		}
1935 		prevp = &walk->next;
1936 		walk = walk->next;
1937 	}
1938 	mutex_exit(&softsp->iomemlock);
1939 
1940 	kmem_free(walk->pfn, sizeof (pfn_t) * (npages + 1));
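	/* as in iommu_remove_mappings(), the entry is expected to be present */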
1941 	kmem_free(walk, sizeof (struct io_mem_list));
1942 #endif /* DEBUG && IO_MEMUSAGE */
1943 
1944 	DPRINTF(IOMMU_FASTDMA_SYNC, ("kaddr_unload: handle %p sync flag "
1945 	    "addr %p sync flag pfn %x index %x page count %x\n", mp,
1946 	    &iommu_fast_dvma->sync_flag[index],
1947 	    iommu_fast_dvma->phys_sync_flag[index],
1948 	    index, npages));
1949 
1950 	if ((mp->dmai_rflags & DMP_NOSYNC) != DMP_NOSYNC) {
1951 		sync_stream_buf(softsp, ioaddr, npages,
1952 		    (int *)&iommu_fast_dvma->sync_flag[index],
1953 		    iommu_fast_dvma->phys_sync_flag[index]);
1954 	}
1955 }
1956 
1957 /*ARGSUSED*/
1958 void
1959 iommu_dvma_sync(ddi_dma_handle_t h, uint_t index, uint_t view)
1960 {
1961 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
1962 	ioaddr_t ioaddr;
1963 	uint_t npages;
1964 	struct fast_dvma *iommu_fast_dvma =
1965 	    (struct fast_dvma *)mp->dmai_nexus_private;
1966 	struct sbus_soft_state *softsp = (struct sbus_soft_state *)
1967 	    iommu_fast_dvma->softsp;
1968 
1969 	if ((mp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
1970 		return;
1971 
1972 	ASSERT(softsp != NULL);
1973 	ioaddr = (ioaddr_t)(mp->dmai_mapping + iommu_ptob(index));
1974 	npages = iommu_fast_dvma->pagecnt[index];
1975 
1976 	DPRINTF(IOMMU_FASTDMA_SYNC, ("kaddr_sync: handle %p, "
1977 	    "sync flag addr %p, sync flag pfn %x\n", mp,
1978 	    &iommu_fast_dvma->sync_flag[index],
1979 	    iommu_fast_dvma->phys_sync_flag[index]));
1980 
1981 	sync_stream_buf(softsp, ioaddr, npages,
1982 	    (int *)&iommu_fast_dvma->sync_flag[index],
1983 	    iommu_fast_dvma->phys_sync_flag[index]);
1984 }
1985