xref: /titanic_51/usr/src/uts/sun4u/io/iommu.c (revision 2a9459bdd821c1cf59590a7a9069ac9c591e8a6b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/types.h>
30 #include <sys/param.h>
31 #include <sys/conf.h>
32 #include <sys/ddi.h>
33 #include <sys/sunddi.h>
34 #include <sys/ddi_impldefs.h>
35 #include <sys/cmn_err.h>
36 #include <sys/kmem.h>
37 #include <sys/vmem.h>
38 #include <sys/sysmacros.h>
39 
40 #include <sys/ddidmareq.h>
41 #include <sys/sysiosbus.h>
42 #include <sys/iommu.h>
43 #include <sys/iocache.h>
44 #include <sys/dvma.h>
45 
46 #include <vm/as.h>
47 #include <vm/hat.h>
48 #include <vm/page.h>
49 #include <vm/hat_sfmmu.h>
50 #include <sys/machparam.h>
51 #include <sys/machsystm.h>
52 #include <sys/vmsystm.h>
53 #include <sys/iommutsb.h>
54 
55 /* Useful debugging Stuff */
56 #include <sys/nexusdebug.h>
57 #include <sys/debug.h>
58 /* Bitfield debugging definitions for this file */
59 #define	IOMMU_GETDVMAPAGES_DEBUG	0x1
60 #define	IOMMU_DMAMAP_DEBUG		0x2
61 #define	IOMMU_DMAMCTL_DEBUG		0x4
62 #define	IOMMU_DMAMCTL_SYNC_DEBUG	0x8
63 #define	IOMMU_DMAMCTL_HTOC_DEBUG	0x10
64 #define	IOMMU_DMAMCTL_KVADDR_DEBUG	0x20
65 #define	IOMMU_DMAMCTL_NEXTWIN_DEBUG	0x40
66 #define	IOMMU_DMAMCTL_NEXTSEG_DEBUG	0x80
67 #define	IOMMU_DMAMCTL_MOVWIN_DEBUG	0x100
68 #define	IOMMU_DMAMCTL_REPWIN_DEBUG	0x200
69 #define	IOMMU_DMAMCTL_GETERR_DEBUG	0x400
70 #define	IOMMU_DMAMCTL_COFF_DEBUG	0x800
71 #define	IOMMU_DMAMCTL_DMA_FREE_DEBUG	0x1000
72 #define	IOMMU_REGISTERS_DEBUG		0x2000
73 #define	IOMMU_DMA_SETUP_DEBUG		0x4000
74 #define	IOMMU_DMA_UNBINDHDL_DEBUG	0x8000
75 #define	IOMMU_DMA_BINDHDL_DEBUG		0x10000
76 #define	IOMMU_DMA_WIN_DEBUG		0x20000
77 #define	IOMMU_DMA_ALLOCHDL_DEBUG	0x40000
78 #define	IOMMU_DMA_LIM_SETUP_DEBUG	0x80000
79 #define	IOMMU_FASTDMA_RESERVE		0x100000
80 #define	IOMMU_FASTDMA_LOAD		0x200000
81 #define	IOMMU_INTER_INTRA_XFER		0x400000
82 #define	IOMMU_TTE			0x800000
83 #define	IOMMU_TLB			0x1000000
84 #define	IOMMU_FASTDMA_SYNC		0x2000000
85 
86 /* Turn on if you need to keep track of outstanding IOMMU usage */
87 /* #define	IO_MEMUSAGE */
88 /* Turn on to debug IOMMU unmapping code */
89 /* #define	IO_MEMDEBUG */
90 
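/*
 * Fast DVMA ops vector.  Handles created by the DDI_DMA_RESERVE ctlop
 * below carry a pointer to this vector in their fast_dvma structure, and
 * drivers invoke these entry points (the dvma_kaddr_load(9F) family) to
 * load, unload and sync individual DVMA pages without a full bind/unbind
 * cycle.
 */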
91 static struct dvma_ops iommu_dvma_ops = {
92 	DVMAO_REV,
93 	iommu_dvma_kaddr_load,
94 	iommu_dvma_unload,
95 	iommu_dvma_sync
96 };
97 
98 extern void *sbusp;		/* sbus soft state hook */
99 
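/*
 * Upper bound, in bytes, on DVMA allocations that the vmem quantum
 * caches will satisfy; passed as the qcache_max argument to
 * vmem_create() in iommu_init() below.
 */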
100 #define	DVMA_MAX_CACHE	65536
101 
102 /*
103  * This is the number of pages that a mapping request needs before we force
104  * the TLB flush code to use diagnostic registers.  This value was determined
105  * through a series of test runs measuring DMA mapping setup performance.
106  */
107 int tlb_flush_using_diag = 16;
108 
109 int sysio_iommu_tsb_sizes[] = {
110 	IOMMU_TSB_SIZE_8M,
111 	IOMMU_TSB_SIZE_16M,
112 	IOMMU_TSB_SIZE_32M,
113 	IOMMU_TSB_SIZE_64M,
114 	IOMMU_TSB_SIZE_128M,
115 	IOMMU_TSB_SIZE_256M,
116 	IOMMU_TSB_SIZE_512M,
117 	IOMMU_TSB_SIZE_1G
118 };
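/*
 * iommu_resume_init() below walks this table to find the largest entry
 * that fits in the TSB actually allocated; the index of that entry is
 * what gets programmed into the TSB_SIZE field of the IOMMU control
 * register.
 */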
119 
120 static int iommu_map_window(ddi_dma_impl_t *, off_t, size_t);
121 
122 int
123 iommu_init(struct sbus_soft_state *softsp, caddr_t address)
124 {
125 	int i;
126 	char name[40];
127 
128 #ifdef DEBUG
129 	debug_info = 1;
130 #endif
131 
132 	/*
133 	 * Simply add each register's offset to the base address
134 	 * to calculate the already mapped virtual address of
135 	 * the device register...
136 	 *
137 	 * Define a macro for the pointer arithmetic; all registers
138 	 * are 64 bits wide and are defined as uint64_t's.
139 	 */
140 
141 #define	REG_ADDR(b, o)	(uint64_t *)((caddr_t)(b) + (o))
142 
143 	softsp->iommu_ctrl_reg = REG_ADDR(address, OFF_IOMMU_CTRL_REG);
144 	softsp->tsb_base_addr = REG_ADDR(address, OFF_TSB_BASE_ADDR);
145 	softsp->iommu_flush_reg = REG_ADDR(address, OFF_IOMMU_FLUSH_REG);
146 	softsp->iommu_tlb_tag = REG_ADDR(address, OFF_IOMMU_TLB_TAG);
147 	softsp->iommu_tlb_data = REG_ADDR(address, OFF_IOMMU_TLB_DATA);
148 
149 #undef REG_ADDR
150 
151 	mutex_init(&softsp->dma_pool_lock, NULL, MUTEX_DEFAULT, NULL);
152 	mutex_init(&softsp->intr_poll_list_lock, NULL, MUTEX_DEFAULT, NULL);
153 
154 	/* Set up the DVMA resource sizes */
155 	if ((softsp->iommu_tsb_cookie = iommu_tsb_alloc(softsp->upa_id)) ==
156 	    IOMMU_TSB_COOKIE_NONE) {
157 		cmn_err(CE_WARN, "%s%d: Unable to retrieve IOMMU array.",
158 		    ddi_driver_name(softsp->dip),
159 		    ddi_get_instance(softsp->dip));
160 		return (DDI_FAILURE);
161 	}
162 	softsp->soft_tsb_base_addr =
163 	    iommu_tsb_cookie_to_va(softsp->iommu_tsb_cookie);
164 	softsp->iommu_dvma_size =
165 	    iommu_tsb_cookie_to_size(softsp->iommu_tsb_cookie) <<
166 	    IOMMU_TSB_TO_RNG;
167 	softsp->iommu_dvma_base = (ioaddr_t)
168 	    (0 - (ioaddr_t)softsp->iommu_dvma_size);
169 
170 	(void) snprintf(name, sizeof (name), "%s%d_dvma",
171 	    ddi_driver_name(softsp->dip), ddi_get_instance(softsp->dip));
172 
173 	/*
174 	 * Initialize the DVMA vmem arena.
175 	 */
176 	softsp->dvma_arena = vmem_create(name,
177 	    (void *)(uintptr_t)softsp->iommu_dvma_base,
178 	    softsp->iommu_dvma_size, PAGESIZE, NULL, NULL, NULL,
179 	    DVMA_MAX_CACHE, VM_SLEEP);
180 
181 	/* Set the limit for dvma_reserve() to 1/2 of the total dvma space */
182 	softsp->dma_reserve = iommu_btop(softsp->iommu_dvma_size >> 1);
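	/*
	 * dma_reserve is kept in pages; DDI_DMA_RESERVE requests below draw
	 * from it, and DDI_DMA_RELEASE returns pages to it, under
	 * dma_pool_lock.
	 */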
183 
184 #if defined(DEBUG) && defined(IO_MEMUSAGE)
185 	mutex_init(&softsp->iomemlock, NULL, MUTEX_DEFAULT, NULL);
186 	softsp->iomem = (struct io_mem_list *)0;
187 #endif /* DEBUG && IO_MEMUSAGE */
188 	/*
189 	 * Get the base address of the TSB table and store it in the hardware
190 	 */
191 
192 	/*
193 	 * We plan on the PROM flushing all TLB entries.  If this is not the
194 	 * case, this is where we should flush the hardware TLB.
195 	 */
196 
197 	/* Set the IOMMU registers */
198 	(void) iommu_resume_init(softsp);
199 
200 	/* check the convenient copy of TSB base, and flush write buffers */
201 	if (*softsp->tsb_base_addr !=
202 	    va_to_pa((caddr_t)softsp->soft_tsb_base_addr)) {
203 		iommu_tsb_free(softsp->iommu_tsb_cookie);
204 		return (DDI_FAILURE);
205 	}
206 
207 	softsp->sbus_io_lo_pfn = UINT32_MAX;
208 	softsp->sbus_io_hi_pfn = 0;
209 	for (i = 0; i < sysio_pd_getnrng(softsp->dip); i++) {
210 		struct rangespec *rangep;
211 		uint64_t addr;
212 		pfn_t hipfn, lopfn;
213 
214 		rangep = sysio_pd_getrng(softsp->dip, i);
215 		addr = (uint64_t)((uint64_t)rangep->rng_bustype << 32);
216 		addr |= (uint64_t)rangep->rng_offset;
217 		lopfn = (pfn_t)(addr >> MMU_PAGESHIFT);
218 		addr += (uint64_t)(rangep->rng_size - 1);
219 		hipfn = (pfn_t)(addr >> MMU_PAGESHIFT);
220 
221 		softsp->sbus_io_lo_pfn = (lopfn < softsp->sbus_io_lo_pfn) ?
222 		    lopfn : softsp->sbus_io_lo_pfn;
223 
224 		softsp->sbus_io_hi_pfn = (hipfn > softsp->sbus_io_hi_pfn) ?
225 		    hipfn : softsp->sbus_io_hi_pfn;
226 	}
227 
228 	DPRINTF(IOMMU_REGISTERS_DEBUG, ("IOMMU Control reg: %p IOMMU TSB "
229 	    "base reg: %p IOMMU flush reg: %p TSB base addr %p\n",
230 	    softsp->iommu_ctrl_reg, softsp->tsb_base_addr,
231 	    softsp->iommu_flush_reg, softsp->soft_tsb_base_addr));
232 
233 	return (DDI_SUCCESS);
234 }
235 
236 /*
237  * Uninitialize the IOMMU and release the TSB back to the spare pool.
238  * See startup.c for TSB spare management.
239  */
240 
241 int
242 iommu_uninit(struct sbus_soft_state *softsp)
243 {
244 	vmem_destroy(softsp->dvma_arena);
245 
246 	/* flip off the IOMMU enable switch */
247 	*softsp->iommu_ctrl_reg &=
248 		(TSB_SIZE << TSB_SIZE_SHIFT | IOMMU_DISABLE);
249 
250 	iommu_tsb_free(softsp->iommu_tsb_cookie);
251 
252 	return (DDI_SUCCESS);
253 }
254 
255 /*
256  * Initialize iommu hardware registers when the system is being resumed.
257  * (Subset of iommu_init())
258  */
259 int
260 iommu_resume_init(struct sbus_soft_state *softsp)
261 {
262 	int i;
263 	uint_t tsb_size;
264 	uint_t tsb_bytes;
265 
266 	/*
267 	 * Reset the base address of the TSB table in the hardware
268 	 */
269 	*softsp->tsb_base_addr = va_to_pa((caddr_t)softsp->soft_tsb_base_addr);
270 
271 	/*
272 	 * Figure out the correct size of the IOMMU TSB entries.  If we
273 	 * end up with a size smaller than that needed for 8M of IOMMU
274 	 * space, default the size to 8M.  XXX We could probably panic here
275 	 */
276 	i = sizeof (sysio_iommu_tsb_sizes) / sizeof (sysio_iommu_tsb_sizes[0])
277 	    - 1;
278 
279 	tsb_bytes = iommu_tsb_cookie_to_size(softsp->iommu_tsb_cookie);
280 
281 	while (i > 0) {
282 		if (tsb_bytes >= sysio_iommu_tsb_sizes[i])
283 			break;
284 		i--;
285 	}
286 
287 	tsb_size = i;
288 
289 	/* OK, let's flip the "on" switch of the IOMMU */
290 	*softsp->iommu_ctrl_reg = (uint64_t)(tsb_size << TSB_SIZE_SHIFT
291 	    | IOMMU_ENABLE | IOMMU_DIAG_ENABLE);
292 
293 	return (DDI_SUCCESS);
294 }
295 
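/*
 * Flush any IOMMU TLB entries covering the npages-page range starting
 * at addr.  A single page is flushed by writing the flush register
 * directly; for larger ranges we walk the TLB tag and data diagnostic
 * registers and flush only the valid entries whose tags fall inside the
 * range.  The trailing read of sbus_ctrl_reg ensures the posted flush
 * writes have completed before we return.
 */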
296 void
297 iommu_tlb_flush(struct sbus_soft_state *softsp, ioaddr_t addr, pgcnt_t npages)
298 {
299 	volatile uint64_t tmpreg;
300 	volatile uint64_t *vaddr_reg, *valid_bit_reg;
301 	ioaddr_t hiaddr, ioaddr;
302 	int i, do_flush = 0;
303 
304 	if (npages == 1) {
305 		*softsp->iommu_flush_reg = (uint64_t)addr;
306 		tmpreg = *softsp->sbus_ctrl_reg;
307 		return;
308 	}
309 
310 	hiaddr = addr + (ioaddr_t)(npages * IOMMU_PAGESIZE);
311 	for (i = 0, vaddr_reg = softsp->iommu_tlb_tag,
312 	    valid_bit_reg = softsp->iommu_tlb_data;
313 	    i < IOMMU_TLB_ENTRIES; i++, vaddr_reg++, valid_bit_reg++) {
314 		tmpreg = *vaddr_reg;
315 		ioaddr = (ioaddr_t)((tmpreg & IOMMU_TLBTAG_VA_MASK) <<
316 		    IOMMU_TLBTAG_VA_SHIFT);
317 
318 		DPRINTF(IOMMU_TLB, ("Vaddr reg 0x%p, "
319 		    "TLB vaddr reg %lx, IO addr 0x%x "
320 		    "Base addr 0x%x, Hi addr 0x%x\n",
321 		    vaddr_reg, tmpreg, ioaddr, addr, hiaddr));
322 
323 		if (ioaddr >= addr && ioaddr <= hiaddr) {
324 			tmpreg = *valid_bit_reg;
325 
326 			DPRINTF(IOMMU_TLB, ("Valid reg addr 0x%p, "
327 			    "TLB valid reg %lx\n",
328 			    valid_bit_reg, tmpreg));
329 
330 			if (tmpreg & IOMMU_TLB_VALID) {
331 				*softsp->iommu_flush_reg = (uint64_t)ioaddr;
332 				do_flush = 1;
333 			}
334 		}
335 	}
336 
337 	if (do_flush)
338 		tmpreg = *softsp->sbus_ctrl_reg;
339 }
340 
341 
342 /*
343  * Shorthand defines
344  */
345 
346 #define	ALO		dma_lim->dlim_addr_lo
347 #define	AHI		dma_lim->dlim_addr_hi
348 #define	OBJSIZE		dmareq->dmar_object.dmao_size
349 #define	IOTTE_NDX(vaddr, base) (base + \
350 		(int)(iommu_btop((vaddr & ~IOMMU_PAGEMASK) - \
351 		softsp->iommu_dvma_base)))
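/*
 * IOTTE_NDX() turns a DVMA address into a pointer to its IOTTE in the
 * TSB: the offset within the page is masked off, the DVMA base address
 * is subtracted, and the resulting byte offset is converted to a page
 * index.  For example, with a 64M DVMA space the base is 0xfc000000,
 * and a DVMA address of (0xfc000000 + 2 * IOMMU_PAGESIZE + 0x10)
 * selects IOTTE 2.
 */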
352 /*
353  * If the DDI_DMA_PARTIAL flag is set and the request is for
354  * fewer than MIN_DVMA_WIN_SIZE pages, it's not worth the hassle,
355  * so we turn off the DDI_DMA_PARTIAL flag.
356  */
357 #define	MIN_DVMA_WIN_SIZE	(128)
358 
359 /* ARGSUSED */
360 void
361 iommu_remove_mappings(ddi_dma_impl_t *mp)
362 {
363 #if defined(DEBUG) && defined(IO_MEMDEBUG)
364 	pgcnt_t npages;
365 	ioaddr_t ioaddr;
366 	volatile uint64_t *iotte_ptr;
369 	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
370 	struct sbus_soft_state *softsp = mppriv->softsp;
371 
372 #if defined(IO_MEMUSAGE)
373 	struct io_mem_list **prevp, *walk;
374 #endif /* IO_MEMUSAGE */
375 
376 	ASSERT(softsp != NULL);
377 	/*
378 	 * Run thru the mapped entries and free 'em
379 	 */
380 
381 	ioaddr = mp->dmai_mapping & ~IOMMU_PAGEOFFSET;
382 	npages = mp->dmai_ndvmapages;
383 
384 #if defined(IO_MEMUSAGE)
385 	mutex_enter(&softsp->iomemlock);
386 	prevp = &softsp->iomem;
387 	walk = softsp->iomem;
388 
389 	while (walk) {
390 		if (walk->ioaddr == ioaddr) {
391 			*prevp = walk->next;
392 			break;
393 		}
394 
395 		prevp = &walk->next;
396 		walk = walk->next;
397 	}
398 	mutex_exit(&softsp->iomemlock);
399 
400 	kmem_free(walk->pfn, sizeof (pfn_t) * (npages + 1));
401 	kmem_free(walk, sizeof (struct io_mem_list));
402 #endif /* IO_MEMUSAGE */
403 
404 	iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
405 
406 	while (npages) {
407 		DPRINTF(IOMMU_DMAMCTL_DEBUG,
408 		    ("dma_mctl: freeing ioaddr %x iotte %p\n",
409 		    ioaddr, iotte_ptr));
410 		*iotte_ptr = (uint64_t)0;	/* unload tte */
411 		iommu_tlb_flush(softsp, ioaddr, 1);
412 		npages--;
413 		ioaddr += IOMMU_PAGESIZE;
414 		iotte_ptr++;
415 	}
416 #endif /* DEBUG && IO_MEMDEBUG */
417 }
418 
419 
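/*
 * Load IOTTEs for a mapping described by a kernel or user virtual
 * address range.  Each page's pfn is looked up with hat_getpfnum() in
 * the object's address space (or kas), and the per-page TTE flags are
 * adjusted for I/O space targets and intra-SBus transfers.  The IOMMU
 * TLB is flushed either up front via the diagnostic registers (large
 * requests) or one page at a time via the flush register.
 */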
420 int
421 iommu_create_vaddr_mappings(ddi_dma_impl_t *mp, uintptr_t addr)
422 {
423 	pfn_t pfn;
424 	struct as *as = NULL;
425 	pgcnt_t npages;
426 	ioaddr_t ioaddr;
427 	uint_t offset;
428 	volatile uint64_t *iotte_ptr;
429 	uint64_t tmp_iotte_flag;
430 	int rval = DDI_DMA_MAPPED;
431 	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
432 	struct sbus_soft_state *softsp = mppriv->softsp;
433 	int diag_tlb_flush;
434 #if defined(DEBUG) && defined(IO_MEMUSAGE)
435 	struct io_mem_list *iomemp;
436 	pfn_t *pfnp;
437 #endif /* DEBUG && IO_MEMUSAGE */
438 
439 	ASSERT(softsp != NULL);
440 
441 	/* Set Valid and Cache for mem xfer */
442 	tmp_iotte_flag = IOTTE_VALID | IOTTE_CACHE | IOTTE_WRITE | IOTTE_STREAM;
443 
444 	offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET);
445 	npages = iommu_btopr(mp->dmai_size + offset);
446 	ioaddr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
447 	iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
448 	diag_tlb_flush = npages > tlb_flush_using_diag ? 1 : 0;
449 
450 	as = mp->dmai_object.dmao_obj.virt_obj.v_as;
451 	if (as == NULL)
452 		as = &kas;
453 
454 	/*
455 	 * Set the per object bits of the TTE here. We optimize this for
456 	 * the memory case so that the while loop overhead is minimal.
457 	 */
458 	/* Turn on NOSYNC and turn off streaming if we need consistent mem */
459 	if (mp->dmai_rflags & DDI_DMA_CONSISTENT) {
460 		mp->dmai_rflags |= DMP_NOSYNC;
461 		tmp_iotte_flag ^= IOTTE_STREAM;
462 	/* Turn off streaming if the streaming buffer is disabled */
463 	} else if (softsp->stream_buf_off) {
464 		tmp_iotte_flag ^= IOTTE_STREAM;
465 	}
466 
467 #if defined(DEBUG) && defined(IO_MEMUSAGE)
468 	iomemp = kmem_alloc(sizeof (struct io_mem_list), KM_SLEEP);
469 	iomemp->rdip = mp->dmai_rdip;
470 	iomemp->ioaddr = ioaddr;
471 	iomemp->addr = addr;
472 	iomemp->npages = npages;
473 	pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) * (npages + 1),
474 	    KM_SLEEP);
475 #endif /* DEBUG && IO_MEMUSAGE */
476 	/*
477 	 * Grab the mappings from the dmmu and stick 'em into the
478 	 * iommu.
479 	 */
480 	ASSERT(npages != 0);
481 
482 	/* If we're going to flush the TLB using diag mode, do it now. */
483 	if (diag_tlb_flush)
484 		iommu_tlb_flush(softsp, ioaddr, npages);
485 
486 	do {
487 		uint64_t iotte_flag = tmp_iotte_flag;
488 
489 		/*
490 		 * Fetch the pfn for the DMA object
491 		 */
492 
493 		ASSERT(as);
494 		pfn = hat_getpfnum(as->a_hat, (caddr_t)addr);
495 		ASSERT(pfn != PFN_INVALID);
496 
497 		if (!pf_is_memory(pfn)) {
498 			/* DVMA'ing to IO space */
499 
500 			/* Turn off cache bit if set */
501 			if (iotte_flag & IOTTE_CACHE)
502 				iotte_flag ^= IOTTE_CACHE;
503 
504 			/* Turn off stream bit if set */
505 			if (iotte_flag & IOTTE_STREAM)
506 				iotte_flag ^= IOTTE_STREAM;
507 
508 			if (IS_INTRA_SBUS(softsp, pfn)) {
509 				/* Intra sbus transfer */
510 
511 				/* Turn on intra flag */
512 				iotte_flag |= IOTTE_INTRA;
513 
514 				DPRINTF(IOMMU_INTER_INTRA_XFER, (
515 				    "Intra xfer pfnum %lx TTE %lx\n",
516 				    pfn, iotte_flag));
517 			} else {
518 				if (pf_is_dmacapable(pfn) == 1) {
519 					/*EMPTY*/
520 					DPRINTF(IOMMU_INTER_INTRA_XFER,
521 					    ("Inter xfer pfnum %lx "
522 					    "tte hi %lx\n",
523 					    pfn, iotte_flag));
524 				} else {
525 					rval = DDI_DMA_NOMAPPING;
526 #if defined(DEBUG) && defined(IO_MEMDEBUG)
527 					goto bad;
528 #endif /* DEBUG && IO_MEMDEBUG */
529 				}
530 			}
531 		}
532 		addr += IOMMU_PAGESIZE;
533 
534 		DPRINTF(IOMMU_TTE, ("vaddr mapping: tte index %p pfn %lx "
535 		    "tte flag %lx addr %lx ioaddr %x\n",
536 		    iotte_ptr, pfn, iotte_flag, addr, ioaddr));
537 
538 		/* Flush the IOMMU TLB before loading a new mapping */
539 		if (!diag_tlb_flush)
540 			iommu_tlb_flush(softsp, ioaddr, 1);
541 
542 		/* Set the hardware IO TTE */
543 		*iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;
544 
545 		ioaddr += IOMMU_PAGESIZE;
546 		npages--;
547 		iotte_ptr++;
548 #if defined(DEBUG) && defined(IO_MEMUSAGE)
549 		*pfnp = pfn;
550 		pfnp++;
551 #endif /* DEBUG && IO_MEMUSAGE */
552 	} while (npages != 0);
553 
554 #if defined(DEBUG) && defined(IO_MEMUSAGE)
555 	mutex_enter(&softsp->iomemlock);
556 	iomemp->next = softsp->iomem;
557 	softsp->iomem = iomemp;
558 	mutex_exit(&softsp->iomemlock);
559 #endif /* DEBUG && IO_MEMUSAGE */
560 
561 	return (rval);
562 
563 #if defined(DEBUG) && defined(IO_MEMDEBUG)
564 bad:
565 	/* If we fail a mapping, free up any mapping resources used */
566 	iommu_remove_mappings(mp);
567 	return (rval);
568 #endif /* DEBUG && IO_MEMDEBUG */
569 }
570 
571 
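/*
 * Load IOTTEs for a mapping described by a page_t chain (pp) or a
 * shadow page list (pplist).  Unlike the vaddr case, the pages are
 * already known to be regular memory, so no per-page pfn checks are
 * needed here.
 */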
572 int
573 iommu_create_pp_mappings(ddi_dma_impl_t *mp, page_t *pp, page_t **pplist)
574 {
575 	pfn_t pfn;
576 	pgcnt_t npages;
577 	ioaddr_t ioaddr;
578 	uint_t offset;
579 	volatile uint64_t *iotte_ptr;
580 	uint64_t tmp_iotte_flag;
581 	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
582 	struct sbus_soft_state *softsp = mppriv->softsp;
583 	int diag_tlb_flush;
584 #if defined(DEBUG) && defined(IO_MEMUSAGE)
585 	struct io_mem_list *iomemp;
586 	pfn_t *pfnp;
587 #endif /* DEBUG && IO_MEMUSAGE */
588 	int rval = DDI_DMA_MAPPED;
589 
590 	/* Set Valid and Cache for mem xfer */
591 	tmp_iotte_flag = IOTTE_VALID | IOTTE_CACHE | IOTTE_WRITE | IOTTE_STREAM;
592 
593 	ASSERT(softsp != NULL);
594 
595 	offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET);
596 	npages = iommu_btopr(mp->dmai_size + offset);
597 	ioaddr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
598 	iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
599 	diag_tlb_flush = npages > tlb_flush_using_diag ? 1 : 0;
600 
601 	/*
602 	 * Set the per object bits of the TTE here. We optimize this for
603 	 * the memory case so that the while loop overhead is minimal.
604 	 */
605 	if (mp->dmai_rflags & DDI_DMA_CONSISTENT) {
606 		/* Turn on NOSYNC; turn off streaming for consistent mem */
607 		mp->dmai_rflags |= DMP_NOSYNC;
608 		tmp_iotte_flag ^= IOTTE_STREAM;
609 	} else if (softsp->stream_buf_off) {
610 		/* Turn off streaming if the streaming buffer is disabled */
611 		tmp_iotte_flag ^= IOTTE_STREAM;
612 	}
613 
614 #if defined(DEBUG) && defined(IO_MEMUSAGE)
615 	iomemp = kmem_alloc(sizeof (struct io_mem_list), KM_SLEEP);
616 	iomemp->rdip = mp->dmai_rdip;
617 	iomemp->ioaddr = ioaddr;
618 	iomemp->npages = npages;
619 	pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) * (npages + 1),
620 	    KM_SLEEP);
621 #endif /* DEBUG && IO_MEMUSAGE */
622 	/*
623 	 * Grab the mappings from the dmmu and stick 'em into the
624 	 * iommu.
625 	 */
626 	ASSERT(npages != 0);
627 
628 	/* If we're going to flush the TLB using diag mode, do it now. */
629 	if (diag_tlb_flush)
630 		iommu_tlb_flush(softsp, ioaddr, npages);
631 
632 	do {
633 		uint64_t iotte_flag;
634 
635 		iotte_flag = tmp_iotte_flag;
636 
637 		if (pp != NULL) {
638 			pfn = pp->p_pagenum;
639 			pp = pp->p_next;
640 		} else {
641 			pfn = (*pplist)->p_pagenum;
642 			pplist++;
643 		}
644 
645 		DPRINTF(IOMMU_TTE, ("pp mapping TTE index %p pfn %lx "
646 		    "tte flag %lx ioaddr %x\n", iotte_ptr,
647 		    pfn, iotte_flag, ioaddr));
648 
649 		/* Flush the IOMMU TLB before loading a new mapping */
650 		if (!diag_tlb_flush)
651 			iommu_tlb_flush(softsp, ioaddr, 1);
652 
653 		/* Set the hardware IO TTE */
654 		*iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;
655 
656 		ioaddr += IOMMU_PAGESIZE;
657 		npages--;
658 		iotte_ptr++;
659 
660 #if defined(DEBUG) && defined(IO_MEMUSAGE)
661 		*pfnp = pfn;
662 		pfnp++;
663 #endif /* DEBUG && IO_MEMUSAGE */
664 
665 	} while (npages != 0);
666 
667 #if defined(DEBUG) && defined(IO_MEMUSAGE)
668 	mutex_enter(&softsp->iomemlock);
669 	iomemp->next = softsp->iomem;
670 	softsp->iomem = iomemp;
671 	mutex_exit(&softsp->iomemlock);
672 #endif /* DEBUG && IO_MEMUSAGE */
673 
674 	return (rval);
675 }
676 
677 
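/*
 * Compute burstsizes and the minimum transfer size for a request,
 * enabling 64-bit SBus bursts only when the caller asked for them, the
 * SBus supports them, and the device is a direct child of this nexus.
 * Returns DDI_FAILURE when the request is restricted to 32-bit bursts.
 */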
678 int
679 iommu_dma_lim_setup(dev_info_t *dip, dev_info_t *rdip,
680     struct sbus_soft_state *softsp, uint_t *burstsizep, uint_t burstsize64,
681     uint_t *minxferp, uint_t dma_flags)
682 {
683 	struct regspec *rp;
684 
685 	/* Take care of 64 bit limits. */
686 	if (!(dma_flags & DDI_DMA_SBUS_64BIT)) {
687 		/*
688 		 * return burst size for 32-bit mode
689 		 */
690 		*burstsizep &= softsp->sbus_burst_sizes;
691 		return (DDI_FAILURE);
692 	}
693 
694 	/*
695 	 * Check whether the SBus supports 64-bit and whether the caller is
696 	 * a direct child of the SBus.  There is no support through bridges.
697 	 */
698 	if (!softsp->sbus64_burst_sizes || (ddi_get_parent(rdip) != dip)) {
699 		/*
700 		 * The SBus doesn't support 64-bit, or the caller sits behind
701 		 * a bridge.  Do 32-bit xfers.
702 		 */
703 		*burstsizep &= softsp->sbus_burst_sizes;
704 		return (DDI_FAILURE);
705 	}
706 
707 	rp = ddi_rnumber_to_regspec(rdip, 0);
708 	if (rp == NULL) {
709 		*burstsizep &= softsp->sbus_burst_sizes;
710 		return (DDI_FAILURE);
711 	}
712 
713 	/* Check for old-style 64 bit burstsizes */
714 	if (burstsize64 & SYSIO64_BURST_MASK) {
715 		/* Scale back burstsizes if necessary */
716 		*burstsizep &= (softsp->sbus64_burst_sizes |
717 		    softsp->sbus_burst_sizes);
718 	} else {
719 		/* Get the 64 bit burstsizes. */
720 		*burstsizep = burstsize64;
721 
722 		/* Scale back burstsizes if necessary */
723 		*burstsizep &= (softsp->sbus64_burst_sizes >>
724 		    SYSIO64_BURST_SHIFT);
725 	}
726 
727 	/*
728 	 * Set the minimum transfer size to the larger of the device's
729 	 * minxfer and the smallest 64-bit burstsize the bus can manage.
730 	 */
731 	*minxferp = MAX(*minxferp,
732 	    (1 << (ddi_ffs(softsp->sbus64_burst_sizes) - 1)));
733 
734 	return (DDI_SUCCESS);
735 }
736 
737 
738 int
739 iommu_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
740     ddi_dma_attr_t *dma_attr, int (*waitfp)(caddr_t), caddr_t arg,
741     ddi_dma_handle_t *handlep)
742 {
743 	ioaddr_t addrlow, addrhigh, segalign;
744 	ddi_dma_impl_t *mp;
745 	struct dma_impl_priv *mppriv;
746 	struct sbus_soft_state *softsp = (struct sbus_soft_state *)
747 	    ddi_get_soft_state(sbusp, ddi_get_instance(dip));
748 
749 	/*
750 	 * Set up DMA burstsizes and min-xfer counts.
751 	 */
752 	(void) iommu_dma_lim_setup(dip, rdip, softsp,
753 	    &dma_attr->dma_attr_burstsizes,
754 	    dma_attr->dma_attr_burstsizes, &dma_attr->dma_attr_minxfer,
755 	    dma_attr->dma_attr_flags);
756 
757 	if (dma_attr->dma_attr_burstsizes == 0)
758 		return (DDI_DMA_BADATTR);
759 
760 	addrlow = (ioaddr_t)dma_attr->dma_attr_addr_lo;
761 	addrhigh = (ioaddr_t)dma_attr->dma_attr_addr_hi;
762 	segalign = (ioaddr_t)dma_attr->dma_attr_seg;
763 
764 	/*
765 	 * Check sanity for hi and lo address limits
766 	 */
767 	if ((addrhigh <= addrlow) ||
768 	    (addrhigh < (ioaddr_t)softsp->iommu_dvma_base)) {
769 		return (DDI_DMA_BADATTR);
770 	}
771 	if (dma_attr->dma_attr_flags & DDI_DMA_FORCE_PHYSICAL)
772 		return (DDI_DMA_BADATTR);
773 
774 	mppriv = kmem_zalloc(sizeof (*mppriv),
775 	    (waitfp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP);
776 
777 	if (mppriv == NULL) {
778 		if (waitfp != DDI_DMA_DONTWAIT) {
779 		    ddi_set_callback(waitfp, arg, &softsp->dvma_call_list_id);
780 		}
781 		return (DDI_DMA_NORESOURCES);
782 	}
783 	mp = (ddi_dma_impl_t *)mppriv;
784 
785 	DPRINTF(IOMMU_DMA_ALLOCHDL_DEBUG, ("dma_allochdl: (%s) handle %p "
786 	    "hi %x lo %x min %x burst %x\n",
787 	    ddi_get_name(dip), mp, addrhigh, addrlow,
788 	    dma_attr->dma_attr_minxfer, dma_attr->dma_attr_burstsizes));
789 
790 	mp->dmai_rdip = rdip;
791 	mp->dmai_minxfer = (uint_t)dma_attr->dma_attr_minxfer;
792 	mp->dmai_burstsizes = (uint_t)dma_attr->dma_attr_burstsizes;
793 	mp->dmai_attr = *dma_attr;
794 	/* See if the DMA engine has any limit restrictions. */
795 	if (segalign == (ioaddr_t)UINT32_MAX &&
796 	    addrhigh == (ioaddr_t)UINT32_MAX &&
797 	    (dma_attr->dma_attr_align <= IOMMU_PAGESIZE) && addrlow == 0) {
798 		mp->dmai_rflags |= DMP_NOLIMIT;
799 	}
800 	mppriv->softsp = softsp;
801 	mppriv->phys_sync_flag = va_to_pa((caddr_t)&mppriv->sync_flag);
802 
803 	*handlep = (ddi_dma_handle_t)mp;
804 	return (DDI_SUCCESS);
805 }
806 
807 /*ARGSUSED*/
808 int
809 iommu_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
810 {
811 	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)handle;
812 	struct sbus_soft_state *softsp = mppriv->softsp;
813 	ASSERT(softsp != NULL);
814 
815 	kmem_free(mppriv, sizeof (*mppriv));
816 
817 	if (softsp->dvma_call_list_id != 0) {
818 		ddi_run_callback(&softsp->dvma_call_list_id);
819 	}
820 	return (DDI_SUCCESS);
821 }
822 
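/*
 * Clip the mapping size to what the device's DMA attributes (segment
 * boundary and address range) can actually address.  If the object
 * doesn't fit and the caller didn't allow DDI_DMA_PARTIAL, fail with
 * DDI_DMA_TOOBIG.
 */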
823 static int
824 check_dma_attr(struct ddi_dma_req *dmareq, ddi_dma_attr_t *dma_attr,
825     uint32_t *size)
826 {
827 	ioaddr_t addrlow;
828 	ioaddr_t addrhigh;
829 	uint32_t segalign;
830 	uint32_t smask;
831 
832 	smask = *size - 1;
833 	segalign = dma_attr->dma_attr_seg;
834 	if (smask > segalign) {
835 		if ((dmareq->dmar_flags & DDI_DMA_PARTIAL) == 0)
836 			return (DDI_DMA_TOOBIG);
837 		*size = segalign + 1;
838 	}
839 	addrlow = (ioaddr_t)dma_attr->dma_attr_addr_lo;
840 	addrhigh = (ioaddr_t)dma_attr->dma_attr_addr_hi;
841 	if (addrlow + smask > addrhigh || addrlow + smask < addrlow) {
842 		if (!((addrlow + dmareq->dmar_object.dmao_size == 0) &&
843 		    (addrhigh == (ioaddr_t)-1))) {
844 			if ((dmareq->dmar_flags & DDI_DMA_PARTIAL) == 0)
845 				return (DDI_DMA_TOOBIG);
846 			*size = MIN(addrhigh - addrlow + 1, *size);
847 		}
848 	}
849 	return (DDI_DMA_MAPOK);
850 }
851 
852 int
853 iommu_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
854     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
855     ddi_dma_cookie_t *cp, uint_t *ccountp)
856 {
857 	page_t *pp;
858 	uint32_t size;
859 	ioaddr_t ioaddr;
860 	uint_t offset;
861 	uintptr_t addr = 0;
862 	pgcnt_t npages;
863 	int rval;
864 	ddi_dma_attr_t *dma_attr;
865 	struct sbus_soft_state *softsp;
866 	struct page **pplist = NULL;
867 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
868 	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
869 
870 #ifdef lint
871 	dip = dip;
872 	rdip = rdip;
873 #endif
874 
875 	if (mp->dmai_inuse)
876 		return (DDI_DMA_INUSE);
877 
878 	dma_attr = &mp->dmai_attr;
879 	size = (uint32_t)dmareq->dmar_object.dmao_size;
880 	if (!(mp->dmai_rflags & DMP_NOLIMIT)) {
881 		rval = check_dma_attr(dmareq, dma_attr, &size);
882 		if (rval != DDI_DMA_MAPOK)
883 			return (rval);
884 	}
885 	mp->dmai_inuse = 1;
886 	mp->dmai_offset = 0;
887 	mp->dmai_rflags = (dmareq->dmar_flags & DMP_DDIFLAGS) |
888 	    (mp->dmai_rflags & DMP_NOLIMIT);
889 
890 	switch (dmareq->dmar_object.dmao_type) {
891 	case DMA_OTYP_VADDR:
892 	case DMA_OTYP_BUFVADDR:
893 		addr = (uintptr_t)dmareq->dmar_object.dmao_obj.virt_obj.v_addr;
894 		offset = addr & IOMMU_PAGEOFFSET;
895 		pplist = dmareq->dmar_object.dmao_obj.virt_obj.v_priv;
896 		npages = iommu_btopr(OBJSIZE + offset);
897 
898 		DPRINTF(IOMMU_DMAMAP_DEBUG, ("dma_map vaddr: %lx pages "
899 		    "req addr %lx off %x OBJSIZE %x\n",
900 		    npages, addr, offset, OBJSIZE));
901 
902 		/* We don't need the addr anymore if we have a shadow list */
903 		if (pplist != NULL)
904 			addr = 0;
905 		pp = NULL;
906 		break;
907 
908 	case DMA_OTYP_PAGES:
909 		pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp;
910 		offset = dmareq->dmar_object.dmao_obj.pp_obj.pp_offset;
911 		npages = iommu_btopr(OBJSIZE + offset);
912 		break;
913 
914 	case DMA_OTYP_PADDR:
915 	default:
916 		/*
917 		 * Not a supported type for this implementation
918 		 */
919 		rval = DDI_DMA_NOMAPPING;
920 		goto bad;
921 	}
922 
923 	/* Get our soft state once we know we're mapping an object. */
924 	softsp = mppriv->softsp;
925 	ASSERT(softsp != NULL);
926 
927 	if (mp->dmai_rflags & DDI_DMA_PARTIAL) {
928 		if (size != OBJSIZE) {
929 			/*
930 			 * If the request is for partial mapping arrangement,
931 			 * the device has to be able to address at least the
932 			 * size of the window we are establishing.
933 			 */
934 			if (size < iommu_ptob(MIN_DVMA_WIN_SIZE)) {
935 				rval = DDI_DMA_NOMAPPING;
936 				goto bad;
937 			}
938 			npages = iommu_btopr(size + offset);
939 		}
940 		/*
941 		 * If the size requested is less than a moderate amount,
942 		 * skip the partial mapping stuff; it's not worth the
943 		 * effort.
944 		 */
945 		if (npages > MIN_DVMA_WIN_SIZE) {
946 			npages = MIN_DVMA_WIN_SIZE + iommu_btopr(offset);
947 			size = iommu_ptob(MIN_DVMA_WIN_SIZE);
948 			DPRINTF(IOMMU_DMA_SETUP_DEBUG, ("dma_setup: SZ %x pg "
949 			    "%lx sz %x\n", OBJSIZE, npages, size));
950 			if (pplist != NULL) {
951 				mp->dmai_minfo = (void *)pplist;
952 				mp->dmai_rflags |= DMP_SHADOW;
953 			}
954 		} else {
955 			mp->dmai_rflags ^= DDI_DMA_PARTIAL;
956 		}
957 	} else {
958 		if (npages >= iommu_btop(softsp->iommu_dvma_size) -
959 		    MIN_DVMA_WIN_SIZE) {
960 			rval = DDI_DMA_TOOBIG;
961 			goto bad;
962 		}
963 	}
964 
965 	/*
966 	 * Save the dmareq object, size and npages into mp.
967 	 */
968 	mp->dmai_object = dmareq->dmar_object;
969 	mp->dmai_size = size;
970 	mp->dmai_ndvmapages = npages;
971 
972 	if (mp->dmai_rflags & DMP_NOLIMIT) {
973 		ioaddr = (ioaddr_t)(uintptr_t)vmem_alloc(softsp->dvma_arena,
974 		    iommu_ptob(npages),
975 		    dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP);
976 		if (ioaddr == 0) {
977 			rval = DDI_DMA_NORESOURCES;
978 			goto bad;
979 		}
980 
981 		/*
982 		 * If we have a 1 page request and we're working with a page
983 		 * list, we're going to speed load an IOMMU entry.
984 		 */
985 		if (npages == 1 && !addr) {
986 			uint64_t iotte_flag = IOTTE_VALID | IOTTE_CACHE |
987 			    IOTTE_WRITE | IOTTE_STREAM;
988 			volatile uint64_t *iotte_ptr;
989 			pfn_t pfn;
990 #if defined(DEBUG) && defined(IO_MEMUSAGE)
991 			struct io_mem_list *iomemp;
992 			pfn_t *pfnp;
993 #endif /* DEBUG && IO_MEMUSAGE */
994 
995 			iotte_ptr = IOTTE_NDX(ioaddr,
996 			    softsp->soft_tsb_base_addr);
997 
998 			if (mp->dmai_rflags & DDI_DMA_CONSISTENT) {
999 				mp->dmai_rflags |= DMP_NOSYNC;
1000 				iotte_flag ^= IOTTE_STREAM;
1001 			} else if (softsp->stream_buf_off)
1002 				iotte_flag ^= IOTTE_STREAM;
1003 
1004 			mp->dmai_rflags ^= DDI_DMA_PARTIAL;
1005 
1006 			if (pp != NULL)
1007 				pfn = pp->p_pagenum;
1008 			else
1009 				pfn = (*pplist)->p_pagenum;
1010 
1011 			iommu_tlb_flush(softsp, ioaddr, 1);
1012 
1013 			*iotte_ptr =
1014 			    ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;
1015 
1016 			mp->dmai_mapping = (ioaddr_t)(ioaddr + offset);
1017 			mp->dmai_nwin = 0;
1018 			if (cp != NULL) {
1019 				cp->dmac_notused = 0;
1020 				cp->dmac_address = (ioaddr_t)mp->dmai_mapping;
1021 				cp->dmac_size = mp->dmai_size;
1022 				cp->dmac_type = 0;
1023 				*ccountp = 1;
1024 			}
1025 
1026 			DPRINTF(IOMMU_TTE, ("speed loading: TTE index %p "
1027 			    "pfn %lx tte flag %lx addr %lx ioaddr %x\n",
1028 			    iotte_ptr, pfn, iotte_flag, addr, ioaddr));
1029 
1030 #if defined(DEBUG) && defined(IO_MEMUSAGE)
1031 			iomemp = kmem_alloc(sizeof (struct io_mem_list),
1032 			    KM_SLEEP);
1033 			iomemp->rdip = mp->dmai_rdip;
1034 			iomemp->ioaddr = ioaddr;
1035 			iomemp->addr = addr;
1036 			iomemp->npages = npages;
1037 			pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) *
1038 			    (npages + 1), KM_SLEEP);
1039 			*pfnp = pfn;
1040 			mutex_enter(&softsp->iomemlock);
1041 			iomemp->next = softsp->iomem;
1042 			softsp->iomem = iomemp;
1043 			mutex_exit(&softsp->iomemlock);
1044 #endif /* DEBUG && IO_MEMUSAGE */
1045 
1046 			return (DDI_DMA_MAPPED);
1047 		}
1048 	} else {
1049 		ioaddr = (ioaddr_t)(uintptr_t)vmem_xalloc(softsp->dvma_arena,
1050 		    iommu_ptob(npages),
1051 		    MAX((uint_t)dma_attr->dma_attr_align, IOMMU_PAGESIZE), 0,
1052 		    (uint_t)dma_attr->dma_attr_seg + 1,
1053 		    (void *)(uintptr_t)(ioaddr_t)dma_attr->dma_attr_addr_lo,
1054 		    (void *)(uintptr_t)
1055 			((ioaddr_t)dma_attr->dma_attr_addr_hi + 1),
1056 		    dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP);
1057 	}
1058 
1059 	if (ioaddr == 0) {
1060 		if (dmareq->dmar_fp == DDI_DMA_SLEEP)
1061 			rval = DDI_DMA_NOMAPPING;
1062 		else
1063 			rval = DDI_DMA_NORESOURCES;
1064 		goto bad;
1065 	}
1066 
1067 	mp->dmai_mapping = ioaddr + offset;
1068 	ASSERT(mp->dmai_mapping >= softsp->iommu_dvma_base);
1069 
1070 	/*
1071 	 * At this point we have a range of virtual address allocated
1072 	 * with which we now have to map to the requested object.
1073 	 */
1074 	if (addr) {
1075 		rval = iommu_create_vaddr_mappings(mp,
1076 		    addr & ~IOMMU_PAGEOFFSET);
1077 		if (rval == DDI_DMA_NOMAPPING)
1078 			goto bad_nomap;
1079 	} else {
1080 		rval = iommu_create_pp_mappings(mp, pp, pplist);
1081 		if (rval == DDI_DMA_NOMAPPING)
1082 			goto bad_nomap;
1083 	}
1084 
1085 	if (cp) {
1086 		cp->dmac_notused = 0;
1087 		cp->dmac_address = (ioaddr_t)mp->dmai_mapping;
1088 		cp->dmac_size = mp->dmai_size;
1089 		cp->dmac_type = 0;
1090 		*ccountp = 1;
1091 	}
1092 	if (mp->dmai_rflags & DDI_DMA_PARTIAL) {
1093 		size = iommu_ptob(mp->dmai_ndvmapages - iommu_btopr(offset));
1094 		mp->dmai_nwin =
1095 		    (dmareq->dmar_object.dmao_size + (size - 1)) / size;
1096 		return (DDI_DMA_PARTIAL_MAP);
1097 	} else {
1098 		mp->dmai_nwin = 0;
1099 		return (DDI_DMA_MAPPED);
1100 	}
1101 
1102 bad_nomap:
1103 	/*
1104 	 * Could not create IOMMU mappings.
1105 	 */
1106 	if (mp->dmai_rflags & DMP_NOLIMIT) {
1107 		vmem_free(softsp->dvma_arena, (void *)(uintptr_t)ioaddr,
1108 		    iommu_ptob(npages));
1109 	} else {
1110 		vmem_xfree(softsp->dvma_arena, (void *)(uintptr_t)ioaddr,
1111 		    iommu_ptob(npages));
1112 	}
1113 
1114 bad:
1115 	if (rval == DDI_DMA_NORESOURCES &&
1116 	    dmareq->dmar_fp != DDI_DMA_DONTWAIT) {
1117 		ddi_set_callback(dmareq->dmar_fp,
1118 		    dmareq->dmar_arg, &softsp->dvma_call_list_id);
1119 	}
1120 	mp->dmai_inuse = 0;
1121 	return (rval);
1122 }
1123 
1124 /* ARGSUSED */
1125 int
1126 iommu_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
1127     ddi_dma_handle_t handle)
1128 {
1129 	ioaddr_t addr;
1130 	uint_t npages;
1131 	size_t size;
1132 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
1133 	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
1134 	struct sbus_soft_state *softsp = mppriv->softsp;
1135 	ASSERT(softsp != NULL);
1136 
1137 	addr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
1138 	npages = mp->dmai_ndvmapages;
1139 	size = iommu_ptob(npages);
1140 
1141 	DPRINTF(IOMMU_DMA_UNBINDHDL_DEBUG, ("iommu_dma_unbindhdl: "
1142 	    "unbinding addr %x for %x pages\n", addr, mp->dmai_ndvmapages));
1143 
1144 	/* sync the entire object */
1145 	if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
1146 		/* flush stream write buffers */
1147 		sync_stream_buf(softsp, addr, npages, (int *)&mppriv->sync_flag,
1148 		    mppriv->phys_sync_flag);
1149 	}
1150 
1151 #if defined(DEBUG) && defined(IO_MEMDEBUG)
1152 	/*
1153 	 * 'Free' the dma mappings.
1154 	 */
1155 	iommu_remove_mappings(mp);
1156 #endif /* DEBUG && IO_MEMDEBUG */
1157 
1158 	ASSERT(npages > (uint_t)0);
1159 	if (mp->dmai_rflags & DMP_NOLIMIT)
1160 		vmem_free(softsp->dvma_arena, (void *)(uintptr_t)addr, size);
1161 	else
1162 		vmem_xfree(softsp->dvma_arena, (void *)(uintptr_t)addr, size);
1163 
1164 	mp->dmai_ndvmapages = 0;
1165 	mp->dmai_inuse = 0;
1166 	mp->dmai_minfo = NULL;
1167 
1168 	if (softsp->dvma_call_list_id != 0)
1169 		ddi_run_callback(&softsp->dvma_call_list_id);
1170 
1171 	return (DDI_SUCCESS);
1172 }
1173 
1174 /*ARGSUSED*/
1175 int
1176 iommu_dma_flush(dev_info_t *dip, dev_info_t *rdip,
1177     ddi_dma_handle_t handle, off_t off, size_t len,
1178     uint_t cache_flags)
1179 {
1180 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
1181 	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
1182 
1183 	if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
1184 		sync_stream_buf(mppriv->softsp, mp->dmai_mapping,
1185 		    mp->dmai_ndvmapages, (int *)&mppriv->sync_flag,
1186 		    mppriv->phys_sync_flag);
1187 	}
1188 	return (DDI_SUCCESS);
1189 }
1190 
1191 /*ARGSUSED*/
1192 int
1193 iommu_dma_win(dev_info_t *dip, dev_info_t *rdip,
1194     ddi_dma_handle_t handle, uint_t win, off_t *offp,
1195     size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
1196 {
1197 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
1198 	off_t offset;
1199 	uint_t winsize;
1200 	uint_t newoff;
1201 	int rval;
1202 
1203 	offset = mp->dmai_mapping & IOMMU_PAGEOFFSET;
1204 	winsize = iommu_ptob(mp->dmai_ndvmapages - iommu_btopr(offset));
1205 
1206 	DPRINTF(IOMMU_DMA_WIN_DEBUG, ("getwin win %d winsize %x\n", win,
1207 	    winsize));
1208 
1209 	/*
1210 	 * win is in the range [0 .. dmai_nwin-1]
1211 	 */
1212 	if (win >= mp->dmai_nwin)
1213 		return (DDI_FAILURE);
1214 
1215 	newoff = win * winsize;
1216 	if (newoff > mp->dmai_object.dmao_size - mp->dmai_minxfer)
1217 		return (DDI_FAILURE);
1218 
1219 	ASSERT(cookiep);
1220 	cookiep->dmac_notused = 0;
1221 	cookiep->dmac_type = 0;
1222 	cookiep->dmac_address = (ioaddr_t)mp->dmai_mapping;
1223 	cookiep->dmac_size = mp->dmai_size;
1224 	*ccountp = 1;
1225 	*offp = (off_t)newoff;
1226 	*lenp = (uint_t)winsize;
1227 
1228 	if (newoff == mp->dmai_offset) {
1229 		/*
1230 		 * Nothing to do...
1231 		 */
1232 		return (DDI_SUCCESS);
1233 	}
1234 
1235 	if ((rval = iommu_map_window(mp, newoff, winsize)) != DDI_SUCCESS)
1236 		return (rval);
1237 
1238 	/*
1239 	 * Set this again in case iommu_map_window() has changed it
1240 	 */
1241 	cookiep->dmac_size = mp->dmai_size;
1242 
1243 	return (DDI_SUCCESS);
1244 }
1245 
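/*
 * Shift a partial mapping to the window starting at newoff: tear down
 * the current window's IOTTEs (DEBUG/IO_MEMDEBUG kernels only), update
 * dmai_offset and dmai_size, locate the virtual address or page_t for
 * the new offset, rebuild the mappings, and invalidate the streaming
 * buffer for non-consistent mappings.
 */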
1246 static int
1247 iommu_map_window(ddi_dma_impl_t *mp, off_t newoff, size_t winsize)
1248 {
1249 	uintptr_t addr = 0;
1250 	page_t *pp;
1251 	uint_t flags;
1252 	struct page **pplist = NULL;
1253 
1254 #if defined(DEBUG) && defined(IO_MEMDEBUG)
1255 	/* Free mappings for current window */
1256 	iommu_remove_mappings(mp);
1257 #endif /* DEBUG && IO_MEMDEBUG */
1258 
1259 	mp->dmai_offset = newoff;
1260 	mp->dmai_size = mp->dmai_object.dmao_size - newoff;
1261 	mp->dmai_size = MIN(mp->dmai_size, winsize);
1262 
1263 	if (mp->dmai_object.dmao_type == DMA_OTYP_VADDR ||
1264 	    mp->dmai_object.dmao_type == DMA_OTYP_BUFVADDR) {
1265 		if (mp->dmai_rflags & DMP_SHADOW) {
1266 			pplist = (struct page **)mp->dmai_minfo;
1267 			ASSERT(pplist != NULL);
1268 			pplist = pplist + (newoff >> MMU_PAGESHIFT);
1269 		} else {
1270 			addr = (uintptr_t)
1271 			    mp->dmai_object.dmao_obj.virt_obj.v_addr;
1272 			addr = (addr + newoff) & ~IOMMU_PAGEOFFSET;
1273 		}
1274 		pp = NULL;
1275 	} else {
1276 		pp = mp->dmai_object.dmao_obj.pp_obj.pp_pp;
1277 		flags = 0;
1278 		while (flags < newoff) {
1279 			pp = pp->p_next;
1280 			flags += MMU_PAGESIZE;
1281 		}
1282 	}
1283 
1284 	/* Set up mappings for next window */
1285 	if (addr) {
1286 		if (iommu_create_vaddr_mappings(mp, addr) < 0)
1287 			return (DDI_FAILURE);
1288 	} else {
1289 		if (iommu_create_pp_mappings(mp, pp, pplist) < 0)
1290 			return (DDI_FAILURE);
1291 	}
1292 
1293 	/*
1294 	 * also invalidate read stream buffer
1295 	 */
1296 	if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
1297 		struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
1298 
1299 		sync_stream_buf(mppriv->softsp, mp->dmai_mapping,
1300 		    mp->dmai_ndvmapages, (int *)&mppriv->sync_flag,
1301 		    mppriv->phys_sync_flag);
1302 	}
1303 
1304 	return (DDI_SUCCESS);
1305 
1306 }
1307 
1308 int
1309 iommu_dma_map(dev_info_t *dip, dev_info_t *rdip,
1310     struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep)
1311 {
1312 	ddi_dma_lim_t *dma_lim = dmareq->dmar_limits;
1313 	ddi_dma_impl_t *mp;
1314 	ddi_dma_attr_t *dma_attr;
1315 	struct dma_impl_priv *mppriv;
1316 	ioaddr_t addrlow, addrhigh;
1317 	ioaddr_t segalign;
1318 	int rval;
1319 	struct sbus_soft_state *softsp =
1320 		(struct sbus_soft_state *)ddi_get_soft_state(sbusp,
1321 		ddi_get_instance(dip));
1322 
1323 	addrlow = dma_lim->dlim_addr_lo;
1324 	addrhigh = dma_lim->dlim_addr_hi;
1325 	if ((addrhigh <= addrlow) ||
1326 	    (addrhigh < (ioaddr_t)softsp->iommu_dvma_base)) {
1327 		return (DDI_DMA_NOMAPPING);
1328 	}
1329 
1330 	/*
1331 	 * Setup DMA burstsizes and min-xfer counts.
1332 	 */
1333 	(void) iommu_dma_lim_setup(dip, rdip, softsp, &dma_lim->dlim_burstsizes,
1334 		(uint_t)dma_lim->dlim_burstsizes, &dma_lim->dlim_minxfer,
1335 		dmareq->dmar_flags);
1336 
1337 	if (dma_lim->dlim_burstsizes == 0)
1338 		return (DDI_DMA_NOMAPPING);
1339 	/*
1340 	 * If this is just an advisory call (no handle pointer), we're done.
1341 	 */
1342 	if (!handlep) {
1343 		return (DDI_DMA_MAPOK);
1344 	}
1345 
1346 	mppriv = kmem_zalloc(sizeof (*mppriv),
1347 	    (dmareq->dmar_fp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP);
1348 	if (mppriv == NULL) {
1349 		if (dmareq->dmar_fp != DDI_DMA_DONTWAIT) {
1350 			ddi_set_callback(dmareq->dmar_fp,
1351 			    dmareq->dmar_arg, &softsp->dvma_call_list_id);
1352 		}
1353 		return (DDI_DMA_NORESOURCES);
1354 	}
1355 	mp = (ddi_dma_impl_t *)mppriv;
1356 	mp->dmai_rdip = rdip;
1357 	mp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS;
1358 	mp->dmai_minxfer = dma_lim->dlim_minxfer;
1359 	mp->dmai_burstsizes = dma_lim->dlim_burstsizes;
1360 	mp->dmai_offset = 0;
1361 	mp->dmai_ndvmapages = 0;
1362 	mp->dmai_minfo = 0;
1363 	mp->dmai_inuse = 0;
1364 	segalign = dma_lim->dlim_cntr_max;
1365 	/* See if the DMA engine has any limit restrictions. */
1366 	if (segalign == UINT32_MAX && addrhigh == UINT32_MAX &&
1367 	    addrlow == 0) {
1368 		mp->dmai_rflags |= DMP_NOLIMIT;
1369 	}
1370 	mppriv->softsp = softsp;
1371 	mppriv->phys_sync_flag = va_to_pa((caddr_t)&mppriv->sync_flag);
1372 	dma_attr = &mp->dmai_attr;
1373 	dma_attr->dma_attr_align = 1;
1374 	dma_attr->dma_attr_addr_lo = addrlow;
1375 	dma_attr->dma_attr_addr_hi = addrhigh;
1376 	dma_attr->dma_attr_seg = segalign;
1377 	dma_attr->dma_attr_burstsizes = dma_lim->dlim_burstsizes;
1378 	rval = iommu_dma_bindhdl(dip, rdip, (ddi_dma_handle_t)mp,
1379 		dmareq, NULL, NULL);
1380 	if (rval && (rval != DDI_DMA_PARTIAL_MAP)) {
1381 		kmem_free(mppriv, sizeof (*mppriv));
1382 	} else {
1383 		*handlep = (ddi_dma_handle_t)mp;
1384 	}
1385 	return (rval);
1386 }
1387 
1388 /*ARGSUSED*/
1389 int
1390 iommu_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
1391     ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
1392     off_t *offp, size_t *lenp, caddr_t *objp, uint_t cache_flags)
1393 {
1394 	ioaddr_t addr;
1395 	uint_t offset;
1396 	pgcnt_t npages;
1397 	size_t size;
1398 	ddi_dma_cookie_t *cp;
1399 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
1400 
1401 	DPRINTF(IOMMU_DMAMCTL_DEBUG, ("dma_mctl: handle %p ", mp));
1402 	switch (request) {
1403 	case DDI_DMA_FREE:
1404 	{
1405 		struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
1406 		struct sbus_soft_state *softsp = mppriv->softsp;
1407 		ASSERT(softsp != NULL);
1408 
1409 		/*
1410 		 * 'Free' the dma mappings.
1411 		 */
1412 		addr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
1413 		npages = mp->dmai_ndvmapages;
1414 		size = iommu_ptob(npages);
1415 
1416 		DPRINTF(IOMMU_DMAMCTL_DMA_FREE_DEBUG, ("iommu_dma_mctl dmafree: "
1417 		    "freeing vaddr %x for %x pages.\n", addr,
1418 		    mp->dmai_ndvmapages));
1419 		/* sync the entire object */
1420 		if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
1421 			/* flush stream write buffers */
1422 			sync_stream_buf(softsp, addr, npages,
1423 			    (int *)&mppriv->sync_flag, mppriv->phys_sync_flag);
1424 		}
1425 
1426 #if defined(DEBUG) && defined(IO_MEMDEBUG)
1427 		iommu_remove_mappings(mp);
1428 #endif /* DEBUG && IO_MEMDEBUG */
1429 
1430 		ASSERT(npages > (uint_t)0);
1431 		if (mp->dmai_rflags & DMP_NOLIMIT)
1432 			vmem_free(softsp->dvma_arena,
1433 			    (void *)(uintptr_t)addr, size);
1434 		else
1435 			vmem_xfree(softsp->dvma_arena,
1436 			    (void *)(uintptr_t)addr, size);
1437 
1438 		kmem_free(mppriv, sizeof (*mppriv));
1439 
1440 		if (softsp->dvma_call_list_id != 0)
1441 			ddi_run_callback(&softsp->dvma_call_list_id);
1442 
1443 		break;
1444 	}
1445 
1446 	case DDI_DMA_SET_SBUS64:
1447 	{
1448 		struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
1449 
1450 		return (iommu_dma_lim_setup(dip, rdip, mppriv->softsp,
1451 		    &mp->dmai_burstsizes, (uint_t)*lenp, &mp->dmai_minxfer,
1452 		    DDI_DMA_SBUS_64BIT));
1453 	}
1454 
1455 	case DDI_DMA_HTOC:
1456 		DPRINTF(IOMMU_DMAMCTL_HTOC_DEBUG, ("htoc off %lx mapping %lx "
1457 		    "size %x\n", *offp, mp->dmai_mapping,
1458 		    mp->dmai_size));
1459 
1460 		if ((uint_t)(*offp) >= mp->dmai_size)
1461 			return (DDI_FAILURE);
1462 
1463 		cp = (ddi_dma_cookie_t *)objp;
1464 		cp->dmac_notused = 0;
1465 		cp->dmac_address = (mp->dmai_mapping + (uint_t)(*offp));
1466 		cp->dmac_size =
1467 		    mp->dmai_mapping + mp->dmai_size - cp->dmac_address;
1468 		cp->dmac_type = 0;
1469 
1470 		break;
1471 
1472 	case DDI_DMA_KVADDR:
1473 		/*
1474 		 * If a physical address mapping has percolated this high,
1475 		 * that is an error (maybe?).
1476 		 */
1477 		if (mp->dmai_rflags & DMP_PHYSADDR) {
1478 			DPRINTF(IOMMU_DMAMCTL_KVADDR_DEBUG, ("kvaddr of phys "
1479 			    "mapping\n"));
1480 			return (DDI_FAILURE);
1481 		}
1482 
1483 		return (DDI_FAILURE);
1484 
1485 	case DDI_DMA_NEXTWIN:
1486 	{
1487 		ddi_dma_win_t *owin, *nwin;
1488 		uint_t winsize, newoff;
1489 		int rval;
1490 
1491 		DPRINTF(IOMMU_DMAMCTL_NEXTWIN_DEBUG, ("nextwin\n"));
1492 
1493 		mp = (ddi_dma_impl_t *)handle;
1494 		owin = (ddi_dma_win_t *)offp;
1495 		nwin = (ddi_dma_win_t *)objp;
1496 		if (mp->dmai_rflags & DDI_DMA_PARTIAL) {
1497 			if (*owin == NULL) {
1498 				DPRINTF(IOMMU_DMAMCTL_NEXTWIN_DEBUG,
1499 				    ("nextwin: win == NULL\n"));
1500 				mp->dmai_offset = 0;
1501 				*nwin = (ddi_dma_win_t)mp;
1502 				return (DDI_SUCCESS);
1503 			}
1504 
1505 			offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET);
1506 			winsize = iommu_ptob(mp->dmai_ndvmapages -
1507 			    iommu_btopr(offset));
1508 
1509 			newoff = (uint_t)(mp->dmai_offset + winsize);
1510 			if (newoff > mp->dmai_object.dmao_size -
1511 			    mp->dmai_minxfer)
1512 				return (DDI_DMA_DONE);
1513 
1514 			if ((rval = iommu_map_window(mp, newoff, winsize))
1515 			    != DDI_SUCCESS)
1516 				return (rval);
1517 		} else {
1518 			DPRINTF(IOMMU_DMAMCTL_NEXTWIN_DEBUG, ("nextwin: no "
1519 			    "partial mapping\n"));
1520 			if (*owin != NULL)
1521 				return (DDI_DMA_DONE);
1522 			mp->dmai_offset = 0;
1523 			*nwin = (ddi_dma_win_t)mp;
1524 		}
1525 		break;
1526 	}
1527 
1528 	case DDI_DMA_NEXTSEG:
1529 	{
1530 		ddi_dma_seg_t *oseg, *nseg;
1531 
1532 		DPRINTF(IOMMU_DMAMCTL_NEXTSEG_DEBUG, ("nextseg:\n"));
1533 
1534 		oseg = (ddi_dma_seg_t *)lenp;
1535 		if (*oseg != NULL)
1536 			return (DDI_DMA_DONE);
1537 		nseg = (ddi_dma_seg_t *)objp;
1538 		*nseg = *((ddi_dma_seg_t *)offp);
1539 		break;
1540 	}
1541 
1542 	case DDI_DMA_SEGTOC:
1543 	{
1544 		ddi_dma_seg_impl_t *seg;
1545 
1546 		seg = (ddi_dma_seg_impl_t *)handle;
1547 		cp = (ddi_dma_cookie_t *)objp;
1548 		cp->dmac_notused = 0;
1549 		cp->dmac_address = (ioaddr_t)seg->dmai_mapping;
1550 		cp->dmac_size = *lenp = seg->dmai_size;
1551 		cp->dmac_type = 0;
1552 		*offp = seg->dmai_offset;
1553 		break;
1554 	}
1555 
1556 	case DDI_DMA_MOVWIN:
1557 	{
1558 		uint_t winsize;
1559 		uint_t newoff;
1560 		int rval;
1561 
1562 		offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET);
1563 		winsize = iommu_ptob(mp->dmai_ndvmapages - iommu_btopr(offset));
1564 
1565 		DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("movwin off %lx len %lx "
1566 		    "winsize %x\n", *offp, *lenp, winsize));
1567 
1568 		if ((mp->dmai_rflags & DDI_DMA_PARTIAL) == 0)
1569 			return (DDI_FAILURE);
1570 
1571 		if (*lenp != (uint_t)-1 && *lenp != winsize) {
1572 			DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("bad length\n"));
1573 			return (DDI_FAILURE);
1574 		}
1575 		newoff = (uint_t)*offp;
1576 		if (newoff & (winsize - 1)) {
1577 			DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("bad off\n"));
1578 			return (DDI_FAILURE);
1579 		}
1580 
1581 		if (newoff == mp->dmai_offset) {
1582 			/*
1583 			 * Nothing to do...
1584 			 */
1585 			break;
1586 		}
1587 
1588 		/*
1589 		 * Check out new address...
1590 		 */
1591 		if (newoff > mp->dmai_object.dmao_size - mp->dmai_minxfer) {
1592 			DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("newoff out of "
1593 			    "range\n"));
1594 			return (DDI_FAILURE);
1595 		}
1596 
1597 		rval = iommu_map_window(mp, newoff, winsize);
1598 		if (rval != DDI_SUCCESS)
1599 			return (rval);
1600 
1601 		if ((cp = (ddi_dma_cookie_t *)objp) != 0) {
1602 			cp->dmac_notused = 0;
1603 			cp->dmac_address = (ioaddr_t)mp->dmai_mapping;
1604 			cp->dmac_size = mp->dmai_size;
1605 			cp->dmac_type = 0;
1606 		}
1607 		*offp = (off_t)newoff;
1608 		*lenp = (uint_t)winsize;
1609 		break;
1610 	}
1611 
1612 	case DDI_DMA_REPWIN:
1613 		if ((mp->dmai_rflags & DDI_DMA_PARTIAL) == 0) {
1614 			DPRINTF(IOMMU_DMAMCTL_REPWIN_DEBUG, ("repwin fail\n"));
1615 			return (DDI_FAILURE);
1616 		}
1617 
1618 		*offp = (off_t)mp->dmai_offset;
1619 
1620 		addr = mp->dmai_ndvmapages -
1621 		    iommu_btopr(mp->dmai_mapping & IOMMU_PAGEOFFSET);
1622 
1623 		*lenp = (uint_t)iommu_ptob(addr);
1624 
1625 		DPRINTF(IOMMU_DMAMCTL_REPWIN_DEBUG, ("repwin off %lx len %x\n",
1626 		    mp->dmai_offset, mp->dmai_size));
1627 
1628 		break;
1629 
1630 	case DDI_DMA_GETERR:
1631 		DPRINTF(IOMMU_DMAMCTL_GETERR_DEBUG,
1632 		    ("iommu_dma_mctl: geterr\n"));
1633 
1634 		break;
1635 
1636 	case DDI_DMA_COFF:
1637 		cp = (ddi_dma_cookie_t *)offp;
1638 		addr = cp->dmac_address;
1639 
1640 		if (addr < mp->dmai_mapping ||
1641 		    addr >= mp->dmai_mapping + mp->dmai_size)
1642 			return (DDI_FAILURE);
1643 
1644 		*objp = (caddr_t)(addr - mp->dmai_mapping);
1645 
1646 		DPRINTF(IOMMU_DMAMCTL_COFF_DEBUG, ("coff off %lx mapping %lx "
1647 		    "size %x\n", (ulong_t)*objp, mp->dmai_mapping,
1648 		    mp->dmai_size));
1649 
1650 		break;
1651 
1652 	case DDI_DMA_RESERVE:
1653 	{
1654 		struct ddi_dma_req *dmareq = (struct ddi_dma_req *)offp;
1655 		ddi_dma_lim_t *dma_lim;
1656 		ddi_dma_handle_t *handlep;
1657 		uint_t np;
1658 		ioaddr_t ioaddr;
1659 		int i;
1660 		struct fast_dvma *iommu_fast_dvma;
1661 		struct sbus_soft_state *softsp =
1662 		    (struct sbus_soft_state *)ddi_get_soft_state(sbusp,
1663 		    ddi_get_instance(dip));
1664 
1665 		/* Some simple sanity checks */
1666 		dma_lim = dmareq->dmar_limits;
1667 		if (dma_lim->dlim_burstsizes == 0) {
1668 			DPRINTF(IOMMU_FASTDMA_RESERVE,
1669 			    ("Reserve: bad burstsizes\n"));
1670 			return (DDI_DMA_BADLIMITS);
1671 		}
1672 		if ((AHI <= ALO) || (AHI < softsp->iommu_dvma_base)) {
1673 			DPRINTF(IOMMU_FASTDMA_RESERVE,
1674 			    ("Reserve: bad limits\n"));
1675 			return (DDI_DMA_BADLIMITS);
1676 		}
1677 
1678 		np = dmareq->dmar_object.dmao_size;
1679 		mutex_enter(&softsp->dma_pool_lock);
1680 		if (np > softsp->dma_reserve) {
1681 			mutex_exit(&softsp->dma_pool_lock);
1682 			DPRINTF(IOMMU_FASTDMA_RESERVE,
1683 			    ("Reserve: dma_reserve is exhausted\n"));
1684 			return (DDI_DMA_NORESOURCES);
1685 		}
1686 
1687 		softsp->dma_reserve -= np;
1688 		mutex_exit(&softsp->dma_pool_lock);
1689 		mp = kmem_zalloc(sizeof (*mp), KM_SLEEP);
1690 		mp->dmai_rflags = DMP_BYPASSNEXUS;
1691 		mp->dmai_rdip = rdip;
1692 		mp->dmai_minxfer = dma_lim->dlim_minxfer;
1693 		mp->dmai_burstsizes = dma_lim->dlim_burstsizes;
1694 
1695 		ioaddr = (ioaddr_t)(uintptr_t)vmem_xalloc(softsp->dvma_arena,
1696 		    iommu_ptob(np), IOMMU_PAGESIZE, 0,
1697 		    dma_lim->dlim_cntr_max + 1,
1698 		    (void *)(uintptr_t)ALO, (void *)(uintptr_t)(AHI + 1),
1699 		    dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP);
1700 
1701 		if (ioaddr == 0) {
1702 			mutex_enter(&softsp->dma_pool_lock);
1703 			softsp->dma_reserve += np;
1704 			mutex_exit(&softsp->dma_pool_lock);
1705 			kmem_free(mp, sizeof (*mp));
1706 			DPRINTF(IOMMU_FASTDMA_RESERVE,
1707 			    ("Reserve: No dvma resources available\n"));
1708 			return (DDI_DMA_NOMAPPING);
1709 		}
1710 
1711 		/* create a per request structure */
1712 		iommu_fast_dvma = kmem_alloc(sizeof (struct fast_dvma),
1713 		    KM_SLEEP);
1714 
1715 		/*
1716 		 * We need to remember the size of the transfer so that
1717 		 * we can figure the virtual pages to sync when the transfer
1718 		 * is complete.
1719 		 */
1720 		iommu_fast_dvma->pagecnt = kmem_zalloc(np *
1721 		    sizeof (uint_t), KM_SLEEP);
1722 
1723 		/* Allocate a streaming cache sync flag for each index */
1724 		iommu_fast_dvma->sync_flag = kmem_zalloc(np *
1725 		    sizeof (int), KM_SLEEP);
1726 
1727 		/* Allocate a physical sync flag for each index */
1728 		iommu_fast_dvma->phys_sync_flag =
1729 		    kmem_zalloc(np * sizeof (uint64_t), KM_SLEEP);
1730 
1731 		for (i = 0; i < np; i++)
1732 			iommu_fast_dvma->phys_sync_flag[i] = va_to_pa((caddr_t)
1733 			    &iommu_fast_dvma->sync_flag[i]);
1734 
1735 		mp->dmai_mapping = ioaddr;
1736 		mp->dmai_ndvmapages = np;
1737 		iommu_fast_dvma->ops = &iommu_dvma_ops;
1738 		iommu_fast_dvma->softsp = (caddr_t)softsp;
1739 		mp->dmai_nexus_private = (caddr_t)iommu_fast_dvma;
1740 		handlep = (ddi_dma_handle_t *)objp;
1741 		*handlep = (ddi_dma_handle_t)mp;
1742 
1743 		DPRINTF(IOMMU_FASTDMA_RESERVE,
1744 		    ("Reserve: mapping object %p base addr %lx size %x\n",
1745 		    mp, mp->dmai_mapping, mp->dmai_ndvmapages));
1746 
1747 		break;
1748 	}
1749 
1750 	case DDI_DMA_RELEASE:
1751 	{
1752 		ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
1753 		uint_t np = npages = mp->dmai_ndvmapages;
1754 		ioaddr_t ioaddr = mp->dmai_mapping;
1755 		volatile uint64_t *iotte_ptr;
1756 		struct fast_dvma *iommu_fast_dvma = (struct fast_dvma *)
1757 		    mp->dmai_nexus_private;
1758 		struct sbus_soft_state *softsp = (struct sbus_soft_state *)
1759 		    iommu_fast_dvma->softsp;
1760 
1761 		ASSERT(softsp != NULL);
1762 
1763 		/* Unload stale mappings and flush stale tlb's */
1764 		iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
1765 
1766 		while (npages > (uint_t)0) {
1767 			*iotte_ptr = (uint64_t)0;	/* unload tte */
1768 			iommu_tlb_flush(softsp, ioaddr, 1);
1769 
1770 			npages--;
1771 			iotte_ptr++;
1772 			ioaddr += IOMMU_PAGESIZE;
1773 		}
1774 
1775 		ioaddr = (ioaddr_t)mp->dmai_mapping;
1776 		mutex_enter(&softsp->dma_pool_lock);
1777 		softsp->dma_reserve += np;
1778 		mutex_exit(&softsp->dma_pool_lock);
1779 
1780 		if (mp->dmai_rflags & DMP_NOLIMIT)
1781 			vmem_free(softsp->dvma_arena,
1782 			    (void *)(uintptr_t)ioaddr, iommu_ptob(np));
1783 		else
1784 			vmem_xfree(softsp->dvma_arena,
1785 			    (void *)(uintptr_t)ioaddr, iommu_ptob(np));
1786 
1787 		kmem_free(mp, sizeof (*mp));
1788 		kmem_free(iommu_fast_dvma->pagecnt, np * sizeof (uint_t));
1789 		kmem_free(iommu_fast_dvma->sync_flag, np * sizeof (int));
1790 		kmem_free(iommu_fast_dvma->phys_sync_flag, np *
1791 		    sizeof (uint64_t));
1792 		kmem_free(iommu_fast_dvma, sizeof (struct fast_dvma));
1793 
1794 
1795 		DPRINTF(IOMMU_FASTDMA_RESERVE,
1796 		    ("Release: Base addr %x size %x\n", ioaddr, np));
1797 		/*
1798 		 * Now that we've freed some resource,
1799 		 * if there is anybody waiting for it
1800 		 * try and get them going.
1801 		 */
1802 		if (softsp->dvma_call_list_id != 0)
1803 			ddi_run_callback(&softsp->dvma_call_list_id);
1804 
1805 		break;
1806 	}
1807 
1808 	default:
1809 		DPRINTF(IOMMU_DMAMCTL_DEBUG, ("iommu_dma_mctl: unknown option "
1810 		    "0%x\n", request));
1811 
1812 		return (DDI_FAILURE);
1813 	}
1814 	return (DDI_SUCCESS);
1815 }
1816 
1817 /*ARGSUSED*/
1818 void
1819 iommu_dvma_kaddr_load(ddi_dma_handle_t h, caddr_t a, uint_t len, uint_t index,
1820     ddi_dma_cookie_t *cp)
1821 {
1822 	uintptr_t addr;
1823 	ioaddr_t ioaddr;
1824 	uint_t offset;
1825 	pfn_t pfn;
1826 	int npages;
1827 	volatile uint64_t *iotte_ptr;
1828 	uint64_t iotte_flag = 0;
1829 	struct as *as = NULL;
1830 	extern struct as kas;
1831 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
1832 	struct fast_dvma *iommu_fast_dvma =
1833 	    (struct fast_dvma *)mp->dmai_nexus_private;
1834 	struct sbus_soft_state *softsp = (struct sbus_soft_state *)
1835 	    iommu_fast_dvma->softsp;
1836 #if defined(DEBUG) && defined(IO_MEMUSAGE)
1837 	struct io_mem_list *iomemp;
1838 	pfn_t *pfnp;
1839 #endif /* DEBUG && IO_MEMUSAGE */
1840 
1841 	ASSERT(softsp != NULL);
1842 
1843 	addr = (uintptr_t)a;
1844 	ioaddr = (ioaddr_t)(mp->dmai_mapping + iommu_ptob(index));
1845 	offset = (uint_t)(addr & IOMMU_PAGEOFFSET);
1846 	iommu_fast_dvma->pagecnt[index] = iommu_btopr(len + offset);
1847 	as = &kas;
1848 	addr &= ~IOMMU_PAGEOFFSET;
1849 	npages = iommu_btopr(len + offset);
1850 
1851 #if defined(DEBUG) && defined(IO_MEMUSAGE)
1852 	iomemp = kmem_alloc(sizeof (struct io_mem_list), KM_SLEEP);
1853 	iomemp->rdip = mp->dmai_rdip;
1854 	iomemp->ioaddr = ioaddr;
1855 	iomemp->addr = addr;
1856 	iomemp->npages = npages;
1857 	pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) * (npages + 1),
1858 	    KM_SLEEP);
1859 #endif /* DEBUG && IO_MEMUSAGE */
1860 
1861 	cp->dmac_address = ioaddr | offset;
1862 	cp->dmac_size = len;
1863 
1864 	iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
1865 	/* read/write and streaming io on */
1866 	iotte_flag = IOTTE_VALID | IOTTE_WRITE | IOTTE_CACHE;
1867 
1868 	if (mp->dmai_rflags & DDI_DMA_CONSISTENT)
1869 		mp->dmai_rflags |= DMP_NOSYNC;
1870 	else if (!softsp->stream_buf_off)
1871 		iotte_flag |= IOTTE_STREAM;
1872 
1873 	DPRINTF(IOMMU_FASTDMA_LOAD, ("kaddr_load: ioaddr %x "
1874 	    "size %x offset %x index %x kaddr %lx\n",
1875 	    ioaddr, len, offset, index, addr));
1876 	ASSERT(npages > 0);
1877 	do {
1878 		pfn = hat_getpfnum(as->a_hat, (caddr_t)addr);
1879 		if (pfn == PFN_INVALID) {
1880 			DPRINTF(IOMMU_FASTDMA_LOAD, ("kaddr_load: invalid pfn "
1881 			    "from hat_getpfnum()\n"));
1882 		}
1883 
1884 		iommu_tlb_flush(softsp, ioaddr, 1);
1885 
1886 		/* load tte */
1887 		*iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;
1888 
1889 		npages--;
1890 		iotte_ptr++;
1891 
1892 		addr += IOMMU_PAGESIZE;
1893 		ioaddr += IOMMU_PAGESIZE;
1894 
1895 #if defined(DEBUG) && defined(IO_MEMUSAGE)
1896 		*pfnp = pfn;
1897 		pfnp++;
1898 #endif /* DEBUG && IO_MEMUSAGE */
1899 
1900 	} while (npages > 0);
1901 
1902 #if defined(DEBUG) && defined(IO_MEMUSAGE)
1903 	mutex_enter(&softsp->iomemlock);
1904 	iomemp->next = softsp->iomem;
1905 	softsp->iomem = iomemp;
1906 	mutex_exit(&softsp->iomemlock);
1907 #endif /* DEBUG && IO_MEMUSAGE */
1908 }
1909 
1910 /*ARGSUSED*/
1911 void
1912 iommu_dvma_unload(ddi_dma_handle_t h, uint_t index, uint_t view)
1913 {
1914 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
1915 	ioaddr_t ioaddr;
1916 	pgcnt_t npages;
1917 	struct fast_dvma *iommu_fast_dvma =
1918 	    (struct fast_dvma *)mp->dmai_nexus_private;
1919 	struct sbus_soft_state *softsp = (struct sbus_soft_state *)
1920 	    iommu_fast_dvma->softsp;
1921 #if defined(DEBUG) && defined(IO_MEMUSAGE)
1922 	struct io_mem_list **prevp, *walk;
1923 #endif /* DEBUG && IO_MEMUSAGE */
1924 
1925 	ASSERT(softsp != NULL);
1926 
1927 	ioaddr = (ioaddr_t)(mp->dmai_mapping + iommu_ptob(index));
1928 	npages = iommu_fast_dvma->pagecnt[index];
1929 
1930 #if defined(DEBUG) && defined(IO_MEMUSAGE)
1931 	mutex_enter(&softsp->iomemlock);
1932 	prevp = &softsp->iomem;
1933 	walk = softsp->iomem;
1934 
1935 	while (walk != NULL) {
1936 		if (walk->ioaddr == ioaddr) {
1937 			*prevp = walk->next;
1938 			break;
1939 		}
1940 		prevp = &walk->next;
1941 		walk = walk->next;
1942 	}
1943 	mutex_exit(&softsp->iomemlock);
1944 
1945 	kmem_free(walk->pfn, sizeof (pfn_t) * (npages + 1));
1946 	kmem_free(walk, sizeof (struct io_mem_list));
1947 #endif /* DEBUG && IO_MEMUSAGE */
1948 
1949 	DPRINTF(IOMMU_FASTDMA_SYNC, ("kaddr_unload: handle %p sync flag "
1950 	    "addr %p sync flag pfn %llx index %x page count %lx\n", mp,
1951 	    &iommu_fast_dvma->sync_flag[index],
1952 	    iommu_fast_dvma->phys_sync_flag[index],
1953 	    index, npages));
1954 
1955 	if ((mp->dmai_rflags & DMP_NOSYNC) != DMP_NOSYNC) {
1956 		sync_stream_buf(softsp, ioaddr, npages,
1957 			(int *)&iommu_fast_dvma->sync_flag[index],
1958 			iommu_fast_dvma->phys_sync_flag[index]);
1959 	}
1960 }
1961 
1962 /*ARGSUSED*/
1963 void
1964 iommu_dvma_sync(ddi_dma_handle_t h, uint_t index, uint_t view)
1965 {
1966 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
1967 	ioaddr_t ioaddr;
1968 	uint_t npages;
1969 	struct fast_dvma *iommu_fast_dvma =
1970 	    (struct fast_dvma *)mp->dmai_nexus_private;
1971 	struct sbus_soft_state *softsp = (struct sbus_soft_state *)
1972 	    iommu_fast_dvma->softsp;
1973 
1974 	if ((mp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
1975 		return;
1976 
1977 	ASSERT(softsp != NULL);
1978 	ioaddr = (ioaddr_t)(mp->dmai_mapping + iommu_ptob(index));
1979 	npages = iommu_fast_dvma->pagecnt[index];
1980 
1981 	DPRINTF(IOMMU_FASTDMA_SYNC, ("kaddr_sync: handle %p, "
1982 	    "sync flag addr %p, sync flag pfn %llx\n", mp,
1983 	    &iommu_fast_dvma->sync_flag[index],
1984 	    iommu_fast_dvma->phys_sync_flag[index]));
1985 
1986 	sync_stream_buf(softsp, ioaddr, npages,
1987 	    (int *)&iommu_fast_dvma->sync_flag[index],
1988 	    iommu_fast_dvma->phys_sync_flag[index]);
1989 }
1990