xref: /titanic_53/usr/src/uts/sun4u/io/iommu.c (revision ffadc26e3de00e4c5377fd016a1ffc5dfbb4ff9c)
17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate  * CDDL HEADER START
37c478bd9Sstevel@tonic-gate  *
47c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
57c478bd9Sstevel@tonic-gate  * Common Development and Distribution License, Version 1.0 only
67c478bd9Sstevel@tonic-gate  * (the "License").  You may not use this file except in compliance
77c478bd9Sstevel@tonic-gate  * with the License.
87c478bd9Sstevel@tonic-gate  *
97c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
107c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
117c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
127c478bd9Sstevel@tonic-gate  * and limitations under the License.
137c478bd9Sstevel@tonic-gate  *
147c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
157c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
167c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
177c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
187c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
197c478bd9Sstevel@tonic-gate  *
207c478bd9Sstevel@tonic-gate  * CDDL HEADER END
217c478bd9Sstevel@tonic-gate  */
227c478bd9Sstevel@tonic-gate /*
237c478bd9Sstevel@tonic-gate  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
247c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
257c478bd9Sstevel@tonic-gate  */
267c478bd9Sstevel@tonic-gate 
277c478bd9Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
287c478bd9Sstevel@tonic-gate 
297c478bd9Sstevel@tonic-gate #include <sys/types.h>
307c478bd9Sstevel@tonic-gate #include <sys/param.h>
317c478bd9Sstevel@tonic-gate #include <sys/conf.h>
327c478bd9Sstevel@tonic-gate #include <sys/ddi.h>
337c478bd9Sstevel@tonic-gate #include <sys/sunddi.h>
347c478bd9Sstevel@tonic-gate #include <sys/ddi_impldefs.h>
357c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
367c478bd9Sstevel@tonic-gate #include <sys/kmem.h>
377c478bd9Sstevel@tonic-gate #include <sys/vmem.h>
387c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h>
397c478bd9Sstevel@tonic-gate 
407c478bd9Sstevel@tonic-gate #include <sys/ddidmareq.h>
417c478bd9Sstevel@tonic-gate #include <sys/sysiosbus.h>
427c478bd9Sstevel@tonic-gate #include <sys/iommu.h>
437c478bd9Sstevel@tonic-gate #include <sys/iocache.h>
447c478bd9Sstevel@tonic-gate #include <sys/dvma.h>
457c478bd9Sstevel@tonic-gate 
467c478bd9Sstevel@tonic-gate #include <vm/as.h>
477c478bd9Sstevel@tonic-gate #include <vm/hat.h>
487c478bd9Sstevel@tonic-gate #include <vm/page.h>
497c478bd9Sstevel@tonic-gate #include <vm/hat_sfmmu.h>
507c478bd9Sstevel@tonic-gate #include <sys/machparam.h>
517c478bd9Sstevel@tonic-gate #include <sys/machsystm.h>
527c478bd9Sstevel@tonic-gate #include <sys/vmsystm.h>
537c478bd9Sstevel@tonic-gate #include <sys/iommutsb.h>
547c478bd9Sstevel@tonic-gate 
557c478bd9Sstevel@tonic-gate /* Useful debugging Stuff */
567c478bd9Sstevel@tonic-gate #include <sys/nexusdebug.h>
577c478bd9Sstevel@tonic-gate #include <sys/debug.h>
587c478bd9Sstevel@tonic-gate /* Bitfield debugging definitions for this file */
597c478bd9Sstevel@tonic-gate #define	IOMMU_GETDVMAPAGES_DEBUG	0x1
607c478bd9Sstevel@tonic-gate #define	IOMMU_DMAMAP_DEBUG		0x2
617c478bd9Sstevel@tonic-gate #define	IOMMU_DMAMCTL_DEBUG		0x4
627c478bd9Sstevel@tonic-gate #define	IOMMU_DMAMCTL_SYNC_DEBUG	0x8
637c478bd9Sstevel@tonic-gate #define	IOMMU_DMAMCTL_HTOC_DEBUG	0x10
647c478bd9Sstevel@tonic-gate #define	IOMMU_DMAMCTL_KVADDR_DEBUG	0x20
657c478bd9Sstevel@tonic-gate #define	IOMMU_DMAMCTL_NEXTWIN_DEBUG	0x40
667c478bd9Sstevel@tonic-gate #define	IOMMU_DMAMCTL_NEXTSEG_DEBUG	0x80
677c478bd9Sstevel@tonic-gate #define	IOMMU_DMAMCTL_MOVWIN_DEBUG	0x100
687c478bd9Sstevel@tonic-gate #define	IOMMU_DMAMCTL_REPWIN_DEBUG	0x200
697c478bd9Sstevel@tonic-gate #define	IOMMU_DMAMCTL_GETERR_DEBUG	0x400
707c478bd9Sstevel@tonic-gate #define	IOMMU_DMAMCTL_COFF_DEBUG	0x800
717c478bd9Sstevel@tonic-gate #define	IOMMU_DMAMCTL_DMA_FREE_DEBUG	0x1000
727c478bd9Sstevel@tonic-gate #define	IOMMU_REGISTERS_DEBUG		0x2000
737c478bd9Sstevel@tonic-gate #define	IOMMU_DMA_SETUP_DEBUG		0x4000
747c478bd9Sstevel@tonic-gate #define	IOMMU_DMA_UNBINDHDL_DEBUG	0x8000
757c478bd9Sstevel@tonic-gate #define	IOMMU_DMA_BINDHDL_DEBUG		0x10000
767c478bd9Sstevel@tonic-gate #define	IOMMU_DMA_WIN_DEBUG		0x20000
777c478bd9Sstevel@tonic-gate #define	IOMMU_DMA_ALLOCHDL_DEBUG	0x40000
787c478bd9Sstevel@tonic-gate #define	IOMMU_DMA_LIM_SETUP_DEBUG	0x80000
797c478bd9Sstevel@tonic-gate #define	IOMMU_FASTDMA_RESERVE		0x100000
807c478bd9Sstevel@tonic-gate #define	IOMMU_FASTDMA_LOAD		0x200000
817c478bd9Sstevel@tonic-gate #define	IOMMU_INTER_INTRA_XFER		0x400000
827c478bd9Sstevel@tonic-gate #define	IOMMU_TTE			0x800000
837c478bd9Sstevel@tonic-gate #define	IOMMU_TLB			0x1000000
847c478bd9Sstevel@tonic-gate #define	IOMMU_FASTDMA_SYNC		0x2000000
857c478bd9Sstevel@tonic-gate 
867c478bd9Sstevel@tonic-gate /* Turn on if you need to keep track of outstanding IOMMU usage */
877c478bd9Sstevel@tonic-gate /* #define	IO_MEMUSAGE */
887c478bd9Sstevel@tonic-gate /* Turn on to debug IOMMU unmapping code */
897c478bd9Sstevel@tonic-gate /* #define	IO_MEMDEBUG */
907c478bd9Sstevel@tonic-gate 
917c478bd9Sstevel@tonic-gate static struct dvma_ops iommu_dvma_ops = {
927c478bd9Sstevel@tonic-gate 	DVMAO_REV,
937c478bd9Sstevel@tonic-gate 	iommu_dvma_kaddr_load,
947c478bd9Sstevel@tonic-gate 	iommu_dvma_unload,
957c478bd9Sstevel@tonic-gate 	iommu_dvma_sync
967c478bd9Sstevel@tonic-gate };
977c478bd9Sstevel@tonic-gate 
987c478bd9Sstevel@tonic-gate extern void *sbusp;		/* sbus soft state hook */
997c478bd9Sstevel@tonic-gate 
1007c478bd9Sstevel@tonic-gate #define	DVMA_MAX_CACHE	65536
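/*
 * DVMA_MAX_CACHE is handed to vmem_create() below as the qcache_max
 * argument, so DVMA allocations of up to 64K are satisfied from the
 * arena's quantum caches.
 */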
1017c478bd9Sstevel@tonic-gate 
1027c478bd9Sstevel@tonic-gate /*
1037c478bd9Sstevel@tonic-gate  * This is the number of pages that a mapping request needs before we force
1047c478bd9Sstevel@tonic-gate  * the TLB flush code to use diagnostic registers.  This value was determined
1057c478bd9Sstevel@tonic-gate  * through a series of test runs measuring dma mapping setup performance.
1067c478bd9Sstevel@tonic-gate  */
1077c478bd9Sstevel@tonic-gate int tlb_flush_using_diag = 16;
1087c478bd9Sstevel@tonic-gate 
1097c478bd9Sstevel@tonic-gate int sysio_iommu_tsb_sizes[] = {
1107c478bd9Sstevel@tonic-gate 	IOMMU_TSB_SIZE_8M,
1117c478bd9Sstevel@tonic-gate 	IOMMU_TSB_SIZE_16M,
1127c478bd9Sstevel@tonic-gate 	IOMMU_TSB_SIZE_32M,
1137c478bd9Sstevel@tonic-gate 	IOMMU_TSB_SIZE_64M,
1147c478bd9Sstevel@tonic-gate 	IOMMU_TSB_SIZE_128M,
1157c478bd9Sstevel@tonic-gate 	IOMMU_TSB_SIZE_256M,
1167c478bd9Sstevel@tonic-gate 	IOMMU_TSB_SIZE_512M,
1177c478bd9Sstevel@tonic-gate 	IOMMU_TSB_SIZE_1G
1187c478bd9Sstevel@tonic-gate };
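/*
 * Note: the index into this table doubles as the value that
 * iommu_resume_init() programs into the TSB_SIZE field of the IOMMU
 * control register; entry i is the TSB size needed to cover (8M << i)
 * of DVMA space.
 */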
1197c478bd9Sstevel@tonic-gate 
1207c478bd9Sstevel@tonic-gate static int iommu_map_window(ddi_dma_impl_t *, off_t, size_t);
1217c478bd9Sstevel@tonic-gate 
1227c478bd9Sstevel@tonic-gate int
1237c478bd9Sstevel@tonic-gate iommu_init(struct sbus_soft_state *softsp, caddr_t address)
1247c478bd9Sstevel@tonic-gate {
1257c478bd9Sstevel@tonic-gate 	int i;
1267c478bd9Sstevel@tonic-gate 	char name[40];
1277c478bd9Sstevel@tonic-gate 
1287c478bd9Sstevel@tonic-gate #ifdef DEBUG
1297c478bd9Sstevel@tonic-gate 	debug_info = 1;
1307c478bd9Sstevel@tonic-gate #endif
1317c478bd9Sstevel@tonic-gate 
1327c478bd9Sstevel@tonic-gate 	/*
1337c478bd9Sstevel@tonic-gate 	 * Simply add each register's offset to the base address
1347c478bd9Sstevel@tonic-gate 	 * to calculate the already mapped virtual address of
1357c478bd9Sstevel@tonic-gate 	 * the device register...
1367c478bd9Sstevel@tonic-gate 	 *
1377c478bd9Sstevel@tonic-gate 	 * Define a macro for the pointer arithmetic; all registers
1387c478bd9Sstevel@tonic-gate 	 * are 64 bits wide and are defined as uint64_t's.
1397c478bd9Sstevel@tonic-gate 	 */
1407c478bd9Sstevel@tonic-gate 
1417c478bd9Sstevel@tonic-gate #define	REG_ADDR(b, o)	(uint64_t *)((caddr_t)(b) + (o))
1427c478bd9Sstevel@tonic-gate 
1437c478bd9Sstevel@tonic-gate 	softsp->iommu_ctrl_reg = REG_ADDR(address, OFF_IOMMU_CTRL_REG);
1447c478bd9Sstevel@tonic-gate 	softsp->tsb_base_addr = REG_ADDR(address, OFF_TSB_BASE_ADDR);
1457c478bd9Sstevel@tonic-gate 	softsp->iommu_flush_reg = REG_ADDR(address, OFF_IOMMU_FLUSH_REG);
1467c478bd9Sstevel@tonic-gate 	softsp->iommu_tlb_tag = REG_ADDR(address, OFF_IOMMU_TLB_TAG);
1477c478bd9Sstevel@tonic-gate 	softsp->iommu_tlb_data = REG_ADDR(address, OFF_IOMMU_TLB_DATA);
1487c478bd9Sstevel@tonic-gate 
1497c478bd9Sstevel@tonic-gate #undef REG_ADDR
1507c478bd9Sstevel@tonic-gate 
1517c478bd9Sstevel@tonic-gate 	mutex_init(&softsp->dma_pool_lock, NULL, MUTEX_DEFAULT, NULL);
1527c478bd9Sstevel@tonic-gate 	mutex_init(&softsp->intr_poll_list_lock, NULL, MUTEX_DEFAULT, NULL);
1537c478bd9Sstevel@tonic-gate 
1547c478bd9Sstevel@tonic-gate 	/* Set up the DVMA resource sizes */
1557c478bd9Sstevel@tonic-gate 	if ((softsp->iommu_tsb_cookie = iommu_tsb_alloc(softsp->upa_id)) ==
1567c478bd9Sstevel@tonic-gate 	    IOMMU_TSB_COOKIE_NONE) {
1577c478bd9Sstevel@tonic-gate 		cmn_err(CE_WARN, "%s%d: Unable to retrieve IOMMU array.",
1587c478bd9Sstevel@tonic-gate 		    ddi_driver_name(softsp->dip),
1597c478bd9Sstevel@tonic-gate 		    ddi_get_instance(softsp->dip));
1607c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
1617c478bd9Sstevel@tonic-gate 	}
1627c478bd9Sstevel@tonic-gate 	softsp->soft_tsb_base_addr =
1637c478bd9Sstevel@tonic-gate 	    iommu_tsb_cookie_to_va(softsp->iommu_tsb_cookie);
1647c478bd9Sstevel@tonic-gate 	softsp->iommu_dvma_size =
1657c478bd9Sstevel@tonic-gate 	    iommu_tsb_cookie_to_size(softsp->iommu_tsb_cookie) <<
1667c478bd9Sstevel@tonic-gate 	    IOMMU_TSB_TO_RNG;
1677c478bd9Sstevel@tonic-gate 	softsp->iommu_dvma_base = (ioaddr_t)
1687c478bd9Sstevel@tonic-gate 	    (0 - (ioaddr_t)softsp->iommu_dvma_size);
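	/*
	 * The base is computed with 32-bit (ioaddr_t) wraparound: the DVMA
	 * range occupies the top iommu_dvma_size bytes of the 32-bit IO
	 * address space, i.e. iommu_dvma_base = 2^32 - iommu_dvma_size.
	 */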
1697c478bd9Sstevel@tonic-gate 
1707c478bd9Sstevel@tonic-gate 	(void) snprintf(name, sizeof (name), "%s%d_dvma",
1717c478bd9Sstevel@tonic-gate 	    ddi_driver_name(softsp->dip), ddi_get_instance(softsp->dip));
1727c478bd9Sstevel@tonic-gate 
1737c478bd9Sstevel@tonic-gate 	/*
1747c478bd9Sstevel@tonic-gate 	 * Initialize the DVMA vmem arena.
1757c478bd9Sstevel@tonic-gate 	 */
176*ffadc26eSmike_s 	softsp->dvma_arena = vmem_create(name,
177*ffadc26eSmike_s 	    (void *)(uintptr_t)softsp->iommu_dvma_base,
1787c478bd9Sstevel@tonic-gate 	    softsp->iommu_dvma_size, PAGESIZE, NULL, NULL, NULL,
1797c478bd9Sstevel@tonic-gate 	    DVMA_MAX_CACHE, VM_SLEEP);
1807c478bd9Sstevel@tonic-gate 
1817c478bd9Sstevel@tonic-gate 	/* Set the limit for dvma_reserve() to 1/2 of the total dvma space */
1827c478bd9Sstevel@tonic-gate 	softsp->dma_reserve = iommu_btop(softsp->iommu_dvma_size >> 1);
1837c478bd9Sstevel@tonic-gate 
1847c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE)
1857c478bd9Sstevel@tonic-gate 	mutex_init(&softsp->iomemlock, NULL, MUTEX_DEFAULT, NULL);
1867c478bd9Sstevel@tonic-gate 	softsp->iomem = (struct io_mem_list *)0;
1877c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */
1887c478bd9Sstevel@tonic-gate 	/*
1897c478bd9Sstevel@tonic-gate 	 * Get the base address of the TSB table and store it in the hardware
1907c478bd9Sstevel@tonic-gate 	 */
1917c478bd9Sstevel@tonic-gate 
1927c478bd9Sstevel@tonic-gate 	/*
1937c478bd9Sstevel@tonic-gate 	 * We plan on the PROM flushing all TLB entries.  If this is not the
1947c478bd9Sstevel@tonic-gate 	 * case, this is where we should flush the hardware TLB.
1957c478bd9Sstevel@tonic-gate 	 */
1967c478bd9Sstevel@tonic-gate 
1977c478bd9Sstevel@tonic-gate 	/* Set the IOMMU registers */
1987c478bd9Sstevel@tonic-gate 	(void) iommu_resume_init(softsp);
1997c478bd9Sstevel@tonic-gate 
2007c478bd9Sstevel@tonic-gate 	/* check the convenient copy of TSB base, and flush write buffers */
2017c478bd9Sstevel@tonic-gate 	if (*softsp->tsb_base_addr !=
2027c478bd9Sstevel@tonic-gate 	    va_to_pa((caddr_t)softsp->soft_tsb_base_addr)) {
2037c478bd9Sstevel@tonic-gate 		iommu_tsb_free(softsp->iommu_tsb_cookie);
2047c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
2057c478bd9Sstevel@tonic-gate 	}
2067c478bd9Sstevel@tonic-gate 
2077c478bd9Sstevel@tonic-gate 	softsp->sbus_io_lo_pfn = UINT32_MAX;
2087c478bd9Sstevel@tonic-gate 	softsp->sbus_io_hi_pfn = 0;
2097c478bd9Sstevel@tonic-gate 	for (i = 0; i < sysio_pd_getnrng(softsp->dip); i++) {
2107c478bd9Sstevel@tonic-gate 		struct rangespec *rangep;
2117c478bd9Sstevel@tonic-gate 		uint64_t addr;
2127c478bd9Sstevel@tonic-gate 		pfn_t hipfn, lopfn;
2137c478bd9Sstevel@tonic-gate 
2147c478bd9Sstevel@tonic-gate 		rangep = sysio_pd_getrng(softsp->dip, i);
2157c478bd9Sstevel@tonic-gate 		addr = (uint64_t)((uint64_t)rangep->rng_bustype << 32);
2167c478bd9Sstevel@tonic-gate 		addr |= (uint64_t)rangep->rng_offset;
2177c478bd9Sstevel@tonic-gate 		lopfn = (pfn_t)(addr >> MMU_PAGESHIFT);
2187c478bd9Sstevel@tonic-gate 		addr += (uint64_t)(rangep->rng_size - 1);
2197c478bd9Sstevel@tonic-gate 		hipfn = (pfn_t)(addr >> MMU_PAGESHIFT);
2207c478bd9Sstevel@tonic-gate 
2217c478bd9Sstevel@tonic-gate 		softsp->sbus_io_lo_pfn = (lopfn < softsp->sbus_io_lo_pfn) ?
2227c478bd9Sstevel@tonic-gate 		    lopfn : softsp->sbus_io_lo_pfn;
2237c478bd9Sstevel@tonic-gate 
2247c478bd9Sstevel@tonic-gate 		softsp->sbus_io_hi_pfn = (hipfn > softsp->sbus_io_hi_pfn) ?
2257c478bd9Sstevel@tonic-gate 		    hipfn : softsp->sbus_io_hi_pfn;
2267c478bd9Sstevel@tonic-gate 	}
2277c478bd9Sstevel@tonic-gate 
2287c478bd9Sstevel@tonic-gate 	DPRINTF(IOMMU_REGISTERS_DEBUG, ("IOMMU Control reg: %p IOMMU TSB "
2297c478bd9Sstevel@tonic-gate 	    "base reg: %p IOMMU flush reg: %p TSB base addr %p\n",
2307c478bd9Sstevel@tonic-gate 	    softsp->iommu_ctrl_reg, softsp->tsb_base_addr,
2317c478bd9Sstevel@tonic-gate 	    softsp->iommu_flush_reg, softsp->soft_tsb_base_addr));
2327c478bd9Sstevel@tonic-gate 
2337c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
2347c478bd9Sstevel@tonic-gate }
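
/*
 * Usage sketch (hypothetical caller, not part of this file): the SBus
 * nexus attach path maps the SYSIO registers and then does
 *
 *	if (iommu_init(softsp, reg_base) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *
 * and the matching detach or failure path calls iommu_uninit(softsp) to
 * disable the IOMMU and return the TSB to the spare pool.
 */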
2357c478bd9Sstevel@tonic-gate 
2367c478bd9Sstevel@tonic-gate /*
2377c478bd9Sstevel@tonic-gate  * Function to uninitialize the IOMMU and release the TSB back to
2387c478bd9Sstevel@tonic-gate  * the spare pool.  See startup.c for TSB spare management.
2397c478bd9Sstevel@tonic-gate  */
2407c478bd9Sstevel@tonic-gate 
2417c478bd9Sstevel@tonic-gate int
2427c478bd9Sstevel@tonic-gate iommu_uninit(struct sbus_soft_state *softsp)
2437c478bd9Sstevel@tonic-gate {
2447c478bd9Sstevel@tonic-gate 	vmem_destroy(softsp->dvma_arena);
2457c478bd9Sstevel@tonic-gate 
2467c478bd9Sstevel@tonic-gate 	/* flip off the IOMMU enable switch */
2477c478bd9Sstevel@tonic-gate 	*softsp->iommu_ctrl_reg &=
2487c478bd9Sstevel@tonic-gate 		(TSB_SIZE << TSB_SIZE_SHIFT | IOMMU_DISABLE);
2497c478bd9Sstevel@tonic-gate 
2507c478bd9Sstevel@tonic-gate 	iommu_tsb_free(softsp->iommu_tsb_cookie);
2517c478bd9Sstevel@tonic-gate 
2527c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
2537c478bd9Sstevel@tonic-gate }
2547c478bd9Sstevel@tonic-gate 
2557c478bd9Sstevel@tonic-gate /*
2567c478bd9Sstevel@tonic-gate  * Initialize iommu hardware registers when the system is being resumed.
2577c478bd9Sstevel@tonic-gate  * (Subset of iommu_init())
2587c478bd9Sstevel@tonic-gate  */
2597c478bd9Sstevel@tonic-gate int
2607c478bd9Sstevel@tonic-gate iommu_resume_init(struct sbus_soft_state *softsp)
2617c478bd9Sstevel@tonic-gate {
2627c478bd9Sstevel@tonic-gate 	int i;
2637c478bd9Sstevel@tonic-gate 	uint_t tsb_size;
2647c478bd9Sstevel@tonic-gate 	uint_t tsb_bytes;
2657c478bd9Sstevel@tonic-gate 
2667c478bd9Sstevel@tonic-gate 	/*
2677c478bd9Sstevel@tonic-gate 	 * Reset the base address of the TSB table in the hardware
2687c478bd9Sstevel@tonic-gate 	 */
2697c478bd9Sstevel@tonic-gate 	*softsp->tsb_base_addr = va_to_pa((caddr_t)softsp->soft_tsb_base_addr);
2707c478bd9Sstevel@tonic-gate 
2717c478bd9Sstevel@tonic-gate 	/*
2727c478bd9Sstevel@tonic-gate 	 * Figure out the correct size of the IOMMU TSB entries.  If we
2737c478bd9Sstevel@tonic-gate 	 * end up with a size smaller than that needed for 8M of IOMMU
2747c478bd9Sstevel@tonic-gate 	 * space, default the size to 8M.  XXX We could probably panic here
2757c478bd9Sstevel@tonic-gate 	 */
2767c478bd9Sstevel@tonic-gate 	i = sizeof (sysio_iommu_tsb_sizes) / sizeof (sysio_iommu_tsb_sizes[0])
2777c478bd9Sstevel@tonic-gate 	    - 1;
2787c478bd9Sstevel@tonic-gate 
2797c478bd9Sstevel@tonic-gate 	tsb_bytes = iommu_tsb_cookie_to_size(softsp->iommu_tsb_cookie);
2807c478bd9Sstevel@tonic-gate 
2817c478bd9Sstevel@tonic-gate 	while (i > 0) {
2827c478bd9Sstevel@tonic-gate 		if (tsb_bytes >= sysio_iommu_tsb_sizes[i])
2837c478bd9Sstevel@tonic-gate 			break;
2847c478bd9Sstevel@tonic-gate 		i--;
2857c478bd9Sstevel@tonic-gate 	}
2867c478bd9Sstevel@tonic-gate 
2877c478bd9Sstevel@tonic-gate 	tsb_size = i;
2887c478bd9Sstevel@tonic-gate 
2897c478bd9Sstevel@tonic-gate 	/* OK, let's flip the "on" switch of the IOMMU */
2907c478bd9Sstevel@tonic-gate 	*softsp->iommu_ctrl_reg = (uint64_t)(tsb_size << TSB_SIZE_SHIFT
2917c478bd9Sstevel@tonic-gate 	    | IOMMU_ENABLE | IOMMU_DIAG_ENABLE);
2927c478bd9Sstevel@tonic-gate 
2937c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
2947c478bd9Sstevel@tonic-gate }
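
/*
 * Worked example of the size selection above: if the allocated TSB is
 * IOMMU_TSB_SIZE_64M bytes, the loop stops at index 3 of
 * sysio_iommu_tsb_sizes[], so 3 is written to the TSB_SIZE field of
 * the control register.
 */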
2957c478bd9Sstevel@tonic-gate 
2967c478bd9Sstevel@tonic-gate void
2977c478bd9Sstevel@tonic-gate iommu_tlb_flush(struct sbus_soft_state *softsp, ioaddr_t addr, pgcnt_t npages)
2987c478bd9Sstevel@tonic-gate {
2997c478bd9Sstevel@tonic-gate 	volatile uint64_t tmpreg;
3007c478bd9Sstevel@tonic-gate 	volatile uint64_t *vaddr_reg, *valid_bit_reg;
3017c478bd9Sstevel@tonic-gate 	ioaddr_t hiaddr, ioaddr;
3027c478bd9Sstevel@tonic-gate 	int i, do_flush = 0;
3037c478bd9Sstevel@tonic-gate 
3047c478bd9Sstevel@tonic-gate 	if (npages == 1) {
3057c478bd9Sstevel@tonic-gate 		*softsp->iommu_flush_reg = (uint64_t)addr;
3067c478bd9Sstevel@tonic-gate 		tmpreg = *softsp->sbus_ctrl_reg;
3077c478bd9Sstevel@tonic-gate 		return;
3087c478bd9Sstevel@tonic-gate 	}
3097c478bd9Sstevel@tonic-gate 
3107c478bd9Sstevel@tonic-gate 	hiaddr = addr + (ioaddr_t)(npages * IOMMU_PAGESIZE);
3117c478bd9Sstevel@tonic-gate 	for (i = 0, vaddr_reg = softsp->iommu_tlb_tag,
3127c478bd9Sstevel@tonic-gate 	    valid_bit_reg = softsp->iommu_tlb_data;
3137c478bd9Sstevel@tonic-gate 	    i < IOMMU_TLB_ENTRIES; i++, vaddr_reg++, valid_bit_reg++) {
3147c478bd9Sstevel@tonic-gate 		tmpreg = *vaddr_reg;
3157c478bd9Sstevel@tonic-gate 		ioaddr = (ioaddr_t)((tmpreg & IOMMU_TLBTAG_VA_MASK) <<
3167c478bd9Sstevel@tonic-gate 		    IOMMU_TLBTAG_VA_SHIFT);
3177c478bd9Sstevel@tonic-gate 
318*ffadc26eSmike_s 		DPRINTF(IOMMU_TLB, ("Vaddr reg 0x%p, "
319*ffadc26eSmike_s 		    "TLB vaddr reg %lx, IO addr 0x%x "
3207c478bd9Sstevel@tonic-gate 		    "Base addr 0x%x, Hi addr 0x%x\n",
3217c478bd9Sstevel@tonic-gate 		    vaddr_reg, tmpreg, ioaddr, addr, hiaddr));
3227c478bd9Sstevel@tonic-gate 
3237c478bd9Sstevel@tonic-gate 		if (ioaddr >= addr && ioaddr <= hiaddr) {
3247c478bd9Sstevel@tonic-gate 			tmpreg = *valid_bit_reg;
3257c478bd9Sstevel@tonic-gate 
326*ffadc26eSmike_s 			DPRINTF(IOMMU_TLB, ("Valid reg addr 0x%p, "
327*ffadc26eSmike_s 			    "TLB valid reg %lx\n",
3287c478bd9Sstevel@tonic-gate 			    valid_bit_reg, tmpreg));
3297c478bd9Sstevel@tonic-gate 
3307c478bd9Sstevel@tonic-gate 			if (tmpreg & IOMMU_TLB_VALID) {
3317c478bd9Sstevel@tonic-gate 				*softsp->iommu_flush_reg = (uint64_t)ioaddr;
3327c478bd9Sstevel@tonic-gate 				do_flush = 1;
3337c478bd9Sstevel@tonic-gate 			}
3347c478bd9Sstevel@tonic-gate 		}
3357c478bd9Sstevel@tonic-gate 	}
3367c478bd9Sstevel@tonic-gate 
3377c478bd9Sstevel@tonic-gate 	if (do_flush)
3387c478bd9Sstevel@tonic-gate 		tmpreg = *softsp->sbus_ctrl_reg;
3397c478bd9Sstevel@tonic-gate }
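
/*
 * Summary of the flush strategy above: a single page is flushed by
 * writing its DVMA address to the flush register; for multi-page
 * requests the diagnostic TLB tag/data registers are walked and only
 * valid entries that fall within [addr, hiaddr] are flushed.  The read
 * of sbus_ctrl_reg afterwards forces the posted flush writes to
 * complete.
 */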
3407c478bd9Sstevel@tonic-gate 
3417c478bd9Sstevel@tonic-gate 
3427c478bd9Sstevel@tonic-gate /*
3437c478bd9Sstevel@tonic-gate  * Shorthand defines
3447c478bd9Sstevel@tonic-gate  */
3457c478bd9Sstevel@tonic-gate 
3467c478bd9Sstevel@tonic-gate #define	ALO		dma_lim->dlim_addr_lo
3477c478bd9Sstevel@tonic-gate #define	AHI		dma_lim->dlim_addr_hi
3487c478bd9Sstevel@tonic-gate #define	OBJSIZE		dmareq->dmar_object.dmao_size
3497c478bd9Sstevel@tonic-gate #define	IOTTE_NDX(vaddr, base) (base + \
3507c478bd9Sstevel@tonic-gate 		(int)(iommu_btop((vaddr & ~IOMMU_PAGEMASK) - \
3517c478bd9Sstevel@tonic-gate 		softsp->iommu_dvma_base)))
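/*
 * IOTTE_NDX(vaddr, base) converts a DVMA address into a pointer to its
 * IOTTE: the DVMA base is subtracted, the remainder is turned into a
 * page number, and that page number indexes the TSB at "base".
 */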
3527c478bd9Sstevel@tonic-gate /*
3537c478bd9Sstevel@tonic-gate  * If the DDI_DMA_PARTIAL flag is set and the request is for
3547c478bd9Sstevel@tonic-gate  * less than MIN_DVMA_WIN_SIZE pages, it's not worth the hassle,
3557c478bd9Sstevel@tonic-gate  * so we turn off the DDI_DMA_PARTIAL flag.
3567c478bd9Sstevel@tonic-gate  */
3577c478bd9Sstevel@tonic-gate #define	MIN_DVMA_WIN_SIZE	(128)
3587c478bd9Sstevel@tonic-gate 
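/*
 * iommu_remove_mappings() tears down the IOTTEs for a DMA handle.  Its
 * body is compiled only when both DEBUG and IO_MEMDEBUG are defined;
 * in normal builds it is an empty stub, hence the ARGSUSED below.
 */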
3597c478bd9Sstevel@tonic-gate /* ARGSUSED */
3607c478bd9Sstevel@tonic-gate void
3617c478bd9Sstevel@tonic-gate iommu_remove_mappings(ddi_dma_impl_t *mp)
3627c478bd9Sstevel@tonic-gate {
3637c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMDEBUG)
3647c478bd9Sstevel@tonic-gate 	pgcnt_t npages;
3657c478bd9Sstevel@tonic-gate 	ioaddr_t ioaddr;
3667c478bd9Sstevel@tonic-gate 	volatile uint64_t *iotte_ptr;
3697c478bd9Sstevel@tonic-gate 	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
3707c478bd9Sstevel@tonic-gate 	struct sbus_soft_state *softsp = mppriv->softsp;
3717c478bd9Sstevel@tonic-gate 
3727c478bd9Sstevel@tonic-gate #if defined(IO_MEMUSAGE)
3737c478bd9Sstevel@tonic-gate 	struct io_mem_list **prevp, *walk;
3747c478bd9Sstevel@tonic-gate #endif /* IO_MEMUSAGE */
3757c478bd9Sstevel@tonic-gate 
3767c478bd9Sstevel@tonic-gate 	ASSERT(softsp != NULL);
3777c478bd9Sstevel@tonic-gate 	/*
3787c478bd9Sstevel@tonic-gate 	 * Run thru the mapped entries and free 'em
3797c478bd9Sstevel@tonic-gate 	 */
3807c478bd9Sstevel@tonic-gate 
3817c478bd9Sstevel@tonic-gate 	ioaddr = mp->dmai_mapping & ~IOMMU_PAGEOFFSET;
3827c478bd9Sstevel@tonic-gate 	npages = mp->dmai_ndvmapages;
3837c478bd9Sstevel@tonic-gate 
3847c478bd9Sstevel@tonic-gate #if defined(IO_MEMUSAGE)
3857c478bd9Sstevel@tonic-gate 	mutex_enter(&softsp->iomemlock);
3867c478bd9Sstevel@tonic-gate 	prevp = &softsp->iomem;
3877c478bd9Sstevel@tonic-gate 	walk = softsp->iomem;
3887c478bd9Sstevel@tonic-gate 
3897c478bd9Sstevel@tonic-gate 	while (walk) {
3907c478bd9Sstevel@tonic-gate 		if (walk->ioaddr == ioaddr) {
3917c478bd9Sstevel@tonic-gate 			*prevp = walk->next;
3927c478bd9Sstevel@tonic-gate 			break;
3937c478bd9Sstevel@tonic-gate 		}
3947c478bd9Sstevel@tonic-gate 
3957c478bd9Sstevel@tonic-gate 		prevp = &walk->next;
3967c478bd9Sstevel@tonic-gate 		walk = walk->next;
3977c478bd9Sstevel@tonic-gate 	}
3987c478bd9Sstevel@tonic-gate 	mutex_exit(&softsp->iomemlock);
3997c478bd9Sstevel@tonic-gate 
4007c478bd9Sstevel@tonic-gate 	kmem_free(walk->pfn, sizeof (pfn_t) * (npages + 1));
4017c478bd9Sstevel@tonic-gate 	kmem_free(walk, sizeof (struct io_mem_list));
4027c478bd9Sstevel@tonic-gate #endif /* IO_MEMUSAGE */
4037c478bd9Sstevel@tonic-gate 
4047c478bd9Sstevel@tonic-gate 	iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
4057c478bd9Sstevel@tonic-gate 
4067c478bd9Sstevel@tonic-gate 	while (npages) {
4077c478bd9Sstevel@tonic-gate 		DPRINTF(IOMMU_DMAMCTL_DEBUG,
4087c478bd9Sstevel@tonic-gate 		    ("dma_mctl: freeing ioaddr %x iotte %p\n",
4097c478bd9Sstevel@tonic-gate 		    ioaddr, iotte_ptr));
4107c478bd9Sstevel@tonic-gate 		*iotte_ptr = (uint64_t)0;	/* unload tte */
4117c478bd9Sstevel@tonic-gate 		iommu_tlb_flush(softsp, ioaddr, 1);
4127c478bd9Sstevel@tonic-gate 		npages--;
4137c478bd9Sstevel@tonic-gate 		ioaddr += IOMMU_PAGESIZE;
4147c478bd9Sstevel@tonic-gate 		iotte_ptr++;
4157c478bd9Sstevel@tonic-gate 	}
4167c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMDEBUG */
4177c478bd9Sstevel@tonic-gate }
4187c478bd9Sstevel@tonic-gate 
4197c478bd9Sstevel@tonic-gate 
4207c478bd9Sstevel@tonic-gate int
4217c478bd9Sstevel@tonic-gate iommu_create_vaddr_mappings(ddi_dma_impl_t *mp, uintptr_t addr)
4227c478bd9Sstevel@tonic-gate {
4237c478bd9Sstevel@tonic-gate 	pfn_t pfn;
4247c478bd9Sstevel@tonic-gate 	struct as *as = NULL;
4257c478bd9Sstevel@tonic-gate 	pgcnt_t npages;
4267c478bd9Sstevel@tonic-gate 	ioaddr_t ioaddr;
4277c478bd9Sstevel@tonic-gate 	uint_t offset;
4287c478bd9Sstevel@tonic-gate 	volatile uint64_t *iotte_ptr;
4297c478bd9Sstevel@tonic-gate 	uint64_t tmp_iotte_flag;
4307c478bd9Sstevel@tonic-gate 	int rval = DDI_DMA_MAPPED;
4317c478bd9Sstevel@tonic-gate 	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
4327c478bd9Sstevel@tonic-gate 	struct sbus_soft_state *softsp = mppriv->softsp;
4337c478bd9Sstevel@tonic-gate 	int diag_tlb_flush;
4347c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE)
4357c478bd9Sstevel@tonic-gate 	struct io_mem_list *iomemp;
4367c478bd9Sstevel@tonic-gate 	pfn_t *pfnp;
4377c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */
4387c478bd9Sstevel@tonic-gate 
4397c478bd9Sstevel@tonic-gate 	ASSERT(softsp != NULL);
4407c478bd9Sstevel@tonic-gate 
4417c478bd9Sstevel@tonic-gate 	/* Set Valid and Cache for mem xfer */
4427c478bd9Sstevel@tonic-gate 	tmp_iotte_flag = IOTTE_VALID | IOTTE_CACHE | IOTTE_WRITE | IOTTE_STREAM;
4437c478bd9Sstevel@tonic-gate 
4447c478bd9Sstevel@tonic-gate 	offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET);
4457c478bd9Sstevel@tonic-gate 	npages = iommu_btopr(mp->dmai_size + offset);
4467c478bd9Sstevel@tonic-gate 	ioaddr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
4477c478bd9Sstevel@tonic-gate 	iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
4487c478bd9Sstevel@tonic-gate 	diag_tlb_flush = npages > tlb_flush_using_diag ? 1 : 0;
4497c478bd9Sstevel@tonic-gate 
4507c478bd9Sstevel@tonic-gate 	as = mp->dmai_object.dmao_obj.virt_obj.v_as;
4517c478bd9Sstevel@tonic-gate 	if (as == NULL)
4527c478bd9Sstevel@tonic-gate 		as = &kas;
4537c478bd9Sstevel@tonic-gate 
4547c478bd9Sstevel@tonic-gate 	/*
4557c478bd9Sstevel@tonic-gate 	 * Set the per object bits of the TTE here. We optimize this for
4567c478bd9Sstevel@tonic-gate 	 * the memory case so that the while loop overhead is minimal.
4577c478bd9Sstevel@tonic-gate 	 */
4587c478bd9Sstevel@tonic-gate 	if (mp->dmai_rflags & DDI_DMA_CONSISTENT) {
4597c478bd9Sstevel@tonic-gate 		/* Turn on NOSYNC if we need consistent mem */
4607c478bd9Sstevel@tonic-gate 		mp->dmai_rflags |= DMP_NOSYNC;
4617c478bd9Sstevel@tonic-gate 		tmp_iotte_flag ^= IOTTE_STREAM;
4627c478bd9Sstevel@tonic-gate 	} else if (softsp->stream_buf_off) {
4637c478bd9Sstevel@tonic-gate 		/* Set streaming mode if not consistent mem */
4647c478bd9Sstevel@tonic-gate 		tmp_iotte_flag ^= IOTTE_STREAM;
4657c478bd9Sstevel@tonic-gate 	}
4667c478bd9Sstevel@tonic-gate 
4677c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE)
4687c478bd9Sstevel@tonic-gate 	iomemp = kmem_alloc(sizeof (struct io_mem_list), KM_SLEEP);
4697c478bd9Sstevel@tonic-gate 	iomemp->rdip = mp->dmai_rdip;
4707c478bd9Sstevel@tonic-gate 	iomemp->ioaddr = ioaddr;
4717c478bd9Sstevel@tonic-gate 	iomemp->addr = addr;
4727c478bd9Sstevel@tonic-gate 	iomemp->npages = npages;
4737c478bd9Sstevel@tonic-gate 	pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) * (npages + 1),
4747c478bd9Sstevel@tonic-gate 	    KM_SLEEP);
4757c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */
4767c478bd9Sstevel@tonic-gate 	/*
4777c478bd9Sstevel@tonic-gate 	 * Grab the mappings from the dmmu and stick 'em into the
4787c478bd9Sstevel@tonic-gate 	 * iommu.
4797c478bd9Sstevel@tonic-gate 	 */
4807c478bd9Sstevel@tonic-gate 	ASSERT(npages != 0);
4817c478bd9Sstevel@tonic-gate 
4827c478bd9Sstevel@tonic-gate 	/* If we're going to flush the TLB using diag mode, do it now. */
4837c478bd9Sstevel@tonic-gate 	if (diag_tlb_flush)
4847c478bd9Sstevel@tonic-gate 		iommu_tlb_flush(softsp, ioaddr, npages);
4857c478bd9Sstevel@tonic-gate 
4867c478bd9Sstevel@tonic-gate 	do {
4877c478bd9Sstevel@tonic-gate 		uint64_t iotte_flag = tmp_iotte_flag;
4887c478bd9Sstevel@tonic-gate 
4897c478bd9Sstevel@tonic-gate 		/*
4907c478bd9Sstevel@tonic-gate 		 * Fetch the pfn for the DMA object
4917c478bd9Sstevel@tonic-gate 		 */
4927c478bd9Sstevel@tonic-gate 
4937c478bd9Sstevel@tonic-gate 		ASSERT(as);
4947c478bd9Sstevel@tonic-gate 		pfn = hat_getpfnum(as->a_hat, (caddr_t)addr);
4957c478bd9Sstevel@tonic-gate 		ASSERT(pfn != PFN_INVALID);
4967c478bd9Sstevel@tonic-gate 
4977c478bd9Sstevel@tonic-gate 		if (!pf_is_memory(pfn)) {
4987c478bd9Sstevel@tonic-gate 			/* DVMA'ing to IO space */
4997c478bd9Sstevel@tonic-gate 
5007c478bd9Sstevel@tonic-gate 			/* Turn off cache bit if set */
5017c478bd9Sstevel@tonic-gate 			if (iotte_flag & IOTTE_CACHE)
5027c478bd9Sstevel@tonic-gate 				iotte_flag ^= IOTTE_CACHE;
5037c478bd9Sstevel@tonic-gate 
5047c478bd9Sstevel@tonic-gate 			/* Turn off stream bit if set */
5057c478bd9Sstevel@tonic-gate 			if (iotte_flag & IOTTE_STREAM)
5067c478bd9Sstevel@tonic-gate 				iotte_flag ^= IOTTE_STREAM;
5077c478bd9Sstevel@tonic-gate 
5087c478bd9Sstevel@tonic-gate 			if (IS_INTRA_SBUS(softsp, pfn)) {
5097c478bd9Sstevel@tonic-gate 				/* Intra sbus transfer */
5107c478bd9Sstevel@tonic-gate 
5117c478bd9Sstevel@tonic-gate 				/* Turn on intra flag */
5127c478bd9Sstevel@tonic-gate 				iotte_flag |= IOTTE_INTRA;
5137c478bd9Sstevel@tonic-gate 
5147c478bd9Sstevel@tonic-gate 				DPRINTF(IOMMU_INTER_INTRA_XFER, (
515*ffadc26eSmike_s 				    "Intra xfer pfnum %lx TTE %lx\n",
5167c478bd9Sstevel@tonic-gate 				    pfn, iotte_flag));
5177c478bd9Sstevel@tonic-gate 			} else {
5187c478bd9Sstevel@tonic-gate 				if (pf_is_dmacapable(pfn) == 1) {
5197c478bd9Sstevel@tonic-gate 					/*EMPTY*/
5207c478bd9Sstevel@tonic-gate 					DPRINTF(IOMMU_INTER_INTRA_XFER,
5217c478bd9Sstevel@tonic-gate 					    ("Inter xfer pfnum %lx "
522*ffadc26eSmike_s 					    "tte hi %lx\n",
5237c478bd9Sstevel@tonic-gate 					    pfn, iotte_flag));
5247c478bd9Sstevel@tonic-gate 				} else {
5257c478bd9Sstevel@tonic-gate 					rval = DDI_DMA_NOMAPPING;
5267c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMDEBUG)
5277c478bd9Sstevel@tonic-gate 					goto bad;
5287c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMDEBUG */
5297c478bd9Sstevel@tonic-gate 				}
5307c478bd9Sstevel@tonic-gate 			}
5317c478bd9Sstevel@tonic-gate 		}
5327c478bd9Sstevel@tonic-gate 		addr += IOMMU_PAGESIZE;
5337c478bd9Sstevel@tonic-gate 
534*ffadc26eSmike_s 		DPRINTF(IOMMU_TTE, ("vaddr mapping: tte index %p pfn %lx "
535*ffadc26eSmike_s 		    "tte flag %lx addr %lx ioaddr %x\n",
5367c478bd9Sstevel@tonic-gate 		    iotte_ptr, pfn, iotte_flag, addr, ioaddr));
5377c478bd9Sstevel@tonic-gate 
5387c478bd9Sstevel@tonic-gate 		/* Flush the IOMMU TLB before loading a new mapping */
5397c478bd9Sstevel@tonic-gate 		if (!diag_tlb_flush)
5407c478bd9Sstevel@tonic-gate 			iommu_tlb_flush(softsp, ioaddr, 1);
5417c478bd9Sstevel@tonic-gate 
5427c478bd9Sstevel@tonic-gate 		/* Set the hardware IO TTE */
5437c478bd9Sstevel@tonic-gate 		*iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;
5447c478bd9Sstevel@tonic-gate 
5457c478bd9Sstevel@tonic-gate 		ioaddr += IOMMU_PAGESIZE;
5467c478bd9Sstevel@tonic-gate 		npages--;
5477c478bd9Sstevel@tonic-gate 		iotte_ptr++;
5487c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE)
5497c478bd9Sstevel@tonic-gate 		*pfnp = pfn;
5507c478bd9Sstevel@tonic-gate 		pfnp++;
5517c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */
5527c478bd9Sstevel@tonic-gate 	} while (npages != 0);
5537c478bd9Sstevel@tonic-gate 
5547c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE)
5557c478bd9Sstevel@tonic-gate 	mutex_enter(&softsp->iomemlock);
5567c478bd9Sstevel@tonic-gate 	iomemp->next = softsp->iomem;
5577c478bd9Sstevel@tonic-gate 	softsp->iomem = iomemp;
5587c478bd9Sstevel@tonic-gate 	mutex_exit(&softsp->iomemlock);
5597c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */
5607c478bd9Sstevel@tonic-gate 
5617c478bd9Sstevel@tonic-gate 	return (rval);
5627c478bd9Sstevel@tonic-gate 
5637c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMDEBUG)
5647c478bd9Sstevel@tonic-gate bad:
5657c478bd9Sstevel@tonic-gate 	/* If we fail a mapping, free up any mapping resources used */
5667c478bd9Sstevel@tonic-gate 	iommu_remove_mappings(mp);
5677c478bd9Sstevel@tonic-gate 	return (rval);
5687c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMDEBUG */
5697c478bd9Sstevel@tonic-gate }
5707c478bd9Sstevel@tonic-gate 
5717c478bd9Sstevel@tonic-gate 
5727c478bd9Sstevel@tonic-gate int
5737c478bd9Sstevel@tonic-gate iommu_create_pp_mappings(ddi_dma_impl_t *mp, page_t *pp, page_t **pplist)
5747c478bd9Sstevel@tonic-gate {
5757c478bd9Sstevel@tonic-gate 	pfn_t pfn;
5767c478bd9Sstevel@tonic-gate 	pgcnt_t npages;
5777c478bd9Sstevel@tonic-gate 	ioaddr_t ioaddr;
5787c478bd9Sstevel@tonic-gate 	uint_t offset;
5797c478bd9Sstevel@tonic-gate 	volatile uint64_t *iotte_ptr;
5807c478bd9Sstevel@tonic-gate 	uint64_t tmp_iotte_flag;
5817c478bd9Sstevel@tonic-gate 	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
5827c478bd9Sstevel@tonic-gate 	struct sbus_soft_state *softsp = mppriv->softsp;
5837c478bd9Sstevel@tonic-gate 	int diag_tlb_flush;
5847c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE)
5857c478bd9Sstevel@tonic-gate 	struct io_mem_list *iomemp;
5867c478bd9Sstevel@tonic-gate 	pfn_t *pfnp;
5877c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */
5887c478bd9Sstevel@tonic-gate 	int rval = DDI_DMA_MAPPED;
5897c478bd9Sstevel@tonic-gate 
5907c478bd9Sstevel@tonic-gate 	/* Set Valid and Cache for mem xfer */
5917c478bd9Sstevel@tonic-gate 	tmp_iotte_flag = IOTTE_VALID | IOTTE_CACHE | IOTTE_WRITE | IOTTE_STREAM;
5927c478bd9Sstevel@tonic-gate 
5937c478bd9Sstevel@tonic-gate 	ASSERT(softsp != NULL);
5947c478bd9Sstevel@tonic-gate 
5957c478bd9Sstevel@tonic-gate 	offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET);
5967c478bd9Sstevel@tonic-gate 	npages = iommu_btopr(mp->dmai_size + offset);
5977c478bd9Sstevel@tonic-gate 	ioaddr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
5987c478bd9Sstevel@tonic-gate 	iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
5997c478bd9Sstevel@tonic-gate 	diag_tlb_flush = npages > tlb_flush_using_diag ? 1 : 0;
6007c478bd9Sstevel@tonic-gate 
6017c478bd9Sstevel@tonic-gate 	/*
6027c478bd9Sstevel@tonic-gate 	 * Set the per object bits of the TTE here. We optimize this for
6037c478bd9Sstevel@tonic-gate 	 * the memory case so that the while loop overhead is minimal.
6047c478bd9Sstevel@tonic-gate 	 */
6057c478bd9Sstevel@tonic-gate 	if (mp->dmai_rflags & DDI_DMA_CONSISTENT) {
6067c478bd9Sstevel@tonic-gate 		/* Turn on NOSYNC if we need consistent mem */
6077c478bd9Sstevel@tonic-gate 		mp->dmai_rflags |= DMP_NOSYNC;
6087c478bd9Sstevel@tonic-gate 		tmp_iotte_flag ^= IOTTE_STREAM;
6097c478bd9Sstevel@tonic-gate 	} else if (softsp->stream_buf_off) {
6107c478bd9Sstevel@tonic-gate 		/* Set streaming mode if not consistent mem */
6117c478bd9Sstevel@tonic-gate 		tmp_iotte_flag ^= IOTTE_STREAM;
6127c478bd9Sstevel@tonic-gate 	}
6137c478bd9Sstevel@tonic-gate 
6147c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE)
6157c478bd9Sstevel@tonic-gate 	iomemp = kmem_alloc(sizeof (struct io_mem_list), KM_SLEEP);
6167c478bd9Sstevel@tonic-gate 	iomemp->rdip = mp->dmai_rdip;
6177c478bd9Sstevel@tonic-gate 	iomemp->ioaddr = ioaddr;
6187c478bd9Sstevel@tonic-gate 	iomemp->npages = npages;
6197c478bd9Sstevel@tonic-gate 	pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) * (npages + 1),
6207c478bd9Sstevel@tonic-gate 	    KM_SLEEP);
6217c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */
6227c478bd9Sstevel@tonic-gate 	/*
6237c478bd9Sstevel@tonic-gate 	 * Grab the mappings from the dmmu and stick 'em into the
6247c478bd9Sstevel@tonic-gate 	 * iommu.
6257c478bd9Sstevel@tonic-gate 	 */
6267c478bd9Sstevel@tonic-gate 	ASSERT(npages != 0);
6277c478bd9Sstevel@tonic-gate 
6287c478bd9Sstevel@tonic-gate 	/* If we're going to flush the TLB using diag mode, do it now. */
6297c478bd9Sstevel@tonic-gate 	if (diag_tlb_flush)
6307c478bd9Sstevel@tonic-gate 		iommu_tlb_flush(softsp, ioaddr, npages);
6317c478bd9Sstevel@tonic-gate 
6327c478bd9Sstevel@tonic-gate 	do {
6337c478bd9Sstevel@tonic-gate 		uint64_t iotte_flag;
6347c478bd9Sstevel@tonic-gate 
6357c478bd9Sstevel@tonic-gate 		iotte_flag = tmp_iotte_flag;
6367c478bd9Sstevel@tonic-gate 
6377c478bd9Sstevel@tonic-gate 		if (pp != NULL) {
6387c478bd9Sstevel@tonic-gate 			pfn = pp->p_pagenum;
6397c478bd9Sstevel@tonic-gate 			pp = pp->p_next;
6407c478bd9Sstevel@tonic-gate 		} else {
6417c478bd9Sstevel@tonic-gate 			pfn = (*pplist)->p_pagenum;
6427c478bd9Sstevel@tonic-gate 			pplist++;
6437c478bd9Sstevel@tonic-gate 		}
6447c478bd9Sstevel@tonic-gate 
645*ffadc26eSmike_s 		DPRINTF(IOMMU_TTE, ("pp mapping TTE index %p pfn %lx "
646*ffadc26eSmike_s 		    "tte flag %lx ioaddr %x\n", iotte_ptr,
6477c478bd9Sstevel@tonic-gate 		    pfn, iotte_flag, ioaddr));
6487c478bd9Sstevel@tonic-gate 
6497c478bd9Sstevel@tonic-gate 		/* Flush the IOMMU TLB before loading a new mapping */
6507c478bd9Sstevel@tonic-gate 		if (!diag_tlb_flush)
6517c478bd9Sstevel@tonic-gate 			iommu_tlb_flush(softsp, ioaddr, 1);
6527c478bd9Sstevel@tonic-gate 
6537c478bd9Sstevel@tonic-gate 		/* Set the hardware IO TTE */
6547c478bd9Sstevel@tonic-gate 		*iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;
6557c478bd9Sstevel@tonic-gate 
6567c478bd9Sstevel@tonic-gate 		ioaddr += IOMMU_PAGESIZE;
6577c478bd9Sstevel@tonic-gate 		npages--;
6587c478bd9Sstevel@tonic-gate 		iotte_ptr++;
6597c478bd9Sstevel@tonic-gate 
6607c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE)
6617c478bd9Sstevel@tonic-gate 		*pfnp = pfn;
6627c478bd9Sstevel@tonic-gate 		pfnp++;
6637c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */
6647c478bd9Sstevel@tonic-gate 
6657c478bd9Sstevel@tonic-gate 	} while (npages != 0);
6667c478bd9Sstevel@tonic-gate 
6677c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE)
6687c478bd9Sstevel@tonic-gate 	mutex_enter(&softsp->iomemlock);
6697c478bd9Sstevel@tonic-gate 	iomemp->next = softsp->iomem;
6707c478bd9Sstevel@tonic-gate 	softsp->iomem = iomemp;
6717c478bd9Sstevel@tonic-gate 	mutex_exit(&softsp->iomemlock);
6727c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */
6737c478bd9Sstevel@tonic-gate 
6747c478bd9Sstevel@tonic-gate 	return (rval);
6757c478bd9Sstevel@tonic-gate }
6767c478bd9Sstevel@tonic-gate 
6777c478bd9Sstevel@tonic-gate 
6787c478bd9Sstevel@tonic-gate int
6797c478bd9Sstevel@tonic-gate iommu_dma_lim_setup(dev_info_t *dip, dev_info_t *rdip,
6807c478bd9Sstevel@tonic-gate     struct sbus_soft_state *softsp, uint_t *burstsizep, uint_t burstsize64,
6817c478bd9Sstevel@tonic-gate     uint_t *minxferp, uint_t dma_flags)
6827c478bd9Sstevel@tonic-gate {
6837c478bd9Sstevel@tonic-gate 	struct regspec *rp;
6847c478bd9Sstevel@tonic-gate 
6857c478bd9Sstevel@tonic-gate 	/* Take care of 64 bit limits. */
6867c478bd9Sstevel@tonic-gate 	if (!(dma_flags & DDI_DMA_SBUS_64BIT)) {
6877c478bd9Sstevel@tonic-gate 		/*
6887c478bd9Sstevel@tonic-gate 		 * return burst size for 32-bit mode
6897c478bd9Sstevel@tonic-gate 		 */
6907c478bd9Sstevel@tonic-gate 		*burstsizep &= softsp->sbus_burst_sizes;
6917c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
6927c478bd9Sstevel@tonic-gate 	}
6937c478bd9Sstevel@tonic-gate 
6947c478bd9Sstevel@tonic-gate 	/*
6957c478bd9Sstevel@tonic-gate 	 * Check whether the SBus supports 64 bit and whether the caller
6967c478bd9Sstevel@tonic-gate 	 * is a child of the SBus.  No support through bridges.
6977c478bd9Sstevel@tonic-gate 	 */
6987c478bd9Sstevel@tonic-gate 	if (!softsp->sbus64_burst_sizes || (ddi_get_parent(rdip) != dip)) {
6997c478bd9Sstevel@tonic-gate 		/*
7007c478bd9Sstevel@tonic-gate 		 * The SBus doesn't support it, or the device sits behind
7017c478bd9Sstevel@tonic-gate 		 * a bridge.  Do 32-bit xfers.
7027c478bd9Sstevel@tonic-gate 		 */
7037c478bd9Sstevel@tonic-gate 		*burstsizep &= softsp->sbus_burst_sizes;
7047c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
7057c478bd9Sstevel@tonic-gate 	}
7067c478bd9Sstevel@tonic-gate 
7077c478bd9Sstevel@tonic-gate 	rp = ddi_rnumber_to_regspec(rdip, 0);
7087c478bd9Sstevel@tonic-gate 	if (rp == NULL) {
7097c478bd9Sstevel@tonic-gate 		*burstsizep &= softsp->sbus_burst_sizes;
7107c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
7117c478bd9Sstevel@tonic-gate 	}
7127c478bd9Sstevel@tonic-gate 
7137c478bd9Sstevel@tonic-gate 	/* Check for old-style 64 bit burstsizes */
7147c478bd9Sstevel@tonic-gate 	if (burstsize64 & SYSIO64_BURST_MASK) {
7157c478bd9Sstevel@tonic-gate 		/* Scale back burstsizes if necessary */
7167c478bd9Sstevel@tonic-gate 		*burstsizep &= (softsp->sbus64_burst_sizes |
7177c478bd9Sstevel@tonic-gate 		    softsp->sbus_burst_sizes);
7187c478bd9Sstevel@tonic-gate 	} else {
7197c478bd9Sstevel@tonic-gate 		/* Get the 64 bit burstsizes. */
7207c478bd9Sstevel@tonic-gate 		*burstsizep = burstsize64;
7217c478bd9Sstevel@tonic-gate 
7227c478bd9Sstevel@tonic-gate 		/* Scale back burstsizes if necessary */
7237c478bd9Sstevel@tonic-gate 		*burstsizep &= (softsp->sbus64_burst_sizes >>
7247c478bd9Sstevel@tonic-gate 		    SYSIO64_BURST_SHIFT);
7257c478bd9Sstevel@tonic-gate 	}
7267c478bd9Sstevel@tonic-gate 
7277c478bd9Sstevel@tonic-gate 	/*
7287c478bd9Sstevel@tonic-gate 	 * Set the minimum transfer size to the larger of the caller's
7297c478bd9Sstevel@tonic-gate 	 * value and the smallest burstsize the bus can manage.
7307c478bd9Sstevel@tonic-gate 	 */
7317c478bd9Sstevel@tonic-gate 	*minxferp = MAX(*minxferp,
7327c478bd9Sstevel@tonic-gate 	    (1 << (ddi_ffs(softsp->sbus64_burst_sizes) - 1)));
7337c478bd9Sstevel@tonic-gate 
7347c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
7357c478bd9Sstevel@tonic-gate }
7367c478bd9Sstevel@tonic-gate 
7377c478bd9Sstevel@tonic-gate 
7387c478bd9Sstevel@tonic-gate int
7397c478bd9Sstevel@tonic-gate iommu_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
7407c478bd9Sstevel@tonic-gate     ddi_dma_attr_t *dma_attr, int (*waitfp)(caddr_t), caddr_t arg,
7417c478bd9Sstevel@tonic-gate     ddi_dma_handle_t *handlep)
7427c478bd9Sstevel@tonic-gate {
7437c478bd9Sstevel@tonic-gate 	ioaddr_t addrlow, addrhigh, segalign;
7447c478bd9Sstevel@tonic-gate 	ddi_dma_impl_t *mp;
7457c478bd9Sstevel@tonic-gate 	struct dma_impl_priv *mppriv;
7467c478bd9Sstevel@tonic-gate 	struct sbus_soft_state *softsp = (struct sbus_soft_state *)
7477c478bd9Sstevel@tonic-gate 	    ddi_get_soft_state(sbusp, ddi_get_instance(dip));
7487c478bd9Sstevel@tonic-gate 
7497c478bd9Sstevel@tonic-gate 	/*
7507c478bd9Sstevel@tonic-gate 	 * Setup dma burstsizes and min-xfer counts.
7517c478bd9Sstevel@tonic-gate 	 */
7527c478bd9Sstevel@tonic-gate 	(void) iommu_dma_lim_setup(dip, rdip, softsp,
7537c478bd9Sstevel@tonic-gate 	    &dma_attr->dma_attr_burstsizes,
7547c478bd9Sstevel@tonic-gate 	    dma_attr->dma_attr_burstsizes, &dma_attr->dma_attr_minxfer,
7557c478bd9Sstevel@tonic-gate 	    dma_attr->dma_attr_flags);
7567c478bd9Sstevel@tonic-gate 
7577c478bd9Sstevel@tonic-gate 	if (dma_attr->dma_attr_burstsizes == 0)
7587c478bd9Sstevel@tonic-gate 		return (DDI_DMA_BADATTR);
7597c478bd9Sstevel@tonic-gate 
7607c478bd9Sstevel@tonic-gate 	addrlow = (ioaddr_t)dma_attr->dma_attr_addr_lo;
7617c478bd9Sstevel@tonic-gate 	addrhigh = (ioaddr_t)dma_attr->dma_attr_addr_hi;
7627c478bd9Sstevel@tonic-gate 	segalign = (ioaddr_t)dma_attr->dma_attr_seg;
7637c478bd9Sstevel@tonic-gate 
7647c478bd9Sstevel@tonic-gate 	/*
7657c478bd9Sstevel@tonic-gate 	 * Check sanity for hi and lo address limits
7667c478bd9Sstevel@tonic-gate 	 */
7677c478bd9Sstevel@tonic-gate 	if ((addrhigh <= addrlow) ||
7687c478bd9Sstevel@tonic-gate 	    (addrhigh < (ioaddr_t)softsp->iommu_dvma_base)) {
7697c478bd9Sstevel@tonic-gate 		return (DDI_DMA_BADATTR);
7707c478bd9Sstevel@tonic-gate 	}
7717c478bd9Sstevel@tonic-gate 	if (dma_attr->dma_attr_flags & DDI_DMA_FORCE_PHYSICAL)
7727c478bd9Sstevel@tonic-gate 		return (DDI_DMA_BADATTR);
7737c478bd9Sstevel@tonic-gate 
7747c478bd9Sstevel@tonic-gate 	mppriv = kmem_zalloc(sizeof (*mppriv),
7757c478bd9Sstevel@tonic-gate 	    (waitfp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP);
7767c478bd9Sstevel@tonic-gate 
7777c478bd9Sstevel@tonic-gate 	if (mppriv == NULL) {
7787c478bd9Sstevel@tonic-gate 		if (waitfp != DDI_DMA_DONTWAIT) {
7797c478bd9Sstevel@tonic-gate 		    ddi_set_callback(waitfp, arg, &softsp->dvma_call_list_id);
7807c478bd9Sstevel@tonic-gate 		}
7817c478bd9Sstevel@tonic-gate 		return (DDI_DMA_NORESOURCES);
7827c478bd9Sstevel@tonic-gate 	}
7837c478bd9Sstevel@tonic-gate 	mp = (ddi_dma_impl_t *)mppriv;
7847c478bd9Sstevel@tonic-gate 
785*ffadc26eSmike_s 	DPRINTF(IOMMU_DMA_ALLOCHDL_DEBUG, ("dma_allochdl: (%s) handle %p "
7867c478bd9Sstevel@tonic-gate 	    "hi %x lo %x min %x burst %x\n",
7877c478bd9Sstevel@tonic-gate 	    ddi_get_name(dip), mp, addrhigh, addrlow,
7887c478bd9Sstevel@tonic-gate 	    dma_attr->dma_attr_minxfer, dma_attr->dma_attr_burstsizes));
7897c478bd9Sstevel@tonic-gate 
7907c478bd9Sstevel@tonic-gate 	mp->dmai_rdip = rdip;
7917c478bd9Sstevel@tonic-gate 	mp->dmai_minxfer = (uint_t)dma_attr->dma_attr_minxfer;
7927c478bd9Sstevel@tonic-gate 	mp->dmai_burstsizes = (uint_t)dma_attr->dma_attr_burstsizes;
7937c478bd9Sstevel@tonic-gate 	mp->dmai_attr = *dma_attr;
7947c478bd9Sstevel@tonic-gate 	/* See if the DMA engine has any limit restrictions. */
7957c478bd9Sstevel@tonic-gate 	if (segalign == (ioaddr_t)UINT32_MAX &&
7967c478bd9Sstevel@tonic-gate 	    addrhigh == (ioaddr_t)UINT32_MAX &&
7977c478bd9Sstevel@tonic-gate 	    (dma_attr->dma_attr_align <= IOMMU_PAGESIZE) && addrlow == 0) {
7987c478bd9Sstevel@tonic-gate 		mp->dmai_rflags |= DMP_NOLIMIT;
7997c478bd9Sstevel@tonic-gate 	}
8007c478bd9Sstevel@tonic-gate 	mppriv->softsp = softsp;
8017c478bd9Sstevel@tonic-gate 	mppriv->phys_sync_flag = va_to_pa((caddr_t)&mppriv->sync_flag);
8027c478bd9Sstevel@tonic-gate 
8037c478bd9Sstevel@tonic-gate 	*handlep = (ddi_dma_handle_t)mp;
8047c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
8057c478bd9Sstevel@tonic-gate }
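
/*
 * Leaf drivers do not call this routine directly; ddi_dma_alloc_handle(9F)
 * vectors here through the nexus bus_dma_allochdl operation.  A sketch of
 * the leaf-side call (attribute values are hypothetical):
 *
 *	ddi_dma_handle_t handle;
 *
 *	if (ddi_dma_alloc_handle(dip, &my_dma_attr, DDI_DMA_SLEEP, NULL,
 *	    &handle) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 */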
8067c478bd9Sstevel@tonic-gate 
8077c478bd9Sstevel@tonic-gate /*ARGSUSED*/
8087c478bd9Sstevel@tonic-gate int
8097c478bd9Sstevel@tonic-gate iommu_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
8107c478bd9Sstevel@tonic-gate {
8117c478bd9Sstevel@tonic-gate 	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)handle;
8127c478bd9Sstevel@tonic-gate 	struct sbus_soft_state *softsp = mppriv->softsp;
8137c478bd9Sstevel@tonic-gate 	ASSERT(softsp != NULL);
8147c478bd9Sstevel@tonic-gate 
8157c478bd9Sstevel@tonic-gate 	kmem_free(mppriv, sizeof (*mppriv));
8167c478bd9Sstevel@tonic-gate 
8177c478bd9Sstevel@tonic-gate 	if (softsp->dvma_call_list_id != 0) {
8187c478bd9Sstevel@tonic-gate 		ddi_run_callback(&softsp->dvma_call_list_id);
8197c478bd9Sstevel@tonic-gate 	}
8207c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
8217c478bd9Sstevel@tonic-gate }
8227c478bd9Sstevel@tonic-gate 
8237c478bd9Sstevel@tonic-gate static int
8247c478bd9Sstevel@tonic-gate check_dma_attr(struct ddi_dma_req *dmareq, ddi_dma_attr_t *dma_attr,
8257c478bd9Sstevel@tonic-gate     uint32_t *size)
8267c478bd9Sstevel@tonic-gate {
8277c478bd9Sstevel@tonic-gate 	ioaddr_t addrlow;
8287c478bd9Sstevel@tonic-gate 	ioaddr_t addrhigh;
8297c478bd9Sstevel@tonic-gate 	uint32_t segalign;
8307c478bd9Sstevel@tonic-gate 	uint32_t smask;
8317c478bd9Sstevel@tonic-gate 
8327c478bd9Sstevel@tonic-gate 	smask = *size - 1;
8337c478bd9Sstevel@tonic-gate 	segalign = dma_attr->dma_attr_seg;
8347c478bd9Sstevel@tonic-gate 	if (smask > segalign) {
8357c478bd9Sstevel@tonic-gate 		if ((dmareq->dmar_flags & DDI_DMA_PARTIAL) == 0)
8367c478bd9Sstevel@tonic-gate 			return (DDI_DMA_TOOBIG);
8377c478bd9Sstevel@tonic-gate 		*size = segalign + 1;
8387c478bd9Sstevel@tonic-gate 	}
8397c478bd9Sstevel@tonic-gate 	addrlow = (ioaddr_t)dma_attr->dma_attr_addr_lo;
8407c478bd9Sstevel@tonic-gate 	addrhigh = (ioaddr_t)dma_attr->dma_attr_addr_hi;
8417c478bd9Sstevel@tonic-gate 	if (addrlow + smask > addrhigh || addrlow + smask < addrlow) {
8427c478bd9Sstevel@tonic-gate 		if (!((addrlow + dmareq->dmar_object.dmao_size == 0) &&
8437c478bd9Sstevel@tonic-gate 		    (addrhigh == (ioaddr_t)-1))) {
8447c478bd9Sstevel@tonic-gate 			if ((dmareq->dmar_flags & DDI_DMA_PARTIAL) == 0)
8457c478bd9Sstevel@tonic-gate 				return (DDI_DMA_TOOBIG);
8467c478bd9Sstevel@tonic-gate 			*size = MIN(addrhigh - addrlow + 1, *size);
8477c478bd9Sstevel@tonic-gate 		}
8487c478bd9Sstevel@tonic-gate 	}
8497c478bd9Sstevel@tonic-gate 	return (DDI_DMA_MAPOK);
8507c478bd9Sstevel@tonic-gate }
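
/*
 * Example of the clipping above (values hypothetical): with
 * dma_attr_seg == 0xFFFF (a 64K segment) and a 1 MB request, a
 * non-partial bind fails with DDI_DMA_TOOBIG, while a DDI_DMA_PARTIAL
 * bind has *size clipped to 0x10000 (64K).
 */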
8517c478bd9Sstevel@tonic-gate 
8527c478bd9Sstevel@tonic-gate int
8537c478bd9Sstevel@tonic-gate iommu_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
8547c478bd9Sstevel@tonic-gate     ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
8557c478bd9Sstevel@tonic-gate     ddi_dma_cookie_t *cp, uint_t *ccountp)
8567c478bd9Sstevel@tonic-gate {
8577c478bd9Sstevel@tonic-gate 	page_t *pp;
8587c478bd9Sstevel@tonic-gate 	uint32_t size;
8597c478bd9Sstevel@tonic-gate 	ioaddr_t ioaddr;
8607c478bd9Sstevel@tonic-gate 	uint_t offset;
8617c478bd9Sstevel@tonic-gate 	uintptr_t addr = 0;
8627c478bd9Sstevel@tonic-gate 	pgcnt_t npages;
8637c478bd9Sstevel@tonic-gate 	int rval;
8647c478bd9Sstevel@tonic-gate 	ddi_dma_attr_t *dma_attr;
8657c478bd9Sstevel@tonic-gate 	struct sbus_soft_state *softsp;
8667c478bd9Sstevel@tonic-gate 	struct page **pplist = NULL;
8677c478bd9Sstevel@tonic-gate 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
8687c478bd9Sstevel@tonic-gate 	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
8697c478bd9Sstevel@tonic-gate 
8707c478bd9Sstevel@tonic-gate #ifdef lint
8717c478bd9Sstevel@tonic-gate 	dip = dip;
8727c478bd9Sstevel@tonic-gate 	rdip = rdip;
8737c478bd9Sstevel@tonic-gate #endif
8747c478bd9Sstevel@tonic-gate 
8757c478bd9Sstevel@tonic-gate 	if (mp->dmai_inuse)
8767c478bd9Sstevel@tonic-gate 		return (DDI_DMA_INUSE);
8777c478bd9Sstevel@tonic-gate 
8787c478bd9Sstevel@tonic-gate 	dma_attr = &mp->dmai_attr;
8797c478bd9Sstevel@tonic-gate 	size = (uint32_t)dmareq->dmar_object.dmao_size;
8807c478bd9Sstevel@tonic-gate 	if (!(mp->dmai_rflags & DMP_NOLIMIT)) {
8817c478bd9Sstevel@tonic-gate 		rval = check_dma_attr(dmareq, dma_attr, &size);
8827c478bd9Sstevel@tonic-gate 		if (rval != DDI_DMA_MAPOK)
8837c478bd9Sstevel@tonic-gate 			return (rval);
8847c478bd9Sstevel@tonic-gate 	}
8857c478bd9Sstevel@tonic-gate 	mp->dmai_inuse = 1;
8867c478bd9Sstevel@tonic-gate 	mp->dmai_offset = 0;
8877c478bd9Sstevel@tonic-gate 	mp->dmai_rflags = (dmareq->dmar_flags & DMP_DDIFLAGS) |
8887c478bd9Sstevel@tonic-gate 	    (mp->dmai_rflags & DMP_NOLIMIT);
8897c478bd9Sstevel@tonic-gate 
8907c478bd9Sstevel@tonic-gate 	switch (dmareq->dmar_object.dmao_type) {
8917c478bd9Sstevel@tonic-gate 	case DMA_OTYP_VADDR:
8927c478bd9Sstevel@tonic-gate 	case DMA_OTYP_BUFVADDR:
8937c478bd9Sstevel@tonic-gate 		addr = (uintptr_t)dmareq->dmar_object.dmao_obj.virt_obj.v_addr;
8947c478bd9Sstevel@tonic-gate 		offset = addr & IOMMU_PAGEOFFSET;
8957c478bd9Sstevel@tonic-gate 		pplist = dmareq->dmar_object.dmao_obj.virt_obj.v_priv;
8967c478bd9Sstevel@tonic-gate 		npages = iommu_btopr(OBJSIZE + offset);
8977c478bd9Sstevel@tonic-gate 
898*ffadc26eSmike_s 		DPRINTF(IOMMU_DMAMAP_DEBUG, ("dma_map vaddr: %lx pages "
8997c478bd9Sstevel@tonic-gate 		    "req addr %lx off %x OBJSIZE %x\n",
9007c478bd9Sstevel@tonic-gate 		    npages, addr, offset, OBJSIZE));
9017c478bd9Sstevel@tonic-gate 
9027c478bd9Sstevel@tonic-gate 		/* We don't need the addr anymore if we have a shadow list */
9037c478bd9Sstevel@tonic-gate 		if (pplist != NULL)
9047c478bd9Sstevel@tonic-gate 			addr = 0;
9057c478bd9Sstevel@tonic-gate 		pp = NULL;
9067c478bd9Sstevel@tonic-gate 		break;
9077c478bd9Sstevel@tonic-gate 
9087c478bd9Sstevel@tonic-gate 	case DMA_OTYP_PAGES:
9097c478bd9Sstevel@tonic-gate 		pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp;
9107c478bd9Sstevel@tonic-gate 		offset = dmareq->dmar_object.dmao_obj.pp_obj.pp_offset;
9117c478bd9Sstevel@tonic-gate 		npages = iommu_btopr(OBJSIZE + offset);
9127c478bd9Sstevel@tonic-gate 		break;
9137c478bd9Sstevel@tonic-gate 
9147c478bd9Sstevel@tonic-gate 	case DMA_OTYP_PADDR:
9157c478bd9Sstevel@tonic-gate 	default:
9167c478bd9Sstevel@tonic-gate 		/*
9177c478bd9Sstevel@tonic-gate 		 * Not a supported type for this implementation
9187c478bd9Sstevel@tonic-gate 		 */
9197c478bd9Sstevel@tonic-gate 		rval = DDI_DMA_NOMAPPING;
9207c478bd9Sstevel@tonic-gate 		goto bad;
9217c478bd9Sstevel@tonic-gate 	}
9227c478bd9Sstevel@tonic-gate 
9237c478bd9Sstevel@tonic-gate 	/* Get our soft state once we know we're mapping an object. */
9247c478bd9Sstevel@tonic-gate 	softsp = mppriv->softsp;
9257c478bd9Sstevel@tonic-gate 	ASSERT(softsp != NULL);
9267c478bd9Sstevel@tonic-gate 
9277c478bd9Sstevel@tonic-gate 	if (mp->dmai_rflags & DDI_DMA_PARTIAL) {
9287c478bd9Sstevel@tonic-gate 		if (size != OBJSIZE) {
9297c478bd9Sstevel@tonic-gate 			/*
9307c478bd9Sstevel@tonic-gate 			 * If the request is for a partial mapping arrangement,
9317c478bd9Sstevel@tonic-gate 			 * the device has to be able to address at least the
9327c478bd9Sstevel@tonic-gate 			 * size of the window we are establishing.
9337c478bd9Sstevel@tonic-gate 			 */
9347c478bd9Sstevel@tonic-gate 			if (size < iommu_ptob(MIN_DVMA_WIN_SIZE)) {
9357c478bd9Sstevel@tonic-gate 				rval = DDI_DMA_NOMAPPING;
9367c478bd9Sstevel@tonic-gate 				goto bad;
9377c478bd9Sstevel@tonic-gate 			}
9387c478bd9Sstevel@tonic-gate 			npages = iommu_btopr(size + offset);
9397c478bd9Sstevel@tonic-gate 		}
9407c478bd9Sstevel@tonic-gate 		/*
9417c478bd9Sstevel@tonic-gate 		 * If the size requested is less than a moderate amount,
9427c478bd9Sstevel@tonic-gate 		 * skip the partial mapping stuff; it's not worth the
9437c478bd9Sstevel@tonic-gate 		 * effort.
9447c478bd9Sstevel@tonic-gate 		 */
9457c478bd9Sstevel@tonic-gate 		if (npages > MIN_DVMA_WIN_SIZE) {
9467c478bd9Sstevel@tonic-gate 			npages = MIN_DVMA_WIN_SIZE + iommu_btopr(offset);
9477c478bd9Sstevel@tonic-gate 			size = iommu_ptob(MIN_DVMA_WIN_SIZE);
9487c478bd9Sstevel@tonic-gate 			DPRINTF(IOMMU_DMA_SETUP_DEBUG, ("dma_setup: SZ %x pg "
949*ffadc26eSmike_s 			    "%lx sz %x\n", OBJSIZE, npages, size));
9507c478bd9Sstevel@tonic-gate 			if (pplist != NULL) {
9517c478bd9Sstevel@tonic-gate 				mp->dmai_minfo = (void *)pplist;
9527c478bd9Sstevel@tonic-gate 				mp->dmai_rflags |= DMP_SHADOW;
9537c478bd9Sstevel@tonic-gate 			}
9547c478bd9Sstevel@tonic-gate 		} else {
9557c478bd9Sstevel@tonic-gate 			mp->dmai_rflags ^= DDI_DMA_PARTIAL;
9567c478bd9Sstevel@tonic-gate 		}
9577c478bd9Sstevel@tonic-gate 	} else {
9587c478bd9Sstevel@tonic-gate 		if (npages >= iommu_btop(softsp->iommu_dvma_size) -
9597c478bd9Sstevel@tonic-gate 		    MIN_DVMA_WIN_SIZE) {
9607c478bd9Sstevel@tonic-gate 			rval = DDI_DMA_TOOBIG;
9617c478bd9Sstevel@tonic-gate 			goto bad;
9627c478bd9Sstevel@tonic-gate 		}
9637c478bd9Sstevel@tonic-gate 	}
9647c478bd9Sstevel@tonic-gate 
9657c478bd9Sstevel@tonic-gate 	/*
9667c478bd9Sstevel@tonic-gate 	 * save the dmareq object, size and npages into mp
9677c478bd9Sstevel@tonic-gate 	 */
9687c478bd9Sstevel@tonic-gate 	mp->dmai_object = dmareq->dmar_object;
9697c478bd9Sstevel@tonic-gate 	mp->dmai_size = size;
9707c478bd9Sstevel@tonic-gate 	mp->dmai_ndvmapages = npages;
9717c478bd9Sstevel@tonic-gate 
9727c478bd9Sstevel@tonic-gate 	if (mp->dmai_rflags & DMP_NOLIMIT) {
973*ffadc26eSmike_s 		ioaddr = (ioaddr_t)(uintptr_t)vmem_alloc(softsp->dvma_arena,
9747c478bd9Sstevel@tonic-gate 		    iommu_ptob(npages),
9757c478bd9Sstevel@tonic-gate 		    dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP);
9767c478bd9Sstevel@tonic-gate 		if (ioaddr == 0) {
9777c478bd9Sstevel@tonic-gate 			rval = DDI_DMA_NORESOURCES;
9787c478bd9Sstevel@tonic-gate 			goto bad;
9797c478bd9Sstevel@tonic-gate 		}
9807c478bd9Sstevel@tonic-gate 
9817c478bd9Sstevel@tonic-gate 		/*
9827c478bd9Sstevel@tonic-gate 		 * If we have a 1 page request and we're working with a page
9837c478bd9Sstevel@tonic-gate 		 * list, we're going to speed load an IOMMU entry.
9847c478bd9Sstevel@tonic-gate 		 */
9857c478bd9Sstevel@tonic-gate 		if (npages == 1 && !addr) {
9867c478bd9Sstevel@tonic-gate 			uint64_t iotte_flag = IOTTE_VALID | IOTTE_CACHE |
9877c478bd9Sstevel@tonic-gate 			    IOTTE_WRITE | IOTTE_STREAM;
9887c478bd9Sstevel@tonic-gate 			volatile uint64_t *iotte_ptr;
9897c478bd9Sstevel@tonic-gate 			pfn_t pfn;
9907c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE)
9917c478bd9Sstevel@tonic-gate 			struct io_mem_list *iomemp;
9927c478bd9Sstevel@tonic-gate 			pfn_t *pfnp;
9937c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */
9947c478bd9Sstevel@tonic-gate 
9957c478bd9Sstevel@tonic-gate 			iotte_ptr = IOTTE_NDX(ioaddr,
9967c478bd9Sstevel@tonic-gate 			    softsp->soft_tsb_base_addr);
9977c478bd9Sstevel@tonic-gate 
9987c478bd9Sstevel@tonic-gate 			if (mp->dmai_rflags & DDI_DMA_CONSISTENT) {
9997c478bd9Sstevel@tonic-gate 				mp->dmai_rflags |= DMP_NOSYNC;
10007c478bd9Sstevel@tonic-gate 				iotte_flag ^= IOTTE_STREAM;
10017c478bd9Sstevel@tonic-gate 			} else if (softsp->stream_buf_off)
10027c478bd9Sstevel@tonic-gate 				iotte_flag ^= IOTTE_STREAM;
10037c478bd9Sstevel@tonic-gate 
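			/* A one-page object is fully mapped; never partial. */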
10047c478bd9Sstevel@tonic-gate 			mp->dmai_rflags ^= DDI_DMA_PARTIAL;
10057c478bd9Sstevel@tonic-gate 
10067c478bd9Sstevel@tonic-gate 			if (pp != NULL)
10077c478bd9Sstevel@tonic-gate 				pfn = pp->p_pagenum;
10087c478bd9Sstevel@tonic-gate 			else
10097c478bd9Sstevel@tonic-gate 				pfn = (*pplist)->p_pagenum;
10107c478bd9Sstevel@tonic-gate 
10117c478bd9Sstevel@tonic-gate 			iommu_tlb_flush(softsp, ioaddr, 1);
10127c478bd9Sstevel@tonic-gate 
10137c478bd9Sstevel@tonic-gate 			*iotte_ptr =
10147c478bd9Sstevel@tonic-gate 			    ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;
10157c478bd9Sstevel@tonic-gate 
10167c478bd9Sstevel@tonic-gate 			mp->dmai_mapping = (ioaddr_t)(ioaddr + offset);
10177c478bd9Sstevel@tonic-gate 			mp->dmai_nwin = 0;
10187c478bd9Sstevel@tonic-gate 			if (cp != NULL) {
10197c478bd9Sstevel@tonic-gate 				cp->dmac_notused = 0;
10207c478bd9Sstevel@tonic-gate 				cp->dmac_address = (ioaddr_t)mp->dmai_mapping;
10217c478bd9Sstevel@tonic-gate 				cp->dmac_size = mp->dmai_size;
10227c478bd9Sstevel@tonic-gate 				cp->dmac_type = 0;
10237c478bd9Sstevel@tonic-gate 				*ccountp = 1;
10247c478bd9Sstevel@tonic-gate 			}
10257c478bd9Sstevel@tonic-gate 
1026*ffadc26eSmike_s 			DPRINTF(IOMMU_TTE, ("speed loading: TTE index %p "
1027*ffadc26eSmike_s 			    "pfn %lx tte flag %lx addr %lx ioaddr %x\n",
10287c478bd9Sstevel@tonic-gate 			    iotte_ptr, pfn, iotte_flag, addr, ioaddr));
10297c478bd9Sstevel@tonic-gate 
10307c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE)
10317c478bd9Sstevel@tonic-gate 			iomemp = kmem_alloc(sizeof (struct io_mem_list),
10327c478bd9Sstevel@tonic-gate 			    KM_SLEEP);
10337c478bd9Sstevel@tonic-gate 			iomemp->rdip = mp->dmai_rdip;
10347c478bd9Sstevel@tonic-gate 			iomemp->ioaddr = ioaddr;
10357c478bd9Sstevel@tonic-gate 			iomemp->addr = addr;
10367c478bd9Sstevel@tonic-gate 			iomemp->npages = npages;
10377c478bd9Sstevel@tonic-gate 			pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) *
10387c478bd9Sstevel@tonic-gate 			    (npages + 1), KM_SLEEP);
10397c478bd9Sstevel@tonic-gate 			*pfnp = pfn;
10407c478bd9Sstevel@tonic-gate 			mutex_enter(&softsp->iomemlock);
10417c478bd9Sstevel@tonic-gate 			iomemp->next = softsp->iomem;
10427c478bd9Sstevel@tonic-gate 			softsp->iomem = iomemp;
10437c478bd9Sstevel@tonic-gate 			mutex_exit(&softsp->iomemlock);
10447c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */
10457c478bd9Sstevel@tonic-gate 
10467c478bd9Sstevel@tonic-gate 			return (DDI_DMA_MAPPED);
10477c478bd9Sstevel@tonic-gate 		}
10487c478bd9Sstevel@tonic-gate 	} else {
1049*ffadc26eSmike_s 		ioaddr = (ioaddr_t)(uintptr_t)vmem_xalloc(softsp->dvma_arena,
10507c478bd9Sstevel@tonic-gate 		    iommu_ptob(npages),
10517c478bd9Sstevel@tonic-gate 		    MAX((uint_t)dma_attr->dma_attr_align, IOMMU_PAGESIZE), 0,
10527c478bd9Sstevel@tonic-gate 		    (uint_t)dma_attr->dma_attr_seg + 1,
1053*ffadc26eSmike_s 		    (void *)(uintptr_t)(ioaddr_t)dma_attr->dma_attr_addr_lo,
1054*ffadc26eSmike_s 		    (void *)(uintptr_t)
1055*ffadc26eSmike_s 			((ioaddr_t)dma_attr->dma_attr_addr_hi + 1),
10567c478bd9Sstevel@tonic-gate 		    dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP);
10577c478bd9Sstevel@tonic-gate 	}
10587c478bd9Sstevel@tonic-gate 
10597c478bd9Sstevel@tonic-gate 	if (ioaddr == 0) {
10607c478bd9Sstevel@tonic-gate 		if (dmareq->dmar_fp == DDI_DMA_SLEEP)
10617c478bd9Sstevel@tonic-gate 			rval = DDI_DMA_NOMAPPING;
10627c478bd9Sstevel@tonic-gate 		else
10637c478bd9Sstevel@tonic-gate 			rval = DDI_DMA_NORESOURCES;
10647c478bd9Sstevel@tonic-gate 		goto bad;
10657c478bd9Sstevel@tonic-gate 	}
10667c478bd9Sstevel@tonic-gate 
10677c478bd9Sstevel@tonic-gate 	mp->dmai_mapping = ioaddr + offset;
10687c478bd9Sstevel@tonic-gate 	ASSERT(mp->dmai_mapping >= softsp->iommu_dvma_base);
10697c478bd9Sstevel@tonic-gate 
10707c478bd9Sstevel@tonic-gate 	/*
10717c478bd9Sstevel@tonic-gate 	 * At this point we have a range of virtual addresses allocated
10727c478bd9Sstevel@tonic-gate 	 * which we now have to map to the requested object.
10737c478bd9Sstevel@tonic-gate 	 */
10747c478bd9Sstevel@tonic-gate 	if (addr) {
10757c478bd9Sstevel@tonic-gate 		rval = iommu_create_vaddr_mappings(mp,
10767c478bd9Sstevel@tonic-gate 		    addr & ~IOMMU_PAGEOFFSET);
10777c478bd9Sstevel@tonic-gate 		if (rval == DDI_DMA_NOMAPPING)
10787c478bd9Sstevel@tonic-gate 			goto bad_nomap;
10797c478bd9Sstevel@tonic-gate 	} else {
10807c478bd9Sstevel@tonic-gate 		rval = iommu_create_pp_mappings(mp, pp, pplist);
10817c478bd9Sstevel@tonic-gate 		if (rval == DDI_DMA_NOMAPPING)
10827c478bd9Sstevel@tonic-gate 			goto bad_nomap;
10837c478bd9Sstevel@tonic-gate 	}
10847c478bd9Sstevel@tonic-gate 
10857c478bd9Sstevel@tonic-gate 	if (cp) {
10867c478bd9Sstevel@tonic-gate 		cp->dmac_notused = 0;
10877c478bd9Sstevel@tonic-gate 		cp->dmac_address = (ioaddr_t)mp->dmai_mapping;
10887c478bd9Sstevel@tonic-gate 		cp->dmac_size = mp->dmai_size;
10897c478bd9Sstevel@tonic-gate 		cp->dmac_type = 0;
10907c478bd9Sstevel@tonic-gate 		*ccountp = 1;
10917c478bd9Sstevel@tonic-gate 	}
10927c478bd9Sstevel@tonic-gate 	if (mp->dmai_rflags & DDI_DMA_PARTIAL) {
10937c478bd9Sstevel@tonic-gate 		size = iommu_ptob(mp->dmai_ndvmapages - iommu_btopr(offset));
10947c478bd9Sstevel@tonic-gate 		mp->dmai_nwin =
10957c478bd9Sstevel@tonic-gate 		    (dmareq->dmar_object.dmao_size + (size - 1)) / size;
10967c478bd9Sstevel@tonic-gate 		return (DDI_DMA_PARTIAL_MAP);
10977c478bd9Sstevel@tonic-gate 	} else {
10987c478bd9Sstevel@tonic-gate 		mp->dmai_nwin = 0;
10997c478bd9Sstevel@tonic-gate 		return (DDI_DMA_MAPPED);
11007c478bd9Sstevel@tonic-gate 	}
11017c478bd9Sstevel@tonic-gate 
11027c478bd9Sstevel@tonic-gate bad_nomap:
11037c478bd9Sstevel@tonic-gate 	/*
11047c478bd9Sstevel@tonic-gate 	 * Could not create IOMMU mappings.
11057c478bd9Sstevel@tonic-gate 	 */
11067c478bd9Sstevel@tonic-gate 	if (mp->dmai_rflags & DMP_NOLIMIT) {
1107*ffadc26eSmike_s 		vmem_free(softsp->dvma_arena, (void *)(uintptr_t)ioaddr,
11087c478bd9Sstevel@tonic-gate 		    iommu_ptob(npages));
11097c478bd9Sstevel@tonic-gate 	} else {
1110*ffadc26eSmike_s 		vmem_xfree(softsp->dvma_arena, (void *)(uintptr_t)ioaddr,
11117c478bd9Sstevel@tonic-gate 		    iommu_ptob(npages));
11127c478bd9Sstevel@tonic-gate 	}
11137c478bd9Sstevel@tonic-gate 
11147c478bd9Sstevel@tonic-gate bad:
11157c478bd9Sstevel@tonic-gate 	if (rval == DDI_DMA_NORESOURCES &&
11167c478bd9Sstevel@tonic-gate 	    dmareq->dmar_fp != DDI_DMA_DONTWAIT) {
11177c478bd9Sstevel@tonic-gate 		ddi_set_callback(dmareq->dmar_fp,
11187c478bd9Sstevel@tonic-gate 		    dmareq->dmar_arg, &softsp->dvma_call_list_id);
11197c478bd9Sstevel@tonic-gate 	}
11207c478bd9Sstevel@tonic-gate 	mp->dmai_inuse = 0;
11217c478bd9Sstevel@tonic-gate 	return (rval);
11227c478bd9Sstevel@tonic-gate }
11237c478bd9Sstevel@tonic-gate 
11247c478bd9Sstevel@tonic-gate /* ARGSUSED */
11257c478bd9Sstevel@tonic-gate int
11267c478bd9Sstevel@tonic-gate iommu_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
11277c478bd9Sstevel@tonic-gate     ddi_dma_handle_t handle)
11287c478bd9Sstevel@tonic-gate {
11297c478bd9Sstevel@tonic-gate 	ioaddr_t addr;
11307c478bd9Sstevel@tonic-gate 	uint_t npages;
11317c478bd9Sstevel@tonic-gate 	size_t size;
11327c478bd9Sstevel@tonic-gate 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
11337c478bd9Sstevel@tonic-gate 	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
11347c478bd9Sstevel@tonic-gate 	struct sbus_soft_state *softsp = mppriv->softsp;
11357c478bd9Sstevel@tonic-gate 	ASSERT(softsp != NULL);
11367c478bd9Sstevel@tonic-gate 
11377c478bd9Sstevel@tonic-gate 	addr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
11387c478bd9Sstevel@tonic-gate 	npages = mp->dmai_ndvmapages;
11397c478bd9Sstevel@tonic-gate 	size = iommu_ptob(npages);
11407c478bd9Sstevel@tonic-gate 
11417c478bd9Sstevel@tonic-gate 	DPRINTF(IOMMU_DMA_UNBINDHDL_DEBUG, ("iommu_dma_unbindhdl: "
11427c478bd9Sstevel@tonic-gate 	    "unbinding addr %x for %x pages\n", addr, mp->dmai_ndvmapages));
11437c478bd9Sstevel@tonic-gate 
11447c478bd9Sstevel@tonic-gate 	/* sync the entire object */
11457c478bd9Sstevel@tonic-gate 	if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
11467c478bd9Sstevel@tonic-gate 		/* flush stream write buffers */
11477c478bd9Sstevel@tonic-gate 		sync_stream_buf(softsp, addr, npages, (int *)&mppriv->sync_flag,
11487c478bd9Sstevel@tonic-gate 		    mppriv->phys_sync_flag);
11497c478bd9Sstevel@tonic-gate 	}
11507c478bd9Sstevel@tonic-gate 
11517c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMDEBUG)
11527c478bd9Sstevel@tonic-gate 	/*
11537c478bd9Sstevel@tonic-gate 	 * 'Free' the dma mappings.
11547c478bd9Sstevel@tonic-gate 	 */
11557c478bd9Sstevel@tonic-gate 	iommu_remove_mappings(mp);
11567c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMDEBUG */
11577c478bd9Sstevel@tonic-gate 
11587c478bd9Sstevel@tonic-gate 	ASSERT(npages > (uint_t)0);
11597c478bd9Sstevel@tonic-gate 	if (mp->dmai_rflags & DMP_NOLIMIT)
1160*ffadc26eSmike_s 		vmem_free(softsp->dvma_arena, (void *)(uintptr_t)addr, size);
11617c478bd9Sstevel@tonic-gate 	else
1162*ffadc26eSmike_s 		vmem_xfree(softsp->dvma_arena, (void *)(uintptr_t)addr, size);
11637c478bd9Sstevel@tonic-gate 
11647c478bd9Sstevel@tonic-gate 	mp->dmai_ndvmapages = 0;
11657c478bd9Sstevel@tonic-gate 	mp->dmai_inuse = 0;
11667c478bd9Sstevel@tonic-gate 	mp->dmai_minfo = NULL;
11677c478bd9Sstevel@tonic-gate 
11687c478bd9Sstevel@tonic-gate 	if (softsp->dvma_call_list_id != 0)
11697c478bd9Sstevel@tonic-gate 		ddi_run_callback(&softsp->dvma_call_list_id);
11707c478bd9Sstevel@tonic-gate 
11717c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
11727c478bd9Sstevel@tonic-gate }
11737c478bd9Sstevel@tonic-gate 
11747c478bd9Sstevel@tonic-gate /*ARGSUSED*/
11757c478bd9Sstevel@tonic-gate int
11767c478bd9Sstevel@tonic-gate iommu_dma_flush(dev_info_t *dip, dev_info_t *rdip,
11777c478bd9Sstevel@tonic-gate     ddi_dma_handle_t handle, off_t off, size_t len,
11787c478bd9Sstevel@tonic-gate     uint_t cache_flags)
11797c478bd9Sstevel@tonic-gate {
11807c478bd9Sstevel@tonic-gate 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
11817c478bd9Sstevel@tonic-gate 	struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
11827c478bd9Sstevel@tonic-gate 
11837c478bd9Sstevel@tonic-gate 	if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
11847c478bd9Sstevel@tonic-gate 		sync_stream_buf(mppriv->softsp, mp->dmai_mapping,
11857c478bd9Sstevel@tonic-gate 		    mp->dmai_ndvmapages, (int *)&mppriv->sync_flag,
11867c478bd9Sstevel@tonic-gate 		    mppriv->phys_sync_flag);
11877c478bd9Sstevel@tonic-gate 	}
11887c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
11897c478bd9Sstevel@tonic-gate }
11907c478bd9Sstevel@tonic-gate 
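/*
 * Activate DMA window 'win': hand back a single cookie describing it and,
 * if the window offset has changed, remap the IOMMU entries for it via
 * iommu_map_window().
 */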
11917c478bd9Sstevel@tonic-gate /*ARGSUSED*/
11927c478bd9Sstevel@tonic-gate int
11937c478bd9Sstevel@tonic-gate iommu_dma_win(dev_info_t *dip, dev_info_t *rdip,
11947c478bd9Sstevel@tonic-gate     ddi_dma_handle_t handle, uint_t win, off_t *offp,
11957c478bd9Sstevel@tonic-gate     size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
11967c478bd9Sstevel@tonic-gate {
11977c478bd9Sstevel@tonic-gate 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
11987c478bd9Sstevel@tonic-gate 	off_t offset;
11997c478bd9Sstevel@tonic-gate 	uint_t winsize;
12007c478bd9Sstevel@tonic-gate 	uint_t newoff;
12017c478bd9Sstevel@tonic-gate 	int rval;
12027c478bd9Sstevel@tonic-gate 
12037c478bd9Sstevel@tonic-gate 	offset = mp->dmai_mapping & IOMMU_PAGEOFFSET;
12047c478bd9Sstevel@tonic-gate 	winsize = iommu_ptob(mp->dmai_ndvmapages - iommu_btopr(offset));
12057c478bd9Sstevel@tonic-gate 
12067c478bd9Sstevel@tonic-gate 	DPRINTF(IOMMU_DMA_WIN_DEBUG, ("getwin win %d winsize %x\n", win,
12077c478bd9Sstevel@tonic-gate 	    winsize));
12087c478bd9Sstevel@tonic-gate 
12097c478bd9Sstevel@tonic-gate 	/*
12107c478bd9Sstevel@tonic-gate 	 * win is in the range [0 .. dmai_nwin-1]
12117c478bd9Sstevel@tonic-gate 	 */
12127c478bd9Sstevel@tonic-gate 	if (win >= mp->dmai_nwin)
12137c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
12147c478bd9Sstevel@tonic-gate 
12157c478bd9Sstevel@tonic-gate 	newoff = win * winsize;
12167c478bd9Sstevel@tonic-gate 	if (newoff > mp->dmai_object.dmao_size - mp->dmai_minxfer)
12177c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
12187c478bd9Sstevel@tonic-gate 
12197c478bd9Sstevel@tonic-gate 	ASSERT(cookiep);
12207c478bd9Sstevel@tonic-gate 	cookiep->dmac_notused = 0;
12217c478bd9Sstevel@tonic-gate 	cookiep->dmac_type = 0;
12227c478bd9Sstevel@tonic-gate 	cookiep->dmac_address = (ioaddr_t)mp->dmai_mapping;
12237c478bd9Sstevel@tonic-gate 	cookiep->dmac_size = mp->dmai_size;
12247c478bd9Sstevel@tonic-gate 	*ccountp = 1;
12257c478bd9Sstevel@tonic-gate 	*offp = (off_t)newoff;
12267c478bd9Sstevel@tonic-gate 	*lenp = (uint_t)winsize;
12277c478bd9Sstevel@tonic-gate 
12287c478bd9Sstevel@tonic-gate 	if (newoff == mp->dmai_offset) {
12297c478bd9Sstevel@tonic-gate 		/*
12307c478bd9Sstevel@tonic-gate 		 * Nothing to do...
12317c478bd9Sstevel@tonic-gate 		 */
12327c478bd9Sstevel@tonic-gate 		return (DDI_SUCCESS);
12337c478bd9Sstevel@tonic-gate 	}
12347c478bd9Sstevel@tonic-gate 
12357c478bd9Sstevel@tonic-gate 	if ((rval = iommu_map_window(mp, newoff, winsize)) != DDI_SUCCESS)
12367c478bd9Sstevel@tonic-gate 		return (rval);
12377c478bd9Sstevel@tonic-gate 
12387c478bd9Sstevel@tonic-gate 	/*
12397c478bd9Sstevel@tonic-gate 	 * Set this again in case iommu_map_window() has changed it
12407c478bd9Sstevel@tonic-gate 	 */
12417c478bd9Sstevel@tonic-gate 	cookiep->dmac_size = mp->dmai_size;
12427c478bd9Sstevel@tonic-gate 
12437c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
12447c478bd9Sstevel@tonic-gate }
12457c478bd9Sstevel@tonic-gate 
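/*
 * Shift the partial mapping to the window starting at newoff: tear down the
 * current window's mappings (DEBUG builds only) and rebuild them from the
 * shadow page list, the virtual address or the page_t chain of the object.
 */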
12467c478bd9Sstevel@tonic-gate static int
12477c478bd9Sstevel@tonic-gate iommu_map_window(ddi_dma_impl_t *mp, off_t newoff, size_t winsize)
12487c478bd9Sstevel@tonic-gate {
12497c478bd9Sstevel@tonic-gate 	uintptr_t addr = 0;
12507c478bd9Sstevel@tonic-gate 	page_t *pp;
12517c478bd9Sstevel@tonic-gate 	uint_t flags;
12527c478bd9Sstevel@tonic-gate 	struct page **pplist = NULL;
12537c478bd9Sstevel@tonic-gate 
12547c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMDEBUG)
12557c478bd9Sstevel@tonic-gate 	/* Free mappings for current window */
12567c478bd9Sstevel@tonic-gate 	iommu_remove_mappings(mp);
12577c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMDEBUG */
12587c478bd9Sstevel@tonic-gate 
12597c478bd9Sstevel@tonic-gate 	mp->dmai_offset = newoff;
12607c478bd9Sstevel@tonic-gate 	mp->dmai_size = mp->dmai_object.dmao_size - newoff;
12617c478bd9Sstevel@tonic-gate 	mp->dmai_size = MIN(mp->dmai_size, winsize);
12627c478bd9Sstevel@tonic-gate 
12637c478bd9Sstevel@tonic-gate 	if (mp->dmai_object.dmao_type == DMA_OTYP_VADDR ||
12647c478bd9Sstevel@tonic-gate 	    mp->dmai_object.dmao_type == DMA_OTYP_BUFVADDR) {
12657c478bd9Sstevel@tonic-gate 		if (mp->dmai_rflags & DMP_SHADOW) {
12667c478bd9Sstevel@tonic-gate 			pplist = (struct page **)mp->dmai_minfo;
12677c478bd9Sstevel@tonic-gate 			ASSERT(pplist != NULL);
12687c478bd9Sstevel@tonic-gate 			pplist = pplist + (newoff >> MMU_PAGESHIFT);
12697c478bd9Sstevel@tonic-gate 		} else {
12707c478bd9Sstevel@tonic-gate 			addr = (uintptr_t)
12717c478bd9Sstevel@tonic-gate 			    mp->dmai_object.dmao_obj.virt_obj.v_addr;
12727c478bd9Sstevel@tonic-gate 			addr = (addr + newoff) & ~IOMMU_PAGEOFFSET;
12737c478bd9Sstevel@tonic-gate 		}
12747c478bd9Sstevel@tonic-gate 		pp = NULL;
12757c478bd9Sstevel@tonic-gate 	} else {
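		/*
		 * Page list object: advance to the page_t containing newoff.
		 * 'flags' is reused here as a byte offset counter.
		 */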
12767c478bd9Sstevel@tonic-gate 		pp = mp->dmai_object.dmao_obj.pp_obj.pp_pp;
12777c478bd9Sstevel@tonic-gate 		flags = 0;
12787c478bd9Sstevel@tonic-gate 		while (flags < newoff) {
12797c478bd9Sstevel@tonic-gate 			pp = pp->p_next;
12807c478bd9Sstevel@tonic-gate 			flags += MMU_PAGESIZE;
12817c478bd9Sstevel@tonic-gate 		}
12827c478bd9Sstevel@tonic-gate 	}
12837c478bd9Sstevel@tonic-gate 
12847c478bd9Sstevel@tonic-gate 	/* Set up mappings for next window */
12857c478bd9Sstevel@tonic-gate 	if (addr) {
12867c478bd9Sstevel@tonic-gate 		if (iommu_create_vaddr_mappings(mp, addr) < 0)
12877c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
12887c478bd9Sstevel@tonic-gate 	} else {
12897c478bd9Sstevel@tonic-gate 		if (iommu_create_pp_mappings(mp, pp, pplist) < 0)
12907c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
12917c478bd9Sstevel@tonic-gate 	}
12927c478bd9Sstevel@tonic-gate 
12937c478bd9Sstevel@tonic-gate 	/*
12947c478bd9Sstevel@tonic-gate 	 * also invalidate read stream buffer
12957c478bd9Sstevel@tonic-gate 	 */
12967c478bd9Sstevel@tonic-gate 	if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
12977c478bd9Sstevel@tonic-gate 		struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
12987c478bd9Sstevel@tonic-gate 
12997c478bd9Sstevel@tonic-gate 		sync_stream_buf(mppriv->softsp, mp->dmai_mapping,
13007c478bd9Sstevel@tonic-gate 		    mp->dmai_ndvmapages, (int *)&mppriv->sync_flag,
13017c478bd9Sstevel@tonic-gate 		    mppriv->phys_sync_flag);
13027c478bd9Sstevel@tonic-gate 	}
13037c478bd9Sstevel@tonic-gate 
13047c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
13057c478bd9Sstevel@tonic-gate 
13067c478bd9Sstevel@tonic-gate }
13077c478bd9Sstevel@tonic-gate 
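/*
 * Limits-based (ddi_dma_lim_t) mapping entry point: translate the request's
 * limits into DMA attributes, allocate a handle and bind it to the request
 * in a single step via iommu_dma_bindhdl().
 */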
13087c478bd9Sstevel@tonic-gate int
13097c478bd9Sstevel@tonic-gate iommu_dma_map(dev_info_t *dip, dev_info_t *rdip,
13107c478bd9Sstevel@tonic-gate     struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep)
13117c478bd9Sstevel@tonic-gate {
13127c478bd9Sstevel@tonic-gate 	ddi_dma_lim_t *dma_lim = dmareq->dmar_limits;
13137c478bd9Sstevel@tonic-gate 	ddi_dma_impl_t *mp;
13147c478bd9Sstevel@tonic-gate 	ddi_dma_attr_t *dma_attr;
13157c478bd9Sstevel@tonic-gate 	struct dma_impl_priv *mppriv;
13167c478bd9Sstevel@tonic-gate 	ioaddr_t addrlow, addrhigh;
13177c478bd9Sstevel@tonic-gate 	ioaddr_t segalign;
13187c478bd9Sstevel@tonic-gate 	int rval;
13197c478bd9Sstevel@tonic-gate 	struct sbus_soft_state *softsp =
13207c478bd9Sstevel@tonic-gate 		(struct sbus_soft_state *)ddi_get_soft_state(sbusp,
13217c478bd9Sstevel@tonic-gate 		ddi_get_instance(dip));
13227c478bd9Sstevel@tonic-gate 
13237c478bd9Sstevel@tonic-gate 	addrlow = dma_lim->dlim_addr_lo;
13247c478bd9Sstevel@tonic-gate 	addrhigh = dma_lim->dlim_addr_hi;
13257c478bd9Sstevel@tonic-gate 	if ((addrhigh <= addrlow) ||
13267c478bd9Sstevel@tonic-gate 	    (addrhigh < (ioaddr_t)softsp->iommu_dvma_base)) {
13277c478bd9Sstevel@tonic-gate 		return (DDI_DMA_NOMAPPING);
13287c478bd9Sstevel@tonic-gate 	}
13297c478bd9Sstevel@tonic-gate 
13307c478bd9Sstevel@tonic-gate 	/*
13317c478bd9Sstevel@tonic-gate 	 * Set up DMA burstsizes and min-xfer counts.
13327c478bd9Sstevel@tonic-gate 	 */
13337c478bd9Sstevel@tonic-gate 	(void) iommu_dma_lim_setup(dip, rdip, softsp, &dma_lim->dlim_burstsizes,
13347c478bd9Sstevel@tonic-gate 		(uint_t)dma_lim->dlim_burstsizes, &dma_lim->dlim_minxfer,
13357c478bd9Sstevel@tonic-gate 		dmareq->dmar_flags);
13367c478bd9Sstevel@tonic-gate 
13377c478bd9Sstevel@tonic-gate 	if (dma_lim->dlim_burstsizes == 0)
13387c478bd9Sstevel@tonic-gate 		return (DDI_DMA_NOMAPPING);
13397c478bd9Sstevel@tonic-gate 	/*
13407c478bd9Sstevel@tonic-gate 	 * If not an advisory call, get a DMA handle
13417c478bd9Sstevel@tonic-gate 	 */
13427c478bd9Sstevel@tonic-gate 	if (!handlep) {
13437c478bd9Sstevel@tonic-gate 		return (DDI_DMA_MAPOK);
13447c478bd9Sstevel@tonic-gate 	}
13457c478bd9Sstevel@tonic-gate 
13467c478bd9Sstevel@tonic-gate 	mppriv = kmem_zalloc(sizeof (*mppriv),
13477c478bd9Sstevel@tonic-gate 	    (dmareq->dmar_fp == DDI_DMA_SLEEP) ? KM_SLEEP : KM_NOSLEEP);
13487c478bd9Sstevel@tonic-gate 	if (mppriv == NULL) {
13497c478bd9Sstevel@tonic-gate 		if (dmareq->dmar_fp != DDI_DMA_DONTWAIT) {
13507c478bd9Sstevel@tonic-gate 			ddi_set_callback(dmareq->dmar_fp,
13517c478bd9Sstevel@tonic-gate 			    dmareq->dmar_arg, &softsp->dvma_call_list_id);
13527c478bd9Sstevel@tonic-gate 		}
13537c478bd9Sstevel@tonic-gate 		return (DDI_DMA_NORESOURCES);
13547c478bd9Sstevel@tonic-gate 	}
13557c478bd9Sstevel@tonic-gate 	mp = (ddi_dma_impl_t *)mppriv;
13567c478bd9Sstevel@tonic-gate 	mp->dmai_rdip = rdip;
13577c478bd9Sstevel@tonic-gate 	mp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS;
13587c478bd9Sstevel@tonic-gate 	mp->dmai_minxfer = dma_lim->dlim_minxfer;
13597c478bd9Sstevel@tonic-gate 	mp->dmai_burstsizes = dma_lim->dlim_burstsizes;
13607c478bd9Sstevel@tonic-gate 	mp->dmai_offset = 0;
13617c478bd9Sstevel@tonic-gate 	mp->dmai_ndvmapages = 0;
13627c478bd9Sstevel@tonic-gate 	mp->dmai_minfo = 0;
13637c478bd9Sstevel@tonic-gate 	mp->dmai_inuse = 0;
13647c478bd9Sstevel@tonic-gate 	segalign = dma_lim->dlim_cntr_max;
13657c478bd9Sstevel@tonic-gate 	/* See if the DMA engine has any limit restrictions. */
13667c478bd9Sstevel@tonic-gate 	if (segalign == UINT32_MAX && addrhigh == UINT32_MAX &&
13677c478bd9Sstevel@tonic-gate 	    addrlow == 0) {
13687c478bd9Sstevel@tonic-gate 		mp->dmai_rflags |= DMP_NOLIMIT;
13697c478bd9Sstevel@tonic-gate 	}
13707c478bd9Sstevel@tonic-gate 	mppriv->softsp = softsp;
13717c478bd9Sstevel@tonic-gate 	mppriv->phys_sync_flag = va_to_pa((caddr_t)&mppriv->sync_flag);
13727c478bd9Sstevel@tonic-gate 	dma_attr = &mp->dmai_attr;
13737c478bd9Sstevel@tonic-gate 	dma_attr->dma_attr_align = 1;
13747c478bd9Sstevel@tonic-gate 	dma_attr->dma_attr_addr_lo = addrlow;
13757c478bd9Sstevel@tonic-gate 	dma_attr->dma_attr_addr_hi = addrhigh;
13767c478bd9Sstevel@tonic-gate 	dma_attr->dma_attr_seg = segalign;
13777c478bd9Sstevel@tonic-gate 	dma_attr->dma_attr_burstsizes = dma_lim->dlim_burstsizes;
13787c478bd9Sstevel@tonic-gate 	rval = iommu_dma_bindhdl(dip, rdip, (ddi_dma_handle_t)mp,
13797c478bd9Sstevel@tonic-gate 		dmareq, NULL, NULL);
13807c478bd9Sstevel@tonic-gate 	if (rval && (rval != DDI_DMA_PARTIAL_MAP)) {
13817c478bd9Sstevel@tonic-gate 		kmem_free(mppriv, sizeof (*mppriv));
13827c478bd9Sstevel@tonic-gate 	} else {
13837c478bd9Sstevel@tonic-gate 		*handlep = (ddi_dma_handle_t)mp;
13847c478bd9Sstevel@tonic-gate 	}
13857c478bd9Sstevel@tonic-gate 	return (rval);
13867c478bd9Sstevel@tonic-gate }
13877c478bd9Sstevel@tonic-gate 
13887c478bd9Sstevel@tonic-gate /*ARGSUSED*/
13897c478bd9Sstevel@tonic-gate int
13907c478bd9Sstevel@tonic-gate iommu_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
13917c478bd9Sstevel@tonic-gate     ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
13927c478bd9Sstevel@tonic-gate     off_t *offp, size_t *lenp, caddr_t *objp, uint_t cache_flags)
13937c478bd9Sstevel@tonic-gate {
13947c478bd9Sstevel@tonic-gate 	ioaddr_t addr;
13957c478bd9Sstevel@tonic-gate 	uint_t offset;
13967c478bd9Sstevel@tonic-gate 	pgcnt_t npages;
13977c478bd9Sstevel@tonic-gate 	size_t size;
13987c478bd9Sstevel@tonic-gate 	ddi_dma_cookie_t *cp;
13997c478bd9Sstevel@tonic-gate 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
14007c478bd9Sstevel@tonic-gate 
14017c478bd9Sstevel@tonic-gate 	DPRINTF(IOMMU_DMAMCTL_DEBUG, ("dma_mctl: handle %p ", mp));
14027c478bd9Sstevel@tonic-gate 	switch (request) {
14037c478bd9Sstevel@tonic-gate 	case DDI_DMA_FREE:
14047c478bd9Sstevel@tonic-gate 	{
14057c478bd9Sstevel@tonic-gate 		struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
14067c478bd9Sstevel@tonic-gate 		struct sbus_soft_state *softsp = mppriv->softsp;
14077c478bd9Sstevel@tonic-gate 		ASSERT(softsp != NULL);
14087c478bd9Sstevel@tonic-gate 
14097c478bd9Sstevel@tonic-gate 		/*
14107c478bd9Sstevel@tonic-gate 		 * 'Free' the dma mappings.
14117c478bd9Sstevel@tonic-gate 		 */
14127c478bd9Sstevel@tonic-gate 		addr = (ioaddr_t)(mp->dmai_mapping & ~IOMMU_PAGEOFFSET);
14137c478bd9Sstevel@tonic-gate 		npages = mp->dmai_ndvmapages;
14147c478bd9Sstevel@tonic-gate 		size = iommu_ptob(npages);
14157c478bd9Sstevel@tonic-gate 
14167c478bd9Sstevel@tonic-gate 		DPRINTF(IOMMU_DMAMCTL_DMA_FREE_DEBUG, ("iommu_dma_mctl dmafree:"
14177c478bd9Sstevel@tonic-gate 		    " freeing vaddr %x for %x pages.\n", addr,
14187c478bd9Sstevel@tonic-gate 		    mp->dmai_ndvmapages));
14197c478bd9Sstevel@tonic-gate 		/* sync the entire object */
14207c478bd9Sstevel@tonic-gate 		if (!(mp->dmai_rflags & DDI_DMA_CONSISTENT)) {
14217c478bd9Sstevel@tonic-gate 			/* flush stream write buffers */
14227c478bd9Sstevel@tonic-gate 			sync_stream_buf(softsp, addr, npages,
14237c478bd9Sstevel@tonic-gate 			    (int *)&mppriv->sync_flag, mppriv->phys_sync_flag);
14247c478bd9Sstevel@tonic-gate 		}
14257c478bd9Sstevel@tonic-gate 
14267c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMDEBUG)
14277c478bd9Sstevel@tonic-gate 		iommu_remove_mappings(mp);
14287c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMDEBUG */
14297c478bd9Sstevel@tonic-gate 
14307c478bd9Sstevel@tonic-gate 		ASSERT(npages > (uint_t)0);
14317c478bd9Sstevel@tonic-gate 		if (mp->dmai_rflags & DMP_NOLIMIT)
1432*ffadc26eSmike_s 			vmem_free(softsp->dvma_arena,
1433*ffadc26eSmike_s 			    (void *)(uintptr_t)addr, size);
14347c478bd9Sstevel@tonic-gate 		else
1435*ffadc26eSmike_s 			vmem_xfree(softsp->dvma_arena,
1436*ffadc26eSmike_s 			    (void *)(uintptr_t)addr, size);
14377c478bd9Sstevel@tonic-gate 
14387c478bd9Sstevel@tonic-gate 		kmem_free(mppriv, sizeof (*mppriv));
14397c478bd9Sstevel@tonic-gate 
14407c478bd9Sstevel@tonic-gate 		if (softsp->dvma_call_list_id != 0)
14417c478bd9Sstevel@tonic-gate 			ddi_run_callback(&softsp->dvma_call_list_id);
14427c478bd9Sstevel@tonic-gate 
14437c478bd9Sstevel@tonic-gate 		break;
14447c478bd9Sstevel@tonic-gate 	}
14457c478bd9Sstevel@tonic-gate 
14467c478bd9Sstevel@tonic-gate 	case DDI_DMA_SET_SBUS64:
14477c478bd9Sstevel@tonic-gate 	{
14487c478bd9Sstevel@tonic-gate 		struct dma_impl_priv *mppriv = (struct dma_impl_priv *)mp;
14497c478bd9Sstevel@tonic-gate 
14507c478bd9Sstevel@tonic-gate 		return (iommu_dma_lim_setup(dip, rdip, mppriv->softsp,
14517c478bd9Sstevel@tonic-gate 		    &mp->dmai_burstsizes, (uint_t)*lenp, &mp->dmai_minxfer,
14527c478bd9Sstevel@tonic-gate 		    DDI_DMA_SBUS_64BIT));
14537c478bd9Sstevel@tonic-gate 	}
14547c478bd9Sstevel@tonic-gate 
14557c478bd9Sstevel@tonic-gate 	case DDI_DMA_HTOC:
14567c478bd9Sstevel@tonic-gate 		DPRINTF(IOMMU_DMAMCTL_HTOC_DEBUG, ("htoc off %lx mapping %lx "
1457*ffadc26eSmike_s 		    "size %x\n", *offp, mp->dmai_mapping,
14587c478bd9Sstevel@tonic-gate 		    mp->dmai_size));
14597c478bd9Sstevel@tonic-gate 
14607c478bd9Sstevel@tonic-gate 		if ((uint_t)(*offp) >= mp->dmai_size)
14617c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14627c478bd9Sstevel@tonic-gate 
14637c478bd9Sstevel@tonic-gate 		cp = (ddi_dma_cookie_t *)objp;
14647c478bd9Sstevel@tonic-gate 		cp->dmac_notused = 0;
14657c478bd9Sstevel@tonic-gate 		cp->dmac_address = (mp->dmai_mapping + (uint_t)(*offp));
14667c478bd9Sstevel@tonic-gate 		cp->dmac_size =
14677c478bd9Sstevel@tonic-gate 		    mp->dmai_mapping + mp->dmai_size - cp->dmac_address;
14687c478bd9Sstevel@tonic-gate 		cp->dmac_type = 0;
14697c478bd9Sstevel@tonic-gate 
14707c478bd9Sstevel@tonic-gate 		break;
14717c478bd9Sstevel@tonic-gate 
14727c478bd9Sstevel@tonic-gate 	case DDI_DMA_KVADDR:
14737c478bd9Sstevel@tonic-gate 		/*
14747c478bd9Sstevel@tonic-gate 		 * If a physical address mapping has percolated this high,
14757c478bd9Sstevel@tonic-gate 		 * that is an error (maybe?).
14767c478bd9Sstevel@tonic-gate 		 */
14777c478bd9Sstevel@tonic-gate 		if (mp->dmai_rflags & DMP_PHYSADDR) {
14787c478bd9Sstevel@tonic-gate 			DPRINTF(IOMMU_DMAMCTL_KVADDR_DEBUG, ("kvaddr of phys "
14797c478bd9Sstevel@tonic-gate 			    "mapping\n"));
14807c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
14817c478bd9Sstevel@tonic-gate 		}
14827c478bd9Sstevel@tonic-gate 
14837c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
14847c478bd9Sstevel@tonic-gate 
14857c478bd9Sstevel@tonic-gate 	case DDI_DMA_NEXTWIN:
14867c478bd9Sstevel@tonic-gate 	{
14877c478bd9Sstevel@tonic-gate 		ddi_dma_win_t *owin, *nwin;
14887c478bd9Sstevel@tonic-gate 		uint_t winsize, newoff;
14897c478bd9Sstevel@tonic-gate 		int rval;
14907c478bd9Sstevel@tonic-gate 
14917c478bd9Sstevel@tonic-gate 		DPRINTF(IOMMU_DMAMCTL_NEXTWIN_DEBUG, ("nextwin\n"));
14927c478bd9Sstevel@tonic-gate 
14937c478bd9Sstevel@tonic-gate 		mp = (ddi_dma_impl_t *)handle;
14947c478bd9Sstevel@tonic-gate 		owin = (ddi_dma_win_t *)offp;
14957c478bd9Sstevel@tonic-gate 		nwin = (ddi_dma_win_t *)objp;
14967c478bd9Sstevel@tonic-gate 		if (mp->dmai_rflags & DDI_DMA_PARTIAL) {
14977c478bd9Sstevel@tonic-gate 			if (*owin == NULL) {
14987c478bd9Sstevel@tonic-gate 				DPRINTF(IOMMU_DMAMCTL_NEXTWIN_DEBUG,
14997c478bd9Sstevel@tonic-gate 				    ("nextwin: win == NULL\n"));
15007c478bd9Sstevel@tonic-gate 				mp->dmai_offset = 0;
15017c478bd9Sstevel@tonic-gate 				*nwin = (ddi_dma_win_t)mp;
15027c478bd9Sstevel@tonic-gate 				return (DDI_SUCCESS);
15037c478bd9Sstevel@tonic-gate 			}
15047c478bd9Sstevel@tonic-gate 
15057c478bd9Sstevel@tonic-gate 			offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET);
15067c478bd9Sstevel@tonic-gate 			winsize = iommu_ptob(mp->dmai_ndvmapages -
15077c478bd9Sstevel@tonic-gate 			    iommu_btopr(offset));
15087c478bd9Sstevel@tonic-gate 
15097c478bd9Sstevel@tonic-gate 			newoff = (uint_t)(mp->dmai_offset + winsize);
15107c478bd9Sstevel@tonic-gate 			if (newoff > mp->dmai_object.dmao_size -
15117c478bd9Sstevel@tonic-gate 			    mp->dmai_minxfer)
15127c478bd9Sstevel@tonic-gate 				return (DDI_DMA_DONE);
15137c478bd9Sstevel@tonic-gate 
15147c478bd9Sstevel@tonic-gate 			if ((rval = iommu_map_window(mp, newoff, winsize))
15157c478bd9Sstevel@tonic-gate 			    != DDI_SUCCESS)
15167c478bd9Sstevel@tonic-gate 				return (rval);
15177c478bd9Sstevel@tonic-gate 		} else {
15187c478bd9Sstevel@tonic-gate 			DPRINTF(IOMMU_DMAMCTL_NEXTWIN_DEBUG, ("nextwin: no "
15197c478bd9Sstevel@tonic-gate 			    "partial mapping\n"));
15207c478bd9Sstevel@tonic-gate 			if (*owin != NULL)
15217c478bd9Sstevel@tonic-gate 				return (DDI_DMA_DONE);
15227c478bd9Sstevel@tonic-gate 			mp->dmai_offset = 0;
15237c478bd9Sstevel@tonic-gate 			*nwin = (ddi_dma_win_t)mp;
15247c478bd9Sstevel@tonic-gate 		}
15257c478bd9Sstevel@tonic-gate 		break;
15267c478bd9Sstevel@tonic-gate 	}
15277c478bd9Sstevel@tonic-gate 
15287c478bd9Sstevel@tonic-gate 	case DDI_DMA_NEXTSEG:
15297c478bd9Sstevel@tonic-gate 	{
15307c478bd9Sstevel@tonic-gate 		ddi_dma_seg_t *oseg, *nseg;
15317c478bd9Sstevel@tonic-gate 
15327c478bd9Sstevel@tonic-gate 		DPRINTF(IOMMU_DMAMCTL_NEXTSEG_DEBUG, ("nextseg:\n"));
15337c478bd9Sstevel@tonic-gate 
15347c478bd9Sstevel@tonic-gate 		oseg = (ddi_dma_seg_t *)lenp;
15357c478bd9Sstevel@tonic-gate 		if (*oseg != NULL)
15367c478bd9Sstevel@tonic-gate 			return (DDI_DMA_DONE);
15377c478bd9Sstevel@tonic-gate 		nseg = (ddi_dma_seg_t *)objp;
15387c478bd9Sstevel@tonic-gate 		*nseg = *((ddi_dma_seg_t *)offp);
15397c478bd9Sstevel@tonic-gate 		break;
15407c478bd9Sstevel@tonic-gate 	}
15417c478bd9Sstevel@tonic-gate 
15427c478bd9Sstevel@tonic-gate 	case DDI_DMA_SEGTOC:
15437c478bd9Sstevel@tonic-gate 	{
15447c478bd9Sstevel@tonic-gate 		ddi_dma_seg_impl_t *seg;
15457c478bd9Sstevel@tonic-gate 
15467c478bd9Sstevel@tonic-gate 		seg = (ddi_dma_seg_impl_t *)handle;
15477c478bd9Sstevel@tonic-gate 		cp = (ddi_dma_cookie_t *)objp;
15487c478bd9Sstevel@tonic-gate 		cp->dmac_notused = 0;
15497c478bd9Sstevel@tonic-gate 		cp->dmac_address = (ioaddr_t)seg->dmai_mapping;
15507c478bd9Sstevel@tonic-gate 		cp->dmac_size = *lenp = seg->dmai_size;
15517c478bd9Sstevel@tonic-gate 		cp->dmac_type = 0;
15527c478bd9Sstevel@tonic-gate 		*offp = seg->dmai_offset;
15537c478bd9Sstevel@tonic-gate 		break;
15547c478bd9Sstevel@tonic-gate 	}
15557c478bd9Sstevel@tonic-gate 
15567c478bd9Sstevel@tonic-gate 	case DDI_DMA_MOVWIN:
15577c478bd9Sstevel@tonic-gate 	{
15587c478bd9Sstevel@tonic-gate 		uint_t winsize;
15597c478bd9Sstevel@tonic-gate 		uint_t newoff;
15607c478bd9Sstevel@tonic-gate 		int rval;
15617c478bd9Sstevel@tonic-gate 
15627c478bd9Sstevel@tonic-gate 		offset = (uint_t)(mp->dmai_mapping & IOMMU_PAGEOFFSET);
15637c478bd9Sstevel@tonic-gate 		winsize = iommu_ptob(mp->dmai_ndvmapages - iommu_btopr(offset));
15647c478bd9Sstevel@tonic-gate 
1565*ffadc26eSmike_s 		DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("movwin off %lx len %lx "
15667c478bd9Sstevel@tonic-gate 		    "winsize %x\n", *offp, *lenp, winsize));
15677c478bd9Sstevel@tonic-gate 
15687c478bd9Sstevel@tonic-gate 		if ((mp->dmai_rflags & DDI_DMA_PARTIAL) == 0)
15697c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
15707c478bd9Sstevel@tonic-gate 
15717c478bd9Sstevel@tonic-gate 		if (*lenp != (uint_t)-1 && *lenp != winsize) {
15727c478bd9Sstevel@tonic-gate 			DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("bad length\n"));
15737c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
15747c478bd9Sstevel@tonic-gate 		}
15757c478bd9Sstevel@tonic-gate 		newoff = (uint_t)*offp;
15767c478bd9Sstevel@tonic-gate 		if (newoff & (winsize - 1)) {
15777c478bd9Sstevel@tonic-gate 			DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("bad off\n"));
15787c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
15797c478bd9Sstevel@tonic-gate 		}
15807c478bd9Sstevel@tonic-gate 
15817c478bd9Sstevel@tonic-gate 		if (newoff == mp->dmai_offset) {
15827c478bd9Sstevel@tonic-gate 			/*
15837c478bd9Sstevel@tonic-gate 			 * Nothing to do...
15847c478bd9Sstevel@tonic-gate 			 */
15857c478bd9Sstevel@tonic-gate 			break;
15867c478bd9Sstevel@tonic-gate 		}
15877c478bd9Sstevel@tonic-gate 
15887c478bd9Sstevel@tonic-gate 		/*
15897c478bd9Sstevel@tonic-gate 		 * Check out new address...
15907c478bd9Sstevel@tonic-gate 		 */
15917c478bd9Sstevel@tonic-gate 		if (newoff > mp->dmai_object.dmao_size - mp->dmai_minxfer) {
15927c478bd9Sstevel@tonic-gate 			DPRINTF(IOMMU_DMAMCTL_MOVWIN_DEBUG, ("newoff out of "
15937c478bd9Sstevel@tonic-gate 			    "range\n"));
15947c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
15957c478bd9Sstevel@tonic-gate 		}
15967c478bd9Sstevel@tonic-gate 
15977c478bd9Sstevel@tonic-gate 		rval = iommu_map_window(mp, newoff, winsize);
15987c478bd9Sstevel@tonic-gate 		if (rval != DDI_SUCCESS)
15997c478bd9Sstevel@tonic-gate 			return (rval);
16007c478bd9Sstevel@tonic-gate 
16017c478bd9Sstevel@tonic-gate 		if ((cp = (ddi_dma_cookie_t *)objp) != 0) {
16027c478bd9Sstevel@tonic-gate 			cp->dmac_notused = 0;
16037c478bd9Sstevel@tonic-gate 			cp->dmac_address = (ioaddr_t)mp->dmai_mapping;
16047c478bd9Sstevel@tonic-gate 			cp->dmac_size = mp->dmai_size;
16057c478bd9Sstevel@tonic-gate 			cp->dmac_type = 0;
16067c478bd9Sstevel@tonic-gate 		}
16077c478bd9Sstevel@tonic-gate 		*offp = (off_t)newoff;
16087c478bd9Sstevel@tonic-gate 		*lenp = (uint_t)winsize;
16097c478bd9Sstevel@tonic-gate 		break;
16107c478bd9Sstevel@tonic-gate 	}
16117c478bd9Sstevel@tonic-gate 
16127c478bd9Sstevel@tonic-gate 	case DDI_DMA_REPWIN:
16137c478bd9Sstevel@tonic-gate 		if ((mp->dmai_rflags & DDI_DMA_PARTIAL) == 0) {
16147c478bd9Sstevel@tonic-gate 			DPRINTF(IOMMU_DMAMCTL_REPWIN_DEBUG, ("repwin fail\n"));
16157c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
16167c478bd9Sstevel@tonic-gate 		}
16177c478bd9Sstevel@tonic-gate 
16187c478bd9Sstevel@tonic-gate 		*offp = (off_t)mp->dmai_offset;
16197c478bd9Sstevel@tonic-gate 
16207c478bd9Sstevel@tonic-gate 		addr = mp->dmai_ndvmapages -
16217c478bd9Sstevel@tonic-gate 		    iommu_btopr(mp->dmai_mapping & IOMMU_PAGEOFFSET);
16227c478bd9Sstevel@tonic-gate 
16237c478bd9Sstevel@tonic-gate 		*lenp = (uint_t)iommu_ptob(addr);
16247c478bd9Sstevel@tonic-gate 
1625*ffadc26eSmike_s 		DPRINTF(IOMMU_DMAMCTL_REPWIN_DEBUG, ("repwin off %lx len %x\n",
16267c478bd9Sstevel@tonic-gate 		    mp->dmai_offset, mp->dmai_size));
16277c478bd9Sstevel@tonic-gate 
16287c478bd9Sstevel@tonic-gate 		break;
16297c478bd9Sstevel@tonic-gate 
16307c478bd9Sstevel@tonic-gate 	case DDI_DMA_GETERR:
16317c478bd9Sstevel@tonic-gate 		DPRINTF(IOMMU_DMAMCTL_GETERR_DEBUG,
16327c478bd9Sstevel@tonic-gate 		    ("iommu_dma_mctl: geterr\n"));
16337c478bd9Sstevel@tonic-gate 
16347c478bd9Sstevel@tonic-gate 		break;
16357c478bd9Sstevel@tonic-gate 
16367c478bd9Sstevel@tonic-gate 	case DDI_DMA_COFF:
16377c478bd9Sstevel@tonic-gate 		cp = (ddi_dma_cookie_t *)offp;
16387c478bd9Sstevel@tonic-gate 		addr = cp->dmac_address;
16397c478bd9Sstevel@tonic-gate 
16407c478bd9Sstevel@tonic-gate 		if (addr < mp->dmai_mapping ||
16417c478bd9Sstevel@tonic-gate 		    addr >= mp->dmai_mapping + mp->dmai_size)
16427c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
16437c478bd9Sstevel@tonic-gate 
16447c478bd9Sstevel@tonic-gate 		*objp = (caddr_t)(addr - mp->dmai_mapping);
16457c478bd9Sstevel@tonic-gate 
1646*ffadc26eSmike_s 		DPRINTF(IOMMU_DMAMCTL_COFF_DEBUG, ("coff off %lx mapping %lx "
16477c478bd9Sstevel@tonic-gate 		    "size %x\n", (ulong_t)*objp, mp->dmai_mapping,
16487c478bd9Sstevel@tonic-gate 		    mp->dmai_size));
16497c478bd9Sstevel@tonic-gate 
16507c478bd9Sstevel@tonic-gate 		break;
16517c478bd9Sstevel@tonic-gate 
16527c478bd9Sstevel@tonic-gate 	case DDI_DMA_RESERVE:
16537c478bd9Sstevel@tonic-gate 	{
16547c478bd9Sstevel@tonic-gate 		struct ddi_dma_req *dmareq = (struct ddi_dma_req *)offp;
16557c478bd9Sstevel@tonic-gate 		ddi_dma_lim_t *dma_lim;
16567c478bd9Sstevel@tonic-gate 		ddi_dma_handle_t *handlep;
16577c478bd9Sstevel@tonic-gate 		uint_t np;
16587c478bd9Sstevel@tonic-gate 		ioaddr_t ioaddr;
16597c478bd9Sstevel@tonic-gate 		int i;
16607c478bd9Sstevel@tonic-gate 		struct fast_dvma *iommu_fast_dvma;
16617c478bd9Sstevel@tonic-gate 		struct sbus_soft_state *softsp =
16627c478bd9Sstevel@tonic-gate 		    (struct sbus_soft_state *)ddi_get_soft_state(sbusp,
16637c478bd9Sstevel@tonic-gate 		    ddi_get_instance(dip));
16647c478bd9Sstevel@tonic-gate 
16657c478bd9Sstevel@tonic-gate 		/* Some simple sanity checks */
16667c478bd9Sstevel@tonic-gate 		dma_lim = dmareq->dmar_limits;
16677c478bd9Sstevel@tonic-gate 		if (dma_lim->dlim_burstsizes == 0) {
16687c478bd9Sstevel@tonic-gate 			DPRINTF(IOMMU_FASTDMA_RESERVE,
16697c478bd9Sstevel@tonic-gate 			    ("Reserve: bad burstsizes\n"));
16707c478bd9Sstevel@tonic-gate 			return (DDI_DMA_BADLIMITS);
16717c478bd9Sstevel@tonic-gate 		}
16727c478bd9Sstevel@tonic-gate 		if ((AHI <= ALO) || (AHI < softsp->iommu_dvma_base)) {
16737c478bd9Sstevel@tonic-gate 			DPRINTF(IOMMU_FASTDMA_RESERVE,
16747c478bd9Sstevel@tonic-gate 			    ("Reserve: bad limits\n"));
16757c478bd9Sstevel@tonic-gate 			return (DDI_DMA_BADLIMITS);
16767c478bd9Sstevel@tonic-gate 		}
16777c478bd9Sstevel@tonic-gate 
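		/*
		 * For a reserve request the object size is a count of DVMA
		 * pages to set aside; np is used as a page count below.
		 */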
16787c478bd9Sstevel@tonic-gate 		np = dmareq->dmar_object.dmao_size;
16797c478bd9Sstevel@tonic-gate 		mutex_enter(&softsp->dma_pool_lock);
16807c478bd9Sstevel@tonic-gate 		if (np > softsp->dma_reserve) {
16817c478bd9Sstevel@tonic-gate 			mutex_exit(&softsp->dma_pool_lock);
16827c478bd9Sstevel@tonic-gate 			DPRINTF(IOMMU_FASTDMA_RESERVE,
16837c478bd9Sstevel@tonic-gate 			    ("Reserve: dma_reserve is exhausted\n"));
16847c478bd9Sstevel@tonic-gate 			return (DDI_DMA_NORESOURCES);
16857c478bd9Sstevel@tonic-gate 		}
16867c478bd9Sstevel@tonic-gate 
16877c478bd9Sstevel@tonic-gate 		softsp->dma_reserve -= np;
16887c478bd9Sstevel@tonic-gate 		mutex_exit(&softsp->dma_pool_lock);
16897c478bd9Sstevel@tonic-gate 		mp = kmem_zalloc(sizeof (*mp), KM_SLEEP);
16907c478bd9Sstevel@tonic-gate 		mp->dmai_rflags = DMP_BYPASSNEXUS;
16917c478bd9Sstevel@tonic-gate 		mp->dmai_rdip = rdip;
16927c478bd9Sstevel@tonic-gate 		mp->dmai_minxfer = dma_lim->dlim_minxfer;
16937c478bd9Sstevel@tonic-gate 		mp->dmai_burstsizes = dma_lim->dlim_burstsizes;
16947c478bd9Sstevel@tonic-gate 
1695*ffadc26eSmike_s 		ioaddr = (ioaddr_t)(uintptr_t)vmem_xalloc(softsp->dvma_arena,
16967c478bd9Sstevel@tonic-gate 		    iommu_ptob(np), IOMMU_PAGESIZE, 0,
1697*ffadc26eSmike_s 		    dma_lim->dlim_cntr_max + 1,
1698*ffadc26eSmike_s 		    (void *)(uintptr_t)ALO, (void *)(uintptr_t)(AHI + 1),
16997c478bd9Sstevel@tonic-gate 		    dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP);
17007c478bd9Sstevel@tonic-gate 
17017c478bd9Sstevel@tonic-gate 		if (ioaddr == 0) {
17027c478bd9Sstevel@tonic-gate 			mutex_enter(&softsp->dma_pool_lock);
17037c478bd9Sstevel@tonic-gate 			softsp->dma_reserve += np;
17047c478bd9Sstevel@tonic-gate 			mutex_exit(&softsp->dma_pool_lock);
17057c478bd9Sstevel@tonic-gate 			kmem_free(mp, sizeof (*mp));
17067c478bd9Sstevel@tonic-gate 			DPRINTF(IOMMU_FASTDMA_RESERVE,
17077c478bd9Sstevel@tonic-gate 			    ("Reserve: No dvma resources available\n"));
17087c478bd9Sstevel@tonic-gate 			return (DDI_DMA_NOMAPPING);
17097c478bd9Sstevel@tonic-gate 		}
17107c478bd9Sstevel@tonic-gate 
17117c478bd9Sstevel@tonic-gate 		/* create a per request structure */
17127c478bd9Sstevel@tonic-gate 		iommu_fast_dvma = kmem_alloc(sizeof (struct fast_dvma),
17137c478bd9Sstevel@tonic-gate 		    KM_SLEEP);
17147c478bd9Sstevel@tonic-gate 
17157c478bd9Sstevel@tonic-gate 		/*
17167c478bd9Sstevel@tonic-gate 		 * We need to remember the size of the transfer so that
17177c478bd9Sstevel@tonic-gate 		 * we can figure out which virtual pages to sync when the
17187c478bd9Sstevel@tonic-gate 		 * transfer is complete.
17197c478bd9Sstevel@tonic-gate 		 */
17207c478bd9Sstevel@tonic-gate 		iommu_fast_dvma->pagecnt = kmem_zalloc(np *
17217c478bd9Sstevel@tonic-gate 		    sizeof (uint_t), KM_SLEEP);
17227c478bd9Sstevel@tonic-gate 
17237c478bd9Sstevel@tonic-gate 		/* Allocate a streaming cache sync flag for each index */
17247c478bd9Sstevel@tonic-gate 		iommu_fast_dvma->sync_flag = kmem_zalloc(np *
17257c478bd9Sstevel@tonic-gate 		    sizeof (int), KM_SLEEP);
17267c478bd9Sstevel@tonic-gate 
17277c478bd9Sstevel@tonic-gate 		/* Allocate a physical sync flag for each index */
17287c478bd9Sstevel@tonic-gate 		iommu_fast_dvma->phys_sync_flag =
17297c478bd9Sstevel@tonic-gate 		    kmem_zalloc(np * sizeof (uint64_t), KM_SLEEP);
17307c478bd9Sstevel@tonic-gate 
17317c478bd9Sstevel@tonic-gate 		for (i = 0; i < np; i++)
17327c478bd9Sstevel@tonic-gate 			iommu_fast_dvma->phys_sync_flag[i] = va_to_pa((caddr_t)
17337c478bd9Sstevel@tonic-gate 			    &iommu_fast_dvma->sync_flag[i]);
17347c478bd9Sstevel@tonic-gate 
17357c478bd9Sstevel@tonic-gate 		mp->dmai_mapping = ioaddr;
17367c478bd9Sstevel@tonic-gate 		mp->dmai_ndvmapages = np;
17377c478bd9Sstevel@tonic-gate 		iommu_fast_dvma->ops = &iommu_dvma_ops;
17387c478bd9Sstevel@tonic-gate 		iommu_fast_dvma->softsp = (caddr_t)softsp;
17397c478bd9Sstevel@tonic-gate 		mp->dmai_nexus_private = (caddr_t)iommu_fast_dvma;
17407c478bd9Sstevel@tonic-gate 		handlep = (ddi_dma_handle_t *)objp;
17417c478bd9Sstevel@tonic-gate 		*handlep = (ddi_dma_handle_t)mp;
17427c478bd9Sstevel@tonic-gate 
17437c478bd9Sstevel@tonic-gate 		DPRINTF(IOMMU_FASTDMA_RESERVE,
17447c478bd9Sstevel@tonic-gate 		    ("Reserve: mapping object %p base addr %lx size %x\n",
17457c478bd9Sstevel@tonic-gate 		    mp, mp->dmai_mapping, mp->dmai_ndvmapages));
17467c478bd9Sstevel@tonic-gate 
17477c478bd9Sstevel@tonic-gate 		break;
17487c478bd9Sstevel@tonic-gate 	}
17497c478bd9Sstevel@tonic-gate 
17507c478bd9Sstevel@tonic-gate 	case DDI_DMA_RELEASE:
17517c478bd9Sstevel@tonic-gate 	{
17527c478bd9Sstevel@tonic-gate 		ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
17537c478bd9Sstevel@tonic-gate 		uint_t np = npages = mp->dmai_ndvmapages;
17547c478bd9Sstevel@tonic-gate 		ioaddr_t ioaddr = mp->dmai_mapping;
17557c478bd9Sstevel@tonic-gate 		volatile uint64_t *iotte_ptr;
17567c478bd9Sstevel@tonic-gate 		struct fast_dvma *iommu_fast_dvma = (struct fast_dvma *)
17577c478bd9Sstevel@tonic-gate 		    mp->dmai_nexus_private;
17587c478bd9Sstevel@tonic-gate 		struct sbus_soft_state *softsp = (struct sbus_soft_state *)
17597c478bd9Sstevel@tonic-gate 		    iommu_fast_dvma->softsp;
17607c478bd9Sstevel@tonic-gate 
17617c478bd9Sstevel@tonic-gate 		ASSERT(softsp != NULL);
17627c478bd9Sstevel@tonic-gate 
17637c478bd9Sstevel@tonic-gate 		/* Unload stale mappings and flush stale tlb's */
17647c478bd9Sstevel@tonic-gate 		iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
17657c478bd9Sstevel@tonic-gate 
17667c478bd9Sstevel@tonic-gate 		while (npages > (uint_t)0) {
17677c478bd9Sstevel@tonic-gate 			*iotte_ptr = (uint64_t)0;	/* unload tte */
17687c478bd9Sstevel@tonic-gate 			iommu_tlb_flush(softsp, ioaddr, 1);
17697c478bd9Sstevel@tonic-gate 
17707c478bd9Sstevel@tonic-gate 			npages--;
17717c478bd9Sstevel@tonic-gate 			iotte_ptr++;
17727c478bd9Sstevel@tonic-gate 			ioaddr += IOMMU_PAGESIZE;
17737c478bd9Sstevel@tonic-gate 		}
17747c478bd9Sstevel@tonic-gate 
17757c478bd9Sstevel@tonic-gate 		ioaddr = (ioaddr_t)mp->dmai_mapping;
17767c478bd9Sstevel@tonic-gate 		mutex_enter(&softsp->dma_pool_lock);
17777c478bd9Sstevel@tonic-gate 		softsp->dma_reserve += np;
17787c478bd9Sstevel@tonic-gate 		mutex_exit(&softsp->dma_pool_lock);
17797c478bd9Sstevel@tonic-gate 
17807c478bd9Sstevel@tonic-gate 		if (mp->dmai_rflags & DMP_NOLIMIT)
1781*ffadc26eSmike_s 			vmem_free(softsp->dvma_arena,
1782*ffadc26eSmike_s 			    (void *)(uintptr_t)ioaddr, iommu_ptob(np));
17837c478bd9Sstevel@tonic-gate 		else
1784*ffadc26eSmike_s 			vmem_xfree(softsp->dvma_arena,
1785*ffadc26eSmike_s 			    (void *)(uintptr_t)ioaddr, iommu_ptob(np));
17867c478bd9Sstevel@tonic-gate 
17877c478bd9Sstevel@tonic-gate 		kmem_free(mp, sizeof (*mp));
17887c478bd9Sstevel@tonic-gate 		kmem_free(iommu_fast_dvma->pagecnt, np * sizeof (uint_t));
17897c478bd9Sstevel@tonic-gate 		kmem_free(iommu_fast_dvma->sync_flag, np * sizeof (int));
17907c478bd9Sstevel@tonic-gate 		kmem_free(iommu_fast_dvma->phys_sync_flag, np *
17917c478bd9Sstevel@tonic-gate 		    sizeof (uint64_t));
17927c478bd9Sstevel@tonic-gate 		kmem_free(iommu_fast_dvma, sizeof (struct fast_dvma));
17937c478bd9Sstevel@tonic-gate 
17947c478bd9Sstevel@tonic-gate 
17957c478bd9Sstevel@tonic-gate 		DPRINTF(IOMMU_FASTDMA_RESERVE,
17967c478bd9Sstevel@tonic-gate 		    ("Release: Base addr %x size %x\n", ioaddr, np));
17977c478bd9Sstevel@tonic-gate 		/*
17987c478bd9Sstevel@tonic-gate 		 * Now that we've freed some resources,
17997c478bd9Sstevel@tonic-gate 		 * if there is anybody waiting for them,
18007c478bd9Sstevel@tonic-gate 		 * try to get them going.
18017c478bd9Sstevel@tonic-gate 		 */
18027c478bd9Sstevel@tonic-gate 		if (softsp->dvma_call_list_id != 0)
18037c478bd9Sstevel@tonic-gate 			ddi_run_callback(&softsp->dvma_call_list_id);
18047c478bd9Sstevel@tonic-gate 
18057c478bd9Sstevel@tonic-gate 		break;
18067c478bd9Sstevel@tonic-gate 	}
18077c478bd9Sstevel@tonic-gate 
18087c478bd9Sstevel@tonic-gate 	default:
18097c478bd9Sstevel@tonic-gate 		DPRINTF(IOMMU_DMAMCTL_DEBUG, ("iommu_dma_mctl: unknown option "
18107c478bd9Sstevel@tonic-gate 		    "0%x\n", request));
18117c478bd9Sstevel@tonic-gate 
18127c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
18137c478bd9Sstevel@tonic-gate 	}
18147c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
18157c478bd9Sstevel@tonic-gate }
18167c478bd9Sstevel@tonic-gate 
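/*
 * Fast DVMA load: map 'len' bytes of kernel virtual memory starting at 'a'
 * into slot 'index' of a reserved DVMA range and fill in the cookie.
 */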
18177c478bd9Sstevel@tonic-gate /*ARGSUSED*/
18187c478bd9Sstevel@tonic-gate void
18197c478bd9Sstevel@tonic-gate iommu_dvma_kaddr_load(ddi_dma_handle_t h, caddr_t a, uint_t len, uint_t index,
18207c478bd9Sstevel@tonic-gate     ddi_dma_cookie_t *cp)
18217c478bd9Sstevel@tonic-gate {
18227c478bd9Sstevel@tonic-gate 	uintptr_t addr;
18237c478bd9Sstevel@tonic-gate 	ioaddr_t ioaddr;
18247c478bd9Sstevel@tonic-gate 	uint_t offset;
18257c478bd9Sstevel@tonic-gate 	pfn_t pfn;
18267c478bd9Sstevel@tonic-gate 	int npages;
18277c478bd9Sstevel@tonic-gate 	volatile uint64_t *iotte_ptr;
18287c478bd9Sstevel@tonic-gate 	uint64_t iotte_flag = 0;
18297c478bd9Sstevel@tonic-gate 	struct as *as = NULL;
18307c478bd9Sstevel@tonic-gate 	extern struct as kas;
18317c478bd9Sstevel@tonic-gate 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
18327c478bd9Sstevel@tonic-gate 	struct fast_dvma *iommu_fast_dvma =
18337c478bd9Sstevel@tonic-gate 	    (struct fast_dvma *)mp->dmai_nexus_private;
18347c478bd9Sstevel@tonic-gate 	struct sbus_soft_state *softsp = (struct sbus_soft_state *)
18357c478bd9Sstevel@tonic-gate 	    iommu_fast_dvma->softsp;
18367c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE)
18377c478bd9Sstevel@tonic-gate 	struct io_mem_list *iomemp;
18387c478bd9Sstevel@tonic-gate 	pfn_t *pfnp;
18397c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */
18407c478bd9Sstevel@tonic-gate 
18417c478bd9Sstevel@tonic-gate 	ASSERT(softsp != NULL);
18427c478bd9Sstevel@tonic-gate 
18437c478bd9Sstevel@tonic-gate 	addr = (uintptr_t)a;
18447c478bd9Sstevel@tonic-gate 	ioaddr = (ioaddr_t)(mp->dmai_mapping + iommu_ptob(index));
18457c478bd9Sstevel@tonic-gate 	offset = (uint_t)(addr & IOMMU_PAGEOFFSET);
18467c478bd9Sstevel@tonic-gate 	iommu_fast_dvma->pagecnt[index] = iommu_btopr(len + offset);
18477c478bd9Sstevel@tonic-gate 	as = &kas;
18487c478bd9Sstevel@tonic-gate 	addr &= ~IOMMU_PAGEOFFSET;
18497c478bd9Sstevel@tonic-gate 	npages = iommu_btopr(len + offset);
18507c478bd9Sstevel@tonic-gate 
18517c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE)
18527c478bd9Sstevel@tonic-gate 	iomemp = kmem_alloc(sizeof (struct io_mem_list), KM_SLEEP);
18537c478bd9Sstevel@tonic-gate 	iomemp->rdip = mp->dmai_rdip;
18547c478bd9Sstevel@tonic-gate 	iomemp->ioaddr = ioaddr;
18557c478bd9Sstevel@tonic-gate 	iomemp->addr = addr;
18567c478bd9Sstevel@tonic-gate 	iomemp->npages = npages;
18577c478bd9Sstevel@tonic-gate 	pfnp = iomemp->pfn = kmem_zalloc(sizeof (*pfnp) * (npages + 1),
18587c478bd9Sstevel@tonic-gate 	    KM_SLEEP);
18597c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */
18607c478bd9Sstevel@tonic-gate 
18617c478bd9Sstevel@tonic-gate 	cp->dmac_address = ioaddr | offset;
18627c478bd9Sstevel@tonic-gate 	cp->dmac_size = len;
18637c478bd9Sstevel@tonic-gate 
18647c478bd9Sstevel@tonic-gate 	iotte_ptr = IOTTE_NDX(ioaddr, softsp->soft_tsb_base_addr);
18657c478bd9Sstevel@tonic-gate 	/* read/write and streaming io on */
18667c478bd9Sstevel@tonic-gate 	iotte_flag = IOTTE_VALID | IOTTE_WRITE | IOTTE_CACHE;
18677c478bd9Sstevel@tonic-gate 
18687c478bd9Sstevel@tonic-gate 	if (mp->dmai_rflags & DDI_DMA_CONSISTENT)
18697c478bd9Sstevel@tonic-gate 		mp->dmai_rflags |= DMP_NOSYNC;
18707c478bd9Sstevel@tonic-gate 	else if (!softsp->stream_buf_off)
18717c478bd9Sstevel@tonic-gate 		iotte_flag |= IOTTE_STREAM;
18727c478bd9Sstevel@tonic-gate 
18737c478bd9Sstevel@tonic-gate 	DPRINTF(IOMMU_FASTDMA_LOAD, ("kaddr_load: ioaddr %x "
1874*ffadc26eSmike_s 	    "size %x offset %x index %x kaddr %lx\n",
18757c478bd9Sstevel@tonic-gate 	    ioaddr, len, offset, index, addr));
18767c478bd9Sstevel@tonic-gate 	ASSERT(npages > 0);
18777c478bd9Sstevel@tonic-gate 	do {
18787c478bd9Sstevel@tonic-gate 		pfn = hat_getpfnum(as->a_hat, (caddr_t)addr);
18797c478bd9Sstevel@tonic-gate 		if (pfn == PFN_INVALID) {
18807c478bd9Sstevel@tonic-gate 			DPRINTF(IOMMU_FASTDMA_LOAD, ("kaddr_load: invalid pfn "
18817c478bd9Sstevel@tonic-gate 			    "from hat_getpfnum()\n"));
18827c478bd9Sstevel@tonic-gate 		}
18837c478bd9Sstevel@tonic-gate 
18847c478bd9Sstevel@tonic-gate 		iommu_tlb_flush(softsp, ioaddr, 1);
18857c478bd9Sstevel@tonic-gate 
18867c478bd9Sstevel@tonic-gate 		/* load tte */
18877c478bd9Sstevel@tonic-gate 		*iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;
18887c478bd9Sstevel@tonic-gate 
18897c478bd9Sstevel@tonic-gate 		npages--;
18907c478bd9Sstevel@tonic-gate 		iotte_ptr++;
18917c478bd9Sstevel@tonic-gate 
18927c478bd9Sstevel@tonic-gate 		addr += IOMMU_PAGESIZE;
18937c478bd9Sstevel@tonic-gate 		ioaddr += IOMMU_PAGESIZE;
18947c478bd9Sstevel@tonic-gate 
18957c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE)
18967c478bd9Sstevel@tonic-gate 		*pfnp = pfn;
18977c478bd9Sstevel@tonic-gate 		pfnp++;
18987c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */
18997c478bd9Sstevel@tonic-gate 
19007c478bd9Sstevel@tonic-gate 	} while (npages > 0);
19017c478bd9Sstevel@tonic-gate 
19027c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE)
19037c478bd9Sstevel@tonic-gate 	mutex_enter(&softsp->iomemlock);
19047c478bd9Sstevel@tonic-gate 	iomemp->next = softsp->iomem;
19057c478bd9Sstevel@tonic-gate 	softsp->iomem = iomemp;
19067c478bd9Sstevel@tonic-gate 	mutex_exit(&softsp->iomemlock);
19077c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */
19087c478bd9Sstevel@tonic-gate }
19097c478bd9Sstevel@tonic-gate 
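/*
 * Fast DVMA unload: flush the streaming buffer for the pages loaded at
 * 'index' unless the mapping is consistent (DMP_NOSYNC).
 */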
19107c478bd9Sstevel@tonic-gate /*ARGSUSED*/
19117c478bd9Sstevel@tonic-gate void
19127c478bd9Sstevel@tonic-gate iommu_dvma_unload(ddi_dma_handle_t h, uint_t index, uint_t view)
19137c478bd9Sstevel@tonic-gate {
19147c478bd9Sstevel@tonic-gate 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
19157c478bd9Sstevel@tonic-gate 	ioaddr_t ioaddr;
19167c478bd9Sstevel@tonic-gate 	pgcnt_t npages;
19177c478bd9Sstevel@tonic-gate 	struct fast_dvma *iommu_fast_dvma =
19187c478bd9Sstevel@tonic-gate 	    (struct fast_dvma *)mp->dmai_nexus_private;
19197c478bd9Sstevel@tonic-gate 	struct sbus_soft_state *softsp = (struct sbus_soft_state *)
19207c478bd9Sstevel@tonic-gate 	    iommu_fast_dvma->softsp;
19217c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE)
19227c478bd9Sstevel@tonic-gate 	struct io_mem_list **prevp, *walk;
19237c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */
19247c478bd9Sstevel@tonic-gate 
19257c478bd9Sstevel@tonic-gate 	ASSERT(softsp != NULL);
19267c478bd9Sstevel@tonic-gate 
19277c478bd9Sstevel@tonic-gate 	ioaddr = (ioaddr_t)(mp->dmai_mapping + iommu_ptob(index));
19287c478bd9Sstevel@tonic-gate 	npages = iommu_fast_dvma->pagecnt[index];
19297c478bd9Sstevel@tonic-gate 
19307c478bd9Sstevel@tonic-gate #if defined(DEBUG) && defined(IO_MEMUSAGE)
19317c478bd9Sstevel@tonic-gate 	mutex_enter(&softsp->iomemlock);
19327c478bd9Sstevel@tonic-gate 	prevp = &softsp->iomem;
19337c478bd9Sstevel@tonic-gate 	walk = softsp->iomem;
19347c478bd9Sstevel@tonic-gate 
19357c478bd9Sstevel@tonic-gate 	while (walk != NULL) {
19367c478bd9Sstevel@tonic-gate 		if (walk->ioaddr == ioaddr) {
19377c478bd9Sstevel@tonic-gate 			*prevp = walk->next;
19387c478bd9Sstevel@tonic-gate 			break;
19397c478bd9Sstevel@tonic-gate 		}
19407c478bd9Sstevel@tonic-gate 		prevp = &walk->next;
19417c478bd9Sstevel@tonic-gate 		walk = walk->next;
19427c478bd9Sstevel@tonic-gate 	}
19437c478bd9Sstevel@tonic-gate 	mutex_exit(&softsp->iomemlock);
19447c478bd9Sstevel@tonic-gate 
19457c478bd9Sstevel@tonic-gate 	kmem_free(walk->pfn, sizeof (pfn_t) * (npages + 1));
19467c478bd9Sstevel@tonic-gate 	kmem_free(walk, sizeof (struct io_mem_list));
19477c478bd9Sstevel@tonic-gate #endif /* DEBUG && IO_MEMUSAGE */
19487c478bd9Sstevel@tonic-gate 
19497c478bd9Sstevel@tonic-gate 	DPRINTF(IOMMU_FASTDMA_SYNC, ("kaddr_unload: handle %p sync flag "
1950*ffadc26eSmike_s 	    "addr %p sync flag pfn %llx index %x page count %lx\n", mp,
19517c478bd9Sstevel@tonic-gate 	    &iommu_fast_dvma->sync_flag[index],
19527c478bd9Sstevel@tonic-gate 	    iommu_fast_dvma->phys_sync_flag[index],
19537c478bd9Sstevel@tonic-gate 	    index, npages));
19547c478bd9Sstevel@tonic-gate 
19557c478bd9Sstevel@tonic-gate 	if ((mp->dmai_rflags & DMP_NOSYNC) != DMP_NOSYNC) {
19567c478bd9Sstevel@tonic-gate 		sync_stream_buf(softsp, ioaddr, npages,
19577c478bd9Sstevel@tonic-gate 			(int *)&iommu_fast_dvma->sync_flag[index],
19587c478bd9Sstevel@tonic-gate 			iommu_fast_dvma->phys_sync_flag[index]);
19597c478bd9Sstevel@tonic-gate 	}
19607c478bd9Sstevel@tonic-gate }
19617c478bd9Sstevel@tonic-gate 
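/*
 * Fast DVMA sync: flush the streaming buffer for the pages loaded at
 * 'index'; nothing to do for consistent (DMP_NOSYNC) mappings.
 */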
19627c478bd9Sstevel@tonic-gate /*ARGSUSED*/
19637c478bd9Sstevel@tonic-gate void
19647c478bd9Sstevel@tonic-gate iommu_dvma_sync(ddi_dma_handle_t h, uint_t index, uint_t view)
19657c478bd9Sstevel@tonic-gate {
19667c478bd9Sstevel@tonic-gate 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
19677c478bd9Sstevel@tonic-gate 	ioaddr_t ioaddr;
19687c478bd9Sstevel@tonic-gate 	uint_t npages;
19697c478bd9Sstevel@tonic-gate 	struct fast_dvma *iommu_fast_dvma =
19707c478bd9Sstevel@tonic-gate 	    (struct fast_dvma *)mp->dmai_nexus_private;
19717c478bd9Sstevel@tonic-gate 	struct sbus_soft_state *softsp = (struct sbus_soft_state *)
19727c478bd9Sstevel@tonic-gate 	    iommu_fast_dvma->softsp;
19737c478bd9Sstevel@tonic-gate 
19747c478bd9Sstevel@tonic-gate 	if ((mp->dmai_rflags & DMP_NOSYNC) == DMP_NOSYNC)
19757c478bd9Sstevel@tonic-gate 		return;
19767c478bd9Sstevel@tonic-gate 
19777c478bd9Sstevel@tonic-gate 	ASSERT(softsp != NULL);
19787c478bd9Sstevel@tonic-gate 	ioaddr = (ioaddr_t)(mp->dmai_mapping + iommu_ptob(index));
19797c478bd9Sstevel@tonic-gate 	npages = iommu_fast_dvma->pagecnt[index];
19807c478bd9Sstevel@tonic-gate 
19817c478bd9Sstevel@tonic-gate 	DPRINTF(IOMMU_FASTDMA_SYNC, ("kaddr_sync: handle %p, "
1982*ffadc26eSmike_s 	    "sync flag addr %p, sync flag pfn %llx\n", mp,
19837c478bd9Sstevel@tonic-gate 	    &iommu_fast_dvma->sync_flag[index],
19847c478bd9Sstevel@tonic-gate 	    iommu_fast_dvma->phys_sync_flag[index]));
19857c478bd9Sstevel@tonic-gate 
19867c478bd9Sstevel@tonic-gate 	sync_stream_buf(softsp, ioaddr, npages,
19877c478bd9Sstevel@tonic-gate 	    (int *)&iommu_fast_dvma->sync_flag[index],
19887c478bd9Sstevel@tonic-gate 	    iommu_fast_dvma->phys_sync_flag[index]);
19897c478bd9Sstevel@tonic-gate }
1990