xref: /titanic_51/usr/src/uts/sun4u/io/pci/pci_fdvma.c (revision 0ea4884762cfa870ddc7a4eefff4166d5fdb3e09)
17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate  * CDDL HEADER START
37c478bd9Sstevel@tonic-gate  *
47c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5d0662dbfSelowe  * Common Development and Distribution License (the "License").
6d0662dbfSelowe  * You may not use this file except in compliance with the License.
77c478bd9Sstevel@tonic-gate  *
87c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
107c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
117c478bd9Sstevel@tonic-gate  * and limitations under the License.
127c478bd9Sstevel@tonic-gate  *
137c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
147c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
167c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
177c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bd9Sstevel@tonic-gate  *
197c478bd9Sstevel@tonic-gate  * CDDL HEADER END
207c478bd9Sstevel@tonic-gate  */
217c478bd9Sstevel@tonic-gate /*
22*0ea48847SBhaskar Sarkar  * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
237c478bd9Sstevel@tonic-gate  */
247c478bd9Sstevel@tonic-gate 
257c478bd9Sstevel@tonic-gate /*
267c478bd9Sstevel@tonic-gate  * Internal PCI Fast DVMA implementation
277c478bd9Sstevel@tonic-gate  */
287c478bd9Sstevel@tonic-gate #include <sys/types.h>
297c478bd9Sstevel@tonic-gate #include <sys/kmem.h>
307c478bd9Sstevel@tonic-gate #include <sys/async.h>
317c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h>
327c478bd9Sstevel@tonic-gate #include <sys/sunddi.h>
337c478bd9Sstevel@tonic-gate #include <sys/ddi_impldefs.h>
347c478bd9Sstevel@tonic-gate #include <sys/dvma.h>
357c478bd9Sstevel@tonic-gate #include <vm/hat.h>
367c478bd9Sstevel@tonic-gate #include <sys/pci/pci_obj.h>
377c478bd9Sstevel@tonic-gate 
387c478bd9Sstevel@tonic-gate /*LINTLIBRARY*/
397c478bd9Sstevel@tonic-gate 
407c478bd9Sstevel@tonic-gate static struct dvma_ops fdvma_ops;
417c478bd9Sstevel@tonic-gate 
427c478bd9Sstevel@tonic-gate /*
437c478bd9Sstevel@tonic-gate  * The following routines are used to implement the sun4u fast dvma
447c478bd9Sstevel@tonic-gate  * routines on this bus.
457c478bd9Sstevel@tonic-gate  */
467c478bd9Sstevel@tonic-gate 
/*ARGSUSED*/
/*
 * pci_fdvma_load: load one kernel-virtual buffer into a pre-reserved
 * fast-DVMA slot.
 *
 *	h	- fast DVMA handle returned by pci_fdvma_reserve()
 *	a	- kernel virtual address of the buffer to map
 *	len	- length of the buffer in bytes
 *	index	- page index into the reserved DVMA range at which to map
 *	cp	- caller-supplied cookie; filled in with the DVMA address
 *		  (page base | page offset) and length
 *
 * Writes one IOMMU TSB entry per page and flushes each, then records
 * the mapping in fdvma_p->kvbase[]/pagecnt[] so unload/sync can find it.
 * On failure only a warning (or panic, per pci_panic_on_fatal_errors)
 * is issued; no status is returned to the caller.
 */
static void
pci_fdvma_load(ddi_dma_handle_t h, caddr_t a, uint_t len, uint_t index,
	ddi_dma_cookie_t *cp)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
	fdvma_t *fdvma_p = (fdvma_t *)mp->dmai_fdvma;
	pci_t *pci_p = (pci_t *)fdvma_p->softsp;
	iommu_t *iommu_p = pci_p->pci_iommu_p;
	dev_info_t *dip = pci_p->pci_dip;
	dvma_addr_t dvma_addr, dvma_pg;
	/* page-aligned base of the caller's buffer; offset kept separately */
	caddr_t baseaddr = (caddr_t)((uintptr_t)a & PAGEMASK);
	uint32_t offset;
	size_t npages, pg_index;
	pfn_t pfn;
	int i;	/* NOTE(review): signed, compared against size_t npages below */
	uint64_t tte;

	offset = (uint32_t)(uintptr_t)a & IOMMU_PAGE_OFFSET;
	npages = IOMMU_BTOPR(len + offset);	/* pages, rounded up */
	if (!npages)
		return;

	/* make sure we don't exceed reserved boundary */
	DEBUG3(DBG_FAST_DVMA, dip, "load index=%x: %p+%x ", index, a, len);
	if (index + npages > mp->dmai_ndvmapages) {
		cmn_err(pci_panic_on_fatal_errors ? CE_PANIC : CE_WARN,
			"%s%d: kaddr_load index(%x)+pgs(%lx) exceeds limit\n",
			ddi_driver_name(dip), ddi_get_instance(dip),
			index, npages);
		return;
	}

	/* better have not already loaded something at this address */
	ASSERT(fdvma_p->kvbase[index] == NULL);
	ASSERT(fdvma_p->pagecnt[index] == 0);

	/* DVMA address for this slot, and its index into the IOMMU TSB */
	dvma_addr = mp->dmai_mapping + IOMMU_PTOB(index);
	dvma_pg = IOMMU_BTOP(dvma_addr);
	pg_index = dvma_pg - iommu_p->dvma_base_pg;

	/* construct the dma cookie to be returned */
	MAKE_DMA_COOKIE(cp, dvma_addr | offset, len);
	DEBUG2(DBG_FAST_DVMA | DBG_CONT, dip, "cookie: %x+%x\n",
		cp->dmac_address, cp->dmac_size);

	for (i = 0, a = baseaddr; i < npages; i++, a += IOMMU_PAGE_SIZE) {
		if (pci_dvma_remap_enabled) {
			uint_t flags = HAC_NOSLEEP | HAC_PAGELOCK;

			/*
			 * Register a relocation callback for this page;
			 * hat_add_callback() also returns the pfn via &pfn,
			 * so hat_getpfnum() is not needed on this path.
			 */
			(void) hat_add_callback(pci_fast_dvma_cbid, a,
			    IOMMU_PAGE_SIZE, flags, mp, &pfn,
			    &fdvma_p->cbcookie[index + i]);

			mp->dmai_flags |= DMAI_FLAGS_RELOC;
		} else {
			pfn = hat_getpfnum(kas.a_hat, a);
		}
		if (pfn == PFN_INVALID)
			goto bad_pfn;

		if (i == 0)	/* setup template, all bits except pfn value */
			tte = MAKE_TTE_TEMPLATE((iopfn_t)pfn, mp);

		/* XXX assumes iommu and mmu has same page size */
		iommu_p->iommu_tsb_vaddr[pg_index + i] = tte | IOMMU_PTOB(pfn);
		IOMMU_PAGE_FLUSH(iommu_p, (dvma_pg + i));
	}

	mp->dmai_flags |= DMAI_FLAGS_MAPPED;
	fdvma_p->kvbase[index] = baseaddr;
	fdvma_p->pagecnt[index] = npages;

	return;
bad_pfn:
	/*
	 * NOTE(review): TTEs and callbacks already installed for pages
	 * 0..i-1 are not unwound here; kvbase/pagecnt also stay clear,
	 * so a later unload of this index would see npg == 0.  Verify
	 * whether partial loads can occur in practice.
	 */
	cmn_err(CE_WARN, "%s%d: kaddr_load can't get page frame for vaddr %x",
		ddi_driver_name(dip), ddi_get_instance(dip), (int)(uintptr_t)a);
}
1257c478bd9Sstevel@tonic-gate 
/*ARGSUSED*/
/*
 * pci_fdvma_unload: tear down the mapping previously installed at
 * `index' by pci_fdvma_load(), optionally syncing it first.
 *
 *	h		- fast DVMA handle
 *	index		- page index into the reserved range
 *	sync_flags	- passed through to pci_dma_sync(); the special
 *			  value (uint_t)-1 skips the sync entirely
 *
 * The global tunable pci_dvma_sync_before_unmap selects the order:
 * either unmap-then-sync (default path, first if-block) or
 * sync-then-unmap (second if-block).  Exactly one of the two unmap
 * blocks runs; both also unregister HAT relocation callbacks when the
 * handle is relocatable (PCI_DMA_CANRELOC).
 */
static void
pci_fdvma_unload(ddi_dma_handle_t h, uint_t index, uint_t sync_flags)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
	fdvma_t *fdvma_p = (fdvma_t *)mp->dmai_fdvma;
	pci_t *pci_p = (pci_t *)fdvma_p->softsp;
	size_t npg = fdvma_p->pagecnt[index];	/* pages loaded at index */

	dvma_addr_t dvma_pg = IOMMU_BTOP(mp->dmai_mapping + IOMMU_PTOB(index));

	DEBUG5(DBG_FAST_DVMA, pci_p->pci_dip,
		"unload index=%x flags=%x %x+%x+%x\n", index, sync_flags,
		mp->dmai_mapping, IOMMU_PTOB(index), IOMMU_PTOB(npg));

	/* default order: unmap the IOMMU pages before the sync */
	if (!pci_dvma_sync_before_unmap) {
		if (PCI_DMA_CANRELOC(mp))
			pci_fdvma_unregister_callbacks(pci_p, fdvma_p, mp,
				index);
		fdvma_p->kvbase[index] = NULL;
		iommu_unmap_pages(pci_p->pci_iommu_p, dvma_pg, npg);
	}
	if (sync_flags != -1)
		pci_dma_sync(pci_p->pci_dip, mp->dmai_rdip, h,
			IOMMU_PTOB(index), IOMMU_PTOB(npg), sync_flags);
	/* alternate order: sync while still mapped, then unmap */
	if (pci_dvma_sync_before_unmap) {
		if (PCI_DMA_CANRELOC(mp))
			pci_fdvma_unregister_callbacks(pci_p, fdvma_p, mp,
				index);
		fdvma_p->kvbase[index] = NULL;
		iommu_unmap_pages(pci_p->pci_iommu_p, dvma_pg, npg);
	}
	fdvma_p->pagecnt[index] = 0;	/* slot is free for a new load */
}
1607c478bd9Sstevel@tonic-gate 
1617c478bd9Sstevel@tonic-gate /*ARGSUSED*/
1627c478bd9Sstevel@tonic-gate static void
1637c478bd9Sstevel@tonic-gate pci_fdvma_sync(ddi_dma_handle_t h, uint_t index, uint_t sync_flags)
1647c478bd9Sstevel@tonic-gate {
1657c478bd9Sstevel@tonic-gate 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)h;
1667c478bd9Sstevel@tonic-gate 	fdvma_t *fdvma_p = (fdvma_t *)mp->dmai_fdvma;
1677c478bd9Sstevel@tonic-gate 	pci_t *pci_p = (pci_t *)fdvma_p->softsp;
1687c478bd9Sstevel@tonic-gate 	size_t npg = fdvma_p->pagecnt[index];
1697c478bd9Sstevel@tonic-gate 
1707c478bd9Sstevel@tonic-gate 	DEBUG5(DBG_FAST_DVMA, pci_p->pci_dip,
1717c478bd9Sstevel@tonic-gate 		"sync index=%x flags=%x %x+%x+%x\n", index, sync_flags,
1727c478bd9Sstevel@tonic-gate 		mp->dmai_mapping, IOMMU_PTOB(index), IOMMU_PTOB(npg));
1737c478bd9Sstevel@tonic-gate 	pci_dma_sync(pci_p->pci_dip, mp->dmai_rdip, h, IOMMU_PTOB(index),
1747c478bd9Sstevel@tonic-gate 		IOMMU_PTOB(npg), sync_flags);
1757c478bd9Sstevel@tonic-gate }
1767c478bd9Sstevel@tonic-gate 
/*
 * pci_fdvma_reserve: implement DDI_DMA_RESERVE for this nexus.
 *
 *	dip	- nexus (pci) devinfo
 *	rdip	- requesting (leaf) device devinfo
 *	pci_p	- per-nexus soft state
 *	dmareq	- DMA request; dmar_object.dmao_size carries the number
 *		  of DVMA *pages* requested (it is passed to IOMMU_PTOB()
 *		  and stored in dmai_ndvmapages below)
 *	handlep	- out: the new fast DVMA handle
 *
 * Charges the request against the IOMMU's DVMA reserve pool, carves a
 * contiguous DVMA range out of the vmem arena honoring the caller's
 * ddi_dma_lim_t, and builds the handle + fdvma_t bookkeeping arrays
 * consumed by pci_fdvma_load/unload/sync.
 *
 * Returns DDI_SUCCESS, DDI_FAILURE (fdvma disabled), DDI_DMA_BADLIMITS,
 * DDI_DMA_NORESOURCES (reserve pool exhausted), or DDI_DMA_NOMAPPING
 * (vmem allocation failed).
 */
int
pci_fdvma_reserve(dev_info_t *dip, dev_info_t *rdip, pci_t *pci_p,
	ddi_dma_req_t *dmareq, ddi_dma_handle_t *handlep)
{
	fdvma_t *fdvma_p;
	dvma_addr_t dvma_pg;
	iommu_t *iommu_p = pci_p->pci_iommu_p;
	size_t npages;
	ddi_dma_impl_t *mp;
	ddi_dma_lim_t *lim_p = dmareq->dmar_limits;
	ulong_t hi = lim_p->dlim_addr_hi;
	ulong_t lo = lim_p->dlim_addr_lo;
	/* page-aligned "nocross" boundary for vmem_xalloc() */
	size_t counter_max = (lim_p->dlim_cntr_max + 1) & IOMMU_PAGE_MASK;

	if (pci_disable_fdvma)
		return (DDI_FAILURE);

	DEBUG2(DBG_DMA_CTL, dip, "DDI_DMA_RESERVE: rdip=%s%d\n",
		ddi_driver_name(rdip), ddi_get_instance(rdip));

	/*
	 * Check the limit structure.
	 */
	if ((lo >= hi) || (hi < iommu_p->iommu_dvma_base))
		return (DDI_DMA_BADLIMITS);

	/*
	 * Allocate DVMA space from reserve.
	 */
	npages = dmareq->dmar_object.dmao_size;
	/*
	 * Atomically debit the reserve pool; if it went negative we
	 * overdrew, so credit it back and fail.
	 */
	if ((long)atomic_add_long_nv(&iommu_p->iommu_dvma_reserve,
	    -npages) < 0) {
		atomic_add_long(&iommu_p->iommu_dvma_reserve, npages);
		return (DDI_DMA_NORESOURCES);
	}

	/*
	 * Allocate the dma handle.
	 */
	mp = kmem_zalloc(sizeof (pci_dma_hdl_t), KM_SLEEP);

	/*
	 * Get entries from dvma space map.
	 * (vmem_t *vmp,
	 *	size_t size, size_t align, size_t phase,
	 *	size_t nocross, void *minaddr, void *maxaddr, int vmflag)
	 */
	dvma_pg = IOMMU_BTOP((ulong_t)vmem_xalloc(iommu_p->iommu_dvma_map,
		IOMMU_PTOB(npages), IOMMU_PAGE_SIZE, 0,
		counter_max, (void *)lo, (void *)(hi + 1),
		dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP));
	if (dvma_pg == 0) {
		/* undo the reserve debit before failing */
		atomic_add_long(&iommu_p->iommu_dvma_reserve, npages);
		kmem_free(mp, sizeof (pci_dma_hdl_t));
		return (DDI_DMA_NOMAPPING);
	}

	/*
	 * Create the fast dvma request structure.
	 * kvbase/pagecnt/cbcookie are per-slot arrays indexed by the
	 * `index' argument of the load/unload/sync ops.
	 */
	fdvma_p = kmem_alloc(sizeof (fdvma_t), KM_SLEEP);
	fdvma_p->kvbase = kmem_zalloc(npages * sizeof (caddr_t), KM_SLEEP);
	fdvma_p->pagecnt = kmem_zalloc(npages * sizeof (uint_t), KM_SLEEP);
	fdvma_p->cbcookie = kmem_zalloc(npages * sizeof (void *), KM_SLEEP);
	fdvma_p->ops = &fdvma_ops;
	fdvma_p->softsp = (caddr_t)pci_p;
	fdvma_p->sync_flag = NULL;

	/*
	 * Initialize the handle.  DMP_BYPASSNEXUS marks it as a fast
	 * DVMA handle (validated in pci_fdvma_release()).
	 */
	mp->dmai_rdip = rdip;
	mp->dmai_rflags = DMP_BYPASSNEXUS |
		pci_dma_consist_check(dmareq->dmar_flags, pci_p->pci_pbm_p);
	if (!(dmareq->dmar_flags & DDI_DMA_RDWR))
		mp->dmai_rflags |= DDI_DMA_READ;
	mp->dmai_flags = DMAI_FLAGS_INUSE |
		(mp->dmai_rflags & DMP_NOSYNC ? DMAI_FLAGS_NOSYNC : 0);
	mp->dmai_minxfer = dmareq->dmar_limits->dlim_minxfer;
	mp->dmai_burstsizes = dmareq->dmar_limits->dlim_burstsizes;
	mp->dmai_mapping = IOMMU_PTOB(dvma_pg);
	mp->dmai_ndvmapages = npages;
	mp->dmai_size = npages * IOMMU_PAGE_SIZE;
	mp->dmai_nwin = 0;
	mp->dmai_fdvma = (caddr_t)fdvma_p;

	DEBUG4(DBG_DMA_CTL, dip,
		"PCI_DVMA_RESERVE: mp=%p dvma=%x npages=%x private=%p\n",
		mp, mp->dmai_mapping, npages, fdvma_p);
	*handlep = (ddi_dma_handle_t)mp;
	return (DDI_SUCCESS);
}
2697c478bd9Sstevel@tonic-gate 
/*
 * pci_fdvma_release: release a fast DVMA handle created by
 * pci_fdvma_reserve().
 *
 *	dip	- nexus (pci) devinfo
 *	pci_p	- per-nexus soft state
 *	mp	- the handle being released
 *
 * Syncs/unmaps any remaining mappings, returns the DVMA range to the
 * vmem arena and the page count to the reserve pool, frees all
 * bookkeeping memory, and kicks any callbacks waiting for DVMA space
 * or kmem.  Returns DDI_SUCCESS, or DDI_FAILURE if fdvma is disabled
 * or the handle is not a fast DVMA handle.
 */
int
pci_fdvma_release(dev_info_t *dip, pci_t *pci_p, ddi_dma_impl_t *mp)
{
	iommu_t *iommu_p = pci_p->pci_iommu_p;
	size_t npages;
	fdvma_t *fdvma_p = (fdvma_t *)mp->dmai_fdvma;

	if (pci_disable_fdvma)
		return (DDI_FAILURE);

	/* validate fdvma handle: DMP_BYPASSNEXUS is set by reserve */
	if (!(mp->dmai_rflags & DMP_BYPASSNEXUS)) {
		DEBUG0(DBG_DMA_CTL, dip, "DDI_DMA_RELEASE: not fast dma\n");
		return (DDI_FAILURE);
	}

	/* flush all reserved dvma addresses from iommu */
	pci_dma_sync_unmap(dip, mp->dmai_rdip, mp);

	npages = mp->dmai_ndvmapages;
	pci_vmem_free(iommu_p, mp, (void *)mp->dmai_mapping, npages);

	/* credit the pages back to the reserve pool debited at reserve time */
	atomic_add_long(&iommu_p->iommu_dvma_reserve, npages);
	mp->dmai_ndvmapages = 0;

	/* see if there is anyone waiting for dvma space */
	if (iommu_p->iommu_dvma_clid != 0) {
		DEBUG0(DBG_DMA_CTL, dip, "run dvma callback\n");
		ddi_run_callback(&iommu_p->iommu_dvma_clid);
	}

	/* free data structures (sizes mirror the allocations in reserve) */
	kmem_free(fdvma_p->kvbase, npages * sizeof (caddr_t));
	kmem_free(fdvma_p->pagecnt, npages * sizeof (uint_t));
	kmem_free(fdvma_p->cbcookie, npages * sizeof (void *));
	kmem_free(fdvma_p, sizeof (fdvma_t));
	kmem_free(mp, sizeof (pci_dma_hdl_t));

	/* see if there is anyone waiting for kmem */
	if (pci_kmem_clid != 0) {
		DEBUG0(DBG_DMA_CTL, dip, "run handle callback\n");
		ddi_run_callback(&pci_kmem_clid);
	}
	return (DDI_SUCCESS);
}
3157c478bd9Sstevel@tonic-gate 
/*
 * fast dvma ops structure:
 * Installed into each handle's fdvma_t by pci_fdvma_reserve() so that
 * leaf drivers using the fast DVMA interface dispatch directly to the
 * routines above.
 */
static struct dvma_ops fdvma_ops = {
	DVMAO_REV,		/* dvma interface revision */
	pci_fdvma_load,		/* map one buffer at a slot index */
	pci_fdvma_unload,	/* sync + unmap a slot */
	pci_fdvma_sync		/* sync a slot without unmapping */
};
325