xref: /linux/arch/sparc/mm/iommu.c (revision 5da444aae54f64575a60f1d596ed7706e3089fb0)
/*
 * iommu.c:  IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev     (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>

/*
 * This can be sized dynamically, but we will do this
 * only when we have guidance about actual I/O pressures.
 */
#define IOMMU_RNGE	IOMMU_RNGE_256MB
#define IOMMU_START	0xF0000000
#define IOMMU_WINSIZE	(256*1024*1024U)
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
#define IOMMU_ORDER	6				/* 4096 * (1<<6) */
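
/*
 * A quick sanity check on the numbers above (assuming the usual sparc32
 * 4K PAGE_SIZE and a 4-byte iopte_t):
 *
 *   IOMMU_NPTES = 256MB / 4KB              = 65536 entries ("64K PTEs")
 *   table size  = 65536 * sizeof(iopte_t)  = 256KB
 *   IOMMU_ORDER = 6  ->  (1 << 6) * 4096   = 256KB
 *
 * so a single order-6 __get_free_pages() allocation below is exactly one
 * page table, naturally aligned to its own size by the buddy allocator.
 */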

/* srmmu.c */
extern int viking_mxcc_present;
extern int flush_page_for_dma_global;
static int viking_flush;
/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);

/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;		/* Consistent mapping iopte flags */
static pgprot_t dvma_prot;		/* Consistent mapping pte flags */

#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
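/*
 * What MKIOPTE() builds: with the 4K PAGE_SHIFT, (pfn << 8) equals the
 * physical address shifted right by 4, which is the form the hardware
 * expects in the IOPTE_PAGE field (compare iommu->regs->base below, which
 * likewise stores __pa(page_table) >> 4).  The requested permission bits
 * are OR'd in and the IOPTE_WAZ ("write as zero") bits are forced clear.
 */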

static void __init sbus_iommu_init(struct platform_device *op)
{
	struct iommu_struct *iommu;
	unsigned int impl, vers;
	unsigned long *bitmap;
	unsigned long tmp;

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL);
	if (!iommu) {
		prom_printf("Unable to allocate iommu structure\n");
		prom_halt();
	}

	iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
				 "iommu_regs");
	if (!iommu->regs) {
		prom_printf("Cannot map IOMMU registers\n");
		prom_halt();
	}
	impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
	vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
	tmp = iommu->regs->control;
	tmp &= ~(IOMMU_CTRL_RNGE);
	tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
	iommu->regs->control = tmp;
	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;

	/* Allocate IOMMU page table */
	/* Stupid alignment constraints give me a headache.
	 * We need a 256K, 512K, 1M or 2M area aligned to its size,
	 * and the current page allocator fortunately gives it to us.
	 */
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%lx]\n",
			    IOMMU_NPTES * sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;

	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
	flush_cache_all();
	flush_tlb_all();
	iommu->regs->base = __pa((unsigned long) iommu->page_table) >> 4;
	iommu_invalidate(iommu->regs);

	bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES>>3));
		prom_halt();
	}
	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
	/* To be coherent on HyperSparc, the page color of DVMA
	 * and physical addresses must match.
	 */
	if (srmmu_modtype == HyperSparc)
		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
	else
		iommu->usemap.num_colors = 1;

	printk(KERN_INFO "IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
	       impl, vers, iommu->page_table,
	       (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);

	op->dev.archdata.iommu = iommu;
}

static int __init iommu_init(void)
{
	struct device_node *dp;

	for_each_node_by_name(dp, "iommu") {
		struct platform_device *op = of_find_device_by_node(dp);

		sbus_iommu_init(op);
		of_propagate_archdata(op);
	}

	return 0;
}

subsys_initcall(iommu_init);

/* Flush the iotlb entries to ram. */
/* This could be better if we didn't have to flush whole pages. */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)iopte;
	end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
	start &= PAGE_MASK;
	if (viking_mxcc_present) {
		while (start < end) {
			viking_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	} else if (viking_flush) {
		while (start < end) {
			viking_flush_page(start);
			start += PAGE_SIZE;
		}
	} else {
		while (start < end) {
			__flush_page_to_ram(start);
			start += PAGE_SIZE;
		}
	}
}

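/*
 * Allocate @npages consecutive IOPTEs for the physically contiguous pages
 * starting at @page and return the base DVMA bus address.  The pfn is
 * passed to bit_map_string_get() as the page color so that, on HyperSparc,
 * the DVMA address gets the same cache color as the physical address (see
 * the num_colors setup in sbus_iommu_init()).  Each IOPTE is written, its
 * IOTLB entry invalidated, and the touched table lines are then pushed
 * out to RAM so the IOMMU sees them.
 */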
static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	iopte_t *iopte, *iopte0;
	unsigned int busa, busa0;
	int i;

	/* page color = pfn of page */
	ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
	if (ioptex < 0)
		panic("iommu out");
	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
	iopte0 = &iommu->page_table[ioptex];

	busa = busa0;
	iopte = iopte0;
	for (i = 0; i < npages; i++) {
		iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
		iopte++;
		page++;
	}

	iommu_flush_iotlb(iopte0, npages);

	return busa0;
}

static u32 iommu_get_scsi_one(struct device *dev, char *vaddr, unsigned int len)
{
	unsigned long off;
	int npages;
	struct page *page;
	u32 busa;

	off = (unsigned long)vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
	page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
	busa = iommu_get_one(dev, page, npages);
	return busa + off;
}

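/*
 * Two flavours of the mapping entry points exist.  The "gflush" variants
 * run on CPUs where flush_page_for_dma() flushes the whole cache no matter
 * which page is passed, so a single flush_page_for_dma(0) before mapping
 * is enough.  The "pflush" variants have to flush every page of the buffer
 * individually before handing it to the IOMMU.  ld_mmu_iommu() installs
 * one set or the other based on flush_page_for_dma_global.
 */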
static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len)
{
	flush_page_for_dma(0);
	return iommu_get_scsi_one(dev, vaddr, len);
}

static __u32 iommu_get_scsi_one_pflush(struct device *dev, char *vaddr, unsigned long len)
{
	unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;

	while (page < ((unsigned long)(vaddr + len))) {
		flush_page_for_dma(page);
		page += PAGE_SIZE;
	}
	return iommu_get_scsi_one(dev, vaddr, len);
}

static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz)
{
	int n;

	flush_page_for_dma(0);
	while (sz != 0) {
		--sz;
		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dma_length = sg->length;
		sg = sg_next(sg);
	}
}

static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg, int sz)
{
	unsigned long page, oldpage = 0;
	int n, i;

	while (sz != 0) {
		--sz;

		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;

		/*
		 * We expect unmapped highmem pages not to be in the cache.
		 * XXX Is this a good assumption?
		 * XXX What if someone else unmaps it here and races us?
		 */
		if ((page = (unsigned long) page_address(sg_page(sg))) != 0) {
			for (i = 0; i < n; i++) {
				if (page != oldpage) {	/* Already flushed? */
					flush_page_for_dma(page);
					oldpage = page;
				}
				page += PAGE_SIZE;
			}
		}

		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dma_length = sg->length;
		sg = sg_next(sg);
	}
}

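/*
 * Tear down a mapping made by iommu_get_one(): clear each IOPTE, invalidate
 * the matching IOTLB entry, and give the range back to the usemap bitmap.
 */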
static void iommu_release_one(struct device *dev, u32 busa, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	int i;

	BUG_ON(busa < iommu->start);
	ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		iopte_val(iommu->page_table[ioptex + i]) = 0;
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
	}
	bit_map_clear(&iommu->usemap, ioptex, npages);
}

static void iommu_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
{
	unsigned long off;
	int npages;

	off = vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
	iommu_release_one(dev, vaddr & PAGE_MASK, npages);
}

static void iommu_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
{
	int n;

	while (sz != 0) {
		--sz;

		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		iommu_release_one(dev, sg->dma_address & PAGE_MASK, n);
		sg->dma_address = 0x21212121;
		sg = sg_next(sg);
	}
}

#ifdef CONFIG_SBUS
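/*
 * Set up a consistent ("coherent") DMA mapping.  For every page of the
 * buffer at kernel address @va, the CPU cache is flushed, the kernel page
 * table entry for @addr is rewritten to point at that page with dvma_prot
 * (non-cacheable on CPUs that need it, see ld_mmu_iommu()), and a matching
 * IOPTE with ioperm_noc permissions is installed.  The resulting IOMMU bus
 * address is returned through @pba.  @va, @addr and @len must all be page
 * aligned.
 */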
static int iommu_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va,
			      unsigned long addr, int len)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned long page, end;
	iopte_t *iopte = iommu->page_table;
	iopte_t *first;
	int ioptex;

	BUG_ON((va & ~PAGE_MASK) != 0);
	BUG_ON((addr & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	/* page color = physical address */
	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
		addr >> PAGE_SHIFT);
	if (ioptex < 0)
		panic("iommu out");

	iopte += ioptex;
	first = iopte;
	end = addr + len;
	while (addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;

			if (viking_mxcc_present)
				viking_mxcc_flush_page(page);
			else if (viking_flush)
				viking_flush_page(page);
			else
				__flush_page_to_ram(page);

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
		}
		iopte_val(*iopte++) =
		    MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* P3: why do we need this?
	 *
	 * DAVEM: Because there are several aspects, none of which
	 *        are handled by a single interface.  Some cpus are
	 *        completely not I/O DMA coherent, and some have
	 *        virtually indexed caches.  The driver DMA flushing
	 *        methods handle the former case, but here during
	 *        IOMMU page table modifications, and usage of non-cacheable
	 *        cpu mappings of pages potentially in the cpu caches, we have
	 *        to handle the latter case as well.
	 */
	flush_cache_all();
	iommu_flush_iotlb(first, len >> PAGE_SHIFT);
	flush_tlb_all();
	iommu_invalidate(iommu->regs);

	*pba = iommu->start + (ioptex << PAGE_SHIFT);
	return 0;
}

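/*
 * Undo iommu_map_dma_area(): clear the IOPTEs for the range, flush the CPU
 * TLBs, invalidate the IOMMU, and release the range in the usemap.  The
 * CPU-side page table entries installed above are not modified here.
 */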
static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	iopte_t *iopte = iommu->page_table;
	unsigned long end;
	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;

	BUG_ON((busa & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	iopte += ioptex;
	end = busa + len;
	while (busa < end) {
		iopte_val(*iopte++) = 0;
		busa += PAGE_SIZE;
	}
	flush_tlb_all();
	iommu_invalidate(iommu->regs);
	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
}
#endif

static const struct sparc32_dma_ops iommu_dma_gflush_ops = {
	.get_scsi_one		= iommu_get_scsi_one_gflush,
	.get_scsi_sgl		= iommu_get_scsi_sgl_gflush,
	.release_scsi_one	= iommu_release_scsi_one,
	.release_scsi_sgl	= iommu_release_scsi_sgl,
#ifdef CONFIG_SBUS
	.map_dma_area		= iommu_map_dma_area,
	.unmap_dma_area		= iommu_unmap_dma_area,
#endif
};

static const struct sparc32_dma_ops iommu_dma_pflush_ops = {
	.get_scsi_one		= iommu_get_scsi_one_pflush,
	.get_scsi_sgl		= iommu_get_scsi_sgl_pflush,
	.release_scsi_one	= iommu_release_scsi_one,
	.release_scsi_sgl	= iommu_release_scsi_sgl,
#ifdef CONFIG_SBUS
	.map_dma_area		= iommu_map_dma_area,
	.unmap_dma_area		= iommu_unmap_dma_area,
#endif
};

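/*
 * Install the IOMMU sparc32_dma_ops at boot.  The gflush/pflush choice
 * follows what flush_page_for_dma() can do on this CPU, and dvma_prot /
 * ioperm_noc decide whether consistent mappings may stay cacheable:
 * Viking with MXCC and HyperSparc keep SRMMU_CACHE/IOPTE_CACHE set, every
 * other CPU gets non-cacheable consistent mappings.
 */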
void __init ld_mmu_iommu(void)
{
	if (flush_page_for_dma_global) {
		/* flush_page_for_dma flushes everything, no matter which page it is */
		sparc32_dma_ops = &iommu_dma_gflush_ops;
	} else {
		sparc32_dma_ops = &iommu_dma_pflush_ops;
	}

	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
	} else {
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
	}
}