xref: /linux/arch/sparc/mm/iommu.c (revision f977ea49ae24c3bf0595725465b9ca25385de307)
/*
 * iommu.c:  IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev     (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>

#include "mm_32.h"

/*
 * This can be sized dynamically, but we will do that
 * only when we have guidance about actual I/O pressure.
 */
#define IOMMU_RNGE	IOMMU_RNGE_256MB
#define IOMMU_START	0xF0000000
#define IOMMU_WINSIZE	(256*1024*1024U)
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
#define IOMMU_ORDER	6				/* 4096 * (1<<6) */
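/*
 * With 4 KB pages the 256 MB window needs 64K IOPTEs; at 4 bytes per
 * IOPTE that is a 256 KB table, hence the order-6 (64-page) allocation
 * done below.
 */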

static int viking_flush;
/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);

/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;		/* Consistent mapping iopte flags */
static pgprot_t dvma_prot;		/* Consistent mapping pte flags */

#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
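/*
 * MKIOPTE() builds a hardware IOPTE: the pfn is shifted into the
 * IOPTE_PAGE field (pfn << 8 equals the physical address >> 4 with
 * 4 KB pages), the permission bits are ORed in, and the IOPTE_WAZ
 * bits are masked off so they are written as zero.
 */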

static void __init sbus_iommu_init(struct platform_device *op)
{
	struct iommu_struct *iommu;
	unsigned int impl, vers;
	unsigned long *bitmap;
	unsigned long control;
	unsigned long base;
	unsigned long tmp;

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL);
	if (!iommu) {
		prom_printf("Unable to allocate iommu structure\n");
		prom_halt();
	}

	iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
				 "iommu_regs");
	if (!iommu->regs) {
		prom_printf("Cannot map IOMMU registers\n");
		prom_halt();
	}

	control = sbus_readl(&iommu->regs->control);
	impl = (control & IOMMU_CTRL_IMPL) >> 28;
	vers = (control & IOMMU_CTRL_VERS) >> 24;
	control &= ~(IOMMU_CTRL_RNGE);
	control |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
	sbus_writel(control, &iommu->regs->control);

	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;

	/* Allocate IOMMU page table */
	/* Stupid alignment constraints give me a headache.
	 * We need a 256K, 512K, 1M or 2M area aligned to its size,
	 * and the current gfp implementation fortunately gives it to us.
	 */
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%lx]\n",
			    IOMMU_NPTES * sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;

	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
	flush_cache_all();
	flush_tlb_all();

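	/*
	 * The base register takes the table's physical address shifted
	 * right by 4; the natural size alignment requested above is
	 * presumably what lets the IOMMU index this table directly.
	 */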
	base = __pa((unsigned long)iommu->page_table) >> 4;
	sbus_writel(base, &iommu->regs->base);
	iommu_invalidate(iommu->regs);

	bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES>>3));
		prom_halt();
	}
	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
	/* To be coherent on HyperSparc, the page color of DVMA
	 * and physical addresses must match.
	 */
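	/* For example, a 256 KB virtually indexed cache with 4 KB pages
	 * (sizes assumed here only for illustration) would give
	 * vac_cache_size >> PAGE_SHIFT == 64 colors.
	 */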
	if (srmmu_modtype == HyperSparc)
		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
	else
		iommu->usemap.num_colors = 1;

	printk(KERN_INFO "IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
	       impl, vers, iommu->page_table,
	       (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);

	op->dev.archdata.iommu = iommu;
}

static int __init iommu_init(void)
{
	struct device_node *dp;

	for_each_node_by_name(dp, "iommu") {
		struct platform_device *op = of_find_device_by_node(dp);

		sbus_iommu_init(op);
		of_propagate_archdata(op);
	}

	return 0;
}

subsys_initcall(iommu_init);

/* Flush the iotlb entries to ram. */
/* This could be better if we didn't have to flush whole pages. */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)iopte;
	end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
	start &= PAGE_MASK;
	if (viking_mxcc_present) {
		while (start < end) {
			viking_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	} else if (viking_flush) {
		while (start < end) {
			viking_flush_page(start);
			start += PAGE_SIZE;
		}
	} else {
		while (start < end) {
			__flush_page_to_ram(start);
			start += PAGE_SIZE;
		}
	}
}

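/*
 * Allocate npages contiguous IOPTEs (page color == pfn, which keeps the
 * DVMA and physical colors in step for HyperSparc), point them at the
 * given pages, and return the DVMA bus address of the first page:
 * iommu->start + index * PAGE_SIZE.
 */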
static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	iopte_t *iopte, *iopte0;
	unsigned int busa, busa0;
	int i;

	/* page color = pfn of page */
	ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
	if (ioptex < 0)
		panic("iommu out");
	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
	iopte0 = &iommu->page_table[ioptex];

	busa = busa0;
	iopte = iopte0;
	for (i = 0; i < npages; i++) {
		iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
		iopte++;
		page++;
	}

	iommu_flush_iotlb(iopte0, npages);

	return busa0;
}

static u32 iommu_get_scsi_one(struct device *dev, char *vaddr, unsigned int len)
{
	unsigned long off;
	int npages;
	struct page *page;
	u32 busa;

	off = (unsigned long)vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
	page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
	busa = iommu_get_one(dev, page, npages);
	return busa + off;
}

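/*
 * The _gflush variants run on CPUs whose flush_page_for_dma() flushes the
 * whole cache regardless of its argument (flush_page_for_dma_global), so a
 * single call is enough; the _pflush variants flush each page of the
 * buffer individually before mapping it.
 */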
static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len)
{
	flush_page_for_dma(0);
	return iommu_get_scsi_one(dev, vaddr, len);
}

static __u32 iommu_get_scsi_one_pflush(struct device *dev, char *vaddr, unsigned long len)
{
	unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;

	while (page < ((unsigned long)(vaddr + len))) {
		flush_page_for_dma(page);
		page += PAGE_SIZE;
	}
	return iommu_get_scsi_one(dev, vaddr, len);
}

static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz)
{
	int n;

	flush_page_for_dma(0);
	while (sz != 0) {
		--sz;
		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dma_length = sg->length;
		sg = sg_next(sg);
	}
}

static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg, int sz)
{
	unsigned long page, oldpage = 0;
	int n, i;

	while (sz != 0) {
		--sz;

		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;

		/*
		 * We expect unmapped highmem pages to be not in the cache.
		 * XXX Is this a good assumption?
		 * XXX What if someone else unmaps it here and races us?
		 */
		if ((page = (unsigned long) page_address(sg_page(sg))) != 0) {
			for (i = 0; i < n; i++) {
				if (page != oldpage) {	/* Already flushed? */
					flush_page_for_dma(page);
					oldpage = page;
				}
				page += PAGE_SIZE;
			}
		}

		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dma_length = sg->length;
		sg = sg_next(sg);
	}
}

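/*
 * Tear down a mapping made by iommu_get_one(): clear the IOPTEs,
 * invalidate the corresponding IOTLB entries and return the range to the
 * allocation bitmap.  busa must lie inside the DVMA window (hence the
 * BUG_ON).
 */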
static void iommu_release_one(struct device *dev, u32 busa, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	int i;

	BUG_ON(busa < iommu->start);
	ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		iopte_val(iommu->page_table[ioptex + i]) = 0;
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
	}
	bit_map_clear(&iommu->usemap, ioptex, npages);
}

static void iommu_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
{
	unsigned long off;
	int npages;

	off = vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
	iommu_release_one(dev, vaddr & PAGE_MASK, npages);
}

static void iommu_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
{
	int n;

	while (sz != 0) {
		--sz;

		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		iommu_release_one(dev, sg->dma_address & PAGE_MASK, n);
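		/* Presumably a poison value to catch use of the stale
		 * handle; it lies well outside the DVMA window that
		 * starts at IOMMU_START.
		 */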
		sg->dma_address = 0x21212121;
		sg = sg_next(sg);
	}
}

#ifdef CONFIG_SBUS
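/*
 * Map a consistent DMA area: each kernel page of the buffer is remapped
 * with dvma_prot (uncached on CPUs that are not DMA coherent) and gets an
 * IOPTE built with ioperm_noc; the DVMA base address is returned through
 * *pba.  This is the backend used by the SBUS consistent-allocation path.
 */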
static int iommu_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va,
			      unsigned long addr, int len)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned long page, end;
	iopte_t *iopte = iommu->page_table;
	iopte_t *first;
	int ioptex;

	BUG_ON((va & ~PAGE_MASK) != 0);
	BUG_ON((addr & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	/* page color = physical address */
	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
		addr >> PAGE_SHIFT);
	if (ioptex < 0)
		panic("iommu out");

	iopte += ioptex;
	first = iopte;
	end = addr + len;
	while (addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;

			if (viking_mxcc_present)
				viking_mxcc_flush_page(page);
			else if (viking_flush)
				viking_flush_page(page);
			else
				__flush_page_to_ram(page);

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
		}
		iopte_val(*iopte++) =
		    MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* P3: why do we need this?
	 *
	 * DAVEM: Because there are several aspects, none of which
	 *        are handled by a single interface.  Some cpus are
	 *        completely not I/O DMA coherent, and some have
	 *        virtually indexed caches.  The driver DMA flushing
	 *        methods handle the former case, but here during
	 *        IOMMU page table modifications, and usage of non-cacheable
	 *        cpu mappings of pages potentially in the cpu caches, we have
	 *        to handle the latter case as well.
	 */
	flush_cache_all();
	iommu_flush_iotlb(first, len >> PAGE_SHIFT);
	flush_tlb_all();
	iommu_invalidate(iommu->regs);

	*pba = iommu->start + (ioptex << PAGE_SHIFT);
	return 0;
}

static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	iopte_t *iopte = iommu->page_table;
	unsigned long end;
	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;

	BUG_ON((busa & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	iopte += ioptex;
	end = busa + len;
	while (busa < end) {
		iopte_val(*iopte++) = 0;
		busa += PAGE_SIZE;
	}
	flush_tlb_all();
	iommu_invalidate(iommu->regs);
	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
}
#endif

static const struct sparc32_dma_ops iommu_dma_gflush_ops = {
	.get_scsi_one		= iommu_get_scsi_one_gflush,
	.get_scsi_sgl		= iommu_get_scsi_sgl_gflush,
	.release_scsi_one	= iommu_release_scsi_one,
	.release_scsi_sgl	= iommu_release_scsi_sgl,
#ifdef CONFIG_SBUS
	.map_dma_area		= iommu_map_dma_area,
	.unmap_dma_area		= iommu_unmap_dma_area,
#endif
};

static const struct sparc32_dma_ops iommu_dma_pflush_ops = {
	.get_scsi_one		= iommu_get_scsi_one_pflush,
	.get_scsi_sgl		= iommu_get_scsi_sgl_pflush,
	.release_scsi_one	= iommu_release_scsi_one,
	.release_scsi_sgl	= iommu_release_scsi_sgl,
#ifdef CONFIG_SBUS
	.map_dma_area		= iommu_map_dma_area,
	.unmap_dma_area		= iommu_unmap_dma_area,
#endif
};

void __init ld_mmu_iommu(void)
{
	if (flush_page_for_dma_global) {
		/* flush_page_for_dma flushes everything, no matter which page it is given */
		sparc32_dma_ops = &iommu_dma_gflush_ops;
	} else {
		sparc32_dma_ops = &iommu_dma_pflush_ops;
	}

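	/* With a Viking MXCC or on HyperSparc, cacheable consistent
	 * mappings appear to be safe (the MXCC keeps DVMA coherent and
	 * HyperSparc relies on the page-color matching set up above);
	 * everywhere else the consistent mappings must be uncached.
	 */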
	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
	} else {
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
	}
}