xref: /linux/arch/sparc/mm/iommu.c (revision 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2)
1*1da177e4SLinus Torvalds /*
2*1da177e4SLinus Torvalds  * iommu.c:  IOMMU specific routines for memory management.
3*1da177e4SLinus Torvalds  *
4*1da177e4SLinus Torvalds  * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
5*1da177e4SLinus Torvalds  * Copyright (C) 1995,2002 Pete Zaitcev     (zaitcev@yahoo.com)
6*1da177e4SLinus Torvalds  * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
7*1da177e4SLinus Torvalds  * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
8*1da177e4SLinus Torvalds  */
9*1da177e4SLinus Torvalds 
10*1da177e4SLinus Torvalds #include <linux/config.h>
11*1da177e4SLinus Torvalds #include <linux/kernel.h>
12*1da177e4SLinus Torvalds #include <linux/init.h>
13*1da177e4SLinus Torvalds #include <linux/mm.h>
14*1da177e4SLinus Torvalds #include <linux/slab.h>
15*1da177e4SLinus Torvalds #include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
16*1da177e4SLinus Torvalds 
17*1da177e4SLinus Torvalds #include <asm/scatterlist.h>
18*1da177e4SLinus Torvalds #include <asm/pgalloc.h>
19*1da177e4SLinus Torvalds #include <asm/pgtable.h>
20*1da177e4SLinus Torvalds #include <asm/sbus.h>
21*1da177e4SLinus Torvalds #include <asm/io.h>
22*1da177e4SLinus Torvalds #include <asm/mxcc.h>
23*1da177e4SLinus Torvalds #include <asm/mbus.h>
24*1da177e4SLinus Torvalds #include <asm/cacheflush.h>
25*1da177e4SLinus Torvalds #include <asm/tlbflush.h>
26*1da177e4SLinus Torvalds #include <asm/bitext.h>
27*1da177e4SLinus Torvalds #include <asm/iommu.h>
28*1da177e4SLinus Torvalds #include <asm/dma.h>
29*1da177e4SLinus Torvalds 
30*1da177e4SLinus Torvalds /*
31*1da177e4SLinus Torvalds  * This can be sized dynamically, but we will do this
32*1da177e4SLinus Torvalds  * only when we have a guidance about actual I/O pressures.
33*1da177e4SLinus Torvalds  */
34*1da177e4SLinus Torvalds #define IOMMU_RNGE	IOMMU_RNGE_256MB
35*1da177e4SLinus Torvalds #define IOMMU_START	0xF0000000
36*1da177e4SLinus Torvalds #define IOMMU_WINSIZE	(256*1024*1024U)
37*1da177e4SLinus Torvalds #define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
38*1da177e4SLinus Torvalds #define IOMMU_ORDER	6				/* 4096 * (1<<6) */
39*1da177e4SLinus Torvalds 
40*1da177e4SLinus Torvalds /* srmmu.c */
41*1da177e4SLinus Torvalds extern int viking_mxcc_present;
42*1da177e4SLinus Torvalds BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
43*1da177e4SLinus Torvalds #define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
44*1da177e4SLinus Torvalds extern int flush_page_for_dma_global;
45*1da177e4SLinus Torvalds static int viking_flush;
46*1da177e4SLinus Torvalds /* viking.S */
47*1da177e4SLinus Torvalds extern void viking_flush_page(unsigned long page);
48*1da177e4SLinus Torvalds extern void viking_mxcc_flush_page(unsigned long page);
49*1da177e4SLinus Torvalds 
50*1da177e4SLinus Torvalds /*
51*1da177e4SLinus Torvalds  * Values precomputed according to CPU type.
52*1da177e4SLinus Torvalds  */
53*1da177e4SLinus Torvalds static unsigned int ioperm_noc;		/* Consistent mapping iopte flags */
54*1da177e4SLinus Torvalds static pgprot_t dvma_prot;		/* Consistent mapping pte flags */
55*1da177e4SLinus Torvalds 
56*1da177e4SLinus Torvalds #define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
57*1da177e4SLinus Torvalds #define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
58*1da177e4SLinus Torvalds 
/*
 * Probe, map and enable the IOMMU described by PROM node @iommund,
 * allocate its page table and allocation bitmap, and attach the
 * resulting iommu_struct to @sbus.  Any failure is fatal: messages go
 * out through the PROM and the machine is halted, since SBUS DMA is
 * impossible without a working IOMMU.
 */
void __init
iommu_init(int iommund, struct sbus_bus *sbus)
{
	unsigned int impl, vers;
	unsigned long tmp;
	struct iommu_struct *iommu;
	struct linux_prom_registers iommu_promregs[PROMREG_MAX];
	struct resource r;
	unsigned long *bitmap;

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_ATOMIC);
	if (!iommu) {
		prom_printf("Unable to allocate iommu structure\n");
		prom_halt();
	}
	/* Map the IOMMU control registers using the PROM "reg" property. */
	iommu->regs = NULL;
	if (prom_getproperty(iommund, "reg", (void *) iommu_promregs,
			 sizeof(iommu_promregs)) != -1) {
		memset(&r, 0, sizeof(r));
		r.flags = iommu_promregs[0].which_io;
		r.start = iommu_promregs[0].phys_addr;
		iommu->regs = (struct iommu_regs *)
			sbus_ioremap(&r, 0, PAGE_SIZE * 3, "iommu_regs");
	}
	if (!iommu->regs) {
		prom_printf("Cannot map IOMMU registers\n");
		prom_halt();
	}
	/* Implementation/version fields, for the boot message below. */
	impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
	vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
	/* Program a 256MB DVMA range and enable translation. */
	tmp = iommu->regs->control;
	tmp &= ~(IOMMU_CTRL_RNGE);
	tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
	iommu->regs->control = tmp;
	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;

	/* Allocate IOMMU page table */
	/* Stupid alignment constraints give me a headache.
	   We need 256K or 512K or 1M or 2M area aligned to
           its size and current gfp will fortunately give
           it to us. */
        tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%08x]\n",
			    IOMMU_NPTES*sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;

	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
	/* Push the zeroed table out of the CPU caches before the IOMMU
	 * hardware is pointed at it. */
	flush_cache_all();
	flush_tlb_all();
	/* The base register holds the physical address shifted by 4. */
	iommu->regs->base = __pa((unsigned long) iommu->page_table) >> 4;
	iommu_invalidate(iommu->regs);

	/* One bit per iopte in the allocation bitmap. */
	bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES>>3));
		prom_halt();
	}
	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
	/* To be coherent on HyperSparc, the page color of DVMA
	 * and physical addresses must match.
	 */
	if (srmmu_modtype == HyperSparc)
		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
	else
		iommu->usemap.num_colors = 1;

	printk("IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
	    impl, vers, iommu->page_table,
	    (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);

	sbus->iommu = iommu;
}
138*1da177e4SLinus Torvalds 
139*1da177e4SLinus Torvalds /* This begs to be btfixup-ed by srmmu. */
140*1da177e4SLinus Torvalds /* Flush the iotlb entries to ram. */
141*1da177e4SLinus Torvalds /* This could be better if we didn't have to flush whole pages. */
142*1da177e4SLinus Torvalds static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
143*1da177e4SLinus Torvalds {
144*1da177e4SLinus Torvalds 	unsigned long start;
145*1da177e4SLinus Torvalds 	unsigned long end;
146*1da177e4SLinus Torvalds 
147*1da177e4SLinus Torvalds 	start = (unsigned long)iopte & PAGE_MASK;
148*1da177e4SLinus Torvalds 	end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
149*1da177e4SLinus Torvalds 	if (viking_mxcc_present) {
150*1da177e4SLinus Torvalds 		while(start < end) {
151*1da177e4SLinus Torvalds 			viking_mxcc_flush_page(start);
152*1da177e4SLinus Torvalds 			start += PAGE_SIZE;
153*1da177e4SLinus Torvalds 		}
154*1da177e4SLinus Torvalds 	} else if (viking_flush) {
155*1da177e4SLinus Torvalds 		while(start < end) {
156*1da177e4SLinus Torvalds 			viking_flush_page(start);
157*1da177e4SLinus Torvalds 			start += PAGE_SIZE;
158*1da177e4SLinus Torvalds 		}
159*1da177e4SLinus Torvalds 	} else {
160*1da177e4SLinus Torvalds 		while(start < end) {
161*1da177e4SLinus Torvalds 			__flush_page_to_ram(start);
162*1da177e4SLinus Torvalds 			start += PAGE_SIZE;
163*1da177e4SLinus Torvalds 		}
164*1da177e4SLinus Torvalds 	}
165*1da177e4SLinus Torvalds }
166*1da177e4SLinus Torvalds 
/*
 * Install @npages consecutive ioptes mapping the pages starting at
 * @page and return the DVMA bus address of the first one.  The iopte
 * index comes from the usemap bit allocator; the starting pfn is
 * passed as the allocation color so DVMA and physical page colors can
 * match (HyperSparc coherency, see iommu_init()).  Panics if the DVMA
 * window is exhausted.
 */
static u32 iommu_get_one(struct page *page, int npages, struct sbus_bus *sbus)
{
	struct iommu_struct *iommu = sbus->iommu;
	int ioptex;
	iopte_t *iopte, *iopte0;
	unsigned int busa, busa0;
	int i;

	/* page color = pfn of page */
	ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
	if (ioptex < 0)
		panic("iommu out");
	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
	iopte0 = &iommu->page_table[ioptex];

	busa = busa0;
	iopte = iopte0;
	for (i = 0; i < npages; i++) {
		iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
		/* Drop any stale IOTLB entry for this bus address. */
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
		iopte++;
		page++;
	}

	/* Push the new ioptes to RAM so the IOMMU table walk sees them. */
	iommu_flush_iotlb(iopte0, npages);

	return busa0;
}
196*1da177e4SLinus Torvalds 
197*1da177e4SLinus Torvalds static u32 iommu_get_scsi_one(char *vaddr, unsigned int len,
198*1da177e4SLinus Torvalds     struct sbus_bus *sbus)
199*1da177e4SLinus Torvalds {
200*1da177e4SLinus Torvalds 	unsigned long off;
201*1da177e4SLinus Torvalds 	int npages;
202*1da177e4SLinus Torvalds 	struct page *page;
203*1da177e4SLinus Torvalds 	u32 busa;
204*1da177e4SLinus Torvalds 
205*1da177e4SLinus Torvalds 	off = (unsigned long)vaddr & ~PAGE_MASK;
206*1da177e4SLinus Torvalds 	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
207*1da177e4SLinus Torvalds 	page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
208*1da177e4SLinus Torvalds 	busa = iommu_get_one(page, npages, sbus);
209*1da177e4SLinus Torvalds 	return busa + off;
210*1da177e4SLinus Torvalds }
211*1da177e4SLinus Torvalds 
212*1da177e4SLinus Torvalds static __u32 iommu_get_scsi_one_noflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
213*1da177e4SLinus Torvalds {
214*1da177e4SLinus Torvalds 	return iommu_get_scsi_one(vaddr, len, sbus);
215*1da177e4SLinus Torvalds }
216*1da177e4SLinus Torvalds 
217*1da177e4SLinus Torvalds static __u32 iommu_get_scsi_one_gflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
218*1da177e4SLinus Torvalds {
219*1da177e4SLinus Torvalds 	flush_page_for_dma(0);
220*1da177e4SLinus Torvalds 	return iommu_get_scsi_one(vaddr, len, sbus);
221*1da177e4SLinus Torvalds }
222*1da177e4SLinus Torvalds 
223*1da177e4SLinus Torvalds static __u32 iommu_get_scsi_one_pflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
224*1da177e4SLinus Torvalds {
225*1da177e4SLinus Torvalds 	unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;
226*1da177e4SLinus Torvalds 
227*1da177e4SLinus Torvalds 	while(page < ((unsigned long)(vaddr + len))) {
228*1da177e4SLinus Torvalds 		flush_page_for_dma(page);
229*1da177e4SLinus Torvalds 		page += PAGE_SIZE;
230*1da177e4SLinus Torvalds 	}
231*1da177e4SLinus Torvalds 	return iommu_get_scsi_one(vaddr, len, sbus);
232*1da177e4SLinus Torvalds }
233*1da177e4SLinus Torvalds 
234*1da177e4SLinus Torvalds static void iommu_get_scsi_sgl_noflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
235*1da177e4SLinus Torvalds {
236*1da177e4SLinus Torvalds 	int n;
237*1da177e4SLinus Torvalds 
238*1da177e4SLinus Torvalds 	while (sz != 0) {
239*1da177e4SLinus Torvalds 		--sz;
240*1da177e4SLinus Torvalds 		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
241*1da177e4SLinus Torvalds 		sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
242*1da177e4SLinus Torvalds 		sg->dvma_length = (__u32) sg->length;
243*1da177e4SLinus Torvalds 		sg++;
244*1da177e4SLinus Torvalds 	}
245*1da177e4SLinus Torvalds }
246*1da177e4SLinus Torvalds 
247*1da177e4SLinus Torvalds static void iommu_get_scsi_sgl_gflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
248*1da177e4SLinus Torvalds {
249*1da177e4SLinus Torvalds 	int n;
250*1da177e4SLinus Torvalds 
251*1da177e4SLinus Torvalds 	flush_page_for_dma(0);
252*1da177e4SLinus Torvalds 	while (sz != 0) {
253*1da177e4SLinus Torvalds 		--sz;
254*1da177e4SLinus Torvalds 		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
255*1da177e4SLinus Torvalds 		sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
256*1da177e4SLinus Torvalds 		sg->dvma_length = (__u32) sg->length;
257*1da177e4SLinus Torvalds 		sg++;
258*1da177e4SLinus Torvalds 	}
259*1da177e4SLinus Torvalds }
260*1da177e4SLinus Torvalds 
/*
 * Map a scatterlist on CPUs whose flush_page_for_dma() operates on a
 * single page: flush every page of every segment before it is handed
 * to the device.  @oldpage suppresses a repeat flush when consecutive
 * segments end and begin on the same page.
 */
static void iommu_get_scsi_sgl_pflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
	unsigned long page, oldpage = 0;
	int n, i;

	while(sz != 0) {
		--sz;

		/* Pages spanned by this segment, counting partial pages. */
		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;

		/*
		 * We expect unmapped highmem pages to be not in the cache.
		 * XXX Is this a good assumption?
		 * XXX What if someone else unmaps it here and races us?
		 */
		if ((page = (unsigned long) page_address(sg->page)) != 0) {
			for (i = 0; i < n; i++) {
				if (page != oldpage) {	/* Already flushed? */
					flush_page_for_dma(page);
					oldpage = page;
				}
				page += PAGE_SIZE;
			}
		}

		sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
		sg->dvma_length = (__u32) sg->length;
		sg++;
	}
}
291*1da177e4SLinus Torvalds 
292*1da177e4SLinus Torvalds static void iommu_release_one(u32 busa, int npages, struct sbus_bus *sbus)
293*1da177e4SLinus Torvalds {
294*1da177e4SLinus Torvalds 	struct iommu_struct *iommu = sbus->iommu;
295*1da177e4SLinus Torvalds 	int ioptex;
296*1da177e4SLinus Torvalds 	int i;
297*1da177e4SLinus Torvalds 
298*1da177e4SLinus Torvalds 	if (busa < iommu->start)
299*1da177e4SLinus Torvalds 		BUG();
300*1da177e4SLinus Torvalds 	ioptex = (busa - iommu->start) >> PAGE_SHIFT;
301*1da177e4SLinus Torvalds 	for (i = 0; i < npages; i++) {
302*1da177e4SLinus Torvalds 		iopte_val(iommu->page_table[ioptex + i]) = 0;
303*1da177e4SLinus Torvalds 		iommu_invalidate_page(iommu->regs, busa);
304*1da177e4SLinus Torvalds 		busa += PAGE_SIZE;
305*1da177e4SLinus Torvalds 	}
306*1da177e4SLinus Torvalds 	bit_map_clear(&iommu->usemap, ioptex, npages);
307*1da177e4SLinus Torvalds }
308*1da177e4SLinus Torvalds 
309*1da177e4SLinus Torvalds static void iommu_release_scsi_one(__u32 vaddr, unsigned long len, struct sbus_bus *sbus)
310*1da177e4SLinus Torvalds {
311*1da177e4SLinus Torvalds 	unsigned long off;
312*1da177e4SLinus Torvalds 	int npages;
313*1da177e4SLinus Torvalds 
314*1da177e4SLinus Torvalds 	off = vaddr & ~PAGE_MASK;
315*1da177e4SLinus Torvalds 	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
316*1da177e4SLinus Torvalds 	iommu_release_one(vaddr & PAGE_MASK, npages, sbus);
317*1da177e4SLinus Torvalds }
318*1da177e4SLinus Torvalds 
319*1da177e4SLinus Torvalds static void iommu_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
320*1da177e4SLinus Torvalds {
321*1da177e4SLinus Torvalds 	int n;
322*1da177e4SLinus Torvalds 
323*1da177e4SLinus Torvalds 	while(sz != 0) {
324*1da177e4SLinus Torvalds 		--sz;
325*1da177e4SLinus Torvalds 
326*1da177e4SLinus Torvalds 		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
327*1da177e4SLinus Torvalds 		iommu_release_one(sg->dvma_address & PAGE_MASK, n, sbus);
328*1da177e4SLinus Torvalds 		sg->dvma_address = 0x21212121;
329*1da177e4SLinus Torvalds 		sg++;
330*1da177e4SLinus Torvalds 	}
331*1da177e4SLinus Torvalds }
332*1da177e4SLinus Torvalds 
333*1da177e4SLinus Torvalds #ifdef CONFIG_SBUS
/*
 * Map @len bytes for consistent ("coherent") DMA.  @va is the kernel
 * virtual address of the backing pages; @addr is the virtual address
 * at which the kernel pte mappings are rewritten with dvma_prot
 * (non-cacheable unless the CPU can keep DMA coherent); the matching
 * ioptes are installed with ioperm_noc.  The DVMA base is stored
 * through @pba.
 *
 * @va, @addr and @len must all be page aligned (BUG otherwise).
 * Panics if the DVMA window is exhausted; returns 0 on success.
 */
static int iommu_map_dma_area(dma_addr_t *pba, unsigned long va,
    unsigned long addr, int len)
{
	unsigned long page, end;
	struct iommu_struct *iommu = sbus_root->iommu;
	iopte_t *iopte = iommu->page_table;
	iopte_t *first;
	int ioptex;

	if ((va & ~PAGE_MASK) != 0) BUG();
	if ((addr & ~PAGE_MASK) != 0) BUG();
	if ((len & ~PAGE_MASK) != 0) BUG();

	/* page color = physical address */
	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
		addr >> PAGE_SHIFT);
	if (ioptex < 0)
		panic("iommu out");

	iopte += ioptex;
	first = iopte;
	end = addr + len;
	while(addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;

			/* Write back any dirty cache lines for this page
			 * before its kernel mapping may become uncacheable. */
			if (viking_mxcc_present)
				viking_mxcc_flush_page(page);
			else if (viking_flush)
				viking_flush_page(page);
			else
				__flush_page_to_ram(page);

			/* Rewrite the kernel pte at @addr with dvma_prot.
			 * NOTE(review): there is no pte_unmap() to pair with
			 * pte_offset_map() — presumably fine because init_mm
			 * page tables are not in highmem here; confirm. */
			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
		}
		iopte_val(*iopte++) =
		    MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* P3: why do we need this?
	 *
	 * DAVEM: Because there are several aspects, none of which
	 *        are handled by a single interface.  Some cpus are
	 *        completely not I/O DMA coherent, and some have
	 *        virtually indexed caches.  The driver DMA flushing
	 *        methods handle the former case, but here during
	 *        IOMMU page table modifications, and usage of non-cacheable
	 *        cpu mappings of pages potentially in the cpu caches, we have
	 *        to handle the latter case as well.
	 */
	flush_cache_all();
	iommu_flush_iotlb(first, len >> PAGE_SHIFT);
	flush_tlb_all();
	iommu_invalidate(iommu->regs);

	*pba = iommu->start + (ioptex << PAGE_SHIFT);
	return 0;
}
400*1da177e4SLinus Torvalds 
401*1da177e4SLinus Torvalds static void iommu_unmap_dma_area(unsigned long busa, int len)
402*1da177e4SLinus Torvalds {
403*1da177e4SLinus Torvalds 	struct iommu_struct *iommu = sbus_root->iommu;
404*1da177e4SLinus Torvalds 	iopte_t *iopte = iommu->page_table;
405*1da177e4SLinus Torvalds 	unsigned long end;
406*1da177e4SLinus Torvalds 	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
407*1da177e4SLinus Torvalds 
408*1da177e4SLinus Torvalds 	if ((busa & ~PAGE_MASK) != 0) BUG();
409*1da177e4SLinus Torvalds 	if ((len & ~PAGE_MASK) != 0) BUG();
410*1da177e4SLinus Torvalds 
411*1da177e4SLinus Torvalds 	iopte += ioptex;
412*1da177e4SLinus Torvalds 	end = busa + len;
413*1da177e4SLinus Torvalds 	while (busa < end) {
414*1da177e4SLinus Torvalds 		iopte_val(*iopte++) = 0;
415*1da177e4SLinus Torvalds 		busa += PAGE_SIZE;
416*1da177e4SLinus Torvalds 	}
417*1da177e4SLinus Torvalds 	flush_tlb_all();
418*1da177e4SLinus Torvalds 	iommu_invalidate(iommu->regs);
419*1da177e4SLinus Torvalds 	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
420*1da177e4SLinus Torvalds }
421*1da177e4SLinus Torvalds 
422*1da177e4SLinus Torvalds static struct page *iommu_translate_dvma(unsigned long busa)
423*1da177e4SLinus Torvalds {
424*1da177e4SLinus Torvalds 	struct iommu_struct *iommu = sbus_root->iommu;
425*1da177e4SLinus Torvalds 	iopte_t *iopte = iommu->page_table;
426*1da177e4SLinus Torvalds 
427*1da177e4SLinus Torvalds 	iopte += ((busa - iommu->start) >> PAGE_SHIFT);
428*1da177e4SLinus Torvalds 	return pfn_to_page((iopte_val(*iopte) & IOPTE_PAGE) >> (PAGE_SHIFT-4));
429*1da177e4SLinus Torvalds }
430*1da177e4SLinus Torvalds #endif
431*1da177e4SLinus Torvalds 
/* No-op lock hook: the IOMMU path needs no bounce buffering, so the
 * kernel virtual address is returned unchanged. */
static char *iommu_lockarea(char *vaddr, unsigned long len)
{
	return vaddr;
}
436*1da177e4SLinus Torvalds 
/* No-op unlock hook, paired with iommu_lockarea(). */
static void iommu_unlockarea(char *vaddr, unsigned long len)
{
}
440*1da177e4SLinus Torvalds 
/*
 * Wire the IOMMU-based implementations of the mmu_* DMA operations
 * into the btfixup tables and precompute the consistent-mapping
 * pte/iopte flags for the detected CPU type.
 */
void __init ld_mmu_iommu(void)
{
	/* Is the per-page DMA flush the Viking flush routine? */
	viking_flush = (BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page);
	BTFIXUPSET_CALL(mmu_lockarea, iommu_lockarea, BTFIXUPCALL_RETO0);
	BTFIXUPSET_CALL(mmu_unlockarea, iommu_unlockarea, BTFIXUPCALL_NOP);

	if (!BTFIXUPVAL_CALL(flush_page_for_dma)) {
		/* IO coherent chip */
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_noflush, BTFIXUPCALL_RETO0);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_noflush, BTFIXUPCALL_NORM);
	} else if (flush_page_for_dma_global) {
		/* flush_page_for_dma flushes everything, regardless of which page it is given */
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_gflush, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_gflush, BTFIXUPCALL_NORM);
	} else {
		/* flush_page_for_dma flushes one page at a time */
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_pflush, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_pflush, BTFIXUPCALL_NORM);
	}
	BTFIXUPSET_CALL(mmu_release_scsi_one, iommu_release_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iommu_release_scsi_sgl, BTFIXUPCALL_NORM);

#ifdef CONFIG_SBUS
	BTFIXUPSET_CALL(mmu_map_dma_area, iommu_map_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_unmap_dma_area, iommu_unmap_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_translate_dvma, iommu_translate_dvma, BTFIXUPCALL_NORM);
#endif

	/* Consistent mappings are left cacheable only on CPUs that keep
	 * I/O coherent with the cache (MXCC Viking, HyperSparc). */
	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
	} else {
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
	}
}
476