xref: /linux/arch/sparc/mm/iommu.c (revision 2af5920b81a4cd0a22d40b6b2c38356d3df03e13)
/*
 * iommu.c:  IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev     (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */

#include <asm/scatterlist.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sbus.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>

/*
 * This can be sized dynamically, but we will do this
 * only when we have guidance about actual I/O pressure.
 */
#define IOMMU_RNGE	IOMMU_RNGE_256MB
#define IOMMU_START	0xF0000000
#define IOMMU_WINSIZE	(256*1024*1024U)
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
#define IOMMU_ORDER	6				/* 4096 * (1<<6) */

/* srmmu.c */
extern int viking_mxcc_present;
BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
extern int flush_page_for_dma_global;
static int viking_flush;
/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);

/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;		/* Consistent mapping iopte flags */
static pgprot_t dvma_prot;		/* Consistent mapping pte flags */

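/*
 * An IOPTE holds the physical page address shifted right by 4 bits
 * (iommu_translate_dvma() below does the inverse shift), so with 4K
 * pages a pfn goes into IOPTE_PAGE as pfn << 8:
 *
 *	PA = pfn << 12, stored value = PA >> 4 = pfn << 8.
 *
 * IOPTE_WAZ apparently names the write-as-zero bits, which MKIOPTE
 * clears explicitly.
 */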
#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)

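/*
 * Called at boot for the SBus IOMMU node: map the control registers,
 * enable a 256MB DVMA window (0xF0000000 - 0xFFFFFFFF), allocate and
 * clear the IOPTE table, and set up the allocation bitmap that
 * manages it.
 */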
void __init
iommu_init(int iommund, struct sbus_bus *sbus)
{
	unsigned int impl, vers;
	unsigned long tmp;
	struct iommu_struct *iommu;
	struct linux_prom_registers iommu_promregs[PROMREG_MAX];
	struct resource r;
	unsigned long *bitmap;

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_ATOMIC);
	if (!iommu) {
		prom_printf("Unable to allocate iommu structure\n");
		prom_halt();
	}
	iommu->regs = NULL;
	if (prom_getproperty(iommund, "reg", (void *) iommu_promregs,
			 sizeof(iommu_promregs)) != -1) {
		memset(&r, 0, sizeof(r));
		r.flags = iommu_promregs[0].which_io;
		r.start = iommu_promregs[0].phys_addr;
		iommu->regs = (struct iommu_regs *)
			sbus_ioremap(&r, 0, PAGE_SIZE * 3, "iommu_regs");
	}
	if (!iommu->regs) {
		prom_printf("Cannot map IOMMU registers\n");
		prom_halt();
	}
	impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
	vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
	tmp = iommu->regs->control;
	tmp &= ~(IOMMU_CTRL_RNGE);
	tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
	iommu->regs->control = tmp;
	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;

	/* Allocate IOMMU page table */
	/* Stupid alignment constraints give me a headache.
	   We need a 256K, 512K, 1M or 2M area aligned to its size,
	   and the current gfp allocator fortunately gives it to us. */
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%08x]\n",
			    IOMMU_NPTES*sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;

	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
	flush_cache_all();
	flush_tlb_all();
	iommu->regs->base = __pa((unsigned long) iommu->page_table) >> 4;
	iommu_invalidate(iommu->regs);

	bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES>>3));
		prom_halt();
	}
	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
	/* To be coherent on HyperSparc, the page color of DVMA
	 * and physical addresses must match.
	 */
	if (srmmu_modtype == HyperSparc)
		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
	else
		iommu->usemap.num_colors = 1;

	printk("IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
	    impl, vers, iommu->page_table,
	    (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);

	sbus->iommu = iommu;
}

/*
 * This begs to be btfixup-ed by srmmu.
 * Flush the iotlb entries to RAM.
 * This could be better if we didn't have to flush whole pages.
 */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)iopte & PAGE_MASK;
	end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
	if (viking_mxcc_present) {
		while (start < end) {
			viking_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	} else if (viking_flush) {
		while (start < end) {
			viking_flush_page(start);
			start += PAGE_SIZE;
		}
	} else {
		while (start < end) {
			__flush_page_to_ram(start);
			start += PAGE_SIZE;
		}
	}
}

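/*
 * Grab npages consecutive IOPTEs for @page and its successors.  The
 * allocation is colored by the page's pfn ("page color = pfn") so
 * DVMA and physical colors match on HyperSparc; see iommu_init().
 * Returns the bus address of the first page.
 */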
static u32 iommu_get_one(struct page *page, int npages, struct sbus_bus *sbus)
{
	struct iommu_struct *iommu = sbus->iommu;
	int ioptex;
	iopte_t *iopte, *iopte0;
	unsigned int busa, busa0;
	int i;

	/* page color = pfn of page */
	ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
	if (ioptex < 0)
		panic("iommu out");
	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
	iopte0 = &iommu->page_table[ioptex];

	busa = busa0;
	iopte = iopte0;
	for (i = 0; i < npages; i++) {
		iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
		iopte++;
		page++;
	}

	iommu_flush_iotlb(iopte0, npages);

	return busa0;
}

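/*
 * Map @len bytes at @vaddr for DVMA.  Whole pages go through the
 * IOMMU; the sub-page offset is added back to the returned bus
 * address.
 */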
static u32 iommu_get_scsi_one(char *vaddr, unsigned int len,
    struct sbus_bus *sbus)
{
	unsigned long off;
	int npages;
	struct page *page;
	u32 busa;

	off = (unsigned long)vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
	page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
	busa = iommu_get_one(page, npages, sbus);
	return busa + off;
}

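/*
 * Three flush flavors, selected per CPU in ld_mmu_iommu() below:
 * noflush for I/O coherent chips, gflush where flush_page_for_dma()
 * flushes the whole cache anyway, and pflush where every page has to
 * be flushed out individually before the device may see it.
 */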
static __u32 iommu_get_scsi_one_noflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
{
	return iommu_get_scsi_one(vaddr, len, sbus);
}

static __u32 iommu_get_scsi_one_gflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
{
	flush_page_for_dma(0);
	return iommu_get_scsi_one(vaddr, len, sbus);
}

static __u32 iommu_get_scsi_one_pflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
{
	unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;

	while (page < ((unsigned long)(vaddr + len))) {
		flush_page_for_dma(page);
		page += PAGE_SIZE;
	}
	return iommu_get_scsi_one(vaddr, len, sbus);
}

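/*
 * Scatterlist counterparts of the above, one iommu_get_one() call
 * per sg entry, with the same three flush flavors.
 */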
static void iommu_get_scsi_sgl_noflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
	int n;

	while (sz != 0) {
		--sz;
		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
		sg->dvma_length = (__u32) sg->length;
		sg++;
	}
}

static void iommu_get_scsi_sgl_gflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
	int n;

	flush_page_for_dma(0);
	while (sz != 0) {
		--sz;
		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
		sg->dvma_length = (__u32) sg->length;
		sg++;
	}
}

static void iommu_get_scsi_sgl_pflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
	unsigned long page, oldpage = 0;
	int n, i;

	while (sz != 0) {
		--sz;

		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;

		/*
		 * We expect unmapped highmem pages not to be in the cache.
		 * XXX Is this a good assumption?
		 * XXX What if someone else unmaps it here and races us?
		 */
		if ((page = (unsigned long) page_address(sg->page)) != 0) {
			for (i = 0; i < n; i++) {
				if (page != oldpage) {	/* Already flushed? */
					flush_page_for_dma(page);
					oldpage = page;
				}
				page += PAGE_SIZE;
			}
		}

		sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
		sg->dvma_length = (__u32) sg->length;
		sg++;
	}
}

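/*
 * Undo iommu_get_one(): clear the IOPTEs, invalidate the per-page
 * IOTLB entries, and return the range to the allocation bitmap.
 */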
static void iommu_release_one(u32 busa, int npages, struct sbus_bus *sbus)
{
	struct iommu_struct *iommu = sbus->iommu;
	int ioptex;
	int i;

	BUG_ON(busa < iommu->start);
	ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		iopte_val(iommu->page_table[ioptex + i]) = 0;
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
	}
	bit_map_clear(&iommu->usemap, ioptex, npages);
}

static void iommu_release_scsi_one(__u32 vaddr, unsigned long len, struct sbus_bus *sbus)
{
	unsigned long off;
	int npages;

	off = vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
	iommu_release_one(vaddr & PAGE_MASK, npages, sbus);
}

static void iommu_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
	int n;

	while (sz != 0) {
		--sz;

		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		iommu_release_one(sg->dvma_address & PAGE_MASK, n, sbus);
		sg->dvma_address = 0x21212121; /* poison, presumably to catch use after unmap */
		sg++;
	}
}

#ifdef CONFIG_SBUS
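/*
 * Map the kernel virtual range at @va into DVMA space for consistent
 * buffers: the CPU mapping is replaced with a dvma_prot one (non-
 * cacheable unless the CPU can keep I/O coherent; see ld_mmu_iommu)
 * and the IOPTEs are filled with the matching ioperm_noc flags.
 */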
static int iommu_map_dma_area(dma_addr_t *pba, unsigned long va,
    unsigned long addr, int len)
{
	unsigned long page, end;
	struct iommu_struct *iommu = sbus_root->iommu;
	iopte_t *iopte = iommu->page_table;
	iopte_t *first;
	int ioptex;

	BUG_ON((va & ~PAGE_MASK) != 0);
	BUG_ON((addr & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	/* page color = physical address */
	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
		addr >> PAGE_SHIFT);
	if (ioptex < 0)
		panic("iommu out");

	iopte += ioptex;
	first = iopte;
	end = addr + len;
	while (addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;

			if (viking_mxcc_present)
				viking_mxcc_flush_page(page);
			else if (viking_flush)
				viking_flush_page(page);
			else
				__flush_page_to_ram(page);

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
		}
		iopte_val(*iopte++) =
		    MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* P3: why do we need this?
	 *
	 * DAVEM: Because there are several aspects, none of which
	 *        are handled by a single interface.  Some cpus are
	 *        completely not I/O DMA coherent, and some have
	 *        virtually indexed caches.  The driver DMA flushing
	 *        methods handle the former case, but here during
	 *        IOMMU page table modifications, and usage of non-cacheable
	 *        cpu mappings of pages potentially in the cpu caches, we have
	 *        to handle the latter case as well.
	 */
	flush_cache_all();
	iommu_flush_iotlb(first, len >> PAGE_SHIFT);
	flush_tlb_all();
	iommu_invalidate(iommu->regs);

	*pba = iommu->start + (ioptex << PAGE_SHIFT);
	return 0;
}

static void iommu_unmap_dma_area(unsigned long busa, int len)
{
	struct iommu_struct *iommu = sbus_root->iommu;
	iopte_t *iopte = iommu->page_table;
	unsigned long end;
	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;

	BUG_ON((busa & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	iopte += ioptex;
	end = busa + len;
	while (busa < end) {
		iopte_val(*iopte++) = 0;
		busa += PAGE_SIZE;
	}
	flush_tlb_all();
	iommu_invalidate(iommu->regs);
	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
}

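/*
 * IOPTE_PAGE holds the physical address shifted right by 4, so a
 * further shift by PAGE_SHIFT - 4 recovers the pfn.
 */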
static struct page *iommu_translate_dvma(unsigned long busa)
{
	struct iommu_struct *iommu = sbus_root->iommu;
	iopte_t *iopte = iommu->page_table;

	iopte += ((busa - iommu->start) >> PAGE_SHIFT);
	return pfn_to_page((iopte_val(*iopte) & IOPTE_PAGE) >> (PAGE_SHIFT-4));
}
#endif

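/*
 * Locking DVMA areas is not needed here; these exist to satisfy the
 * shared mmu_* interface and are patched to no-ops in ld_mmu_iommu().
 */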
static char *iommu_lockarea(char *vaddr, unsigned long len)
{
	return vaddr;
}

static void iommu_unlockarea(char *vaddr, unsigned long len)
{
}

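/*
 * Wire the routines above into the btfixup'ed mmu_* entry points and
 * precompute dvma_prot/ioperm_noc according to the CPU type.
 */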
void __init ld_mmu_iommu(void)
{
	viking_flush = (BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page);
	BTFIXUPSET_CALL(mmu_lockarea, iommu_lockarea, BTFIXUPCALL_RETO0);
	BTFIXUPSET_CALL(mmu_unlockarea, iommu_unlockarea, BTFIXUPCALL_NOP);

	if (!BTFIXUPVAL_CALL(flush_page_for_dma)) {
		/* IO coherent chip */
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_noflush, BTFIXUPCALL_RETO0);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_noflush, BTFIXUPCALL_NORM);
	} else if (flush_page_for_dma_global) {
		/* flush_page_for_dma flushes everything, no matter what page it is */
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_gflush, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_gflush, BTFIXUPCALL_NORM);
	} else {
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_pflush, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_pflush, BTFIXUPCALL_NORM);
	}
	BTFIXUPSET_CALL(mmu_release_scsi_one, iommu_release_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iommu_release_scsi_sgl, BTFIXUPCALL_NORM);

#ifdef CONFIG_SBUS
	BTFIXUPSET_CALL(mmu_map_dma_area, iommu_map_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_unmap_dma_area, iommu_unmap_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_translate_dvma, iommu_translate_dvma, BTFIXUPCALL_NORM);
#endif

	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
	} else {
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
	}
}