// SPDX-License-Identifier: GPL-2.0
/*
 * iommu.c:  IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev     (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>

#include "mm_32.h"

/*
 * This could be sized dynamically, but we will do so only once we
 * have guidance about actual I/O pressure.
 */
#define IOMMU_RNGE	IOMMU_RNGE_256MB
#define IOMMU_START	0xF0000000
#define IOMMU_WINSIZE	(256*1024*1024U)
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
#define IOMMU_ORDER	6				/* 4096 * (1<<6) */
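/*
 * Worked sizing, assuming 4 KiB pages and 4-byte ioptes: a 256 MiB
 * window covers 256 MiB / 4 KiB = 64K PTEs, i.e. a 256 KiB table,
 * which is 64 contiguous pages, hence order 6.
 */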

static int viking_flush;
/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);

/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;		/* Consistent mapping iopte flags */
static pgprot_t dvma_prot;		/* Consistent mapping pte flags */

#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
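/*
 * A sketch of how MKIOPTE composes an entry: the pfn is shifted into
 * the IOPTE_PAGE field, the permission bits are ORed in, and the
 * write-as-zero bits (IOPTE_WAZ) are cleared as the hardware expects.
 * For example, a cacheable, writable, valid mapping of pfn 0x1234:
 *
 *	iopte_val(*iopte) = MKIOPTE(0x1234, IOPERM);
 */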

static const struct dma_map_ops sbus_iommu_dma_gflush_ops;
static const struct dma_map_ops sbus_iommu_dma_pflush_ops;

static void __init sbus_iommu_init(struct platform_device *op)
{
	struct iommu_struct *iommu;
	unsigned int impl, vers;
	unsigned long *bitmap;
	unsigned long control;
	unsigned long base;
	unsigned long tmp;

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL);
	if (!iommu) {
		prom_printf("Unable to allocate iommu structure\n");
		prom_halt();
	}

	iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
				 "iommu_regs");
	if (!iommu->regs) {
		prom_printf("Cannot map IOMMU registers\n");
		prom_halt();
	}

	control = sbus_readl(&iommu->regs->control);
	impl = (control & IOMMU_CTRL_IMPL) >> 28;
	vers = (control & IOMMU_CTRL_VERS) >> 24;
	control &= ~(IOMMU_CTRL_RNGE);
	control |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
	sbus_writel(control, &iommu->regs->control);

	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;

	/*
	 * Allocate the IOMMU page table.  Stupid alignment constraints
	 * give me a headache: we need a 256K, 512K, 1M, or 2M area
	 * aligned to its size, and the current gfp allocator fortunately
	 * gives us exactly that.
	 */
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%lx]\n",
			    IOMMU_NPTES * sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;

	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
	flush_cache_all();
	flush_tlb_all();

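	/*
	 * The base register takes the table's physical address shifted
	 * right by four; the table's natural alignment guarantees the
	 * dropped low bits are zero.
	 */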
	base = __pa((unsigned long)iommu->page_table) >> 4;
	sbus_writel(base, &iommu->regs->base);
	iommu_invalidate(iommu->regs);

	bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES>>3));
		prom_halt();
	}
	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
	/* To be coherent on HyperSparc, the page color of DVMA
	 * and physical addresses must match.
	 */
	if (srmmu_modtype == HyperSparc)
		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
	else
		iommu->usemap.num_colors = 1;

	printk(KERN_INFO "IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
	       impl, vers, iommu->page_table,
	       (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);

	op->dev.archdata.iommu = iommu;

	if (flush_page_for_dma_global)
		op->dev.dma_ops = &sbus_iommu_dma_gflush_ops;
	else
		op->dev.dma_ops = &sbus_iommu_dma_pflush_ops;
}

static int __init iommu_init(void)
{
	struct device_node *dp;

	for_each_node_by_name(dp, "iommu") {
		struct platform_device *op = of_find_device_by_node(dp);

		sbus_iommu_init(op);
		of_propagate_archdata(op);
	}

	return 0;
}

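/* subsys_initcall: runs after core arch setup, before device drivers probe. */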
subsys_initcall(iommu_init);

/*
 * Flush the iotlb entries to RAM.  This could be better if we didn't
 * have to flush whole pages.
 */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)iopte;
	end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
	start &= PAGE_MASK;
	if (viking_mxcc_present) {
		while (start < end) {
			viking_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	} else if (viking_flush) {
		while (start < end) {
			viking_flush_page(start);
			start += PAGE_SIZE;
		}
	} else {
		while (start < end) {
			__flush_page_to_ram(start);
			start += PAGE_SIZE;
		}
	}
}

static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t len, bool per_page_flush)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	phys_addr_t paddr = page_to_phys(page) + offset;
	unsigned long off = paddr & ~PAGE_MASK;
	unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long pfn = __phys_to_pfn(paddr);
	unsigned int busa, busa0;
	iopte_t *iopte, *iopte0;
	int ioptex, i;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return DMA_MAPPING_ERROR;

	/*
	 * We expect unmapped highmem pages not to be in the cache.
	 * XXX Is this a good assumption?
	 * XXX What if someone else unmaps it here and races us?
	 */
	if (per_page_flush && !PageHighMem(page)) {
		unsigned long vaddr, p;

		vaddr = (unsigned long)page_address(page) + offset;
		for (p = vaddr & PAGE_MASK; p < vaddr + len; p += PAGE_SIZE)
			flush_page_for_dma(p);
	}

	/* page color = pfn of page */
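	/*
	 * As I read bit_map_string_get(), the third argument picks the
	 * allocation's color, so ioptex % num_colors == pfn % num_colors;
	 * on HyperSparc this keeps the DVMA alias and the physical page
	 * in the same cache color (see sbus_iommu_init()).
	 */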
	ioptex = bit_map_string_get(&iommu->usemap, npages, pfn);
	if (ioptex < 0)
		panic("iommu out");
	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
	iopte0 = &iommu->page_table[ioptex];

	busa = busa0;
	iopte = iopte0;
	for (i = 0; i < npages; i++) {
		iopte_val(*iopte) = MKIOPTE(pfn, IOPERM);
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
		iopte++;
		pfn++;
	}

	iommu_flush_iotlb(iopte0, npages);
	return busa0 + off;
}

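/*
 * Two flavors of the map routines: on "gflush" CPUs a single
 * flush_page_for_dma(0) call flushes the whole cache up front, while
 * "pflush" CPUs must flush page by page inside __sbus_iommu_map_page().
 */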
static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
		struct page *page, unsigned long offset, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
	flush_page_for_dma(0);
	return __sbus_iommu_map_page(dev, page, offset, len, false);
}

static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev,
		struct page *page, unsigned long offset, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
	return __sbus_iommu_map_page(dev, page, offset, len, true);
}

static int __sbus_iommu_map_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs,
		bool per_page_flush)
{
	struct scatterlist *sg;
	int j;

	for_each_sg(sgl, sg, nents, j) {
		sg->dma_address = __sbus_iommu_map_page(dev, sg_page(sg),
				sg->offset, sg->length, per_page_flush);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			return 0;
		sg->dma_length = sg->length;
	}

	return nents;
}

static int sbus_iommu_map_sg_gflush(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	flush_page_for_dma(0);
	return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, false);
}

static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, true);
}

static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr,
		size_t len, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned int busa = dma_addr & PAGE_MASK;
	unsigned long off = dma_addr & ~PAGE_MASK;
	unsigned int npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
	unsigned int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	unsigned int i;

	BUG_ON(busa < iommu->start);
	for (i = 0; i < npages; i++) {
		iopte_val(iommu->page_table[ioptex + i]) = 0;
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
	}
	bit_map_clear(&iommu->usemap, ioptex, npages);
}

static void sbus_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sbus_iommu_unmap_page(dev, sg->dma_address, sg->length, dir,
				attrs);
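		/* Poison the handle to catch use after unmap. */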
		sg->dma_address = 0x21212121;
	}
}

#ifdef CONFIG_SBUS
static void *sbus_iommu_alloc(struct device *dev, size_t len,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned long va, addr, page, end, ret;
	iopte_t *iopte = iommu->page_table;
	iopte_t *first;
	int ioptex;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return NULL;

	len = PAGE_ALIGN(len);
	va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
	if (va == 0)
		return NULL;

	addr = ret = sparc_dma_alloc_resource(dev, len);
	if (!addr)
		goto out_free_pages;

	BUG_ON((va & ~PAGE_MASK) != 0);
	BUG_ON((addr & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	/* page color = physical address */
	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
		addr >> PAGE_SHIFT);
	if (ioptex < 0)
		panic("iommu out");

	iopte += ioptex;
	first = iopte;
	end = addr + len;
	while (addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			p4d_t *p4dp;
			pud_t *pudp;
			pmd_t *pmdp;
			pte_t *ptep;

			if (viking_mxcc_present)
				viking_mxcc_flush_page(page);
			else if (viking_flush)
				viking_flush_page(page);
			else
				__flush_page_to_ram(page);

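			/*
			 * Walk init_mm's page table and rewrite the kernel
			 * PTE for this DVMA address so the CPU alias of the
			 * buffer uses dvma_prot (non-cacheable on CPUs that
			 * are not I/O coherent).
			 */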
			pgdp = pgd_offset(&init_mm, addr);
			p4dp = p4d_offset(pgdp, addr);
			pudp = pud_offset(p4dp, addr);
			pmdp = pmd_offset(pudp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
		}
		iopte_val(*iopte++) =
		    MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* P3: why do we need this?
	 *
	 * DAVEM: Because there are several aspects, none of which
	 *        are handled by a single interface.  Some cpus are
	 *        completely not I/O DMA coherent, and some have
	 *        virtually indexed caches.  The driver DMA flushing
	 *        methods handle the former case, but here during
	 *        IOMMU page table modifications, and usage of non-cacheable
	 *        cpu mappings of pages potentially in the cpu caches, we have
	 *        to handle the latter case as well.
	 */
	flush_cache_all();
	iommu_flush_iotlb(first, len >> PAGE_SHIFT);
	flush_tlb_all();
	iommu_invalidate(iommu->regs);

	*dma_handle = iommu->start + (ioptex << PAGE_SHIFT);
	return (void *)ret;

out_free_pages:
	free_pages(va, get_order(len));
	return NULL;
}

static void sbus_iommu_free(struct device *dev, size_t len, void *cpu_addr,
			       dma_addr_t busa, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	iopte_t *iopte = iommu->page_table;
	struct page *page = virt_to_page(cpu_addr);
	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	unsigned long end;

	if (!sparc_dma_free_resource(cpu_addr, len))
		return;

	BUG_ON((busa & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	iopte += ioptex;
	end = busa + len;
	while (busa < end) {
		iopte_val(*iopte++) = 0;
		busa += PAGE_SIZE;
	}
	flush_tlb_all();
	iommu_invalidate(iommu->regs);
	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);

	__free_pages(page, get_order(len));
}
#endif

static const struct dma_map_ops sbus_iommu_dma_gflush_ops = {
#ifdef CONFIG_SBUS
	.alloc			= sbus_iommu_alloc,
	.free			= sbus_iommu_free,
#endif
	.map_page		= sbus_iommu_map_page_gflush,
	.unmap_page		= sbus_iommu_unmap_page,
	.map_sg			= sbus_iommu_map_sg_gflush,
	.unmap_sg		= sbus_iommu_unmap_sg,
};

static const struct dma_map_ops sbus_iommu_dma_pflush_ops = {
#ifdef CONFIG_SBUS
	.alloc			= sbus_iommu_alloc,
	.free			= sbus_iommu_free,
#endif
	.map_page		= sbus_iommu_map_page_pflush,
	.unmap_page		= sbus_iommu_unmap_page,
	.map_sg			= sbus_iommu_map_sg_pflush,
	.unmap_sg		= sbus_iommu_unmap_sg,
};

void __init ld_mmu_iommu(void)
{
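	/*
	 * Cacheable consistent mappings are safe on Viking+MXCC (which
	 * snoops DMA) and on HyperSparc (kept coherent by the page
	 * coloring above); everything else gets non-cacheable PTEs and
	 * ioptes.
	 */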
	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
	} else {
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
	}
}