/* $Id: io-unit.c,v 1.24 2001/12/17 07:05:09 davem Exp $
 * io-unit.c:  IO-UNIT specific routines for memory management.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/bitops.h>

#include <asm/scatterlist.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sbus.h>
#include <asm/io.h>
#include <asm/io-unit.h>
#include <asm/mxcc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>

/* #define IOUNIT_DEBUG */
#ifdef IOUNIT_DEBUG
#define IOD(x) printk(x)
#else
#define IOD(x) do { } while (0)
#endif

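/*
 * As used throughout this file, an IO-UNIT pte holds the target physical
 * address shifted right by four bits (the field IOUPTE_PAGE masks), so
 * MKIOPTE() turns a physical address into a pte and ORs in the
 * cacheable/writable/valid permission bits.
 */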
#define IOPERM        (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)

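/*
 * Probe and initialise one IO-UNIT: allocate its iounit_struct, read the
 * SBI node's "reg" property to locate the External Page Table (XPT), map
 * the XPT and clear all of its ioptes.
 */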
void __init
iounit_init(int sbi_node, int io_node, struct sbus_bus *sbus)
{
	iopte_t *xpt, *xptend;
	struct iounit_struct *iounit;
	struct linux_prom_registers iommu_promregs[PROMREG_MAX];
	struct resource r;

	iounit = kmalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
	if (!iounit)
		panic("iounit_init: cannot allocate struct iounit_struct");

	memset(iounit, 0, sizeof(*iounit));
	iounit->limit[0] = IOUNIT_BMAP1_START;
	iounit->limit[1] = IOUNIT_BMAP2_START;
	iounit->limit[2] = IOUNIT_BMAPM_START;
	iounit->limit[3] = IOUNIT_BMAPM_END;
	iounit->rotor[1] = IOUNIT_BMAP2_START;
	iounit->rotor[2] = IOUNIT_BMAPM_START;

	xpt = NULL;
	if (prom_getproperty(sbi_node, "reg", (void *) iommu_promregs,
			     sizeof(iommu_promregs)) != -1) {
		prom_apply_generic_ranges(io_node, 0, iommu_promregs, 3);
		memset(&r, 0, sizeof(r));
		r.flags = iommu_promregs[2].which_io;
		r.start = iommu_promregs[2].phys_addr;
		xpt = (iopte_t *) sbus_ioremap(&r, 0, PAGE_SIZE * 16, "XPT");
	}
	if (!xpt)
		panic("Cannot map External Page Table.");

	sbus->iommu = (struct iommu_struct *)iounit;
	iounit->page_table = xpt;

	for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
	     xpt < xptend;)
		iopte_val(*xpt++) = 0;
}

/* One has to hold iounit->lock to call this */
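/*
 * Allocation strategy: the bitmap is split into three areas, where area j
 * spans limit[j-1]..limit[j] and rotor[j-1] remembers where the previous
 * search in that area stopped (next-fit).  The nibble-encoded constant 'i'
 * below lists, least significant nibble first, which areas to try and in
 * what order; a zero nibble ends the search.  0x0231, for instance, means
 * "try area 1, then 3, then 2".
 */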
static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	iopte_t iopte;

	npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;

	/* A tiny bit of magic ingredient :) */
	switch (npages) {
	case 1: i = 0x0231; break;
	case 2: i = 0x0132; break;
	default: i = 0x0213; break;
	}

	IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));

next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];
	limit = iounit->limit[j];
	scan = rotor;
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		if (limit != rotor) {
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		i >>= 4;
		if (!(i & 15))
			panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
		goto next;
	}
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;
	iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
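	/* Each further page bumps the pte's page field by PAGE_SIZE >> 4 (0x100 with 4 KB pages). */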
	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
		set_bit(scan, iounit->bmap);
		iounit->page_table[scan] = iopte;
	}
	IOD(("%08lx\n", vaddr));
	return vaddr;
}

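/*
 * mmu_get_scsi_one() for sun4d: map a single kernel buffer for SBus DMA
 * and return the DVMA address the device should use.
 */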
static __u32 iounit_get_scsi_one(char *vaddr, unsigned long len, struct sbus_bus *sbus)
{
	unsigned long ret, flags;
	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;

	spin_lock_irqsave(&iounit->lock, flags);
	ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
	spin_unlock_irqrestore(&iounit->lock, flags);
	return ret;
}

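/*
 * mmu_get_scsi_sgl() for sun4d: map each scatterlist entry and record the
 * resulting DVMA address and length in the entry itself.
 */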
static void iounit_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
	unsigned long flags;
	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;

	/* FIXME: Cache some resolved pages - often several sg entries are to the same page */
	spin_lock_irqsave(&iounit->lock, flags);
	while (sz != 0) {
		--sz;
		sg[sz].dvma_address = iounit_get_area(iounit, (unsigned long)page_address(sg[sz].page) + sg[sz].offset, sg[sz].length);
		sg[sz].dvma_length = sg[sz].length;
	}
	spin_unlock_irqrestore(&iounit->lock, flags);
}

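/*
 * Undo iounit_get_scsi_one(): convert the DVMA address back into a bitmap
 * index and clear the allocation bits.  The ioptes are left in place; the
 * bitmap alone marks the range as free again.
 */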
static void iounit_release_scsi_one(__u32 vaddr, unsigned long len, struct sbus_bus *sbus)
{
	unsigned long flags;
	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;

	spin_lock_irqsave(&iounit->lock, flags);
	len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
	vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
	IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
	for (len += vaddr; vaddr < len; vaddr++)
		clear_bit(vaddr, iounit->bmap);
	spin_unlock_irqrestore(&iounit->lock, flags);
}

static void iounit_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
	unsigned long flags;
	unsigned long vaddr, len;
	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;

	spin_lock_irqsave(&iounit->lock, flags);
	while (sz != 0) {
		--sz;
		len = ((sg[sz].dvma_address & ~PAGE_MASK) + sg[sz].length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
		vaddr = (sg[sz].dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
		IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
		for (len += vaddr; vaddr < len; vaddr++)
			clear_bit(vaddr, iounit->bmap);
	}
	spin_unlock_irqrestore(&iounit->lock, flags);
}

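/*
 * Make the kernel buffer at 'va' reachable through DVMA address 'addr':
 * for every page, point the kernel pte for 'addr' at the page backing 'va'
 * (using dvma_prot) and mirror the mapping into the external page table of
 * every SBus IO-UNIT in the system.
 */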
#ifdef CONFIG_SBUS
static int iounit_map_dma_area(dma_addr_t *pba, unsigned long va, __u32 addr, int len)
{
	unsigned long page, end;
	pgprot_t dvma_prot;
	iopte_t *iopte;
	struct sbus_bus *sbus;

	*pba = addr;

	dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
	end = PAGE_ALIGN((addr + len));
	while (addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;
			long i;

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));

			i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);

			for_each_sbus(sbus) {
				struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;

				iopte = (iopte_t *)(iounit->page_table + i);
				*iopte = MKIOPTE(__pa(page));
			}
		}
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	flush_cache_all();
	flush_tlb_all();

	return 0;
}

static void iounit_unmap_dma_area(unsigned long addr, int len)
{
	/* XXX Somebody please fill this in */
}

/* XXX We do not pass sbus device here, bad. */
static struct page *iounit_translate_dvma(unsigned long addr)
{
	struct sbus_bus *sbus = sbus_root;	/* They are all the same */
	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
	int i;
	iopte_t *iopte;

	i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
	iopte = (iopte_t *)(iounit->page_table + i);
	return pfn_to_page(iopte_val(*iopte) >> (PAGE_SHIFT-4)); /* XXX sun4d guru, help */
}
#endif

static char *iounit_lockarea(char *vaddr, unsigned long len)
{
/* FIXME: Write this */
	return vaddr;
}

static void iounit_unlockarea(char *vaddr, unsigned long len)
{
/* FIXME: Write this */
}

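/*
 * Hook the IO-UNIT routines into the generic mmu_* entry points via
 * btfixup; called during early boot on sun4d machines.
 */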
void __init ld_mmu_iounit(void)
{
	BTFIXUPSET_CALL(mmu_lockarea, iounit_lockarea, BTFIXUPCALL_RETO0);
	BTFIXUPSET_CALL(mmu_unlockarea, iounit_unlockarea, BTFIXUPCALL_NOP);

	BTFIXUPSET_CALL(mmu_get_scsi_one, iounit_get_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_get_scsi_sgl, iounit_get_scsi_sgl, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_one, iounit_release_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iounit_release_scsi_sgl, BTFIXUPCALL_NORM);

#ifdef CONFIG_SBUS
	BTFIXUPSET_CALL(mmu_map_dma_area, iounit_map_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_unmap_dma_area, iounit_unmap_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_translate_dvma, iounit_translate_dvma, BTFIXUPCALL_NORM);
#endif
}

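/*
 * Reserve DVMA space for 'size' bytes using the same bitmap search as
 * iounit_get_area(), but only in the default area order and without
 * installing any ioptes; iounit_map_dma_page() below fills them in one
 * page at a time.
 */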
__u32 iounit_map_dma_init(struct sbus_bus *sbus, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	unsigned long flags;
	__u32 ret;
	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;

	npages = (size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
	i = 0x0213;
	spin_lock_irqsave(&iounit->lock, flags);
next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];
	limit = iounit->limit[j];
	scan = rotor;
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		if (limit != rotor) {
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		i >>= 4;
		if (!(i & 15))
			panic("iounit_map_dma_init: Couldn't find free iopte slots for %d bytes\n", size);
		goto next;
	}
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;
	ret = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT);
	for (k = 0; k < npages; k++, scan++)
		set_bit(scan, iounit->bmap);
	spin_unlock_irqrestore(&iounit->lock, flags);
	return ret;
}

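/*
 * Install the iopte for one page of a DVMA range (typically reserved with
 * iounit_map_dma_init() above) and return the DVMA address of 'addr'
 * within it.
 */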
__u32 iounit_map_dma_page(__u32 vaddr, void *addr, struct sbus_bus *sbus)
{
	int scan = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;

	iounit->page_table[scan] = MKIOPTE(__pa(((unsigned long)addr) & PAGE_MASK));
	return vaddr + (((unsigned long)addr) & ~PAGE_MASK);
}