xref: /linux/arch/m68k/sun3x/dvma.c (revision 6fdcba32711044c35c0e1b094cbd8f3f0b4472c9)
// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual DMA allocation
 *
 * (C) 1999 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
 *
 * 11/26/2000 -- disabled the existing code because it didn't work for
 * me in 2.4.  Replaced with a significantly more primitive version
 * similar to the sun3 code.  the old functionality was probably more
 * desirable, but....   -- Sam Creasey (sammy@oh.verio.com)
 *
 */
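
/*
 * Overview: on Sun 3x machines devices do their DMA through a DVMA
 * window whose bus addresses are translated by an IOMMU.  The helpers
 * below program that IOMMU one DVMA page at a time (dvma_map_iommu(),
 * dvma_unmap_iommu()) and, with dvma_map_cpu(), also build ordinary
 * kernel page tables so the CPU can reach the same pages through the
 * DVMA addresses.  The expected callers are the shared sun3 DVMA
 * allocator routines.
 */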

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/vmalloc.h>

#include <asm/sun3x.h>
#include <asm/dvma.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>

/* IOMMU support */

#define IOMMU_ADDR_MASK            0x03ffe000
#define IOMMU_CACHE_INHIBIT        0x00000040
#define IOMMU_FULL_BLOCK           0x00000020
#define IOMMU_MODIFIED             0x00000010
#define IOMMU_USED                 0x00000008
#define IOMMU_WRITE_PROTECT        0x00000004
#define IOMMU_DT_MASK              0x00000003
#define IOMMU_DT_INVALID           0x00000000
#define IOMMU_DT_VALID             0x00000001
#define IOMMU_DT_BAD               0x00000002


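/*
 * The IOMMU page table lives at SUN3X_IOMMU; each entry (one unsigned
 * long) maps a single DVMA page.  The bits covered by IOMMU_ADDR_MASK
 * hold the physical page address and the low bits are the control and
 * descriptor-type flags defined above.
 */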
static volatile unsigned long *iommu_pte = (unsigned long *)SUN3X_IOMMU;


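/*
 * Per-entry accessors: dvma_entry_paddr() reads back the physical page
 * an entry points at, dvma_entry_vaddr() reconstructs a DVMA address
 * from an entry index plus the page offset of a physical address, and
 * dvma_entry_set()/dvma_entry_clr() install or invalidate an entry.
 */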
#define dvma_entry_paddr(index)		(iommu_pte[index] & IOMMU_ADDR_MASK)
#define dvma_entry_vaddr(index,paddr)	((index << DVMA_PAGE_SHIFT) |  \
					 (paddr & (DVMA_PAGE_SIZE-1)))
#if 0
#define dvma_entry_set(index,addr)	(iommu_pte[index] =            \
					    (addr & IOMMU_ADDR_MASK) | \
				             IOMMU_DT_VALID | IOMMU_CACHE_INHIBIT)
#else
#define dvma_entry_set(index,addr)	(iommu_pte[index] =            \
					    (addr & IOMMU_ADDR_MASK) | \
				             IOMMU_DT_VALID)
#endif
#define dvma_entry_clr(index)		(iommu_pte[index] = IOMMU_DT_INVALID)
#define dvma_entry_hash(addr)		((addr >> DVMA_PAGE_SHIFT) ^ \
					 ((addr & 0x03c00000) >>     \
						(DVMA_PAGE_SHIFT+4)))
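/*
 * Note: dvma_entry_hash() folds a few higher address bits into the DVMA
 * page index, presumably a leftover from the disabled pre-2.4 allocator
 * mentioned above; it is not referenced anywhere in this file.
 */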

#ifdef DEBUG
/* code to print out a dvma mapping for debugging purposes */
void dvma_print (unsigned long dvma_addr)
{

	unsigned long index;

	index = dvma_addr >> DVMA_PAGE_SHIFT;

	pr_info("idx %lx dvma_addr %08lx paddr %08lx\n", index, dvma_addr,
		dvma_entry_paddr(index));
}
#endif


/* create a virtual mapping for a page assigned within the IOMMU
   so that the cpu can reach it easily */
inline int dvma_map_cpu(unsigned long kaddr,
			       unsigned long vaddr, int len)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	unsigned long end;
	int ret = 0;

	kaddr &= PAGE_MASK;
	vaddr &= PAGE_MASK;

	end = PAGE_ALIGN(vaddr + len);

	pr_debug("dvma: mapping kern %08lx to virt %08lx\n", kaddr, vaddr);
	pgd = pgd_offset_k(vaddr);
	p4d = p4d_offset(pgd, vaddr);
	pud = pud_offset(p4d, vaddr);

	do {
		pmd_t *pmd;
		unsigned long end2;

		if((pmd = pmd_alloc(&init_mm, pud, vaddr)) == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		if((end & PGDIR_MASK) > (vaddr & PGDIR_MASK))
			end2 = (vaddr + (PGDIR_SIZE-1)) & PGDIR_MASK;
		else
			end2 = end;

		do {
			pte_t *pte;
			unsigned long end3;

			if((pte = pte_alloc_kernel(pmd, vaddr)) == NULL) {
				ret = -ENOMEM;
				goto out;
			}

			if((end2 & PMD_MASK) > (vaddr & PMD_MASK))
				end3 = (vaddr + (PMD_SIZE-1)) & PMD_MASK;
			else
				end3 = end2;

			do {
				pr_debug("mapping %08lx phys to %08lx\n",
					 __pa(kaddr), vaddr);
				set_pte(pte, pfn_pte(virt_to_pfn(kaddr),
						     PAGE_KERNEL));
				pte++;
				kaddr += PAGE_SIZE;
				vaddr += PAGE_SIZE;
			} while(vaddr < end3);

		} while(vaddr < end2);

	} while(vaddr < end);

	flush_tlb_all();

 out:
	return ret;
}


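/*
 * Point the IOMMU entries for the bus-address range [baddr, baddr+len)
 * at the physical pages behind kaddr, so a device doing DMA to baddr
 * lands in that kernel buffer.
 */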
inline int dvma_map_iommu(unsigned long kaddr, unsigned long baddr,
				 int len)
{
	unsigned long end, index;

	index = baddr >> DVMA_PAGE_SHIFT;
	end = ((baddr+len) >> DVMA_PAGE_SHIFT);

	if(len & ~DVMA_PAGE_MASK)
		end++;

	for(; index < end ; index++) {
//		if(dvma_entry_use(index))
//			BUG();
//		pr_info("mapping pa %lx to ba %lx\n", __pa(kaddr),
//			index << DVMA_PAGE_SHIFT);

		dvma_entry_set(index, __pa(kaddr));

		iommu_pte[index] |= IOMMU_FULL_BLOCK;
//		dvma_entry_inc(index);

		kaddr += DVMA_PAGE_SIZE;
	}

#ifdef DEBUG
	for(index = (baddr >> DVMA_PAGE_SHIFT); index < end; index++)
		dvma_print(index << DVMA_PAGE_SHIFT);
#endif
	return 0;

}

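/*
 * Invalidate the IOMMU entries covering [baddr, baddr+len) again once
 * the device is finished with the buffer.
 */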
void dvma_unmap_iommu(unsigned long baddr, int len)
{

	int index, end;


	index = baddr >> DVMA_PAGE_SHIFT;
	end = (DVMA_PAGE_ALIGN(baddr+len) >> DVMA_PAGE_SHIFT);

	for(; index < end ; index++) {
		pr_debug("freeing bus mapping %08x\n",
			 index << DVMA_PAGE_SHIFT);
#if 0
		if(!dvma_entry_use(index))
			pr_info("dvma_unmap freeing unused entry %04x\n",
				index);
		else
			dvma_entry_dec(index);
#endif
		dvma_entry_clr(index);
	}

}
203