// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/m68k/mm/kmap.c
 *
 *  Copyright (C) 1997 Roman Hodek
 *
 *  10/01/99 cleaned up the code and changed to the same interface
 *	     used by other architectures		/Roman Zippel
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/io.h>

#undef DEBUG

#define PTRTREESIZE	(256*1024)

/*
 * For 040/060 we can use the virtual memory area like other architectures,
 * but for 020/030 we want to use early termination page descriptors and we
 * can't mix this with normal page descriptors, so we have to copy that code
 * (mm/vmalloc.c) and return appropriately aligned addresses.
 */
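/*
 * On the 020/030, an early termination page descriptor stored directly in a
 * pointer-table entry maps the whole 256 KiB range that entry covers, with
 * no page table behind it.  That is why PTRTREESIZE (and IO_SIZE below) is
 * 256 KiB: ioremap on those CPUs works in 256 KiB steps.
 */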

#ifdef CPU_M68040_OR_M68060_ONLY

#define IO_SIZE		PAGE_SIZE

static inline struct vm_struct *get_io_area(unsigned long size)
{
	return get_vm_area(size, VM_IOREMAP);
}


static inline void free_io_area(void *addr)
{
	vfree((void *)(PAGE_MASK & (unsigned long)addr));
}

#else

#define IO_SIZE		(256*1024)

static struct vm_struct *iolist;

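/*
 * iolist is a singly linked list of the allocated I/O areas, kept sorted by
 * ascending virtual address.  get_io_area() does a first-fit search in the
 * KMAP_START..KMAP_END window; each area is padded by one extra IO_SIZE
 * "guard" chunk (see area->size below), which free_io_area() subtracts
 * again before unmapping.
 */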
static struct vm_struct *get_io_area(unsigned long size)
{
	unsigned long addr;
	struct vm_struct **p, *tmp, *area;

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return NULL;
	addr = KMAP_START;
	for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
		if (size + addr < (unsigned long)tmp->addr)
			break;
		if (addr > KMAP_END-size) {
			kfree(area);
			return NULL;
		}
		addr = tmp->size + (unsigned long)tmp->addr;
	}
	area->addr = (void *)addr;
	area->size = size + IO_SIZE;
	area->next = *p;
	*p = area;
	return area;
}

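/*
 * Note that __ioremap() returns area->addr plus the offset of physaddr into
 * the first IO_SIZE chunk, so free_io_area() first rounds the address back
 * down to an IO_SIZE boundary before searching iolist.
 */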
static inline void free_io_area(void *addr)
{
	struct vm_struct **p, *tmp;

	if (!addr)
		return;
	addr = (void *)((unsigned long)addr & -IO_SIZE);
	for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;
			/* remove gap added in get_io_area() */
			__iounmap(tmp->addr, tmp->size - IO_SIZE);
			kfree(tmp);
			return;
		}
	}
}

#endif

/*
 * Map some physical address range into the kernel address space.
 */
/* Rewritten by Andreas Schwab to remove all races. */

void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
	struct vm_struct *area;
	unsigned long virtaddr, retaddr;
	long offset;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	/*
	 * Don't allow mappings that wrap..
	 */
	if (!size || physaddr > (unsigned long)(-size))
		return NULL;

#ifdef CONFIG_AMIGA
	if (MACH_IS_AMIGA) {
		if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
		    && (cacheflag == IOMAP_NOCACHE_SER))
			return (void __iomem *)physaddr;
	}
#endif
#ifdef CONFIG_COLDFIRE
	if (__cf_internalio(physaddr))
		return (void __iomem *) physaddr;
#endif

#ifdef DEBUG
	printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
#endif
	/*
	 * Mappings have to be aligned
	 */
	offset = physaddr & (IO_SIZE - 1);
	physaddr &= -IO_SIZE;
	size = (size + offset + IO_SIZE - 1) & -IO_SIZE;
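	/*
	 * Example (020/030, IO_SIZE = 256K): physaddr 0x02041234 with size
	 * 0x100 gives offset 0x1234, physaddr rounded down to 0x02040000 and
	 * size rounded up to 0x40000, i.e. one full descriptor chunk.
	 */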

	/*
	 * Ok, go for it..
	 */
	area = get_io_area(size);
	if (!area)
		return NULL;

	virtaddr = (unsigned long)area->addr;
	retaddr = virtaddr + offset;
#ifdef DEBUG
	printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
#endif

	/*
	 * add cache and table flags to physical address
	 */
	if (CPU_IS_040_OR_060) {
		physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
			     _PAGE_ACCESSED | _PAGE_DIRTY);
		switch (cacheflag) {
		case IOMAP_FULL_CACHING:
			physaddr |= _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			physaddr |= _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			physaddr |= _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			physaddr |= _PAGE_CACHE040W;
			break;
		}
	} else {
		physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED |
			     _PAGE_DIRTY | _PAGE_READWRITE);
		switch (cacheflag) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			physaddr |= _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			break;
		}
	}
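
	/*
	 * physaddr now carries the protection and cache bits as well, so it
	 * can be written below directly as an early termination descriptor
	 * (020/030) or as a pte value (040/060), advancing in PTRTREESIZE or
	 * PAGE_SIZE steps respectively.
	 */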

	while ((long)size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PTRTREESIZE-1)))
			printk("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
		if (!pmd_dir) {
			printk("ioremap: no mem for pmd_dir\n");
			/*
			 * FIXME: the area returned by get_io_area() is leaked
			 * here and in the pte_alloc_kernel() failure path
			 * below.
			 */
			return NULL;
		}

		if (CPU_IS_020_OR_030) {
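			/*
			 * Each pmd_t here groups 16 pointer-table entries of
			 * PTRTREESIZE (256 KiB) each; physaddr is stored as
			 * an early termination descriptor, mapping 256 KiB
			 * in one go.
			 */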
			pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
			physaddr += PTRTREESIZE;
			virtaddr += PTRTREESIZE;
			size -= PTRTREESIZE;
		} else {
			pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
			if (!pte_dir) {
				printk("ioremap: no mem for pte_dir\n");
				return NULL;
			}

			pte_val(*pte_dir) = physaddr;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
#ifdef DEBUG
	printk("\n");
#endif
	flush_tlb_all();

	return (void __iomem *)retaddr;
}
EXPORT_SYMBOL(__ioremap);
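
/*
 * Minimal usage sketch (the physical address is made up for illustration):
 *
 *	void __iomem *regs = __ioremap(0x00d80000, 0x1000, IOMAP_NOCACHE_SER);
 *	if (regs) {
 *		u8 status = readb(regs);
 *		...
 *		iounmap(regs);
 *	}
 *
 * Most callers should use the ioremap()/ioremap_nocache() wrappers from
 * asm/io.h rather than calling __ioremap() directly.
 */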

/*
 * Unmap an ioremap()ed region again.
 */
void iounmap(void __iomem *addr)
{
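	/*
	 * On Amiga, addresses in the 0x40000000..0x60000000 range may have
	 * been returned directly by __ioremap() without an iolist entry, so
	 * they must not be passed to free_io_area().
	 */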
#ifdef CONFIG_AMIGA
	if ((!MACH_IS_AMIGA) ||
	    (((unsigned long)addr < 0x40000000) ||
	     ((unsigned long)addr > 0x60000000)))
		free_io_area((__force void *)addr);
#else
#ifdef CONFIG_COLDFIRE
	if (cf_internalio(addr))
		return;
#endif
	free_io_area((__force void *)addr);
#endif
}
EXPORT_SYMBOL(iounmap);

/*
 * __iounmap() unmaps nearly everything, so be careful.
 * It currently doesn't free pointer/page tables any more, but this wasn't
 * used anyway and might be added later.
 */
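/*
 * In this file, __iounmap() is called from free_io_area() above on 020/030
 * (the size passed there already has the IO_SIZE guard gap removed).  It
 * works in PTRTREESIZE steps for early termination descriptors and in
 * page-sized steps otherwise.
 */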
void __iounmap(void *addr, unsigned long size)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		if (pgd_bad(*pgd_dir)) {
			printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
			pgd_clear(pgd_dir);
			return;
		}
		pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
			int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;

			if (pmd_type == _PAGE_PRESENT) {
				pmd_dir->pmd[pmd_off] = 0;
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			} else if (pmd_type == 0) {
				/*
				 * Nothing mapped here; skip the whole chunk,
				 * as a bare continue would loop forever.
				 */
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			}
		}

		if (pmd_bad(*pmd_dir)) {
			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = 0;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}

/*
 * Set new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	if (CPU_IS_040_OR_060) {
		switch (cmode) {
		case IOMAP_FULL_CACHING:
			cmode = _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			cmode = _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			cmode = _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			cmode = _PAGE_CACHE040W;
			break;
		}
	} else {
		switch (cmode) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			cmode = _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			cmode = 0;
			break;
		}
	}
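
	/*
	 * cmode now holds the raw cache bits; below they replace the old
	 * cache bits of each descriptor via the _CACHEMASK040 mask (which
	 * also covers the 030 nocache bit).
	 */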

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		if (pgd_bad(*pgd_dir)) {
			printk("iocachemode: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
			pgd_clear(pgd_dir);
			return;
		}
		pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			int pmd_off = (virtaddr/PTRTREESIZE) & 15;

			if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
				pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
							 _CACHEMASK040) | cmode;
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			}
		}

		if (pmd_bad(*pmd_dir)) {
			printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}
EXPORT_SYMBOL(kernel_set_cachemode);
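
/*
 * Usage sketch, with made-up names: after pushing any dirty data for the
 * range (e.g. with cache_push() on the underlying physical range), a driver
 * could switch a frame buffer to writethrough caching:
 *
 *	kernel_set_cachemode(fb_virt, fb_size, IOMAP_WRITETHROUGH);
 */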