// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/m68k/mm/kmap.c
 *
 *  Copyright (C) 1997 Roman Hodek
 *
 *  10/01/99 cleaned up the code and changed to the same interface
 *	     used by other architectures		/Roman Zippel
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/io.h>

#undef DEBUG

#define PTRTREESIZE	(256*1024)

/*
 * For 040/060 we can use the virtual memory area like other architectures,
 * but for 020/030 we want to use early termination page descriptors and we
 * can't mix these with normal page descriptors, so we have to copy that code
 * (mm/vmalloc.c) and return appropriately aligned addresses.
 */
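
/*
 * On 020/030 each early termination descriptor maps a whole 256 KiB
 * chunk (PTRTREESIZE), so in that configuration IO_SIZE below is also
 * 256 KiB and every I/O area is carved out in 256 KiB pieces.
 */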

#ifdef CPU_M68040_OR_M68060_ONLY

#define IO_SIZE		PAGE_SIZE

static inline struct vm_struct *get_io_area(unsigned long size)
{
	return get_vm_area(size, VM_IOREMAP);
}

static inline void free_io_area(void *addr)
{
	vfree((void *)(PAGE_MASK & (unsigned long)addr));
}

#else

#define IO_SIZE		(256*1024)

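/* Address-sorted list of the I/O areas allocated in [KMAP_START, KMAP_END). */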
static struct vm_struct *iolist;

/*
 * __free_io_area unmaps nearly everything, so be careful.
 * Currently it doesn't free the pointer/page tables anymore, but that
 * wasn't used anyway and might be added later.
 */
static void __free_io_area(void *addr, unsigned long size)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		if (pgd_bad(*pgd_dir)) {
			printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
			pgd_clear(pgd_dir);
			return;
		}
		pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
			int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;

			if (pmd_type == _PAGE_PRESENT) {
				pmd_dir->pmd[pmd_off] = 0;
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			} else if (pmd_type == 0) {
				/* empty slot: skip this chunk, otherwise we
				 * would loop forever without advancing */
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			}
		}

		if (pmd_bad(*pmd_dir)) {
			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = 0;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}

static struct vm_struct *get_io_area(unsigned long size)
{
	unsigned long addr;
	struct vm_struct **p, *tmp, *area;

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return NULL;
	addr = KMAP_START;
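	/*
	 * First-fit search: walk the address-sorted iolist and take the
	 * first gap between existing areas that is large enough.
	 */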
	for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
		if (size + addr < (unsigned long)tmp->addr)
			break;
		if (addr > KMAP_END-size) {
			kfree(area);
			return NULL;
		}
		addr = tmp->size + (unsigned long)tmp->addr;
	}
	area->addr = (void *)addr;
	area->size = size + IO_SIZE;
	area->next = *p;
	*p = area;
	return area;
}

static inline void free_io_area(void *addr)
{
	struct vm_struct **p, *tmp;

	if (!addr)
		return;
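	/*
	 * The address may include the offset into the area that
	 * __ioremap() returned; areas are IO_SIZE aligned, so round
	 * down to recover the area base.
	 */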
	addr = (void *)((unsigned long)addr & -IO_SIZE);
	for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;
			/* remove gap added in get_io_area() */
			__free_io_area(tmp->addr, tmp->size - IO_SIZE);
			kfree(tmp);
			return;
		}
	}
}

#endif

/*
 * Map some physical address range into the kernel address space.
 * Rewritten by Andreas Schwab to remove all races.
 */

void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
	struct vm_struct *area;
	unsigned long virtaddr, retaddr;
	long offset;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	/*
	 * Don't allow mappings that wrap..
	 */
	if (!size || physaddr > (unsigned long)(-size))
		return NULL;

#ifdef CONFIG_AMIGA
	if (MACH_IS_AMIGA) {
		if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
		    && (cacheflag == IOMAP_NOCACHE_SER))
			return (void __iomem *)physaddr;
	}
#endif
#ifdef CONFIG_COLDFIRE
	if (__cf_internalio(physaddr))
		return (void __iomem *) physaddr;
#endif

#ifdef DEBUG
	printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
#endif
	/*
	 * Mappings have to be aligned
	 */
	offset = physaddr & (IO_SIZE - 1);
	physaddr &= -IO_SIZE;
	size = (size + offset + IO_SIZE - 1) & -IO_SIZE;
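	/*
	 * Worked example (020/030, IO_SIZE = 256 KiB = 0x40000): for
	 * physaddr = 0x40023456 and size = 0x1000 this yields
	 * offset = 0x23456, physaddr = 0x40000000 and size = 0x40000,
	 * i.e. one whole 256 KiB chunk covering the requested range.
	 */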

	/*
	 * Ok, go for it..
	 */
	area = get_io_area(size);
	if (!area)
		return NULL;

	virtaddr = (unsigned long)area->addr;
	retaddr = virtaddr + offset;
#ifdef DEBUG
	printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
#endif

	/*
	 * add cache and table flags to physical address
	 */
	if (CPU_IS_040_OR_060) {
		physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
			     _PAGE_ACCESSED | _PAGE_DIRTY);
		switch (cacheflag) {
		case IOMAP_FULL_CACHING:
			physaddr |= _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			physaddr |= _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			physaddr |= _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			physaddr |= _PAGE_CACHE040W;
			break;
		}
	} else {
		physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED |
			     _PAGE_DIRTY | _PAGE_READWRITE);
		switch (cacheflag) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			physaddr |= _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			break;
		}
	}

	while ((long)size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PTRTREESIZE-1)))
			printk("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
		if (!pmd_dir) {
			printk("ioremap: no mem for pmd_dir\n");
			return NULL;
		}

		if (CPU_IS_020_OR_030) {
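			/*
			 * 020/030: point one early termination descriptor
			 * at the (already aligned) physical address; this
			 * maps a whole 256 KiB chunk in one go.
			 */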
			pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
			physaddr += PTRTREESIZE;
			virtaddr += PTRTREESIZE;
			size -= PTRTREESIZE;
		} else {
			pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
			if (!pte_dir) {
				printk("ioremap: no mem for pte_dir\n");
				return NULL;
			}

			pte_val(*pte_dir) = physaddr;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
#ifdef DEBUG
	printk("\n");
#endif
	flush_tlb_all();

	return (void __iomem *)retaddr;
}
EXPORT_SYMBOL(__ioremap);
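
/*
 * Usage sketch (illustrative only: the physical address, size and the
 * REG_CTRL offset below are made up):
 *
 *	void __iomem *regs = __ioremap(0x80000000, 0x2000, IOMAP_NOCACHE_SER);
 *
 *	if (regs) {
 *		writeb(1, regs + REG_CTRL);	(or any other MMIO accessor)
 *		iounmap(regs);
 *	}
 */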

/*
 * Unmap an ioremap()ed region again
 */
void iounmap(void __iomem *addr)
{
#ifdef CONFIG_AMIGA
	if ((!MACH_IS_AMIGA) ||
	    (((unsigned long)addr < 0x40000000) ||
	     ((unsigned long)addr > 0x60000000)))
		free_io_area((__force void *)addr);
#else
#ifdef CONFIG_COLDFIRE
	if (cf_internalio(addr))
		return;
#endif
	free_io_area((__force void *)addr);
#endif
}
EXPORT_SYMBOL(iounmap);

/*
 * Set new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	if (CPU_IS_040_OR_060) {
		switch (cmode) {
		case IOMAP_FULL_CACHING:
			cmode = _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			cmode = _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			cmode = _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			cmode = _PAGE_CACHE040W;
			break;
		}
	} else {
		switch (cmode) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			cmode = _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			cmode = 0;
			break;
		}
	}

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		if (pgd_bad(*pgd_dir)) {
			printk("iocachemode: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
			pgd_clear(pgd_dir);
			return;
		}
		pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			int pmd_off = (virtaddr/PTRTREESIZE) & 15;

			if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
				pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
							 _CACHEMASK040) | cmode;
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			}
		}

		if (pmd_bad(*pmd_dir)) {
			printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}
EXPORT_SYMBOL(kernel_set_cachemode);
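
/*
 * Usage sketch (illustrative only; the kernel address and size are made
 * up, and the caller must first push any cached data for the range):
 *
 *	kernel_set_cachemode(vaddr, 0x2000, IOMAP_NOCACHE_SER);
 *
 * switches 8 KiB starting at the kernel address vaddr to serialized
 * non-cached mode.
 */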