/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
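
/*
 * Typical use from a driver, via the ioremap()/iounmap() wrappers in
 * asm/io.h (a minimal sketch with made-up device addresses and register
 * offsets, not part of this file):
 *
 *	void __iomem *regs = ioremap(0x40000000, SZ_4K);
 *	u32 status;
 *
 *	if (regs) {
 *		writel(1, regs + 0x04);
 *		status = readl(regs + 0x08);
 *		iounmap(regs);
 *	}
 */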
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

/*
 * Used by ioremap() and iounmap() code to mark (super)section-mapped
 * I/O regions in vm_struct->flags field.
 */
#define VM_ARM_SECTION_MAPPING	0x80000000

static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
			  unsigned long phys_addr, pgprot_t prot)
{
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;

	do {
		if (!pte_none(*pte))
			goto bad;

		set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot), 0);
		phys_addr += PAGE_SIZE;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;

 bad:
	printk(KERN_CRIT "remap_area_pte: page already exists\n");
	BUG();
}

static inline int remap_area_pmd(pgd_t *pgd, unsigned long addr,
				 unsigned long end, unsigned long phys_addr,
				 pgprot_t prot)
{
	unsigned long next;
	pmd_t *pmd;
	int ret = 0;

	pmd = pmd_alloc(&init_mm, pgd, addr);
	if (!pmd)
		return -ENOMEM;

	do {
		next = pmd_addr_end(addr, end);
		ret = remap_area_pte(pmd, addr, next, phys_addr, prot);
		if (ret)
			return ret;
		phys_addr += next - addr;
	} while (pmd++, addr = next, addr != end);
	return ret;
}

static int remap_area_pages(unsigned long start, unsigned long pfn,
			    unsigned long size, unsigned long flags)
{
	unsigned long addr = start;
	unsigned long next, end = start + size;
	unsigned long phys_addr = __pfn_to_phys(pfn);
	pgprot_t prot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | L_PTE_WRITE | flags);
	pgd_t *pgd;
	int err = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = remap_area_pmd(pgd, addr, next, phys_addr, prot);
		if (err)
			break;
		phys_addr += next - addr;
	} while (pgd++, addr = next, addr != end);

	return err;
}
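
/*
 * A worked example of the walk above (illustrative values only):
 * remapping SZ_8K that does not cross a 2MB pgd boundary makes a
 * single pass through the pgd and pmd loops, since pgd_addr_end() and
 * pmd_addr_end() clamp "next" to "end"; remap_area_pte() then writes
 * two PTEs, advancing phys_addr by PAGE_SIZE for each.
 */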
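/*
 * Each task's pgd carries a copy of the kernel's entries for the
 * vmalloc/ioremap region; init_mm.context.kvm_seq (a kernel virtual
 * mappings sequence counter, nothing to do with KVM virtualisation)
 * is bumped whenever those entries change.  When a stale mm is
 * detected, this re-copies the vmalloc-region pgd entries from
 * init_mm, retrying until the sequence number is stable.
 */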
void __check_kvm_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.kvm_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}

#ifndef CONFIG_SMP
/*
 * Section support is unsafe on SMP: if you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (e.g.) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area() allocates a guard 4K page, so we need to mask
 * the size back to 1MB aligned or we will overflow in the loop below.
 * For example, a 2MB mapping freed via __iounmap() arrives here as
 * 2MB + 4KB including the guard page; masking with ~(SZ_1M - 1)
 * restores 2MB.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset(pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    unsigned long size, unsigned long flags)
{
	unsigned long prot, addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO) |
	       (flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));

	/*
	 * ARMv6 and above need XN set to prevent speculative prefetches
	 * hitting IO.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6)
		prot |= PMD_SECT_XN;

	pgd = pgd_offset_k(addr);
	do {
		pmd_t *pmd = pmd_offset(pgd, addr);

		/*
		 * Each pgd entry spans 2MB, which the hardware sees as
		 * a pair of 1MB sections, so fill both pmd slots.
		 */
		pmd[0] = __pmd(__pfn_to_phys(pfn) | prot);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | prot);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	return 0;
}

static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 unsigned long size, unsigned long flags)
{
	unsigned long prot, addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	prot = PMD_TYPE_SECT | PMD_SECT_SUPER | PMD_SECT_AP_WRITE |
			PMD_DOMAIN(DOMAIN_IO) |
			(flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));

	/*
	 * ARMv6 and above need XN set to prevent speculative prefetches
	 * hitting IO.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6)
		prot |= PMD_SECT_XN;

	pgd = pgd_offset_k(virt);
	do {
		unsigned long super_pmd_val, i;

		/*
		 * Supersection descriptors carry bits [35:32] of the
		 * physical address in bits [23:20].
		 */
		super_pmd_val = __pfn_to_phys(pfn) | prot;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		/*
		 * A supersection covers 16MB, i.e. eight 2MB pgd
		 * entries, each holding the same descriptor.
		 */
		for (i = 0; i < 8; i++) {
			pmd_t *pmd = pmd_offset(pgd, addr);

			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PGDIR_SIZE;
			pgd++;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif
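
/*
 * Summary of how __ioremap_pfn() below picks a mapping type
 * (illustrative values): on a UP kernel with DOMAIN_IO == 0, a 16MB
 * request whose physical and virtual addresses are 16MB aligned uses
 * supersections on ARMv6+ (with CR_XP set) or XSc3; a request with
 * 2MB-aligned addresses and size uses 1MB sections; everything else,
 * and every SMP kernel, falls back to page-sized PTE mappings.
 */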
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 *
 * 'flags' are the extra L_PTE_ flags that you want to specify for this
 * mapping.  See include/asm-arm/pgtable.h for more information.
 */
void __iomem *
__ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
	      unsigned long flags)
{
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	size = PAGE_ALIGN(size);

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, flags);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, flags);
	} else
#endif
		err = remap_area_pages(addr, pfn, size, flags);

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *)(offset + addr);
}
EXPORT_SYMBOL(__ioremap_pfn);
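
/*
 * Example (an illustrative sketch with a made-up pfn): mapping 16MB of
 * device memory above the 32-bit boundary on a UP ARMv6/XSc3 kernel,
 * where pfn 0x100000 is physical address 4GB and must be supersection
 * (16MB) aligned:
 *
 *	void __iomem *base = __ioremap_pfn(0x100000, 0, SZ_16M, 0);
 */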
void __iomem *
__ioremap(unsigned long phys_addr, size_t size, unsigned long flags)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Page align the mapping size
	 */
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	return __ioremap_pfn(pfn, offset, size, flags);
}
EXPORT_SYMBOL(__ioremap);
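
/*
 * The sub-page offset is preserved for the caller: for example (with a
 * made-up address), __ioremap(0x40000010, 8, 0) computes offset 0x10,
 * rounds the mapping out to a full page, and returns the mapped base
 * plus 0x10, so the returned pointer hits the requested physical byte.
 */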

void __iounmap(volatile void __iomem *addr)
{
#ifndef CONFIG_SMP
	struct vm_struct **p, *tmp;
#endif
	unsigned int section_mapping = 0;

	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long)addr);

#ifndef CONFIG_SMP
	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast. We need the lock here because we need to clear
	 * all the mappings before the area can be reclaimed
	 * by someone else.
	 */
	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p); p = &tmp->next) {
		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
				*p = tmp->next;
				unmap_area_sections((unsigned long)tmp->addr,
						    tmp->size);
				kfree(tmp);
				section_mapping = 1;
			}
			break;
		}
	}
	write_unlock(&vmlist_lock);
#endif

	if (!section_mapping)
		vunmap((void __force *)addr);
}
EXPORT_SYMBOL(__iounmap);