/*
 * arch/sh/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995, 1996 Linus Torvalds
 * (C) Copyright 2005, 2006 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

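/*
 * Fill in one PTE table: map [address, address + size) within a single
 * PMD to the physical pages starting at phys_addr. The pages are made
 * present, writable, dirty and shared, with any caller-supplied flags
 * (typically cache attributes) OR'd into the protection bits. The
 * "address &&" in the loop condition guards against wraparound at the
 * top of the address space.
 */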
static inline void remap_area_pte(pte_t *pte, unsigned long address,
	unsigned long size, unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW |
				   _PAGE_DIRTY | _PAGE_ACCESSED |
				   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk(KERN_ERR "remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}

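/*
 * Fill in the PTE tables covering [address, address + size) within one
 * PGD entry, allocating PTE pages as needed. On entry phys_addr is the
 * physical address corresponding to address; it is biased by -address
 * below so that "address + phys_addr" recovers the physical address as
 * the loop steps through the PMDs.
 */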
static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
	unsigned long size, unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t *pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address,
			       address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

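/*
 * Establish page-table mappings for size bytes of physical memory at
 * phys_addr, starting at the virtual address given. Returns 0 on
 * success or -ENOMEM if a page-table allocation fails. The caches are
 * flushed up front and the TLB is flushed once the tables have been
 * updated.
 */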
int remap_area_pages(unsigned long address, unsigned long phys_addr,
		     unsigned long size, unsigned long flags)
{
	int error;
	pgd_t *dir;
	unsigned long end = address + size;

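	/*
	 * Bias phys_addr by -address so that "phys_addr + address"
	 * yields the right physical address as the loop below steps
	 * address forward one PGD entry at a time.
	 */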
	phys_addr -= address;
	dir = pgd_offset_k(address);
	flush_cache_all();
	if (address >= end)
		BUG();
	do {
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;

		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
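/*
 * For example (hypothetical numbers, assuming 4KB pages): a request for
 * 0x100 bytes at physical 0xfd000345 is turned into a one-page mapping
 * of the page-aligned base 0xfd000000, and the caller gets back the
 * virtual base plus the 0x345 offset.
 */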
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long flags)
{
	struct vm_struct *area;
	unsigned long offset, last_addr, addr, orig_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= 0xA0000 && last_addr < 0x100000)
		return (void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory))
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	orig_addr = addr = (unsigned long)area->addr;

#ifdef CONFIG_32BIT
	/*
	 * First try to remap through the PMB once a valid VMA has been
	 * established. Smaller allocations (or the rest of the size
	 * remaining after a PMB mapping due to the size not being
	 * perfectly aligned on a PMB size boundary) are then mapped
	 * through the UTLB using conventional page tables.
	 *
	 * PMB entries are all pre-faulted.
	 */
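	/* A PMB mapping is only attempted for windows of 16MB (0x1000000) or more. */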
	if (unlikely(size >= 0x1000000)) {
		unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);

		if (likely(mapped)) {
			addr		+= mapped;
			phys_addr	+= mapped;
			size		-= mapped;
		}
	}
#endif

	if (likely(size))
		if (remap_area_pages(addr, phys_addr, size, flags)) {
			vunmap((void *)orig_addr);
			return NULL;
		}

	return (void __iomem *)(offset + (char *)orig_addr);
}
EXPORT_SYMBOL(__ioremap);
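
/*
 * Illustrative usage sketch (the device base address, size and register
 * offset below are hypothetical, not taken from real hardware):
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = __ioremap(0xfd000000, 0x1000, 0);
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + 0x10);
 *	__iounmap(regs);
 *
 * Drivers normally reach this through the ioremap()/iounmap() wrappers
 * in <asm/io.h> rather than calling __ioremap() directly.
 */
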
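/*
 * Tear down a mapping created by __ioremap() and free the associated
 * vm_struct.
 */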
void __iounmap(void __iomem *addr)
{
	unsigned long vaddr = (unsigned long __force)addr;
	struct vm_struct *p;

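	/*
	 * Addresses below P3SEG are in the identity-mapped P1/P2
	 * segments and have no page tables or VMA to tear down.
	 */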
	if (PXSEG(vaddr) < P3SEG)
		return;

#ifdef CONFIG_32BIT
	/*
	 * Purge any PMB entries that may have been established for this
	 * mapping, then proceed with conventional VMA teardown.
	 *
	 * XXX: Note that due to the way that remove_vm_area() does
	 * matching of the resultant VMA, we aren't able to fast-forward
	 * the address past the PMB space until the end of the VMA where
	 * the page tables reside. As such, unmap_vm_area() will be
	 * forced to linearly scan over the area until it finds the page
	 * tables where PTEs that need to be unmapped actually reside,
	 * which is far from optimal. Perhaps we need to use a separate
	 * VMA for the PMB mappings?
	 *					-- PFM.
	 */
	pmb_unmap(vaddr);
#endif

	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
	if (!p) {
		printk(KERN_ERR "%s: bad address %p\n", __func__, addr);
		return;
	}

	kfree(p);
}
EXPORT_SYMBOL(__iounmap);