// SPDX-License-Identifier: GPL-2.0
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/cacheflush.h>

#include "pgalloc-track.h"
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static int __read_mostly ioremap_p4d_capable;
static int __read_mostly ioremap_pud_capable;
static int __read_mostly ioremap_pmd_capable;
static int __read_mostly ioremap_huge_disabled;

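/* Honor the "nohugeiomap" kernel parameter by disabling huge I/O mappings. */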
static int __init set_nohugeiomap(char *str)
{
	ioremap_huge_disabled = 1;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);

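/*
 * Cache the architecture's huge-mapping capabilities once at boot, so the
 * per-level ioremap_*_enabled() checks below are cheap flag reads.
 */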
void __init ioremap_huge_init(void)
{
	if (!ioremap_huge_disabled) {
		if (arch_ioremap_p4d_supported())
			ioremap_p4d_capable = 1;
		if (arch_ioremap_pud_supported())
			ioremap_pud_capable = 1;
		if (arch_ioremap_pmd_supported())
			ioremap_pmd_capable = 1;
	}
}

static inline int ioremap_p4d_enabled(void)
{
	return ioremap_p4d_capable;
}

static inline int ioremap_pud_enabled(void)
{
	return ioremap_pud_capable;
}

static inline int ioremap_pmd_enabled(void)
{
	return ioremap_pmd_capable;
}

#else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int ioremap_p4d_enabled(void) { return 0; }
static inline int ioremap_pud_enabled(void) { return 0; }
static inline int ioremap_pmd_enabled(void) { return 0; }
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

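/*
 * Populate the leaf PTE level: map one page at a time, advancing the pfn in
 * step with the virtual address.  The BUG_ON catches double mappings.
 */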
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

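/*
 * Try to install a single PMD-sized huge mapping.  This only succeeds when
 * the range covers exactly one PMD, both the virtual and physical addresses
 * are PMD-aligned, and any stale PTE page under this entry can be freed;
 * otherwise the caller falls back to the PTE level.
 */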
static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
				unsigned long end, phys_addr_t phys_addr,
				pgprot_t prot)
{
	if (!ioremap_pmd_enabled())
		return 0;

	if ((end - addr) != PMD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PMD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
		return 0;

	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
		return 0;

	return pmd_set_huge(pmd, phys_addr, prot);
}

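/*
 * Walk one PMD's worth of entries, preferring a huge mapping per entry and
 * falling back to ioremap_pte_range().  The PUD and P4D walkers below follow
 * the same pattern one level up.
 */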
static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot)) {
			*mask |= PGTBL_PMD_MODIFIED;
			continue;
		}

		if (ioremap_pte_range(pmd, addr, next, phys_addr, prot, mask))
			return -ENOMEM;
	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

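/* As ioremap_try_huge_pmd(), one level up: a single PUD-sized mapping. */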
static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
				unsigned long end, phys_addr_t phys_addr,
				pgprot_t prot)
{
	if (!ioremap_pud_enabled())
		return 0;

	if ((end - addr) != PUD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PUD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
		return 0;

	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
		return 0;

	return pud_set_huge(pud, phys_addr, prot);
}

static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot)) {
			*mask |= PGTBL_PUD_MODIFIED;
			continue;
		}

		if (ioremap_pmd_range(pud, addr, next, phys_addr, prot, mask))
			return -ENOMEM;
	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

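/* As ioremap_try_huge_pmd(), two levels up: a single P4D-sized mapping. */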
static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr,
				unsigned long end, phys_addr_t phys_addr,
				pgprot_t prot)
{
	if (!ioremap_p4d_enabled())
		return 0;

	if ((end - addr) != P4D_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, P4D_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
		return 0;

	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
		return 0;

	return p4d_set_huge(p4d, phys_addr, prot);
}

static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot)) {
			*mask |= PGTBL_P4D_MODIFIED;
			continue;
		}

		if (ioremap_pud_range(p4d, addr, next, phys_addr, prot, mask))
			return -ENOMEM;
	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

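/*
 * ioremap_page_range - map physical memory into the kernel page tables
 * @addr:	start of the virtual range; must be below @end
 * @end:	end of the virtual range
 * @phys_addr:	physical address to map from
 * @prot:	page protection for the mapping
 *
 * Walks the kernel page tables from the PGD down, installing huge mappings
 * where the architecture allows it, then flushes the cache for the new
 * range.  If any table level was modified that the architecture needs to
 * propagate, arch_sync_kernel_mappings() is called for the range.
 */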
int ioremap_page_range(unsigned long addr,
		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;
	pgtbl_mod_mask mask = 0;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot,
					&mask);
		if (err)
			break;
	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

	flush_cache_vmap(start, end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return err;
}

#ifdef CONFIG_GENERIC_IOREMAP
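/*
 * ioremap_prot - map bus memory into CPU space with the given protection
 * @addr:	physical address to map
 * @size:	size of the mapping
 * @prot:	raw page protection value
 *
 * Reserves a VM_IOREMAP area, page-aligns the request, and maps it with
 * ioremap_page_range().  The returned pointer preserves the sub-page
 * offset of @addr.
 */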
void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
{
	unsigned long offset, vaddr;
	phys_addr_t last_addr;
	struct vm_struct *area;

	/* Disallow wrap-around or zero size */
	last_addr = addr + size - 1;
	if (!size || last_addr < addr)
		return NULL;

	/* Page-align mappings */
	offset = addr & (~PAGE_MASK);
	addr -= offset;
	size = PAGE_ALIGN(size + offset);

	area = get_vm_area_caller(size, VM_IOREMAP,
			__builtin_return_address(0));
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;

	if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}
EXPORT_SYMBOL(ioremap_prot);

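/*
 * iounmap - tear down a mapping created by ioremap_prot()
 *
 * The address is masked back to its page boundary, undoing the sub-page
 * offset added on the way in, before the vmalloc area is released.
 */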
void iounmap(volatile void __iomem *addr)
{
	vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
#endif /* CONFIG_GENERIC_IOREMAP */
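
/*
 * Illustrative sketch (not part of the original file): how a driver would
 * typically consume this API.  MMIO_PHYS_BASE and MMIO_SIZE are hypothetical
 * stand-ins for a real device's register window; most drivers call the
 * ioremap() wrapper from <linux/io.h> rather than ioremap_prot() directly.
 *
 *	#include <linux/io.h>
 *
 *	#define MMIO_PHYS_BASE	0xfed00000UL	// hypothetical register window
 *	#define MMIO_SIZE	0x1000
 *
 *	static void __iomem *regs;
 *
 *	static int example_probe(void)
 *	{
 *		regs = ioremap(MMIO_PHYS_BASE, MMIO_SIZE);
 *		if (!regs)
 *			return -ENOMEM;
 *		writel(0x1, regs);	// use MMIO accessors, not plain loads/stores
 *		return 0;
 *	}
 *
 *	static void example_remove(void)
 *	{
 *		iounmap(regs);		// releases the underlying vmalloc area
 *	}
 */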