/*-
 * Copyright (c) 2013 Ian Lepore <ian@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* Routines for mapping device memory. */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/devmap.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <machine/vmparam.h>

static const struct devmap_entry *devmap_table;
static boolean_t devmap_bootstrap_done = false;

/*
 * The allocated-kva (akva) devmap table and metadata.  Platforms can call
 * devmap_add_entry() to add static device mappings to this table using
 * automatically allocated virtual addresses carved out of the top of kva space.
 * Allocation begins immediately below the max kernel virtual address.
 */
#define	AKVA_DEVMAP_MAX_ENTRIES	32
static struct devmap_entry	akva_devmap_entries[AKVA_DEVMAP_MAX_ENTRIES];
static u_int			akva_devmap_idx;
static vm_offset_t		akva_devmap_vaddr = DEVMAP_MAX_VADDR;
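
/*
 * Illustrative arithmetic (hypothetical value): if DEVMAP_MAX_VADDR were
 * 0xffe00000, the first devmap_add_entry() call for a 16 KiB region would
 * leave akva_devmap_vaddr at trunc_page(0xffe00000 - 0x4000) = 0xffdfc000,
 * and later calls would continue allocating downward from there.  The real
 * DEVMAP_MAX_VADDR comes from <machine/vmparam.h> and is per-architecture.
 */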

#if defined(__aarch64__) || defined(__riscv)
extern int early_boot;
#endif

/*
 * Print the contents of the static mapping table using the provided printf-like
 * output function (which will be either printf or db_printf).
 */
static void
devmap_dump_table(int (*prfunc)(const char *, ...))
{
	const struct devmap_entry *pd;

	if (devmap_table == NULL || devmap_table[0].pd_size == 0) {
		prfunc("No static device mappings.\n");
		return;
	}

	prfunc("Static device mappings:\n");
	for (pd = devmap_table; pd->pd_size != 0; ++pd) {
		prfunc("  0x%08jx - 0x%08jx mapped at VA 0x%08jx\n",
		    (uintmax_t)pd->pd_pa,
		    (uintmax_t)(pd->pd_pa + pd->pd_size - 1),
		    (uintmax_t)pd->pd_va);
	}
}

/*
 * Print the contents of the static mapping table.  Used for bootverbose.
 */
void
devmap_print_table(void)
{
	devmap_dump_table(printf);
}

/*
 * Return the "last" kva address used by the registered devmap table.  It's
 * actually the lowest address used by the static mappings, i.e., the address of
 * the first unusable byte of KVA.
 */
vm_offset_t
devmap_lastaddr(void)
{
	const struct devmap_entry *pd;
	vm_offset_t lowaddr;

	if (akva_devmap_idx > 0)
		return (akva_devmap_vaddr);

	lowaddr = DEVMAP_MAX_VADDR;
	for (pd = devmap_table; pd != NULL && pd->pd_size != 0; ++pd) {
		if (lowaddr > pd->pd_va)
			lowaddr = pd->pd_va;
	}

	return (lowaddr);
}
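
/*
 * Hypothetical usage sketch: early MD startup code could use the returned
 * address to bound the kva it hands out, since everything at or above it
 * is reserved for the static device mappings:
 *
 *	vm_offset_t kva_top;	// hypothetical variable
 *
 *	kva_top = devmap_lastaddr();
 */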

/*
 * Add an entry to the internal "akva" static devmap table using the given
 * physical address and size and a virtual address allocated from the top of
 * kva.  This automatically registers the akva table on the first call, so all a
 * platform has to do is call this routine to install as many mappings as it
 * needs; when the platform-specific init function calls devmap_bootstrap(),
 * all the entries in the akva table are picked up automatically.  See the
 * usage sketch following this function.
 */
void
devmap_add_entry(vm_paddr_t pa, vm_size_t sz)
{
	struct devmap_entry *m;

	if (devmap_bootstrap_done)
		panic("devmap_add_entry() after devmap_bootstrap()");

	if (akva_devmap_idx == (AKVA_DEVMAP_MAX_ENTRIES - 1))
		panic("AKVA_DEVMAP_MAX_ENTRIES is too small");

	if (akva_devmap_idx == 0)
		devmap_register_table(akva_devmap_entries);

	/* Allocate virtual address space from the top of kva downwards. */
#ifdef __arm__
	/*
	 * If the range being mapped is aligned and sized to 1MB boundaries then
	 * also align the virtual address to the next-lower 1MB boundary so that
	 * we end up with a nice efficient section mapping.
	 */
	if ((pa & 0x000fffff) == 0 && (sz & 0x000fffff) == 0) {
		akva_devmap_vaddr = trunc_1mpage(akva_devmap_vaddr - sz);
	} else
#endif
	{
		akva_devmap_vaddr = trunc_page(akva_devmap_vaddr - sz);
	}
	m = &akva_devmap_entries[akva_devmap_idx++];
	m->pd_va    = akva_devmap_vaddr;
	m->pd_pa    = pa;
	m->pd_size  = sz;
}
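
/*
 * Usage sketch (hypothetical addresses): a platform init routine running
 * before devmap_bootstrap() might install a couple of device mappings and
 * let this file choose the virtual addresses:
 *
 *	devmap_add_entry(0x44e00000, 0x00100000);	// SoC control module
 *	devmap_add_entry(0x48000000, 0x01000000);	// peripheral window
 *
 * The first call also registers the akva table, so a later call to
 * devmap_bootstrap(0, NULL) will map both regions.
 */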

/*
 * Register the given table as the one to use in devmap_bootstrap().
 */
void
devmap_register_table(const struct devmap_entry *table)
{

	devmap_table = table;
}
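
/*
 * Usage sketch: a platform that chooses its own virtual addresses can
 * instead register a statically initialized table, terminated by an
 * all-zero entry (pd_size == 0).  The name and addresses below are
 * hypothetical:
 *
 *	static const struct devmap_entry example_devmap[] = {
 *		{ .pd_va = 0xfed00000, .pd_pa = 0x20100000, .pd_size = 0x00100000 },
 *		{ .pd_size = 0 },	// terminator
 *	};
 *
 *	devmap_register_table(example_devmap);
 */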

/*
 * Map all of the static regions in the devmap table, and remember the devmap
 * table so the mapdev, ptov, and vtop functions can do lookups later.
 *
 * If a non-NULL table pointer is given it is used unconditionally, otherwise
 * the previously-registered table is used.  This smooths the transition from
 * legacy code that fills in a local table and then passes it to this function,
 * to newer code that calls devmap_register_table() from platform-specific code
 * and then lets the common init function call this function with a NULL
 * pointer.
 */
void
devmap_bootstrap(vm_offset_t l1pt, const struct devmap_entry *table)
{
	const struct devmap_entry *pd;

	devmap_bootstrap_done = true;

	/*
	 * If given a table pointer, use it.  Otherwise, if a table was
	 * previously registered, use it.  Otherwise, there is no work to do.
	 */
	if (table != NULL)
		devmap_table = table;
	else if (devmap_table == NULL)
		return;

	for (pd = devmap_table; pd->pd_size != 0; ++pd) {
#if defined(__arm__)
		pmap_preboot_map_attr(pd->pd_pa, pd->pd_va, pd->pd_size,
		    VM_PROT_READ | VM_PROT_WRITE, VM_MEMATTR_DEVICE);
#elif defined(__aarch64__) || defined(__riscv)
		pmap_kenter_device(pd->pd_va, pd->pd_size, pd->pd_pa);
#endif
	}
}
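
/*
 * Usage sketch: with the akva table in use, a platform's early MD init
 * might finish its static mappings like this (the l1pt argument is not
 * used by this implementation, and soc_uart_pa is a hypothetical name):
 *
 *	devmap_add_entry(soc_uart_pa, PAGE_SIZE);
 *	devmap_bootstrap(0, NULL);	// maps everything in the akva table
 */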

/*
 * Look up the given physical address in the static mapping data and return the
 * corresponding virtual address, or NULL if not found.
 */
void *
devmap_ptov(vm_paddr_t pa, vm_size_t size)
{
	const struct devmap_entry *pd;

	if (devmap_table == NULL)
		return (NULL);

	for (pd = devmap_table; pd->pd_size != 0; ++pd) {
		if (pa >= pd->pd_pa && pa + size <= pd->pd_pa + pd->pd_size)
			return ((void *)(pd->pd_va + (pa - pd->pd_pa)));
	}

	return (NULL);
}

/*
 * Look up the given virtual address in the static mapping data and return the
 * corresponding physical address, or DEVMAP_PADDR_NOTFOUND if not found.
 */
vm_paddr_t
devmap_vtop(void *vpva, vm_size_t size)
{
	const struct devmap_entry *pd;
	vm_offset_t va;

	if (devmap_table == NULL)
		return (DEVMAP_PADDR_NOTFOUND);

	va = (vm_offset_t)vpva;
	for (pd = devmap_table; pd->pd_size != 0; ++pd) {
		if (va >= pd->pd_va && va + size <= pd->pd_va + pd->pd_size)
			return ((vm_paddr_t)(pd->pd_pa + (va - pd->pd_va)));
	}

	return (DEVMAP_PADDR_NOTFOUND);
}
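
/*
 * Round-trip sketch (hypothetical table entry): given { .pd_va =
 * 0xffdf0000, .pd_pa = 0x20100000, .pd_size = 0x10000 },
 * devmap_ptov(0x20104000, 0x100) returns (void *)0xffdf4000, and
 * devmap_vtop((void *)0xffdf4000, 0x100) returns 0x20104000.  A request
 * that is not fully contained in a single entry fails the lookup.
 */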

/*
 * Map a set of physical memory pages into the kernel virtual address space.
 * Return a pointer to where it is mapped.
 *
 * This uses a pre-established static mapping if one exists for the requested
 * range, otherwise it allocates kva space and maps the physical pages into it.
 *
 * This routine is intended to be used for mapping device memory, NOT real
 * memory; the mapping type is inherently VM_MEMATTR_DEVICE in
 * pmap_kenter_device().
 */
void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t va, offset;
	void *rva;

	/* First look in the static mapping table. */
	if ((rva = devmap_ptov(pa, size)) != NULL)
		return (rva);

	offset = pa & PAGE_MASK;
	pa = trunc_page(pa);
	size = round_page(size + offset);

#if defined(__aarch64__) || defined(__riscv)
	if (early_boot) {
		akva_devmap_vaddr = trunc_page(akva_devmap_vaddr - size);
		va = akva_devmap_vaddr;
		KASSERT(va >= VM_MAX_KERNEL_ADDRESS - PMAP_MAPDEV_EARLY_SIZE,
		    ("Too many early devmap mappings"));
	} else
#endif
		va = kva_alloc(size);
	if (!va)
		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");

	pmap_kenter_device(va, size, pa);

	return ((void *)(va + offset));
}
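
/*
 * Usage sketch (hypothetical device address): a driver can map a register
 * window and later release it; pmap_unmapdev() below is a no-op for
 * addresses that come from the static table:
 *
 *	uint32_t *regs;
 *
 *	regs = pmap_mapdev(0x20100000, 0x1000);
 *	// ... access the registers through *regs ...
 *	pmap_unmapdev(regs, 0x1000);
 */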

#if defined(__aarch64__) || defined(__riscv)
void *
pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
{
	vm_offset_t va, offset;
	void *rva;

	/* First look in the static mapping table. */
	if ((rva = devmap_ptov(pa, size)) != NULL)
		return (rva);

	offset = pa & PAGE_MASK;
	pa = trunc_page(pa);
	size = round_page(size + offset);

	if (early_boot) {
		akva_devmap_vaddr = trunc_page(akva_devmap_vaddr - size);
		va = akva_devmap_vaddr;
		KASSERT(va >= VM_MAX_KERNEL_ADDRESS - PMAP_MAPDEV_EARLY_SIZE,
		    ("Too many early devmap mappings 2"));
	} else
		va = kva_alloc(size);
	if (!va)
		panic("pmap_mapdev_attr: Couldn't alloc kernel virtual memory");

	pmap_kenter(va, size, pa, ma);

	return ((void *)(va + offset));
}
#endif

/*
 * Unmap device memory and free the kva space.
 */
void
pmap_unmapdev(void *p, vm_size_t size)
{
	vm_offset_t offset, va;

	/* Nothing to do if we find the mapping in the static table. */
	if (devmap_vtop(p, size) != DEVMAP_PADDR_NOTFOUND)
		return;

	va = (vm_offset_t)p;
	offset = va & PAGE_MASK;
	va = trunc_page(va);
	size = round_page(size + offset);

	pmap_kremove_device(va, size);
	kva_free(va, size);
}

#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND_FLAGS(devmap, db_show_devmap, DB_CMD_MEMSAFE)
{
	devmap_dump_table(db_printf);
}

#endif /* DDB */
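
/*
 * A DDB session would show the same information as the bootverbose dump;
 * with one hypothetical table entry the output could look like:
 *
 *	db> show devmap
 *	Static device mappings:
 *	  0x20100000 - 0x201fffff mapped at VA 0xffdf0000
 */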