xref: /freebsd/sys/arm64/include/vmparam.h (revision 56b17de1e8360fe131d425de20b5e75ff3ea897c)
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *	from: FreeBSD: src/sys/i386/include/vmparam.h,v 1.33 2000/03/30
 */

#ifdef __arm__
#include <arm/vmparam.h>
#else /* !__arm__ */

#ifndef	_MACHINE_VMPARAM_H_
#define	_MACHINE_VMPARAM_H_

/*
 * Virtual memory related constants, all in bytes
 */
#ifndef MAXTSIZ
#define	MAXTSIZ		(1*1024*1024*1024)	/* max text size */
#endif
#ifndef DFLDSIZ
#define	DFLDSIZ		(128*1024*1024)		/* initial data size limit */
#endif
#ifndef MAXDSIZ
#define	MAXDSIZ		(1*1024*1024*1024)	/* max data size */
#endif
#ifndef DFLSSIZ
#define	DFLSSIZ		(128*1024*1024)		/* initial stack size limit */
#endif
#ifndef MAXSSIZ
#define	MAXSSIZ		(1*1024*1024*1024)	/* max stack size */
#endif
#ifndef SGROWSIZ
#define	SGROWSIZ	(128*1024)		/* amount to grow stack */
#endif
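
/*
 * Note: because each limit above is guarded by #ifndef, a custom kernel
 * configuration can override it.  A hypothetical config fragment (values
 * in bytes):
 *
 *	options 	MAXDSIZ=(2UL*1024*1024*1024)
 *	options 	MAXSSIZ=(512UL*1024*1024)
 */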

/*
 * The physical address space is sparsely populated.
 */
#define	VM_PHYSSEG_SPARSE

/*
 * The number of PHYSSEG entries.
 */
#define	VM_PHYSSEG_MAX		64

/*
 * Create three free page pools: VM_FREEPOOL_DEFAULT is the default pool
 * from which physical pages are allocated, VM_FREEPOOL_DIRECT is the pool
 * from which physical pages for page tables and small UMA objects are
 * allocated, and VM_FREEPOOL_LAZYINIT is a special-purpose pool that is
 * populated only during boot and is used to implement deferred
 * initialization of page structures.
 */
#define	VM_NFREEPOOL		3
#define	VM_FREEPOOL_LAZYINIT	0
#define	VM_FREEPOOL_DEFAULT	1
#define	VM_FREEPOOL_DIRECT	2

/*
 * Create two free page lists: VM_FREELIST_DMA32 is for physical pages that
 * have physical addresses below 4G, and VM_FREELIST_DEFAULT is for all
 * other physical pages.
 */
#define	VM_NFREELIST		2
#define	VM_FREELIST_DEFAULT	0
#define	VM_FREELIST_DMA32	1
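
/*
 * Illustrative sketch (not part of this header): a driver that needs
 * memory addressable by a 32-bit DMA engine bounds the physical address
 * range, and such requests can be satisfied from VM_FREELIST_DMA32:
 *
 *	vm_page_t m;
 *
 *	m = vm_page_alloc_noobj_contig(VM_ALLOC_WIRED, 1, 0,
 *	    (vm_paddr_t)0xffffffff, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
 */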

/*
 * When PAGE_SIZE is 4KB, an allocation size of 16MB is supported in order
 * to optimize the use of the direct map by UMA.  Specifically, a 64-byte
 * cache line contains at most 8 L2 BLOCK entries, collectively mapping 16MB
 * of physical memory.  By reducing the number of distinct 16MB "pages" that
 * are used by UMA, the physical memory allocator reduces the likelihood of
 * both 2MB page TLB misses and cache misses during the page table walk when
 * a 2MB page TLB miss does occur.
 *
 * When PAGE_SIZE is 16KB, an allocation size of 32MB is supported.  This
 * size is used by level 0 reservations and L2 BLOCK mappings.
 */
#if PAGE_SIZE == PAGE_SIZE_4K
#define	VM_NFREEORDER		13
#elif PAGE_SIZE == PAGE_SIZE_16K
#define	VM_NFREEORDER		12
#else
#error Unsupported page size
#endif
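
/*
 * Worked check of the orders above: the largest buddy allocation is
 * 2^(VM_NFREEORDER - 1) pages, i.e. 2^12 * 4KB = 16MB with 4KB pages and
 * 2^11 * 16KB = 32MB with 16KB pages, matching the sizes described above.
 */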

/*
 * Enable superpage reservations: 2 levels.
 */
#ifndef	VM_NRESERVLEVEL
#define	VM_NRESERVLEVEL		2
#endif

/*
 * Level 0 reservations consist of 16 pages when PAGE_SIZE is 4KB, and 128
 * pages when PAGE_SIZE is 16KB.  Level 1 reservations consist of 32 64KB
 * pages when PAGE_SIZE is 4KB, and 16 2M pages when PAGE_SIZE is 16KB.
 */
#if PAGE_SIZE == PAGE_SIZE_4K
#ifndef	VM_LEVEL_0_ORDER
#define	VM_LEVEL_0_ORDER	4
#endif
#ifndef	VM_LEVEL_1_ORDER
#define	VM_LEVEL_1_ORDER	5
#endif
#elif PAGE_SIZE == PAGE_SIZE_16K
#ifndef	VM_LEVEL_0_ORDER
#define	VM_LEVEL_0_ORDER	7
#endif
#ifndef	VM_LEVEL_1_ORDER
#define	VM_LEVEL_1_ORDER	4
#endif
#else
#error Unsupported page size
#endif
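
/*
 * Worked check: a level 0 reservation spans 2^VM_LEVEL_0_ORDER base pages
 * and a level 1 reservation spans 2^VM_LEVEL_1_ORDER level 0 reservations.
 * With 4KB pages: 2^4 * 4KB = 64KB at level 0 and 2^5 * 64KB = 2MB (an L2
 * block) at level 1.  With 16KB pages: 2^7 * 16KB = 2MB at level 0 and
 * 2^4 * 2MB = 32MB (an L2 block) at level 1.
 */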

/**
 * Address space layout.
 *
 * ARMv8 implements up to a 48-bit virtual address space. The address space
 * is split into two regions, one at each end of the 64-bit address space,
 * with an out-of-range "hole" in the middle.
 *
 * We use the full 48 bits for each region; however, the kernel may use only
 * a limited range within this space.
 *
 * Upper region:    0xffffffffffffffff  Top of virtual memory
 *
 *                  0xfffffeffffffffff  End of DMAP
 *                  0xffffa00000000000  Start of DMAP
 *
 *                  0xffff027fffffffff  End of KMSAN origin map
 *                  0xffff020000000000  Start of KMSAN origin map
 *
 *                  0xffff017fffffffff  End of KMSAN shadow map
 *                  0xffff010000000000  Start of KMSAN shadow map
 *
 *                  0xffff009fffffffff  End of KASAN shadow map
 *                  0xffff008000000000  Start of KASAN shadow map
 *
 *                  0xffff007fffffffff  End of KVA
 *                  0xffff000000000000  Kernel base address & start of KVA
 *
 * Hole:            0xfffeffffffffffff
 *                  0x0001000000000000
 *
 * Lower region:    0x0000ffffffffffff  End of user address space
 *                  0x0000000000000000  Start of user address space
 *
 * We use the upper region for the kernel and the lower region for userland.
 *
 * We define some interesting address constants:
 *
 * VM_MIN_ADDRESS and VM_MAX_ADDRESS define the start and end of the entire
 * 64-bit address space, mostly just for convenience.
 *
 * VM_MIN_KERNEL_ADDRESS and VM_MAX_KERNEL_ADDRESS define the start and end
 * of mappable kernel virtual address space.
 *
 * VM_MIN_USER_ADDRESS and VM_MAX_USER_ADDRESS define the start and end of
 * the user address space.
 */
#define	VM_MIN_ADDRESS		(0x0000000000000000UL)
#define	VM_MAX_ADDRESS		(0xffffffffffffffffUL)

/* 512 GiB of kernel addresses */
#define	VM_MIN_KERNEL_ADDRESS	(0xffff000000000000UL)
#define	VM_MAX_KERNEL_ADDRESS	(0xffff008000000000UL)

/* 128 GiB KASAN shadow map */
#define	KASAN_MIN_ADDRESS	(0xffff008000000000UL)
#define	KASAN_MAX_ADDRESS	(0xffff00a000000000UL)

/* 512 GiB KMSAN shadow map */
#define	KMSAN_SHAD_MIN_ADDRESS	(0xffff010000000000UL)
#define	KMSAN_SHAD_MAX_ADDRESS	(0xffff018000000000UL)

/* 512 GiB KMSAN origin map */
#define	KMSAN_ORIG_MIN_ADDRESS	(0xffff020000000000UL)
#define	KMSAN_ORIG_MAX_ADDRESS	(0xffff028000000000UL)

/* The address bits that hold a pointer authentication code */
#define	PAC_ADDR_MASK		(0xff7f000000000000UL)

/* True if addr is in the kernel address space */
#define	ADDR_IS_KERNEL(addr)	(((addr) & (1ul << 55)) == (1ul << 55))
/* True if addr is in its canonical form (i.e. no TBI, PAC, etc.) */
#define	ADDR_IS_CANONICAL(addr)	\
    (((addr) & 0xffff000000000000UL) == 0 || \
     ((addr) & 0xffff000000000000UL) == 0xffff000000000000UL)
#define	ADDR_MAKE_CANONICAL(addr) ({			\
	__typeof(addr) _tmp_addr = (addr);		\
							\
	_tmp_addr &= ~0xffff000000000000UL;		\
	if (ADDR_IS_KERNEL(addr))			\
		_tmp_addr |= 0xffff000000000000UL;	\
							\
	_tmp_addr;					\
})
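
/*
 * Illustrative example (addresses are hypothetical): a kernel pointer
 * carrying a top-byte tag, e.g. 0x2effa00000001000, has bit 55 set, so
 * ADDR_MAKE_CANONICAL() yields 0xffffa00000001000; a tagged user pointer
 * such as 0x2e00000012345678 has bit 55 clear and canonicalizes to
 * 0x0000000012345678.
 */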

/* 95 TiB maximum for the direct map region */
#define	DMAP_MIN_ADDRESS	(0xffffa00000000000UL)
#define	DMAP_MAX_ADDRESS	(0xffffff0000000000UL)

#define	DMAP_MIN_PHYSADDR	(dmap_phys_base)
#define	DMAP_MAX_PHYSADDR	(dmap_phys_max)

/*
 * Checks to see if a physical address is in the DMAP range.
 * - PHYS_IN_DMAP_RANGE will return true for an address that is within the
 *   DMAP range but may not be accessible through the DMAP, e.g. device
 *   memory between two DMAP physical address regions.
 * - PHYS_IN_DMAP will check that the DMAP address is mapped before
 *   returning true.
 *
 * PHYS_IN_DMAP_RANGE should only be used when a separate check on the
 * address is performed, e.g. by checking the physical address is within
 * phys_avail, or by checking the virtual address is mapped.
 */
#define	PHYS_IN_DMAP_RANGE(pa)	((pa) >= DMAP_MIN_PHYSADDR && \
    (pa) < DMAP_MAX_PHYSADDR)
#define	PHYS_IN_DMAP(pa)	(PHYS_IN_DMAP_RANGE(pa) && \
    pmap_klookup(PHYS_TO_DMAP(pa), NULL))
/* True if va is in the dmap range */
#define	VIRT_IN_DMAP(va)	((va) >= DMAP_MIN_ADDRESS && \
    (va) < (dmap_max_addr))

#define	PMAP_HAS_DMAP	1
#define	PHYS_TO_DMAP(pa)						\
({									\
	KASSERT(PHYS_IN_DMAP_RANGE(pa),					\
	    ("%s: PA out of range, PA: 0x%lx", __func__,		\
	    (vm_paddr_t)(pa)));						\
	((pa) - dmap_phys_base) + DMAP_MIN_ADDRESS;			\
})

#define	DMAP_TO_PHYS(va)						\
({									\
	KASSERT(VIRT_IN_DMAP(va),					\
	    ("%s: VA out of range, VA: 0x%lx", __func__,		\
	    (vm_offset_t)(va)));					\
	((va) - DMAP_MIN_ADDRESS) + dmap_phys_base;			\
})
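
/*
 * Illustrative sketch (not part of this header): since PHYS_TO_DMAP()
 * only asserts that the physical address is within the DMAP range, code
 * that cannot otherwise guarantee the page is mapped would guard the
 * translation with PHYS_IN_DMAP():
 *
 *	vm_offset_t va;
 *
 *	if (PHYS_IN_DMAP(pa))
 *		va = PHYS_TO_DMAP(pa);
 *	else
 *		va = 0;		(fall back to a transient mapping)
 */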

#define	VM_MIN_USER_ADDRESS	(0x0000000000000000UL)
#define	VM_MAX_USER_ADDRESS	(0x0001000000000000UL)

#define	VM_MINUSER_ADDRESS	(VM_MIN_USER_ADDRESS)
#define	VM_MAXUSER_ADDRESS	(VM_MAX_USER_ADDRESS)

#define	KERNBASE		(VM_MIN_KERNEL_ADDRESS)
#define	SHAREDPAGE		(VM_MAXUSER_ADDRESS - PAGE_SIZE)
#define	USRSTACK		SHAREDPAGE
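
/*
 * Worked example: with 4KB pages the shared page sits at
 * 0x0001000000000000 - 0x1000 = 0x0000fffffffff000, and user stacks are
 * placed below that same address.
 */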

/*
 * How many physical pages per kmem arena virtual page.
 */
#ifndef VM_KMEM_SIZE_SCALE
#define	VM_KMEM_SIZE_SCALE	(1)
#endif

/*
 * Optional ceiling (in bytes) on the size of the kmem arena: 60% of the
 * kernel map.
 */
#ifndef VM_KMEM_SIZE_MAX
#define	VM_KMEM_SIZE_MAX	((VM_MAX_KERNEL_ADDRESS - \
    VM_MIN_KERNEL_ADDRESS + 1) * 3 / 5)
#endif
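
/*
 * Worked check: the kernel map spans 512 GiB (see VM_MIN_KERNEL_ADDRESS
 * and VM_MAX_KERNEL_ADDRESS above), so this ceiling evaluates to roughly
 * 512 GiB * 3 / 5 = 307 GiB.
 */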

/*
 * Initial pagein size for the beginning of an executable file.
 */
#ifndef	VM_INITIAL_PAGEIN
#define	VM_INITIAL_PAGEIN	16
#endif

#if !defined(KASAN) && !defined(KMSAN)
#define	UMA_USE_DMAP
#endif

#ifndef LOCORE

extern vm_paddr_t dmap_phys_base;
extern vm_paddr_t dmap_phys_max;
extern vm_offset_t dmap_max_addr;

#endif

#define	ZERO_REGION_SIZE	(64 * 1024)	/* 64KB */

#define	DEVMAP_MAX_VADDR	VM_MAX_KERNEL_ADDRESS

/*
 * The pmap can create non-transparent large page mappings.
 */
#define	PMAP_HAS_LARGEPAGES	1

/*
 * Need a page dump array for minidump.
 */
#define	MINIDUMP_PAGE_TRACKING		1
#define	MINIDUMP_STARTUP_PAGE_TRACKING	1

#endif /* !_MACHINE_VMPARAM_H_ */

#endif /* !__arm__ */