xref: /freebsd/sys/arm64/include/vmparam.h (revision 9cbf1de7e34a6fced041388fad5d9180cb7705fe)
1 /*-
2  * Copyright (c) 1990 The Regents of the University of California.
3  * All rights reserved.
4  * Copyright (c) 1994 John S. Dyson
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * William Jolitz.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *	from: FreeBSD: src/sys/i386/include/vmparam.h,v 1.33 2000/03/30
34  */
35 
36 #ifdef __arm__
37 #include <arm/vmparam.h>
38 #else /* !__arm__ */
39 
40 #ifndef	_MACHINE_VMPARAM_H_
41 #define	_MACHINE_VMPARAM_H_
42 
43 /*
44  * Virtual memory related constants, all in bytes
 *
 * Default per-process resource limits for the text, data, and stack
 * segments, plus the stack growth increment.  Each definition is
 * wrapped in #ifndef so a kernel configuration file may override the
 * value at build time.
45  */
46 #ifndef MAXTSIZ
47 #define	MAXTSIZ		(1*1024*1024*1024)	/* max text size */
48 #endif
49 #ifndef DFLDSIZ
50 #define	DFLDSIZ		(128*1024*1024)		/* initial data size limit */
51 #endif
52 #ifndef MAXDSIZ
53 #define	MAXDSIZ		(1*1024*1024*1024)	/* max data size */
54 #endif
55 #ifndef DFLSSIZ
56 #define	DFLSSIZ		(128*1024*1024)		/* initial stack size limit */
57 #endif
58 #ifndef MAXSSIZ
59 #define	MAXSSIZ		(1*1024*1024*1024)	/* max stack size */
60 #endif
61 #ifndef SGROWSIZ
62 #define	SGROWSIZ	(128*1024)		/* amount to grow stack */
63 #endif
64 
65 /*
66  * The physical address space is sparsely populated.
67  */
68 #define	VM_PHYSSEG_SPARSE
69 
70 /*
71  * The number of PHYSSEG entries.
 * NOTE(review): presumably an upper bound on the number of discontiguous
 * physical memory segments tracked by the physical memory allocator —
 * confirm against sys/vm/vm_phys.c.
72  */
73 #define	VM_PHYSSEG_MAX		64
74 
75 /*
76  * Create three free page pools: VM_FREEPOOL_DEFAULT is the default pool from
77  * which physical pages are allocated and VM_FREEPOOL_DIRECT is the pool from
78  * which physical pages for page tables and small UMA objects are allocated.
79  * VM_FREEPOOL_LAZYINIT is a special-purpose pool that is populated only during
80  * boot and is used to implement deferred initialization of page structures.
81  */
82 #define	VM_NFREEPOOL		3
83 #define	VM_FREEPOOL_LAZYINIT	0
84 #define	VM_FREEPOOL_DEFAULT	1
85 #define	VM_FREEPOOL_DIRECT	2
86 
87 /*
88  * Create two free page lists: VM_FREELIST_DMA32 is for physical pages that have
89  * physical addresses below 4G, and VM_FREELIST_DEFAULT is for all other
90  * physical pages.
91  */
92 #define	VM_NFREELIST		2
93 #define	VM_FREELIST_DEFAULT	0
94 #define	VM_FREELIST_DMA32	1
95 
96 /*
97  * When PAGE_SIZE is 4KB, an allocation size of 16MB is supported in order
98  * to optimize the use of the direct map by UMA.  Specifically, a 64-byte
99  * cache line contains at most 8 L2 BLOCK entries, collectively mapping 16MB
100  * of physical memory.  By reducing the number of distinct 16MB "pages" that
101  * are used by UMA, the physical memory allocator reduces the likelihood of
102  * both 2MB page TLB misses and cache misses during the page table walk when
103  * a 2MB page TLB miss does occur.
104  *
105  * When PAGE_SIZE is 16KB, an allocation size of 32MB is supported.  This
106  * size is used by level 0 reservations and L2 BLOCK mappings.
107  */
108 #if PAGE_SIZE == PAGE_SIZE_4K
/* Max order is 12: 2^12 pages * 4KB/page = the 16MB described above. */
109 #define	VM_NFREEORDER		13
110 #elif PAGE_SIZE == PAGE_SIZE_16K
/* Max order is 11: 2^11 pages * 16KB/page = the 32MB described above. */
111 #define	VM_NFREEORDER		12
112 #else
113 #error Unsupported page size
114 #endif
115 
116 /*
117  * Enable superpage reservations: 1 level.
118  */
119 #ifndef	VM_NRESERVLEVEL
120 #define	VM_NRESERVLEVEL		1
121 #endif
122 
123 /*
124  * Level 0 reservations consist of 512 pages when PAGE_SIZE is 4KB, and
125  * 2048 pages when PAGE_SIZE is 16KB.
126  */
127 #ifndef	VM_LEVEL_0_ORDER
128 #if PAGE_SIZE == PAGE_SIZE_4K
/* 2^9 = 512 pages * 4KB = one 2MB L2 BLOCK mapping. */
129 #define	VM_LEVEL_0_ORDER	9
130 #elif PAGE_SIZE == PAGE_SIZE_16K
/* 2^11 = 2048 pages * 16KB = one 32MB L2 BLOCK mapping. */
131 #define	VM_LEVEL_0_ORDER	11
132 #else
133 #error Unsupported page size
134 #endif
135 #endif
136 
137 /**
138  * Address space layout.
139  *
140  * ARMv8 implements up to a 48 bit virtual address space. The address space is
141  * split into 2 regions at each end of the 64 bit address space, with an
142  * out of range "hole" in the middle.
143  *
144  * We use the full 48 bits for each region, however the kernel may only use
145  * a limited range within this space.
146  *
147  * Upper region:    0xffffffffffffffff  Top of virtual memory
148  *
149  *                  0xfffffeffffffffff  End of DMAP
150  *                  0xffffa00000000000  Start of DMAP
151  *
152  *                  0xffff027fffffffff  End of KMSAN origin map
153  *                  0xffff020000000000  Start of KMSAN origin map
154  *
155  *                  0xffff017fffffffff  End of KMSAN shadow map
156  *                  0xffff010000000000  Start of KMSAN shadow map
157  *
158  *                  0xffff009fffffffff  End of KASAN shadow map
159  *                  0xffff008000000000  Start of KASAN shadow map
160  *
161  *                  0xffff007fffffffff  End of KVA
162  *                  0xffff000000000000  Kernel base address & start of KVA
163  *
164  * Hole:            0xfffeffffffffffff
165  *                  0x0001000000000000
166  *
167  * Lower region:    0x0000ffffffffffff End of user address space
168  *                  0x0000000000000000 Start of user address space
169  *
170  * We use the upper region for the kernel, and the lower region for userland.
171  *
172  * We define some interesting address constants:
173  *
174  * VM_MIN_ADDRESS and VM_MAX_ADDRESS define the start and end of the entire
175  * 64 bit address space, mostly just for convenience.
176  *
177  * VM_MIN_KERNEL_ADDRESS and VM_MAX_KERNEL_ADDRESS define the start and end of
178  * mappable kernel virtual address space.
179  *
180  * VM_MIN_USER_ADDRESS and VM_MAX_USER_ADDRESS define the start and end of the
181  * user address space.
182  */
/*
 * Note: the *_MAX_ADDRESS constants below are exclusive upper bounds,
 * whereas the "End of ..." lines in the diagram above show the last
 * byte inclusive (e.g. KASAN ends at 0xffff009fffffffff inclusive,
 * KASAN_MAX_ADDRESS is 0xffff00a000000000 exclusive).
 */
183 #define	VM_MIN_ADDRESS		(0x0000000000000000UL)
184 #define	VM_MAX_ADDRESS		(0xffffffffffffffffUL)
185 
186 /* 512 GiB of kernel addresses */
187 #define	VM_MIN_KERNEL_ADDRESS	(0xffff000000000000UL)
188 #define	VM_MAX_KERNEL_ADDRESS	(0xffff008000000000UL)
189 
190 /* 128 GiB KASAN shadow map, starting exactly where KVA ends */
191 #define	KASAN_MIN_ADDRESS	(0xffff008000000000UL)
192 #define	KASAN_MAX_ADDRESS	(0xffff00a000000000UL)
193 
194 /* 512GiB KMSAN shadow map */
195 #define	KMSAN_SHAD_MIN_ADDRESS	(0xffff010000000000UL)
196 #define	KMSAN_SHAD_MAX_ADDRESS	(0xffff018000000000UL)
197 
198 /* 512GiB KMSAN origin map */
199 #define	KMSAN_ORIG_MIN_ADDRESS	(0xffff020000000000UL)
200 #define	KMSAN_ORIG_MAX_ADDRESS	(0xffff028000000000UL)
201 
/*
 * The address bits that hold a pointer authentication code: all of the
 * top sixteen bits except bit 55, which selects between the upper
 * (kernel) and lower (user) address ranges.
 */
#define	PAC_ADDR_MASK		(0xff7f000000000000UL)

/* If true addr is in the kernel address space */
#define	ADDR_IS_KERNEL(addr)	(((addr) & (1ul << 55)) == (1ul << 55))
/* If true addr is in its canonical form (i.e. no TBI, PAC, etc.) */
#define	ADDR_IS_CANONICAL(addr)	\
    (((addr) & 0xffff000000000000UL) == 0 || \
     ((addr) & 0xffff000000000000UL) == 0xffff000000000000UL)
/*
 * Strip any tag (TBI/PAC) bits from addr by forcing the top sixteen
 * bits to all-ones or all-zeroes based on bit 55, yielding the
 * canonical form of the address.
 *
 * The argument is evaluated exactly once.  The previous version
 * expanded addr a second time inside ADDR_IS_KERNEL(), which would
 * double any side effects in the argument, e.g.
 * ADDR_MAKE_CANONICAL(p++).  ORing all-ones into the top sixteen bits
 * subsumes the mask in the kernel case, so the result is unchanged.
 */
#define	ADDR_MAKE_CANONICAL(addr) ({			\
	__typeof(addr) _tmp_addr = (addr);		\
							\
	if (ADDR_IS_KERNEL(_tmp_addr))			\
		_tmp_addr |= 0xffff000000000000UL;	\
	else						\
		_tmp_addr &= ~0xffff000000000000UL;	\
							\
	_tmp_addr;					\
})
220 
221 /* 95 TiB maximum for the direct map region */
222 #define	DMAP_MIN_ADDRESS	(0xffffa00000000000UL)
223 #define	DMAP_MAX_ADDRESS	(0xffffff0000000000UL)
224 
/* Physical range covered by the DMAP; set outside this file (see externs below). */
225 #define	DMAP_MIN_PHYSADDR	(dmap_phys_base)
226 #define	DMAP_MAX_PHYSADDR	(dmap_phys_max)
227 
228 /*
229  * Checks to see if a physical address is in the DMAP range.
230  * - PHYS_IN_DMAP_RANGE may return true for addresses that fall within the
231  *   DMAP physical range but are not accessible through the DMAP, e.g. device
232  *   memory between two DMAP physical address regions.
233  * - PHYS_IN_DMAP will check if DMAP address is mapped before returning true.
234  *
235  * PHYS_IN_DMAP_RANGE should only be used when a check on the address is
236  * performed, e.g. by checking the physical address is within phys_avail,
237  * or checking the virtual address is mapped.
238  */
239 #define	PHYS_IN_DMAP_RANGE(pa)	((pa) >= DMAP_MIN_PHYSADDR && \
240     (pa) < DMAP_MAX_PHYSADDR)
241 #define	PHYS_IN_DMAP(pa)	(PHYS_IN_DMAP_RANGE(pa) && \
242     pmap_klookup(PHYS_TO_DMAP(pa), NULL))
243 /* True if va is in the dmap range */
244 #define	VIRT_IN_DMAP(va)	((va) >= DMAP_MIN_ADDRESS && \
245     (va) < (dmap_max_addr))
246 
247 #define	PMAP_HAS_DMAP	1
/*
 * Translate a physical address to its direct-map virtual address.
 * KASSERTs that pa is within the DMAP physical range.
 * NOTE(review): pa is expanded more than once (KASSERT plus the
 * arithmetic), so arguments must not have side effects.
 */
248 #define	PHYS_TO_DMAP(pa)						\
249 ({									\
250 	KASSERT(PHYS_IN_DMAP_RANGE(pa),					\
251 	    ("%s: PA out of range, PA: 0x%lx", __func__,		\
252 	    (vm_paddr_t)(pa)));						\
253 	((pa) - dmap_phys_base) + DMAP_MIN_ADDRESS;			\
254 })
255 
/*
 * Translate a direct-map virtual address back to its physical address.
 * KASSERTs that va is within the mapped DMAP virtual range.
 * NOTE(review): va is expanded more than once — no side effects.
 */
256 #define	DMAP_TO_PHYS(va)						\
257 ({									\
258 	KASSERT(VIRT_IN_DMAP(va),					\
259 	    ("%s: VA out of range, VA: 0x%lx", __func__,		\
260 	    (vm_offset_t)(va)));					\
261 	((va) - DMAP_MIN_ADDRESS) + dmap_phys_base;			\
262 })
263 
/* Userland occupies the entire 48-bit lower region (see layout above). */
264 #define	VM_MIN_USER_ADDRESS	(0x0000000000000000UL)
265 #define	VM_MAX_USER_ADDRESS	(0x0001000000000000UL)
266 
/* Alternate spellings of the user address bounds. */
267 #define	VM_MINUSER_ADDRESS	(VM_MIN_USER_ADDRESS)
268 #define	VM_MAXUSER_ADDRESS	(VM_MAX_USER_ADDRESS)
269 
270 #define	KERNBASE		(VM_MIN_KERNEL_ADDRESS)
/*
 * The shared page occupies the highest page of user VA; the user stack
 * top coincides with it.  NOTE(review): stack presumably grows down
 * from USRSTACK — confirm in the exec/stack setup code.
 */
271 #define	SHAREDPAGE		(VM_MAXUSER_ADDRESS - PAGE_SIZE)
272 #define	USRSTACK		SHAREDPAGE
273 
274 /*
275  * How many physical pages per kmem arena virtual page.
276  */
277 #ifndef VM_KMEM_SIZE_SCALE
278 #define	VM_KMEM_SIZE_SCALE	(1)
279 #endif
280 
281 /*
282  * Optional ceiling (in bytes) on the size of the kmem arena: 60% of the
283  * kernel map.
284  */
285 #ifndef VM_KMEM_SIZE_MAX
286 #define	VM_KMEM_SIZE_MAX	((VM_MAX_KERNEL_ADDRESS - \
287     VM_MIN_KERNEL_ADDRESS + 1) * 3 / 5)
288 #endif
289 
290 /*
291  * Initial pagein size of beginning of executable file.
292  */
293 #ifndef	VM_INITIAL_PAGEIN
294 #define	VM_INITIAL_PAGEIN	16
295 #endif
296 
/* UMA may allocate from the direct map unless a sanitizer shadow is in use. */
297 #if !defined(KASAN) && !defined(KMSAN)
298 #define UMA_USE_DMAP
299 #endif
300 
301 #ifndef LOCORE
302 
/*
 * DMAP bounds, referenced by the DMAP macros above.  Defined elsewhere
 * (NOTE(review): presumably set during pmap bootstrap — confirm in
 * arm64 pmap code).
 */
303 extern vm_paddr_t dmap_phys_base;	/* lowest PA covered by the DMAP */
304 extern vm_paddr_t dmap_phys_max;	/* highest PA covered by the DMAP */
305 extern vm_offset_t dmap_max_addr;	/* highest mapped DMAP VA (see VIRT_IN_DMAP) */
306 
307 #endif
308 
309 #define	ZERO_REGION_SIZE	(64 * 1024)	/* 64KB */
310 
/* Device mappings may use KVA up to the end of the kernel map. */
311 #define	DEVMAP_MAX_VADDR	VM_MAX_KERNEL_ADDRESS
312 
313 /*
314  * The pmap can create non-transparent large page mappings.
315  */
316 #define	PMAP_HAS_LARGEPAGES	1
317 
318 /*
319  * Need a page dump array for minidump.
320  */
321 #define MINIDUMP_PAGE_TRACKING	1
322 #define MINIDUMP_STARTUP_PAGE_TRACKING 1
323 
324 #endif /* !_MACHINE_VMPARAM_H_ */
325 
326 #endif /* !__arm__ */
327