xref: /freebsd/sys/arm64/include/vmparam.h (revision 8311bc5f17dec348749f763b82dfe2737bc53cd7)
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *	from: FreeBSD: src/sys/i386/include/vmparam.h,v 1.33 2000/03/30
 */

36 #ifdef __arm__
37 #include <arm/vmparam.h>
38 #else /* !__arm__ */
39 
40 #ifndef	_MACHINE_VMPARAM_H_
41 #define	_MACHINE_VMPARAM_H_
42 
/*
 * Virtual memory related constants, all in bytes.  Each limit may be
 * overridden from the kernel configuration, hence the #ifndef guards.
 */
#ifndef MAXTSIZ
#define	MAXTSIZ		(1*1024*1024*1024)	/* max text size */
#endif
#ifndef DFLDSIZ
#define	DFLDSIZ		(128*1024*1024)		/* initial data size limit */
#endif
#ifndef MAXDSIZ
#define	MAXDSIZ		(1*1024*1024*1024)	/* max data size */
#endif
#ifndef DFLSSIZ
#define	DFLSSIZ		(128*1024*1024)		/* initial stack size limit */
#endif
#ifndef MAXSSIZ
#define	MAXSSIZ		(1*1024*1024*1024)	/* max stack size */
#endif
#ifndef SGROWSIZ
#define	SGROWSIZ	(128*1024)		/* amount to grow stack */
#endif

/*
 * The physical address space is sparsely populated.
 */
#define	VM_PHYSSEG_SPARSE

/*
 * The number of PHYSSEG entries.
 */
#define	VM_PHYSSEG_MAX		64

/*
 * Create two free page pools: VM_FREEPOOL_DEFAULT is the default pool
 * from which physical pages are allocated and VM_FREEPOOL_DIRECT is
 * the pool from which physical pages for small UMA objects are
 * allocated.
 */
#define	VM_NFREEPOOL		2
#define	VM_FREEPOOL_DEFAULT	0
#define	VM_FREEPOOL_DIRECT	1

/*
 * Create two free page lists: VM_FREELIST_DMA32 is for physical pages that have
 * physical addresses below 4G, and VM_FREELIST_DEFAULT is for all other
 * physical pages.
 */
#define	VM_NFREELIST		2
#define	VM_FREELIST_DEFAULT	0
#define	VM_FREELIST_DMA32	1

/*
 * When PAGE_SIZE is 4KB, an allocation size of 16MB is supported in order
 * to optimize the use of the direct map by UMA.  Specifically, a 64-byte
 * cache line contains at most 8 L2 BLOCK entries, collectively mapping 16MB
 * of physical memory.  By reducing the number of distinct 16MB "pages" that
 * are used by UMA, the physical memory allocator reduces the likelihood of
 * both 2MB page TLB misses and cache misses during the page table walk when
 * a 2MB page TLB miss does occur.
 *
 * When PAGE_SIZE is 16KB, an allocation size of 32MB is supported.  This
 * size is used by level 0 reservations and L2 BLOCK mappings.
 */
#if PAGE_SIZE == PAGE_SIZE_4K
#define	VM_NFREEORDER		13	/* 4KB << 12 == 16MB max buddy size */
#elif PAGE_SIZE == PAGE_SIZE_16K
#define	VM_NFREEORDER		12	/* 16KB << 11 == 32MB max buddy size */
#else
#error Unsupported page size
#endif

/*
 * Enable superpage reservations: 1 level.
 */
#ifndef	VM_NRESERVLEVEL
#define	VM_NRESERVLEVEL		1
#endif

/*
 * Level 0 reservations consist of 512 pages when PAGE_SIZE is 4KB, and
 * 2048 pages when PAGE_SIZE is 16KB.
 */
#ifndef	VM_LEVEL_0_ORDER
#if PAGE_SIZE == PAGE_SIZE_4K
#define	VM_LEVEL_0_ORDER	9	/* 2^9 = 512 pages */
#elif PAGE_SIZE == PAGE_SIZE_16K
#define	VM_LEVEL_0_ORDER	11	/* 2^11 = 2048 pages */
#else
#error Unsupported page size
#endif
#endif

/**
 * Address space layout.
 *
 * ARMv8 implements up to a 48 bit virtual address space. The address space is
 * split into 2 regions at each end of the 64 bit address space, with an
 * out of range "hole" in the middle.
 *
 * We use the full 48 bits for each region, however the kernel may only use
 * a limited range within this space.
 *
 * Upper region:    0xffffffffffffffff  Top of virtual memory
 *
 *                  0xfffffeffffffffff  End of DMAP
 *                  0xffffa00000000000  Start of DMAP
 *
 *                  0xffff027fffffffff  End of KMSAN origin map
 *                  0xffff020000000000  Start of KMSAN origin map
 *
 *                  0xffff017fffffffff  End of KMSAN shadow map
 *                  0xffff010000000000  Start of KMSAN shadow map
 *
 *                  0xffff009fffffffff  End of KASAN shadow map
 *                  0xffff008000000000  Start of KASAN shadow map
 *
 *                  0xffff007fffffffff  End of KVA
 *                  0xffff000000000000  Kernel base address & start of KVA
 *
 * Hole:            0xfffeffffffffffff
 *                  0x0001000000000000
 *
 * Lower region:    0x0000ffffffffffff End of user address space
 *                  0x0000000000000000 Start of user address space
 *
 * We use the upper region for the kernel, and the lower region for userland.
 *
 * We define some interesting address constants:
 *
 * VM_MIN_ADDRESS and VM_MAX_ADDRESS define the start and end of the entire
 * 64 bit address space, mostly just for convenience.
 *
 * VM_MIN_KERNEL_ADDRESS and VM_MAX_KERNEL_ADDRESS define the start and end of
 * mappable kernel virtual address space.
 *
 * VM_MIN_USER_ADDRESS and VM_MAX_USER_ADDRESS define the start and end of the
 * user address space.
 */
#define	VM_MIN_ADDRESS		(0x0000000000000000UL)
#define	VM_MAX_ADDRESS		(0xffffffffffffffffUL)

/* 512 GiB of kernel addresses */
#define	VM_MIN_KERNEL_ADDRESS	(0xffff000000000000UL)
#define	VM_MAX_KERNEL_ADDRESS	(0xffff008000000000UL)

/* 128 GiB KASAN shadow map */
#define	KASAN_MIN_ADDRESS	(0xffff008000000000UL)
#define	KASAN_MAX_ADDRESS	(0xffff00a000000000UL)

/* 512GiB KMSAN shadow map */
#define	KMSAN_SHAD_MIN_ADDRESS	(0xffff010000000000UL)
#define	KMSAN_SHAD_MAX_ADDRESS	(0xffff018000000000UL)

/* 512GiB KMSAN origin map */
#define	KMSAN_ORIG_MIN_ADDRESS	(0xffff020000000000UL)
#define	KMSAN_ORIG_MAX_ADDRESS	(0xffff028000000000UL)

/* The address bits that hold a pointer authentication code */
#define	PAC_ADDR_MASK		(0xff7f000000000000UL)

/* If true addr is in the kernel address space */
#define	ADDR_IS_KERNEL(addr)	(((addr) & (1ul << 55)) == (1ul << 55))
/* If true addr is in its canonical form (i.e. no TBI, PAC, etc.) */
#define	ADDR_IS_CANONICAL(addr)	\
    (((addr) & 0xffff000000000000UL) == 0 || \
     ((addr) & 0xffff000000000000UL) == 0xffff000000000000UL)
/*
 * Strip any TBI/PAC bits: based on bit 55, force the top 16 bits to
 * all-ones (kernel address) or all-zeroes (user address).  The argument
 * is evaluated exactly once; the previous version evaluated it a second
 * time inside ADDR_IS_KERNEL(), which was unsafe for arguments with
 * side effects.
 */
#define	ADDR_MAKE_CANONICAL(addr) ({			\
	__typeof(addr) _tmp_addr = (addr);		\
							\
	if (ADDR_IS_KERNEL(_tmp_addr))			\
		_tmp_addr |= 0xffff000000000000UL;	\
	else						\
		_tmp_addr &= ~0xffff000000000000UL;	\
							\
	_tmp_addr;					\
})

/* 95 TiB maximum for the direct map region */
#define	DMAP_MIN_ADDRESS	(0xffffa00000000000UL)
#define	DMAP_MAX_ADDRESS	(0xffffff0000000000UL)

/* Physical range covered by the DMAP (runtime values, see pmap) */
#define	DMAP_MIN_PHYSADDR	(dmap_phys_base)
#define	DMAP_MAX_PHYSADDR	(dmap_phys_max)

/* True if pa is in the dmap range */
#define	PHYS_IN_DMAP(pa)	((pa) >= DMAP_MIN_PHYSADDR && \
    (pa) < DMAP_MAX_PHYSADDR)
/* True if va is in the dmap range */
#define	VIRT_IN_DMAP(va)	((va) >= DMAP_MIN_ADDRESS && \
    (va) < (dmap_max_addr))

#define	PMAP_HAS_DMAP	1

/*
 * Translate a physical address to its direct-map virtual address.
 * Asserts the address lies within the DMAP-covered physical range.
 */
#define	PHYS_TO_DMAP(pa)						\
({									\
	KASSERT(PHYS_IN_DMAP(pa),					\
	    ("%s: PA out of range, PA: 0x%lx", __func__,		\
	    (vm_paddr_t)(pa)));						\
	((pa) - dmap_phys_base) + DMAP_MIN_ADDRESS;			\
})

/*
 * Translate a direct-map virtual address back to its physical address.
 * Asserts the address lies within the direct map.
 */
#define	DMAP_TO_PHYS(va)						\
({									\
	KASSERT(VIRT_IN_DMAP(va),					\
	    ("%s: VA out of range, VA: 0x%lx", __func__,		\
	    (vm_offset_t)(va)));					\
	((va) - DMAP_MIN_ADDRESS) + dmap_phys_base;			\
})

#define	VM_MIN_USER_ADDRESS	(0x0000000000000000UL)
#define	VM_MAX_USER_ADDRESS	(0x0001000000000000UL)

/* Compatibility spellings used by MI code */
#define	VM_MINUSER_ADDRESS	(VM_MIN_USER_ADDRESS)
#define	VM_MAXUSER_ADDRESS	(VM_MAX_USER_ADDRESS)

#define	KERNBASE		(VM_MIN_KERNEL_ADDRESS)
/* Shared page occupies the last user page; user stack grows down from it */
#define	SHAREDPAGE		(VM_MAXUSER_ADDRESS - PAGE_SIZE)
#define	USRSTACK		SHAREDPAGE

/*
 * How many physical pages per kmem arena virtual page.
 */
#ifndef VM_KMEM_SIZE_SCALE
#define	VM_KMEM_SIZE_SCALE	(1)
#endif

/*
 * Optional ceiling (in bytes) on the size of the kmem arena: 60% of the
 * kernel map.
 *
 * NOTE(review): the "+ 1" makes the computed KVA size one byte larger
 * than VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS; looks like the
 * bounds are treated as inclusive here — confirm against upstream
 * before changing.
 */
#ifndef VM_KMEM_SIZE_MAX
#define	VM_KMEM_SIZE_MAX	((VM_MAX_KERNEL_ADDRESS - \
    VM_MIN_KERNEL_ADDRESS + 1) * 3 / 5)
#endif

/*
 * Initial pagein size of beginning of executable file.
 */
#ifndef	VM_INITIAL_PAGEIN
#define	VM_INITIAL_PAGEIN	16
#endif

/* The sanitizers need the full kernel_object, so disable direct
 * small-allocation from the DMAP when KASAN/KMSAN is configured. */
#if !defined(KASAN) && !defined(KMSAN)
#define	UMA_MD_SMALL_ALLOC
#endif

287 #ifndef LOCORE
288 
289 extern vm_paddr_t dmap_phys_base;
290 extern vm_paddr_t dmap_phys_max;
291 extern vm_offset_t dmap_max_addr;
292 extern vm_offset_t vm_max_kernel_address;
293 
294 #endif
295 
#define	ZERO_REGION_SIZE	(64 * 1024)	/* 64KB */

#define	DEVMAP_MAX_VADDR	VM_MAX_KERNEL_ADDRESS

/*
 * The pmap can create non-transparent large page mappings.
 */
#define	PMAP_HAS_LARGEPAGES	1

/*
 * Need a page dump array for minidump.
 */
#define	MINIDUMP_PAGE_TRACKING	1

310 #endif /* !_MACHINE_VMPARAM_H_ */
311 
312 #endif /* !__arm__ */
313