xref: /linux/arch/arm64/include/asm/memory.h (revision 86287543715ac2a6d92d561cc105d79306511457)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/memory.h
 *
 * Copyright (C) 2000-2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * Note: this file should not be included by non-asm/.h files
 */
#ifndef __ASM_MEMORY_H
#define __ASM_MEMORY_H

#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/sizes.h>
#include <linux/types.h>
#include <asm/bug.h>
#include <asm/page-def.h>

/*
 * Size of the PCI I/O space. This must remain a power of two so that
 * IO_SPACE_LIMIT acts as a mask for the low bits of I/O addresses.
 */
#define PCI_IO_SIZE		SZ_16M

/*
 * VMEMMAP_SIZE - allows the whole linear region to be covered by
 *                a struct page array
 *
 * If we are configured with a 52-bit kernel VA then our VMEMMAP_SIZE
 * needs to cover the memory region from the beginning of the 52-bit
 * PAGE_OFFSET all the way to PAGE_END for 48-bit. This allows us to
 * keep a constant PAGE_OFFSET and fall back to using the higher end
 * of the VMEMMAP where 52-bit support is not available in hardware.
 */
#define VMEMMAP_SIZE ((_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET) \
			>> (PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT))
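
/*
 * Worked example (illustrative only): with VA_BITS_MIN == 48, 4K pages
 * (PAGE_SHIFT == 12) and STRUCT_PAGE_MAX_SHIFT == 6 (i.e. assuming
 * sizeof(struct page) <= 64 bytes):
 *
 *	VMEMMAP_SIZE = (1UL << 47) >> (12 - 6) = 1UL << 41	(2 TiB)
 */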

/*
 * PAGE_OFFSET - the virtual address of the start of the linear map, at the
 *               start of the TTBR1 address space.
 * PAGE_END - the end of the linear map, where all other kernel mappings begin.
 * KIMAGE_VADDR - the virtual address of the start of the kernel image.
 * VA_BITS - the maximum number of bits for virtual addresses.
 */
#define VA_BITS			(CONFIG_ARM64_VA_BITS)
#define _PAGE_OFFSET(va)	(-(UL(1) << (va)))
#define PAGE_OFFSET		(_PAGE_OFFSET(VA_BITS))
#define KIMAGE_VADDR		(MODULES_END)
#define BPF_JIT_REGION_START	(KASAN_SHADOW_END)
#define BPF_JIT_REGION_SIZE	(SZ_128M)
#define BPF_JIT_REGION_END	(BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE)
#define MODULES_END		(MODULES_VADDR + MODULES_VSIZE)
#define MODULES_VADDR		(BPF_JIT_REGION_END)
#define MODULES_VSIZE		(SZ_128M)
#define VMEMMAP_START		(-VMEMMAP_SIZE - SZ_2M)
#define VMEMMAP_END		(VMEMMAP_START + VMEMMAP_SIZE)
#define PCI_IO_END		(VMEMMAP_START - SZ_2M)
#define PCI_IO_START		(PCI_IO_END - PCI_IO_SIZE)
#define FIXADDR_TOP		(PCI_IO_START - SZ_2M)

#if VA_BITS > 48
#define VA_BITS_MIN		(48)
#else
#define VA_BITS_MIN		(VA_BITS)
#endif

#define _PAGE_END(va)		(-(UL(1) << ((va) - 1)))
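
/*
 * Worked example (illustrative only): with VA_BITS == VA_BITS_MIN == 48
 * and KASAN disabled, the TTBR1 space is laid out as:
 *
 *	PAGE_OFFSET	= -(1UL << 48)	= 0xffff000000000000
 *	_PAGE_END(48)	= -(1UL << 47)	= 0xffff800000000000
 *	BPF JIT region	= [0xffff800000000000, 0xffff800008000000)
 *	modules		= [0xffff800008000000, 0xffff800010000000)
 *	KIMAGE_VADDR	= 0xffff800010000000
 *
 * with the fixmap, PCI I/O space and vmemmap carved out of the very top
 * of the address space, below VMEMMAP_END.
 */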

#define KERNEL_START		_text
#define KERNEL_END		_end

/*
 * Generic and tag-based KASAN require 1/8th and 1/16th of the kernel virtual
 * address space for the shadow region respectively. They can bloat the stack
 * significantly, so double the (minimum) stack size when they are in use.
 */
#ifdef CONFIG_KASAN
#define KASAN_SHADOW_OFFSET	_AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
#define KASAN_SHADOW_END	((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) \
					+ KASAN_SHADOW_OFFSET)
#define KASAN_THREAD_SHIFT	1
#else
#define KASAN_THREAD_SHIFT	0
#define KASAN_SHADOW_END	(_PAGE_END(VA_BITS_MIN))
#endif /* CONFIG_KASAN */
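
/*
 * Illustrative sketch: KASAN_SHADOW_SCALE_SHIFT (defined elsewhere) is 3
 * for generic KASAN and 4 for software tag-based KASAN, i.e. one shadow
 * byte tracks 8 or 16 bytes of memory respectively. With the generic
 * mode, for example:
 *
 *	KASAN_SHADOW_END = (1UL << 61) + KASAN_SHADOW_OFFSET
 */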

#define MIN_THREAD_SHIFT	(14 + KASAN_THREAD_SHIFT)

/*
 * VMAP'd stacks are allocated at page granularity, so we must ensure that such
 * stacks are a multiple of page size.
 */
#if defined(CONFIG_VMAP_STACK) && (MIN_THREAD_SHIFT < PAGE_SHIFT)
#define THREAD_SHIFT		PAGE_SHIFT
#else
#define THREAD_SHIFT		MIN_THREAD_SHIFT
#endif

#if THREAD_SHIFT >= PAGE_SHIFT
#define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)
#endif

#define THREAD_SIZE		(UL(1) << THREAD_SHIFT)
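
/*
 * Worked example (illustrative only): without KASAN, MIN_THREAD_SHIFT is
 * 14, so with 4K pages THREAD_SHIFT == 14, THREAD_SIZE == 16K and
 * THREAD_SIZE_ORDER == 2. With 64K pages and CONFIG_VMAP_STACK,
 * THREAD_SHIFT is bumped to PAGE_SHIFT (16), giving 64K stacks of
 * order 0.
 */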

/*
 * By aligning VMAP'd stacks to 2 * THREAD_SIZE, we can detect overflow by
 * checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
 * assembly.
 */
#ifdef CONFIG_VMAP_STACK
#define THREAD_ALIGN		(2 * THREAD_SIZE)
#else
#define THREAD_ALIGN		THREAD_SIZE
#endif
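
/*
 * Worked example (illustrative only, addresses made up): with 16K stacks,
 * a VMAP'd stack is 32K-aligned, so any valid sp has bit 14 clear, while
 * an sp that has run off the bottom of the stack has bit 14 set:
 *
 *	stack = [0xffff00000a8f8000, 0xffff00000a8fc000)    bit 14 == 0
 *	sp == 0xffff00000a8f7ff8 after overflow             bit 14 == 1
 */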

#define IRQ_STACK_SIZE		THREAD_SIZE

#define OVERFLOW_STACK_SIZE	SZ_4K

/*
 * Alignment of kernel segments (e.g. .text, .data).
 */
#if defined(CONFIG_DEBUG_ALIGN_RODATA)
/*
 *  4 KB granule:   1 level 2 entry
 * 16 KB granule: 128 level 3 entries, with contiguous bit
 * 64 KB granule:  32 level 3 entries, with contiguous bit
 */
#define SEGMENT_ALIGN		SZ_2M
#else
/*
 *  4 KB granule:  16 level 3 entries, with contiguous bit
 * 16 KB granule:   4 level 3 entries, without contiguous bit
 * 64 KB granule:   1 level 3 entry
 */
#define SEGMENT_ALIGN		SZ_64K
#endif

/*
 * Memory types available.
 */
#define MT_DEVICE_nGnRnE	0
#define MT_DEVICE_nGnRE		1
#define MT_DEVICE_GRE		2
#define MT_NORMAL_NC		3
#define MT_NORMAL		4
#define MT_NORMAL_WT		5
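
/*
 * Illustrative note: these values are indices into the MAIR_EL1 attribute
 * array programmed at boot; e.g. PTE_ATTRINDX(MT_NORMAL) (see
 * pgtable-hwdef.h) shifts the index into the AttrIndx field of a page
 * table entry.
 */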

/*
 * Memory types for Stage-2 translation
 */
#define MT_S2_NORMAL		0xf
#define MT_S2_DEVICE_nGnRE	0x1

/*
 * Memory types for Stage-2 translation when ID_AA64MMFR2_EL1.FWB is 0001.
 * In that case, Stage-2 enforces Normal-WB and Device-nGnRE.
 */
#define MT_S2_FWB_NORMAL	6
#define MT_S2_FWB_DEVICE_nGnRE	1

#ifdef CONFIG_ARM64_4K_PAGES
#define IOREMAP_MAX_ORDER	(PUD_SHIFT)
#else
#define IOREMAP_MAX_ORDER	(PMD_SHIFT)
#endif

#ifndef __ASSEMBLY__
extern u64			vabits_actual;
#define PAGE_END		(_PAGE_END(vabits_actual))

#include <linux/bitops.h>
#include <linux/mmdebug.h>

extern s64			physvirt_offset;
extern s64			memstart_addr;
/* PHYS_OFFSET - the physical address of the start of memory. */
#define PHYS_OFFSET		({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })

/* the virtual base of the kernel image (minus TEXT_OFFSET) */
extern u64			kimage_vaddr;

/* the offset between the kernel virtual and physical mappings */
extern u64			kimage_voffset;

static inline unsigned long kaslr_offset(void)
{
	return kimage_vaddr - KIMAGE_VADDR;
}
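
/*
 * Usage sketch (illustrative only), e.g. for reporting the randomized
 * base from a panic notifier:
 *
 *	pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
 *		 kaslr_offset(), KIMAGE_VADDR);
 */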

/*
 * Allow all memory at the discovery stage. We will clip it later.
 */
#define MIN_MEMBLOCK_ADDR	0
#define MAX_MEMBLOCK_ADDR	U64_MAX

/*
 * PFNs are used to describe any physical page; this means
 * PFN 0 == physical address 0.
 *
 * This is the PFN of the first RAM page in the kernel
 * direct-mapped view.  We assume this is the first page
 * of RAM in the mem_map as well.
 */
#define PHYS_PFN_OFFSET	(PHYS_OFFSET >> PAGE_SHIFT)
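
/*
 * Worked example (illustrative only): if RAM starts at physical address
 * 0x80000000 and PAGE_SHIFT == 12, then PHYS_PFN_OFFSET == 0x80000 and
 * pfn 0x80000 describes the first page of RAM.
 */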

/*
 * When dealing with data aborts, watchpoints, or instruction traps we may end
 * up with a tagged userland pointer. Clear the tag to get a sane pointer to
 * pass on to access_ok(), for instance.
 */
#define __untagged_addr(addr)	\
	((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))

#define untagged_addr(addr)	({					\
	u64 __addr = (__force u64)(addr);				\
	__addr &= __untagged_addr(__addr);				\
	(__force __typeof__(addr))__addr;				\
})
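
/*
 * Worked example (illustrative only): for a TTBR0 (user) pointer, bit 55
 * is clear and the sign-extension clears the tag byte; for a TTBR1
 * (kernel) pointer, bit 55 is set and the address is left intact:
 *
 *	untagged_addr(0x5a00aabbccdd0123) == 0x0000aabbccdd0123
 *	untagged_addr(0xffff8000dead0000) == 0xffff8000dead0000
 */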

#ifdef CONFIG_KASAN_SW_TAGS
#define __tag_shifted(tag)	((u64)(tag) << 56)
#define __tag_reset(addr)	__untagged_addr(addr)
#define __tag_get(addr)		(__u8)((u64)(addr) >> 56)
#else
#define __tag_shifted(tag)	0UL
#define __tag_reset(addr)	(addr)
#define __tag_get(addr)		0
#endif /* CONFIG_KASAN_SW_TAGS */

static inline const void *__tag_set(const void *addr, u8 tag)
{
	u64 __addr = (u64)addr & ~__tag_shifted(0xff);
	return (const void *)(__addr | __tag_shifted(tag));
}
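
/*
 * Worked example (illustrative only), with CONFIG_KASAN_SW_TAGS:
 *
 *	__tag_set((void *)0xffff0000deadbeef, 0x2a)
 *		== (void *)0x2aff0000deadbeef
 *
 * Without CONFIG_KASAN_SW_TAGS, __tag_shifted() is 0 and __tag_set()
 * returns the pointer unchanged.
 */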

/*
 * Physical vs virtual RAM address space conversion.  These are
 * private definitions which should NOT be used outside memory.h
 * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
 */

/*
 * The linear kernel range starts at the bottom of the virtual address
 * space. Testing the top bit for the start of the region is a
 * sufficient check and avoids having to worry about the tag.
 */
#define __is_lm_address(addr)	(!(((u64)addr) & BIT(vabits_actual - 1)))

#define __lm_to_phys(addr)	(((addr) + physvirt_offset))
#define __kimg_to_phys(addr)	((addr) - kimage_voffset)

#define __virt_to_phys_nodebug(x) ({					\
	phys_addr_t __x = (phys_addr_t)(__tag_reset(x));		\
	__is_lm_address(__x) ? __lm_to_phys(__x) : __kimg_to_phys(__x);	\
})
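
/*
 * Illustrative sketch: with vabits_actual == 48, a linear-map address
 * such as 0xffff000080000000 has bit 47 clear and is translated via
 * physvirt_offset, while a kernel-image address at or above KIMAGE_VADDR
 * has bit 47 set and is translated via kimage_voffset instead.
 */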

#define __pa_symbol_nodebug(x)	__kimg_to_phys((phys_addr_t)(x))

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __virt_to_phys(x)	__virt_to_phys_nodebug(x)
#define __phys_addr_symbol(x)	__pa_symbol_nodebug(x)
#endif /* CONFIG_DEBUG_VIRTUAL */

#define __phys_to_virt(x)	((unsigned long)((x) - physvirt_offset))
#define __phys_to_kimg(x)	((unsigned long)((x) + kimage_voffset))

/*
 * Convert a page to/from a physical address
 */
#define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))

/*
 * Note: Drivers should NOT use these.  They are the wrong
 * translation for translating DMA addresses.  Use the driver
 * DMA support - see dma-mapping.h.
 */
#define virt_to_phys virt_to_phys
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
	return __virt_to_phys((unsigned long)(x));
}

#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(phys_addr_t x)
{
	return (void *)(__phys_to_virt(x));
}

/*
 * Drivers should NOT use these either.
 */
#define __pa(x)			__virt_to_phys((unsigned long)(x))
#define __pa_symbol(x)		__phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
#define __pa_nodebug(x)		__virt_to_phys_nodebug((unsigned long)(x))
#define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define virt_to_pfn(x)		__phys_to_pfn(__virt_to_phys((unsigned long)(x)))
#define sym_to_pfn(x)		__phys_to_pfn(__pa_symbol(x))
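
/*
 * Worked example (illustrative only): with PAGE_OFFSET ==
 * 0xffff000000000000 and RAM starting at 0x80000000, physvirt_offset ==
 * PHYS_OFFSET - PAGE_OFFSET, so:
 *
 *	__pa(0xffff000000000000)	== 0x80000000
 *	__va(0x80000000)		== (void *)0xffff000000000000
 */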

/*
 *  virt_to_page(x)	convert a _valid_ virtual address to struct page *
 *  virt_addr_valid(x)	indicates whether a virtual address is valid
 */
#define ARCH_PFN_OFFSET		((unsigned long)PHYS_PFN_OFFSET)

#if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL)
#define virt_to_page(x)		pfn_to_page(virt_to_pfn(x))
#else
#define page_to_virt(x)	({						\
	__typeof__(x) __page = x;					\
	u64 __idx = ((u64)__page - VMEMMAP_START) / sizeof(struct page);\
	u64 __addr = PAGE_OFFSET + (__idx * PAGE_SIZE);			\
	(void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
})

#define virt_to_page(x)	({						\
	u64 __idx = (__tag_reset((u64)x) - PAGE_OFFSET) / PAGE_SIZE;	\
	u64 __addr = VMEMMAP_START + (__idx * sizeof(struct page));	\
	(struct page *)__addr;						\
})
#endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */
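
/*
 * Worked example (illustrative only): with 4K pages and a 64-byte
 * struct page, the vmemmap-based virt_to_page() resolves the address
 * 2M into the linear map as:
 *
 *	__idx  = 0x200000 / 0x1000 = 0x200
 *	__addr = VMEMMAP_START + 0x200 * 64 = VMEMMAP_START + 0x8000
 */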

#define virt_addr_valid(addr)	({					\
	__typeof__(addr) __addr = addr;					\
	__is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr));	\
})

#endif /* !__ASSEMBLY__ */

/*
 * Given that the GIC architecture permits ITS implementations that can only be
 * configured with an LPI table address once, GICv3 systems with many CPUs may
 * end up reserving a lot of different regions after a kexec for their LPI
 * tables (one per CPU), as we are forced to reuse the same memory after kexec
 * (and thus reserve it persistently with EFI beforehand).
 */
#if defined(CONFIG_EFI) && defined(CONFIG_ARM_GIC_V3_ITS)
# define INIT_MEMBLOCK_RESERVED_REGIONS	(INIT_MEMBLOCK_REGIONS + NR_CPUS + 1)
#endif

#include <asm-generic/memory_model.h>

#endif /* __ASM_MEMORY_H */