/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/memory.h
 *
 * Copyright (C) 2000-2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * Note: this file should not be included by non-asm/.h files
 */
#ifndef __ASM_MEMORY_H
#define __ASM_MEMORY_H

#include <linux/const.h>
#include <linux/sizes.h>
#include <asm/page-def.h>

/*
 * Size of the PCI I/O space. This must remain a power of two so that
 * IO_SPACE_LIMIT acts as a mask for the low bits of I/O addresses.
 */
#define PCI_IO_SIZE		SZ_16M

/*
 * VMEMMAP_SIZE - allows the whole linear region to be covered by
 *                a struct page array
 *
 * If we are configured with a 52-bit kernel VA then our VMEMMAP_SIZE
 * needs to cover the memory region from the beginning of the 52-bit
 * PAGE_OFFSET all the way to PAGE_END for 48-bit. This allows us to
 * keep a constant PAGE_OFFSET and "fall back" to using the higher end
 * of the VMEMMAP where 52-bit support is not available in hardware.
 */
#define VMEMMAP_RANGE		(_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET)
#define VMEMMAP_SIZE		((VMEMMAP_RANGE >> PAGE_SHIFT) * sizeof(struct page))

/*
 * PAGE_OFFSET - the virtual address of the start of the linear map, at the
 *               start of the TTBR1 address space.
 * PAGE_END - the end of the linear map, where all other kernel mappings begin.
 * KIMAGE_VADDR - the virtual address of the start of the kernel image.
 * VA_BITS - the maximum number of bits for virtual addresses.
 */
#define VA_BITS			(CONFIG_ARM64_VA_BITS)
#define _PAGE_OFFSET(va)	(-(UL(1) << (va)))
#define PAGE_OFFSET		(_PAGE_OFFSET(VA_BITS))
#define KIMAGE_VADDR		(MODULES_END)
#define MODULES_END		(MODULES_VADDR + MODULES_VSIZE)
#define MODULES_VADDR		(_PAGE_END(VA_BITS_MIN))
#define MODULES_VSIZE		(SZ_2G)
#define VMEMMAP_START		(VMEMMAP_END - VMEMMAP_SIZE)
#define VMEMMAP_END		(-UL(SZ_1G))
#define PCI_IO_START		(VMEMMAP_END + SZ_8M)
#define PCI_IO_END		(PCI_IO_START + PCI_IO_SIZE)
#define FIXADDR_TOP		(-UL(SZ_8M))

#if VA_BITS > 48
#define VA_BITS_MIN		(48)
#else
#define VA_BITS_MIN		(VA_BITS)
#endif

#define _PAGE_END(va)		(-(UL(1) << ((va) - 1)))

#define KERNEL_START		_text
#define KERNEL_END		_end
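/*
 * Illustrative example only (not consumed by any code): how the definitions
 * above carve up the TTBR1 half of the address space, assuming a
 * configuration with VA_BITS == 48 (hence VA_BITS_MIN == 48). The values
 * follow directly from the macros and will differ for other configurations:
 *
 *   PAGE_OFFSET    = -(UL(1) << 48)        = 0xffff000000000000
 *   _PAGE_END(48)  = -(UL(1) << 47)        = 0xffff800000000000
 *   MODULES_VADDR  = _PAGE_END(48)         = 0xffff800000000000
 *   MODULES_END    = MODULES_VADDR + SZ_2G = 0xffff800080000000 (== KIMAGE_VADDR)
 *   VMEMMAP_END    = -UL(SZ_1G)            = 0xffffffffc0000000
 *   PCI_IO_START   = VMEMMAP_END + SZ_8M   = 0xffffffffc0800000
 *   FIXADDR_TOP    = -UL(SZ_8M)            = 0xffffffffff800000
 */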
/*
 * Generic and Software Tag-Based KASAN modes require 1/8th and 1/16th of the
 * kernel virtual address space for storing the shadow memory respectively.
 *
 * The mapping between a virtual memory address and its corresponding shadow
 * memory address is defined based on the formula:
 *
 *     shadow_addr = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 *
 * where KASAN_SHADOW_SCALE_SHIFT is the order of the number of bits that map
 * to a single shadow byte and KASAN_SHADOW_OFFSET is a constant that offsets
 * the mapping. Note that KASAN_SHADOW_OFFSET does not point to the start of
 * the shadow memory region.
 *
 * Based on this mapping, we define two constants:
 *
 *     KASAN_SHADOW_START: the start of the shadow memory region;
 *     KASAN_SHADOW_END: the end of the shadow memory region.
 *
 * KASAN_SHADOW_END is defined first as the shadow address that corresponds to
 * the upper bound of possible virtual kernel memory addresses UL(1) << 64
 * according to the mapping formula.
 *
 * KASAN_SHADOW_START is defined second based on KASAN_SHADOW_END. The shadow
 * memory start must map to the lowest possible kernel virtual memory address
 * and thus it depends on the actual bitness of the address space.
 *
 * As KASAN inserts redzones between stack variables, this increases the stack
 * memory usage significantly. Thus, we double the (minimum) stack size.
 */
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#define KASAN_SHADOW_OFFSET	_AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
#define KASAN_SHADOW_END	((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) + KASAN_SHADOW_OFFSET)
#define _KASAN_SHADOW_START(va)	(KASAN_SHADOW_END - (UL(1) << ((va) - KASAN_SHADOW_SCALE_SHIFT)))
#define KASAN_SHADOW_START	_KASAN_SHADOW_START(vabits_actual)
#define PAGE_END		KASAN_SHADOW_START
#define KASAN_THREAD_SHIFT	1
#else
#define KASAN_THREAD_SHIFT	0
#define PAGE_END		(_PAGE_END(VA_BITS_MIN))
#endif /* CONFIG_KASAN */
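/*
 * Illustrative example only: a sketch of the shadow mapping above, assuming
 * generic KASAN (KASAN_SHADOW_SCALE_SHIFT == 3, i.e. eight bytes of memory
 * per shadow byte) and a 48-bit address space. Plugging the upper bound of
 * the address space into the mapping formula gives
 *
 *     shadow(UL(1) << 64) = (UL(1) << 61) + KASAN_SHADOW_OFFSET = KASAN_SHADOW_END
 *
 * and the lowest kernel address, -(UL(1) << 48), maps to
 *
 *     KASAN_SHADOW_END - (UL(1) << (48 - 3)) = _KASAN_SHADOW_START(48)
 *
 * which is exactly how KASAN_SHADOW_START is computed once vabits_actual is
 * known.
 */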
#define MIN_THREAD_SHIFT	(14 + KASAN_THREAD_SHIFT)

/*
 * VMAP'd stacks are allocated at page granularity, so we must ensure that such
 * stacks are a multiple of page size.
 */
#if defined(CONFIG_VMAP_STACK) && (MIN_THREAD_SHIFT < PAGE_SHIFT)
#define THREAD_SHIFT		PAGE_SHIFT
#else
#define THREAD_SHIFT		MIN_THREAD_SHIFT
#endif

#if THREAD_SHIFT >= PAGE_SHIFT
#define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)
#endif

#define THREAD_SIZE		(UL(1) << THREAD_SHIFT)

/*
 * By aligning VMAP'd stacks to 2 * THREAD_SIZE, we can detect overflow by
 * checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
 * assembly.
 */
#ifdef CONFIG_VMAP_STACK
#define THREAD_ALIGN		(2 * THREAD_SIZE)
#else
#define THREAD_ALIGN		THREAD_SIZE
#endif

#define IRQ_STACK_SIZE		THREAD_SIZE

#define OVERFLOW_STACK_SIZE	SZ_4K

/*
 * With the minimum frame size of [x29, x30], exactly half the combined
 * sizes of the hyp and overflow stacks is the maximum size needed to
 * save the unwound stacktrace; plus an additional entry to delimit the
 * end.
 */
#define NVHE_STACKTRACE_SIZE	((OVERFLOW_STACK_SIZE + PAGE_SIZE) / 2 + sizeof(long))

/*
 * Alignment of kernel segments (e.g. .text, .data).
 *
 *  4 KB granule:  16 level 3 entries, with contiguous bit
 * 16 KB granule:   4 level 3 entries, without contiguous bit
 * 64 KB granule:   1 level 3 entry
 */
#define SEGMENT_ALIGN		SZ_64K

/*
 * Memory types available.
 *
 * IMPORTANT: MT_NORMAL must be index 0 since vm_get_page_prot() may 'or' in
 *            the MT_NORMAL_TAGGED memory type for PROT_MTE mappings. Note
 *            that protection_map[] only contains MT_NORMAL attributes.
 */
#define MT_NORMAL		0
#define MT_NORMAL_TAGGED	1
#define MT_NORMAL_NC		2
#define MT_DEVICE_nGnRnE	3
#define MT_DEVICE_nGnRE		4

/*
 * Memory types for Stage-2 translation
 */
#define MT_S2_NORMAL		0xf
#define MT_S2_DEVICE_nGnRE	0x1

/*
 * Memory types for Stage-2 translation when ID_AA64MMFR2_EL1.FWB is 0001
 * Stage-2 enforces Normal-WB and Device-nGnRE
 */
#define MT_S2_FWB_NORMAL	6
#define MT_S2_FWB_DEVICE_nGnRE	1

#ifdef CONFIG_ARM64_4K_PAGES
#define IOREMAP_MAX_ORDER	(PUD_SHIFT)
#else
#define IOREMAP_MAX_ORDER	(PMD_SHIFT)
#endif

/*
 * Open-coded (swapper_pg_dir - reserved_pg_dir) as this cannot be calculated
 * until link time.
 */
#define RESERVED_SWAPPER_OFFSET	(PAGE_SIZE)

/*
 * Open-coded (swapper_pg_dir - tramp_pg_dir) as this cannot be calculated
 * until link time.
 */
#define TRAMP_SWAPPER_OFFSET	(2 * PAGE_SIZE)

#ifndef __ASSEMBLY__

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/mmdebug.h>
#include <linux/types.h>
#include <asm/boot.h>
#include <asm/bug.h>
#include <asm/sections.h>
#include <asm/sysreg.h>

static inline u64 __pure read_tcr(void)
{
	u64 tcr;

	// read_sysreg() uses asm volatile, so avoid it here
	asm("mrs %0, tcr_el1" : "=r"(tcr));
	return tcr;
}

#if VA_BITS > 48
// For reasons of #include hell, we can't use TCR_T1SZ_OFFSET/TCR_T1SZ_MASK here
#define vabits_actual		(64 - ((read_tcr() >> 16) & 63))
#else
#define vabits_actual		((u64)VA_BITS)
#endif

extern s64			memstart_addr;
/* PHYS_OFFSET - the physical address of the start of memory. */
#define PHYS_OFFSET		({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })

/* the offset between the kernel virtual and physical mappings */
extern u64			kimage_voffset;

static inline unsigned long kaslr_offset(void)
{
	return (u64)&_text - KIMAGE_VADDR;
}

#ifdef CONFIG_RANDOMIZE_BASE
void kaslr_init(void);
static inline bool kaslr_enabled(void)
{
	extern bool __kaslr_is_enabled;
	return __kaslr_is_enabled;
}
#else
static inline void kaslr_init(void) { }
static inline bool kaslr_enabled(void) { return false; }
#endif

/*
 * Allow all memory at the discovery stage. We will clip it later.
 */
#define MIN_MEMBLOCK_ADDR	0
#define MAX_MEMBLOCK_ADDR	U64_MAX

/*
 * PFNs are used to describe any physical page; this means
 * PFN 0 == physical address 0.
 *
 * This is the PFN of the first RAM page in the kernel
 * direct-mapped view. We assume this is the first page
 * of RAM in the mem_map as well.
 */
#define PHYS_PFN_OFFSET		(PHYS_OFFSET >> PAGE_SHIFT)

/*
 * When dealing with data aborts, watchpoints, or instruction traps we may end
 * up with a tagged userland pointer. Clear the tag to get a sane pointer to
 * pass on to access_ok(), for instance.
 */
#define __untagged_addr(addr)	\
	((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))

#define untagged_addr(addr)	({					\
	u64 __addr = (__force u64)(addr);				\
	__addr &= __untagged_addr(__addr);				\
	(__force __typeof__(addr))__addr;				\
})
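/*
 * Illustrative example only, with made-up addresses: sign_extend64(addr, 55)
 * copies bit 55 into bits 63:56, so for a tagged TTBR0 (user) pointer such as
 * 0x3400000012345678 (tag 0x34, bit 55 clear) __untagged_addr() yields
 * 0x0000000012345678 and the AND in untagged_addr() strips the tag. For a
 * TTBR1 (kernel) pointer bit 55 is set, the extension fills bits 63:56 with
 * ones, and the AND leaves the pointer unchanged.
 */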
#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
#define __tag_shifted(tag)	((u64)(tag) << 56)
#define __tag_reset(addr)	__untagged_addr(addr)
#define __tag_get(addr)		(__u8)((u64)(addr) >> 56)
#else
#define __tag_shifted(tag)	0UL
#define __tag_reset(addr)	(addr)
#define __tag_get(addr)		0
#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline const void *__tag_set(const void *addr, u8 tag)
{
	u64 __addr = (u64)addr & ~__tag_shifted(0xff);
	return (const void *)(__addr | __tag_shifted(tag));
}

#ifdef CONFIG_KASAN_HW_TAGS
#define arch_enable_tag_checks_sync()		mte_enable_kernel_sync()
#define arch_enable_tag_checks_async()		mte_enable_kernel_async()
#define arch_enable_tag_checks_asymm()		mte_enable_kernel_asymm()
#define arch_suppress_tag_checks_start()	mte_enable_tco()
#define arch_suppress_tag_checks_stop()		mte_disable_tco()
#define arch_force_async_tag_fault()		mte_check_tfsr_exit()
#define arch_get_random_tag()			mte_get_random_tag()
#define arch_get_mem_tag(addr)			mte_get_mem_tag(addr)
#define arch_set_mem_tag_range(addr, size, tag, init)	\
			mte_set_mem_tag_range((addr), (size), (tag), (init))
#endif /* CONFIG_KASAN_HW_TAGS */

/*
 * Physical vs virtual RAM address space conversion. These are
 * private definitions which should NOT be used outside memory.h
 * files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
 */

/*
 * Check whether an arbitrary address is within the linear map, which
 * lives in the [PAGE_OFFSET, PAGE_END) interval at the bottom of the
 * kernel's TTBR1 address range.
 */
#define __is_lm_address(addr)	(((u64)(addr) - PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))

#define __lm_to_phys(addr)	(((addr) - PAGE_OFFSET) + PHYS_OFFSET)
#define __kimg_to_phys(addr)	((addr) - kimage_voffset)

#define __virt_to_phys_nodebug(x) ({					\
	phys_addr_t __x = (phys_addr_t)(__tag_reset(x));		\
	__is_lm_address(__x) ? __lm_to_phys(__x) : __kimg_to_phys(__x);	\
})

#define __pa_symbol_nodebug(x)	__kimg_to_phys((phys_addr_t)(x))

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __virt_to_phys(x)	__virt_to_phys_nodebug(x)
#define __phys_addr_symbol(x)	__pa_symbol_nodebug(x)
#endif /* CONFIG_DEBUG_VIRTUAL */

#define __phys_to_virt(x)	((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
#define __phys_to_kimg(x)	((unsigned long)((x) + kimage_voffset))
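/*
 * Illustrative example only, with a made-up PHYS_OFFSET of 0x40000000 and
 * VA_BITS == 48: a linear map address takes the __lm_to_phys() branch of
 * __virt_to_phys_nodebug(), e.g.
 *
 *     __virt_to_phys_nodebug(PAGE_OFFSET + 0x100000) == 0x40100000
 *
 * whereas an address inside the kernel image (above PAGE_END) takes the
 * __kimg_to_phys() branch, i.e. the runtime kimage_voffset is subtracted.
 * In the other direction, __phys_to_virt(0x40100000) ORs PAGE_OFFSET back in
 * and returns PAGE_OFFSET + 0x100000.
 */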
/*
 * Convert a page to/from a physical address
 */
#define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))

/*
 * Note: Drivers should NOT use these. They are the wrong
 * translation for DMA addresses. Use the driver
 * DMA support - see dma-mapping.h.
 */
#define virt_to_phys virt_to_phys
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
	return __virt_to_phys((unsigned long)(x));
}

#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(phys_addr_t x)
{
	return (void *)(__phys_to_virt(x));
}

/* Needed already here for resolving __phys_to_pfn() in virt_to_pfn() */
#include <asm-generic/memory_model.h>

static inline unsigned long virt_to_pfn(const void *kaddr)
{
	return __phys_to_pfn(virt_to_phys(kaddr));
}

/*
 * Drivers should NOT use these either.
 */
#define __pa(x)			__virt_to_phys((unsigned long)(x))
#define __pa_symbol(x)		__phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
#define __pa_nodebug(x)		__virt_to_phys_nodebug((unsigned long)(x))
#define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define sym_to_pfn(x)		__phys_to_pfn(__pa_symbol(x))

/*
 * virt_to_page(x)	convert a _valid_ virtual address to struct page *
 * virt_addr_valid(x)	indicates whether a virtual address is valid
 */
#define ARCH_PFN_OFFSET		((unsigned long)PHYS_PFN_OFFSET)

#if defined(CONFIG_DEBUG_VIRTUAL)
#define page_to_virt(x)	({						\
	__typeof__(x) __page = x;					\
	void *__addr = __va(page_to_phys(__page));			\
	(void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
})
#define virt_to_page(x)		pfn_to_page(virt_to_pfn(x))
#else
#define page_to_virt(x)	({						\
	__typeof__(x) __page = x;					\
	u64 __idx = ((u64)__page - VMEMMAP_START) / sizeof(struct page);\
	u64 __addr = PAGE_OFFSET + (__idx * PAGE_SIZE);			\
	(void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
})

#define virt_to_page(x)	({						\
	u64 __idx = (__tag_reset((u64)x) - PAGE_OFFSET) / PAGE_SIZE;	\
	u64 __addr = VMEMMAP_START + (__idx * sizeof(struct page));	\
	(struct page *)__addr;						\
})
#endif /* CONFIG_DEBUG_VIRTUAL */

#define virt_addr_valid(addr)	({					\
	__typeof__(addr) __addr = __tag_reset(addr);			\
	__is_lm_address(__addr) && pfn_is_map_memory(virt_to_pfn(__addr));	\
})

void dump_mem_limit(void);
#endif /* !ASSEMBLY */

/*
 * Given that the GIC architecture permits ITS implementations that can only be
 * configured with an LPI table address once, GICv3 systems with many CPUs may
 * end up reserving a lot of different regions after a kexec for their LPI
 * tables (one per CPU), as we are forced to reuse the same memory after kexec
 * (and thus reserve it persistently with EFI beforehand).
 */
#if defined(CONFIG_EFI) && defined(CONFIG_ARM_GIC_V3_ITS)
# define INIT_MEMBLOCK_RESERVED_REGIONS	(INIT_MEMBLOCK_REGIONS + NR_CPUS + 1)
#endif

/*
 * Memory regions marked with the MEMBLOCK_NOMAP flag (for example, memory
 * of the EFI_UNUSABLE_MEMORY type) may divide a contiguous memory block into
 * multiple parts. As a result, the number of memory regions is large.
 */
#ifdef CONFIG_EFI
#define INIT_MEMBLOCK_MEMORY_REGIONS	(INIT_MEMBLOCK_REGIONS * 8)
#endif

#endif /* __ASM_MEMORY_H */