/*
 * Based on arch/arm/include/asm/memory.h
 *
 * Copyright (C) 2000-2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Note: this file should not be included by non-asm/*.h files
 */
#ifndef __ASM_MEMORY_H
#define __ASM_MEMORY_H

#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>
#include <asm/bug.h>
#include <asm/page-def.h>
#include <asm/sizes.h>

/*
 * Allow for constants defined here to be used from assembly code
 * by appending the UL suffix only during actual C compilation.
 */
#define UL(x)			_AC(x, UL)

/*
 * Size of the PCI I/O space. This must remain a power of two so that
 * IO_SPACE_LIMIT acts as a mask for the low bits of I/O addresses.
 */
#define PCI_IO_SIZE		SZ_16M

/*
 * Log2 of the upper bound of the size of a struct page. Used for sizing
 * the vmemmap region only, does not affect actual memory footprint.
 * We don't use sizeof(struct page) directly, since taking its size here
 * would require its definition to be available at this point in the
 * inclusion chain, and it may not be a power of 2 in the first place.
 */
#define STRUCT_PAGE_MAX_SHIFT	6

/*
 * VMEMMAP_SIZE - allows the whole linear region to be covered by
 *                a struct page array
 */
#define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT))

/*
 * PAGE_OFFSET - the virtual address of the start of the linear map; the
 *               linear map occupies the top 2^(VA_BITS - 1) bytes of the
 *               address space.
 * KIMAGE_VADDR - the virtual address of the start of the kernel image.
 * VA_BITS - the maximum number of bits for virtual addresses.
 * VA_START - the first kernel virtual address.
 * TASK_SIZE - the maximum size of a user space task.
 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
 */
#define VA_BITS			(CONFIG_ARM64_VA_BITS)
#define VA_START		(UL(0xffffffffffffffff) - \
	(UL(1) << VA_BITS) + 1)
#define PAGE_OFFSET		(UL(0xffffffffffffffff) - \
	(UL(1) << (VA_BITS - 1)) + 1)
#define KIMAGE_VADDR		(MODULES_END)
#define MODULES_END		(MODULES_VADDR + MODULES_VSIZE)
#define MODULES_VADDR		(VA_START + KASAN_SHADOW_SIZE)
#define MODULES_VSIZE		(SZ_128M)
#define VMEMMAP_START		(PAGE_OFFSET - VMEMMAP_SIZE)
#define PCI_IO_END		(VMEMMAP_START - SZ_2M)
#define PCI_IO_START		(PCI_IO_END - PCI_IO_SIZE)
#define FIXADDR_TOP		(PCI_IO_START - SZ_2M)
#define TASK_SIZE_64		(UL(1) << VA_BITS)

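/*
 * For illustration (assuming CONFIG_ARM64_VA_BITS=48 and CONFIG_KASAN
 * disabled), the layout macros above evaluate to:
 *
 *   VA_START      = 0xffff000000000000   (first kernel VA)
 *   MODULES_VADDR = VA_START, MODULES_END = VA_START + 128 MB
 *   KIMAGE_VADDR  = 0xffff000008000000   (start of the kernel image)
 *   PAGE_OFFSET   = 0xffff800000000000   (start of the linear map)
 *   TASK_SIZE_64  = 1 << 48              (256 TB of user address space)
 */
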
#ifdef CONFIG_COMPAT
#define TASK_SIZE_32		UL(0x100000000)
#define TASK_SIZE		(test_thread_flag(TIF_32BIT) ? \
				TASK_SIZE_32 : TASK_SIZE_64)
#define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk, TIF_32BIT) ? \
				TASK_SIZE_32 : TASK_SIZE_64)
#else
#define TASK_SIZE		TASK_SIZE_64
#endif /* CONFIG_COMPAT */

#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 4))

#define KERNEL_START		_text
#define KERNEL_END		_end

/*
 * KASAN requires 1/8th of the kernel virtual address space for the shadow
 * region. KASAN can bloat the stack significantly, so double the (minimum)
 * stack size when KASAN is in use.
 */
#ifdef CONFIG_KASAN
#define KASAN_SHADOW_SIZE	(UL(1) << (VA_BITS - 3))
#define KASAN_THREAD_SHIFT	1
#else
#define KASAN_SHADOW_SIZE	(0)
#define KASAN_THREAD_SHIFT	0
#endif

#define MIN_THREAD_SHIFT	(14 + KASAN_THREAD_SHIFT)

/*
 * VMAP'd stacks are allocated at page granularity, so we must ensure that such
 * stacks are a multiple of page size.
 */
#if defined(CONFIG_VMAP_STACK) && (MIN_THREAD_SHIFT < PAGE_SHIFT)
#define THREAD_SHIFT		PAGE_SHIFT
#else
#define THREAD_SHIFT		MIN_THREAD_SHIFT
#endif

#if THREAD_SHIFT >= PAGE_SHIFT
#define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)
#endif

#define THREAD_SIZE		(UL(1) << THREAD_SHIFT)

/*
 * By aligning VMAP'd stacks to 2 * THREAD_SIZE, we can detect overflow by
 * checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
 * assembly.
 */
#ifdef CONFIG_VMAP_STACK
#define THREAD_ALIGN		(2 * THREAD_SIZE)
#else
#define THREAD_ALIGN		THREAD_SIZE
#endif

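/*
 * Worked illustration of the check above, assuming THREAD_SHIFT = 14
 * (16 KB stacks aligned to 32 KB): the stack base has bits [14:0] clear,
 * so any sp within the valid 16 KB range has bit 14 clear, while an sp
 * that has underflowed into the 16 KB guard region below the stack has
 * bit 14 set. Overflow can thus be detected by testing a single bit of sp.
 */
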
#define IRQ_STACK_SIZE		THREAD_SIZE

#define OVERFLOW_STACK_SIZE	SZ_4K

/*
 * Alignment of kernel segments (e.g. .text, .data).
 */
#if defined(CONFIG_DEBUG_ALIGN_RODATA)
/*
 *  4 KB granule:   1 level 2 entry
 * 16 KB granule: 128 level 3 entries, with contiguous bit
 * 64 KB granule:  32 level 3 entries, with contiguous bit
 */
#define SEGMENT_ALIGN		SZ_2M
#else
/*
 *  4 KB granule:  16 level 3 entries, with contiguous bit
 * 16 KB granule:   4 level 3 entries, without contiguous bit
 * 64 KB granule:   1 level 3 entry
 */
#define SEGMENT_ALIGN		SZ_64K
#endif

/*
 * Memory types available.
 */
#define MT_DEVICE_nGnRnE	0
#define MT_DEVICE_nGnRE		1
#define MT_DEVICE_GRE		2
#define MT_NORMAL_NC		3
#define MT_NORMAL		4
#define MT_NORMAL_WT		5

/*
 * Memory types for Stage-2 translation
 */
#define MT_S2_NORMAL		0xf
#define MT_S2_DEVICE_nGnRE	0x1

#ifdef CONFIG_ARM64_4K_PAGES
#define IOREMAP_MAX_ORDER	(PUD_SHIFT)
#else
#define IOREMAP_MAX_ORDER	(PMD_SHIFT)
#endif

#ifdef CONFIG_BLK_DEV_INITRD
#define __early_init_dt_declare_initrd(__start, __end)	\
	do {						\
		initrd_start = (__start);		\
		initrd_end = (__end);			\
	} while (0)
#endif

#ifndef __ASSEMBLY__

#include <linux/bitops.h>
#include <linux/mmdebug.h>

extern s64			memstart_addr;
/* PHYS_OFFSET - the physical address of the start of memory. */
#define PHYS_OFFSET		({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })

/* the virtual base of the kernel image (minus TEXT_OFFSET) */
extern u64			kimage_vaddr;

/* the offset between the kernel virtual and physical mappings */
extern u64			kimage_voffset;

static inline unsigned long kaslr_offset(void)
{
	return kimage_vaddr - KIMAGE_VADDR;
}

/*
 * Allow all memory at the discovery stage. We will clip it later.
 */
#define MIN_MEMBLOCK_ADDR	0
#define MAX_MEMBLOCK_ADDR	U64_MAX

/*
 * PFNs are used to describe any physical page; this means
 * PFN 0 == physical address 0.
 *
 * This is the PFN of the first RAM page in the kernel
 * direct-mapped view. We assume this is the first page
 * of RAM in the mem_map as well.
 */
#define PHYS_PFN_OFFSET		(PHYS_OFFSET >> PAGE_SHIFT)

/*
 * Physical vs virtual RAM address space conversion. These are
 * private definitions which should NOT be used outside memory.h
 * files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
 */

/*
 * The linear kernel range starts in the middle of the virtual address
 * space. Testing the top bit for the start of the region is a
 * sufficient check.
 */
#define __is_lm_address(addr)	(!!((addr) & BIT(VA_BITS - 1)))

#define __lm_to_phys(addr)	(((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
#define __kimg_to_phys(addr)	((addr) - kimage_voffset)

#define __virt_to_phys_nodebug(x) ({			\
	phys_addr_t __x = (phys_addr_t)(x);		\
	__is_lm_address(__x) ? __lm_to_phys(__x) :	\
			       __kimg_to_phys(__x);	\
})

#define __pa_symbol_nodebug(x)	__kimg_to_phys((phys_addr_t)(x))

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __virt_to_phys(x)	__virt_to_phys_nodebug(x)
#define __phys_addr_symbol(x)	__pa_symbol_nodebug(x)
#endif

#define __phys_to_virt(x)	((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
#define __phys_to_kimg(x)	((unsigned long)((x) + kimage_voffset))

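/*
 * Illustration, assuming VA_BITS = 48: __is_lm_address() tests bit 47.
 * A linear-map address such as 0xffff800000001000 has bit 47 set and
 * translates via __lm_to_phys() to 0x1000 + PHYS_OFFSET, whereas a
 * kernel-image address such as 0xffff000008080000 has bit 47 clear and
 * translates by subtracting kimage_voffset.
 */
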
/*
 * Convert a page to/from a physical address
 */
#define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))

/*
 * Note: Drivers should NOT use these. They are the wrong
 * translation for translating DMA addresses. Use the driver
 * DMA support - see dma-mapping.h.
 */
#define virt_to_phys virt_to_phys
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
	return __virt_to_phys((unsigned long)(x));
}

#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(phys_addr_t x)
{
	return (void *)(__phys_to_virt(x));
}

/*
 * Drivers should NOT use these either.
 */
#define __pa(x)			__virt_to_phys((unsigned long)(x))
#define __pa_symbol(x)		__phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
#define __pa_nodebug(x)		__virt_to_phys_nodebug((unsigned long)(x))
#define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define virt_to_pfn(x)		__phys_to_pfn(__virt_to_phys((unsigned long)(x)))
#define sym_to_pfn(x)		__phys_to_pfn(__pa_symbol(x))

/*
 *  virt_to_page(k)	convert a _valid_ virtual address to struct page *
 *  virt_addr_valid(k)	indicates whether a virtual address is valid
 */
#define ARCH_PFN_OFFSET		((unsigned long)PHYS_PFN_OFFSET)

#ifndef CONFIG_SPARSEMEM_VMEMMAP
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define _virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#else
#define __virt_to_pgoff(kaddr)	(((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
#define __page_to_voff(page)	(((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))

#define page_to_virt(page)	((void *)((__page_to_voff(page)) | PAGE_OFFSET))
#define virt_to_page(vaddr)	((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))

#define _virt_addr_valid(kaddr)	pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
					   + PHYS_OFFSET) >> PAGE_SHIFT)
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
#endif /* !__ASSEMBLY__ */

#define _virt_addr_is_linear(kaddr)	(((u64)(kaddr)) >= PAGE_OFFSET)
#define virt_addr_valid(kaddr)		(_virt_addr_is_linear(kaddr) && \
					 _virt_addr_valid(kaddr))

#include <asm-generic/memory_model.h>

#endif /* __ASM_MEMORY_H */
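
/*
 * Illustration of the CONFIG_SPARSEMEM_VMEMMAP conversions above, assuming
 * 4 KB pages: for kaddr = PAGE_OFFSET + 0x3000, __virt_to_pgoff() yields
 * 3 * sizeof(struct page), so virt_to_page() returns the fourth entry of
 * the struct page array based at VMEMMAP_START. This works because
 * sizeof(struct page) never exceeds 1 << STRUCT_PAGE_MAX_SHIFT, so the
 * array covering the whole linear region fits within VMEMMAP_SIZE.
 */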