/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PGTABLE_HWDEF_H
#define __ASM_PGTABLE_HWDEF_H

#include <asm/memory.h>

#define PTDESC_ORDER		3

/* Number of VA bits resolved by a single translation table level */
#define PTDESC_TABLE_SHIFT	(PAGE_SHIFT - PTDESC_ORDER)

/*
 * Number of page-table levels required to address 'va_bits' wide
 * address, without section mapping. We resolve the top (va_bits - PAGE_SHIFT)
 * bits with PTDESC_TABLE_SHIFT bits at each page table level. Hence:
 *
 *  levels = DIV_ROUND_UP((va_bits - PAGE_SHIFT), PTDESC_TABLE_SHIFT)
 *
 * where DIV_ROUND_UP(n, d) => (((n) + (d) - 1) / (d))
 *
 * We cannot include linux/kernel.h which defines DIV_ROUND_UP here
 * due to build issues. So we open code DIV_ROUND_UP here:
 *
 *	((((va_bits) - PAGE_SHIFT) + PTDESC_TABLE_SHIFT - 1) / PTDESC_TABLE_SHIFT)
 *
 * which gets simplified as :
 */
#define ARM64_HW_PGTABLE_LEVELS(va_bits)	\
	(((va_bits) - PTDESC_ORDER - 1) / PTDESC_TABLE_SHIFT)

/*
 * Size mapped by an entry at level n ( -1 <= n <= 3)
 * We map PTDESC_TABLE_SHIFT at all translation levels and PAGE_SHIFT bits
 * in the final page. The maximum number of translation levels supported by
 * the architecture is 5. Hence, starting at level n, we have further
 * ((4 - n) - 1) levels of translation excluding the offset within the page.
 * So, the total number of bits mapped by an entry at level n is :
 *
 *  ((4 - n) - 1) * PTDESC_TABLE_SHIFT + PAGE_SHIFT
 *
 * Rearranging it a bit we get :
 *  (4 - n) * PTDESC_TABLE_SHIFT + PTDESC_ORDER
 */
#define ARM64_HW_PGTABLE_LEVEL_SHIFT(n)	(PTDESC_TABLE_SHIFT * (4 - (n)) + PTDESC_ORDER)

#define PTRS_PER_PTE		(1 << PTDESC_TABLE_SHIFT)

/*
 * PMD_SHIFT determines the size a level 2 page table entry can map.
 */
#if CONFIG_PGTABLE_LEVELS > 2
#define PMD_SHIFT		ARM64_HW_PGTABLE_LEVEL_SHIFT(2)
#define PMD_SIZE		(_AC(1, UL) << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PTRS_PER_PMD		(1 << PTDESC_TABLE_SHIFT)
#endif

/*
 * PUD_SHIFT determines the size a level 1 page table entry can map.
 */
#if CONFIG_PGTABLE_LEVELS > 3
#define PUD_SHIFT		ARM64_HW_PGTABLE_LEVEL_SHIFT(1)
#define PUD_SIZE		(_AC(1, UL) << PUD_SHIFT)
#define PUD_MASK		(~(PUD_SIZE-1))
#define PTRS_PER_PUD		(1 << PTDESC_TABLE_SHIFT)
#endif

#if CONFIG_PGTABLE_LEVELS > 4
#define P4D_SHIFT		ARM64_HW_PGTABLE_LEVEL_SHIFT(0)
#define P4D_SIZE		(_AC(1, UL) << P4D_SHIFT)
#define P4D_MASK		(~(P4D_SIZE-1))
#define PTRS_PER_P4D		(1 << PTDESC_TABLE_SHIFT)
#endif

/*
 * PGDIR_SHIFT determines the size a top-level page table entry can map
 * (depending on the configuration, this level can be -1, 0, 1 or 2).
 */
#define PGDIR_SHIFT		ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - CONFIG_PGTABLE_LEVELS)
#define PGDIR_SIZE		(_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))
#define PTRS_PER_PGD		(1 << (VA_BITS - PGDIR_SHIFT))
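
/*
 * Worked example (illustrative only): with 4KB pages, PAGE_SHIFT is 12,
 * so PTDESC_TABLE_SHIFT is 9 and each table holds 512 eight-byte
 * descriptors. Assuming a 48-bit VA configuration:
 *
 *  ARM64_HW_PGTABLE_LEVELS(48)     = (48 - 3 - 1) / 9 = 4 levels
 *  ARM64_HW_PGTABLE_LEVEL_SHIFT(3) = 9 * 1 + 3 = 12   (4KB pages)
 *  ARM64_HW_PGTABLE_LEVEL_SHIFT(2) = 9 * 2 + 3 = 21   (2MB PMD entries)
 *  ARM64_HW_PGTABLE_LEVEL_SHIFT(1) = 9 * 3 + 3 = 30   (1GB PUD entries)
 *  ARM64_HW_PGTABLE_LEVEL_SHIFT(0) = 9 * 4 + 3 = 39   (512GB PGD entries)
 *
 * PGDIR_SHIFT is then 39 and PTRS_PER_PGD is 1 << (48 - 39) = 512.
 */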

/*
 * Contiguous page definitions.
 */
#define CONT_PTE_SHIFT		(CONFIG_ARM64_CONT_PTE_SHIFT + PAGE_SHIFT)
#define CONT_PTES		(1 << (CONT_PTE_SHIFT - PAGE_SHIFT))
#define CONT_PTE_SIZE		(CONT_PTES * PAGE_SIZE)
#define CONT_PTE_MASK		(~(CONT_PTE_SIZE - 1))

#define CONT_PMD_SHIFT		(CONFIG_ARM64_CONT_PMD_SHIFT + PMD_SHIFT)
#define CONT_PMDS		(1 << (CONT_PMD_SHIFT - PMD_SHIFT))
#define CONT_PMD_SIZE		(CONT_PMDS * PMD_SIZE)
#define CONT_PMD_MASK		(~(CONT_PMD_SIZE - 1))
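
/*
 * For example (illustrative; the actual shift values come from Kconfig):
 * with 4KB pages, CONFIG_ARM64_CONT_PTE_SHIFT and CONFIG_ARM64_CONT_PMD_SHIFT
 * both default to 4, so a contiguous hint covers 16 PTEs (a 64KB range) or
 * 16 PMDs (a 32MB range). With 64KB pages both shifts default to 5, giving
 * 32 PTEs (2MB) and 32 PMDs (16GB).
 */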

/*
 * Hardware page table definitions.
 *
 * Level -1 descriptor (PGD).
 */
#define PGD_TYPE_TABLE		(_AT(pgdval_t, 3) << 0)
#define PGD_TYPE_MASK		(_AT(pgdval_t, 3) << 0)
#define PGD_TABLE_AF		(_AT(pgdval_t, 1) << 10)	/* Ignored if no FEAT_HAFT */
#define PGD_TABLE_PXN		(_AT(pgdval_t, 1) << 59)
#define PGD_TABLE_UXN		(_AT(pgdval_t, 1) << 60)

/*
 * Level 0 descriptor (P4D).
 */
#define P4D_TYPE_TABLE		(_AT(p4dval_t, 3) << 0)
#define P4D_TYPE_MASK		(_AT(p4dval_t, 3) << 0)
#define P4D_TYPE_SECT		(_AT(p4dval_t, 1) << 0)
#define P4D_SECT_RDONLY		(_AT(p4dval_t, 1) << 7)		/* AP[2] */
#define P4D_TABLE_AF		(_AT(p4dval_t, 1) << 10)	/* Ignored if no FEAT_HAFT */
#define P4D_TABLE_PXN		(_AT(p4dval_t, 1) << 59)
#define P4D_TABLE_UXN		(_AT(p4dval_t, 1) << 60)

/*
 * Level 1 descriptor (PUD).
 */
#define PUD_TYPE_TABLE		(_AT(pudval_t, 3) << 0)
#define PUD_TYPE_MASK		(_AT(pudval_t, 3) << 0)
#define PUD_TYPE_SECT		(_AT(pudval_t, 1) << 0)
#define PUD_SECT_RDONLY		(_AT(pudval_t, 1) << 7)		/* AP[2] */
#define PUD_TABLE_AF		(_AT(pudval_t, 1) << 10)	/* Ignored if no FEAT_HAFT */
#define PUD_TABLE_PXN		(_AT(pudval_t, 1) << 59)
#define PUD_TABLE_UXN		(_AT(pudval_t, 1) << 60)

/*
 * Level 2 descriptor (PMD).
 */
#define PMD_TYPE_MASK		(_AT(pmdval_t, 3) << 0)
#define PMD_TYPE_TABLE		(_AT(pmdval_t, 3) << 0)
#define PMD_TYPE_SECT		(_AT(pmdval_t, 1) << 0)
#define PMD_TABLE_AF		(_AT(pmdval_t, 1) << 10)	/* Ignored if no FEAT_HAFT */

/*
 * Section
 */
#define PMD_SECT_USER		(_AT(pmdval_t, 1) << 6)		/* AP[1] */
#define PMD_SECT_RDONLY		(_AT(pmdval_t, 1) << 7)		/* AP[2] */
#define PMD_SECT_S		(_AT(pmdval_t, 3) << 8)
#define PMD_SECT_AF		(_AT(pmdval_t, 1) << 10)
#define PMD_SECT_NG		(_AT(pmdval_t, 1) << 11)
#define PMD_SECT_CONT		(_AT(pmdval_t, 1) << 52)
#define PMD_SECT_PXN		(_AT(pmdval_t, 1) << 53)
#define PMD_SECT_UXN		(_AT(pmdval_t, 1) << 54)
#define PMD_TABLE_PXN		(_AT(pmdval_t, 1) << 59)
#define PMD_TABLE_UXN		(_AT(pmdval_t, 1) << 60)

/*
 * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
 */
#define PMD_ATTRINDX(t)		(_AT(pmdval_t, (t)) << 2)
#define PMD_ATTRINDX_MASK	(_AT(pmdval_t, 7) << 2)

/*
 * Level 3 descriptor (PTE).
 */
#define PTE_VALID		(_AT(pteval_t, 1) << 0)
#define PTE_TYPE_MASK		(_AT(pteval_t, 3) << 0)
#define PTE_TYPE_PAGE		(_AT(pteval_t, 3) << 0)
#define PTE_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */
#define PTE_RDONLY		(_AT(pteval_t, 1) << 7)		/* AP[2] */
#define PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
#define PTE_AF			(_AT(pteval_t, 1) << 10)	/* Access Flag */
#define PTE_NG			(_AT(pteval_t, 1) << 11)	/* nG */
#define PTE_GP			(_AT(pteval_t, 1) << 50)	/* BTI guarded */
#define PTE_DBM			(_AT(pteval_t, 1) << 51)	/* Dirty Bit Management */
#define PTE_CONT		(_AT(pteval_t, 1) << 52)	/* Contiguous range */
#define PTE_PXN			(_AT(pteval_t, 1) << 53)	/* Privileged XN */
#define PTE_UXN			(_AT(pteval_t, 1) << 54)	/* User XN */
#define PTE_SWBITS_MASK		_AT(pteval_t, (BIT(63) | GENMASK(58, 55)))

#define PTE_ADDR_LOW		(((_AT(pteval_t, 1) << (50 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
#ifdef CONFIG_ARM64_PA_BITS_52
#ifdef CONFIG_ARM64_64K_PAGES
#define PTE_ADDR_HIGH		(_AT(pteval_t, 0xf) << 12)
#define PTE_ADDR_HIGH_SHIFT	36
#define PHYS_TO_PTE_ADDR_MASK	(PTE_ADDR_LOW | PTE_ADDR_HIGH)
#else
#define PTE_ADDR_HIGH		(_AT(pteval_t, 0x3) << 8)
#define PTE_ADDR_HIGH_SHIFT	42
#define PHYS_TO_PTE_ADDR_MASK	GENMASK_ULL(49, 8)
#endif
#endif

/*
 * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
 */
#define PTE_ATTRINDX(t)		(_AT(pteval_t, (t)) << 2)
#define PTE_ATTRINDX_MASK	(_AT(pteval_t, 7) << 2)

/*
 * PIIndex[3:0] encoding (Permission Indirection Extension)
 */
#define PTE_PI_IDX_0	6	/* AP[1], USER */
#define PTE_PI_IDX_1	51	/* DBM */
#define PTE_PI_IDX_2	53	/* PXN */
#define PTE_PI_IDX_3	54	/* UXN */

/*
 * POIndex[2:0] encoding (Permission Overlay Extension)
 */
#define PTE_PO_IDX_0	(_AT(pteval_t, 1) << 60)
#define PTE_PO_IDX_1	(_AT(pteval_t, 1) << 61)
#define PTE_PO_IDX_2	(_AT(pteval_t, 1) << 62)

#define PTE_PO_IDX_MASK		GENMASK_ULL(62, 60)
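
/*
 * For illustration only: a typical read-only, non-executable user mapping
 * of normal memory is built by OR-ing bits from above, roughly
 *
 *  PTE_TYPE_PAGE | PTE_AF | PTE_SHARED | PTE_ATTRINDX(MT_NORMAL) |
 *  PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN
 *
 * with the output address inserted via PTE_ADDR_LOW (plus PTE_ADDR_HIGH
 * for 52-bit PAs). The canonical protection values are composed in
 * asm/pgtable-prot.h.
 */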

/*
 * Memory Attribute override for Stage-2 (MemAttr[3:0])
 */
#define PTE_S2_MEMATTR(t)	(_AT(pteval_t, (t)) << 2)

/*
 * Hierarchical permission for Stage-1 tables
 */
#define S1_TABLE_AP		(_AT(pmdval_t, 3) << 61)

#define TTBR_CNP_BIT		(UL(1) << 0)

/*
 * TCR flags.
 */
#define TCR_T0SZ(x)		((UL(64) - (x)) << TCR_EL1_T0SZ_SHIFT)
#define TCR_T1SZ(x)		((UL(64) - (x)) << TCR_EL1_T1SZ_SHIFT)

#define TCR_T0SZ_MASK		TCR_EL1_T0SZ_MASK
#define TCR_T1SZ_MASK		TCR_EL1_T1SZ_MASK

#define TCR_EPD0_MASK		TCR_EL1_EPD0_MASK
#define TCR_EPD1_MASK		TCR_EL1_EPD1_MASK

#define TCR_IRGN0_MASK		TCR_EL1_IRGN0_MASK
#define TCR_IRGN0_WBWA		(TCR_EL1_IRGN0_WBWA << TCR_EL1_IRGN0_SHIFT)

#define TCR_ORGN0_MASK		TCR_EL1_ORGN0_MASK
#define TCR_ORGN0_WBWA		(TCR_EL1_ORGN0_WBWA << TCR_EL1_ORGN0_SHIFT)

#define TCR_SH0_MASK		TCR_EL1_SH0_MASK
#define TCR_SH0_INNER		(TCR_EL1_SH0_INNER << TCR_EL1_SH0_SHIFT)

#define TCR_SH1_MASK		TCR_EL1_SH1_MASK

#define TCR_TG0_SHIFT		TCR_EL1_TG0_SHIFT
#define TCR_TG0_MASK		TCR_EL1_TG0_MASK
#define TCR_TG0_4K		(TCR_EL1_TG0_4K << TCR_EL1_TG0_SHIFT)
#define TCR_TG0_64K		(TCR_EL1_TG0_64K << TCR_EL1_TG0_SHIFT)
#define TCR_TG0_16K		(TCR_EL1_TG0_16K << TCR_EL1_TG0_SHIFT)

#define TCR_TG1_SHIFT		TCR_EL1_TG1_SHIFT
#define TCR_TG1_MASK		TCR_EL1_TG1_MASK
#define TCR_TG1_16K		(TCR_EL1_TG1_16K << TCR_EL1_TG1_SHIFT)
#define TCR_TG1_4K		(TCR_EL1_TG1_4K << TCR_EL1_TG1_SHIFT)
#define TCR_TG1_64K		(TCR_EL1_TG1_64K << TCR_EL1_TG1_SHIFT)

#define TCR_IPS_SHIFT		TCR_EL1_IPS_SHIFT
#define TCR_IPS_MASK		TCR_EL1_IPS_MASK
#define TCR_A1			TCR_EL1_A1
#define TCR_ASID16		TCR_EL1_AS
#define TCR_TBI0		TCR_EL1_TBI0
#define TCR_TBI1		TCR_EL1_TBI1
#define TCR_HA			TCR_EL1_HA
#define TCR_HD			TCR_EL1_HD
#define TCR_HPD0		TCR_EL1_HPD0
#define TCR_HPD1		TCR_EL1_HPD1
#define TCR_TBID0		TCR_EL1_TBID0
#define TCR_TBID1		TCR_EL1_TBID1
#define TCR_E0PD0		TCR_EL1_E0PD0
#define TCR_E0PD1		TCR_EL1_E0PD1
#define TCR_DS			TCR_EL1_DS

/*
 * TTBR.
 */
#ifdef CONFIG_ARM64_PA_BITS_52
/*
 * TTBR_ELx[1] is RES0 in this configuration.
 */
#define TTBR_BADDR_MASK_52	GENMASK_ULL(47, 2)
#endif

#ifdef CONFIG_ARM64_VA_BITS_52
/* Must be at least 64-byte aligned to prevent corruption of the TTBR */
#define TTBR1_BADDR_4852_OFFSET	(((UL(1) << (52 - PGDIR_SHIFT)) - \
				 (UL(1) << (48 - PGDIR_SHIFT))) * 8)
#endif

#endif