/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Derived from include/asm-i386/pgtable.h
 */

#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H

#include <asm/fixmap.h>

#define _PAGE_PRESENT	0x001
#define _PAGE_NEEDSYNC	0x002
#define _PAGE_RW	0x020
#define _PAGE_USER	0x040
#define _PAGE_ACCESSED	0x080
#define _PAGE_DIRTY	0x100
/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_PROTNONE	0x010	/* if the user mapped it with PROT_NONE;
				   pte_present gives true */

/* We borrow bit 10 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE	0x400

#if CONFIG_PGTABLE_LEVELS == 4
#include <asm/pgtable-4level.h>
#elif CONFIG_PGTABLE_LEVELS == 2
#include <asm/pgtable-2level.h>
#else
#error "Unsupported number of page table levels"
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;

/* Just an arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */

extern unsigned long end_iomem;

#define VMALLOC_OFFSET	(__va_space)
#define VMALLOC_START	((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define PKMAP_BASE	((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
#define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#define MODULES_VADDR	VMALLOC_START
#define MODULES_END	VMALLOC_END
#define MODULES_LEN	(MODULES_VADDR - MODULES_END)

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define __PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_EXEC	__pgprot(__PAGE_KERNEL_EXEC)

/*
 * The i386 can't do page protection for execute, and considers execute
 * permission the same as read permission. Also, write permission implies
 * read permission. This is the closest we can get.
 */

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
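
/*
 * Worked example (illustrative only, not used by the kernel itself): with
 * the bit values defined above, the protection values resolve to
 *
 *	PAGE_SHARED   == __pgprot(0x001 | 0x020 | 0x040 | 0x080) == __pgprot(0x0e1)
 *	PAGE_COPY     == __pgprot(0x001 | 0x040 | 0x080)         == __pgprot(0x0c1)
 *	PAGE_READONLY == PAGE_COPY (bitwise)
 *
 * i.e. PAGE_COPY and PAGE_READONLY differ from PAGE_SHARED only in that
 * _PAGE_RW (0x020) is clear, which is what makes copy-on-write and
 * read-only mappings fault on write.
 */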

#define pte_clear(mm, addr, xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEEDSYNC))

#define pmd_none(x)	(!((unsigned long)pmd_val(x) & ~_PAGE_NEEDSYNC))
#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { pmd_val(*(xp)) = _PAGE_NEEDSYNC; } while (0)

#define pmd_needsync(x)   (pmd_val(x) & _PAGE_NEEDSYNC)
#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEEDSYNC)

#define pud_needsync(x)   (pud_val(x) & _PAGE_NEEDSYNC)
#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEEDSYNC)

#define p4d_needsync(x)   (p4d_val(x) & _PAGE_NEEDSYNC)
#define p4d_mkuptodate(x) (p4d_val(x) &= ~_PAGE_NEEDSYNC)

#define pmd_pfn(pmd)	(pmd_val(pmd) >> PAGE_SHIFT)
#define pmd_page(pmd)	phys_to_page(pmd_val(pmd) & PAGE_MASK)

#define pte_page(x)	pfn_to_page(pte_pfn(x))

#define pte_present(x)	pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))

/*
 * =================================
 * Flags checking section.
 * =================================
 */

static inline int pte_none(pte_t pte)
{
	return pte_is_zero(pte);
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_read(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_USER)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_exec(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_USER)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_write(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_RW)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_dirty(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_DIRTY);
}

static inline int pte_young(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_ACCESSED);
}

static inline int pte_needsync(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_NEEDSYNC);
}

/*
 * =================================
 * Flags setting section.
 * =================================
 */

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_DIRTY);
	return(pte);
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_ACCESSED);
	return(pte);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_RW);
	return pte;
}

static inline pte_t pte_mkread(pte_t pte)
{
	pte_set_bits(pte, _PAGE_USER);
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_set_bits(pte, _PAGE_DIRTY);
	return(pte);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_set_bits(pte, _PAGE_ACCESSED);
	return(pte);
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte_set_bits(pte, _PAGE_RW);
	return pte;
}

static inline pte_t pte_mkuptodate(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_NEEDSYNC);
	return pte;
}

static inline pte_t pte_mkneedsync(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEEDSYNC);
	return(pte);
}

static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
	pte_copy(*pteptr, pteval);

	/* If it's a swap entry, it needs to be marked _PAGE_NEEDSYNC so
	 * update_pte_range knows to unmap it.
	 */

	*pteptr = pte_mkneedsync(*pteptr);
}
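
/*
 * Illustrative sketch of how the _PAGE_NEEDSYNC scheme is consumed (an
 * assumption about typical use; the real walk lives in update_pte_range()
 * in the tlb code, mentioned in the comment above): UML cannot flush to
 * the host immediately, so set_pte() tags every new entry and a later pass
 * over the pending range does, roughly,
 *
 *	if (pte_needsync(*ptep)) {
 *		... propagate the change to the host address space ...
 *		*ptep = pte_mkuptodate(*ptep);
 *	}
 *
 * um_tlb_mark_sync() below keeps a single pending [from, to) range per mm,
 * and pte_same() below masks _PAGE_NEEDSYNC out so that this bookkeeping
 * bit never makes two otherwise identical PTEs compare as different.
 */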

#define PFN_PTE_SHIFT	PAGE_SHIFT

static inline void um_tlb_mark_sync(struct mm_struct *mm, unsigned long start,
				    unsigned long end)
{
	if (!mm->context.sync_tlb_range_to) {
		mm->context.sync_tlb_range_from = start;
		mm->context.sync_tlb_range_to = end;
	} else {
		if (start < mm->context.sync_tlb_range_from)
			mm->context.sync_tlb_range_from = start;
		if (end > mm->context.sync_tlb_range_to)
			mm->context.sync_tlb_range_to = end;
	}
}

#define set_ptes set_ptes
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, pte_t pte, int nr)
{
	/* Basically the default implementation */
	size_t length = nr * PAGE_SIZE;

	for (;;) {
		set_pte(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
	}

	um_tlb_mark_sync(mm, addr, addr + length);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEEDSYNC);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define __virt_to_page(virt) phys_to_page(__pa(virt))
#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)

#define mk_pte(page, pgprot) \
	({ pte_t pte;					\
							\
	pte_set_val(pte, page_to_phys(page), (pgprot));	\
	pte;})

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
	return pte;
}

/*
 * The pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * This macro returns the kernel virtual address of the page table page
 * that the given pmd entry points to.
 */
#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);

#define update_mmu_cache(vma, address, ptep) do {} while (0)
#define update_mmu_cache_range(vmf, vma, address, ptep, nr) do {} while (0)

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset ----------------> E < type -> 0 0 0 1 0
 *
 * E is the exclusive marker that is not stored in swap entries.
 * _PAGE_NEEDSYNC (bit 1) is always set to 1 in set_pte().
 */
#define __swp_type(x)		(((x).val >> 5) & 0x1f)
#define __swp_offset(x)		((x).val >> 11)

#define __swp_entry(type, offset) \
	((swp_entry_t) { (((type) & 0x1f) << 5) | ((offset) << 11) })
#define __pte_to_swp_entry(pte) \
	((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_set_bits(pte, _PAGE_SWP_EXCLUSIVE);
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_SWP_EXCLUSIVE);
	return pte;
}

#endif
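
/*
 * Worked example of the swap PTE encoding above (illustrative only; the
 * type and offset values are made up):
 *
 *	__swp_entry(3, 0x1234).val == ((3 & 0x1f) << 5) | (0x1234 << 11)
 *	                           == 0x60 | 0x91a000 == 0x91a060
 *
 * __swp_type() then recovers (0x91a060 >> 5) & 0x1f == 3 and __swp_offset()
 * recovers 0x91a060 >> 11 == 0x1234. The encoding leaves bits 0-4 clear, so
 * the PTE is !pte_present() (bits 0 and 4 clear); set_pte() later sets
 * _PAGE_NEEDSYNC (bit 1) as the format comment notes, and bit 10 stays free
 * for _PAGE_SWP_EXCLUSIVE.
 */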