/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_PGTABLE_H
#define _ALPHA_PGTABLE_H

#include <asm-generic/pgtable-nopud.h>

/*
 * This file contains the functions and defines necessary to modify and use
 * the Alpha page table tree.
 *
 * This hopefully works with any standard Alpha page-size, as defined
 * in <asm/page.h> (currently 8192).
 */
#include <linux/mmzone.h>

#include <asm/page.h>
#include <asm/processor.h>	/* For TASK_SIZE */
#include <asm/machvec.h>
#include <asm/setup.h>
#include <linux/page_table_check.h>

struct mm_struct;
struct vm_area_struct;

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level: the Alpha is three-level, with
 * all levels having a one-page page table.
 */
#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PGD	(1UL << (PAGE_SHIFT-3))
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/* Number of pointers that fit on a page: this will go away. */
#define PTRS_PER_PAGE	(1UL << (PAGE_SHIFT-3))

#ifdef CONFIG_ALPHA_LARGE_VMALLOC
#define VMALLOC_START		0xfffffe0000000000
#else
#define VMALLOC_START		(-2*PGDIR_SIZE)
#endif
#define VMALLOC_END		(-PGDIR_SIZE)

/*
 * OSF/1 PAL-code-imposed page table bits
 */
#define _PAGE_VALID	0x0001
#define _PAGE_FOR	0x0002	/* used for page protection (fault on read) */
#define _PAGE_FOW	0x0004	/* used for page protection (fault on write) */
#define _PAGE_FOE	0x0008	/* used for page protection (fault on exec) */
#define _PAGE_ASM	0x0010
#define _PAGE_KRE	0x0100	/* xxx - see below on the "accessed" bit */
#define _PAGE_URE	0x0200	/* xxx */
#define _PAGE_KWE	0x1000	/* used to do the dirty bit in software */
#define _PAGE_UWE	0x2000	/* used to do the dirty bit in software */

/* .. and these are ours ... */
#define _PAGE_DIRTY	0x20000
#define _PAGE_ACCESSED	0x40000

/* We borrow bit 39 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE	0x8000000000UL

/*
 * NOTE! The "accessed" bit isn't necessarily exact: it can be kept exactly
 * by software (use the KRE/URE/KWE/UWE bits appropriately), but I'll fake it.
 * Under Linux/AXP, the "accessed" bit just means "read", and I'll just use
 * the KRE/URE bits to watch for it.  That way we don't need to overload the
 * KWE/UWE bits with both handling dirty and accessed.
 *
 * Note that the kernel uses the accessed bit just to check whether to page
 * out a page or not, so it doesn't have to be exact anyway.
 */

#define __DIRTY_BITS	(_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE)
#define __ACCESS_BITS	(_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE)
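
/*
 * Worked example of the composite masks above (illustrative only, nothing
 * in this file depends on it).  With the bit values defined here:
 *
 *	__ACCESS_BITS == 0x40300	(_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE)
 *	__DIRTY_BITS  == 0x23000	(_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE)
 *
 * So "ageing" a pte (pte_mkold() below) clears the software accessed bit
 * together with the KRE/URE read-enable bits, making the next access
 * through the mapping fault so the page can be marked young again, while
 * the PAL-code fault-on-* protection bits are left untouched.
 */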

#define _PFN_MASK	0xFFFFFFFF00000000UL

#define _PAGE_TABLE	(_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS)
#define _PAGE_CHG_MASK	(_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS)

/*
 * All the normal masks have the "page accessed" bits on, as any time they
 * are used, the page is accessed. They are cleared only by the page-out
 * routines.
 */
#define PAGE_NONE	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE)
#define PAGE_SHARED	__pgprot(_PAGE_VALID | __ACCESS_BITS)
#define PAGE_COPY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_READONLY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_KERNEL	__pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))

#define _PAGE_P(x) _PAGE_NORMAL((x) | _PAGE_FOW)
#define _PAGE_S(x) _PAGE_NORMAL(x)

/*
 * The hardware can handle write-only mappings, but as the Alpha
 * architecture does byte-wide writes with a read-modify-write
 * sequence, it's not practical to have write-without-read privs.
 * Thus the "-w- -> rw-" and "-wx -> rwx" mapping here (and in
 * arch/alpha/mm/fault.c)
 */
	/* xwr */

/*
 * pgprot_noncached() is only for infiniband pci support, and a real
 * implementation for RAM would be more complicated.
 */
#define pgprot_noncached(prot)	(prot)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr)	(virt_to_page(ZERO_PGE))

/*
 * On certain platforms whose physical address space can overlap KSEG,
 * namely EV6 and above, we must re-twiddle the physaddr to restore the
 * correct high-order bits.
 *
 * This is extremely confusing until you realize that this is actually
 * just working around a userspace bug. The X server was intending to
 * provide the physical address but instead provided the KSEG address.
 * Or tried to, except it's not representable.
 *
 * On Tsunami there's nothing meaningful at 0x40000000000, so this is
 * a safe thing to do. Come the first core logic that does put something
 * in this area -- memory or whathaveyou -- then this hack will have
 * to go away. So be prepared!
 */

#if defined(CONFIG_ALPHA_GENERIC) && defined(USE_48_BIT_KSEG)
#error "EV6-only feature in a generic kernel"
#endif
#if defined(CONFIG_ALPHA_GENERIC) || \
    (defined(CONFIG_ALPHA_EV6) && !defined(USE_48_BIT_KSEG))
#define KSEG_PFN	(0xc0000000000UL >> PAGE_SHIFT)
#define PHYS_TWIDDLE(pfn) \
	((((pfn) & KSEG_PFN) == (0x40000000000UL >> PAGE_SHIFT)) \
	 ? ((pfn) ^= KSEG_PFN) : (pfn))
#else
#define PHYS_TWIDDLE(pfn) (pfn)
#endif
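
/*
 * Worked example (illustrative only): with the standard 8 KB page size,
 * PAGE_SHIFT is 13, so on kernels that use the twiddle above
 *
 *	KSEG_PFN            == 0xc0000000000 >> 13 == 0x60000000
 *	0x40000000000 >> 13 ==                        0x20000000
 *
 * i.e. a pfn whose two "KSEG" bits are 01 (the bogus 0x4xxxxxxxxxx alias)
 * is XORed with 0x60000000, flipping those bits to 10 and restoring the
 * intended high-order address bits.
 */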

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define page_to_pa(page)	(page_to_pfn(page) << PAGE_SHIFT)
#define PFN_PTE_SHIFT		32
#define pte_pfn(pte)		(pte_val(pte) >> PFN_PTE_SHIFT)

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

extern inline pte_t pfn_pte(unsigned long physpfn, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpfn) << 32) | pgprot_val(pgprot); return pte; }

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{ pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline void pud_set(pud_t * pudp, pmd_t * pmdp)
{ pud_val(*pudp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }


extern void migrate_flush_tlb_page(struct vm_area_struct *vma,
				   unsigned long addr);

extern inline unsigned long
pmd_page_vaddr(pmd_t pmd)
{
	return ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)) + PAGE_OFFSET;
}

#define pmd_pfn(pmd)	(pmd_val(pmd) >> 32)
#define pmd_page(pmd)	(pfn_to_page(pmd_val(pmd) >> 32))
#define pud_page(pud)	(pfn_to_page(pud_val(pud) >> 32))

extern inline pmd_t *pud_pgtable(pud_t pgd)
{
	return (pmd_t *)(PAGE_OFFSET + ((pud_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)));
}

extern inline int pte_none(pte_t pte)		{ return !pte_val(pte); }
extern inline int pte_present(pte_t pte)	{ return pte_val(pte) & _PAGE_VALID; }
extern inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	WRITE_ONCE(pte_val(*ptep), 0);
}

extern inline int pmd_none(pmd_t pmd)		{ return !pmd_val(pmd); }
extern inline int pmd_bad(pmd_t pmd)		{ return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE; }
extern inline int pmd_present(pmd_t pmd)	{ return pmd_val(pmd) & _PAGE_VALID; }
extern inline void pmd_clear(pmd_t * pmdp)	{ pmd_val(*pmdp) = 0; }

extern inline int pud_none(pud_t pud)		{ return !pud_val(pud); }
extern inline int pud_bad(pud_t pud)		{ return (pud_val(pud) & ~_PFN_MASK) != _PAGE_TABLE; }
extern inline int pud_present(pud_t pud)	{ return pud_val(pud) & _PAGE_VALID; }
extern inline void pud_clear(pud_t * pudp)	{ pud_val(*pudp) = 0; }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_write(pte_t pte)		{ return !(pte_val(pte) & _PAGE_FOW); }
extern inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }

extern inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_FOW; return pte; }
extern inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
extern inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~(__ACCESS_BITS); return pte; }
extern inline pte_t pte_mkwrite_novma(pte_t pte){ pte_val(pte) &= ~_PAGE_FOW; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= __DIRTY_BITS; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= __ACCESS_BITS; return pte; }
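
/*
 * Minimal illustrative sketch (the helper below is hypothetical and unused
 * by the kernel): composing the accessors above to build a present,
 * writable, dirty pte for a given pfn.
 */
static inline pte_t __example_mk_dirty_pte(unsigned long pfn)
{
	pte_t pte = pfn_pte(pfn, PAGE_SHARED);	/* _PAGE_VALID plus __ACCESS_BITS */

	return pte_mkdirty(pte);		/* _PAGE_DIRTY plus KWE/UWE write-enables */
}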

/*
 * The smp_rmb() in the following functions are required to order the load of
 * *dir (the pointer in the top level page table) with any subsequent load of
 * the returned pmd_t *ret (ret is data dependent on *dir).
 *
 * If this ordering is not enforced, the CPU might load an older value of
 * *ret, which may be uninitialized data. See mm/memory.c:__pte_alloc for
 * more details.
 *
 * Note that we never change the mm->pgd pointer after the task is running, so
 * pgd_offset does not require such a barrier.
 */

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pud_t * dir, unsigned long address)
{
	pmd_t *ret = pud_pgtable(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
	smp_rmb(); /* see above */
	return ret;
}
#define pmd_offset pmd_offset

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
{
	pte_t *ret = (pte_t *) pmd_page_vaddr(*dir)
		+ ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
	smp_rmb(); /* see above */
	return ret;
}
#define pte_offset_kernel pte_offset_kernel

extern pgd_t swapper_pg_dir[1024];

#ifdef CONFIG_COMPACTION
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR

static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pte_t *ptep)
{
	pte_t pte = READ_ONCE(*ptep);

	pte_clear(mm, address, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH

static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte = ptep_get_and_clear(mm, addr, ptep);

	page_table_check_pte_clear(mm, addr, pte);
	migrate_flush_tlb_page(vma, addr);
	return pte;
}

#endif

/*
 * The Alpha doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
	unsigned long address, pte_t *ptep)
{
}

static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
}

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *	6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
 *	3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
 *	<------------------- offset ------------------> E <--- type -->
 *
 *	  3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *	  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *	  <--------------------------- zeroes -------------------------->
 *
 * E is the exclusive marker that is not stored in swap entries.
 */
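
/*
 * Worked example of the encoding below (illustrative only):
 * __swp_entry(3, 0x1234) produces a pte value of
 *
 *	(3UL << 32) | (0x1234UL << 40) == 0x0012340300000000
 *
 * _PAGE_VALID (bit 0) stays clear, so the result is !pte_present(), and
 * pte_swp_mkexclusive() would additionally set bit 39 (0x0000008000000000).
 */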

extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = ((type & 0x7f) << 32) | (offset << 40); return pte; }

#define __swp_type(x)		(((x).val >> 32) & 0x7f)
#define __swp_offset(x)		((x).val >> 40)
#define __swp_entry(type, off)	((swp_entry_t) { pte_val(mk_swap_pte((type), (off))) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

static inline bool pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern void paging_init(void);

/* We have our own get_unmapped_area */
#define HAVE_ARCH_UNMAPPED_AREA

#endif /* _ALPHA_PGTABLE_H */