/* SPDX-License-Identifier: GPL-2.0
 *
 * This file contains the functions and defines necessary to modify and
 * use the SuperH page table tree.
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2002 - 2007 Paul Mundt
 */
#ifndef __ASM_SH_PGTABLE_H
#define __ASM_SH_PGTABLE_H

#ifdef CONFIG_X2TLB
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif
#include <asm/page.h>
#include <asm/mmu.h>

#ifndef __ASSEMBLY__
#include <asm/addrspace.h>
#include <asm/fixmap.h>

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* !__ASSEMBLY__ */

/*
 * Effective and physical address definitions, to aid with sign
 * extension.
 */
#define NEFF		32
#define NEFF_SIGN	(1LL << (NEFF - 1))
#define NEFF_MASK	(-1LL << NEFF)

static inline unsigned long long neff_sign_extend(unsigned long val)
{
	unsigned long long extended = val;
	return (extended & NEFF_SIGN) ? (extended | NEFF_MASK) : extended;
}
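
/*
 * For example, with NEFF == 32 (NEFF_SIGN == 0x80000000,
 * NEFF_MASK == 0xffffffff00000000):
 *
 *	neff_sign_extend(0x10000000) == 0x0000000010000000
 *	neff_sign_extend(0x80000000) == 0xffffffff80000000
 *
 * i.e. bit 31 of the 32-bit effective address is propagated through
 * the upper half of the 64-bit result.
 */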

#ifdef CONFIG_29BIT
#define NPHYS		29
#else
#define NPHYS		32
#endif

#define NPHYS_SIGN	(1LL << (NPHYS - 1))
#define NPHYS_MASK	(-1LL << NPHYS)

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/* Entries per level */
#define PTRS_PER_PTE	(PAGE_SIZE / (1 << PTE_MAGNITUDE))

#define FIRST_USER_ADDRESS	0UL

#define PHYS_ADDR_MASK29	0x1fffffff
#define PHYS_ADDR_MASK32	0xffffffff

static inline unsigned long phys_addr_mask(void)
{
	/* Is the MMU in 29-bit mode? */
	if (__in_29bit_mode())
		return PHYS_ADDR_MASK29;

	return PHYS_ADDR_MASK32;
}

#define PTE_PHYS_MASK		(phys_addr_mask() & PAGE_MASK)
#define PTE_FLAGS_MASK		(~(PTE_PHYS_MASK) << PAGE_SHIFT)

#define VMALLOC_START	(P3SEG)
#define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)

#include <asm/pgtable_32.h>

/*
 * SH-X and lower (legacy) SuperH parts (SH-3, SH-4, some SH-4A) can't do
 * page protection for execute, and consider it the same as a read. Also,
 * write permission implies read permission. This is the closest we can get.
 *
 * SH-X2 (SH7785) and later parts take this to the opposite extreme: they
 * not only support separate execute, read, and write bits, but have
 * completely separate permission bits for user and kernel space.
 */
		/*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_EXECREAD
#define __P101	PAGE_EXECREAD
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_WRITEONLY
#define __S011	PAGE_SHARED
#define __S100	PAGE_EXECREAD
#define __S101	PAGE_EXECREAD
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX

typedef pte_t *pte_addr_t;

#define kern_addr_valid(addr)	(1)

#define pte_pfn(x)		((unsigned long)(((x).pte_low >> PAGE_SHIFT)))

struct vm_area_struct;
struct mm_struct;

extern void __update_cache(struct vm_area_struct *vma,
			   unsigned long address, pte_t pte);
extern void __update_tlb(struct vm_area_struct *vma,
			 unsigned long address, pte_t pte);

static inline void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	__update_cache(vma, address, pte);
	__update_tlb(vma, address, pte);
}

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);
extern void page_table_range_init(unsigned long start, unsigned long end,
				  pgd_t *pgd);

static inline bool __pte_access_permitted(pte_t pte, u64 prot)
{
	return (pte_val(pte) & (prot | _PAGE_SPECIAL)) == prot;
}

#ifdef CONFIG_X2TLB
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	u64 prot = _PAGE_PRESENT;

	prot |= _PAGE_EXT(_PAGE_EXT_KERN_READ | _PAGE_EXT_USER_READ);
	if (write)
		prot |= _PAGE_EXT(_PAGE_EXT_KERN_WRITE | _PAGE_EXT_USER_WRITE);
	return __pte_access_permitted(pte, prot);
}
#else
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	u64 prot = _PAGE_PRESENT | _PAGE_USER;

	if (write)
		prot |= _PAGE_RW;
	return __pte_access_permitted(pte, prot);
}
#endif

#define pte_access_permitted pte_access_permitted

/* arch/sh/mm/mmap.c */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* __ASM_SH_PGTABLE_H */
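
/*
 * Note on __pte_access_permitted(): the PTE is masked with
 * (prot | _PAGE_SPECIAL) and compared against prot, so access is only
 * granted when every required protection bit is set and _PAGE_SPECIAL
 * is clear.  On the non-X2TLB layout a read thus needs
 * _PAGE_PRESENT | _PAGE_USER, and a write additionally needs _PAGE_RW;
 * with CONFIG_X2TLB the extended kernel and user read bits (plus the
 * corresponding write bits for a write) are required instead.
 */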