/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_PGTABLE_H
#define _ASM_POWERPC_NOHASH_PGTABLE_H

#if defined(CONFIG_PPC64)
#include <asm/nohash/64/pgtable.h>
#else
#include <asm/nohash/32/pgtable.h>
#endif

#ifndef __ASSEMBLY__

/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)
{
        return (pte_val(pte) & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO;
}
static inline int pte_read(pte_t pte)          { return 1; }
static inline int pte_dirty(pte_t pte)         { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_special(pte_t pte)       { return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte)          { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte)   { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h.  On powerpc, this will only
 * work for user pages and always return true for kernel pages.
 */
static inline int pte_protnone(pte_t pte)
{
        return (pte_val(pte) &
                (_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT;
}

static inline int pmd_protnone(pmd_t pmd)
{
        return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pte_present(pte_t pte)
{
        return pte_val(pte) & _PAGE_PRESENT;
}

/*
 * Page table entries are only found at the last (PTE) level, hence there
 * is no need for equivalent accessors at the upper levels.
 */
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
        /*
         * Any access requires a present, user-accessible and readable PTE
         * (pte_read() is always true on nohash); a write access
         * additionally requires the PTE to be writable.
         */
        if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
                return false;

        if (write && !pte_write(pte))
                return false;

        return true;
}
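
/*
 * Worked example (illustrative only, not part of the original header): a
 * present, user-accessible PTE that is mapped read-only (pte_write() is
 * false, i.e. _PAGE_RO is set or _PAGE_RW is clear, whichever bit the
 * platform defines) passes pte_access_permitted(pte, false) but fails
 * pte_access_permitted(pte, true).
 */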

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
        return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) | pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
        return pte_val(pte) >> PTE_RPN_SHIFT;
}

/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
        pte_basic_t ptev;

        ptev = pte_val(pte) & ~(_PAGE_RW | _PAGE_HWWRITE);
        ptev |= _PAGE_RO;
        return __pte(ptev);
}

static inline pte_t pte_mkclean(pte_t pte)
{
        return __pte(pte_val(pte) & ~(_PAGE_DIRTY | _PAGE_HWWRITE));
}

static inline pte_t pte_mkold(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        pte_basic_t ptev;

        ptev = pte_val(pte) & ~_PAGE_RO;
        ptev |= _PAGE_RW;
        return __pte(ptev);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_HUGE);
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

/* Insert a PTE: the top-level function is out of line.  It uses an inline
 * low-level function in the respective pgtable-* files.
 */
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
                       pte_t pte);

/* This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors.  It's
 * a horrible mess that I'm not going to try to clean up now, but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t pte, int percpu)
{
        /* 32-bit with 64-bit PTE: we can just store the two halves as long
         * as we do them in the right order with a barrier in between.
         * In the percpu case, we also fall back to the simple update.
         */
        if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) {
                __asm__ __volatile__("\
                        stw%U0%X0 %2,%0\n\
                        eieio\n\
                        stw%U0%X0 %L2,%1"
                : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
                : "r" (pte) : "memory");
                return;
        }
        /* Anything else just stores the PTE normally. That covers all 64-bit
         * cases, and 32-bit non-hash with 32-bit PTEs.
         */
        *ptep = pte;

        /*
         * With hardware tablewalk, a sync is needed to ensure that
         * subsequent accesses see the PTE we just wrote.  Unlike userspace
         * mappings, we can't tolerate spurious faults, so make sure
         * the new PTE will be seen the first time.
         */
        if (IS_ENABLED(CONFIG_PPC_BOOK3E_64) && is_kernel_addr(addr))
                mb();
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
                                 pte_t *ptep, pte_t entry, int dirty);
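
/*
 * Illustrative sketch only (not part of the original header, nor lifted
 * from any particular caller): a typical way core MM code composes the
 * accessors and modifiers above when establishing a user mapping.  The
 * variables (vma, page, addr, ptep) are hypothetical.
 *
 *      pte_t pte = pfn_pte(page_to_pfn(page), vma->vm_page_prot);
 *      if (vma->vm_flags & VM_WRITE)
 *              pte = pte_mkdirty(pte_mkwrite(pte));
 *      set_pte_at(vma->vm_mm, addr, ptep, pte);
 */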

/*
 * Macros to mark a page protection value as "uncacheable", plus the
 * related cache-control variants.
 */

#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
                         _PAGE_WRITETHRU)

#define pgprot_noncached(prot)          (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
                                                  _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot)       (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
                                                  _PAGE_NO_CACHE))

#define pgprot_cached(prot)             (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
                                                  _PAGE_COHERENT))

#if _PAGE_WRITETHRU != 0
#define pgprot_cached_wthru(prot)       (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
                                                  _PAGE_COHERENT | _PAGE_WRITETHRU))
#endif

#define pgprot_cached_noncoherent(prot) \
                (__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))

#define pgprot_writecombine pgprot_noncached_wc

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

#ifdef CONFIG_HUGETLB_PAGE
static inline int hugepd_ok(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
        return ((hpd_val(hpd) & 0x4) != 0);
#else
        /* We clear the top bit to indicate hugepd */
        return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0);
#endif
}

static inline int pmd_huge(pmd_t pmd)
{
        return 0;
}

static inline int pud_huge(pud_t pud)
{
        return 0;
}

static inline int pgd_huge(pgd_t pgd)
{
        return 0;
}
#define pgd_huge pgd_huge

#define is_hugepd(hpd) (hugepd_ok(hpd))
#endif /* CONFIG_HUGETLB_PAGE */

#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_NOHASH_PGTABLE_H */