/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MCF_PGTABLE_H
#define _MCF_PGTABLE_H

#include <asm/mcfmmu.h>
#include <asm/page.h>

/*
 * MMUDR bits, in proper place. We write these directly into the MMUDR
 * after masking from the pte.
 */
#define CF_PAGE_LOCKED		MMUDR_LK	/* 0x00000002 */
#define CF_PAGE_EXEC		MMUDR_X		/* 0x00000004 */
#define CF_PAGE_WRITABLE	MMUDR_W		/* 0x00000008 */
#define CF_PAGE_READABLE	MMUDR_R		/* 0x00000010 */
#define CF_PAGE_SYSTEM		MMUDR_SP	/* 0x00000020 */
#define CF_PAGE_COPYBACK	MMUDR_CM_CCB	/* 0x00000040 */
#define CF_PAGE_NOCACHE		MMUDR_CM_NCP	/* 0x00000080 */

#define CF_CACHEMASK		(~MMUDR_CM_CCB)
#define CF_PAGE_MMUDR_MASK	0x000000fe

#define _PAGE_NOCACHE030	CF_PAGE_NOCACHE

/*
 * MMUTR bits, need shifting down.
 */
#define CF_PAGE_MMUTR_MASK	0x00000c00
#define CF_PAGE_MMUTR_SHIFT	10

#define CF_PAGE_VALID		(MMUTR_V << CF_PAGE_MMUTR_SHIFT)
#define CF_PAGE_SHARED		(MMUTR_SG << CF_PAGE_MMUTR_SHIFT)
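/*
 * Illustrative sketch only (the actual TLB handling lives outside this
 * header): a pte value is split between the two hardware registers
 * roughly as
 *
 *	MMUDR attribute bits:	 pte_val(pte) & CF_PAGE_MMUDR_MASK
 *	MMUTR bits:		(pte_val(pte) & CF_PAGE_MMUTR_MASK)
 *					>> CF_PAGE_MMUTR_SHIFT
 *
 * The software-only CF_PAGE_DIRTY and CF_PAGE_ACCESSED bits defined
 * below fall outside both masks, so they never reach the hardware.
 */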
/*
 * Fake bits, not implemented in CF, will get masked out before
 * hitting hardware.
 */
#define CF_PAGE_DIRTY		0x00000001
#define CF_PAGE_ACCESSED	0x00001000

#define _PAGE_CACHE040		0x020	/* 68040 cache mode, cachable, copyback */
#define _PAGE_NOCACHE_S		0x040	/* 68040 no-cache mode, serialized */
#define _PAGE_NOCACHE		0x060	/* 68040 cache mode, non-serialized */
#define _PAGE_CACHE040W		0x000	/* 68040 cache mode, cachable, write-through */
#define _DESCTYPE_MASK		0x003
#define _CACHEMASK040		(~0x060)
#define _PAGE_GLOBAL040		0x400	/* 68040 global bit, used for kva descs */

/* We borrow bit 7 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE	CF_PAGE_NOCACHE

/*
 * Externally used page protection values.
 */
#define _PAGE_PRESENT	(CF_PAGE_VALID)
#define _PAGE_ACCESSED	(CF_PAGE_ACCESSED)
#define _PAGE_DIRTY	(CF_PAGE_DIRTY)
#define _PAGE_READWRITE (CF_PAGE_READABLE \
			| CF_PAGE_WRITABLE \
			| CF_PAGE_SYSTEM \
			| CF_PAGE_SHARED)

/*
 * Compound page protection values.
 */
#define PAGE_NONE	__pgprot(CF_PAGE_VALID \
				 | CF_PAGE_ACCESSED)

#define PAGE_SHARED	__pgprot(CF_PAGE_VALID \
				 | CF_PAGE_ACCESSED \
				 | CF_PAGE_SHARED)

#define PAGE_INIT	__pgprot(CF_PAGE_VALID \
				 | CF_PAGE_READABLE \
				 | CF_PAGE_WRITABLE \
				 | CF_PAGE_EXEC \
				 | CF_PAGE_SYSTEM)

#define PAGE_KERNEL	__pgprot(CF_PAGE_VALID \
				 | CF_PAGE_ACCESSED \
				 | CF_PAGE_READABLE \
				 | CF_PAGE_WRITABLE \
				 | CF_PAGE_EXEC \
				 | CF_PAGE_SYSTEM \
				 | CF_PAGE_SHARED)

#define PAGE_COPY	__pgprot(CF_PAGE_VALID \
				 | CF_PAGE_ACCESSED \
				 | CF_PAGE_READABLE \
				 | CF_PAGE_DIRTY)

#define PTE_MASK	PAGE_MASK
#define CF_PAGE_CHG_MASK (PTE_MASK | CF_PAGE_ACCESSED | CF_PAGE_DIRTY)

#ifndef __ASSEMBLER__

#define pmd_pgtable(pmd) pfn_to_virt(pmd_val(pmd) >> PAGE_SHIFT)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & CF_PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}
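/*
 * Usage sketch (illustrative only; "pfn" stands for some page frame
 * number): pte_modify() keeps the page frame plus the software
 * accessed/dirty bits (everything in CF_PAGE_CHG_MASK) and replaces all
 * other bits with the new protection, so
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_COPY);
 *	pte = pte_mkdirty(pte);
 *	pte = pte_modify(pte, PAGE_SHARED);
 *
 * leaves CF_PAGE_DIRTY and the page frame intact while swapping the
 * PAGE_COPY protection bits for the PAGE_SHARED ones.
 */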
#define pmd_set(pmdp, ptep) do {} while (0)

static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
{
	pgd_val(*pgdp) = virt_to_phys(pmdp);
}

#define __pte_page(pte)		((void *) (pte_val(pte) & PAGE_MASK))
#define pmd_page_vaddr(pmd)	((unsigned long) (pmd_val(pmd)))

static inline int pte_none(pte_t pte)
{
	return !pte_val(pte);
}

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & CF_PAGE_VALID;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	pte_val(*ptep) = 0;
}

#define pte_page(pte)	virt_to_page(__pte_page(pte))

static inline int pmd_none2(pmd_t *pmd) { return !pmd_val(*pmd); }
#define pmd_none(pmd) pmd_none2(&(pmd))
static inline int pmd_bad2(pmd_t *pmd) { return 0; }
#define pmd_bad(pmd) pmd_bad2(&(pmd))
#define pmd_present(pmd) (!pmd_none2(&(pmd)))
static inline void pmd_clear(pmd_t *pmdp) { pmd_val(*pmdp) = 0; }

#define pte_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pte %08lx.\n", \
	       __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
	       __FILE__, __LINE__, pgd_val(e))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not...
 * [we have the full set here even if they don't change from m68k]
 */
static inline int pte_read(pte_t pte)
{
	return pte_val(pte) & CF_PAGE_READABLE;
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & CF_PAGE_WRITABLE;
}

static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & CF_PAGE_EXEC;
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & CF_PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & CF_PAGE_ACCESSED;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~CF_PAGE_WRITABLE;
	return pte;
}

static inline pte_t pte_rdprotect(pte_t pte)
{
	pte_val(pte) &= ~CF_PAGE_READABLE;
	return pte;
}

static inline pte_t pte_exprotect(pte_t pte)
{
	pte_val(pte) &= ~CF_PAGE_EXEC;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~CF_PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~CF_PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte_val(pte) |= CF_PAGE_WRITABLE;
	return pte;
}

static inline pte_t pte_mkread(pte_t pte)
{
	pte_val(pte) |= CF_PAGE_READABLE;
	return pte;
}

static inline pte_t pte_mkexec(pte_t pte)
{
	pte_val(pte) |= CF_PAGE_EXEC;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= CF_PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= CF_PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mknocache(pte_t pte)
{
	/* 0x80 is CF_PAGE_NOCACHE, 0x40 is CF_PAGE_COPYBACK */
	pte_val(pte) |= 0x80 | (pte_val(pte) & ~0x40);
	return pte;
}

static inline pte_t pte_mkcache(pte_t pte)
{
	pte_val(pte) &= ~CF_PAGE_NOCACHE;
	return pte;
}

#define swapper_pg_dir kernel_pg_dir
extern pgd_t kernel_pg_dir[PTRS_PER_PGD];

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <------------------ offset -------------> 0 0 0 E <-- type --->
 *
 *   E is the exclusive marker that is not stored in swap entries.
 */
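/*
 * Worked example (illustrative values): __swp_entry(2, 0x100) builds
 * 2 | (0x100 << 11) == 0x00080002, so __swp_type() recovers 2 and
 * __swp_offset() recovers 0x100.  pte_swp_mkexclusive() sets bit 7
 * (_PAGE_SWP_EXCLUSIVE) in the corresponding swap pte without
 * disturbing either field.
 */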
#define __swp_type(x)		((x).val & 0x7f)
#define __swp_offset(x)		((x).val >> 11)
#define __swp_entry(typ, off)	((swp_entry_t) { ((typ) & 0x7f) | \
					((off) << 11) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	(__pte((x).val))

static inline bool pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}

#define PFN_PTE_SHIFT		PAGE_SHIFT
#define pmd_pfn(pmd)		(pmd_val(pmd) >> PAGE_SHIFT)
#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))

#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)

#endif	/* !__ASSEMBLER__ */
#endif	/* _MCF_PGTABLE_H */