/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2002 by Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2002 Maciej W. Rozycki
 */
#ifndef _ASM_PGTABLE_BITS_H
#define _ASM_PGTABLE_BITS_H

/*
 * Note that we shift the lower 32 bits of each EntryLo[01] entry
 * 6 bits to the left.  That way we can convert the PFN into the
 * physical address by a single 'and' operation and gain 6 additional
 * bits for storing information which isn't present in a normal
 * MIPS page table.
 *
 * Similar to the Alpha port, we need to keep track of the ref
 * and mod bits in software.  We have a software "yeah you can read
 * from this page" bit, and a hardware one which actually lets the
 * process read from the page.  By the same token we have a software
 * writable bit and the real hardware one which actually lets the
 * process write to the page; the mod state is tracked via the
 * hardware dirty bit.
 *
 * Certain revisions of the R4000 and R5000 have a bug where if a
 * certain sequence occurs in the last 3 instructions of an executable
 * page, and the following page is not mapped, the CPU can do
 * unpredictable things.  The code (when it is written) to deal with
 * this problem will be in the update_mmu_cache() code for the r4k.
 */
#if defined(CONFIG_XPA)

/*
 * Page table bit offsets used for 64 bit physical addressing on
 * MIPS32r5 with XPA.
 */
enum pgtable_bits {
	/* Used by TLB hardware (placed in EntryLo*) */
	_PAGE_NO_EXEC_SHIFT,
	_PAGE_NO_READ_SHIFT,
	_PAGE_GLOBAL_SHIFT,
	_PAGE_VALID_SHIFT,
	_PAGE_DIRTY_SHIFT,
	_CACHE_SHIFT,

	/* Used only by software (masked out before writing EntryLo*) */
	_PAGE_PRESENT_SHIFT = 24,
	_PAGE_WRITE_SHIFT,
	_PAGE_ACCESSED_SHIFT,
	_PAGE_MODIFIED_SHIFT,
};

/*
 * Bits for extended EntryLo0/EntryLo1 registers
 */
#define _PFNX_MASK		0xffffff
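/*
 * Illustrative worked example (assumes CONFIG_XPA; not used by the build):
 * the enum above assigns
 *
 *	_PAGE_NO_EXEC_SHIFT = 0		_PAGE_PRESENT_SHIFT  = 24
 *	_PAGE_NO_READ_SHIFT = 1		_PAGE_WRITE_SHIFT    = 25
 *	_PAGE_GLOBAL_SHIFT  = 2		_PAGE_ACCESSED_SHIFT = 26
 *	_PAGE_VALID_SHIFT   = 3		_PAGE_MODIFIED_SHIFT = 27
 *	_PAGE_DIRTY_SHIFT   = 4
 *	_CACHE_SHIFT	    = 5		(3-bit cache attribute field)
 *
 * so the bits consumed by the TLB hardware stay in the low byte while the
 * software-only bits sit at 24..27 and can simply be masked out before the
 * value is written to EntryLo*.  _PFNX_MASK (24 bits) covers the upper
 * physical-frame-number bits that go into the extended EntryLo0/1
 * registers.
 */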
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

/*
 * Page table bit offsets used for 36 bit physical addressing on MIPS32,
 * for example with Alchemy or Netlogic XLP/XLR.
 */
enum pgtable_bits {
	/* Used by TLB hardware (placed in EntryLo*) */
	_PAGE_GLOBAL_SHIFT,
	_PAGE_VALID_SHIFT,
	_PAGE_DIRTY_SHIFT,
	_CACHE_SHIFT,

	/* Used only by software (masked out before writing EntryLo*) */
	_PAGE_PRESENT_SHIFT = _CACHE_SHIFT + 3,
	_PAGE_NO_READ_SHIFT,
	_PAGE_WRITE_SHIFT,
	_PAGE_ACCESSED_SHIFT,
	_PAGE_MODIFIED_SHIFT,
};

#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

/* Page table bits used for r3k systems */
enum pgtable_bits {
	/* Used only by software (writes to EntryLo ignored) */
	_PAGE_PRESENT_SHIFT,
	_PAGE_NO_READ_SHIFT,
	_PAGE_WRITE_SHIFT,
	_PAGE_ACCESSED_SHIFT,
	_PAGE_MODIFIED_SHIFT,

	/* Used by TLB hardware (placed in EntryLo) */
	_PAGE_GLOBAL_SHIFT = 8,
	_PAGE_VALID_SHIFT,
	_PAGE_DIRTY_SHIFT,
	_CACHE_UNCACHED_SHIFT,
};

#else

/* Page table bits used for r4k systems */
enum pgtable_bits {
	/* Used only by software (masked out before writing EntryLo*) */
	_PAGE_PRESENT_SHIFT,
#if !defined(CONFIG_CPU_HAS_RIXI)
	_PAGE_NO_READ_SHIFT,
#endif
	_PAGE_WRITE_SHIFT,
	_PAGE_ACCESSED_SHIFT,
	_PAGE_MODIFIED_SHIFT,
#if defined(CONFIG_64BIT) && defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
	_PAGE_HUGE_SHIFT,
#endif

	/* Used by TLB hardware (placed in EntryLo*) */
#if defined(CONFIG_CPU_HAS_RIXI)
	_PAGE_NO_EXEC_SHIFT,
	_PAGE_NO_READ_SHIFT,
#endif
	_PAGE_GLOBAL_SHIFT,
	_PAGE_VALID_SHIFT,
	_PAGE_DIRTY_SHIFT,
	_CACHE_SHIFT,
};

#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */

/* Used only by software */
#define _PAGE_PRESENT		(1 << _PAGE_PRESENT_SHIFT)
#define _PAGE_WRITE		(1 << _PAGE_WRITE_SHIFT)
#define _PAGE_ACCESSED		(1 << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED		(1 << _PAGE_MODIFIED_SHIFT)
#if defined(CONFIG_64BIT) && defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
# define _PAGE_HUGE		(1 << _PAGE_HUGE_SHIFT)
#endif

/* Used by TLB hardware (placed in EntryLo*) */
#if defined(CONFIG_XPA)
# define _PAGE_NO_EXEC		(1 << _PAGE_NO_EXEC_SHIFT)
#elif defined(CONFIG_CPU_HAS_RIXI)
# define _PAGE_NO_EXEC		(cpu_has_rixi ? (1 << _PAGE_NO_EXEC_SHIFT) : 0)
#endif
#define _PAGE_NO_READ		(1 << _PAGE_NO_READ_SHIFT)
#define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
#define _PAGE_VALID		(1 << _PAGE_VALID_SHIFT)
#define _PAGE_DIRTY		(1 << _PAGE_DIRTY_SHIFT)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
# define _CACHE_UNCACHED	(1 << _CACHE_UNCACHED_SHIFT)
# define _CACHE_MASK		_CACHE_UNCACHED
# define _PFN_SHIFT		PAGE_SHIFT
#else
# define _CACHE_MASK		(7 << _CACHE_SHIFT)
# define _PFN_SHIFT		(PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
#endif

#ifndef _PAGE_NO_EXEC
#define _PAGE_NO_EXEC		0
#endif

#define _PAGE_SILENT_READ	_PAGE_VALID
#define _PAGE_SILENT_WRITE	_PAGE_DIRTY

#define _PFN_MASK		(~((1 << (_PFN_SHIFT)) - 1))

/*
 * The final layouts of the PTE bits are:
 *
 *	64-bit, R1 or earlier:	CCC D V G [S H] M A W R P
 *	32-bit, R1 or earlier:	CCC D V G M A W R P
 *	64-bit, R2 or later:	CCC D V G RI/R XI [S H] M A W P
 *	32-bit, R2 or later:	CCC D V G RI/R XI M A W P
 */
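/*
 * Illustrative worked example (assumes a 32-bit r4k configuration without
 * RIXI or huge pages, and 4 KiB pages; not used by the build): the r4k enum
 * above then yields
 *
 *	_PAGE_PRESENT_SHIFT  = 0	_PAGE_GLOBAL_SHIFT = 5
 *	_PAGE_NO_READ_SHIFT  = 1	_PAGE_VALID_SHIFT  = 6
 *	_PAGE_WRITE_SHIFT    = 2	_PAGE_DIRTY_SHIFT  = 7
 *	_PAGE_ACCESSED_SHIFT = 3	_CACHE_SHIFT	   = 8
 *	_PAGE_MODIFIED_SHIFT = 4
 *
 * matching the "32-bit, R1 or earlier" layout shown above, and
 *
 *	_PFN_SHIFT = PAGE_SHIFT - 12 + _CACHE_SHIFT + 3 = 11
 *	_PFN_MASK  = ~((1 << 11) - 1)			= 0xfffff800
 *
 * so the PFN sits in the PTE bits above the flag and cache-attribute
 * fields and is recovered with a plain shift, e.g.:
 *
 *	unsigned long pfn  = pte_val >> _PFN_SHIFT;
 *	phys_addr_t   phys = (phys_addr_t)pfn << PAGE_SHIFT;
 */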
/*
 * pte_to_entrylo converts a page table entry (PTE) into a MIPS
 * EntryLo0/1 value (a worked example appears at the end of this file).
 */
static inline uint64_t pte_to_entrylo(unsigned long pte_val)
{
#ifdef CONFIG_CPU_HAS_RIXI
	if (cpu_has_rixi) {
		int sa;
#ifdef CONFIG_32BIT
		sa = 31 - _PAGE_NO_READ_SHIFT;
#else
		sa = 63 - _PAGE_NO_READ_SHIFT;
#endif
		/*
		 * C has no way to express that this is a DSRL
		 * _PAGE_NO_EXEC_SHIFT followed by a ROTR 2.  Luckily
		 * in the fast path this is done in assembly.
		 */
		return (pte_val >> _PAGE_GLOBAL_SHIFT) |
		       ((pte_val & (_PAGE_NO_EXEC | _PAGE_NO_READ)) << sa);
	}
#endif

	return pte_val >> _PAGE_GLOBAL_SHIFT;
}

/*
 * Cache attributes
 */
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

#define _CACHE_CACHABLE_NONCOHERENT 0
#define _CACHE_UNCACHED_ACCELERATED _CACHE_UNCACHED

#elif defined(CONFIG_CPU_SB1)

/* No penalty for being coherent on the SB1, so just
   use it for "noncoherent" spaces, too.  Shouldn't hurt. */

#define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT)

#elif defined(CONFIG_CPU_LOONGSON3)

/* Using COHERENT flag for NONCOHERENT doesn't hurt. */

#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT)  /* LOONGSON   */
#define _CACHE_CACHABLE_COHERENT    (3<<_CACHE_SHIFT)  /* LOONGSON-3 */

#elif defined(CONFIG_MACH_INGENIC)

/* Ingenic uses the WA bit to achieve write-combine memory writes */
#define _CACHE_UNCACHED_ACCELERATED (1<<_CACHE_SHIFT)

#endif

#ifndef _CACHE_CACHABLE_NO_WA
#define _CACHE_CACHABLE_NO_WA		(0<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_WA
#define _CACHE_CACHABLE_WA		(1<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_UNCACHED
#define _CACHE_UNCACHED			(2<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_NONCOHERENT
#define _CACHE_CACHABLE_NONCOHERENT	(3<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_CE
#define _CACHE_CACHABLE_CE		(4<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_COW
#define _CACHE_CACHABLE_COW		(5<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_CUW
#define _CACHE_CACHABLE_CUW		(6<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_UNCACHED_ACCELERATED
#define _CACHE_UNCACHED_ACCELERATED	(7<<_CACHE_SHIFT)
#endif

#define __READABLE	(_PAGE_SILENT_READ | _PAGE_ACCESSED)
#define __WRITEABLE	(_PAGE_SILENT_WRITE | _PAGE_WRITE | _PAGE_MODIFIED)

#define _PAGE_CHG_MASK	(_PAGE_ACCESSED | _PAGE_MODIFIED |	\
			 _PFN_MASK | _CACHE_MASK)

#endif /* _ASM_PGTABLE_BITS_H */
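/*
 * Illustrative worked example for pte_to_entrylo() above (assumes a 64-bit
 * r4k configuration with RIXI and without huge pages; not used by the
 * build): the enum gives _PAGE_NO_EXEC_SHIFT == 4, _PAGE_NO_READ_SHIFT == 5
 * and _PAGE_GLOBAL_SHIFT == 6, so sa = 63 - 5 = 58 and
 *
 *	pte_val >> _PAGE_GLOBAL_SHIFT		G/V/D/CCC -> EntryLo 0..5
 *	(pte_val & _PAGE_NO_READ) << 58		RI	  -> EntryLo bit 63
 *	(pte_val & _PAGE_NO_EXEC) << 58		XI	  -> EntryLo bit 62
 *
 * i.e. the two inhibit bits rotate into the top of the register while the
 * remaining hardware bits land in their usual EntryLo positions, the same
 * effect as the DSRL _PAGE_NO_EXEC_SHIFT + ROTR 2 sequence mentioned in the
 * comment inside pte_to_entrylo().
 */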