#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H
#ifdef __KERNEL__

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

struct mm_struct;

#endif /* !__ASSEMBLY__ */

#if defined(CONFIG_PPC64)
#  include <asm/pgtable-ppc64.h>
#else
#  include <asm/pgtable-ppc32.h>
#endif

/*
 * We save the slot number & secondary bit in the second half of the
 * PTE page, using 8 bytes per PTE entry.
 */
#define PTE_PAGE_HIDX_OFFSET	(PTRS_PER_PTE * 8)

#ifndef __ASSEMBLY__

#include <asm/tlbflush.h>

/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_NUMA_MASK;
}

#define pte_present_nonuma pte_present_nonuma
static inline int pte_present_nonuma(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

#define ptep_set_numa ptep_set_numa
static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep)
{
	/* Only a present PTE may be turned into a NUMA hinting PTE */
	VM_BUG_ON((pte_val(*ptep) & _PAGE_PRESENT) == 0);

	pte_update(mm, addr, ptep, _PAGE_PRESENT, _PAGE_NUMA, 0);
}

#define pmdp_set_numa pmdp_set_numa
static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
				 pmd_t *pmdp)
{
	/* Likewise for huge-page PMDs */
	VM_BUG_ON((pmd_val(*pmdp) & _PAGE_PRESENT) == 0);

	pmd_hugepage_update(mm, addr, pmdp, _PAGE_PRESENT, _PAGE_NUMA);
}

/*
 * The generic NUMA pte helpers expect the pteval_t and pmdval_t types to
 * exist, a convention inherited from x86. For the purposes of powerpc,
 * pte_basic_t and pmd_t are equivalent.
 */
#define pteval_t pte_basic_t
#define pmdval_t pmd_t
static inline pteval_t ptenuma_flags(pte_t pte)
{
	return pte_val(pte) & _PAGE_NUMA_MASK;
}

static inline pmdval_t pmdnuma_flags(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_NUMA_MASK;
}

#else

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}
#endif /* CONFIG_NUMA_BALANCING */
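/*
 * Illustrative sketch, not part of this header: the accessors above are
 * meant to compose, so a hypothetical helper testing for a resident,
 * writable page could be written as
 *
 *	static inline int pte_present_writable(pte_t pte)
 *	{
 *		return pte_present(pte) && pte_write(pte);
 *	}
 *
 * Note that under CONFIG_NUMA_BALANCING, pte_present() is deliberately
 * also true for a PTE whose _PAGE_PRESENT bit has been traded for
 * _PAGE_NUMA (i.e. a NUMA hinting fault is pending); callers that need
 * a really-resident page must use pte_present_nonuma() instead.
 */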
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) {
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot)); }
static inline unsigned long pte_pfn(pte_t pte) {
	return pte_val(pte) >> PTE_RPN_SHIFT; }

/* Keep these as macros to avoid an include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkspecial(pte_t pte) {
	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
static inline pte_t pte_mkhuge(pte_t pte) {
	return pte; }
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}


/*
 * Insert a PTE: the top-level function is out of line. It uses an inline
 * low-level function in the respective pgtable-* files.
 */
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		       pte_t pte);
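/*
 * Illustrative example, not part of this header: the modifiers above
 * return a new PTE value rather than touching the page table, so callers
 * compose them and then write the result back, e.g.
 *
 *	pte_t pte = mk_pte(page, vma->vm_page_prot);
 *	set_pte_at(vma->vm_mm, addr, ptep, pte_mkyoung(pte_mkdirty(pte)));
 *
 * The conversion helpers also round-trip: pte_pfn(pfn_pte(pfn, prot))
 * yields pfn again, since the PFN is stored above PTE_RPN_SHIFT and the
 * protection bits below it.
 */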
/*
 * This low-level function performs the actual PTE insertion. Setting
 * the PTE depends on the MMU type and other factors. It's a horrible
 * mess that I'm not going to try to clean up now, but I'm keeping it
 * in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/*
	 * First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use
	 * the helper pte_update() which does an atomic update. We need to do
	 * that because a concurrent invalidation can clear _PAGE_HASHPTE. If
	 * it's a per-CPU PTE such as a kmap_atomic, we do a simple update
	 * preserving the hash bits instead (ie, same as the non-SMP case).
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));

#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
	/*
	 * Second case is 32-bit with 64-bit PTE. In this case, we can just
	 * store as long as we do the two halves in the right order with a
	 * barrier in between. This is possible because we take care, in the
	 * hash code, to pre-invalidate if the PTE was already hashed, which
	 * synchronizes us with any concurrent invalidation. In the percpu
	 * case, we also fall back to the simple update preserving the hash
	 * bits.
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
#if _PAGE_HASHPTE != 0
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
#endif
	/* Store the two 32-bit halves in order, with an eieio in between */
	__asm__ __volatile__("\
		stw%U0%X0 %2,%0\n\
		eieio\n\
		stw%U0%X0 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");

#elif defined(CONFIG_PPC_STD_MMU_32)
	/*
	 * Third case is 32-bit hash table in UP mode. We need to preserve
	 * the _PAGE_HASHPTE bit since we may not have invalidated the
	 * previous translation in the hash yet (done in a subsequent
	 * flush_tlb_xxx()), so we need to keep track that this PTE needs
	 * invalidating.
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));

#else
	/*
	 * Anything else just stores the PTE normally. That covers all
	 * 64-bit cases, and 32-bit non-hash with 32-bit PTEs.
	 */
	*ptep = pte;
#endif
}


#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);

/*
 * Macros to mark a page protection value as "uncacheable" or to select
 * one of the other cacheability attributes.
 */
#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE))

#define pgprot_cached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT))

#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT | _PAGE_WRITETHRU))

#define pgprot_cached_noncoherent(prot) \
		(__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))

#define pgprot_writecombine pgprot_noncached_wc

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address. Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test. What should we do here?
 */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>
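/*
 * Illustrative use of the cacheability macros above, not part of this
 * header: a driver mapping device registers into userspace would
 * typically do something like
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 *
 * so that the resulting translation is cache-inhibited (_PAGE_NO_CACHE)
 * and guarded against speculative access (_PAGE_GUARDED).
 */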
/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);

extern int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, unsigned long addr,
		      unsigned long end, int write, struct page **pages, int *nr);

extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		       unsigned long end, int write, struct page **pages, int *nr);
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd)		0
#define has_transparent_hugepage() 0
#endif
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
				 unsigned *shift);

/*
 * Find the Linux PTE (possibly of a huge page) that maps @hva and report
 * the mapping's actual page size back through *pte_sizep. On entry,
 * *pte_sizep holds the page size the caller expects; the lookup fails
 * (returns NULL) if the actual page size is smaller than that.
 */
static inline pte_t *lookup_linux_ptep(pgd_t *pgdir, unsigned long hva,
				       unsigned long *pte_sizep)
{
	pte_t *ptep;
	unsigned long ps = *pte_sizep;
	unsigned int shift;

	ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift);
	if (!ptep)
		return NULL;
	if (shift)
		*pte_sizep = 1ul << shift;
	else
		*pte_sizep = PAGE_SIZE;

	if (ps > *pte_sizep)
		return NULL;

	return ptep;
}
#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGTABLE_H */