#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H
#ifdef __KERNEL__

#ifndef __ASSEMBLY__
#include <asm/processor.h>	/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

struct mm_struct;

#endif /* !__ASSEMBLY__ */

#if defined(CONFIG_PPC64)
# include <asm/pgtable-ppc64.h>
#else
# include <asm/pgtable-ppc32.h>
#endif

#ifndef __ASSEMBLY__

#include <asm/tlbflush.h>

/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_RW; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_present(pte_t pte)	{ return pte_val(pte) & _PAGE_PRESENT; }
static inline int pte_none(pte_t pte)	{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) {
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot)); }
static inline unsigned long pte_pfn(pte_t pte) {
	return pte_val(pte) >> PTE_RPN_SHIFT; }

/* Keep these as macros to avoid an include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkspecial(pte_t pte) {
	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
static inline pte_t pte_mkhuge(pte_t pte) {
	return pte; }
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}


/* Insert a PTE: the top-level function is out of line. It uses an inline
 * low-level function in the respective pgtable-* files.
 */
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		       pte_t pte);
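/*
 * Illustrative sketch, not part of the original header: a typical caller
 * builds a PTE from a struct page plus protection bits, applies the
 * generic modifiers above, and installs it through set_pte_at(). The
 * variables "page", "mm", "addr" and "ptep" are hypothetical here:
 *
 *	pte_t pte = mk_pte(page, PAGE_KERNEL);
 *
 *	pte = pte_mkyoung(pte_mkdirty(pte));
 *	set_pte_at(mm, addr, ptep, pte);
 */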
/* This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now, but
 * I'm keeping it in one place rather than spreading it around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/* First case is a 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use
	 * the helper pte_update(), which does an atomic update. We need to do
	 * that because a concurrent invalidation can clear _PAGE_HASHPTE. If
	 * it's a per-CPU PTE such as a kmap_atomic, we do a simple update
	 * preserving the hash bits instead (i.e., the same as the non-SMP case).
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));

#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
	/* Second case is 32-bit with a 64-bit PTE. In this case, we can just
	 * store as long as we do the two halves in the right order with a
	 * barrier in between. This is possible because we take care, in the
	 * hash code, to pre-invalidate if the PTE was already hashed, which
	 * synchronizes us with any concurrent invalidation.
	 * In the percpu case, we also fall back to the simple update
	 * preserving the hash bits.
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
#if _PAGE_HASHPTE != 0
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
#endif
	__asm__ __volatile__("\
		stw%U0%X0 %2,%0\n\
		eieio\n\
		stw%U0%X0 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");

#elif defined(CONFIG_PPC_STD_MMU_32)
	/* Third case is a 32-bit hash table in UP mode. We need to preserve
	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
	 * translation in the hash yet (that is done in a subsequent
	 * flush_tlb_xxx()), so we need to keep track that this PTE needs
	 * invalidating.
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));

#else
	/* Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
	*ptep = pte;
#endif
}


#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);

/*
 * Macros to alter the cache-control bits of a page protection value,
 * e.g. to mark it "uncacheable".
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE))

#define pgprot_cached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT))

#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT | _PAGE_WRITETHRU))

#define pgprot_cached_noncoherent(prot) \
		(__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))

#define pgprot_writecombine pgprot_noncached_wc
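/*
 * Illustrative sketch, not part of the original header: a device driver
 * mapping MMIO into userspace would typically make the protection
 * uncacheable and guarded before remapping, e.g. in its mmap() handler
 * (the variables "vma" and "pfn" are hypothetical here):
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *				  vma->vm_end - vma->vm_start,
 *				  vma->vm_page_prot);
 */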
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address. Most 32-bit archs define it as always true (like this),
 * but most 64-bit archs actually perform a test. What should we do here?
 */
#define kern_addr_valid(addr)	(1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#include <asm-generic/pgtable.h>


/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);

extern int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, unsigned long addr,
		      unsigned long end, int write, struct page **pages, int *nr);

#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGTABLE_H */