#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>
#include <asm-generic/hugetlb.h>

extern struct kmem_cache *hugepte_cache;

#ifdef CONFIG_PPC_BOOK3S_64

#include <asm/book3s/64/hugetlb.h>
/*
 * This should work for other subarchs too. But right now we use the
 * new format only for 64-bit Book3S.
 */
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	/*
	 * We have only four bits to encode the MMU page size.
	 */
	BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
	return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
}

static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
	return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
}

static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_hugetlb_page(vma, vmaddr);
}

static inline void __local_flush_hugetlb_page(struct vm_area_struct *vma,
					      unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__local_flush_hugetlb_page(vma, vmaddr);
}
#else

static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
#ifdef CONFIG_PPC_8xx
	return (pte_t *)__va(hpd_val(hpd) &
			     ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
#else
	return (pte_t *)((hpd_val(hpd) &
			  ~HUGEPD_SHIFT_MASK) | PD_HUGE);
#endif
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
	return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
#else
	return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
#endif
}

#endif /* CONFIG_PPC_BOOK3S_64 */

static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	/*
	 * On FSL BookE, we have multiple higher-level table entries that
	 * point to the same hugepte. Just use the first one since they're
	 * all identical, so for that case idx = 0.
	 */
	unsigned long idx = 0;

	pte_t *dir = hugepd_page(hpd);
#ifndef CONFIG_PPC_FSL_BOOK3E
	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
#endif

	return dir + idx;
}
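
/*
 * Example (illustrative sketch, not part of this header's interface):
 * resolving the PTE for an address covered by a huge page directory
 * entry. The variable names (hpd, addr, pdshift) are hypothetical; the
 * real walk lives in arch/powerpc/mm/hugetlbpage.c.
 *
 *	pte_t *ptep;
 *
 *	if (!hugepd_ok(hpd))
 *		return NULL;
 *	ptep = hugepte_offset(hpd, addr, pdshift);
 *
 * Each hugepte found this way maps 1UL << hugepd_shift(hpd) bytes, so
 * no further offsetting below that granularity is needed.
 */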

pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
				 unsigned long addr, unsigned *shift);

void flush_dcache_icache_hugepage(struct page *page);

#if defined(CONFIG_PPC_MM_SLICES)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);
#else
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
#endif

void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte);

#ifdef CONFIG_PPC_8xx
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	flush_tlb_page(vma, vmaddr);
}
#else
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
#endif

void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
 * to override the version in mm/hugetlb.c.
 */
#define vma_mmu_pagesize vma_mmu_pagesize

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
					 unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}

static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
#else
	return __pte(pte_update(ptep, ~0UL, 0));
#endif
}

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_hugetlb_page(vma, addr);
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call of update_mmu_cache, which will write
	 * a TLB entry. Without this, platforms that don't do a write of the
	 * TLB entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}
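
/*
 * Example (illustrative sketch): how the generic hugetlb fault path
 * combines the accessors above. This mirrors what hugetlb_fault() in
 * mm/hugetlb.c does, but the snippet is an assumption-laden sketch,
 * not a verbatim quote of that code.
 *
 *	pte_t entry = huge_ptep_get(ptep);
 *
 *	entry = pte_mkdirty(pte_mkyoung(entry));
 *	if (huge_ptep_set_access_flags(vma, addr, ptep, entry, 1))
 *		update_mmu_cache(vma, addr, ptep);
 *
 * On HUGETLB_NEED_PRELOAD platforms, huge_ptep_set_access_flags()
 * unconditionally returns 1 so that update_mmu_cache() always runs and
 * preloads the TLB entry the miss handler would not write itself.
 */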

#else /* ! CONFIG_HUGETLB_PAGE */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}

#define hugepd_shift(x) 0
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	return NULL;
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * FSL Book3E platforms require special gpage handling: the gpages
 * are reserved early in the boot process by memblock instead of via
 * the .dts as on IBM platforms.
 */
#if defined(CONFIG_HUGETLB_PAGE) && (defined(CONFIG_PPC_FSL_BOOK3E) || \
    defined(CONFIG_PPC_8xx))
extern void __init reserve_hugetlb_gpages(void);
#else
static inline void reserve_hugetlb_gpages(void)
{
}
#endif

#endif /* _ASM_POWERPC_HUGETLB_H */