#ifndef _ASM_POWERPC_BOOK3S_64_PGALLOC_H
#define _ASM_POWERPC_BOOK3S_64_PGALLOC_H
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/kmemleak.h>
#include <linux/percpu.h>

struct vmemmap_backing {
	struct vmemmap_backing *list;
	unsigned long phys;
	unsigned long virt_addr;
};
extern struct vmemmap_backing *vmemmap_list;

/*
 * Functions that deal with pagetables that could be at any level of
 * the table need to be passed an "index_size" so they know how to
 * handle allocation.  For PTE pages (which are linked to a struct
 * page for now, and drawn from the main get_free_pages() pool), the
 * allocation size will be (2^index_size * sizeof(pointer)) and
 * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
 *
 * The maximum index size needs to be big enough to allow any
 * pagetable sizes we need, but small enough to fit in the low bits of
 * any page table pointer.  In other words all pagetables, even tiny
 * ones, must be aligned to allow at least enough low 0 bits to
 * contain this value.  This value is also used as a mask, so it must
 * be one less than a power of two.
 */
#define MAX_PGTABLE_INDEX_SIZE	0xf

extern struct kmem_cache *pgtable_cache[];
#define PGT_CACHE(shift) ({				\
			BUG_ON(!(shift));		\
			pgtable_cache[(shift) - 1];	\
		})
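
/*
 * A rough usage sketch of the above (the shift value N and the mm below
 * are placeholders, not definitions from this header): a table with
 * index_size N holds 2^N eight-byte pointers, so N == 9 would be a 4K
 * table drawn from the kmem_cache returned by PGT_CACHE(9), assuming a
 * cache for that shift was registered at boot:
 *
 *	void *table = kmem_cache_alloc(PGT_CACHE(N),
 *				       pgtable_gfp_flags(mm, GFP_KERNEL));
 *	...
 *	kmem_cache_free(PGT_CACHE(N), table);
 *
 * Because every table is aligned far enough to leave those low bits
 * clear, pgtable_free_tlb() can stash the table's level index in the
 * low bits of the pointer it queues, and __tlb_remove_table() can mask
 * it back out with MAX_PGTABLE_INDEX_SIZE; that is why the value must
 * be one less than a power of two.
 */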

extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
extern void pte_fragment_free(unsigned long *, int);
extern void pmd_fragment_free(unsigned long *);
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
#ifdef CONFIG_SMP
extern void __tlb_remove_table(void *_table);
#endif

static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_64K_PAGES
	return (pgd_t *)__get_free_page(pgtable_gfp_flags(mm, PGALLOC_GFP));
#else
	struct page *page;
	page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_RETRY_MAYFAIL),
			   4);
	if (!page)
		return NULL;
	return (pgd_t *) page_address(page);
#endif
}

static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_PPC_64K_PAGES
	free_page((unsigned long)pgd);
#else
	free_pages((unsigned long)pgd, 4);
#endif
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	if (radix_enabled())
		return radix__pgd_alloc(mm);

	pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
			       pgtable_gfp_flags(mm, GFP_KERNEL));
	/*
	 * Don't scan the PGD for pointers, it contains references to PUDs but
	 * those references are not full pointers and so can't be recognised by
	 * kmemleak.
	 */
	kmemleak_no_scan(pgd);

	/*
	 * With hugetlb, we don't clear the second half of the page table.
	 * If we share the same slab cache with the pmd or pud level table,
	 * we need to make sure we zero out the full table on alloc.
	 * With 4K we don't store slot in the second half. Hence we don't
	 * need to do this for 4k.
	 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES) && \
	(H_PGD_INDEX_SIZE == H_PUD_CACHE_INDEX)
	memset(pgd, 0, PGD_TABLE_SIZE);
#endif
	return pgd;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	if (radix_enabled())
		return radix__pgd_free(mm, pgd);
	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
	pgd_set(pgd, __pgtable_ptr_val(pud) | PGD_VAL_BITS);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;

	pud = kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
			       pgtable_gfp_flags(mm, GFP_KERNEL));
	/*
	 * Tell kmemleak to ignore the PUD, that means don't scan it for
	 * pointers and don't consider it a leak. PUDs are typically only
	 * referred to by their PGD, but kmemleak is not able to recognise those
	 * as pointers, leading to false leak reports.
	 */
	kmemleak_ignore(pud);

	return pud;
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_set(pud, __pgtable_ptr_val(pmd) | PUD_VAL_BITS);
}

static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long address)
{
	/*
	 * By now all the pud entries should be none entries. So go
	 * ahead and flush the page walk cache
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, pud, PUD_INDEX);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return pmd_fragment_alloc(mm, addr);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	pmd_fragment_free((unsigned long *)pmd);
}

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long address)
{
	/*
	 * By now all the pmd entries should be none entries. So go
	 * ahead and flush the page walk cache
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, pmd, PMD_INDEX);
}

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	pmd_set(pmd, __pgtable_ptr_val(pte) | PMD_VAL_BITS);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte_page)
{
	pmd_set(pmd, __pgtable_ptr_val(pte_page) | PMD_VAL_BITS);
}

static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
	return (pgtable_t)pmd_page_vaddr(pmd);
}
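
/*
 * Taken together, the *_alloc_one() and *_populate() helpers above are
 * what the generic mm code uses to grow a page-table tree top-down.  A
 * rough sketch of that sequence for a single address (error handling
 * and locking omitted; in practice mm/memory.c drives these calls, not
 * arch code doing it by hand):
 *
 *	pgd_t *pgdp = pgd_offset(mm, addr);
 *	pud_t *new_pud = pud_alloc_one(mm, addr);
 *	pgd_populate(mm, pgdp, new_pud);
 *
 *	pud_t *pudp = pud_offset(pgdp, addr);
 *	pmd_t *new_pmd = pmd_alloc_one(mm, addr);
 *	pud_populate(mm, pudp, new_pmd);
 *
 *	pmd_t *pmdp = pmd_offset(pudp, addr);
 *	pte_t *new_pte = pte_alloc_one_kernel(mm, addr);
 *	pmd_populate_kernel(mm, pmdp, new_pte);
 *
 * Each *_populate() stores the lower-level table's address via
 * __pgtable_ptr_val() together with the matching *_VAL_BITS flags.
 */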

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	return (pte_t *)pte_fragment_alloc(mm, address, 1);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
				      unsigned long address)
{
	return (pgtable_t)pte_fragment_alloc(mm, address, 0);
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	pte_fragment_free((unsigned long *)pte, 1);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pte_fragment_free((unsigned long *)ptepage, 0);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
{
	/*
	 * By now all the pte entries should be none entries. So go
	 * ahead and flush the page walk cache
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, table, PTE_INDEX);
}

#define check_pgt_cache()	do { } while (0)

extern atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
static inline void update_page_count(int psize, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[psize]);
}

#endif /* _ASM_POWERPC_BOOK3S_64_PGALLOC_H */