/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_PGALLOC_H
#define __ASM_GENERIC_PGALLOC_H

#ifdef CONFIG_MMU

#define GFP_PGTABLE_KERNEL	(GFP_KERNEL | __GFP_ZERO)
#define GFP_PGTABLE_USER	(GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)

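/*
 * Note (added for clarity): %GFP_PGTABLE_KERNEL expands to
 * GFP_KERNEL | __GFP_ZERO, so freshly allocated page tables always start
 * out zeroed.  %GFP_PGTABLE_USER additionally sets __GFP_ACCOUNT so that
 * user page tables are charged to the memory cgroup of the allocating task.
 */
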
/**
 * __pte_alloc_one_kernel - allocate memory for a PTE-level kernel page table
 * @mm: the mm_struct of the current context
 *
 * This function is intended for architectures that need
 * anything beyond simple page allocation.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pte_t *__pte_alloc_one_kernel_noprof(struct mm_struct *mm)
{
	struct ptdesc *ptdesc = pagetable_alloc_noprof(GFP_PGTABLE_KERNEL &
			~__GFP_HIGHMEM, 0);

	if (!ptdesc)
		return NULL;
	if (!pagetable_pte_ctor(mm, ptdesc)) {
		pagetable_free(ptdesc);
		return NULL;
	}

	return ptdesc_address(ptdesc);
}
#define __pte_alloc_one_kernel(...)	alloc_hooks(__pte_alloc_one_kernel_noprof(__VA_ARGS__))
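
/*
 * Illustrative sketch (not part of this header) of how an architecture
 * that defines __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL might layer extra work on
 * top of the helper above; arch_sync_kernel_pgtable() is a hypothetical
 * hook standing in for whatever the port actually needs:
 *
 *	static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 *	{
 *		pte_t *pte = __pte_alloc_one_kernel(mm);
 *
 *		if (pte)
 *			arch_sync_kernel_pgtable(pte);
 *		return pte;
 *	}
 */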

#ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
/**
 * pte_alloc_one_kernel - allocate memory for a PTE-level kernel page table
 * @mm: the mm_struct of the current context
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pte_t *pte_alloc_one_kernel_noprof(struct mm_struct *mm)
{
	return __pte_alloc_one_kernel_noprof(mm);
}
#define pte_alloc_one_kernel(...)	alloc_hooks(pte_alloc_one_kernel_noprof(__VA_ARGS__))
#endif

/**
 * pte_free_kernel - free PTE-level kernel page table memory
 * @mm: the mm_struct of the current context
 * @pte: pointer to the memory containing the page table
 */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	pagetable_dtor_free(virt_to_ptdesc(pte));
}
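
/*
 * Usage sketch (illustrative only, with a hypothetical error path): kernel
 * PTE tables are returned as kernel virtual addresses and handed back with
 * pte_free_kernel():
 *
 *	pte_t *pte = pte_alloc_one_kernel(&init_mm);
 *
 *	if (!pte)
 *		return -ENOMEM;
 *	...
 *	pte_free_kernel(&init_mm, pte);
 */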

/**
 * __pte_alloc_one - allocate memory for a PTE-level user page table
 * @mm: the mm_struct of the current context
 * @gfp: GFP flags to use for the allocation
 *
 * Allocate memory for a page table and ptdesc, and run pagetable_pte_ctor().
 *
 * This function is intended for architectures that need
 * anything beyond simple page allocation or must have custom GFP flags.
 *
 * Return: `struct page` referencing the ptdesc or %NULL on error
 */
static inline pgtable_t __pte_alloc_one_noprof(struct mm_struct *mm, gfp_t gfp)
{
	struct ptdesc *ptdesc;

	ptdesc = pagetable_alloc_noprof(gfp, 0);
	if (!ptdesc)
		return NULL;
	if (!pagetable_pte_ctor(mm, ptdesc)) {
		pagetable_free(ptdesc);
		return NULL;
	}

	return ptdesc_page(ptdesc);
}
#define __pte_alloc_one(...)	alloc_hooks(__pte_alloc_one_noprof(__VA_ARGS__))
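
/*
 * Illustrative sketch of an architecture override that needs custom GFP
 * flags, for example to allow user PTE pages in highmem (the exact flags
 * are a hypothetical choice, not taken from any particular port):
 *
 *	static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 *	{
 *		return __pte_alloc_one(mm, GFP_PGTABLE_USER | __GFP_HIGHMEM);
 *	}
 */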

#ifndef __HAVE_ARCH_PTE_ALLOC_ONE
/**
 * pte_alloc_one - allocate a page for a PTE-level user page table
 * @mm: the mm_struct of the current context
 *
 * Allocate memory for a page table and ptdesc, and run pagetable_pte_ctor().
 *
 * Return: `struct page` referencing the ptdesc or %NULL on error
 */
static inline pgtable_t pte_alloc_one_noprof(struct mm_struct *mm)
{
	return __pte_alloc_one_noprof(mm, GFP_PGTABLE_USER);
}
#define pte_alloc_one(...)	alloc_hooks(pte_alloc_one_noprof(__VA_ARGS__))
#endif

/*
 * Should really implement gc for free page table pages. This could be
 * done with a reference count in struct page.
 */

/**
 * pte_free - free PTE-level user page table memory
 * @mm: the mm_struct of the current context
 * @pte_page: the `struct page` referencing the ptdesc
 */
static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
{
	struct ptdesc *ptdesc = page_ptdesc(pte_page);

	pagetable_dtor_free(ptdesc);
}
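
/*
 * Usage sketch (illustrative only): here a pgtable_t refers to the struct
 * page of the ptdesc, so a freshly allocated user PTE table is typically
 * either installed with pmd_populate() or released again with pte_free():
 *
 *	pgtable_t new = pte_alloc_one(mm);
 *
 *	if (!new)
 *		return -ENOMEM;
 *	...
 *	pte_free(mm, new);
 */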


#if CONFIG_PGTABLE_LEVELS > 2

#ifndef __HAVE_ARCH_PMD_ALLOC_ONE
/**
 * pmd_alloc_one - allocate memory for a PMD-level page table
 * @mm: the mm_struct of the current context
 * @addr: the address for which the page table is allocated (unused here)
 *
 * Allocate memory for a page table and ptdesc, and run pagetable_pmd_ctor().
 *
 * Allocations use %GFP_PGTABLE_USER in user context and
 * %GFP_PGTABLE_KERNEL in kernel context.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pmd_t *pmd_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
{
	struct ptdesc *ptdesc;
	gfp_t gfp = GFP_PGTABLE_USER;

	if (mm == &init_mm)
		gfp = GFP_PGTABLE_KERNEL;
	ptdesc = pagetable_alloc_noprof(gfp, 0);
	if (!ptdesc)
		return NULL;
	if (!pagetable_pmd_ctor(mm, ptdesc)) {
		pagetable_free(ptdesc);
		return NULL;
	}
	return ptdesc_address(ptdesc);
}
#define pmd_alloc_one(...)	alloc_hooks(pmd_alloc_one_noprof(__VA_ARGS__))
#endif

#ifndef __HAVE_ARCH_PMD_FREE
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pmd);

	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
	pagetable_dtor_free(ptdesc);
}
#endif
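
/*
 * Usage sketch (illustrative only): __pmd_alloc() in mm/memory.c is the
 * usual caller.  Passing &init_mm selects %GFP_PGTABLE_KERNEL, so kernel
 * page tables are not charged to a memory cgroup:
 *
 *	pmd_t *pmd = pmd_alloc_one(mm, addr);
 *
 *	if (!pmd)
 *		return -ENOMEM;
 *	...
 *	pmd_free(mm, pmd);
 */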

#endif /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

static inline pud_t *__pud_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
{
	gfp_t gfp = GFP_PGTABLE_USER;
	struct ptdesc *ptdesc;

	if (mm == &init_mm)
		gfp = GFP_PGTABLE_KERNEL;
	gfp &= ~__GFP_HIGHMEM;

	ptdesc = pagetable_alloc_noprof(gfp, 0);
	if (!ptdesc)
		return NULL;

	pagetable_pud_ctor(ptdesc);
	return ptdesc_address(ptdesc);
}
#define __pud_alloc_one(...)	alloc_hooks(__pud_alloc_one_noprof(__VA_ARGS__))

#ifndef __HAVE_ARCH_PUD_ALLOC_ONE
/**
 * pud_alloc_one - allocate memory for a PUD-level page table
 * @mm: the mm_struct of the current context
 * @addr: the address for which the page table is allocated (unused here)
 *
 * Allocate memory for a page table using %GFP_PGTABLE_USER for user context
 * and %GFP_PGTABLE_KERNEL for kernel context.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pud_t *pud_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
{
	return __pud_alloc_one_noprof(mm, addr);
}
#define pud_alloc_one(...)	alloc_hooks(pud_alloc_one_noprof(__VA_ARGS__))
#endif

static inline void __pud_free(struct mm_struct *mm, pud_t *pud)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pud);

	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
	pagetable_dtor_free(ptdesc);
}

#ifndef __HAVE_ARCH_PUD_FREE
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	__pud_free(mm, pud);
}
#endif
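
/*
 * Usage sketch (illustrative only).  Unlike the PTE and PMD constructors,
 * pagetable_pud_ctor() cannot fail, which is why __pud_alloc_one() has no
 * constructor error path:
 *
 *	pud_t *pud = pud_alloc_one(mm, addr);
 *
 *	if (!pud)
 *		return -ENOMEM;
 *	...
 *	pud_free(mm, pud);
 */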

#endif /* CONFIG_PGTABLE_LEVELS > 3 */

#if CONFIG_PGTABLE_LEVELS > 4

static inline p4d_t *__p4d_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
{
	gfp_t gfp = GFP_PGTABLE_USER;
	struct ptdesc *ptdesc;

	if (mm == &init_mm)
		gfp = GFP_PGTABLE_KERNEL;
	gfp &= ~__GFP_HIGHMEM;

	ptdesc = pagetable_alloc_noprof(gfp, 0);
	if (!ptdesc)
		return NULL;

	pagetable_p4d_ctor(ptdesc);
	return ptdesc_address(ptdesc);
}
#define __p4d_alloc_one(...)	alloc_hooks(__p4d_alloc_one_noprof(__VA_ARGS__))

#ifndef __HAVE_ARCH_P4D_ALLOC_ONE
static inline p4d_t *p4d_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
{
	return __p4d_alloc_one_noprof(mm, addr);
}
#define p4d_alloc_one(...)	alloc_hooks(p4d_alloc_one_noprof(__VA_ARGS__))
#endif

static inline void __p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(p4d);

	BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
	pagetable_dtor_free(ptdesc);
}

#ifndef __HAVE_ARCH_P4D_FREE
static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	/* A folded P4D shares the PGD page, so there is nothing to free. */
	if (!mm_p4d_folded(mm))
		__p4d_free(mm, p4d);
}
#endif

#endif /* CONFIG_PGTABLE_LEVELS > 4 */

static inline pgd_t *__pgd_alloc_noprof(struct mm_struct *mm, unsigned int order)
{
	gfp_t gfp = GFP_PGTABLE_USER;
	struct ptdesc *ptdesc;

	if (mm == &init_mm)
		gfp = GFP_PGTABLE_KERNEL;
	gfp &= ~__GFP_HIGHMEM;

	ptdesc = pagetable_alloc_noprof(gfp, order);
	if (!ptdesc)
		return NULL;

	pagetable_pgd_ctor(ptdesc);
	return ptdesc_address(ptdesc);
}
#define __pgd_alloc(...)	alloc_hooks(__pgd_alloc_noprof(__VA_ARGS__))
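
/*
 * Illustrative sketch of a typical architecture wrapper.  The order
 * argument exists for ports whose top-level table spans more than one
 * page; a port whose PGD fits in a single page would simply do:
 *
 *	static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 *	{
 *		return __pgd_alloc(mm, 0);
 *	}
 */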

static inline void __pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pgd);

	BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
	pagetable_dtor_free(ptdesc);
}

#ifndef __HAVE_ARCH_PGD_FREE
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	__pgd_free(mm, pgd);
}
#endif

#endif /* CONFIG_MMU */

#endif /* __ASM_GENERIC_PGALLOC_H */