xref: /linux/include/asm-generic/pgalloc.h (revision 2ec41967189cd65a8f79c760dd1b50c4f56e8ac6)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __ASM_GENERIC_PGALLOC_H
3 #define __ASM_GENERIC_PGALLOC_H
4 
5 #ifdef CONFIG_MMU
6 
/*
 * GFP flags for page table allocations: tables are always zero-filled;
 * user page tables are additionally accounted (__GFP_ACCOUNT) so they are
 * charged to the allocating cgroup.
 */
#define GFP_PGTABLE_KERNEL	(GFP_KERNEL | __GFP_ZERO)
#define GFP_PGTABLE_USER	(GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)
9 
10 /**
11  * __pte_alloc_one_kernel - allocate memory for a PTE-level kernel page table
12  * @mm: the mm_struct of the current context
13  *
14  * This function is intended for architectures that need
15  * anything beyond simple page allocation.
16  *
17  * Return: pointer to the allocated memory or %NULL on error
18  */
19 static inline pte_t *__pte_alloc_one_kernel_noprof(struct mm_struct *mm)
20 {
21 	struct ptdesc *ptdesc = pagetable_alloc_noprof(GFP_PGTABLE_KERNEL &
22 			~__GFP_HIGHMEM, 0);
23 
24 	if (!ptdesc)
25 		return NULL;
26 	if (!pagetable_pte_ctor(mm, ptdesc)) {
27 		pagetable_free(ptdesc);
28 		return NULL;
29 	}
30 
31 	ptdesc_set_kernel(ptdesc);
32 
33 	return ptdesc_address(ptdesc);
34 }
35 #define __pte_alloc_one_kernel(...)	alloc_hooks(__pte_alloc_one_kernel_noprof(__VA_ARGS__))
36 
#ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
/**
 * pte_alloc_one_kernel - allocate memory for a PTE-level kernel page table
 * @mm: the mm_struct of the current context
 *
 * Architectures that need custom behaviour define
 * __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL and supply their own implementation.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pte_t *pte_alloc_one_kernel_noprof(struct mm_struct *mm)
{
	return __pte_alloc_one_kernel_noprof(mm);
}
#define pte_alloc_one_kernel(...)	alloc_hooks(pte_alloc_one_kernel_noprof(__VA_ARGS__))
#endif
50 
51 /**
52  * pte_free_kernel - free PTE-level kernel page table memory
53  * @mm: the mm_struct of the current context
54  * @pte: pointer to the memory containing the page table
55  */
56 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
57 {
58 	pagetable_dtor_free(virt_to_ptdesc(pte));
59 }
60 
61 /**
62  * __pte_alloc_one - allocate memory for a PTE-level user page table
63  * @mm: the mm_struct of the current context
64  * @gfp: GFP flags to use for the allocation
65  *
66  * Allocate memory for a page table and ptdesc and runs pagetable_pte_ctor().
67  *
68  * This function is intended for architectures that need
69  * anything beyond simple page allocation or must have custom GFP flags.
70  *
71  * Return: `struct page` referencing the ptdesc or %NULL on error
72  */
73 static inline pgtable_t __pte_alloc_one_noprof(struct mm_struct *mm, gfp_t gfp)
74 {
75 	struct ptdesc *ptdesc;
76 
77 	ptdesc = pagetable_alloc_noprof(gfp, 0);
78 	if (!ptdesc)
79 		return NULL;
80 	if (!pagetable_pte_ctor(mm, ptdesc)) {
81 		pagetable_free(ptdesc);
82 		return NULL;
83 	}
84 
85 	return ptdesc_page(ptdesc);
86 }
87 #define __pte_alloc_one(...)	alloc_hooks(__pte_alloc_one_noprof(__VA_ARGS__))
88 
#ifndef __HAVE_ARCH_PTE_ALLOC_ONE
/**
 * pte_alloc_one - allocate a page for PTE-level user page table
 * @mm: the mm_struct of the current context
 *
 * Allocate memory for a page table and ptdesc and runs pagetable_pte_ctor().
 *
 * Architectures that need custom behaviour define __HAVE_ARCH_PTE_ALLOC_ONE
 * and supply their own implementation.
 *
 * Return: `struct page` referencing the ptdesc or %NULL on error
 */
static inline pgtable_t pte_alloc_one_noprof(struct mm_struct *mm)
{
	return __pte_alloc_one_noprof(mm, GFP_PGTABLE_USER);
}
#define pte_alloc_one(...)	alloc_hooks(pte_alloc_one_noprof(__VA_ARGS__))
#endif
104 
105 /*
106  * Should really implement gc for free page table pages. This could be
107  * done with a reference count in struct page.
108  */
109 
/**
 * pte_free - free PTE-level user page table memory
 * @mm: the mm_struct of the current context
 * @pte_page: the `struct page` referencing the ptdesc
 */
static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
{
	pagetable_dtor_free(page_ptdesc(pte_page));
}
121 
122 
123 #if CONFIG_PGTABLE_LEVELS > 2
124 
125 #ifndef __HAVE_ARCH_PMD_ALLOC_ONE
126 /**
127  * pmd_alloc_one - allocate memory for a PMD-level page table
128  * @mm: the mm_struct of the current context
129  *
130  * Allocate memory for a page table and ptdesc and runs pagetable_pmd_ctor().
131  *
132  * Allocations use %GFP_PGTABLE_USER in user context and
133  * %GFP_PGTABLE_KERNEL in kernel context.
134  *
135  * Return: pointer to the allocated memory or %NULL on error
136  */
137 static inline pmd_t *pmd_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
138 {
139 	struct ptdesc *ptdesc;
140 	gfp_t gfp = GFP_PGTABLE_USER;
141 
142 	if (mm == &init_mm)
143 		gfp = GFP_PGTABLE_KERNEL;
144 	ptdesc = pagetable_alloc_noprof(gfp, 0);
145 	if (!ptdesc)
146 		return NULL;
147 	if (!pagetable_pmd_ctor(mm, ptdesc)) {
148 		pagetable_free(ptdesc);
149 		return NULL;
150 	}
151 
152 	if (mm == &init_mm)
153 		ptdesc_set_kernel(ptdesc);
154 
155 	return ptdesc_address(ptdesc);
156 }
157 #define pmd_alloc_one(...)	alloc_hooks(pmd_alloc_one_noprof(__VA_ARGS__))
158 #endif
159 
160 #ifndef __HAVE_ARCH_PMD_FREE
161 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
162 {
163 	struct ptdesc *ptdesc = virt_to_ptdesc(pmd);
164 
165 	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
166 	pagetable_dtor_free(ptdesc);
167 }
168 #endif
169 
170 #endif /* CONFIG_PGTABLE_LEVELS > 2 */
171 
172 #if CONFIG_PGTABLE_LEVELS > 3
173 
174 static inline pud_t *__pud_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
175 {
176 	gfp_t gfp = GFP_PGTABLE_USER;
177 	struct ptdesc *ptdesc;
178 
179 	if (mm == &init_mm)
180 		gfp = GFP_PGTABLE_KERNEL;
181 	gfp &= ~__GFP_HIGHMEM;
182 
183 	ptdesc = pagetable_alloc_noprof(gfp, 0);
184 	if (!ptdesc)
185 		return NULL;
186 
187 	pagetable_pud_ctor(ptdesc);
188 
189 	if (mm == &init_mm)
190 		ptdesc_set_kernel(ptdesc);
191 
192 	return ptdesc_address(ptdesc);
193 }
194 #define __pud_alloc_one(...)	alloc_hooks(__pud_alloc_one_noprof(__VA_ARGS__))
195 
#ifndef __HAVE_ARCH_PUD_ALLOC_ONE
/**
 * pud_alloc_one - allocate memory for a PUD-level page table
 * @mm: the mm_struct of the current context
 * @addr: unused here; kept for compatibility with architecture overrides
 *
 * Allocate memory for a page table using %GFP_PGTABLE_USER for user context
 * and %GFP_PGTABLE_KERNEL for kernel context.
 *
 * Architectures that need custom behaviour define __HAVE_ARCH_PUD_ALLOC_ONE
 * and supply their own implementation.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pud_t *pud_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
{
	return __pud_alloc_one_noprof(mm, addr);
}
#define pud_alloc_one(...)	alloc_hooks(pud_alloc_one_noprof(__VA_ARGS__))
#endif
212 
213 static inline void __pud_free(struct mm_struct *mm, pud_t *pud)
214 {
215 	struct ptdesc *ptdesc = virt_to_ptdesc(pud);
216 
217 	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
218 	pagetable_dtor_free(ptdesc);
219 }
220 
#ifndef __HAVE_ARCH_PUD_FREE
/**
 * pud_free - free a PUD-level page table allocated with pud_alloc_one()
 * @mm: the mm_struct of the current context
 * @pud: pointer to the PUD to free
 *
 * Architectures may override this by defining __HAVE_ARCH_PUD_FREE.
 */
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	__pud_free(mm, pud);
}
#endif
227 
228 #endif /* CONFIG_PGTABLE_LEVELS > 3 */
229 
230 #if CONFIG_PGTABLE_LEVELS > 4
231 
232 static inline p4d_t *__p4d_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
233 {
234 	gfp_t gfp = GFP_PGTABLE_USER;
235 	struct ptdesc *ptdesc;
236 
237 	if (mm == &init_mm)
238 		gfp = GFP_PGTABLE_KERNEL;
239 	gfp &= ~__GFP_HIGHMEM;
240 
241 	ptdesc = pagetable_alloc_noprof(gfp, 0);
242 	if (!ptdesc)
243 		return NULL;
244 
245 	pagetable_p4d_ctor(ptdesc);
246 
247 	if (mm == &init_mm)
248 		ptdesc_set_kernel(ptdesc);
249 
250 	return ptdesc_address(ptdesc);
251 }
252 #define __p4d_alloc_one(...)	alloc_hooks(__p4d_alloc_one_noprof(__VA_ARGS__))
253 
#ifndef __HAVE_ARCH_P4D_ALLOC_ONE
/**
 * p4d_alloc_one - allocate memory for a P4D-level page table
 * @mm: the mm_struct of the current context
 * @addr: unused here; kept for compatibility with architecture overrides
 *
 * Architectures may override this by defining __HAVE_ARCH_P4D_ALLOC_ONE.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline p4d_t *p4d_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
{
	return __p4d_alloc_one_noprof(mm, addr);
}
#define p4d_alloc_one(...)	alloc_hooks(p4d_alloc_one_noprof(__VA_ARGS__))
#endif
261 
262 static inline void __p4d_free(struct mm_struct *mm, p4d_t *p4d)
263 {
264 	struct ptdesc *ptdesc = virt_to_ptdesc(p4d);
265 
266 	BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
267 	pagetable_dtor_free(ptdesc);
268 }
269 
270 #ifndef __HAVE_ARCH_P4D_FREE
271 static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
272 {
273 	if (!mm_p4d_folded(mm))
274 		__p4d_free(mm, p4d);
275 }
276 #endif
277 
278 #endif /* CONFIG_PGTABLE_LEVELS > 4 */
279 
280 static inline pgd_t *__pgd_alloc_noprof(struct mm_struct *mm, unsigned int order)
281 {
282 	gfp_t gfp = GFP_PGTABLE_USER;
283 	struct ptdesc *ptdesc;
284 
285 	if (mm == &init_mm)
286 		gfp = GFP_PGTABLE_KERNEL;
287 	gfp &= ~__GFP_HIGHMEM;
288 
289 	ptdesc = pagetable_alloc_noprof(gfp, order);
290 	if (!ptdesc)
291 		return NULL;
292 
293 	pagetable_pgd_ctor(ptdesc);
294 
295 	if (mm == &init_mm)
296 		ptdesc_set_kernel(ptdesc);
297 
298 	return ptdesc_address(ptdesc);
299 }
300 #define __pgd_alloc(...)	alloc_hooks(__pgd_alloc_noprof(__VA_ARGS__))
301 
302 static inline void __pgd_free(struct mm_struct *mm, pgd_t *pgd)
303 {
304 	struct ptdesc *ptdesc = virt_to_ptdesc(pgd);
305 
306 	BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
307 	pagetable_dtor_free(ptdesc);
308 }
309 
#ifndef __HAVE_ARCH_PGD_FREE
/**
 * pgd_free - free a PGD allocated with __pgd_alloc()
 * @mm: the mm_struct of the current context
 * @pgd: pointer to the PGD to free
 *
 * Architectures may override this by defining __HAVE_ARCH_PGD_FREE.
 */
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	__pgd_free(mm, pgd);
}
#endif
316 
317 #endif /* CONFIG_MMU */
318 
319 #endif /* __ASM_GENERIC_PGALLOC_H */
320