/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_PGALLOC_H
#define __ASM_GENERIC_PGALLOC_H

#ifdef CONFIG_MMU

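/*
 * User page tables are allocated with __GFP_ACCOUNT so they are charged
 * to the memory cgroup of the allocating process; kernel page tables
 * are not accounted.
 */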
#define GFP_PGTABLE_KERNEL (GFP_KERNEL | __GFP_ZERO)
#define GFP_PGTABLE_USER (GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)

/**
 * __pte_alloc_one_kernel - allocate memory for a PTE-level kernel page table
 * @mm: the mm_struct of the current context
 *
 * This function is intended for architectures that need
 * anything beyond simple page allocation.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pte_t *__pte_alloc_one_kernel_noprof(struct mm_struct *mm)
{
	struct ptdesc *ptdesc = pagetable_alloc_noprof(GFP_PGTABLE_KERNEL, 0);

	if (!ptdesc)
		return NULL;
	if (!pagetable_pte_ctor(mm, ptdesc)) {
		pagetable_free(ptdesc);
		return NULL;
	}

	ptdesc_set_kernel(ptdesc);

	return ptdesc_address(ptdesc);
}
#define __pte_alloc_one_kernel(...) alloc_hooks(__pte_alloc_one_kernel_noprof(__VA_ARGS__))
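
/*
 * Example (illustrative sketch only, not part of the generic API): an
 * architecture whose kernel PTE tables need extra setup after allocation
 * can define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL in its asm/pgalloc.h and
 * build on the helper above. arch_prepare_kernel_pte() is a hypothetical
 * hook used purely for illustration.
 *
 *	#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
 *	static inline pte_t *pte_alloc_one_kernel_noprof(struct mm_struct *mm)
 *	{
 *		pte_t *pte = __pte_alloc_one_kernel_noprof(mm);
 *
 *		if (pte)
 *			arch_prepare_kernel_pte(pte);
 *		return pte;
 *	}
 *	#define pte_alloc_one_kernel(...) \
 *		alloc_hooks(pte_alloc_one_kernel_noprof(__VA_ARGS__))
 */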

#ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
/**
 * pte_alloc_one_kernel - allocate memory for a PTE-level kernel page table
 * @mm: the mm_struct of the current context
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pte_t *pte_alloc_one_kernel_noprof(struct mm_struct *mm)
{
	return __pte_alloc_one_kernel_noprof(mm);
}
#define pte_alloc_one_kernel(...) alloc_hooks(pte_alloc_one_kernel_noprof(__VA_ARGS__))
#endif

/**
 * pte_free_kernel - free PTE-level kernel page table memory
 * @mm: the mm_struct of the current context
 * @pte: pointer to the memory containing the page table
 */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	pagetable_dtor_free(virt_to_ptdesc(pte));
}

/**
 * __pte_alloc_one - allocate memory for a PTE-level user page table
 * @mm: the mm_struct of the current context
 * @gfp: GFP flags to use for the allocation
 *
 * Allocate memory for a page table and ptdesc and run pagetable_pte_ctor().
 *
 * This function is intended for architectures that need
 * anything beyond simple page allocation or must have custom GFP flags.
 *
 * Return: `struct page` referencing the ptdesc or %NULL on error
 */
static inline pgtable_t __pte_alloc_one_noprof(struct mm_struct *mm, gfp_t gfp)
{
	struct ptdesc *ptdesc;

	ptdesc = pagetable_alloc_noprof(gfp, 0);
	if (!ptdesc)
		return NULL;
	if (!pagetable_pte_ctor(mm, ptdesc)) {
		pagetable_free(ptdesc);
		return NULL;
	}

	return ptdesc_page(ptdesc);
}
#define __pte_alloc_one(...) alloc_hooks(__pte_alloc_one_noprof(__VA_ARGS__))
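
/*
 * Example (illustrative sketch only): an architecture that must allocate
 * user PTE tables with additional GFP flags can provide its own
 * pte_alloc_one() on top of __pte_alloc_one(). The extra __GFP_HIGHMEM
 * flag shown here is only an assumption for the sake of the example.
 *
 *	#define __HAVE_ARCH_PTE_ALLOC_ONE
 *	static inline pgtable_t pte_alloc_one_noprof(struct mm_struct *mm)
 *	{
 *		return __pte_alloc_one_noprof(mm, GFP_PGTABLE_USER | __GFP_HIGHMEM);
 *	}
 *	#define pte_alloc_one(...) alloc_hooks(pte_alloc_one_noprof(__VA_ARGS__))
 */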

#ifndef __HAVE_ARCH_PTE_ALLOC_ONE
/**
 * pte_alloc_one - allocate a page for a PTE-level user page table
 * @mm: the mm_struct of the current context
 *
 * Allocate memory for a page table and ptdesc and run pagetable_pte_ctor().
 *
 * Return: `struct page` referencing the ptdesc or %NULL on error
 */
static inline pgtable_t pte_alloc_one_noprof(struct mm_struct *mm)
{
	return __pte_alloc_one_noprof(mm, GFP_PGTABLE_USER);
}
#define pte_alloc_one(...) alloc_hooks(pte_alloc_one_noprof(__VA_ARGS__))
#endif

/*
 * Should really implement gc for free page table pages. This could be
 * done with a reference count in struct page.
 */

/**
 * pte_free - free PTE-level user page table memory
 * @mm: the mm_struct of the current context
 * @pte_page: the `struct page` referencing the ptdesc
 */
static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
{
	struct ptdesc *ptdesc = page_ptdesc(pte_page);

	pagetable_dtor_free(ptdesc);
}


#if CONFIG_PGTABLE_LEVELS > 2

#ifndef __HAVE_ARCH_PMD_ALLOC_ONE
/**
 * pmd_alloc_one - allocate memory for a PMD-level page table
 * @mm: the mm_struct of the current context
 *
 * Allocate memory for a page table and ptdesc and run pagetable_pmd_ctor().
 *
 * Allocations use %GFP_PGTABLE_USER in user context and
 * %GFP_PGTABLE_KERNEL in kernel context.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pmd_t *pmd_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
{
	struct ptdesc *ptdesc;
	gfp_t gfp = GFP_PGTABLE_USER;

	if (mm == &init_mm)
		gfp = GFP_PGTABLE_KERNEL;
	ptdesc = pagetable_alloc_noprof(gfp, 0);
	if (!ptdesc)
		return NULL;
	if (!pagetable_pmd_ctor(mm, ptdesc)) {
		pagetable_free(ptdesc);
		return NULL;
	}

	if (mm == &init_mm)
		ptdesc_set_kernel(ptdesc);

	return ptdesc_address(ptdesc);
}
#define pmd_alloc_one(...) alloc_hooks(pmd_alloc_one_noprof(__VA_ARGS__))
#endif

#ifndef __HAVE_ARCH_PMD_FREE
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pmd);

	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
	pagetable_dtor_free(ptdesc);
}
#endif

#endif /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

static inline pud_t *__pud_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
{
	gfp_t gfp = GFP_PGTABLE_USER;
	struct ptdesc *ptdesc;

	if (mm == &init_mm)
		gfp = GFP_PGTABLE_KERNEL;

	ptdesc = pagetable_alloc_noprof(gfp, 0);
	if (!ptdesc)
		return NULL;

	pagetable_pud_ctor(ptdesc);

	if (mm == &init_mm)
		ptdesc_set_kernel(ptdesc);

	return ptdesc_address(ptdesc);
}
#define __pud_alloc_one(...) alloc_hooks(__pud_alloc_one_noprof(__VA_ARGS__))

#ifndef __HAVE_ARCH_PUD_ALLOC_ONE
/**
 * pud_alloc_one - allocate memory for a PUD-level page table
 * @mm: the mm_struct of the current context
 *
 * Allocate memory for a page table using %GFP_PGTABLE_USER for user context
 * and %GFP_PGTABLE_KERNEL for kernel context.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pud_t *pud_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
{
	return __pud_alloc_one_noprof(mm, addr);
}
#define pud_alloc_one(...) alloc_hooks(pud_alloc_one_noprof(__VA_ARGS__))
#endif

static inline void __pud_free(struct mm_struct *mm, pud_t *pud)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pud);

	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
	pagetable_dtor_free(ptdesc);
}

#ifndef __HAVE_ARCH_PUD_FREE
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	__pud_free(mm, pud);
}
#endif

#endif /* CONFIG_PGTABLE_LEVELS > 3 */

#if CONFIG_PGTABLE_LEVELS > 4

static inline p4d_t *__p4d_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
{
	gfp_t gfp = GFP_PGTABLE_USER;
	struct ptdesc *ptdesc;

	if (mm == &init_mm)
		gfp = GFP_PGTABLE_KERNEL;

	ptdesc = pagetable_alloc_noprof(gfp, 0);
	if (!ptdesc)
		return NULL;

	pagetable_p4d_ctor(ptdesc);

	if (mm == &init_mm)
		ptdesc_set_kernel(ptdesc);

	return ptdesc_address(ptdesc);
}
#define __p4d_alloc_one(...) alloc_hooks(__p4d_alloc_one_noprof(__VA_ARGS__))

#ifndef __HAVE_ARCH_P4D_ALLOC_ONE
static inline p4d_t *p4d_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
{
	return __p4d_alloc_one_noprof(mm, addr);
}
#define p4d_alloc_one(...) alloc_hooks(p4d_alloc_one_noprof(__VA_ARGS__))
#endif

static inline void __p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(p4d);

	BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
	pagetable_dtor_free(ptdesc);
}

#ifndef __HAVE_ARCH_P4D_FREE
static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	if (!mm_p4d_folded(mm))
		__p4d_free(mm, p4d);
}
#endif

#endif /* CONFIG_PGTABLE_LEVELS > 4 */

static inline pgd_t *__pgd_alloc_noprof(struct mm_struct *mm, unsigned int order)
{
	gfp_t gfp = GFP_PGTABLE_USER;
	struct ptdesc *ptdesc;

	if (mm == &init_mm)
		gfp = GFP_PGTABLE_KERNEL;

	ptdesc = pagetable_alloc_noprof(gfp, order);
	if (!ptdesc)
		return NULL;

	pagetable_pgd_ctor(ptdesc);

	if (mm == &init_mm)
		ptdesc_set_kernel(ptdesc);

	return ptdesc_address(ptdesc);
}
#define __pgd_alloc(...) alloc_hooks(__pgd_alloc_noprof(__VA_ARGS__))
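
/*
 * Example (illustrative sketch only): an architecture whose PGD occupies
 * a single page can wrap __pgd_alloc()/__pgd_free() directly and pass
 * order 0; the order argument exists for architectures whose top-level
 * table spans several pages.
 *
 *	static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 *	{
 *		return __pgd_alloc(mm, 0);
 *	}
 */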

static inline void __pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pgd);

	BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
	pagetable_dtor_free(ptdesc);
}

#ifndef __HAVE_ARCH_PGD_FREE
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	__pgd_free(mm, pgd);
}
#endif

#endif /* CONFIG_MMU */

#endif /* __ASM_GENERIC_PGALLOC_H */