xref: /linux/mm/debug_vm_pgtable.c (revision 8e1bb4a41aa78d6105e59186af3dcd545fc66e70)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * This kernel test validates architecture page table helpers and
4  * accessors, and helps verify their continued compliance with the
5  * expected generic MM semantics.
6  *
7  * Copyright (C) 2019 ARM Ltd.
8  *
9  * Author: Anshuman Khandual <anshuman.khandual@arm.com>
10  */
11 #define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__
12 
13 #include <linux/gfp.h>
14 #include <linux/highmem.h>
15 #include <linux/hugetlb.h>
16 #include <linux/kernel.h>
17 #include <linux/kconfig.h>
18 #include <linux/memblock.h>
19 #include <linux/mm.h>
20 #include <linux/mman.h>
21 #include <linux/mm_types.h>
22 #include <linux/module.h>
23 #include <linux/pfn_t.h>
24 #include <linux/printk.h>
25 #include <linux/pgtable.h>
26 #include <linux/random.h>
27 #include <linux/spinlock.h>
28 #include <linux/swap.h>
29 #include <linux/swapops.h>
30 #include <linux/start_kernel.h>
31 #include <linux/sched/mm.h>
32 #include <linux/io.h>
33 #include <linux/vmalloc.h>
34 
35 #include <asm/cacheflush.h>
36 #include <asm/pgalloc.h>
37 #include <asm/tlbflush.h>
38 
39 /*
40  * Please refer to Documentation/mm/arch_pgtable_helpers.rst for the
41  * semantic expectations being validated here. All future changes here
42  * or in the documentation need to be kept in sync.
43  */
44 #define RANDOM_NZVALUE	GENMASK(7, 0)
45 
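/*
 * A minimal, hypothetical sketch (not part of the test suite proper) of
 * the kind of contract that document describes: granting and revoking
 * write access through the generic helpers must compose predictably on
 * every architecture. The exhaustive checks live in the pxx_basic_tests()
 * routines below.
 */
static void __init __maybe_unused pte_write_contract_example(pte_t pte,
					struct vm_area_struct *vma)
{
	/* mkwrite followed by wrprotect must leave the entry read-only... */
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte, vma))));

	/* ...and wrprotect followed by mkwrite must leave it writable. */
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte), vma)));
}
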
46 struct pgtable_debug_args {
47 	struct mm_struct	*mm;
48 	struct vm_area_struct	*vma;
49 
50 	pgd_t			*pgdp;
51 	p4d_t			*p4dp;
52 	pud_t			*pudp;
53 	pmd_t			*pmdp;
54 	pte_t			*ptep;
55 
56 	p4d_t			*start_p4dp;
57 	pud_t			*start_pudp;
58 	pmd_t			*start_pmdp;
59 	pgtable_t		start_ptep;
60 
61 	unsigned long		vaddr;
62 	pgprot_t		page_prot;
63 	pgprot_t		page_prot_none;
64 
65 	bool			is_contiguous_page;
66 	unsigned long		pud_pfn;
67 	unsigned long		pmd_pfn;
68 	unsigned long		pte_pfn;
69 
70 	unsigned long		fixed_alignment;
71 	unsigned long		fixed_pgd_pfn;
72 	unsigned long		fixed_p4d_pfn;
73 	unsigned long		fixed_pud_pfn;
74 	unsigned long		fixed_pmd_pfn;
75 	unsigned long		fixed_pte_pfn;
76 };
77 
78 static void __init pte_basic_tests(struct pgtable_debug_args *args, int idx)
79 {
80 	pgprot_t prot = vm_get_page_prot(idx);
81 	pte_t pte = pfn_pte(args->fixed_pte_pfn, prot);
82 	unsigned long val = idx, *ptr = &val;
83 
84 	pr_debug("Validating PTE basic (%pGv)\n", ptr);
85 
86 	/*
87 	 * This test needs to be executed after the given page table entry
88 	 * is created with pfn_pte() to make sure that vm_get_page_prot(idx)
89 	 * does not have the dirty bit enabled from the beginning. This is
90 	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
91 	 * that the dirty bit is set.
92 	 */
93 	WARN_ON(pte_dirty(pte_wrprotect(pte)));
94 
95 	WARN_ON(!pte_same(pte, pte));
96 	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
97 	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
98 	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte), args->vma)));
99 	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
100 	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
101 	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte, args->vma))));
102 	WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
103 	WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));
104 }
105 
106 static void __init pte_advanced_tests(struct pgtable_debug_args *args)
107 {
108 	struct page *page;
109 	pte_t pte;
110 
111 	/*
112 	 * Architectures optimize set_pte_at() by avoiding a TLB flush.
113 	 * This requires that set_pte_at() never be used to update an
114 	 * existing pte entry. Clear the pte before calling set_pte_at().
115 	 *
116 	 * flush_dcache_page() is called after set_pte_at() to clear
117 	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
118 	 * when the page is released, so the page allocation check will
119 	 * fail when the page is allocated again. For architectures other
120 	 * than ARM64, the extra overhead of cache flushing is acceptable.
121 	 */
122 	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
123 	if (!page)
124 		return;
125 
126 	pr_debug("Validating PTE advanced\n");
127 	if (WARN_ON(!args->ptep))
128 		return;
129 
130 	pte = pfn_pte(args->pte_pfn, args->page_prot);
131 	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
132 	flush_dcache_page(page);
133 	ptep_set_wrprotect(args->mm, args->vaddr, args->ptep);
134 	pte = ptep_get(args->ptep);
135 	WARN_ON(pte_write(pte));
136 	ptep_get_and_clear(args->mm, args->vaddr, args->ptep);
137 	pte = ptep_get(args->ptep);
138 	WARN_ON(!pte_none(pte));
139 
140 	pte = pfn_pte(args->pte_pfn, args->page_prot);
141 	pte = pte_wrprotect(pte);
142 	pte = pte_mkclean(pte);
143 	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
144 	flush_dcache_page(page);
145 	pte = pte_mkwrite(pte, args->vma);
146 	pte = pte_mkdirty(pte);
147 	ptep_set_access_flags(args->vma, args->vaddr, args->ptep, pte, 1);
148 	pte = ptep_get(args->ptep);
149 	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
150 	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
151 	pte = ptep_get(args->ptep);
152 	WARN_ON(!pte_none(pte));
153 
154 	pte = pfn_pte(args->pte_pfn, args->page_prot);
155 	pte = pte_mkyoung(pte);
156 	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
157 	flush_dcache_page(page);
158 	ptep_test_and_clear_young(args->vma, args->vaddr, args->ptep);
159 	pte = ptep_get(args->ptep);
160 	WARN_ON(pte_young(pte));
161 
162 	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
163 }
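
/*
 * Illustrative only, not a helper the kernel provides: the update idiom
 * described in the comment at the top of pte_advanced_tests(). A live
 * mapping is torn down with ptep_get_and_clear() before the replacement
 * is installed, so set_pte_at() never overwrites a present entry, and
 * the returned old entry is available for propagating accessed/dirty
 * state.
 */
static void __init __maybe_unused pte_replace_example(struct pgtable_debug_args *args,
						      pte_t newpte)
{
	pte_t old;

	/* Atomically fetch and clear the live entry first. */
	old = ptep_get_and_clear(args->mm, args->vaddr, args->ptep);
	WARN_ON(!pte_none(ptep_get(args->ptep)));

	/* Carry the dirty state forward, as a real replacement path would. */
	if (pte_dirty(old))
		newpte = pte_mkdirty(newpte);

	set_pte_at(args->mm, args->vaddr, args->ptep, newpte);
}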
164 
165 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
166 static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx)
167 {
168 	pgprot_t prot = vm_get_page_prot(idx);
169 	unsigned long val = idx, *ptr = &val;
170 	pmd_t pmd;
171 
172 	if (!has_transparent_hugepage())
173 		return;
174 
175 	pr_debug("Validating PMD basic (%pGv)\n", ptr);
176 	pmd = pfn_pmd(args->fixed_pmd_pfn, prot);
177 
178 	/*
179 	 * This test needs to be executed after the given page table entry
180 	 * is created with pfn_pmd() to make sure that vm_get_page_prot(idx)
181 	 * does not have the dirty bit enabled from the beginning. This is
182 	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
183 	 * that the dirty bit is set.
184 	 */
185 	WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));
186 
187 
188 	WARN_ON(!pmd_same(pmd, pmd));
189 	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
190 	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
191 	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd), args->vma)));
192 	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
193 	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
194 	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd, args->vma))));
195 	WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
196 	WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));
197 	/*
198 	 * A huge page does not point to a next-level page table
199 	 * page. Hence this must qualify as pmd_bad().
200 	 */
201 	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
202 }
203 
204 static void __init pmd_advanced_tests(struct pgtable_debug_args *args)
205 {
206 	struct page *page;
207 	pmd_t pmd;
208 	unsigned long vaddr = args->vaddr;
209 
210 	if (!has_transparent_hugepage())
211 		return;
212 
213 	page = (args->pmd_pfn != ULONG_MAX) ? pfn_to_page(args->pmd_pfn) : NULL;
214 	if (!page)
215 		return;
216 
217 	/*
218 	 * flush_dcache_page() is called after set_pmd_at() to clear
219 	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
220 	 * when the page is released, so the page allocation check will
221 	 * fail when the page is allocated again. For architectures other
222 	 * than ARM64, the extra overhead of cache flushing is acceptable.
223 	 */
224 	pr_debug("Validating PMD advanced\n");
225 	/* Align the address to HPAGE_PMD_SIZE */
226 	vaddr &= HPAGE_PMD_MASK;
227 
228 	pgtable_trans_huge_deposit(args->mm, args->pmdp, args->start_ptep);
229 
230 	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
231 	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
232 	flush_dcache_page(page);
233 	pmdp_set_wrprotect(args->mm, vaddr, args->pmdp);
234 	pmd = READ_ONCE(*args->pmdp);
235 	WARN_ON(pmd_write(pmd));
236 	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
237 	pmd = READ_ONCE(*args->pmdp);
238 	WARN_ON(!pmd_none(pmd));
239 
240 	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
241 	pmd = pmd_wrprotect(pmd);
242 	pmd = pmd_mkclean(pmd);
243 	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
244 	flush_dcache_page(page);
245 	pmd = pmd_mkwrite(pmd, args->vma);
246 	pmd = pmd_mkdirty(pmd);
247 	pmdp_set_access_flags(args->vma, vaddr, args->pmdp, pmd, 1);
248 	pmd = READ_ONCE(*args->pmdp);
249 	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
250 	pmdp_huge_get_and_clear_full(args->vma, vaddr, args->pmdp, 1);
251 	pmd = READ_ONCE(*args->pmdp);
252 	WARN_ON(!pmd_none(pmd));
253 
254 	pmd = pmd_mkhuge(pfn_pmd(args->pmd_pfn, args->page_prot));
255 	pmd = pmd_mkyoung(pmd);
256 	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
257 	flush_dcache_page(page);
258 	pmdp_test_and_clear_young(args->vma, vaddr, args->pmdp);
259 	pmd = READ_ONCE(*args->pmdp);
260 	WARN_ON(pmd_young(pmd));
261 
262 	/* Clear the huge PMD and withdraw the deposited page table */
263 	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
264 	pgtable_trans_huge_withdraw(args->mm, args->pmdp);
265 }
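
/*
 * Illustrative sketch of the deposit/withdraw protocol used above: when
 * a huge PMD is installed, a preallocated PTE page table is deposited so
 * that a later split cannot fail on allocation; tearing the huge PMD
 * down withdraws it again. Hypothetical and never called, assuming the
 * pmd lock is held as it is in pmd_advanced_tests().
 */
static void __init __maybe_unused pmd_deposit_withdraw_example(struct pgtable_debug_args *args)
{
	pgtable_t deposited;

	pgtable_trans_huge_deposit(args->mm, args->pmdp, args->start_ptep);
	deposited = pgtable_trans_huge_withdraw(args->mm, args->pmdp);

	/* Withdraw must hand back exactly what was deposited. */
	WARN_ON(deposited != args->start_ptep);
}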
266 
267 static void __init pmd_leaf_tests(struct pgtable_debug_args *args)
268 {
269 	pmd_t pmd;
270 
271 	if (!has_transparent_hugepage())
272 		return;
273 
274 	pr_debug("Validating PMD leaf\n");
275 	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
276 
277 	/*
278 	 * A PMD-based THP is a leaf entry.
279 	 */
280 	pmd = pmd_mkhuge(pmd);
281 	WARN_ON(!pmd_leaf(pmd));
282 }
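
/*
 * Illustrative contrast for the leaf test above (hypothetical, never
 * called): a PMD populated with a lower-level page table is not a leaf,
 * while a huge mapping in the same slot is.
 */
static void __init __maybe_unused pmd_leaf_vs_table_example(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	/* A table entry points to a next-level page table: not a leaf. */
	pmd_populate(args->mm, args->pmdp, args->start_ptep);
	WARN_ON(pmd_leaf(READ_ONCE(*args->pmdp)));

	/* A huge mapping translates directly to a physical range: a leaf. */
	pmd = pmd_mkhuge(pfn_pmd(args->fixed_pmd_pfn, args->page_prot));
	WARN_ON(!pmd_leaf(pmd));
}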
283 
284 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
285 static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx)
286 {
287 	pgprot_t prot = vm_get_page_prot(idx);
288 	unsigned long val = idx, *ptr = &val;
289 	pud_t pud;
290 
291 	if (!has_transparent_pud_hugepage())
292 		return;
293 
294 	pr_debug("Validating PUD basic (%pGv)\n", ptr);
295 	pud = pfn_pud(args->fixed_pud_pfn, prot);
296 
297 	/*
298 	 * This test needs to be executed after the given page table entry
299 	 * is created with pfn_pud() to make sure that vm_get_page_prot(idx)
300 	 * does not have the dirty bit enabled from the beginning. This is
301 	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
302 	 * that the dirty bit is set.
303 	 */
304 	WARN_ON(pud_dirty(pud_wrprotect(pud)));
305 
306 	WARN_ON(!pud_same(pud, pud));
307 	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
308 	WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
309 	WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
310 	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
311 	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
312 	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
313 	WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
314 	WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));
315 
316 	if (mm_pmd_folded(args->mm))
317 		return;
318 
319 	/*
320 	 * A huge page does not point to a next-level page table
321 	 * page. Hence this must qualify as pud_bad().
322 	 */
323 	WARN_ON(!pud_bad(pud_mkhuge(pud)));
324 }
325 
326 static void __init pud_advanced_tests(struct pgtable_debug_args *args)
327 {
328 	struct page *page;
329 	unsigned long vaddr = args->vaddr;
330 	pud_t pud;
331 
332 	if (!has_transparent_pud_hugepage())
333 		return;
334 
335 	page = (args->pud_pfn != ULONG_MAX) ? pfn_to_page(args->pud_pfn) : NULL;
336 	if (!page)
337 		return;
338 
339 	/*
340 	 * flush_dcache_page() is called after set_pud_at() to clear
341 	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
342 	 * when the page is released, so the page allocation check will
343 	 * fail when the page is allocated again. For architectures other
344 	 * than ARM64, the extra overhead of cache flushing is acceptable.
345 	 */
346 	pr_debug("Validating PUD advanced\n");
347 	/* Align the address to HPAGE_PUD_SIZE */
348 	vaddr &= HPAGE_PUD_MASK;
349 
350 	pud = pfn_pud(args->pud_pfn, args->page_prot);
351 	/*
352 	 * Some architectures have debug checks to make sure
353 	 * huge pud mappings are only found with devmap entries.
354 	 * For now, test only with devmap entries.
355 	 */
356 	pud = pud_mkdevmap(pud);
357 	set_pud_at(args->mm, vaddr, args->pudp, pud);
358 	flush_dcache_page(page);
359 	pudp_set_wrprotect(args->mm, vaddr, args->pudp);
360 	pud = READ_ONCE(*args->pudp);
361 	WARN_ON(pud_write(pud));
362 
363 #ifndef __PAGETABLE_PMD_FOLDED
364 	pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
365 	pud = READ_ONCE(*args->pudp);
366 	WARN_ON(!pud_none(pud));
367 #endif /* __PAGETABLE_PMD_FOLDED */
368 	pud = pfn_pud(args->pud_pfn, args->page_prot);
369 	pud = pud_mkdevmap(pud);
370 	pud = pud_wrprotect(pud);
371 	pud = pud_mkclean(pud);
372 	set_pud_at(args->mm, vaddr, args->pudp, pud);
373 	flush_dcache_page(page);
374 	pud = pud_mkwrite(pud);
375 	pud = pud_mkdirty(pud);
376 	pudp_set_access_flags(args->vma, vaddr, args->pudp, pud, 1);
377 	pud = READ_ONCE(*args->pudp);
378 	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));
379 
380 #ifndef __PAGETABLE_PMD_FOLDED
381 	pudp_huge_get_and_clear_full(args->vma, vaddr, args->pudp, 1);
382 	pud = READ_ONCE(*args->pudp);
383 	WARN_ON(!pud_none(pud));
384 #endif /* __PAGETABLE_PMD_FOLDED */
385 
386 	pud = pfn_pud(args->pud_pfn, args->page_prot);
387 	pud = pud_mkdevmap(pud);
388 	pud = pud_mkyoung(pud);
389 	set_pud_at(args->mm, vaddr, args->pudp, pud);
390 	flush_dcache_page(page);
391 	pudp_test_and_clear_young(args->vma, vaddr, args->pudp);
392 	pud = READ_ONCE(*args->pudp);
393 	WARN_ON(pud_young(pud));
394 
395 	pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
396 }
397 
398 static void __init pud_leaf_tests(struct pgtable_debug_args *args)
399 {
400 	pud_t pud;
401 
402 	if (!has_transparent_pud_hugepage())
403 		return;
404 
405 	pr_debug("Validating PUD leaf\n");
406 	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
407 	/*
408 	 * A PUD-based THP is a leaf entry.
409 	 */
410 	pud = pud_mkhuge(pud);
411 	WARN_ON(!pud_leaf(pud));
412 }
413 #else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
414 static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
415 static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
416 static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
417 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
418 #else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
419 static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx) { }
420 static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
421 static void __init pmd_advanced_tests(struct pgtable_debug_args *args) { }
422 static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
423 static void __init pmd_leaf_tests(struct pgtable_debug_args *args) { }
424 static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
425 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
426 
427 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
428 static void __init pmd_huge_tests(struct pgtable_debug_args *args)
429 {
430 	pmd_t pmd;
431 
432 	if (!arch_vmap_pmd_supported(args->page_prot) ||
433 	    args->fixed_alignment < PMD_SIZE)
434 		return;
435 
436 	pr_debug("Validating PMD huge\n");
437 	/*
438 	 * The x86 implementation of pmd_set_huge() verifies that the given
439 	 * PMD is not a populated non-leaf entry.
440 	 */
441 	WRITE_ONCE(*args->pmdp, __pmd(0));
442 	WARN_ON(!pmd_set_huge(args->pmdp, __pfn_to_phys(args->fixed_pmd_pfn), args->page_prot));
443 	WARN_ON(!pmd_clear_huge(args->pmdp));
444 	pmd = READ_ONCE(*args->pmdp);
445 	WARN_ON(!pmd_none(pmd));
446 }
447 
448 static void __init pud_huge_tests(struct pgtable_debug_args *args)
449 {
450 	pud_t pud;
451 
452 	if (!arch_vmap_pud_supported(args->page_prot) ||
453 	    args->fixed_alignment < PUD_SIZE)
454 		return;
455 
456 	pr_debug("Validating PUD huge\n");
457 	/*
458 	 * The x86 implementation of pud_set_huge() verifies that the given
459 	 * PUD is not a populated non-leaf entry.
460 	 */
461 	WRITE_ONCE(*args->pudp, __pud(0));
462 	WARN_ON(!pud_set_huge(args->pudp, __pfn_to_phys(args->fixed_pud_pfn), args->page_prot));
463 	WARN_ON(!pud_clear_huge(args->pudp));
464 	pud = READ_ONCE(*args->pudp);
465 	WARN_ON(!pud_none(pud));
466 }
467 #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
468 static void __init pmd_huge_tests(struct pgtable_debug_args *args) { }
469 static void __init pud_huge_tests(struct pgtable_debug_args *args) { }
470 #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
471 
472 static void __init p4d_basic_tests(struct pgtable_debug_args *args)
473 {
474 	p4d_t p4d;
475 
476 	pr_debug("Validating P4D basic\n");
477 	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
478 	WARN_ON(!p4d_same(p4d, p4d));
479 }
480 
481 static void __init pgd_basic_tests(struct pgtable_debug_args *args)
482 {
483 	pgd_t pgd;
484 
485 	pr_debug("Validating PGD basic\n");
486 	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
487 	WARN_ON(!pgd_same(pgd, pgd));
488 }
489 
490 #ifndef __PAGETABLE_PUD_FOLDED
491 static void __init pud_clear_tests(struct pgtable_debug_args *args)
492 {
493 	pud_t pud = READ_ONCE(*args->pudp);
494 
495 	if (mm_pmd_folded(args->mm))
496 		return;
497 
498 	pr_debug("Validating PUD clear\n");
499 	WARN_ON(pud_none(pud));
500 	pud_clear(args->pudp);
501 	pud = READ_ONCE(*args->pudp);
502 	WARN_ON(!pud_none(pud));
503 }
504 
505 static void __init pud_populate_tests(struct pgtable_debug_args *args)
506 {
507 	pud_t pud;
508 
509 	if (mm_pmd_folded(args->mm))
510 		return;
511 
512 	pr_debug("Validating PUD populate\n");
513 	/*
514 	 * This entry points to the next-level page table page.
515 	 * Hence this must not qualify as pud_bad().
516 	 */
517 	pud_populate(args->mm, args->pudp, args->start_pmdp);
518 	pud = READ_ONCE(*args->pudp);
519 	WARN_ON(pud_bad(pud));
520 }
521 #else  /* !__PAGETABLE_PUD_FOLDED */
522 static void __init pud_clear_tests(struct pgtable_debug_args *args) { }
523 static void __init pud_populate_tests(struct pgtable_debug_args *args) { }
524 #endif /* __PAGETABLE_PUD_FOLDED */
525 
526 #ifndef __PAGETABLE_P4D_FOLDED
527 static void __init p4d_clear_tests(struct pgtable_debug_args *args)
528 {
529 	p4d_t p4d = READ_ONCE(*args->p4dp);
530 
531 	if (mm_pud_folded(args->mm))
532 		return;
533 
534 	pr_debug("Validating P4D clear\n");
535 	WARN_ON(p4d_none(p4d));
536 	p4d_clear(args->p4dp);
537 	p4d = READ_ONCE(*args->p4dp);
538 	WARN_ON(!p4d_none(p4d));
539 }
540 
541 static void __init p4d_populate_tests(struct pgtable_debug_args *args)
542 {
543 	p4d_t p4d;
544 
545 	if (mm_pud_folded(args->mm))
546 		return;
547 
548 	pr_debug("Validating P4D populate\n");
549 	/*
550 	 * This entry points to the next-level page table page.
551 	 * Hence this must not qualify as p4d_bad().
552 	 */
553 	pud_clear(args->pudp);
554 	p4d_clear(args->p4dp);
555 	p4d_populate(args->mm, args->p4dp, args->start_pudp);
556 	p4d = READ_ONCE(*args->p4dp);
557 	WARN_ON(p4d_bad(p4d));
558 }
559 
560 static void __init pgd_clear_tests(struct pgtable_debug_args *args)
561 {
562 	pgd_t pgd = READ_ONCE(*(args->pgdp));
563 
564 	if (mm_p4d_folded(args->mm))
565 		return;
566 
567 	pr_debug("Validating PGD clear\n");
568 	WARN_ON(pgd_none(pgd));
569 	pgd_clear(args->pgdp);
570 	pgd = READ_ONCE(*args->pgdp);
571 	WARN_ON(!pgd_none(pgd));
572 }
573 
574 static void __init pgd_populate_tests(struct pgtable_debug_args *args)
575 {
576 	pgd_t pgd;
577 
578 	if (mm_p4d_folded(args->mm))
579 		return;
580 
581 	pr_debug("Validating PGD populate\n");
582 	/*
583 	 * This entry points to the next-level page table page.
584 	 * Hence this must not qualify as pgd_bad().
585 	 */
586 	p4d_clear(args->p4dp);
587 	pgd_clear(args->pgdp);
588 	pgd_populate(args->mm, args->pgdp, args->start_p4dp);
589 	pgd = READ_ONCE(*args->pgdp);
590 	WARN_ON(pgd_bad(pgd));
591 }
592 #else  /* !__PAGETABLE_P4D_FOLDED */
593 static void __init p4d_clear_tests(struct pgtable_debug_args *args) { }
594 static void __init pgd_clear_tests(struct pgtable_debug_args *args) { }
595 static void __init p4d_populate_tests(struct pgtable_debug_args *args) { }
596 static void __init pgd_populate_tests(struct pgtable_debug_args *args) { }
597 #endif /* __PAGETABLE_P4D_FOLDED */
598 
599 static void __init pte_clear_tests(struct pgtable_debug_args *args)
600 {
601 	struct page *page;
602 	pte_t pte = pfn_pte(args->pte_pfn, args->page_prot);
603 
604 	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
605 	if (!page)
606 		return;
607 
608 	/*
609 	 * flush_dcache_page() is called after set_pte_at() to clear
610 	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
611 	 * when the page is released, so the page allocation check will
612 	 * fail when the page is allocated again. For architectures other
613 	 * than ARM64, the extra overhead of cache flushing is acceptable.
614 	 */
615 	pr_debug("Validating PTE clear\n");
616 	if (WARN_ON(!args->ptep))
617 		return;
618 
619 	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
620 	WARN_ON(pte_none(pte));
621 	flush_dcache_page(page);
622 	barrier();
623 	ptep_clear(args->mm, args->vaddr, args->ptep);
624 	pte = ptep_get(args->ptep);
625 	WARN_ON(!pte_none(pte));
626 }
627 
628 static void __init pmd_clear_tests(struct pgtable_debug_args *args)
629 {
630 	pmd_t pmd = READ_ONCE(*args->pmdp);
631 
632 	pr_debug("Validating PMD clear\n");
633 	WARN_ON(pmd_none(pmd));
634 	pmd_clear(args->pmdp);
635 	pmd = READ_ONCE(*args->pmdp);
636 	WARN_ON(!pmd_none(pmd));
637 }
638 
639 static void __init pmd_populate_tests(struct pgtable_debug_args *args)
640 {
641 	pmd_t pmd;
642 
643 	pr_debug("Validating PMD populate\n");
644 	/*
645 	 * This entry points to the next-level page table page.
646 	 * Hence this must not qualify as pmd_bad().
647 	 */
648 	pmd_populate(args->mm, args->pmdp, args->start_ptep);
649 	pmd = READ_ONCE(*args->pmdp);
650 	WARN_ON(pmd_bad(pmd));
651 }
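
/*
 * Illustrative only: what "not pmd_bad()" buys in practice. Once the PMD
 * has been populated, the standard walk helpers can descend through it;
 * pte_offset_kernel() is plain pointer arithmetic into the PTE page.
 * Hypothetical sketch, never called by the test harness.
 */
static void __init __maybe_unused pmd_walk_example(struct pgtable_debug_args *args)
{
	pte_t *ptep;

	pmd_populate(args->mm, args->pmdp, args->start_ptep);

	/* Descend from the populated PMD to the PTE slot for vaddr. */
	ptep = pte_offset_kernel(args->pmdp, args->vaddr);
	WARN_ON(!ptep);
}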
652 
653 static void __init pte_special_tests(struct pgtable_debug_args *args)
654 {
655 	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);
656 
657 	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
658 		return;
659 
660 	pr_debug("Validating PTE special\n");
661 	WARN_ON(!pte_special(pte_mkspecial(pte)));
662 }
663 
664 static void __init pte_protnone_tests(struct pgtable_debug_args *args)
665 {
666 	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot_none);
667 
668 	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
669 		return;
670 
671 	pr_debug("Validating PTE protnone\n");
672 	WARN_ON(!pte_protnone(pte));
673 	WARN_ON(!pte_present(pte));
674 }
675 
676 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
677 static void __init pmd_protnone_tests(struct pgtable_debug_args *args)
678 {
679 	pmd_t pmd;
680 
681 	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
682 		return;
683 
684 	if (!has_transparent_hugepage())
685 		return;
686 
687 	pr_debug("Validating PMD protnone\n");
688 	pmd = pmd_mkhuge(pfn_pmd(args->fixed_pmd_pfn, args->page_prot_none));
689 	WARN_ON(!pmd_protnone(pmd));
690 	WARN_ON(!pmd_present(pmd));
691 }
692 #else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
693 static void __init pmd_protnone_tests(struct pgtable_debug_args *args) { }
694 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
695 
696 #ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
697 static void __init pte_devmap_tests(struct pgtable_debug_args *args)
698 {
699 	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);
700 
701 	pr_debug("Validating PTE devmap\n");
702 	WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
703 }
704 
705 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
706 static void __init pmd_devmap_tests(struct pgtable_debug_args *args)
707 {
708 	pmd_t pmd;
709 
710 	if (!has_transparent_hugepage())
711 		return;
712 
713 	pr_debug("Validating PMD devmap\n");
714 	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
715 	WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
716 }
717 
718 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
719 static void __init pud_devmap_tests(struct pgtable_debug_args *args)
720 {
721 	pud_t pud;
722 
723 	if (!has_transparent_pud_hugepage())
724 		return;
725 
726 	pr_debug("Validating PUD devmap\n");
727 	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
728 	WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
729 }
730 #else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
731 static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
732 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
733 #else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
734 static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
735 static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
736 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
737 #else  /* !CONFIG_ARCH_HAS_PTE_DEVMAP */
738 static void __init pte_devmap_tests(struct pgtable_debug_args *args) { }
739 static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
740 static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
741 #endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */
742 
743 static void __init pte_soft_dirty_tests(struct pgtable_debug_args *args)
744 {
745 	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);
746 
747 	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
748 		return;
749 
750 	pr_debug("Validating PTE soft dirty\n");
751 	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
752 	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
753 }
754 
755 static void __init pte_swap_soft_dirty_tests(struct pgtable_debug_args *args)
756 {
757 	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);
758 
759 	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
760 		return;
761 
762 	pr_debug("Validating PTE swap soft dirty\n");
763 	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
764 	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
765 }
766 
767 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
768 static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args)
769 {
770 	pmd_t pmd;
771 
772 	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
773 		return;
774 
775 	if (!has_transparent_hugepage())
776 		return;
777 
778 	pr_debug("Validating PMD soft dirty\n");
779 	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
780 	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
781 	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
782 }
783 
784 static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args)
785 {
786 	pmd_t pmd;
787 
788 	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
789 		!IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
790 		return;
791 
792 	if (!has_transparent_hugepage())
793 		return;
794 
795 	pr_debug("Validating PMD swap soft dirty\n");
796 	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
797 	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
798 	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
799 }
800 #else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
801 static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args) { }
802 static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args) { }
803 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
804 
805 static void __init pte_swap_exclusive_tests(struct pgtable_debug_args *args)
806 {
807 	unsigned long max_swap_offset;
808 	swp_entry_t entry, entry2;
809 	pte_t pte;
810 
811 	pr_debug("Validating PTE swap exclusive\n");
812 
813 	/* See generic_max_swapfile_size(): probe the maximum offset */
814 	max_swap_offset = swp_offset(pte_to_swp_entry(swp_entry_to_pte(swp_entry(0, ~0UL))));
815 
816 	/* Create a swp entry with all possible bits set */
817 	entry = swp_entry((1 << MAX_SWAPFILES_SHIFT) - 1, max_swap_offset);
818 
819 	pte = swp_entry_to_pte(entry);
820 	WARN_ON(pte_swp_exclusive(pte));
821 	WARN_ON(!is_swap_pte(pte));
822 	entry2 = pte_to_swp_entry(pte);
823 	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));
824 
825 	pte = pte_swp_mkexclusive(pte);
826 	WARN_ON(!pte_swp_exclusive(pte));
827 	WARN_ON(!is_swap_pte(pte));
828 	WARN_ON(pte_swp_soft_dirty(pte));
829 	entry2 = pte_to_swp_entry(pte);
830 	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));
831 
832 	pte = pte_swp_clear_exclusive(pte);
833 	WARN_ON(pte_swp_exclusive(pte));
834 	WARN_ON(!is_swap_pte(pte));
835 	entry2 = pte_to_swp_entry(pte);
836 	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));
837 }
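
/*
 * Illustrative only: the software swap-entry encoding exercised above.
 * swp_entry() packs a (type, offset) pair, and the accessors must return
 * both fields unchanged. The arch-specific pte encoding is probed
 * separately in pte_swap_tests() below. Values here are hypothetical.
 */
static void __init __maybe_unused swp_entry_roundtrip_example(void)
{
	/* Swap device (type) 1, page offset 0x1234. */
	swp_entry_t entry = swp_entry(1, 0x1234);

	WARN_ON(swp_type(entry) != 1);
	WARN_ON(swp_offset(entry) != 0x1234);
}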
838 
839 static void __init pte_swap_tests(struct pgtable_debug_args *args)
840 {
841 	swp_entry_t swp;
842 	pte_t pte;
843 
844 	pr_debug("Validating PTE swap\n");
845 	pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);
846 	swp = __pte_to_swp_entry(pte);
847 	pte = __swp_entry_to_pte(swp);
848 	WARN_ON(args->fixed_pte_pfn != pte_pfn(pte));
849 }
850 
851 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
852 static void __init pmd_swap_tests(struct pgtable_debug_args *args)
853 {
854 	swp_entry_t swp;
855 	pmd_t pmd;
856 
857 	if (!has_transparent_hugepage())
858 		return;
859 
860 	pr_debug("Validating PMD swap\n");
861 	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
862 	swp = __pmd_to_swp_entry(pmd);
863 	pmd = __swp_entry_to_pmd(swp);
864 	WARN_ON(args->fixed_pmd_pfn != pmd_pfn(pmd));
865 }
866 #else  /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
867 static void __init pmd_swap_tests(struct pgtable_debug_args *args) { }
868 #endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
869 
870 static void __init swap_migration_tests(struct pgtable_debug_args *args)
871 {
872 	struct page *page;
873 	swp_entry_t swp;
874 
875 	if (!IS_ENABLED(CONFIG_MIGRATION))
876 		return;
877 
878 	/*
879 	 * swap_migration_tests() requires a dedicated page as it needs to
880 	 * be locked before creating a migration entry from it. Locking the
881 	 * page that actually maps kernel text ('start_kernel') can be really
882 	 * problematic. Let's use the allocated page explicitly for this
883 	 * purpose.
884 	 */
885 	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
886 	if (!page)
887 		return;
888 
889 	pr_debug("Validating swap migration\n");
890 
891 	/*
892 	 * make_[readable|writable]_migration_entry() expects the given page
893 	 * to be locked; otherwise it trips a BUG_ON().
894 	 */
895 	__SetPageLocked(page);
896 	swp = make_writable_migration_entry(page_to_pfn(page));
897 	WARN_ON(!is_migration_entry(swp));
898 	WARN_ON(!is_writable_migration_entry(swp));
899 
900 	swp = make_readable_migration_entry(swp_offset(swp));
901 	WARN_ON(!is_migration_entry(swp));
902 	WARN_ON(is_writable_migration_entry(swp));
903 
904 	swp = make_readable_migration_entry(page_to_pfn(page));
905 	WARN_ON(!is_migration_entry(swp));
906 	WARN_ON(is_writable_migration_entry(swp));
907 	__ClearPageLocked(page);
908 }
909 
910 #ifdef CONFIG_HUGETLB_PAGE
911 static void __init hugetlb_basic_tests(struct pgtable_debug_args *args)
912 {
913 	struct page *page;
914 	pte_t pte;
915 
916 	pr_debug("Validating HugeTLB basic\n");
917 	/*
918 	 * Accessing the page associated with the pfn is safe here,
919 	 * as it was previously derived from a real kernel symbol.
920 	 */
921 	page = pfn_to_page(args->fixed_pmd_pfn);
922 	pte = mk_huge_pte(page, args->page_prot);
923 
924 	WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
925 	WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
926 	WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));
927 
928 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
929 	pte = pfn_pte(args->fixed_pmd_pfn, args->page_prot);
930 
931 	WARN_ON(!pte_huge(arch_make_huge_pte(pte, PMD_SHIFT, VM_ACCESS_FLAGS)));
932 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
933 }
934 #else  /* !CONFIG_HUGETLB_PAGE */
935 static void __init hugetlb_basic_tests(struct pgtable_debug_args *args) { }
936 #endif /* CONFIG_HUGETLB_PAGE */
937 
938 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
939 static void __init pmd_thp_tests(struct pgtable_debug_args *args)
940 {
941 	pmd_t pmd;
942 
943 	if (!has_transparent_hugepage())
944 		return;
945 
946 	pr_debug("Validating PMD based THP\n");
947 	/*
948 	 * pmd_trans_huge() and pmd_present() must return true after
949 	 * MMU invalidation with pmd_mkinvalid(). This behavior is an
950 	 * optimization for transparent huge pages. pmd_trans_huge() must
951 	 * be true if pmd_page() returns a valid THP, to avoid taking the
952 	 * pmd_lock when others walk over non-transhuge pmds (i.e. when
953 	 * no THP is allocated). Especially when splitting a THP and
954 	 * removing the present bit from the pmd, pmd_trans_huge() still
955 	 * needs to return true. pmd_present() should be true whenever
956 	 * pmd_trans_huge() returns true.
957 	 */
958 	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
959 	WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));
960 
961 #ifndef __HAVE_ARCH_PMDP_INVALIDATE
962 	WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
963 	WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
964 	WARN_ON(!pmd_leaf(pmd_mkinvalid(pmd_mkhuge(pmd))));
965 #endif /* __HAVE_ARCH_PMDP_INVALIDATE */
966 }
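
/*
 * Illustrative only: how a THP split path leans on the property checked
 * above. pmdp_invalidate() replaces a live huge PMD with an invalidated
 * copy that the hardware will fault on, yet pmd_trans_huge() keeps
 * returning true so concurrent walkers still take the THP path. A
 * hypothetical sketch assuming the pmd lock is held and a huge PMD is
 * mapped at vaddr.
 */
static void __init __maybe_unused pmd_split_invalidate_example(struct pgtable_debug_args *args,
							       unsigned long vaddr)
{
	pmd_t old;

	old = pmdp_invalidate(args->vma, vaddr, args->pmdp);

	/* The old entry was live; the in-memory one still looks like a THP. */
	WARN_ON(!pmd_present(old));
	WARN_ON(!pmd_trans_huge(READ_ONCE(*args->pmdp)));
}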
967 
968 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
969 static void __init pud_thp_tests(struct pgtable_debug_args *args)
970 {
971 	pud_t pud;
972 
973 	if (!has_transparent_pud_hugepage())
974 		return;
975 
976 	pr_debug("Validating PUD based THP\n");
977 	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
978 	WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));
979 
980 	/*
981 	 * pud_mkinvalid() has been dropped for now. Re-enable these
982 	 * tests when it comes back with a modified pud_present().
983 	 *
984 	 * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
985 	 * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
986 	 */
987 }
988 #else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
989 static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
990 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
991 #else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
992 static void __init pmd_thp_tests(struct pgtable_debug_args *args) { }
993 static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
994 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
995 
996 static unsigned long __init get_random_vaddr(void)
997 {
998 	unsigned long random_vaddr, random_pages, total_user_pages;
999 
1000 	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;
1001 
1002 	random_pages = get_random_long() % total_user_pages;
1003 	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;
1004 
1005 	return random_vaddr;
1006 }
1007 
1008 static void __init destroy_args(struct pgtable_debug_args *args)
1009 {
1010 	struct page *page = NULL;
1011 
1012 	/* Free (huge) page */
1013 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
1014 	    has_transparent_pud_hugepage() &&
1015 	    args->pud_pfn != ULONG_MAX) {
1016 		if (args->is_contiguous_page) {
1017 			free_contig_range(args->pud_pfn,
1018 					  (1 << (HPAGE_PUD_SHIFT - PAGE_SHIFT)));
1019 		} else {
1020 			page = pfn_to_page(args->pud_pfn);
1021 			__free_pages(page, HPAGE_PUD_SHIFT - PAGE_SHIFT);
1022 		}
1023 
1024 		args->pud_pfn = ULONG_MAX;
1025 		args->pmd_pfn = ULONG_MAX;
1026 		args->pte_pfn = ULONG_MAX;
1027 	}
1028 
1029 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
1030 	    has_transparent_hugepage() &&
1031 	    args->pmd_pfn != ULONG_MAX) {
1032 		if (args->is_contiguous_page) {
1033 			free_contig_range(args->pmd_pfn, (1 << HPAGE_PMD_ORDER));
1034 		} else {
1035 			page = pfn_to_page(args->pmd_pfn);
1036 			__free_pages(page, HPAGE_PMD_ORDER);
1037 		}
1038 
1039 		args->pmd_pfn = ULONG_MAX;
1040 		args->pte_pfn = ULONG_MAX;
1041 	}
1042 
1043 	if (args->pte_pfn != ULONG_MAX) {
1044 		page = pfn_to_page(args->pte_pfn);
1045 		__free_page(page);
1046 
1047 		args->pte_pfn = ULONG_MAX;
1048 	}
1049 
1050 	/* Free the page table pages */
1051 	if (args->start_ptep) {
1052 		pte_free(args->mm, args->start_ptep);
1053 		mm_dec_nr_ptes(args->mm);
1054 	}
1055 
1056 	if (args->start_pmdp) {
1057 		pmd_free(args->mm, args->start_pmdp);
1058 		mm_dec_nr_pmds(args->mm);
1059 	}
1060 
1061 	if (args->start_pudp) {
1062 		pud_free(args->mm, args->start_pudp);
1063 		mm_dec_nr_puds(args->mm);
1064 	}
1065 
1066 	if (args->start_p4dp)
1067 		p4d_free(args->mm, args->start_p4dp);
1068 
1069 	/* Free vma and mm struct */
1070 	if (args->vma)
1071 		vm_area_free(args->vma);
1072 
1073 	if (args->mm)
1074 		mmdrop(args->mm);
1075 }
1076 
1077 static struct page * __init
1078 debug_vm_pgtable_alloc_huge_page(struct pgtable_debug_args *args, int order)
1079 {
1080 	struct page *page = NULL;
1081 
1082 #ifdef CONFIG_CONTIG_ALLOC
1083 	if (order > MAX_PAGE_ORDER) {
1084 		page = alloc_contig_pages((1 << order), GFP_KERNEL,
1085 					  first_online_node, NULL);
1086 		if (page) {
1087 			args->is_contiguous_page = true;
1088 			return page;
1089 		}
1090 	}
1091 #endif
1092 
1093 	if (order <= MAX_PAGE_ORDER)
1094 		page = alloc_pages(GFP_KERNEL, order);
1095 
1096 	return page;
1097 }
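
/*
 * Note on the order split above: buddy allocations are capped at
 * MAX_PAGE_ORDER, so a PUD-sized request (order 18 on x86-64 with 4K
 * pages: 1GB / 4KB) can only be satisfied by alloc_contig_pages(),
 * which in turn requires CONFIG_CONTIG_ALLOC.
 */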
1098 
1099 /*
1100  * Check if a physical memory range described by <pstart, pend> contains
1101  * an area that is of size psize, and aligned to psize.
1102  *
1103  * Don't use address 0: an all-zeroes physical address might mask bugs, and
1104  * it's not used on x86.
1105  */
1106 static void  __init phys_align_check(phys_addr_t pstart,
1107 				     phys_addr_t pend, unsigned long psize,
1108 				     phys_addr_t *physp, unsigned long *alignp)
1109 {
1110 	phys_addr_t aligned_start, aligned_end;
1111 
1112 	if (pstart == 0)
1113 		pstart = PAGE_SIZE;
1114 
1115 	aligned_start = ALIGN(pstart, psize);
1116 	aligned_end = aligned_start + psize;
1117 
1118 	if (aligned_end > aligned_start && aligned_end <= pend) {
1119 		*alignp = psize;
1120 		*physp = aligned_start;
1121 	}
1122 }
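
/*
 * Worked example for the helper above, with hypothetical numbers: given
 * a memblock range of [18MB, 32MB) and psize == PMD_SIZE, the first
 * psize-aligned candidate area fits inside the range, so phys/align get
 * updated; with 2MB PMDs (x86-64, 4K pages) that candidate is 18MB
 * itself. Never called; for illustration only.
 */
static void __init __maybe_unused phys_align_check_example(void)
{
	phys_addr_t phys = 0;
	unsigned long align = PAGE_SIZE;

	phys_align_check(0x1200000, 0x2000000, PMD_SIZE, &phys, &align);

	/* Holds for any PMD_SIZE up to 8MB given the range above. */
	WARN_ON(PMD_SIZE <= 0x800000 && align != PMD_SIZE);
}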
1123 
1124 static void __init init_fixed_pfns(struct pgtable_debug_args *args)
1125 {
1126 	u64 idx;
1127 	phys_addr_t phys, pstart, pend;
1128 
1129 	/*
1130 	 * Initialize the fixed pfns. To do this, try to find a
1131 	 * valid physical range, preferably aligned to PUD_SIZE,
1132 	 * but settling for aligned to PMD_SIZE as a fallback. If
1133 	 * neither of those is found, use the physical address of
1134 	 * the start_kernel symbol.
1135 	 *
1136 	 * The memory doesn't need to be allocated; it just needs to exist
1137 	 * as usable memory. It won't be touched.
1138 	 *
1139 	 * The alignment is recorded, and can be checked to see if we
1140 	 * can run the tests that require an actual valid physical
1141 	 * address range on some architectures ({pmd,pud}_huge_tests()
1142 	 * on x86).
1143 	 */
1144 
1145 	phys = __pa_symbol(&start_kernel);
1146 	args->fixed_alignment = PAGE_SIZE;
1147 
1148 	for_each_mem_range(idx, &pstart, &pend) {
1149 		/* First check for a PUD-aligned area */
1150 		phys_align_check(pstart, pend, PUD_SIZE, &phys,
1151 				 &args->fixed_alignment);
1152 
1153 		/* If a PUD-aligned area is found, we're done */
1154 		if (args->fixed_alignment == PUD_SIZE)
1155 			break;
1156 
1157 		/*
1158 		 * If no PMD-aligned area has been found yet, check for one,
1159 		 * but continue the loop to look for a PUD-aligned area.
1160 		 */
1161 		if (args->fixed_alignment < PMD_SIZE)
1162 			phys_align_check(pstart, pend, PMD_SIZE, &phys,
1163 					 &args->fixed_alignment);
1164 	}
1165 
1166 	args->fixed_pgd_pfn = __phys_to_pfn(phys & PGDIR_MASK);
1167 	args->fixed_p4d_pfn = __phys_to_pfn(phys & P4D_MASK);
1168 	args->fixed_pud_pfn = __phys_to_pfn(phys & PUD_MASK);
1169 	args->fixed_pmd_pfn = __phys_to_pfn(phys & PMD_MASK);
1170 	args->fixed_pte_pfn = __phys_to_pfn(phys & PAGE_MASK);
1171 	WARN_ON(!pfn_valid(args->fixed_pte_pfn));
1172 }
1173 
1174 
1175 static int __init init_args(struct pgtable_debug_args *args)
1176 {
1177 	struct page *page = NULL;
1178 	int ret = 0;
1179 
1180 	/*
1181 	 * Initialize the debugging data.
1182 	 *
1183 	 * vm_get_page_prot(VM_NONE) or vm_get_page_prot(VM_SHARED|VM_NONE)
1184 	 * will help create page table entries with PROT_NONE permission as
1185 	 * required for pxx_protnone_tests().
1186 	 */
1187 	memset(args, 0, sizeof(*args));
1188 	args->vaddr              = get_random_vaddr();
1189 	args->page_prot          = vm_get_page_prot(VM_ACCESS_FLAGS);
1190 	args->page_prot_none     = vm_get_page_prot(VM_NONE);
1191 	args->is_contiguous_page = false;
1192 	args->pud_pfn            = ULONG_MAX;
1193 	args->pmd_pfn            = ULONG_MAX;
1194 	args->pte_pfn            = ULONG_MAX;
1195 	args->fixed_pgd_pfn      = ULONG_MAX;
1196 	args->fixed_p4d_pfn      = ULONG_MAX;
1197 	args->fixed_pud_pfn      = ULONG_MAX;
1198 	args->fixed_pmd_pfn      = ULONG_MAX;
1199 	args->fixed_pte_pfn      = ULONG_MAX;
1200 
1201 	/* Allocate mm and vma */
1202 	args->mm = mm_alloc();
1203 	if (!args->mm) {
1204 		pr_err("Failed to allocate mm struct\n");
1205 		ret = -ENOMEM;
1206 		goto error;
1207 	}
1208 
1209 	args->vma = vm_area_alloc(args->mm);
1210 	if (!args->vma) {
1211 		pr_err("Failed to allocate vma\n");
1212 		ret = -ENOMEM;
1213 		goto error;
1214 	}
1215 
1216 	/*
1217 	 * Allocate page table entries. They will be modified in the tests.
1218 	 * Let's save the page table entries so that they can be released
1219 	 * when the tests are completed.
1220 	 */
1221 	args->pgdp = pgd_offset(args->mm, args->vaddr);
1222 	args->p4dp = p4d_alloc(args->mm, args->pgdp, args->vaddr);
1223 	if (!args->p4dp) {
1224 		pr_err("Failed to allocate p4d entries\n");
1225 		ret = -ENOMEM;
1226 		goto error;
1227 	}
1228 	args->start_p4dp = p4d_offset(args->pgdp, 0UL);
1229 	WARN_ON(!args->start_p4dp);
1230 
1231 	args->pudp = pud_alloc(args->mm, args->p4dp, args->vaddr);
1232 	if (!args->pudp) {
1233 		pr_err("Failed to allocate pud entries\n");
1234 		ret = -ENOMEM;
1235 		goto error;
1236 	}
1237 	args->start_pudp = pud_offset(args->p4dp, 0UL);
1238 	WARN_ON(!args->start_pudp);
1239 
1240 	args->pmdp = pmd_alloc(args->mm, args->pudp, args->vaddr);
1241 	if (!args->pmdp) {
1242 		pr_err("Failed to allocate pmd entries\n");
1243 		ret = -ENOMEM;
1244 		goto error;
1245 	}
1246 	args->start_pmdp = pmd_offset(args->pudp, 0UL);
1247 	WARN_ON(!args->start_pmdp);
1248 
1249 	if (pte_alloc(args->mm, args->pmdp)) {
1250 		pr_err("Failed to allocate pte entries\n");
1251 		ret = -ENOMEM;
1252 		goto error;
1253 	}
1254 	args->start_ptep = pmd_pgtable(READ_ONCE(*args->pmdp));
1255 	WARN_ON(!args->start_ptep);
1256 
1257 	init_fixed_pfns(args);
1258 
1259 	/*
1260 	 * Allocate (huge) pages because some of the tests need to access
1261 	 * the data in the pages. The corresponding tests will be skipped
1262 	 * if we fail to allocate (huge) pages.
1263 	 */
1264 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
1265 	    has_transparent_pud_hugepage()) {
1266 		page = debug_vm_pgtable_alloc_huge_page(args,
1267 				HPAGE_PUD_SHIFT - PAGE_SHIFT);
1268 		if (page) {
1269 			args->pud_pfn = page_to_pfn(page);
1270 			args->pmd_pfn = args->pud_pfn;
1271 			args->pte_pfn = args->pud_pfn;
1272 			return 0;
1273 		}
1274 	}
1275 
1276 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
1277 	    has_transparent_hugepage()) {
1278 		page = debug_vm_pgtable_alloc_huge_page(args, HPAGE_PMD_ORDER);
1279 		if (page) {
1280 			args->pmd_pfn = page_to_pfn(page);
1281 			args->pte_pfn = args->pmd_pfn;
1282 			return 0;
1283 		}
1284 	}
1285 
1286 	page = alloc_page(GFP_KERNEL);
1287 	if (page)
1288 		args->pte_pfn = page_to_pfn(page);
1289 
1290 	return 0;
1291 
1292 error:
1293 	destroy_args(args);
1294 	return ret;
1295 }
1296 
1297 static int __init debug_vm_pgtable(void)
1298 {
1299 	struct pgtable_debug_args args;
1300 	spinlock_t *ptl = NULL;
1301 	int idx, ret;
1302 
1303 	pr_info("Validating architecture page table helpers\n");
1304 	ret = init_args(&args);
1305 	if (ret)
1306 		return ret;
1307 
1308 	/*
1309 	 * Iterate over each possible vm_flags to make sure that all
1310 	 * the basic page table transformation validations just hold
1311 	 * true irrespective of the starting protection value for a
1312 	 * given page table entry.
1313 	 *
1314 	 * Protection based vm_flags combinations are always linear
1315 	 * and increasing, i.e. starting from VM_NONE and going up to
1316 	 * (VM_SHARED | READ | WRITE | EXEC).
1317 	 */
1318 #define VM_FLAGS_START	(VM_NONE)
1319 #define VM_FLAGS_END	(VM_SHARED | VM_EXEC | VM_WRITE | VM_READ)
1320 
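	/*
	 * VM_FLAGS_END is VM_READ (0x1) | VM_WRITE (0x2) | VM_EXEC (0x4) |
	 * VM_SHARED (0x8) == 0xf, so the loop below covers all 16
	 * protection combinations that index protection_map[].
	 */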
1321 	for (idx = VM_FLAGS_START; idx <= VM_FLAGS_END; idx++) {
1322 		pte_basic_tests(&args, idx);
1323 		pmd_basic_tests(&args, idx);
1324 		pud_basic_tests(&args, idx);
1325 	}
1326 
1327 	/*
1328 	 * Both P4D and PGD level tests are very basic and do not
1329 	 * involve creating page table entries from the protection
1330 	 * value and the given pfn. Hence just keep them out of
1331 	 * the above iteration for now to save some test execution
1332 	 * time.
1333 	 */
1334 	p4d_basic_tests(&args);
1335 	pgd_basic_tests(&args);
1336 
1337 	pmd_leaf_tests(&args);
1338 	pud_leaf_tests(&args);
1339 
1340 	pte_special_tests(&args);
1341 	pte_protnone_tests(&args);
1342 	pmd_protnone_tests(&args);
1343 
1344 	pte_devmap_tests(&args);
1345 	pmd_devmap_tests(&args);
1346 	pud_devmap_tests(&args);
1347 
1348 	pte_soft_dirty_tests(&args);
1349 	pmd_soft_dirty_tests(&args);
1350 	pte_swap_soft_dirty_tests(&args);
1351 	pmd_swap_soft_dirty_tests(&args);
1352 
1353 	pte_swap_exclusive_tests(&args);
1354 
1355 	pte_swap_tests(&args);
1356 	pmd_swap_tests(&args);
1357 
1358 	swap_migration_tests(&args);
1359 
1360 	pmd_thp_tests(&args);
1361 	pud_thp_tests(&args);
1362 
1363 	hugetlb_basic_tests(&args);
1364 
1365 	/*
1366 	 * Page table modifying tests. They need to hold the
1367 	 * proper page table lock.
1368 	 */
1369 
1370 	args.ptep = pte_offset_map_lock(args.mm, args.pmdp, args.vaddr, &ptl);
1371 	pte_clear_tests(&args);
1372 	pte_advanced_tests(&args);
1373 	if (args.ptep)
1374 		pte_unmap_unlock(args.ptep, ptl);
1375 
1376 	ptl = pmd_lock(args.mm, args.pmdp);
1377 	pmd_clear_tests(&args);
1378 	pmd_advanced_tests(&args);
1379 	pmd_huge_tests(&args);
1380 	pmd_populate_tests(&args);
1381 	spin_unlock(ptl);
1382 
1383 	ptl = pud_lock(args.mm, args.pudp);
1384 	pud_clear_tests(&args);
1385 	pud_advanced_tests(&args);
1386 	pud_huge_tests(&args);
1387 	pud_populate_tests(&args);
1388 	spin_unlock(ptl);
1389 
1390 	spin_lock(&(args.mm->page_table_lock));
1391 	p4d_clear_tests(&args);
1392 	pgd_clear_tests(&args);
1393 	p4d_populate_tests(&args);
1394 	pgd_populate_tests(&args);
1395 	spin_unlock(&(args.mm->page_table_lock));
1396 
1397 	destroy_args(&args);
1398 	return 0;
1399 }
1400 late_initcall(debug_vm_pgtable);
1401