// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Please refer to Documentation/mm/arch_pgtable_helpers.rst for the
 * semantic expectations that are being validated here. All future
 * changes here or in the documentation need to be kept in sync.
 */
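/*
 * RANDOM_NZVALUE expands to 0xff (GENMASK(7, 0) sets bits 7..0). The
 * basic P4D/PGD tests below memset() dummy entries with this non-zero
 * byte pattern, so that pxx_same() is exercised against entries that
 * are not simply all zeroes.
 */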
#define RANDOM_NZVALUE	GENMASK(7, 0)

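/*
 * Shared state for all tests. The mm/vma pair and the pxdp pointers
 * describe a page table hierarchy that the "advanced" tests modify in
 * place, while the start_* pointers remember each level's table so it
 * can be freed again in destroy_args(). The pud/pmd/pte pfns refer to
 * pages actually allocated for the tests (ULONG_MAX when the
 * allocation failed and the corresponding tests must be skipped). The
 * fixed_* pfns only need to name valid, existing memory and are never
 * touched; fixed_alignment records the alignment of the physical area
 * they were derived from.
 */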
struct pgtable_debug_args {
	struct mm_struct	*mm;
	struct vm_area_struct	*vma;

	pgd_t			*pgdp;
	p4d_t			*p4dp;
	pud_t			*pudp;
	pmd_t			*pmdp;
	pte_t			*ptep;

	p4d_t			*start_p4dp;
	pud_t			*start_pudp;
	pmd_t			*start_pmdp;
	pgtable_t		start_ptep;

	unsigned long		vaddr;
	pgprot_t		page_prot;
	pgprot_t		page_prot_none;

	bool			is_contiguous_page;
	unsigned long		pud_pfn;
	unsigned long		pmd_pfn;
	unsigned long		pte_pfn;

	unsigned long		fixed_alignment;
	unsigned long		fixed_pgd_pfn;
	unsigned long		fixed_p4d_pfn;
	unsigned long		fixed_pud_pfn;
	unsigned long		fixed_pmd_pfn;
	unsigned long		fixed_pte_pfn;
};

static void __init pte_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	pte_t pte = pfn_pte(args->fixed_pte_pfn, prot);
	unsigned long val = idx, *ptr = &val;

	pr_debug("Validating PTE basic (%pGv)\n", ptr);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pte() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * that the dirty bit is set.
	 */
	WARN_ON(pte_dirty(pte_wrprotect(pte)));

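	/*
	 * Each helper is checked against its inverse: for example, a pte
	 * that was aged with pte_mkold() must test young again after
	 * pte_mkyoung(), and a pte made writable with pte_mkwrite() must
	 * no longer test writable after pte_wrprotect(). The last two
	 * checks verify that write protection neither clears nor sets
	 * the dirty state.
	 */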
	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte), args->vma)));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte, args->vma))));
	WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
	WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));
}

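/*
 * Unlike the basic tests, the "advanced" tests below write through the
 * live ptep and exercise three scenarios on an installed entry: write
 * protect followed by a full clear, an access-flags upgrade back to
 * write+dirty via ptep_set_access_flags(), and ageing via
 * ptep_test_and_clear_young().
 */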
static void __init pte_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte;

	/*
	 * Architectures optimize set_pte_at() by avoiding a TLB flush.
	 * This requires that set_pte_at() not be used to update an
	 * existing pte entry. Clear the pte before calling set_pte_at().
	 *
	 * flush_dcache_page() is called after set_pte_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when the page is released, and the page allocation check will
	 * fail when the page is allocated again. For architectures other
	 * than ARM64, the unexpected overhead of cache flushing is
	 * acceptable.
	 */
	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating PTE advanced\n");
	if (WARN_ON(!args->ptep))
		return;

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	ptep_set_wrprotect(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_write(pte));
	ptep_get_and_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_wrprotect(pte);
	pte = pte_mkclean(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	pte = pte_mkwrite(pte, args->vma);
	pte = pte_mkdirty(pte);
	ptep_set_access_flags(args->vma, args->vaddr, args->ptep, pte, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_mkyoung(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	ptep_test_and_clear_young(args->vma, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_young(pte));

	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	unsigned long val = idx, *ptr = &val;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD basic (%pGv)\n", ptr);
	pmd = pfn_pmd(args->fixed_pmd_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pmd() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * that the dirty bit is set.
	 */
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));

	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd), args->vma)));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd, args->vma))));
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
	WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));
	/*
	 * A huge page does not point to a next-level page table
	 * entry. Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

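/*
 * PMD-level counterpart of pte_advanced_tests(). In addition to the
 * wrprotect/clear, access-flags and young scenarios, this also goes
 * through the THP deposit/withdraw protocol: a page table is deposited
 * with pgtable_trans_huge_deposit() before the huge entries are
 * installed and withdrawn again at the end.
 */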
static void __init pmd_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pmd_t pmd;
	unsigned long vaddr = args->vaddr;

	if (!has_transparent_hugepage())
		return;

	page = (args->pmd_pfn != ULONG_MAX) ? pfn_to_page(args->pmd_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pmd_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when the page is released, and the page allocation check will
	 * fail when the page is allocated again. For architectures other
	 * than ARM64, the unexpected overhead of cache flushing is
	 * acceptable.
	 */
	pr_debug("Validating PMD advanced\n");
	/* Align the address wrt HPAGE_PMD_SIZE */
	vaddr &= HPAGE_PMD_MASK;

	pgtable_trans_huge_deposit(args->mm, args->pmdp, args->start_ptep);

	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmdp_set_wrprotect(args->mm, vaddr, args->pmdp);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(pmd_write(pmd));
	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
	pmd = pmd_wrprotect(pmd);
	pmd = pmd_mkclean(pmd);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmd = pmd_mkwrite(pmd, args->vma);
	pmd = pmd_mkdirty(pmd);
	pmdp_set_access_flags(args->vma, vaddr, args->pmdp, pmd, 1);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
	pmdp_huge_get_and_clear_full(args->vma, vaddr, args->pmdp, 1);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pmd_mkhuge(pfn_pmd(args->pmd_pfn, args->page_prot));
	pmd = pmd_mkyoung(pmd);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmdp_test_and_clear_young(args->vma, vaddr, args->pmdp);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(pmd_young(pmd));

	/* Clear the pmd entry and withdraw the deposited page table */
	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
	pgtable_trans_huge_withdraw(args->mm, args->pmdp);
}

static void __init pmd_leaf_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD leaf\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);

	/*
	 * PMD based THP is a leaf entry.
	 */
	pmd = pmd_mkhuge(pmd);
	WARN_ON(!pmd_leaf(pmd));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	unsigned long val = idx, *ptr = &val;
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD basic (%pGv)\n", ptr);
	pud = pfn_pud(args->fixed_pud_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pud() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * that the dirty bit is set.
	 */
	WARN_ON(pud_dirty(pud_wrprotect(pud)));

	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
	WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
	WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
	WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));

	if (mm_pmd_folded(args->mm))
		return;

	/*
	 * A huge page does not point to a next-level page table
	 * entry. Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
}

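/*
 * PUD-level counterpart of pte_advanced_tests(). Unlike the PMD case
 * there is no deposited page table at this level, and the
 * pudp_huge_get_and_clear() steps are compiled out on configurations
 * where the PMD level is folded.
 */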
static void __init pud_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	unsigned long vaddr = args->vaddr;
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	page = (args->pud_pfn != ULONG_MAX) ? pfn_to_page(args->pud_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pud_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when the page is released, and the page allocation check will
	 * fail when the page is allocated again. For architectures other
	 * than ARM64, the unexpected overhead of cache flushing is
	 * acceptable.
	 */
	pr_debug("Validating PUD advanced\n");
	/* Align the address wrt HPAGE_PUD_SIZE */
	vaddr &= HPAGE_PUD_MASK;

	pud = pfn_pud(args->pud_pfn, args->page_prot);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pudp_set_wrprotect(args->mm, vaddr, args->pudp);
	pud = pudp_get(args->pudp);
	WARN_ON(pud_write(pud));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
	pud = pudp_get(args->pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
	pud = pfn_pud(args->pud_pfn, args->page_prot);
	pud = pud_wrprotect(pud);
	pud = pud_mkclean(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pud = pud_mkwrite(pud);
	pud = pud_mkdirty(pud);
	pudp_set_access_flags(args->vma, vaddr, args->pudp, pud, 1);
	pud = pudp_get(args->pudp);
	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear_full(args->vma, vaddr, args->pudp, 1);
	pud = pudp_get(args->pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */

	pud = pfn_pud(args->pud_pfn, args->page_prot);
	pud = pud_mkyoung(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pudp_test_and_clear_young(args->vma, vaddr, args->pudp);
	pud = pudp_get(args->pudp);
	WARN_ON(pud_young(pud));

	pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
}

static void __init pud_leaf_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD leaf\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	/*
	 * PUD based THP is a leaf entry.
	 */
	pud = pud_mkhuge(pud);
	WARN_ON(!pud_leaf(pud));
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pmd_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pmd_leaf_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

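/*
 * The huge vmap tests exercise pmd_set_huge()/pud_set_huge(), which
 * install leaf entries over a real physical range. They only run when
 * init_fixed_pfns() managed to find a physical area with at least
 * PMD_SIZE (respectively PUD_SIZE) alignment, as recorded in
 * args->fixed_alignment.
 */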
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pmd_huge_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!arch_vmap_pmd_supported(args->page_prot) ||
	    args->fixed_alignment < PMD_SIZE)
		return;

	pr_debug("Validating PMD huge\n");
	/*
	 * The x86 implementation of pmd_set_huge() verifies that the
	 * given PMD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*args->pmdp, __pmd(0));
	WARN_ON(!pmd_set_huge(args->pmdp, __pfn_to_phys(args->fixed_pmd_pfn), args->page_prot));
	WARN_ON(!pmd_clear_huge(args->pmdp));
	pmd = pmdp_get(args->pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pud_huge_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!arch_vmap_pud_supported(args->page_prot) ||
	    args->fixed_alignment < PUD_SIZE)
		return;

	pr_debug("Validating PUD huge\n");
	/*
	 * The x86 implementation of pud_set_huge() verifies that the
	 * given PUD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*args->pudp, __pud(0));
	WARN_ON(!pud_set_huge(args->pudp, __pfn_to_phys(args->fixed_pud_pfn), args->page_prot));
	WARN_ON(!pud_clear_huge(args->pudp));
	pud = pudp_get(args->pudp);
	WARN_ON(!pud_none(pud));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pmd_huge_tests(struct pgtable_debug_args *args) { }
static void __init pud_huge_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

static void __init p4d_basic_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	pr_debug("Validating P4D basic\n");
	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	pr_debug("Validating PGD basic\n");
	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}

#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct pgtable_debug_args *args)
{
	pud_t pud = pudp_get(args->pudp);

	if (mm_pmd_folded(args->mm))
		return;

	pr_debug("Validating PUD clear\n");
	WARN_ON(pud_none(pud));
	pud_clear(args->pudp);
	pud = pudp_get(args->pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (mm_pmd_folded(args->mm))
		return;

	pr_debug("Validating PUD populate\n");
	/*
	 * This entry points to a next-level page table page.
	 * Hence this must not qualify as pud_bad().
	 */
	pud_populate(args->mm, args->pudp, args->start_pmdp);
	pud = pudp_get(args->pudp);
	WARN_ON(pud_bad(pud));
}
#else  /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct pgtable_debug_args *args) { }
static void __init pud_populate_tests(struct pgtable_debug_args *args) { }
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d = p4dp_get(args->p4dp);

	if (mm_pud_folded(args->mm))
		return;

	pr_debug("Validating P4D clear\n");
	WARN_ON(p4d_none(p4d));
	p4d_clear(args->p4dp);
	p4d = p4dp_get(args->p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	if (mm_pud_folded(args->mm))
		return;

	pr_debug("Validating P4D populate\n");
	/*
	 * This entry points to a next-level page table page.
	 * Hence this must not qualify as p4d_bad().
	 */
	pud_clear(args->pudp);
	p4d_clear(args->p4dp);
	p4d_populate(args->mm, args->p4dp, args->start_pudp);
	p4d = p4dp_get(args->p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd = pgdp_get(args->pgdp);

	if (mm_p4d_folded(args->mm))
		return;

	pr_debug("Validating PGD clear\n");
	WARN_ON(pgd_none(pgd));
	pgd_clear(args->pgdp);
	pgd = pgdp_get(args->pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	if (mm_p4d_folded(args->mm))
		return;

	pr_debug("Validating PGD populate\n");
	/*
	 * This entry points to a next-level page table page.
	 * Hence this must not qualify as pgd_bad().
	 */
	p4d_clear(args->p4dp);
	pgd_clear(args->pgdp);
	pgd_populate(args->mm, args->pgdp, args->start_p4dp);
	pgd = pgdp_get(args->pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else  /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct pgtable_debug_args *args) { }
static void __init pgd_clear_tests(struct pgtable_debug_args *args) { }
static void __init p4d_populate_tests(struct pgtable_debug_args *args) { }
static void __init pgd_populate_tests(struct pgtable_debug_args *args) { }
#endif /* __PAGETABLE_P4D_FOLDED */

static void __init pte_clear_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte = pfn_pte(args->pte_pfn, args->page_prot);

	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pte_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when the page is released, and the page allocation check will
	 * fail when the page is allocated again. For architectures other
	 * than ARM64, the unexpected overhead of cache flushing is
	 * acceptable.
	 */
	pr_debug("Validating PTE clear\n");
	if (WARN_ON(!args->ptep))
		return;

	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	WARN_ON(pte_none(pte));
	flush_dcache_page(page);
	barrier();
	ptep_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd = pmdp_get(args->pmdp);

	pr_debug("Validating PMD clear\n");
	WARN_ON(pmd_none(pmd));
	pmd_clear(args->pmdp);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	pr_debug("Validating PMD populate\n");
	/*
	 * This entry points to a next-level page table page.
	 * Hence this must not qualify as pmd_bad().
	 */
	pmd_populate(args->mm, args->pmdp, args->start_ptep);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(pmd_bad(pmd));
}

static void __init pte_special_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
		return;

	pr_debug("Validating PTE special\n");
	WARN_ON(!pte_special(pte_mkspecial(pte)));
}

static void __init pte_protnone_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot_none);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE protnone\n");
	WARN_ON(!pte_protnone(pte));
	WARN_ON(!pte_present(pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD protnone\n");
	pmd = pmd_mkhuge(pfn_pmd(args->fixed_pmd_pfn, args->page_prot_none));
	WARN_ON(!pmd_protnone(pmd));
	WARN_ON(!pmd_present(pmd));
}
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_protnone_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __init pte_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE soft dirty\n");
	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}

static void __init pte_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE swap soft dirty\n");
	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD soft dirty\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
	    !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap soft dirty\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args) { }
static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __init pte_swap_exclusive_tests(struct pgtable_debug_args *args)
{
	unsigned long max_swap_offset;
	swp_entry_t entry, entry2;
	pte_t pte;

	pr_debug("Validating PTE swap exclusive\n");

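	/*
	 * The maximum representable swap offset is probed by round-tripping
	 * an all-ones offset through the arch pte encoding below: any offset
	 * bits that do not fit in the pte format are truncated on the way
	 * back, so the decoded offset is the largest one supported.
	 */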
	/* See generic_max_swapfile_size(): probe the maximum offset */
	max_swap_offset = swp_offset(pte_to_swp_entry(swp_entry_to_pte(swp_entry(0, ~0UL))));

	/* Create a swp entry with all possible bits set */
	entry = swp_entry((1 << MAX_SWAPFILES_SHIFT) - 1, max_swap_offset);

	pte = swp_entry_to_pte(entry);
	WARN_ON(pte_swp_exclusive(pte));
	WARN_ON(!is_swap_pte(pte));
	entry2 = pte_to_swp_entry(pte);
	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));

	pte = pte_swp_mkexclusive(pte);
	WARN_ON(!pte_swp_exclusive(pte));
	WARN_ON(!is_swap_pte(pte));
	WARN_ON(pte_swp_soft_dirty(pte));
	entry2 = pte_to_swp_entry(pte);
	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));

	pte = pte_swp_clear_exclusive(pte);
	WARN_ON(pte_swp_exclusive(pte));
	WARN_ON(!is_swap_pte(pte));
	entry2 = pte_to_swp_entry(pte);
	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));
}

static void __init pte_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t swp;
	pte_t pte;

	pr_debug("Validating PTE swap\n");
	pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);
	swp = __pte_to_swp_entry(pte);
	pte = __swp_entry_to_pte(swp);
	WARN_ON(args->fixed_pte_pfn != pte_pfn(pte));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static void __init pmd_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t swp;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	swp = __pmd_to_swp_entry(pmd);
	pmd = __swp_entry_to_pmd(swp);
	WARN_ON(args->fixed_pmd_pfn != pmd_pfn(pmd));
}
#else  /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init pmd_swap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

static void __init swap_migration_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	swp_entry_t swp;

	if (!IS_ENABLED(CONFIG_MIGRATION))
		return;

	/*
	 * swap_migration_tests() requires a dedicated page as it needs to
	 * be locked before creating a migration entry from it. Locking the
	 * page that actually maps kernel text ('start_kernel') can be really
	 * problematic. Let's use the allocated page explicitly for this
	 * purpose.
	 */
	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating swap migration\n");

	/*
	 * make_[readable|writable]_migration_entry() expects the given page
	 * to be locked, otherwise it stumbles upon a BUG_ON().
	 */
	__SetPageLocked(page);
	swp = make_writable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(!is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(swp_offset(swp));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));
	__ClearPageLocked(page);
}

#ifdef CONFIG_HUGETLB_PAGE
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args)
{
	pte_t pte;

	pr_debug("Validating HugeTLB basic\n");
	pte = pfn_pte(args->fixed_pmd_pfn, args->page_prot);
	pte = arch_make_huge_pte(pte, PMD_SHIFT, VM_ACCESS_FLAGS);

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
	WARN_ON(!pte_huge(pte));
#endif
	WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
	WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
	WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));
}
#else  /* !CONFIG_HUGETLB_PAGE */
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_thp_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD based THP\n");
	/*
	 * pmd_trans_huge() and pmd_present() must return true after
	 * MMU invalidation with pmd_mkinvalid(). This behavior is an
	 * optimization for transparent huge pages. pmd_trans_huge() must
	 * be true if pmd_page() returns a valid THP, to avoid taking the
	 * pmd_lock when others walk over non-transhuge pmds (i.e. there
	 * are no THPs allocated). Especially when splitting a THP and
	 * removing the present bit from the pmd, pmd_trans_huge() still
	 * needs to return true. pmd_present() should be true whenever
	 * pmd_trans_huge() returns true.
	 */
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
	WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_leaf(pmd_mkinvalid(pmd_mkhuge(pmd))));
#endif /* __HAVE_ARCH_PMDP_INVALIDATE */
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_thp_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD based THP\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));

	/*
	 * pud_mkinvalid() has been dropped for now. Re-enable these
	 * tests when it comes back with a modified pud_present().
	 *
	 * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
	 * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
	 */
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_thp_tests(struct pgtable_debug_args *args) { }
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

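/*
 * Pick a random, page-aligned address in the user address range
 * [FIRST_USER_ADDRESS, TASK_SIZE). The address only serves as a key
 * for walking and populating the test mm's page tables; nothing is
 * ever mapped at it.
 */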
static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}

static void __init destroy_args(struct pgtable_debug_args *args)
{
	struct page *page = NULL;

	/* Free (huge) page */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_pud_hugepage() &&
	    args->pud_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pud_pfn,
					  (1 << (HPAGE_PUD_SHIFT - PAGE_SHIFT)));
		} else {
			page = pfn_to_page(args->pud_pfn);
			__free_pages(page, HPAGE_PUD_SHIFT - PAGE_SHIFT);
		}

		args->pud_pfn = ULONG_MAX;
		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage() &&
	    args->pmd_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pmd_pfn, (1 << HPAGE_PMD_ORDER));
		} else {
			page = pfn_to_page(args->pmd_pfn);
			__free_pages(page, HPAGE_PMD_ORDER);
		}

		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (args->pte_pfn != ULONG_MAX) {
		page = pfn_to_page(args->pte_pfn);
		__free_page(page);

		args->pte_pfn = ULONG_MAX;
	}

	/* Free page table entries */
	if (args->start_ptep) {
		pte_free(args->mm, args->start_ptep);
		mm_dec_nr_ptes(args->mm);
	}

	if (args->start_pmdp) {
		pmd_free(args->mm, args->start_pmdp);
		mm_dec_nr_pmds(args->mm);
	}

	if (args->start_pudp) {
		pud_free(args->mm, args->start_pudp);
		mm_dec_nr_puds(args->mm);
	}

	if (args->start_p4dp)
		p4d_free(args->mm, args->start_p4dp);

	/* Free vma and mm struct */
	if (args->vma)
		vm_area_free(args->vma);

	if (args->mm)
		mmdrop(args->mm);
}

static struct page * __init
debug_vm_pgtable_alloc_huge_page(struct pgtable_debug_args *args, int order)
{
	struct page *page = NULL;

#ifdef CONFIG_CONTIG_ALLOC
	if (order > MAX_PAGE_ORDER) {
		page = alloc_contig_pages((1 << order), GFP_KERNEL,
					  first_online_node, NULL);
		if (page) {
			args->is_contiguous_page = true;
			return page;
		}
	}
#endif

	if (order <= MAX_PAGE_ORDER)
		page = alloc_pages(GFP_KERNEL, order);

	return page;
}

/*
 * Check if the physical memory range described by <pstart, pend>
 * contains an area of size psize that is also aligned to psize.
 *
 * Don't use address 0: an all-zeroes physical address might mask bugs,
 * and it's not used on x86.
 */
static void __init phys_align_check(phys_addr_t pstart,
				    phys_addr_t pend, unsigned long psize,
				    phys_addr_t *physp, unsigned long *alignp)
{
	phys_addr_t aligned_start, aligned_end;

	if (pstart == 0)
		pstart = PAGE_SIZE;

	aligned_start = ALIGN(pstart, psize);
	aligned_end = aligned_start + psize;

	if (aligned_end > aligned_start && aligned_end <= pend) {
		*alignp = psize;
		*physp = aligned_start;
	}
}

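/*
 * As an illustration (not taken from a real memblock layout): with
 * pstart = 0x40000000, pend = 0xc0000000 and a 1 GiB psize,
 * aligned_start is 0x40000000 and aligned_end is 0x80000000, which
 * still lies within the range, so the area qualifies and *physp /
 * *alignp are updated. The aligned_end > aligned_start check guards
 * against the case where ALIGN() wraps around near the top of
 * phys_addr_t.
 */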
static void __init init_fixed_pfns(struct pgtable_debug_args *args)
{
	u64 idx;
	phys_addr_t phys, pstart, pend;

	/*
	 * Initialize the fixed pfns. To do this, try to find a
	 * valid physical range, preferably aligned to PUD_SIZE,
	 * but settling for aligned to PMD_SIZE as a fallback. If
	 * neither of those is found, use the physical address of
	 * the start_kernel symbol.
	 *
	 * The memory doesn't need to be allocated, it just needs to exist
	 * as usable memory. It won't be touched.
	 *
	 * The alignment is recorded, and can be checked to see if we
	 * can run the tests that require an actual valid physical
	 * address range on some architectures ({pmd,pud}_huge_tests
	 * on x86).
	 */

	phys = __pa_symbol(&start_kernel);
	args->fixed_alignment = PAGE_SIZE;

	for_each_mem_range(idx, &pstart, &pend) {
		/* First check for a PUD-aligned area */
		phys_align_check(pstart, pend, PUD_SIZE, &phys,
				 &args->fixed_alignment);

		/* If a PUD-aligned area is found, we're done */
		if (args->fixed_alignment == PUD_SIZE)
			break;

		/*
		 * If no PMD-aligned area has been found yet, check for one,
		 * but continue the loop to look for a PUD-aligned area.
		 */
		if (args->fixed_alignment < PMD_SIZE)
			phys_align_check(pstart, pend, PMD_SIZE, &phys,
					 &args->fixed_alignment);
	}

	args->fixed_pgd_pfn = __phys_to_pfn(phys & PGDIR_MASK);
	args->fixed_p4d_pfn = __phys_to_pfn(phys & P4D_MASK);
	args->fixed_pud_pfn = __phys_to_pfn(phys & PUD_MASK);
	args->fixed_pmd_pfn = __phys_to_pfn(phys & PMD_MASK);
	args->fixed_pte_pfn = __phys_to_pfn(phys & PAGE_MASK);
	WARN_ON(!pfn_valid(args->fixed_pte_pfn));
}

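/*
 * Set up everything the tests need: an mm with a vma, page table
 * entries at each level covering the random vaddr, the fixed pfns,
 * and the test page(s). For the latter, a PUD-sized huge page is
 * preferred, falling back to a PMD-sized huge page and finally to a
 * single base page; whatever is allocated also backs the pfns of all
 * smaller levels.
 */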
static int __init init_args(struct pgtable_debug_args *args)
{
	struct page *page = NULL;
	int ret = 0;

	/*
	 * Initialize the debugging data.
	 *
	 * vm_get_page_prot(VM_NONE) or vm_get_page_prot(VM_SHARED|VM_NONE)
	 * will help create page table entries with PROT_NONE permission as
	 * required for pxx_protnone_tests().
	 */
	memset(args, 0, sizeof(*args));
	args->vaddr              = get_random_vaddr();
	args->page_prot          = vm_get_page_prot(VM_ACCESS_FLAGS);
	args->page_prot_none     = vm_get_page_prot(VM_NONE);
	args->is_contiguous_page = false;
	args->pud_pfn            = ULONG_MAX;
	args->pmd_pfn            = ULONG_MAX;
	args->pte_pfn            = ULONG_MAX;
	args->fixed_pgd_pfn      = ULONG_MAX;
	args->fixed_p4d_pfn      = ULONG_MAX;
	args->fixed_pud_pfn      = ULONG_MAX;
	args->fixed_pmd_pfn      = ULONG_MAX;
	args->fixed_pte_pfn      = ULONG_MAX;

	/* Allocate mm and vma */
	args->mm = mm_alloc();
	if (!args->mm) {
		pr_err("Failed to allocate mm struct\n");
		ret = -ENOMEM;
		goto error;
	}

	args->vma = vm_area_alloc(args->mm);
	if (!args->vma) {
		pr_err("Failed to allocate vma\n");
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * Allocate page table entries. They will be modified in the tests.
	 * Let's save the page table entries so that they can be released
	 * when the tests are completed.
	 */
	args->pgdp = pgd_offset(args->mm, args->vaddr);
	args->p4dp = p4d_alloc(args->mm, args->pgdp, args->vaddr);
	if (!args->p4dp) {
		pr_err("Failed to allocate p4d entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_p4dp = p4d_offset(args->pgdp, 0UL);
	WARN_ON(!args->start_p4dp);

	args->pudp = pud_alloc(args->mm, args->p4dp, args->vaddr);
	if (!args->pudp) {
		pr_err("Failed to allocate pud entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pudp = pud_offset(args->p4dp, 0UL);
	WARN_ON(!args->start_pudp);

	args->pmdp = pmd_alloc(args->mm, args->pudp, args->vaddr);
	if (!args->pmdp) {
		pr_err("Failed to allocate pmd entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pmdp = pmd_offset(args->pudp, 0UL);
	WARN_ON(!args->start_pmdp);

	if (pte_alloc(args->mm, args->pmdp)) {
		pr_err("Failed to allocate pte entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_ptep = pmd_pgtable(pmdp_get(args->pmdp));
	WARN_ON(!args->start_ptep);

	init_fixed_pfns(args);

	/*
	 * Allocate (huge) pages because some of the tests need to access
	 * the data in the pages. The corresponding tests will be skipped
	 * if we fail to allocate (huge) pages.
	 */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_pud_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args,
				HPAGE_PUD_SHIFT - PAGE_SHIFT);
		if (page) {
			args->pud_pfn = page_to_pfn(page);
			args->pmd_pfn = args->pud_pfn;
			args->pte_pfn = args->pud_pfn;
			return 0;
		}
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args, HPAGE_PMD_ORDER);
		if (page) {
			args->pmd_pfn = page_to_pfn(page);
			args->pte_pfn = args->pmd_pfn;
			return 0;
		}
	}

	page = alloc_page(GFP_KERNEL);
	if (page)
		args->pte_pfn = page_to_pfn(page);

	return 0;

error:
	destroy_args(args);
	return ret;
}

static int __init debug_vm_pgtable(void)
{
	struct pgtable_debug_args args;
	spinlock_t *ptl = NULL;
	int idx, ret;

	pr_info("Validating architecture page table helpers\n");
	ret = init_args(&args);
	if (ret)
		return ret;

	/*
	 * Iterate over each possible vm_flags combination to make sure
	 * that all the basic page table transformation validations hold
	 * true irrespective of the starting protection value for a
	 * given page table entry.
	 *
	 * Protection based vm_flags combinations are always linear
	 * and increasing, i.e. starting from VM_NONE and going up to
	 * (VM_SHARED | READ | WRITE | EXEC).
	 */
#define VM_FLAGS_START	(VM_NONE)
#define VM_FLAGS_END	(VM_SHARED | VM_EXEC | VM_WRITE | VM_READ)

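	/*
	 * With VM_READ, VM_WRITE, VM_EXEC and VM_SHARED occupying the low
	 * four vm_flags bits, this loop runs idx from 0 (VM_NONE) through
	 * 15 (VM_SHARED | VM_EXEC | VM_WRITE | VM_READ), covering every
	 * read/write/exec/shared protection combination.
	 */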
	for (idx = VM_FLAGS_START; idx <= VM_FLAGS_END; idx++) {
		pte_basic_tests(&args, idx);
		pmd_basic_tests(&args, idx);
		pud_basic_tests(&args, idx);
	}

	/*
	 * Both the P4D and PGD level tests are very basic and do not
	 * involve creating page table entries from the protection
	 * value and the given pfn. Hence just keep them out of the
	 * above iteration for now to save some test execution time.
	 */
	p4d_basic_tests(&args);
	pgd_basic_tests(&args);

	pmd_leaf_tests(&args);
	pud_leaf_tests(&args);

	pte_special_tests(&args);
	pte_protnone_tests(&args);
	pmd_protnone_tests(&args);

	pte_soft_dirty_tests(&args);
	pmd_soft_dirty_tests(&args);
	pte_swap_soft_dirty_tests(&args);
	pmd_swap_soft_dirty_tests(&args);

	pte_swap_exclusive_tests(&args);

	pte_swap_tests(&args);
	pmd_swap_tests(&args);

	swap_migration_tests(&args);

	pmd_thp_tests(&args);
	pud_thp_tests(&args);

	hugetlb_basic_tests(&args);

	/*
	 * Page table modifying tests. They need to hold the
	 * proper page table locks.
	 */

	args.ptep = pte_offset_map_lock(args.mm, args.pmdp, args.vaddr, &ptl);
	pte_clear_tests(&args);
	pte_advanced_tests(&args);
	if (args.ptep)
		pte_unmap_unlock(args.ptep, ptl);

	ptl = pmd_lock(args.mm, args.pmdp);
	pmd_clear_tests(&args);
	pmd_advanced_tests(&args);
	pmd_huge_tests(&args);
	pmd_populate_tests(&args);
	spin_unlock(ptl);

	ptl = pud_lock(args.mm, args.pudp);
	pud_clear_tests(&args);
	pud_advanced_tests(&args);
	pud_huge_tests(&args);
	pud_populate_tests(&args);
	spin_unlock(ptl);

	spin_lock(&(args.mm->page_table_lock));
	p4d_clear_tests(&args);
	pgd_clear_tests(&args);
	p4d_populate_tests(&args);
	pgd_populate_tests(&args);
	spin_unlock(&(args.mm->page_table_lock));

	destroy_args(&args);
	return 0;
}
late_initcall(debug_vm_pgtable);