// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pgalloc.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * Please refer to Documentation/mm/arch_pgtable_helpers.rst for the
 * semantics expectations that are being validated here. All future
 * changes here or in that documentation need to stay in sync.
 */
#define RANDOM_NZVALUE	GENMASK(7, 0)

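/*
 * Everything a single test run needs in one place: the scratch mm and
 * vma, a live entry pointer at each page table level (plus the
 * start-of-table pointers required to free those tables later), the
 * protections and pfns used to build test entries, and a swap entry
 * with all arch-visible bits set.
 */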
struct pgtable_debug_args {
	struct mm_struct	*mm;
	struct vm_area_struct	*vma;

	pgd_t			*pgdp;
	p4d_t			*p4dp;
	pud_t			*pudp;
	pmd_t			*pmdp;
	pte_t			*ptep;

	p4d_t			*start_p4dp;
	pud_t			*start_pudp;
	pmd_t			*start_pmdp;
	pgtable_t		start_ptep;

	unsigned long		vaddr;
	pgprot_t		page_prot;
	pgprot_t		page_prot_none;

	bool			is_contiguous_page;
	unsigned long		pud_pfn;
	unsigned long		pmd_pfn;
	unsigned long		pte_pfn;

	unsigned long		fixed_alignment;
	unsigned long		fixed_pgd_pfn;
	unsigned long		fixed_p4d_pfn;
	unsigned long		fixed_pud_pfn;
	unsigned long		fixed_pmd_pfn;
	unsigned long		fixed_pte_pfn;

	swp_entry_t		swp_entry;
};

static void __init pte_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	pte_t pte = pfn_pte(args->fixed_pte_pfn, prot);
	unsigned long val = idx, *ptr = &val;

	pr_debug("Validating PTE basic (%pGv)\n", ptr);

	/*
	 * This check must run on a fresh entry created with pfn_pte() to
	 * make sure that vm_get_page_prot(idx) does not come with the
	 * dirty bit already set. This matters on platforms like arm64,
	 * where (!PTE_RDONLY) indicates the dirty bit being set.
	 */
	WARN_ON(pte_dirty(pte_wrprotect(pte)));

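	/*
	 * Attribute helpers must round-trip: setting young/dirty/write
	 * after clearing it (and vice versa) must stick, and wrprotect
	 * must leave the dirty bit alone.
	 */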
	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte), args->vma)));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte, args->vma))));
	WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
	WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));

	WARN_ON(!pte_dirty(pte_mkwrite_novma(pte_mkdirty(pte))));
	WARN_ON(pte_dirty(pte_mkwrite_novma(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkdirty(pte_mkwrite_novma(pte))));
	WARN_ON(!pte_write(pte_mkwrite_novma(pte_wrprotect(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite_novma(pte))));
}

static void __init pte_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte;

	/*
	 * Architectures optimize set_pte_at() by avoiding a TLB flush.
	 * This requires that set_pte_at() never be used to update an
	 * existing PTE entry, so clear the PTE before calling set_pte_at().
	 *
	 * flush_dcache_page() is called after set_pte_at() to clear
	 * PG_arch_1 for the page on arm64. The page flag isn't cleared
	 * when the page is released, so the page allocation check would
	 * fail when the page is allocated again. For architectures other
	 * than arm64, the extra overhead of cache flushing is acceptable.
	 */
	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating PTE advanced\n");
	if (WARN_ON(!args->ptep))
		return;

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	ptep_set_wrprotect(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_write(pte));
	ptep_get_and_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

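	/*
	 * Upgrading a clean, write-protected PTE via
	 * ptep_set_access_flags() must make it read back as writable
	 * and dirty.
	 */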
	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_wrprotect(pte);
	pte = pte_mkclean(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	pte = pte_mkwrite(pte, args->vma);
	pte = pte_mkdirty(pte);
	ptep_set_access_flags(args->vma, args->vaddr, args->ptep, pte, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_mkyoung(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	ptep_test_and_clear_young(args->vma, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_young(pte));

	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	unsigned long val = idx, *ptr = &val;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD basic (%pGv)\n", ptr);
	pmd = pfn_pmd(args->fixed_pmd_pfn, prot);

	/*
	 * This check must run on a fresh entry created with pfn_pmd() to
	 * make sure that vm_get_page_prot(idx) does not come with the
	 * dirty bit already set. This matters on platforms like arm64,
	 * where (!PTE_RDONLY) indicates the dirty bit being set.
	 */
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));

	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd), args->vma)));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd, args->vma))));
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
	WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));

	WARN_ON(!pmd_dirty(pmd_mkwrite_novma(pmd_mkdirty(pmd))));
	WARN_ON(pmd_dirty(pmd_mkwrite_novma(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkdirty(pmd_mkwrite_novma(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite_novma(pmd_wrprotect(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite_novma(pmd))));

	/*
	 * A huge page does not point to a next-level page table;
	 * hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

static void __init pmd_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pmd_t pmd;
	unsigned long vaddr = args->vaddr;

	if (!has_transparent_hugepage())
		return;

	page = (args->pmd_pfn != ULONG_MAX) ? pfn_to_page(args->pmd_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pmd_at() to clear
	 * PG_arch_1 for the page on arm64. The page flag isn't cleared
	 * when the page is released, so the page allocation check would
	 * fail when the page is allocated again. For architectures other
	 * than arm64, the extra overhead of cache flushing is acceptable.
	 */
	pr_debug("Validating PMD advanced\n");
	/* Align the address to HPAGE_PMD_SIZE */
	vaddr &= HPAGE_PMD_MASK;

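	/*
	 * Deposit a preallocated page table: a THP mapping at PMD level
	 * keeps one on hand so a later split can repopulate the PTEs.
	 */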
	pgtable_trans_huge_deposit(args->mm, args->pmdp, args->start_ptep);

	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmdp_set_wrprotect(args->mm, vaddr, args->pmdp);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(pmd_write(pmd));
	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(!pmd_none(pmd));

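	/*
	 * As with PTEs: a clean, write-protected PMD upgraded via
	 * pmdp_set_access_flags() must read back writable and dirty.
	 */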
	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
	pmd = pmd_wrprotect(pmd);
	pmd = pmd_mkclean(pmd);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmd = pmd_mkwrite(pmd, args->vma);
	pmd = pmd_mkdirty(pmd);
	pmdp_set_access_flags(args->vma, vaddr, args->pmdp, pmd, 1);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
	pmdp_huge_get_and_clear_full(args->vma, vaddr, args->pmdp, 1);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pmd_mkhuge(pfn_pmd(args->pmd_pfn, args->page_prot));
	pmd = pmd_mkyoung(pmd);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmdp_test_and_clear_young(args->vma, vaddr, args->pmdp);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(pmd_young(pmd));

	/* Clear the PMD entry and withdraw the deposited page table */
	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
	pgtable_trans_huge_withdraw(args->mm, args->pmdp);
}

static void __init pmd_leaf_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD leaf\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);

	/*
	 * PMD based THP is a leaf entry.
	 */
	pmd = pmd_mkhuge(pmd);
	WARN_ON(!pmd_leaf(pmd));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	unsigned long val = idx, *ptr = &val;
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD basic (%pGv)\n", ptr);
	pud = pfn_pud(args->fixed_pud_pfn, prot);

	/*
	 * This check must run on a fresh entry created with pfn_pud() to
	 * make sure that vm_get_page_prot(idx) does not come with the
	 * dirty bit already set. This matters on platforms like arm64,
	 * where (!PTE_RDONLY) indicates the dirty bit being set.
	 */
	WARN_ON(pud_dirty(pud_wrprotect(pud)));

	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
	WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
	WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
	WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));

	if (mm_pmd_folded(args->mm))
		return;

	/*
	 * A huge page does not point to a next-level page table;
	 * hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
}

static void __init pud_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	unsigned long vaddr = args->vaddr;
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	page = (args->pud_pfn != ULONG_MAX) ? pfn_to_page(args->pud_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pud_at() to clear
	 * PG_arch_1 for the page on arm64. The page flag isn't cleared
	 * when the page is released, so the page allocation check would
	 * fail when the page is allocated again. For architectures other
	 * than arm64, the extra overhead of cache flushing is acceptable.
	 */
	pr_debug("Validating PUD advanced\n");
	/* Align the address to HPAGE_PUD_SIZE */
	vaddr &= HPAGE_PUD_MASK;

	pud = pfn_pud(args->pud_pfn, args->page_prot);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pudp_set_wrprotect(args->mm, vaddr, args->pudp);
	pud = pudp_get(args->pudp);
	WARN_ON(pud_write(pud));

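	/*
	 * pudp_huge_get_and_clear() is only exercised when the PMD level
	 * actually exists; with a folded PMD there is no real entry to
	 * clear at this level.
	 */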
#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
	pud = pudp_get(args->pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
	pud = pfn_pud(args->pud_pfn, args->page_prot);
	pud = pud_wrprotect(pud);
	pud = pud_mkclean(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pud = pud_mkwrite(pud);
	pud = pud_mkdirty(pud);
	pudp_set_access_flags(args->vma, vaddr, args->pudp, pud, 1);
	pud = pudp_get(args->pudp);
	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear_full(args->vma, vaddr, args->pudp, 1);
	pud = pudp_get(args->pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */

	pud = pfn_pud(args->pud_pfn, args->page_prot);
	pud = pud_mkyoung(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pudp_test_and_clear_young(args->vma, vaddr, args->pudp);
	pud = pudp_get(args->pudp);
	WARN_ON(pud_young(pud));

	pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
}

static void __init pud_leaf_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD leaf\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	/*
	 * PUD based THP is a leaf entry.
	 */
	pud = pud_mkhuge(pud);
	WARN_ON(!pud_leaf(pud));
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pmd_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pmd_leaf_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pmd_huge_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!arch_vmap_pmd_supported(args->page_prot) ||
	    args->fixed_alignment < PMD_SIZE)
		return;

	pr_debug("Validating PMD huge\n");
	/*
	 * The x86 implementation of pmd_set_huge() verifies that the
	 * given PMD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*args->pmdp, __pmd(0));
	WARN_ON(!pmd_set_huge(args->pmdp, __pfn_to_phys(args->fixed_pmd_pfn), args->page_prot));
	WARN_ON(!pmd_clear_huge(args->pmdp));
	pmd = pmdp_get(args->pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pud_huge_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!arch_vmap_pud_supported(args->page_prot) ||
	    args->fixed_alignment < PUD_SIZE)
		return;

	pr_debug("Validating PUD huge\n");
	/*
	 * The x86 implementation of pud_set_huge() verifies that the
	 * given PUD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*args->pudp, __pud(0));
	WARN_ON(!pud_set_huge(args->pudp, __pfn_to_phys(args->fixed_pud_pfn), args->page_prot));
	WARN_ON(!pud_clear_huge(args->pudp));
	pud = pudp_get(args->pudp);
	WARN_ON(!pud_none(pud));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pmd_huge_tests(struct pgtable_debug_args *args) { }
static void __init pud_huge_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

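/*
 * The P4D and PGD "basic" tests only exercise pxx_same() on a stack
 * variable filled with RANDOM_NZVALUE; no live entry is touched.
 */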
static void __init p4d_basic_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	pr_debug("Validating P4D basic\n");
	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	pr_debug("Validating PGD basic\n");
	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}

#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct pgtable_debug_args *args)
{
	pud_t pud = pudp_get(args->pudp);

	if (mm_pmd_folded(args->mm))
		return;

	pr_debug("Validating PUD clear\n");
	WARN_ON(pud_none(pud));
	pud_clear(args->pudp);
	pud = pudp_get(args->pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (mm_pmd_folded(args->mm))
		return;

	pr_debug("Validating PUD populate\n");
	/*
	 * This entry points to the next-level page table page;
	 * hence this must not qualify as pud_bad().
	 */
	pud_populate(args->mm, args->pudp, args->start_pmdp);
	pud = pudp_get(args->pudp);
	WARN_ON(pud_bad(pud));
}
#else  /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct pgtable_debug_args *args) { }
static void __init pud_populate_tests(struct pgtable_debug_args *args) { }
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d = p4dp_get(args->p4dp);

	if (mm_pud_folded(args->mm))
		return;

	pr_debug("Validating P4D clear\n");
	WARN_ON(p4d_none(p4d));
	p4d_clear(args->p4dp);
	p4d = p4dp_get(args->p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	if (mm_pud_folded(args->mm))
		return;

	pr_debug("Validating P4D populate\n");
	/*
	 * This entry points to the next-level page table page;
	 * hence this must not qualify as p4d_bad().
	 */
	pud_clear(args->pudp);
	p4d_clear(args->p4dp);
	p4d_populate(args->mm, args->p4dp, args->start_pudp);
	p4d = p4dp_get(args->p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd = pgdp_get(args->pgdp);

	if (mm_p4d_folded(args->mm))
		return;

	pr_debug("Validating PGD clear\n");
	WARN_ON(pgd_none(pgd));
	pgd_clear(args->pgdp);
	pgd = pgdp_get(args->pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	if (mm_p4d_folded(args->mm))
		return;

	pr_debug("Validating PGD populate\n");
	/*
	 * This entry points to the next-level page table page;
	 * hence this must not qualify as pgd_bad().
	 */
	p4d_clear(args->p4dp);
	pgd_clear(args->pgdp);
	pgd_populate(args->mm, args->pgdp, args->start_p4dp);
	pgd = pgdp_get(args->pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else  /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct pgtable_debug_args *args) { }
static void __init pgd_clear_tests(struct pgtable_debug_args *args) { }
static void __init p4d_populate_tests(struct pgtable_debug_args *args) { }
static void __init pgd_populate_tests(struct pgtable_debug_args *args) { }
#endif /* __PAGETABLE_P4D_FOLDED */

static void __init pte_clear_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte = pfn_pte(args->pte_pfn, args->page_prot);

	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pte_at() to clear
	 * PG_arch_1 for the page on arm64. The page flag isn't cleared
	 * when the page is released, so the page allocation check would
	 * fail when the page is allocated again. For architectures other
	 * than arm64, the extra overhead of cache flushing is acceptable.
	 */
	pr_debug("Validating PTE clear\n");
	if (WARN_ON(!args->ptep))
		return;

	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	WARN_ON(pte_none(pte));
	flush_dcache_page(page);
	barrier();
	ptep_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd = pmdp_get(args->pmdp);

	pr_debug("Validating PMD clear\n");
	WARN_ON(pmd_none(pmd));
	pmd_clear(args->pmdp);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	pr_debug("Validating PMD populate\n");
	/*
	 * This entry points to the next-level page table page;
	 * hence this must not qualify as pmd_bad().
	 */
	pmd_populate(args->mm, args->pmdp, args->start_ptep);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(pmd_bad(pmd));
}

static void __init pte_special_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
		return;

	pr_debug("Validating PTE special\n");
	WARN_ON(!pte_special(pte_mkspecial(pte)));
}

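/*
 * With NUMA balancing, PROT_NONE entries stand in for NUMA hinting
 * faults, so they must read as protnone while still being present.
 */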
static void __init pte_protnone_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot_none);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE protnone\n");
	WARN_ON(!pte_protnone(pte));
	WARN_ON(!pte_present(pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD protnone\n");
	pmd = pmd_mkhuge(pfn_pmd(args->fixed_pmd_pfn, args->page_prot_none));
	WARN_ON(!pmd_protnone(pmd));
	WARN_ON(!pmd_present(pmd));
}
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_protnone_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

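/*
 * Soft-dirty bits must be settable and clearable both on present
 * entries and on their swap forms, independently of each other.
 */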
static void __init pte_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE soft dirty\n");
	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}

static void __init pte_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE swap soft dirty\n");
	pte = swp_entry_to_pte(args->swp_entry);
	WARN_ON(!is_swap_pte(pte));

	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD soft dirty\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
		!IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap soft dirty\n");
	pmd = swp_entry_to_pmd(args->swp_entry);
	WARN_ON(!is_swap_pmd(pmd));

	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args) { }
static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

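/*
 * The swap-exclusive bit must toggle freely without disturbing the
 * type/offset encoded in the entry, hence the memcmp() round-trips.
 */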
static void __init pte_swap_exclusive_tests(struct pgtable_debug_args *args)
{
	swp_entry_t entry, entry2;
	pte_t pte;

	pr_debug("Validating PTE swap exclusive\n");
	entry = args->swp_entry;

	pte = swp_entry_to_pte(entry);
	WARN_ON(pte_swp_exclusive(pte));
	WARN_ON(!is_swap_pte(pte));
	entry2 = pte_to_swp_entry(pte);
	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));

	pte = pte_swp_mkexclusive(pte);
	WARN_ON(!pte_swp_exclusive(pte));
	WARN_ON(!is_swap_pte(pte));
	WARN_ON(pte_swp_soft_dirty(pte));
	entry2 = pte_to_swp_entry(pte);
	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));

	pte = pte_swp_clear_exclusive(pte);
	WARN_ON(pte_swp_exclusive(pte));
	WARN_ON(!is_swap_pte(pte));
	entry2 = pte_to_swp_entry(pte);
	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));
}

static void __init pte_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t arch_entry;
	pte_t pte1, pte2;

	pr_debug("Validating PTE swap\n");
	pte1 = swp_entry_to_pte(args->swp_entry);
	WARN_ON(!is_swap_pte(pte1));

	arch_entry = __pte_to_swp_entry(pte1);
	pte2 = __swp_entry_to_pte(arch_entry);
	WARN_ON(memcmp(&pte1, &pte2, sizeof(pte1)));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static void __init pmd_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t arch_entry;
	pmd_t pmd1, pmd2;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap\n");
	pmd1 = swp_entry_to_pmd(args->swp_entry);
	WARN_ON(!is_swap_pmd(pmd1));

	arch_entry = __pmd_to_swp_entry(pmd1);
	pmd2 = __swp_entry_to_pmd(arch_entry);
	WARN_ON(memcmp(&pmd1, &pmd2, sizeof(pmd1)));
}
#else  /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init pmd_swap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

831 
832 static void __init swap_migration_tests(struct pgtable_debug_args *args)
833 {
834 	struct page *page;
835 	swp_entry_t swp;
836 
837 	if (!IS_ENABLED(CONFIG_MIGRATION))
838 		return;
839 
	/*
	 * swap_migration_tests() requires a dedicated page as it needs
	 * to be locked before creating a migration entry from it.
	 * Locking the page that actually maps kernel text ('start_kernel')
	 * can be genuinely problematic. Let's use the allocated page
	 * explicitly for this purpose.
	 */
	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating swap migration\n");

	/*
	 * make_[readable|writable]_migration_entry() expects the given
	 * page to be locked, otherwise it trips a BUG_ON().
	 */
	__SetPageLocked(page);
	swp = make_writable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(!is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(swp_offset(swp));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));
	__ClearPageLocked(page);
}

#ifdef CONFIG_HUGETLB_PAGE
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args)
{
	pte_t pte;

	pr_debug("Validating HugeTLB basic\n");
	pte = pfn_pte(args->fixed_pmd_pfn, args->page_prot);
	pte = arch_make_huge_pte(pte, PMD_SHIFT, VM_ACCESS_FLAGS);

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
	WARN_ON(!pte_huge(pte));
#endif
	WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
	WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
	WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));
}
#else  /* !CONFIG_HUGETLB_PAGE */
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_thp_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD based THP\n");
	/*
	 * pmd_trans_huge() and pmd_present() must return true after MMU
	 * invalidation with pmd_mkinvalid(). This behavior is an
	 * optimization for transparent huge pages. pmd_trans_huge() must
	 * be true if pmd_page() returns a valid THP, to avoid taking the
	 * pmd_lock when others walk over non-transhuge pmds (i.e. there
	 * are no THPs allocated). In particular, when splitting a THP and
	 * removing the present bit from the pmd, pmd_trans_huge() still
	 * needs to return true. pmd_present() should be true whenever
	 * pmd_trans_huge() returns true.
	 */
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
	WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_leaf(pmd_mkinvalid(pmd_mkhuge(pmd))));
#endif /* __HAVE_ARCH_PMDP_INVALIDATE */
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_thp_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD based THP\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));

	/*
	 * pud_mkinvalid() has been dropped for now. Re-enable these
	 * tests when it is reintroduced with a modified pud_present().
	 *
	 * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
	 * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
	 */
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_thp_tests(struct pgtable_debug_args *args) { }
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

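/*
 * Pick a random page-aligned address in the user range. No real
 * mapping is ever created there; the address only has to index
 * valid page table slots.
 */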
static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}

static void __init destroy_args(struct pgtable_debug_args *args)
{
	struct page *page = NULL;

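	/*
	 * Tear down in reverse order of init_args(): the (huge) pages
	 * first, then the page table pages bottom-up, then the vma and mm.
	 */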
	/* Free (huge) page */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_pud_hugepage() &&
	    args->pud_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pud_pfn,
					  (1 << (HPAGE_PUD_SHIFT - PAGE_SHIFT)));
		} else {
			page = pfn_to_page(args->pud_pfn);
			__free_pages(page, HPAGE_PUD_SHIFT - PAGE_SHIFT);
		}

		args->pud_pfn = ULONG_MAX;
		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage() &&
	    args->pmd_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pmd_pfn, (1 << HPAGE_PMD_ORDER));
		} else {
			page = pfn_to_page(args->pmd_pfn);
			__free_pages(page, HPAGE_PMD_ORDER);
		}

		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (args->pte_pfn != ULONG_MAX) {
		page = pfn_to_page(args->pte_pfn);
		__free_page(page);

		args->pte_pfn = ULONG_MAX;
	}

	/* Free page table entries */
	if (args->start_ptep) {
		pmd_clear(args->pmdp);
		pte_free(args->mm, args->start_ptep);
		mm_dec_nr_ptes(args->mm);
	}

	if (args->start_pmdp) {
		pud_clear(args->pudp);
		pmd_free(args->mm, args->start_pmdp);
		mm_dec_nr_pmds(args->mm);
	}

	if (args->start_pudp) {
		p4d_clear(args->p4dp);
		pud_free(args->mm, args->start_pudp);
		mm_dec_nr_puds(args->mm);
	}

	if (args->start_p4dp) {
		pgd_clear(args->pgdp);
		p4d_free(args->mm, args->start_p4dp);
	}

	/* Free vma and mm struct */
	if (args->vma)
		vm_area_free(args->vma);

	if (args->mm)
		mmput(args->mm);
}

static struct page * __init
debug_vm_pgtable_alloc_huge_page(struct pgtable_debug_args *args, int order)
{
	struct page *page = NULL;

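	/*
	 * Orders above MAX_PAGE_ORDER exceed what the buddy allocator
	 * can hand out; fall back to a contiguous-range allocation when
	 * CONFIG_CONTIG_ALLOC provides one.
	 */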
#ifdef CONFIG_CONTIG_ALLOC
	if (order > MAX_PAGE_ORDER) {
		page = alloc_contig_pages((1 << order), GFP_KERNEL,
					  first_online_node, NULL);
		if (page) {
			args->is_contiguous_page = true;
			return page;
		}
	}
#endif

	if (order <= MAX_PAGE_ORDER)
		page = alloc_pages(GFP_KERNEL, order);

	return page;
}

/*
 * Check if a physical memory range described by <pstart, pend> contains
 * an area of size psize that is also aligned to psize.
 *
 * Don't use address 0: an all-zeroes physical address might mask bugs,
 * and it's not used on x86.
 */
static void __init phys_align_check(phys_addr_t pstart,
				    phys_addr_t pend, unsigned long psize,
				    phys_addr_t *physp, unsigned long *alignp)
{
	phys_addr_t aligned_start, aligned_end;

	if (pstart == 0)
		pstart = PAGE_SIZE;

	aligned_start = ALIGN(pstart, psize);
	aligned_end = aligned_start + psize;

	if (aligned_end > aligned_start && aligned_end <= pend) {
		*alignp = psize;
		*physp = aligned_start;
	}
}

static void __init init_fixed_pfns(struct pgtable_debug_args *args)
{
	u64 idx;
	phys_addr_t phys, pstart, pend;

	/*
	 * Initialize the fixed pfns. To do this, try to find a
	 * valid physical range, preferably aligned to PUD_SIZE,
	 * but settling for one aligned to PMD_SIZE as a fallback. If
	 * neither of those is found, use the physical address of
	 * the start_kernel symbol.
	 *
	 * The memory doesn't need to be allocated, it just needs to exist
	 * as usable memory. It won't be touched.
	 *
	 * The alignment is recorded, and can be checked to see if we
	 * can run the tests that require an actual valid physical
	 * address range on some architectures ({pmd,pud}_huge_tests
	 * on x86).
	 */

	phys = __pa_symbol(&start_kernel);
	args->fixed_alignment = PAGE_SIZE;

	for_each_mem_range(idx, &pstart, &pend) {
		/* First check for a PUD-aligned area */
		phys_align_check(pstart, pend, PUD_SIZE, &phys,
				 &args->fixed_alignment);

		/* If a PUD-aligned area is found, we're done */
		if (args->fixed_alignment == PUD_SIZE)
			break;

		/*
		 * If no PMD-aligned area found yet, check for one,
		 * but continue the loop to look for a PUD-aligned area.
		 */
		if (args->fixed_alignment < PMD_SIZE)
			phys_align_check(pstart, pend, PMD_SIZE, &phys,
					 &args->fixed_alignment);
	}

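	/*
	 * Derive one fixed pfn per level by masking phys down to that
	 * level's boundary.
	 */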
	args->fixed_pgd_pfn = __phys_to_pfn(phys & PGDIR_MASK);
	args->fixed_p4d_pfn = __phys_to_pfn(phys & P4D_MASK);
	args->fixed_pud_pfn = __phys_to_pfn(phys & PUD_MASK);
	args->fixed_pmd_pfn = __phys_to_pfn(phys & PMD_MASK);
	args->fixed_pte_pfn = __phys_to_pfn(phys & PAGE_MASK);
	WARN_ON(!pfn_valid(args->fixed_pte_pfn));
}

static int __init init_args(struct pgtable_debug_args *args)
{
	unsigned long max_swap_offset;
	struct page *page = NULL;
	int ret = 0;

	/*
	 * Initialize the debugging data.
	 *
	 * vm_get_page_prot(VM_NONE) or vm_get_page_prot(VM_SHARED|VM_NONE)
	 * will help create page table entries with PROT_NONE permission as
	 * required for pxx_protnone_tests().
	 */
	memset(args, 0, sizeof(*args));
	args->vaddr              = get_random_vaddr();
	args->page_prot          = vm_get_page_prot(VM_ACCESS_FLAGS);
	args->page_prot_none     = vm_get_page_prot(VM_NONE);
	args->is_contiguous_page = false;
	args->pud_pfn            = ULONG_MAX;
	args->pmd_pfn            = ULONG_MAX;
	args->pte_pfn            = ULONG_MAX;
	args->fixed_pgd_pfn      = ULONG_MAX;
	args->fixed_p4d_pfn      = ULONG_MAX;
	args->fixed_pud_pfn      = ULONG_MAX;
	args->fixed_pmd_pfn      = ULONG_MAX;
	args->fixed_pte_pfn      = ULONG_MAX;

	/* Allocate mm and vma */
	args->mm = mm_alloc();
	if (!args->mm) {
		pr_err("Failed to allocate mm struct\n");
		ret = -ENOMEM;
		goto error;
	}

	args->vma = vm_area_alloc(args->mm);
	if (!args->vma) {
		pr_err("Failed to allocate vma\n");
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * Allocate page table entries. They will be modified in the
	 * tests. Let's save the page table entries so that they can be
	 * released when the tests are completed.
	 */
	args->pgdp = pgd_offset(args->mm, args->vaddr);
	args->p4dp = p4d_alloc(args->mm, args->pgdp, args->vaddr);
	if (!args->p4dp) {
		pr_err("Failed to allocate p4d entries\n");
		ret = -ENOMEM;
		goto error;
	}
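	/*
	 * Record the base of each table (offset 0) so that destroy_args()
	 * can free the whole table page, not just the slot used for
	 * args->vaddr.
	 */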
	args->start_p4dp = p4d_offset(args->pgdp, 0UL);
	WARN_ON(!args->start_p4dp);

	args->pudp = pud_alloc(args->mm, args->p4dp, args->vaddr);
	if (!args->pudp) {
		pr_err("Failed to allocate pud entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pudp = pud_offset(args->p4dp, 0UL);
	WARN_ON(!args->start_pudp);

	args->pmdp = pmd_alloc(args->mm, args->pudp, args->vaddr);
	if (!args->pmdp) {
		pr_err("Failed to allocate pmd entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pmdp = pmd_offset(args->pudp, 0UL);
	WARN_ON(!args->start_pmdp);

	if (pte_alloc(args->mm, args->pmdp)) {
		pr_err("Failed to allocate pte entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_ptep = pmd_pgtable(pmdp_get(args->pmdp));
	WARN_ON(!args->start_ptep);

	init_fixed_pfns(args);

	/* See generic_max_swapfile_size(): probe the maximum offset */
	max_swap_offset = swp_offset(pte_to_swp_entry(swp_entry_to_pte(swp_entry(0, ~0UL))));
	/* Create a swp entry with all possible bits set */
	args->swp_entry = swp_entry((1 << MAX_SWAPFILES_SHIFT) - 1, max_swap_offset);

	/*
	 * Allocate (huge) pages because some of the tests need to access
	 * the data in the pages. The corresponding tests will be skipped
	 * if we fail to allocate (huge) pages.
	 */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_pud_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args,
				HPAGE_PUD_SHIFT - PAGE_SHIFT);
		if (page) {
			args->pud_pfn = page_to_pfn(page);
			args->pmd_pfn = args->pud_pfn;
			args->pte_pfn = args->pud_pfn;
			return 0;
		}
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args, HPAGE_PMD_ORDER);
		if (page) {
			args->pmd_pfn = page_to_pfn(page);
			args->pte_pfn = args->pmd_pfn;
			return 0;
		}
	}

	page = alloc_page(GFP_KERNEL);
	if (page)
		args->pte_pfn = page_to_pfn(page);

	return 0;

error:
	destroy_args(args);
	return ret;
}

static int __init debug_vm_pgtable(void)
{
	struct pgtable_debug_args args;
	spinlock_t *ptl = NULL;
	int idx, ret;

	pr_info("Validating architecture page table helpers\n");
	ret = init_args(&args);
	if (ret)
		return ret;

	/*
	 * Iterate over each possible vm_flags combination to make sure
	 * that all the basic page table transformation validations hold
	 * true irrespective of the starting protection value for a
	 * given page table entry.
	 *
	 * Protection based vm_flags combinations are always linear
	 * and increasing, i.e. starting from VM_NONE and going up to
	 * (VM_SHARED | READ | WRITE | EXEC).
	 */
#define VM_FLAGS_START	(VM_NONE)
#define VM_FLAGS_END	(VM_SHARED | VM_EXEC | VM_WRITE | VM_READ)

	for (idx = VM_FLAGS_START; idx <= VM_FLAGS_END; idx++) {
		pte_basic_tests(&args, idx);
		pmd_basic_tests(&args, idx);
		pud_basic_tests(&args, idx);
	}

	/*
	 * Both the P4D and PGD level tests are very basic and do not
	 * involve creating page table entries from the protection
	 * value and the given pfn. Hence just keep them out of the
	 * above iteration for now to save some test execution time.
	 */
	p4d_basic_tests(&args);
	pgd_basic_tests(&args);

	pmd_leaf_tests(&args);
	pud_leaf_tests(&args);

	pte_special_tests(&args);
	pte_protnone_tests(&args);
	pmd_protnone_tests(&args);

	pte_soft_dirty_tests(&args);
	pmd_soft_dirty_tests(&args);
	pte_swap_soft_dirty_tests(&args);
	pmd_swap_soft_dirty_tests(&args);

	pte_swap_exclusive_tests(&args);

	pte_swap_tests(&args);
	pmd_swap_tests(&args);

	swap_migration_tests(&args);

	pmd_thp_tests(&args);
	pud_thp_tests(&args);

	hugetlb_basic_tests(&args);

	/*
	 * Page table modifying tests. They need to hold the
	 * proper page table lock.
	 */

	args.ptep = pte_offset_map_lock(args.mm, args.pmdp, args.vaddr, &ptl);
	pte_clear_tests(&args);
	pte_advanced_tests(&args);
	if (args.ptep)
		pte_unmap_unlock(args.ptep, ptl);

	ptl = pmd_lock(args.mm, args.pmdp);
	pmd_clear_tests(&args);
	pmd_advanced_tests(&args);
	pmd_huge_tests(&args);
	pmd_populate_tests(&args);
	spin_unlock(ptl);

	ptl = pud_lock(args.mm, args.pudp);
	pud_clear_tests(&args);
	pud_advanced_tests(&args);
	pud_huge_tests(&args);
	pud_populate_tests(&args);
	spin_unlock(ptl);

	spin_lock(&(args.mm->page_table_lock));
	p4d_clear_tests(&args);
	pgd_clear_tests(&args);
	p4d_populate_tests(&args);
	pgd_populate_tests(&args);
	spin_unlock(&(args.mm->page_table_lock));

	destroy_args(&args);
	return 0;
}
late_initcall(debug_vm_pgtable);