// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/leafops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pgalloc.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * Please refer to Documentation/mm/arch_pgtable_helpers.rst for the
 * semantics expectations that are being validated here. All future
 * changes here or in the documentation need to be kept in sync.
 */
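/*
 * GENMASK(7, 0) is 0xff; memset()-ing an entry with this byte yields a
 * non-zero garbage value, which the pgd/p4d basic tests below rely on
 * when exercising pxx_same() without constructing a real entry.
 */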
#define RANDOM_NZVALUE	GENMASK(7, 0)

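/*
 * All state shared between the individual tests: the mm/vma used for the
 * walks, pointers to the page table entries at @vaddr (with the start_*
 * copies kept so that destroy_args() can free the tables), pfns of the
 * pages allocated for the advanced tests (ULONG_MAX when no page could be
 * allocated, in which case those tests are skipped), the fixed_* pfns
 * describing an existing aligned physical range, and pre-built swap and
 * migration entries.
 */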
struct pgtable_debug_args {
	struct mm_struct	*mm;
	struct vm_area_struct	*vma;

	pgd_t			*pgdp;
	p4d_t			*p4dp;
	pud_t			*pudp;
	pmd_t			*pmdp;
	pte_t			*ptep;

	p4d_t			*start_p4dp;
	pud_t			*start_pudp;
	pmd_t			*start_pmdp;
	pgtable_t		start_ptep;

	unsigned long		vaddr;
	pgprot_t		page_prot;
	pgprot_t		page_prot_none;

	bool			is_contiguous_page;
	unsigned long		pud_pfn;
	unsigned long		pmd_pfn;
	unsigned long		pte_pfn;

	unsigned long		fixed_alignment;
	unsigned long		fixed_pgd_pfn;
	unsigned long		fixed_p4d_pfn;
	unsigned long		fixed_pud_pfn;
	unsigned long		fixed_pmd_pfn;
	unsigned long		fixed_pte_pfn;

	swp_entry_t		swp_entry;
	swp_entry_t		leaf_entry;
};

static void __init pte_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	pte_t pte = pfn_pte(args->fixed_pte_pfn, prot);
	unsigned long val = idx, *ptr = &val;

	pr_debug("Validating PTE basic (%pGv)\n", ptr);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pte() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pte_dirty(pte_wrprotect(pte)));

	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte), args->vma)));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte, args->vma))));
	WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
	WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));

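	/*
	 * pte_mkwrite_novma() sets the write bit without consulting a VMA.
	 * On architectures where writability depends on the VMA (e.g. x86
	 * shadow stack), it may behave differently from pte_mkwrite(), so
	 * the dirty/write interactions are rechecked for this variant.
	 */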
	WARN_ON(!pte_dirty(pte_mkwrite_novma(pte_mkdirty(pte))));
	WARN_ON(pte_dirty(pte_mkwrite_novma(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkdirty(pte_mkwrite_novma(pte))));
	WARN_ON(!pte_write(pte_mkwrite_novma(pte_wrprotect(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite_novma(pte))));
}

static void __init pte_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte;

	/*
	 * Architectures optimize set_pte_at() by avoiding a TLB flush.
	 * This requires that set_pte_at() is not used to update an
	 * existing pte entry. Clear the pte before calling set_pte_at().
	 *
	 * flush_dcache_page() is called after set_pte_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when the page is released, so the page allocation check would
	 * fail when the page is allocated again. For architectures other
	 * than ARM64, the unexpected overhead of cache flushing is
	 * acceptable.
	 */
	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating PTE advanced\n");
	if (WARN_ON(!args->ptep))
		return;

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	ptep_set_wrprotect(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_write(pte));
	ptep_get_and_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_wrprotect(pte);
	pte = pte_mkclean(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	pte = pte_mkwrite(pte, args->vma);
	pte = pte_mkdirty(pte);
	ptep_set_access_flags(args->vma, args->vaddr, args->ptep, pte, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_mkyoung(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	ptep_test_and_clear_young(args->vma, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_young(pte));

	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	unsigned long val = idx, *ptr = &val;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD basic (%pGv)\n", ptr);
	pmd = pfn_pmd(args->fixed_pmd_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pmd() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));

	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd), args->vma)));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd, args->vma))));
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
	WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));

	WARN_ON(!pmd_dirty(pmd_mkwrite_novma(pmd_mkdirty(pmd))));
	WARN_ON(pmd_dirty(pmd_mkwrite_novma(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkdirty(pmd_mkwrite_novma(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite_novma(pmd_wrprotect(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite_novma(pmd))));

	/*
	 * A huge page does not point to a next-level page table.
	 * Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

static void __init pmd_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pmd_t pmd;
	unsigned long vaddr = args->vaddr;

	if (!has_transparent_hugepage())
		return;

	page = (args->pmd_pfn != ULONG_MAX) ? pfn_to_page(args->pmd_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pmd_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when the page is released, so the page allocation check would
	 * fail when the page is allocated again. For architectures other
	 * than ARM64, the unexpected overhead of cache flushing is
	 * acceptable.
	 */
	pr_debug("Validating PMD advanced\n");
	/* Align the address wrt HPAGE_PMD_SIZE */
	vaddr &= HPAGE_PMD_MASK;

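	/*
	 * Deposit a page table for this huge mapping; it is reclaimed by
	 * the pgtable_trans_huge_withdraw() at the end of this function,
	 * mirroring the deposit/withdraw pairing THP code normally does.
	 */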
	pgtable_trans_huge_deposit(args->mm, args->pmdp, args->start_ptep);

	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmdp_set_wrprotect(args->mm, vaddr, args->pmdp);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(pmd_write(pmd));
	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
	pmd = pmd_wrprotect(pmd);
	pmd = pmd_mkclean(pmd);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmd = pmd_mkwrite(pmd, args->vma);
	pmd = pmd_mkdirty(pmd);
	pmdp_set_access_flags(args->vma, vaddr, args->pmdp, pmd, 1);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
	pmdp_huge_get_and_clear_full(args->vma, vaddr, args->pmdp, 1);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pmd_mkhuge(pfn_pmd(args->pmd_pfn, args->page_prot));
	pmd = pmd_mkyoung(pmd);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmdp_test_and_clear_young(args->vma, vaddr, args->pmdp);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(pmd_young(pmd));

	/* Clear the pmd entry and withdraw the deposited page table */
	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
	pgtable_trans_huge_withdraw(args->mm, args->pmdp);
}

static void __init pmd_leaf_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD leaf\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);

	/*
	 * PMD based THP is a leaf entry.
	 */
	pmd = pmd_mkhuge(pmd);
	WARN_ON(!pmd_leaf(pmd));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	unsigned long val = idx, *ptr = &val;
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD basic (%pGv)\n", ptr);
	pud = pfn_pud(args->fixed_pud_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pud() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pud_dirty(pud_wrprotect(pud)));

	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
	WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
	WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
	WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));

	if (mm_pmd_folded(args->mm))
		return;

	/*
	 * A huge page does not point to a next-level page table.
	 * Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
}

static void __init pud_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	unsigned long vaddr = args->vaddr;
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	page = (args->pud_pfn != ULONG_MAX) ? pfn_to_page(args->pud_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pud_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when the page is released, so the page allocation check would
	 * fail when the page is allocated again. For architectures other
	 * than ARM64, the unexpected overhead of cache flushing is
	 * acceptable.
	 */
	pr_debug("Validating PUD advanced\n");
	/* Align the address wrt HPAGE_PUD_SIZE */
	vaddr &= HPAGE_PUD_MASK;

	pud = pfn_pud(args->pud_pfn, args->page_prot);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pudp_set_wrprotect(args->mm, vaddr, args->pudp);
	pud = pudp_get(args->pudp);
	WARN_ON(pud_write(pud));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
	pud = pudp_get(args->pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
	pud = pfn_pud(args->pud_pfn, args->page_prot);
	pud = pud_wrprotect(pud);
	pud = pud_mkclean(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pud = pud_mkwrite(pud);
	pud = pud_mkdirty(pud);
	pudp_set_access_flags(args->vma, vaddr, args->pudp, pud, 1);
	pud = pudp_get(args->pudp);
	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear_full(args->vma, vaddr, args->pudp, 1);
	pud = pudp_get(args->pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */

	pud = pfn_pud(args->pud_pfn, args->page_prot);
	pud = pud_mkyoung(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pudp_test_and_clear_young(args->vma, vaddr, args->pudp);
	pud = pudp_get(args->pudp);
	WARN_ON(pud_young(pud));

	pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
}

static void __init pud_leaf_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD leaf\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	/*
	 * PUD based THP is a leaf entry.
	 */
	pud = pud_mkhuge(pud);
	WARN_ON(!pud_leaf(pud));
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pmd_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pmd_leaf_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pmd_huge_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!arch_vmap_pmd_supported(args->page_prot) ||
	    args->fixed_alignment < PMD_SIZE)
		return;

	pr_debug("Validating PMD huge\n");
	/*
	 * x86's pmd_set_huge() verifies that the given
	 * PMD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*args->pmdp, __pmd(0));
	WARN_ON(!pmd_set_huge(args->pmdp, __pfn_to_phys(args->fixed_pmd_pfn), args->page_prot));
	WARN_ON(!pmd_clear_huge(args->pmdp));
	pmd = pmdp_get(args->pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pud_huge_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!arch_vmap_pud_supported(args->page_prot) ||
	    args->fixed_alignment < PUD_SIZE)
		return;

	pr_debug("Validating PUD huge\n");
	/*
	 * x86's pud_set_huge() verifies that the given
	 * PUD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*args->pudp, __pud(0));
	WARN_ON(!pud_set_huge(args->pudp, __pfn_to_phys(args->fixed_pud_pfn), args->page_prot));
	WARN_ON(!pud_clear_huge(args->pudp));
	pud = pudp_get(args->pudp);
	WARN_ON(!pud_none(pud));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pmd_huge_tests(struct pgtable_debug_args *args) { }
static void __init pud_huge_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

static void __init p4d_basic_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	pr_debug("Validating P4D basic\n");
	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	pr_debug("Validating PGD basic\n");
	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}

#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct pgtable_debug_args *args)
{
	pud_t pud = pudp_get(args->pudp);

	if (mm_pmd_folded(args->mm))
		return;

	pr_debug("Validating PUD clear\n");
	WARN_ON(pud_none(pud));
	pud_clear(args->pudp);
	pud = pudp_get(args->pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (mm_pmd_folded(args->mm))
		return;

	pr_debug("Validating PUD populate\n");
	/*
	 * This entry points to a next-level page table page.
	 * Hence this must not qualify as pud_bad().
	 */
	pud_populate(args->mm, args->pudp, args->start_pmdp);
	pud = pudp_get(args->pudp);
	WARN_ON(pud_bad(pud));
}
#else  /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct pgtable_debug_args *args) { }
static void __init pud_populate_tests(struct pgtable_debug_args *args) { }
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d = p4dp_get(args->p4dp);

	if (mm_pud_folded(args->mm))
		return;

	pr_debug("Validating P4D clear\n");
	WARN_ON(p4d_none(p4d));
	p4d_clear(args->p4dp);
	p4d = p4dp_get(args->p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	if (mm_pud_folded(args->mm))
		return;

	pr_debug("Validating P4D populate\n");
	/*
	 * This entry points to a next-level page table page.
	 * Hence this must not qualify as p4d_bad().
	 */
	pud_clear(args->pudp);
	p4d_clear(args->p4dp);
	p4d_populate(args->mm, args->p4dp, args->start_pudp);
	p4d = p4dp_get(args->p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd = pgdp_get(args->pgdp);

	if (mm_p4d_folded(args->mm))
		return;

	pr_debug("Validating PGD clear\n");
	WARN_ON(pgd_none(pgd));
	pgd_clear(args->pgdp);
	pgd = pgdp_get(args->pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	if (mm_p4d_folded(args->mm))
		return;

	pr_debug("Validating PGD populate\n");
	/*
	 * This entry points to a next-level page table page.
	 * Hence this must not qualify as pgd_bad().
	 */
	p4d_clear(args->p4dp);
	pgd_clear(args->pgdp);
	pgd_populate(args->mm, args->pgdp, args->start_p4dp);
	pgd = pgdp_get(args->pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else  /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct pgtable_debug_args *args) { }
static void __init pgd_clear_tests(struct pgtable_debug_args *args) { }
static void __init p4d_populate_tests(struct pgtable_debug_args *args) { }
static void __init pgd_populate_tests(struct pgtable_debug_args *args) { }
#endif /* __PAGETABLE_P4D_FOLDED */

static void __init pte_clear_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte = pfn_pte(args->pte_pfn, args->page_prot);

	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pte_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when the page is released, so the page allocation check would
	 * fail when the page is allocated again. For architectures other
	 * than ARM64, the unexpected overhead of cache flushing is
	 * acceptable.
	 */
	pr_debug("Validating PTE clear\n");
	if (WARN_ON(!args->ptep))
		return;

	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	WARN_ON(pte_none(pte));
	flush_dcache_page(page);
	barrier();
	ptep_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd = pmdp_get(args->pmdp);

	pr_debug("Validating PMD clear\n");
	WARN_ON(pmd_none(pmd));
	pmd_clear(args->pmdp);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	pr_debug("Validating PMD populate\n");
	/*
	 * This entry points to a next-level page table page.
	 * Hence this must not qualify as pmd_bad().
	 */
	pmd_populate(args->mm, args->pmdp, args->start_ptep);
	pmd = pmdp_get(args->pmdp);
	WARN_ON(pmd_bad(pmd));
}

static void __init pte_special_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
		return;

	pr_debug("Validating PTE special\n");
	WARN_ON(!pte_special(pte_mkspecial(pte)));
}

static void __init pte_protnone_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot_none);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE protnone\n");
	WARN_ON(!pte_protnone(pte));
	WARN_ON(!pte_present(pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD protnone\n");
	pmd = pmd_mkhuge(pfn_pmd(args->fixed_pmd_pfn, args->page_prot_none));
	WARN_ON(!pmd_protnone(pmd));
	WARN_ON(!pmd_present(pmd));
}
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_protnone_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __init pte_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!pgtable_supports_soft_dirty())
		return;

	pr_debug("Validating PTE soft dirty\n");
	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}

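/*
 * The softleaf_*() helpers from <linux/leafops.h> decode non-present
 * "software leaf" entries (swap, migration, etc.) from raw page table
 * entries; softleaf_from_pte() and softleaf_is_swap() below are used to
 * check that a pte built from a swap entry still decodes as one.
 */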
static void __init pte_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte;
	softleaf_t entry;

	if (!pgtable_supports_soft_dirty())
		return;

	pr_debug("Validating PTE swap soft dirty\n");
	pte = swp_entry_to_pte(args->swp_entry);
	entry = softleaf_from_pte(pte);

	WARN_ON(!softleaf_is_swap(entry));
	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!pgtable_supports_soft_dirty())
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD soft dirty\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_leaf_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!pgtable_supports_soft_dirty() ||
	    !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap soft dirty\n");
	pmd = swp_entry_to_pmd(args->leaf_entry);
	WARN_ON(!pmd_is_huge(pmd));
	WARN_ON(!pmd_is_valid_softleaf(pmd));

	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args) { }
static void __init pmd_leaf_soft_dirty_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __init pte_swap_exclusive_tests(struct pgtable_debug_args *args)
{
	swp_entry_t entry;
	softleaf_t softleaf;
	pte_t pte;

	pr_debug("Validating PTE swap exclusive\n");
	entry = args->swp_entry;

	pte = swp_entry_to_pte(entry);
	softleaf = softleaf_from_pte(pte);

	WARN_ON(pte_swp_exclusive(pte));
	WARN_ON(!softleaf_is_swap(softleaf));
	WARN_ON(memcmp(&entry, &softleaf, sizeof(entry)));

	pte = pte_swp_mkexclusive(pte);
	softleaf = softleaf_from_pte(pte);

	WARN_ON(!pte_swp_exclusive(pte));
	WARN_ON(!softleaf_is_swap(softleaf));
	WARN_ON(pte_swp_soft_dirty(pte));
	WARN_ON(memcmp(&entry, &softleaf, sizeof(entry)));

	pte = pte_swp_clear_exclusive(pte);
	softleaf = softleaf_from_pte(pte);

	WARN_ON(pte_swp_exclusive(pte));
	WARN_ON(!softleaf_is_swap(softleaf));
	WARN_ON(memcmp(&entry, &softleaf, sizeof(entry)));
}

static void __init pte_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t arch_entry;
	softleaf_t entry;
	pte_t pte1, pte2;

	pr_debug("Validating PTE swap\n");
	pte1 = swp_entry_to_pte(args->swp_entry);
	entry = softleaf_from_pte(pte1);

	WARN_ON(!softleaf_is_swap(entry));

	arch_entry = __pte_to_swp_entry(pte1);
	pte2 = __swp_entry_to_pte(arch_entry);
	WARN_ON(memcmp(&pte1, &pte2, sizeof(pte1)));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static void __init pmd_softleaf_tests(struct pgtable_debug_args *args)
{
	swp_entry_t arch_entry;
	pmd_t pmd1, pmd2;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap\n");
	pmd1 = swp_entry_to_pmd(args->leaf_entry);
	WARN_ON(!pmd_is_huge(pmd1));
	WARN_ON(!pmd_is_valid_softleaf(pmd1));

	arch_entry = __pmd_to_swp_entry(pmd1);
	pmd2 = __swp_entry_to_pmd(arch_entry);
	WARN_ON(memcmp(&pmd1, &pmd2, sizeof(pmd1)));
}
#else  /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init pmd_softleaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

static void __init swap_migration_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	softleaf_t entry;

	if (!IS_ENABLED(CONFIG_MIGRATION))
		return;

	/*
	 * swap_migration_tests() requires a dedicated page as it needs to
	 * be locked before creating a migration entry from it. Locking the
	 * page that actually maps kernel text ('start_kernel') can be
	 * really problematic. Let's use the allocated page explicitly for
	 * this purpose.
	 */
	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating swap migration\n");

	/*
	 * make_[readable|writable]_migration_entry() expects the given page
	 * to be locked, otherwise it stumbles upon a BUG_ON().
	 */
	__SetPageLocked(page);
	entry = make_writable_migration_entry(page_to_pfn(page));
	WARN_ON(!softleaf_is_migration(entry));
	WARN_ON(!softleaf_is_migration_write(entry));

	entry = make_readable_migration_entry(swp_offset(entry));
	WARN_ON(!softleaf_is_migration(entry));
	WARN_ON(softleaf_is_migration_write(entry));

	entry = make_readable_migration_entry(page_to_pfn(page));
	WARN_ON(!softleaf_is_migration(entry));
	WARN_ON(softleaf_is_migration_write(entry));
	__ClearPageLocked(page);
}

#ifdef CONFIG_HUGETLB_PAGE
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args)
{
	pte_t pte;

	pr_debug("Validating HugeTLB basic\n");
	pte = pfn_pte(args->fixed_pmd_pfn, args->page_prot);
	pte = arch_make_huge_pte(pte, PMD_SHIFT, VM_ACCESS_FLAGS);

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
	WARN_ON(!pte_huge(pte));
#endif
	WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
	WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
	WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));
}
#else  /* !CONFIG_HUGETLB_PAGE */
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_thp_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD based THP\n");
	/*
	 * pmd_trans_huge() and pmd_present() must return true after
	 * MMU invalidation with pmd_mkinvalid(). This behavior is an
	 * optimization for transparent huge pages. pmd_trans_huge() must
	 * be true if pmd_page() returns a valid THP, to avoid taking the
	 * pmd_lock when others walk over non-transhuge pmds (i.e. there
	 * is no THP allocated). Especially when splitting a THP and
	 * removing the present bit from the pmd, pmd_trans_huge() still
	 * needs to return true. pmd_present() should be true whenever
	 * pmd_trans_huge() returns true.
	 */
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));

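	/*
	 * Presumably, architectures that provide their own pmdp_invalidate()
	 * are not required to preserve these predicates across
	 * pmd_mkinvalid(), so the checks below apply only to the generic
	 * implementation.
	 */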
#ifndef __HAVE_ARCH_PMDP_INVALIDATE
	WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_leaf(pmd_mkinvalid(pmd_mkhuge(pmd))));
#endif /* __HAVE_ARCH_PMDP_INVALIDATE */
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_thp_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD based THP\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));

	/*
	 * pud_mkinvalid() has been dropped for now. Re-enable these
	 * tests when it comes back with a modified pud_present().
	 *
	 * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
	 * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
	 */
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_thp_tests(struct pgtable_debug_args *args) { }
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}

static void __init destroy_args(struct pgtable_debug_args *args)
{
	struct page *page = NULL;

	/* Free (huge) page */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_pud_hugepage() &&
	    args->pud_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pud_pfn,
					  (1 << (HPAGE_PUD_SHIFT - PAGE_SHIFT)));
		} else {
			page = pfn_to_page(args->pud_pfn);
			__free_pages(page, HPAGE_PUD_SHIFT - PAGE_SHIFT);
		}

		args->pud_pfn = ULONG_MAX;
		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage() &&
	    args->pmd_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pmd_pfn, (1 << HPAGE_PMD_ORDER));
		} else {
			page = pfn_to_page(args->pmd_pfn);
			__free_pages(page, HPAGE_PMD_ORDER);
		}

		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (args->pte_pfn != ULONG_MAX) {
		page = pfn_to_page(args->pte_pfn);
		__free_page(page);

		args->pte_pfn = ULONG_MAX;
	}

	/* Free page table entries */
	if (args->start_ptep) {
		pmd_clear(args->pmdp);
		pte_free(args->mm, args->start_ptep);
		mm_dec_nr_ptes(args->mm);
	}

	if (args->start_pmdp) {
		pud_clear(args->pudp);
		pmd_free(args->mm, args->start_pmdp);
		mm_dec_nr_pmds(args->mm);
	}

	if (args->start_pudp) {
		p4d_clear(args->p4dp);
		pud_free(args->mm, args->start_pudp);
		mm_dec_nr_puds(args->mm);
	}

	if (args->start_p4dp) {
		pgd_clear(args->pgdp);
		p4d_free(args->mm, args->start_p4dp);
	}

	/* Free vma and mm struct */
	if (args->vma)
		vm_area_free(args->vma);

	if (args->mm)
		mmput(args->mm);
}

static struct page * __init
debug_vm_pgtable_alloc_huge_page(struct pgtable_debug_args *args, int order)
{
	struct page *page = NULL;

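	/*
	 * Orders above MAX_PAGE_ORDER are beyond what the buddy allocator
	 * can provide, so a PUD-sized allocation has to come from the
	 * contiguous page allocator when it is available.
	 */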
#ifdef CONFIG_CONTIG_ALLOC
	if (order > MAX_PAGE_ORDER) {
		page = alloc_contig_pages((1 << order), GFP_KERNEL,
					  first_online_node, NULL);
		if (page) {
			args->is_contiguous_page = true;
			return page;
		}
	}
#endif

	if (order <= MAX_PAGE_ORDER)
		page = alloc_pages(GFP_KERNEL, order);

	return page;
}

/*
 * Check if a physical memory range described by <pstart, pend> contains
 * an area that is of size psize, and aligned to psize.
 *
 * Don't use address 0, an all-zeroes physical address might mask bugs, and
 * it's not used on x86.
 */
static void __init phys_align_check(phys_addr_t pstart,
				    phys_addr_t pend, unsigned long psize,
				    phys_addr_t *physp, unsigned long *alignp)
{
	phys_addr_t aligned_start, aligned_end;

	if (pstart == 0)
		pstart = PAGE_SIZE;

	aligned_start = ALIGN(pstart, psize);
	aligned_end = aligned_start + psize;

	if (aligned_end > aligned_start && aligned_end <= pend) {
		*alignp = psize;
		*physp = aligned_start;
	}
}

static void __init init_fixed_pfns(struct pgtable_debug_args *args)
{
	u64 idx;
	phys_addr_t phys, pstart, pend;

	/*
	 * Initialize the fixed pfns. To do this, try to find a
	 * valid physical range, preferably aligned to PUD_SIZE,
	 * but settling for aligned to PMD_SIZE as a fallback. If
	 * neither of those is found, use the physical address of
	 * the start_kernel symbol.
	 *
	 * The memory doesn't need to be allocated, it just needs to exist
	 * as usable memory. It won't be touched.
	 *
	 * The alignment is recorded, and can be checked to see if we
	 * can run the tests that require an actual valid physical
	 * address range on some architectures ({pmd,pud}_huge_tests()
	 * on x86).
	 */

	phys = __pa_symbol(&start_kernel);
	args->fixed_alignment = PAGE_SIZE;

	for_each_mem_range(idx, &pstart, &pend) {
		/* First check for a PUD-aligned area */
		phys_align_check(pstart, pend, PUD_SIZE, &phys,
				 &args->fixed_alignment);

		/* If a PUD-aligned area is found, we're done */
		if (args->fixed_alignment == PUD_SIZE)
			break;

		/*
		 * If no PMD-aligned area has been found yet, check for one,
		 * but continue the loop to look for a PUD-aligned area.
		 */
		if (args->fixed_alignment < PMD_SIZE)
			phys_align_check(pstart, pend, PMD_SIZE, &phys,
					 &args->fixed_alignment);
	}

	args->fixed_pgd_pfn = __phys_to_pfn(phys & PGDIR_MASK);
	args->fixed_p4d_pfn = __phys_to_pfn(phys & P4D_MASK);
	args->fixed_pud_pfn = __phys_to_pfn(phys & PUD_MASK);
	args->fixed_pmd_pfn = __phys_to_pfn(phys & PMD_MASK);
	args->fixed_pte_pfn = __phys_to_pfn(phys & PAGE_MASK);
	WARN_ON(!pfn_valid(args->fixed_pte_pfn));
}

static int __init init_args(struct pgtable_debug_args *args)
{
	unsigned long max_swap_offset;
	struct page *page = NULL;
	int ret = 0;

	/*
	 * Initialize the debugging data.
	 *
	 * vm_get_page_prot(VM_NONE) or vm_get_page_prot(VM_SHARED|VM_NONE)
	 * will help create page table entries with PROT_NONE permission as
	 * required for pxx_protnone_tests().
	 */
	memset(args, 0, sizeof(*args));
	args->vaddr              = get_random_vaddr();
	args->page_prot          = vm_get_page_prot(VM_ACCESS_FLAGS);
	args->page_prot_none     = vm_get_page_prot(VM_NONE);
	args->is_contiguous_page = false;
	args->pud_pfn            = ULONG_MAX;
	args->pmd_pfn            = ULONG_MAX;
	args->pte_pfn            = ULONG_MAX;
	args->fixed_pgd_pfn      = ULONG_MAX;
	args->fixed_p4d_pfn      = ULONG_MAX;
	args->fixed_pud_pfn      = ULONG_MAX;
	args->fixed_pmd_pfn      = ULONG_MAX;
	args->fixed_pte_pfn      = ULONG_MAX;

	/* Allocate mm and vma */
	args->mm = mm_alloc();
	if (!args->mm) {
		pr_err("Failed to allocate mm struct\n");
		ret = -ENOMEM;
		goto error;
	}

	args->vma = vm_area_alloc(args->mm);
	if (!args->vma) {
		pr_err("Failed to allocate vma\n");
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * Allocate page table entries. They will be modified in the tests.
	 * Let's save the page table entries so that they can be released
	 * when the tests are completed.
	 */
	args->pgdp = pgd_offset(args->mm, args->vaddr);
	args->p4dp = p4d_alloc(args->mm, args->pgdp, args->vaddr);
	if (!args->p4dp) {
		pr_err("Failed to allocate p4d entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_p4dp = p4d_offset(args->pgdp, 0UL);
	WARN_ON(!args->start_p4dp);

	args->pudp = pud_alloc(args->mm, args->p4dp, args->vaddr);
	if (!args->pudp) {
		pr_err("Failed to allocate pud entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pudp = pud_offset(args->p4dp, 0UL);
	WARN_ON(!args->start_pudp);

	args->pmdp = pmd_alloc(args->mm, args->pudp, args->vaddr);
	if (!args->pmdp) {
		pr_err("Failed to allocate pmd entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pmdp = pmd_offset(args->pudp, 0UL);
	WARN_ON(!args->start_pmdp);

	if (pte_alloc(args->mm, args->pmdp)) {
		pr_err("Failed to allocate pte entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_ptep = pmd_pgtable(pmdp_get(args->pmdp));
	WARN_ON(!args->start_ptep);

	init_fixed_pfns(args);

	/*
	 * See generic_max_swapfile_size(): probe the maximum offset. The
	 * pte/softleaf round-trip truncates the offset to the bits the
	 * architecture can actually encode, which yields the largest
	 * representable swap offset.
	 */
	max_swap_offset = swp_offset(softleaf_from_pte(softleaf_to_pte(swp_entry(0, ~0UL))));
	/* Create a swp entry with all possible bits set while still being swap. */
	args->swp_entry = swp_entry(MAX_SWAPFILES - 1, max_swap_offset);
	/* Create a non-present migration entry. */
	args->leaf_entry = make_writable_migration_entry(~0UL);

	/*
	 * Allocate (huge) pages because some of the tests need to access
	 * the data in the pages. The corresponding tests will be skipped
	 * if we fail to allocate (huge) pages.
	 */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_pud_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args,
				HPAGE_PUD_SHIFT - PAGE_SHIFT);
		if (page) {
			args->pud_pfn = page_to_pfn(page);
			args->pmd_pfn = args->pud_pfn;
			args->pte_pfn = args->pud_pfn;
			return 0;
		}
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args, HPAGE_PMD_ORDER);
		if (page) {
			args->pmd_pfn = page_to_pfn(page);
			args->pte_pfn = args->pmd_pfn;
			return 0;
		}
	}

	page = alloc_page(GFP_KERNEL);
	if (page)
		args->pte_pfn = page_to_pfn(page);

	return 0;

error:
	destroy_args(args);
	return ret;
}

static int __init debug_vm_pgtable(void)
{
	struct pgtable_debug_args args;
	spinlock_t *ptl = NULL;
	int idx, ret;

	pr_info("Validating architecture page table helpers\n");
	ret = init_args(&args);
	if (ret)
		return ret;

	/*
	 * Iterate over each possible vm_flags combination to make sure
	 * that all the basic page table transformation validations hold
	 * true irrespective of the starting protection value for a
	 * given page table entry.
	 *
	 * Protection based vm_flags combinations are always linear
	 * and increasing, i.e. starting from VM_NONE and going up to
	 * (VM_SHARED | READ | WRITE | EXEC).
	 */
#define VM_FLAGS_START	(VM_NONE)
#define VM_FLAGS_END	(VM_SHARED | VM_EXEC | VM_WRITE | VM_READ)

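	/*
	 * VM_READ, VM_WRITE, VM_EXEC and VM_SHARED occupy the low four
	 * vm_flags bits, so this loop walks all sixteen protection
	 * combinations from VM_NONE (0) up to VM_FLAGS_END (0xf).
	 */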
	for (idx = VM_FLAGS_START; idx <= VM_FLAGS_END; idx++) {
		pte_basic_tests(&args, idx);
		pmd_basic_tests(&args, idx);
		pud_basic_tests(&args, idx);
	}

	/*
	 * Both the P4D and PGD level tests are very basic and do not
	 * involve creating page table entries from the protection
	 * value and the given pfn. Hence just keep them out of the
	 * above iteration for now to save some test execution time.
	 */
	p4d_basic_tests(&args);
	pgd_basic_tests(&args);

	pmd_leaf_tests(&args);
	pud_leaf_tests(&args);

	pte_special_tests(&args);
	pte_protnone_tests(&args);
	pmd_protnone_tests(&args);

	pte_soft_dirty_tests(&args);
	pmd_soft_dirty_tests(&args);
	pte_swap_soft_dirty_tests(&args);
	pmd_leaf_soft_dirty_tests(&args);

	pte_swap_exclusive_tests(&args);

	pte_swap_tests(&args);
	pmd_softleaf_tests(&args);

	swap_migration_tests(&args);

	pmd_thp_tests(&args);
	pud_thp_tests(&args);

	hugetlb_basic_tests(&args);

	/*
	 * Page table modifying tests. They need to hold the
	 * proper page table locks.
	 */

	args.ptep = pte_offset_map_lock(args.mm, args.pmdp, args.vaddr, &ptl);
	pte_clear_tests(&args);
	pte_advanced_tests(&args);
	if (args.ptep)
		pte_unmap_unlock(args.ptep, ptl);

	ptl = pmd_lock(args.mm, args.pmdp);
	pmd_clear_tests(&args);
	pmd_advanced_tests(&args);
	pmd_huge_tests(&args);
	pmd_populate_tests(&args);
	spin_unlock(ptl);

	ptl = pud_lock(args.mm, args.pudp);
	pud_clear_tests(&args);
	pud_advanced_tests(&args);
	pud_huge_tests(&args);
	pud_populate_tests(&args);
	spin_unlock(ptl);

	spin_lock(&(args.mm->page_table_lock));
	p4d_clear_tests(&args);
	pgd_clear_tests(&args);
	p4d_populate_tests(&args);
	pgd_populate_tests(&args);
	spin_unlock(&(args.mm->page_table_lock));

	destroy_args(&args);
	return 0;
}
late_initcall(debug_vm_pgtable);