// SPDX-License-Identifier: GPL-2.0
/*
 * KASAN for 64-bit Book3S powerpc
 *
 * Copyright 2019-2022, Daniel Axtens, IBM Corporation.
 */

/*
 * ppc64 turns on virtual memory late in boot, after calling into generic
 * code like the device-tree parser, so it uses this late shadow setup in
 * conjunction with a hook in outline mode to avoid invalid accesses early
 * in boot.
 */

#define DISABLE_BRANCH_PROFILING

#include <linux/kasan.h>
#include <linux/printk.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>

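/*
 * Allocate real backing pages for the shadow of one physical memory
 * region and map them into the kernel page tables, so shadow accesses
 * for [start, end) hit writable memory instead of the shared zero page.
 */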
static void __init kasan_init_phys_region(void *start, void *end)
{
	unsigned long k_start, k_end, k_cur;
	void *va;

	if (start >= end)
		return;

	k_start = ALIGN_DOWN((unsigned long)kasan_mem_to_shadow(start), PAGE_SIZE);
	k_end = ALIGN((unsigned long)kasan_mem_to_shadow(end), PAGE_SIZE);

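	/*
	 * Grab one contiguous block for the whole shadow range and wire it
	 * up page by page; memblock_alloc_or_panic() never returns NULL,
	 * so no error handling is needed here.
	 */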
	va = memblock_alloc_or_panic(k_end - k_start, PAGE_SIZE);
	for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE, va += PAGE_SIZE)
		map_kernel_page(k_cur, __pa(va), PAGE_KERNEL);
}

void __init kasan_init(void)
{
	/*
	 * We want to do the following things:
	 *  1) Map real memory into the shadow for all physical memblocks
	 *     This takes us from c000... to c008...
	 *  2) Leave a hole over the shadow of vmalloc space. KASAN_VMALLOC
	 *     will manage this for us.
	 *     This takes us from c008... to c00a...
	 *  3) Map the 'early shadow'/zero page over iomap and vmemmap space.
	 *     This takes us up to where we start at c00e...
	 */

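	/*
	 * Generic KASAN maps each 8 bytes of memory to one shadow byte:
	 * shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET,
	 * which is the translation kasan_mem_to_shadow() performs for the
	 * bounds below.
	 */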
	void *k_start = kasan_mem_to_shadow((void *)RADIX_VMALLOC_END);
	void *k_end = kasan_mem_to_shadow((void *)RADIX_VMEMMAP_END);
	phys_addr_t start, end;
	u64 i;
	pte_t zero_pte = pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL);

	if (!early_radix_enabled()) {
		pr_warn("KASAN not enabled as it requires radix!\n");
		return;
	}

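	/* Step 1: back the shadow of every real memory range with fresh pages. */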
	for_each_mem_range(i, &start, &end)
		kasan_init_phys_region(phys_to_virt(start), phys_to_virt(end));

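	/*
	 * Point every slot of the early shadow PTE table at the shared
	 * zero page. It starts out writable and is flipped to read-only
	 * further down, once setup is finished.
	 */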
	for (i = 0; i < PTRS_PER_PTE; i++)
		__set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page,
			     &kasan_early_shadow_pte[i], zero_pte, 0);

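	/*
	 * Fold the page-table levels: every PMD entry shares the one PTE
	 * table above, and every PUD entry shares the one PMD table, so a
	 * single zero page can cover the whole early shadow range.
	 */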
	for (i = 0; i < PTRS_PER_PMD; i++)
		pmd_populate_kernel(&init_mm, &kasan_early_shadow_pmd[i],
				    kasan_early_shadow_pte);

	for (i = 0; i < PTRS_PER_PUD; i++)
		pud_populate(&init_mm, &kasan_early_shadow_pud[i],
			     kasan_early_shadow_pmd);

	/* map the early shadow over the iomap and vmemmap space */
	kasan_populate_early_shadow(k_start, k_end);

	/* mark early shadow region as RO and wipe it */
	zero_pte = pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL_RO);
	for (i = 0; i < PTRS_PER_PTE; i++)
		__set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page,
			     &kasan_early_shadow_pte[i], zero_pte, 0);

	/*
	 * clear_page relies on some cache info that hasn't been set up yet.
	 * It ends up looping ~forever and blows up other data.
	 * Use memset instead.
	 */
	memset(kasan_early_shadow_page, 0, PAGE_SIZE);

	/* Enable error messages */
	init_task.kasan_depth = 0;
	kasan_init_generic();
}

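/*
 * There is no usable shadow until kasan_init() runs (the hook mentioned
 * at the top of this file keeps outline checks away before then), so the
 * early and late init hooks have nothing to do on Book3S-64.
 */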
void __init kasan_early_init(void) { }

void __init kasan_late_init(void) { }
101