// SPDX-License-Identifier: GPL-2.0

#define DISABLE_BRANCH_PROFILING

#include <linux/kasan.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/sched/task.h>
#include <asm/pgalloc.h>
#include <asm/code-patching.h>
#include <mm/mmu_decl.h>

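/*
 * Protection used when the early shadow page is remapped read-only:
 * hash MMUs (MMU_FTR_HPTE_TABLE) use PAGE_READONLY, other platforms
 * use PAGE_KERNEL_RO.
 */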
static pgprot_t __init kasan_prot_ro(void)
{
	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return PAGE_READONLY;

	return PAGE_KERNEL_RO;
}

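/*
 * Fill all PTRS_PER_PTE entries of a page table so that they map the
 * single kasan_early_shadow_page with the given protection.
 */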
static void __init kasan_populate_pte(pte_t *ptep, pgprot_t prot)
{
	unsigned long va = (unsigned long)kasan_early_shadow_page;
	phys_addr_t pa = __pa(kasan_early_shadow_page);
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++, ptep++)
		__set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 1);
}

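/*
 * Walk the shadow range [k_start, k_end) one PGD entry at a time and,
 * for each PMD still pointing at the shared early shadow PTE table,
 * allocate a fresh PTE table, populate it with the early shadow page
 * and hook it up. Entries already backed by real tables are left alone.
 */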
int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
{
	pmd_t *pmd;
	unsigned long k_cur, k_next;

	pmd = pmd_off_k(k_start);

	for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd++) {
		pte_t *new;

		k_next = pgd_addr_end(k_cur, k_end);
		if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte)
			continue;

		new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);

		if (!new)
			return -ENOMEM;
		kasan_populate_pte(new, PAGE_KERNEL);
		pmd_populate_kernel(&init_mm, pmd, new);
	}
	return 0;
}

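/*
 * Default region initializer, used when the platform does not provide
 * its own override (the symbol is __weak): allocate backing memory for
 * the shadow of [start, start + size) from memblock and map it page by
 * page in place of the early shadow.
 */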
int __init __weak kasan_init_region(void *start, size_t size)
{
	unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
	unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
	unsigned long k_cur;
	int ret;
	void *block;

	ret = kasan_init_shadow_page_tables(k_start, k_end);
	if (ret)
		return ret;

	k_start = k_start & PAGE_MASK;
	block = memblock_alloc(k_end - k_start, PAGE_SIZE);
	if (!block)
		return -ENOMEM;

	for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
		pmd_t *pmd = pmd_off_k(k_cur);
		void *va = block + k_cur - k_start;
		pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);

		__set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
	}
	flush_tlb_kernel_range(k_start, k_end);
	return 0;
}

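/*
 * Replace, page by page, every shadow PTE in [k_start, k_end) that still
 * points at the early shadow page with the supplied PTE value, then flush
 * the TLB over the range.
 */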
void __init
kasan_update_early_region(unsigned long k_start, unsigned long k_end, pte_t pte)
{
	unsigned long k_cur;

	for (k_cur = k_start; k_cur != k_end; k_cur += PAGE_SIZE) {
		pmd_t *pmd = pmd_off_k(k_cur);
		pte_t *ptep = pte_offset_kernel(pmd, k_cur);

		if (pte_page(*ptep) != virt_to_page(lm_alias(kasan_early_shadow_page)))
			continue;

		__set_pte_at(&init_mm, k_cur, ptep, pte, 0);
	}

	flush_tlb_kernel_range(k_start, k_end);
}

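/*
 * Remap the early shadow page read-only, both in the early shadow PTE
 * table itself and wherever the shadow area still maps it.
 */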
static void __init kasan_remap_early_shadow_ro(void)
{
	pgprot_t prot = kasan_prot_ro();
	phys_addr_t pa = __pa(kasan_early_shadow_page);

	kasan_populate_pte(kasan_early_shadow_pte, prot);

	kasan_update_early_region(KASAN_SHADOW_START, KASAN_SHADOW_END,
				  pfn_pte(PHYS_PFN(pa), prot));
}

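/*
 * Clear the early shadow mappings covering the vmalloc area (and the
 * module area when it is separate), so that KASAN_VMALLOC can populate
 * real shadow for those regions on demand.
 */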
static void __init kasan_unmap_early_shadow_vmalloc(void)
{
	unsigned long k_start = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_START);
	unsigned long k_end = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_END);

	kasan_update_early_region(k_start, k_end, __pte(0));

#ifdef MODULES_VADDR
	k_start = (unsigned long)kasan_mem_to_shadow((void *)MODULES_VADDR);
	k_end = (unsigned long)kasan_mem_to_shadow((void *)MODULES_END);
	kasan_update_early_region(k_start, k_end, __pte(0));
#endif
}

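/*
 * Called once the MMU is up: with a hash MMU, allocate the shadow page
 * tables for the whole shadow range now. A failure here is fatal.
 */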
void __init kasan_mmu_init(void)
{
	int ret;

	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
		ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);

		if (ret)
			panic("kasan: kasan_init_shadow_page_tables() failed");
	}
}

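/*
 * Main initialization: map real shadow memory for every lowmem memblock
 * region, cover the rest of the shadow range when KASAN_VMALLOC is
 * enabled, switch the early shadow page to read-only and zero it, then
 * enable KASAN reports.
 */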
void __init kasan_init(void)
{
	phys_addr_t base, end;
	u64 i;
	int ret;

	for_each_mem_range(i, &base, &end) {
		phys_addr_t top = min(end, total_lowmem);

		if (base >= top)
			continue;

		ret = kasan_init_region(__va(base), top - base);
		if (ret)
			panic("kasan: kasan_init_region() failed");
	}

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);

		if (ret)
			panic("kasan: kasan_init_shadow_page_tables() failed");
	}

	kasan_remap_early_shadow_ro();

	clear_page(kasan_early_shadow_page);

	/* At this point kasan is fully initialized. Enable error messages */
	init_task.kasan_depth = 0;
	pr_info("KASAN init done\n");
}

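/*
 * Late step: when KASAN_VMALLOC is enabled, drop the early shadow
 * mappings of the vmalloc and module areas.
 */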
void __init kasan_late_init(void)
{
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_unmap_early_shadow_vmalloc();
}

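/*
 * Very early setup: check that the shadow area is PGD aligned, then map
 * the whole shadow range to the zeroed early shadow page through the
 * shared early shadow PTE table.
 */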
void __init kasan_early_init(void)
{
	unsigned long addr = KASAN_SHADOW_START;
	unsigned long end = KASAN_SHADOW_END;
	unsigned long next;
	pmd_t *pmd = pmd_off_k(addr);

	BUILD_BUG_ON(KASAN_SHADOW_START & ~PGDIR_MASK);

	kasan_populate_pte(kasan_early_shadow_pte, PAGE_KERNEL);

	do {
		next = pgd_addr_end(addr, end);
		pmd_populate_kernel(&init_mm, pmd, kasan_early_shadow_pte);
	} while (pmd++, addr = next, addr != end);
}
193