xref: /linux/arch/arm64/mm/kasan_init.c (revision bd628c1bed7902ec1f24ba0fe70758949146abbe)
/*
 * This file contains kasan initialization code for ARM64.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

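/*
 * Temporary pgdir: installed in TTBR1 while the early shadow is torn down
 * and the real shadow is built. See the comment in kasan_init().
 */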
static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);

/*
 * The p*d_populate functions call virt_to_phys implicitly, so they can't be
 * used directly on kernel symbols (kasan_early_shadow_p*d). All the early
 * functions are called too early to use lm_alias, so the __p*d_populate
 * functions must be used instead, with physical addresses obtained from
 * __pa_symbol.
 */
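/*
 * Illustrative sketch of the distinction: pmd_populate_kernel(&init_mm,
 * pmdp, ptep) would translate ptep with virt_to_phys(), which is meant for
 * linear-map addresses and warns on kernel-image symbols under
 * CONFIG_DEBUG_VIRTUAL. For an image symbol the early code instead does:
 *
 *	__pmd_populate(pmdp, __pa_symbol(kasan_early_shadow_pte),
 *		       PMD_TYPE_TABLE);
 */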

static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
	void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
					 __pa(MAX_DMA_ADDRESS),
					 MEMBLOCK_ALLOC_KASAN, node);
	return __pa(p);
}

static phys_addr_t __init kasan_alloc_raw_page(int node)
{
	void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
					     __pa(MAX_DMA_ADDRESS),
					     MEMBLOCK_ALLOC_KASAN, node);
	return __pa(p);
}
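/*
 * The zeroed variant backs page tables, which must start out clear. The
 * raw variant backs the shadow pages themselves; the caller overwrites
 * them with KASAN_SHADOW_INIT (0 for generic KASAN, 0xff for software
 * tags), so pre-zeroing would be wasted work.
 */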
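/*
 * The kasan_p*_offset() helpers walk one level of the shadow page tables,
 * first installing a next-level table if the entry is empty. On the early
 * path the installed tables are the statically allocated
 * kasan_early_shadow_p* tables, so every walk ends at the single early
 * shadow page.
 */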
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
				      bool early)
{
	if (pmd_none(READ_ONCE(*pmdp))) {
		phys_addr_t pte_phys = early ?
				__pa_symbol(kasan_early_shadow_pte)
					: kasan_alloc_zeroed_page(node);
		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
	}

	return early ? pte_offset_kimg(pmdp, addr)
		     : pte_offset_kernel(pmdp, addr);
}

static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
				      bool early)
{
	if (pud_none(READ_ONCE(*pudp))) {
		phys_addr_t pmd_phys = early ?
				__pa_symbol(kasan_early_shadow_pmd)
					: kasan_alloc_zeroed_page(node);
		__pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE);
	}

	return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
}

static pud_t *__init kasan_pud_offset(pgd_t *pgdp, unsigned long addr, int node,
				      bool early)
{
	if (pgd_none(READ_ONCE(*pgdp))) {
		phys_addr_t pud_phys = early ?
				__pa_symbol(kasan_early_shadow_pud)
					: kasan_alloc_zeroed_page(node);
		__pgd_populate(pgdp, pud_phys, PMD_TYPE_TABLE);
	}

	return early ? pud_offset_kimg(pgdp, addr) : pud_offset(pgdp, addr);
}

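/*
 * Populate shadow leaf entries one page at a time. Note the loop
 * condition below: iteration continues only while the next PTE is still
 * empty, so population stops where an earlier mapping already provided
 * shadow.
 */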
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);

	do {
		phys_addr_t page_phys = early ?
				__pa_symbol(kasan_early_shadow_page)
					: kasan_alloc_raw_page(node);
		if (!early)
			memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE);
		next = addr + PAGE_SIZE;
		set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
	} while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));
}

static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);

	do {
		next = pmd_addr_end(addr, end);
		kasan_pte_populate(pmdp, addr, next, node, early);
	} while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
}

static void __init kasan_pud_populate(pgd_t *pgdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pud_t *pudp = kasan_pud_offset(pgdp, addr, node, early);

	do {
		next = pud_addr_end(addr, end);
		kasan_pmd_populate(pudp, addr, next, node, early);
	} while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
}

static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
				      int node, bool early)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_pud_populate(pgdp, addr, next, node, early);
	} while (pgdp++, addr = next, addr != end);
}

/* The early shadow maps everything to a single page of zeroes */
asmlinkage void __init kasan_early_init(void)
{
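	/*
	 * shadow(addr) = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET,
	 * so the shadow of the top of the 64-bit address space is
	 * KASAN_SHADOW_OFFSET + (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT));
	 * the first check below pins that value to KASAN_SHADOW_END.
	 */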
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
			   true);
}

/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
static void __init kasan_map_populate(unsigned long start, unsigned long end,
				      int node)
{
	kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}

/*
 * Copy the current shadow region into a new pgdir.
 */
void __init kasan_copy_shadow(pgd_t *pgdir)
{
	pgd_t *pgdp, *pgdp_new, *pgdp_end;

	pgdp = pgd_offset_k(KASAN_SHADOW_START);
	pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
	pgdp_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
	do {
		set_pgd(pgdp_new, READ_ONCE(*pgdp));
	} while (pgdp++, pgdp_new++, pgdp != pgdp_end);
}

static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	/*
	 * Remove references to the kasan page tables from swapper_pg_dir.
	 * pgd_clear() can't be used here because it is a no-op on 2- and
	 * 3-level page table setups.
	 */
	for (; start < end; start += PGDIR_SIZE)
		set_pgd(pgd_offset_k(start), __pgd(0));
}

void __init kasan_init(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start, mod_shadow_end;
	struct memblock_region *reg;
	int i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK;
	kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(_end));

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

	/*
	 * We are going to perform proper setup of shadow memory. First we
	 * must unmap the early shadow (the clear_pgds() call below), but
	 * instrumented code can't execute without shadow memory, so
	 * tmp_pg_dir is used to keep the early shadow mapped until the
	 * full shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	dsb(ishst);
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
			   early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));

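	/*
	 * Back the rest of the shadow region with the single early shadow
	 * page. The module area's shadow is deliberately left unpopulated
	 * here: it is mapped on demand at module load time (see
	 * kasan_module_alloc()).
	 */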
	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
				    (void *)mod_shadow_start);
	kasan_populate_early_shadow((void *)kimg_shadow_end,
				    kasan_mem_to_shadow((void *)PAGE_OFFSET));

	if (kimg_shadow_start > mod_shadow_end)
		kasan_populate_early_shadow((void *)mod_shadow_end,
					    (void *)kimg_shadow_start);

	for_each_memblock(memory, reg) {
		void *start = (void *)__phys_to_virt(reg->base);
		void *end = (void *)__phys_to_virt(reg->base + reg->size);

		if (start >= end)
			break;

		kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
				   (unsigned long)kasan_mem_to_shadow(end),
				   early_pfn_to_nid(virt_to_pfn(start)));
	}

	/*
	 * KASAN may reuse the contents of kasan_early_shadow_pte directly,
	 * so we should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			pfn_pte(sym_to_pfn(kasan_early_shadow_page),
				PAGE_KERNEL_RO));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));

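	/*
	 * With CONFIG_KASAN_SW_TAGS this seeds the per-CPU tag generator;
	 * for generic KASAN it is a no-op.
	 */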
	kasan_init_tags();

	/* At this point kasan is fully initialized. Enable error messages */
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}