xref: /linux/arch/riscv/mm/kasan_init.c (revision 34dc1baba215b826e454b8d19e4f24adbeb7d00d)
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation

#include <linux/pfn.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/pgalloc.h>

/*
 * The KASAN shadow region must lie at a fixed address across sv39, sv48 and
 * sv57, right below the kernel mapping.
 *
 * For sv39, the region is aligned on PGDIR_SIZE so we only need to populate
 * the page global directory with kasan_early_shadow_pmd.
 *
 * For sv48 and sv57, the region start is aligned on PGDIR_SIZE whereas the
 * region end is not, so we must go down to the PUD level.
 */

static pgd_t tmp_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
static p4d_t tmp_p4d[PTRS_PER_P4D] __page_aligned_bss;
static pud_t tmp_pud[PTRS_PER_PUD] __page_aligned_bss;

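/*
 * Populate the PTEs covering [vaddr, end) with newly allocated shadow
 * pages, allocating the PTE table itself first if the PMD entry is still
 * empty. Every new shadow page is filled with KASAN_SHADOW_INIT.
 */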
static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pte_t *ptep, *p;

	if (pmd_none(*pmd)) {
		p = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
		set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(p)), PAGE_TABLE));
	}

	ptep = pte_offset_kernel(pmd, vaddr);

	do {
		if (pte_none(*ptep)) {
			phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
			memset(__va(phys_addr), KASAN_SHADOW_INIT, PAGE_SIZE);
		}
	} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);
}

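/*
 * Populate the PMD entries covering [vaddr, end). When a chunk is
 * PMD-aligned, at least PMD-sized and the allocator can satisfy a PMD_SIZE
 * request, map it with a single large shadow mapping; otherwise fall back
 * to per-PTE population.
 */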
static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pmd_t *pmdp, *p;
	unsigned long next;

	if (pud_none(*pud)) {
		p = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
		set_pud(pud, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
	}

	pmdp = pmd_offset(pud, vaddr);

	do {
		next = pmd_addr_end(vaddr, end);

		if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
			phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
			if (phys_addr) {
				set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				memset(__va(phys_addr), KASAN_SHADOW_INIT, PMD_SIZE);
				continue;
			}
		}

		kasan_populate_pte(pmdp, vaddr, next);
	} while (pmdp++, vaddr = next, vaddr != end);
}

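/* Same strategy as kasan_populate_pmd(), one level up: try PUD_SIZE blocks first. */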
static void __init kasan_populate_pud(p4d_t *p4d,
				      unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pud_t *pudp, *p;
	unsigned long next;

	if (p4d_none(*p4d)) {
		p = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
		set_p4d(p4d, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
	}

	pudp = pud_offset(p4d, vaddr);

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
			phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
			if (phys_addr) {
				set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
				memset(__va(phys_addr), KASAN_SHADOW_INIT, PUD_SIZE);
				continue;
			}
		}

		kasan_populate_pmd(pudp, vaddr, next);
	} while (pudp++, vaddr = next, vaddr != end);
}

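/* Same strategy at the P4D level (only a distinct level when sv57 is in use). */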
static void __init kasan_populate_p4d(pgd_t *pgd,
				      unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	p4d_t *p4dp, *p;
	unsigned long next;

	if (pgd_none(*pgd)) {
		p = memblock_alloc(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE);
		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
	}

	p4dp = p4d_offset(pgd, vaddr);

	do {
		next = p4d_addr_end(vaddr, end);

		if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE) {
			phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);
			if (phys_addr) {
				set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL));
				memset(__va(phys_addr), KASAN_SHADOW_INIT, P4D_SIZE);
				continue;
			}
		}

		kasan_populate_pud(p4dp, vaddr, next);
	} while (p4dp++, vaddr = next, vaddr != end);
}

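/*
 * Top-level walk used to build the final shadow: map naturally aligned
 * PGDIR_SIZE chunks directly when memblock can provide them, otherwise
 * recurse down the page-table levels.
 */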
static void __init kasan_populate_pgd(pgd_t *pgdp,
				      unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
		    (next - vaddr) >= PGDIR_SIZE) {
			phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
			if (phys_addr) {
				set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				memset(__va(phys_addr), KASAN_SHADOW_INIT, PGDIR_SIZE);
				continue;
			}
		}

		kasan_populate_p4d(pgdp, vaddr, next);
	} while (pgdp++, vaddr = next, vaddr != end);
}

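/*
 * Tear the early shadow out of the PUD level. The range handed down here
 * is expected to be PUD-aligned; anything else means the early mapping was
 * built inconsistently, hence the BUG().
 */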
static void __init kasan_early_clear_pud(p4d_t *p4dp,
					 unsigned long vaddr, unsigned long end)
{
	pud_t *pudp, *base_pud;
	unsigned long next;

	if (!pgtable_l4_enabled) {
		pudp = (pud_t *)p4dp;
	} else {
		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
		pudp = base_pud + pud_index(vaddr);
	}

	do {
		next = pud_addr_end(vaddr, end);

		if (IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
			pud_clear(pudp);
			continue;
		}

		BUG();
	} while (pudp++, vaddr = next, vaddr != end);
}

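/* Clear whole P4D entries when alignment allows it, otherwise descend to PUDs. */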
static void __init kasan_early_clear_p4d(pgd_t *pgdp,
					 unsigned long vaddr, unsigned long end)
{
	p4d_t *p4dp, *base_p4d;
	unsigned long next;

	if (!pgtable_l5_enabled) {
		p4dp = (p4d_t *)pgdp;
	} else {
		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
		p4dp = base_p4d + p4d_index(vaddr);
	}

	do {
		next = p4d_addr_end(vaddr, end);

		if (pgtable_l4_enabled && IS_ALIGNED(vaddr, P4D_SIZE) &&
		    (next - vaddr) >= P4D_SIZE) {
			p4d_clear(p4dp);
			continue;
		}

		kasan_early_clear_pud(p4dp, vaddr, next);
	} while (p4dp++, vaddr = next, vaddr != end);
}

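/*
 * Entry point for removing the early shadow mapping before the final
 * shadow is installed: clear whole PGD entries where possible and recurse
 * for the unaligned remainder.
 */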
static void __init kasan_early_clear_pgd(pgd_t *pgdp,
					 unsigned long vaddr, unsigned long end)
{
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		if (pgtable_l5_enabled && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
		    (next - vaddr) >= PGDIR_SIZE) {
			pgd_clear(pgdp);
			continue;
		}

		kasan_early_clear_p4d(pgdp, vaddr, next);
	} while (pgdp++, vaddr = next, vaddr != end);
}

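/*
 * Early shadow setup at the PUD level: point every covered PUD entry at
 * the shared kasan_early_shadow_pmd table, so that the whole region
 * resolves to the zeroed kasan_early_shadow_page. The range must be
 * PUD-aligned here, hence the BUG() fallback.
 */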
static void __init kasan_early_populate_pud(p4d_t *p4dp,
					    unsigned long vaddr,
					    unsigned long end)
{
	pud_t *pudp, *base_pud;
	phys_addr_t phys_addr;
	unsigned long next;

	if (!pgtable_l4_enabled) {
		pudp = (pud_t *)p4dp;
	} else {
		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
		pudp = base_pud + pud_index(vaddr);
	}

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) &&
		    (next - vaddr) >= PUD_SIZE) {
			phys_addr = __pa((uintptr_t)kasan_early_shadow_pmd);
			set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
			continue;
		}

		BUG();
	} while (pudp++, vaddr = next, vaddr != end);
}

static void __init kasan_early_populate_p4d(pgd_t *pgdp,
					    unsigned long vaddr,
					    unsigned long end)
{
	p4d_t *p4dp, *base_p4d;
	phys_addr_t phys_addr;
	unsigned long next;

	/*
	 * We can't use pgd_page_vaddr here as it would return a linear
	 * mapping address, but the linear mapping is not yet in place.
	 * When populating early_pg_dir we need the physical address, and
	 * when populating swapper_pg_dir we need the kernel virtual
	 * address, so use the pt_ops facility, which handles both cases.
	 * Note that the result is completely equivalent to
	 * p4dp = p4d_offset(pgdp, vaddr).
	 */
	if (!pgtable_l5_enabled) {
		p4dp = (p4d_t *)pgdp;
	} else {
		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
		p4dp = base_p4d + p4d_index(vaddr);
	}

	do {
		next = p4d_addr_end(vaddr, end);

		if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) &&
		    (next - vaddr) >= P4D_SIZE) {
			phys_addr = __pa((uintptr_t)kasan_early_shadow_pud);
			set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));
			continue;
		}

		kasan_early_populate_pud(p4dp, vaddr, next);
	} while (p4dp++, vaddr = next, vaddr != end);
}

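/*
 * Early shadow top-level walk, used for both early_pg_dir and
 * swapper_pg_dir: point each covered PGD entry at the shared
 * kasan_early_shadow_p4d table, or descend when the range is not
 * PGDIR-aligned.
 */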
static void __init kasan_early_populate_pgd(pgd_t *pgdp,
					    unsigned long vaddr,
					    unsigned long end)
{
	phys_addr_t phys_addr;
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
		    (next - vaddr) >= PGDIR_SIZE) {
			phys_addr = __pa((uintptr_t)kasan_early_shadow_p4d);
			set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
			continue;
		}

		kasan_early_populate_p4d(pgdp, vaddr, next);
	} while (pgdp++, vaddr = next, vaddr != end);
}

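/*
 * Called before the final page tables are built: link all levels of the
 * early shadow page tables down to the single kasan_early_shadow_page so
 * that every KASAN check passes until kasan_init() populates the real
 * shadow memory.
 */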
asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;

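	/*
	 * Generic KASAN maps an address to its shadow as
	 * shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET,
	 * so the shadow of the top of the address space (1UL << 64) must
	 * land exactly on KASAN_SHADOW_END, which is what this asserts.
	 */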
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));

	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL));

	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN
				(__pa((uintptr_t)kasan_early_shadow_pte)),
				PAGE_TABLE));

	if (pgtable_l4_enabled) {
		for (i = 0; i < PTRS_PER_PUD; ++i)
			set_pud(kasan_early_shadow_pud + i,
				pfn_pud(PFN_DOWN
					(__pa(((uintptr_t)kasan_early_shadow_pmd))),
					PAGE_TABLE));
	}

	if (pgtable_l5_enabled) {
		for (i = 0; i < PTRS_PER_P4D; ++i)
			set_p4d(kasan_early_shadow_p4d + i,
				pfn_p4d(PFN_DOWN
					(__pa(((uintptr_t)kasan_early_shadow_pud))),
					PAGE_TABLE));
	}

	kasan_early_populate_pgd(early_pg_dir + pgd_index(KASAN_SHADOW_START),
				 KASAN_SHADOW_START, KASAN_SHADOW_END);

	local_flush_tlb_all();
}

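/* Install the early shadow into swapper_pg_dir before it is switched to. */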
void __init kasan_swapper_init(void)
{
	kasan_early_populate_pgd(pgd_offset_k(KASAN_SHADOW_START),
				 KASAN_SHADOW_START, KASAN_SHADOW_END);

	local_flush_tlb_all();
}

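/*
 * Populate the final shadow for [start, end), rounded outwards to page
 * boundaries. Callers pass shadow addresses obtained from
 * kasan_mem_to_shadow(), i.e. (addr >> KASAN_SHADOW_SCALE_SHIFT) +
 * KASAN_SHADOW_OFFSET, one shadow byte per 8 bytes of memory.
 */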
static void __init kasan_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_populate_pgd(pgd_offset_k(vaddr), vaddr, vend);
}

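/*
 * "Shallow" population used for the vmalloc and modules spaces with
 * CONFIG_KASAN_VMALLOC: only allocate the intermediate page tables, down
 * to the PUD level, and let the shadow itself be populated on demand when
 * vmalloc mappings are created.
 */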
static void __init kasan_shallow_populate_pud(p4d_t *p4d,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	pud_t *pud_k = pud_offset(p4d, vaddr);

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(*pud_k)) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pud(pud_k, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
			continue;
		}

		BUG();
	} while (pud_k++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate_p4d(pgd_t *pgd,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	p4d_t *p4d_k = p4d_offset(pgd, vaddr);

	do {
		next = p4d_addr_end(vaddr, end);

		if (p4d_none(*p4d_k)) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_p4d(p4d_k, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
			continue;
		}

		/* Bound the inner walk to the current P4D entry, as the PGD level does. */
		kasan_shallow_populate_pud(p4d_k, vaddr, next);
	} while (p4d_k++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	pgd_t *pgd_k = pgd_offset_k(vaddr);

	do {
		next = pgd_addr_end(vaddr, end);

		if (pgd_none(*pgd_k)) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
			continue;
		}

		kasan_shallow_populate_p4d(pgd_k, vaddr, next);
	} while (pgd_k++, vaddr = next, vaddr != end);
}

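/* Shallow-populate the shadow of [start, end), rounded outwards to page boundaries. */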
static void __init kasan_shallow_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_shallow_populate_pgd(vaddr, vend);
}

static void __init create_tmp_mapping(void)
{
	void *ptr;
	p4d_t *base_p4d;

	/*
	 * We need to clean the early mapping: this is hard to achieve "in-place",
	 * so install a temporary mapping like arm64 and x86 do.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(pgd_t) * PTRS_PER_PGD);

	/* Copy the last p4d since it is shared with the kernel mapping. */
	if (pgtable_l5_enabled) {
		ptr = (p4d_t *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
		memcpy(tmp_p4d, ptr, sizeof(p4d_t) * PTRS_PER_P4D);
		set_pgd(&tmp_pg_dir[pgd_index(KASAN_SHADOW_END)],
			pfn_pgd(PFN_DOWN(__pa(tmp_p4d)), PAGE_TABLE));
		base_p4d = tmp_p4d;
	} else {
		base_p4d = (p4d_t *)tmp_pg_dir;
	}

	/* Copy the last pud since it is shared with the kernel mapping. */
	if (pgtable_l4_enabled) {
		ptr = (pud_t *)p4d_page_vaddr(*(base_p4d + p4d_index(KASAN_SHADOW_END)));
		memcpy(tmp_pud, ptr, sizeof(pud_t) * PTRS_PER_PUD);
		set_p4d(&base_p4d[p4d_index(KASAN_SHADOW_END)],
			pfn_p4d(PFN_DOWN(__pa(tmp_pud)), PAGE_TABLE));
	}
}

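/*
 * Main KASAN setup, run once the final page tables exist: switch to a
 * temporary page table so the early shadow can be torn down, populate the
 * real shadow for the fixmap, vmalloc, modules and linear-mapping regions,
 * then switch back to swapper_pg_dir.
 */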
void __init kasan_init(void)
{
	phys_addr_t p_start, p_end;
	u64 i;

	create_tmp_mapping();
	csr_write(CSR_SATP, PFN_DOWN(__pa(tmp_pg_dir)) | satp_mode);

	kasan_early_clear_pgd(pgd_offset_k(KASAN_SHADOW_START),
			      KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_populate_early_shadow((void *)kasan_mem_to_shadow((void *)FIXADDR_START),
				    (void *)kasan_mem_to_shadow((void *)VMALLOC_START));

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
		/* Shallow populate modules and BPF which are vmalloc-allocated */
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)MODULES_VADDR),
			(void *)kasan_mem_to_shadow((void *)MODULES_END));
	} else {
		kasan_populate_early_shadow((void *)kasan_mem_to_shadow((void *)VMALLOC_START),
					    (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
	}

	/* Populate the linear mapping */
	for_each_mem_range(i, &p_start, &p_end) {
		void *start = (void *)__va(p_start);
		void *end = (void *)__va(p_end);

		if (start >= end)
			break;

		kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
	}

	/* Populate the shadow of the kernel image */
	kasan_populate(kasan_mem_to_shadow((const void *)MODULES_END),
		       kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));

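	/*
	 * Regions covered by kasan_populate_early_shadow() above keep
	 * pointing at the shared shadow page: remap it read-only so it can
	 * never be corrupted.
	 */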
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       __pgprot(_PAGE_PRESENT | _PAGE_READ |
					_PAGE_ACCESSED)));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	init_task.kasan_depth = 0;

	csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | satp_mode);
	local_flush_tlb_all();
}