xref: /linux/arch/xtensa/mm/kasan_init.c (revision 4e0ae876f77bc01a7e77724dea57b4b82bd53244)
1 /*
2  * Xtensa KASAN shadow map initialization
3  *
4  * This file is subject to the terms and conditions of the GNU General Public
5  * License.  See the file "COPYING" in the main directory of this archive
6  * for more details.
7  *
8  * Copyright (C) 2017 Cadence Design Systems Inc.
9  */
10 
11 #include <linux/memblock.h>
12 #include <linux/init_task.h>
13 #include <linux/kasan.h>
14 #include <linux/kernel.h>
15 #include <asm/initialize_mmu.h>
16 #include <asm/tlbflush.h>
17 #include <asm/traps.h>
18 
/*
 * Set up a minimal KASAN shadow mapping for early boot: every PMD that
 * covers the shadow region is pointed at a single shared PTE table
 * (kasan_early_shadow_pte), whose entries all map the one shared
 * kasan_early_shadow_page.  This makes all shadow reads/writes valid
 * before the real shadow map is built by kasan_init().
 */
19 void __init kasan_early_init(void)
20 {
21 	unsigned long vaddr = KASAN_SHADOW_START;
22 	pgd_t *pgd = pgd_offset_k(vaddr);
23 	pmd_t *pmd = pmd_offset(pgd, vaddr);
24 	int i;
25 
	/*
	 * Fill the shared early PTE table: every slot maps the same
	 * kasan_early_shadow_page.  It is writable (PAGE_KERNEL) here;
	 * kasan_init() later remaps it read-only.
	 */
26 	for (i = 0; i < PTRS_PER_PTE; ++i)
27 		set_pte(kasan_early_shadow_pte + i,
28 			mk_pte(virt_to_page(kasan_early_shadow_page),
29 				PAGE_KERNEL));
30 
	/*
	 * Install the shared PTE table into each PMD spanning the shadow
	 * region.  Note vaddr is reused here as a plain byte counter from
	 * 0 to KASAN_SHADOW_SIZE; pmd was already positioned at
	 * KASAN_SHADOW_START above.  Each PMD must still be empty.
	 */
31 	for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) {
32 		BUG_ON(!pmd_none(*pmd));
33 		set_pmd(pmd, __pmd((unsigned long)kasan_early_shadow_pte));
34 	}
	/* Exception handlers can now run with shadow accesses resolving. */
35 	early_trap_init();
36 }
37 
/*
 * Build a real (per-page) shadow mapping for the virtual range
 * [start, end): allocate a PTE table covering the whole range from
 * memblock, back every PTE with a freshly allocated physical page,
 * hook the PTE table into the corresponding PMDs, then zero the shadow
 * through the new mapping.
 *
 * Assumes start/end are PMD-aligned so that n_pages divides evenly
 * into whole PTE tables (n_pmds) — TODO confirm against callers.
 */
38 static void __init populate(void *start, void *end)
39 {
40 	unsigned long n_pages = (end - start) / PAGE_SIZE;
41 	unsigned long n_pmds = n_pages / PTRS_PER_PTE;
42 	unsigned long i, j;
43 	unsigned long vaddr = (unsigned long)start;
44 	pgd_t *pgd = pgd_offset_k(vaddr);
45 	pmd_t *pmd = pmd_offset(pgd, vaddr);
	/* One contiguous array of PTEs covering the whole range. */
46 	pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
47 
48 	if (!pte)
49 		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
50 		      __func__, n_pages * sizeof(pte_t), PAGE_SIZE);
51 
52 	pr_debug("%s: %p - %p\n", __func__, start, end);
53 
	/* Back each shadow page with its own physical page. */
54 	for (i = j = 0; i < n_pmds; ++i) {
55 		int k;
56 
57 		for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
58 			phys_addr_t phys =
59 				memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
60 
61 			if (!phys)
62 				panic("Failed to allocate page table page\n");
63 
64 			set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
65 		}
66 	}
67 
	/*
	 * Point each PMD at its PTRS_PER_PTE-entry slice of the PTE array,
	 * replacing the early shared mapping installed by
	 * kasan_early_init().
	 */
68 	for (i = 0; i < n_pmds ; ++i, pte += PTRS_PER_PTE)
69 		set_pmd(pmd + i, __pmd((unsigned long)pte));
70 
	/*
	 * Flush stale early-shadow translations before touching the range,
	 * then zero the new shadow memory through the fresh mapping.
	 */
71 	local_flush_tlb_all();
72 	memset(start, 0, end - start);
73 }
74 
/*
 * Finish KASAN initialization: replace the early shared shadow mapping
 * with real per-page shadow for the VMALLOC..KSEG-bypass range, make
 * the early shadow page read-only, and enable error reporting.
 */
75 void __init kasan_init(void)
76 {
77 	int i;
78 
	/* Sanity-check the compile-time shadow layout constants. */
79 	BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_START -
80 		     (KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT));
81 	BUILD_BUG_ON(VMALLOC_START < KASAN_START_VADDR);
82 
83 	/*
84 	 * Replace shadow map pages that cover addresses from VMALLOC area
85 	 * start to the end of KSEG with clean writable pages.
86 	 */
87 	populate(kasan_mem_to_shadow((void *)VMALLOC_START),
88 		 kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR));
89 
90 	/*
91 	 * Write protect kasan_early_shadow_page and zero-initialize it again.
92 	 */
93 	for (i = 0; i < PTRS_PER_PTE; ++i)
94 		set_pte(kasan_early_shadow_pte + i,
95 			mk_pte(virt_to_page(kasan_early_shadow_page),
96 				PAGE_KERNEL_RO));
97 
	/* Flush old writable translations, then zero via a kernel alias
	 * of the page (the shadow alias is now read-only). */
98 	local_flush_tlb_all();
99 	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
100 
101 	/* At this point kasan is fully initialized. Enable error messages. */
102 	current->kasan_depth = 0;
103 	pr_info("KernelAddressSanitizer initialized\n");
104 }
105