xref: /linux/arch/riscv/mm/kasan_init.c (revision c6ca7616f7d5c2ce166280107ba74db1d528fcb7)
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation

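/*
 * KASAN shadow memory setup for RISC-V.
 *
 * kasan_early_init() maps the whole shadow region to a single zero page
 * through shared early page tables so instrumented code can run during
 * boot; kasan_init() later replaces that mapping with real shadow memory
 * for the regions that need it.
 */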
#include <linux/pfn.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/pgalloc.h>

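/*
 * Allocate zeroed, size-aligned memory from memblock during early boot;
 * there is no recovery path this early, so panic if the allocation fails.
 */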
static __init void *early_alloc(size_t size, int node)
{
	void *ptr = memblock_alloc_try_nid(size, size,
		__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);

	if (!ptr)
		panic("%pS: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n",
			__func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));

	return ptr;
}

extern pgd_t early_pg_dir[PTRS_PER_PGD];
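/*
 * Runs before the final page tables are live: map the entire KASAN shadow
 * region, in both early_pg_dir and swapper_pg_dir, onto the single
 * kasan_early_shadow_page through shared early PMD/PTE tables, so
 * instrumented code can execute before real shadow memory exists.
 */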
asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;
	pgd_t *pgd = early_pg_dir + pgd_index(KASAN_SHADOW_START);

	/* Point every early shadow PTE at the zero shadow page. */
	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       PAGE_KERNEL));

	/* Point every early shadow PMD at the shared PTE table above. */
	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN(__pa((uintptr_t) kasan_early_shadow_pte)),
				__pgprot(_PAGE_TABLE)));

	/* Map the whole shadow region through the shared PMD table. */
	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
	     i += PGDIR_SIZE, ++pgd)
		set_pgd(pgd,
			pfn_pgd(PFN_DOWN(__pa((uintptr_t) kasan_early_shadow_pmd)),
				__pgprot(_PAGE_TABLE)));

	/* init for swapper_pg_dir */
	pgd = pgd_offset_k(KASAN_SHADOW_START);

	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
	     i += PGDIR_SIZE, ++pgd)
		set_pgd(pgd,
			pfn_pgd(PFN_DOWN(__pa((uintptr_t) kasan_early_shadow_pmd)),
				__pgprot(_PAGE_TABLE)));

	local_flush_tlb_all();
}

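/*
 * Back the shadow range [start, end) with real memory: allocate backing
 * pages plus the PTE/PMD tables needed to map them, wire everything into
 * the kernel page tables, then clear the freshly mapped shadow.
 */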
static void __init populate(void *start, void *end)
{
	unsigned long i, offset;
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);
	unsigned long n_pages = (vend - vaddr) / PAGE_SIZE;
	unsigned long n_ptes =
	    ((n_pages + PTRS_PER_PTE) & -PTRS_PER_PTE) / PTRS_PER_PTE;
	unsigned long n_pmds =
	    ((n_ptes + PTRS_PER_PMD) & -PTRS_PER_PMD) / PTRS_PER_PMD;

	pte_t *pte =
	    memblock_alloc(n_ptes * PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
	pmd_t *pmd =
	    memblock_alloc(n_pmds * PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
	pgd_t *pgd = pgd_offset_k(vaddr);

	/* Allocate and map one backing page per shadow page. */
	for (i = 0; i < n_pages; i++) {
		phys_addr_t phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
		set_pte(&pte[i], pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
	}

	/* Wire the new PTE tables into the new PMD tables ... */
	for (i = 0, offset = 0; i < n_ptes; i++, offset += PTRS_PER_PTE)
		set_pmd(&pmd[i],
			pfn_pmd(PFN_DOWN(__pa(&pte[offset])),
				__pgprot(_PAGE_TABLE)));

	/* ... and the PMD tables into the kernel page directory. */
	for (i = 0, offset = 0; i < n_pmds; i++, offset += PTRS_PER_PMD)
		set_pgd(&pgd[i],
			pfn_pgd(PFN_DOWN(__pa(&pmd[offset])),
				__pgprot(_PAGE_TABLE)));

	local_flush_tlb_all();
	memset(start, 0, end - start);
}

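/*
 * With CONFIG_KASAN_VMALLOC the vmalloc shadow is not backed up front:
 * only the upper page-table levels are populated here, so leaf shadow
 * pages can be hooked in on demand when vmalloc memory is mapped later.
 */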
static void __init kasan_shallow_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);
	int index;
	void *p;
	pud_t *pud_dir, *pud_k;
	pgd_t *pgd_dir, *pgd_k;
	p4d_t *p4d_dir, *p4d_k;

	while (vaddr < vend) {
		index = pgd_index(vaddr);
		pgd_k = init_mm.pgd + index;
		pgd_dir = pgd_offset_k(vaddr);
		set_pgd(pgd_dir, *pgd_k);

		p4d_dir = p4d_offset(pgd_dir, vaddr);
		p4d_k = p4d_offset(pgd_k, vaddr);

		vaddr = (vaddr + PUD_SIZE) & PUD_MASK;
		pud_dir = pud_offset(p4d_dir, vaddr);
		pud_k = pud_offset(p4d_k, vaddr);

		if (pud_present(*pud_dir)) {
			p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
			pud_populate(&init_mm, pud_dir, p);
		}
		vaddr += PAGE_SIZE;
	}

	local_flush_tlb_all();
}

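/*
 * Final KASAN setup: map the shadow of everything up to the vmemmap region
 * and of the vmalloc area to the zero shadow page (or shallowly populate
 * the latter for KASAN_VMALLOC), back the shadow of all present RAM with
 * real pages, then write-protect the zero shadow page and enable reporting.
 */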
void __init kasan_init(void)
{
	phys_addr_t _start, _end;
	u64 i;

	/* Map the shadow of everything up to vmemmap to the zero page. */
	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
				    (void *)kasan_mem_to_shadow((void *)
								VMEMMAP_END));
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
	else
		kasan_populate_early_shadow(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));

	/*
	 * memblock ranges are physical addresses: populate real shadow for
	 * their linear-map aliases.
	 */
	for_each_mem_range(i, &_start, &_end) {
		void *start = (void *)__va(_start);
		void *end = (void *)__va(_end);

		if (start >= end)
			break;

		populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
	}

	/* The zero shadow page is now shared by everyone: make it read-only. */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       __pgprot(_PAGE_PRESENT | _PAGE_READ |
					_PAGE_ACCESSED)));

	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
	init_task.kasan_depth = 0;
}