// arch/loongarch/mm/kasan_init.c (revision ff124bbbca1d3a07fa1392ffdbbdeece71f68ece)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023 Loongson Technology Corporation Limited
 */
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/memblock.h>
#include <linux/sched/task.h>

#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm-generic/sections.h>

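/*
 * Staging PGD: kasan_init() copies swapper_pg_dir here and runs on the
 * copy while the kasan shadow entries of swapper_pg_dir are cleared and
 * repopulated, then switches back to swapper_pg_dir.
 */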
static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

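/*
 * __{pgd,p4d,pud,pmd,pte}_none(early, x): during early init an entry is
 * "none" when it is still zero; afterwards it is "none" when it still
 * points at the shared kasan_early_shadow_* table one level down, i.e.
 * no private table has been installed yet. Folded levels are never
 * "none".
 */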
#ifdef __PAGETABLE_P4D_FOLDED
#define __pgd_none(early, pgd) (0)
#else
#define __pgd_none(early, pgd) (early ? (pgd_val(pgd) == 0) : \
(__pa(pgd_val(pgd)) == (unsigned long)__pa(kasan_early_shadow_p4d)))
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define __p4d_none(early, p4d) (0)
#else
#define __p4d_none(early, p4d) (early ? (p4d_val(p4d) == 0) : \
(__pa(p4d_val(p4d)) == (unsigned long)__pa(kasan_early_shadow_pud)))
#endif

#ifdef __PAGETABLE_PMD_FOLDED
#define __pud_none(early, pud) (0)
#else
#define __pud_none(early, pud) (early ? (pud_val(pud) == 0) : \
(__pa(pud_val(pud)) == (unsigned long)__pa(kasan_early_shadow_pmd)))
#endif

#define __pmd_none(early, pmd) (early ? (pmd_val(pmd) == 0) : \
(__pa(pmd_val(pmd)) == (unsigned long)__pa(kasan_early_shadow_pte)))

#define __pte_none(early, pte) (early ? pte_none(pte) : \
((pte_val(pte) & _PFN_MASK) == (unsigned long)__pa(kasan_early_shadow_page)))

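/*
 * Translate a kernel virtual address to its shadow address: the top 16
 * bits (maddr >> XRANGE_SHIFT) select the address segment, each segment
 * has its own shadow offset, and the offset within the segment is scaled
 * down by KASAN_SHADOW_SCALE_SHIFT (one shadow byte covers
 * 1 << KASAN_SHADOW_SCALE_SHIFT bytes of memory, 8 for generic KASAN).
 * For a cached XKPRANGE address, for example:
 *
 *   shadow = ((maddr & XRANGE_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT)
 *            + XKPRANGE_CC_SHADOW_OFFSET
 *
 * Fixmap addresses are not tracked and share the zero shadow page.
 */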
static void *mem_to_shadow(const void *addr)
{
	unsigned long offset = 0;
	unsigned long maddr = (unsigned long)addr;
	unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;

	if (maddr >= FIXADDR_START)
		return (void *)(kasan_early_shadow_page);

	maddr &= XRANGE_SHADOW_MASK;
	switch (xrange) {
	case XKPRANGE_CC_SEG:
		offset = XKPRANGE_CC_SHADOW_OFFSET;
		break;
	case XKPRANGE_UC_SEG:
		offset = XKPRANGE_UC_SHADOW_OFFSET;
		break;
	case XKPRANGE_WC_SEG:
		offset = XKPRANGE_WC_SHADOW_OFFSET;
		break;
	case XKVRANGE_VC_SEG:
		offset = XKVRANGE_VC_SHADOW_OFFSET;
		break;
	default:
		WARN_ON(1);
		return NULL;
	}

	return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
}

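/*
 * Public wrapper: while KASAN is not yet fully initialized
 * (kasan_enabled() is false), redirect every translation to the shared
 * zero shadow page so that early accesses read as unpoisoned.
 */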
void *kasan_mem_to_shadow(const void *addr)
{
	if (kasan_enabled())
		return mem_to_shadow(addr);
	else
		return (void *)(kasan_early_shadow_page);
}

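/*
 * Inverse of mem_to_shadow(). The per-segment shadow offsets ascend from
 * XKPRANGE_CC to XKVRANGE_VC, so testing from the highest offset down
 * uniquely identifies the segment a shadow address belongs to.
 */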
const void *kasan_shadow_to_mem(const void *shadow_addr)
{
	unsigned long addr = (unsigned long)shadow_addr;

	if (unlikely(addr > KASAN_SHADOW_END) ||
		unlikely(addr < KASAN_SHADOW_START)) {
		WARN_ON(1);
		return NULL;
	}

	if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
		return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
	else if (addr >= XKPRANGE_WC_SHADOW_OFFSET)
		return (void *)(((addr - XKPRANGE_WC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_WC_START);
	else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
		return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
	else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
		return (void *)(((addr - XKPRANGE_CC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_START);
	else {
		WARN_ON(1);
		return NULL;
	}
}

/*
 * Allocate a zeroed page for a shadow page table; failure is fatal this
 * early in boot, so panic rather than return an error.
 */
static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
	void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
					__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);
	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
			__func__, PAGE_SIZE, PAGE_SIZE, node, __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}

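/*
 * The kasan_*_offset() helpers below share one pattern: if the entry at
 * this level is still "none" (see __*_none() above), install a new table,
 * either the shared early shadow table (early init) or a freshly
 * allocated page. The fresh page is seeded from the early shadow table so
 * that slots which are never repopulated keep mapping the zero shadow
 * page.
 */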
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node, bool early)
{
	if (__pmd_none(early, pmdp_get(pmdp))) {
		phys_addr_t pte_phys = early ?
				__pa_symbol(kasan_early_shadow_pte) : kasan_alloc_zeroed_page(node);
		if (!early)
			memcpy(__va(pte_phys), kasan_early_shadow_pte, sizeof(kasan_early_shadow_pte));
		pmd_populate_kernel(NULL, pmdp, (pte_t *)__va(pte_phys));
	}

	return pte_offset_kernel(pmdp, addr);
}

static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, bool early)
{
	if (__pud_none(early, pudp_get(pudp))) {
		phys_addr_t pmd_phys = early ?
				__pa_symbol(kasan_early_shadow_pmd) : kasan_alloc_zeroed_page(node);
		if (!early)
			memcpy(__va(pmd_phys), kasan_early_shadow_pmd, sizeof(kasan_early_shadow_pmd));
		pud_populate(&init_mm, pudp, (pmd_t *)__va(pmd_phys));
	}

	return pmd_offset(pudp, addr);
}

static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node, bool early)
{
	if (__p4d_none(early, p4dp_get(p4dp))) {
		phys_addr_t pud_phys = early ?
			__pa_symbol(kasan_early_shadow_pud) : kasan_alloc_zeroed_page(node);
		if (!early)
			memcpy(__va(pud_phys), kasan_early_shadow_pud, sizeof(kasan_early_shadow_pud));
		p4d_populate(&init_mm, p4dp, (pud_t *)__va(pud_phys));
	}

	return pud_offset(p4dp, addr);
}

static p4d_t *__init kasan_p4d_offset(pgd_t *pgdp, unsigned long addr, int node, bool early)
{
	if (__pgd_none(early, pgdp_get(pgdp))) {
		phys_addr_t p4d_phys = early ?
			__pa_symbol(kasan_early_shadow_p4d) : kasan_alloc_zeroed_page(node);
		if (!early)
			memcpy(__va(p4d_phys), kasan_early_shadow_p4d, sizeof(kasan_early_shadow_p4d));
		pgd_populate(&init_mm, pgdp, (p4d_t *)__va(p4d_phys));
	}

	return p4d_offset(pgdp, addr);
}

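/*
 * The kasan_*_populate() walkers below descend one level per call. Note
 * the loop condition: besides stopping at 'end', the walk also stops as
 * soon as the next entry is no longer "none", i.e. it already points to
 * a private table that must not be overwritten.
 */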
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);

	do {
		phys_addr_t page_phys = early ?
					__pa_symbol(kasan_early_shadow_page) :
					kasan_alloc_zeroed_page(node);
		next = addr + PAGE_SIZE;
		set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
	} while (ptep++, addr = next, addr != end && __pte_none(early, ptep_get(ptep)));
}

static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);

	do {
		next = pmd_addr_end(addr, end);
		kasan_pte_populate(pmdp, addr, next, node, early);
	} while (pmdp++, addr = next, addr != end && __pmd_none(early, pmdp_get(pmdp)));
}

static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);

	do {
		next = pud_addr_end(addr, end);
		kasan_pmd_populate(pudp, addr, next, node, early);
	} while (pudp++, addr = next, addr != end && __pud_none(early, pudp_get(pudp)));
}

static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	p4d_t *p4dp = kasan_p4d_offset(pgdp, addr, node, early);

	do {
		next = p4d_addr_end(addr, end);
		kasan_pud_populate(p4dp, addr, next, node, early);
	} while (p4dp++, addr = next, addr != end && __p4d_none(early, p4dp_get(p4dp)));
}

static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
				      int node, bool early)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);

	do {
		next = pgd_addr_end(addr, end);
		kasan_p4d_populate(pgdp, addr, next, node, early);
	} while (pgdp++, addr = next, addr != end);
}

/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
static void __init kasan_map_populate(unsigned long start, unsigned long end,
				      int node)
{
	kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}

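/*
 * Nothing is mapped this early; just check at build time that the shadow
 * region is PGDIR-aligned, which the PGD-level clearing and population
 * in kasan_init() rely on.
 */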
asmlinkage void __init kasan_early_init(void)
{
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END + 1, PGDIR_SIZE));
}

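/* Use WRITE_ONCE() so the cleared PGD entry cannot be observed torn. */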
static inline void kasan_set_pgd(pgd_t *pgdp, pgd_t pgdval)
{
	WRITE_ONCE(*pgdp, pgdval);
}

static void __init clear_pgds(unsigned long start, unsigned long end)
{
	/*
	 * Remove references to the kasan page tables from swapper_pg_dir.
	 * pgd_clear() can't be used here because it is a no-op on 2- and
	 * 3-level page table setups.
	 */
	for (; start < end; start = pgd_addr_end(start, end))
		kasan_set_pgd((pgd_t *)pgd_offset_k(start), __pgd(0));
}

void __init kasan_init(void)
{
	u64 i;
	phys_addr_t pa_start, pa_end;

	/*
	 * If PGDIR_SIZE is too large for cpu_vabits, KASAN_SHADOW_END will
	 * overflow UINTPTR_MAX and then look like a user space address.
	 * For example, PGDIR_SIZE of CONFIG_4KB_4LEVEL is 2^39, which is too
	 * large for Loongson-2K series whose cpu_vabits = 39.
	 */
	if (KASAN_SHADOW_END < vm_map_base) {
		pr_warn("PGDIR_SIZE too large for cpu_vabits, KernelAddressSanitizer disabled.\n");
		return;
	}

	/*
	 * The PGD entries were populated with invalid_pmd_table or
	 * invalid_pud_table in pagetable_init(), depending on how many
	 * page table levels are in use. The kasan shadow entries of
	 * swapper_pg_dir therefore have to be cleared here: their values
	 * are non-zero, so pgd_none() would be false and the populate
	 * pass below would never create any new tables. Meanwhile, run
	 * on a copy of swapper_pg_dir while its entries are rewritten.
	 */
	memcpy(kasan_pg_dir, swapper_pg_dir, sizeof(kasan_pg_dir));
	csr_write64(__pa_symbol(kasan_pg_dir), LOONGARCH_CSR_PGDH);
	local_flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	/* Maps everything to a single page of zeroes */
	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE, true);

	kasan_populate_early_shadow(mem_to_shadow((void *)VMALLOC_START),
					mem_to_shadow((void *)KFENCE_AREA_END));

	/* Populate the linear mapping */
	for_each_mem_range(i, &pa_start, &pa_end) {
		void *start = (void *)phys_to_virt(pa_start);
		void *end   = (void *)phys_to_virt(pa_end);

		if (start >= end)
			break;

		kasan_map_populate((unsigned long)mem_to_shadow(start),
			(unsigned long)mem_to_shadow(end), NUMA_NO_NODE);
	}

	/* Populate the modules mapping */
	kasan_map_populate((unsigned long)mem_to_shadow((void *)MODULES_VADDR),
		(unsigned long)mem_to_shadow((void *)MODULES_END), NUMA_NO_NODE);

	/*
	 * KASAN may reuse the contents of kasan_early_shadow_pte directly,
	 * so we should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			pfn_pte(__phys_to_pfn(__pa_symbol(kasan_early_shadow_page)), PAGE_KERNEL_RO));

	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
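	/* Switch back to swapper_pg_dir, whose shadow entries are now valid. */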
	csr_write64(__pa_symbol(swapper_pg_dir), LOONGARCH_CSR_PGDH);
	local_flush_tlb_all();

	/* At this point kasan is fully initialized. Enable error messages */
	init_task.kasan_depth = 0;
	kasan_init_generic();
}
334