// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation

#include <linux/pfn.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/pgalloc.h>

/*
 * The KASAN shadow region must lie at a fixed address across sv39, sv48 and
 * sv57, which is right before the kernel.
 *
 * For sv39, the region is aligned on PGDIR_SIZE so we only need to populate
 * the page global directory with kasan_early_shadow_pmd.
 *
 * For sv48 and sv57, the start of the region is aligned on PGDIR_SIZE whereas
 * its end is not, so we have to go down to the PUD level.
 */
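
/*
 * As a reminder, generic KASAN maps each 8 bytes of memory onto one shadow
 * byte:
 *
 *	shadow(addr) = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 *
 * so shadowing the full 64-bit address space takes
 * 1UL << (64 - KASAN_SHADOW_SCALE_SHIFT) bytes of shadow ending at
 * KASAN_SHADOW_END, which is exactly what the BUILD_BUG_ON() in
 * kasan_early_init() checks.
 */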

extern pgd_t early_pg_dir[PTRS_PER_PGD];
pgd_t tmp_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
p4d_t tmp_p4d[PTRS_PER_P4D] __page_aligned_bss;
pud_t tmp_pud[PTRS_PER_PUD] __page_aligned_bss;

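/*
 * Populate the leaf level of the real shadow mapping: allocate a PTE table
 * if the PMD entry is empty, then back every still-unmapped page in
 * [vaddr, end) with freshly allocated memory filled with KASAN_SHADOW_INIT,
 * so the covered range starts out unpoisoned.
 */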
static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pte_t *ptep, *p;

	if (pmd_none(*pmd)) {
		p = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
		set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(p)), PAGE_TABLE));
	}

	ptep = pte_offset_kernel(pmd, vaddr);

	do {
		if (pte_none(*ptep)) {
			phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
			memset(__va(phys_addr), KASAN_SHADOW_INIT, PAGE_SIZE);
		}
	} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);
}

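/*
 * Populate the shadow at PMD level: whenever a naturally aligned PMD_SIZE
 * chunk fits in [vaddr, end), try to map it with a single huge leaf PMD to
 * save page tables; otherwise, or if the PMD_SIZE allocation fails, fall
 * back to populating individual PTEs.
 */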
static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pmd_t *pmdp, *p;
	unsigned long next;

	if (pud_none(*pud)) {
		p = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
		set_pud(pud, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
	}

	pmdp = pmd_offset(pud, vaddr);

	do {
		next = pmd_addr_end(vaddr, end);

		if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
			phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
			if (phys_addr) {
				set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				memset(__va(phys_addr), KASAN_SHADOW_INIT, PMD_SIZE);
				continue;
			}
		}

		kasan_populate_pte(pmdp, vaddr, next);
	} while (pmdp++, vaddr = next, vaddr != end);
}

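/*
 * kasan_populate_pud/p4d/pgd apply the same strategy one level up each
 * time: install a huge leaf mapping whenever alignment and size allow, and
 * descend one level otherwise. On sv39/sv48 the unused upper levels are
 * folded at runtime, so their *_none() checks fail and the walk simply
 * falls through to the first level that really exists.
 */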
static void __init kasan_populate_pud(p4d_t *p4d,
				      unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pud_t *pudp, *p;
	unsigned long next;

	if (p4d_none(*p4d)) {
		p = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
		set_p4d(p4d, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
	}

	pudp = pud_offset(p4d, vaddr);

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
			phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
			if (phys_addr) {
				set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
				memset(__va(phys_addr), KASAN_SHADOW_INIT, PUD_SIZE);
				continue;
			}
		}

		kasan_populate_pmd(pudp, vaddr, next);
	} while (pudp++, vaddr = next, vaddr != end);
}

static void __init kasan_populate_p4d(pgd_t *pgd,
				      unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	p4d_t *p4dp, *p;
	unsigned long next;

	if (pgd_none(*pgd)) {
		p = memblock_alloc(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE);
		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
	}

	p4dp = p4d_offset(pgd, vaddr);

	do {
		next = p4d_addr_end(vaddr, end);

		if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE) {
			phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);
			if (phys_addr) {
				set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL));
				memset(__va(phys_addr), KASAN_SHADOW_INIT, P4D_SIZE);
				continue;
			}
		}

		kasan_populate_pud(p4dp, vaddr, next);
	} while (p4dp++, vaddr = next, vaddr != end);
}

static void __init kasan_populate_pgd(pgd_t *pgdp,
				      unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
		    (next - vaddr) >= PGDIR_SIZE) {
			phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
			if (phys_addr) {
				set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				memset(__va(phys_addr), KASAN_SHADOW_INIT, PGDIR_SIZE);
				continue;
			}
		}

		kasan_populate_p4d(pgdp, vaddr, next);
	} while (pgdp++, vaddr = next, vaddr != end);
}

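/*
 * The kasan_early_clear_* helpers tear down the early shadow mapping so
 * that kasan_init() can repopulate the region with real shadow memory.
 * The early mapping is only ever installed at PUD granularity or above,
 * hence the BUG() if a chunk cannot be cleared at PUD granularity.
 */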
static void __init kasan_early_clear_pud(p4d_t *p4dp,
					 unsigned long vaddr, unsigned long end)
{
	pud_t *pudp, *base_pud;
	unsigned long next;

	if (!pgtable_l4_enabled) {
		pudp = (pud_t *)p4dp;
	} else {
		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
		pudp = base_pud + pud_index(vaddr);
	}

	do {
		next = pud_addr_end(vaddr, end);

		if (IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
			pud_clear(pudp);
			continue;
		}

		BUG();
	} while (pudp++, vaddr = next, vaddr != end);
}

static void __init kasan_early_clear_p4d(pgd_t *pgdp,
					 unsigned long vaddr, unsigned long end)
{
	p4d_t *p4dp, *base_p4d;
	unsigned long next;

	if (!pgtable_l5_enabled) {
		p4dp = (p4d_t *)pgdp;
	} else {
		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
		p4dp = base_p4d + p4d_index(vaddr);
	}

	do {
		next = p4d_addr_end(vaddr, end);

		if (pgtable_l4_enabled && IS_ALIGNED(vaddr, P4D_SIZE) &&
		    (next - vaddr) >= P4D_SIZE) {
			p4d_clear(p4dp);
			continue;
		}

		kasan_early_clear_pud(p4dp, vaddr, next);
	} while (p4dp++, vaddr = next, vaddr != end);
}

static void __init kasan_early_clear_pgd(pgd_t *pgdp,
					 unsigned long vaddr, unsigned long end)
{
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		if (pgtable_l5_enabled && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
		    (next - vaddr) >= PGDIR_SIZE) {
			pgd_clear(pgdp);
			continue;
		}

		kasan_early_clear_p4d(pgdp, vaddr, next);
	} while (pgdp++, vaddr = next, vaddr != end);
}

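/*
 * The kasan_early_populate_* helpers map the whole shadow region onto the
 * single kasan_early_shadow_page by wiring each level to the shared
 * kasan_early_shadow_{pmd,pud,p4d} tables set up in kasan_early_init().
 * With runtime folding, the walk falls through to the first enabled level,
 * so on sv39 for instance the PGD entries (seen here as folded PUDs) end up
 * pointing directly at kasan_early_shadow_pmd.
 */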
static void __init kasan_early_populate_pud(p4d_t *p4dp,
					    unsigned long vaddr,
					    unsigned long end)
{
	pud_t *pudp, *base_pud;
	phys_addr_t phys_addr;
	unsigned long next;

	if (!pgtable_l4_enabled) {
		pudp = (pud_t *)p4dp;
	} else {
		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
		pudp = base_pud + pud_index(vaddr);
	}

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) &&
		    (next - vaddr) >= PUD_SIZE) {
			phys_addr = __pa((uintptr_t)kasan_early_shadow_pmd);
			set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
			continue;
		}

		BUG();
	} while (pudp++, vaddr = next, vaddr != end);
}

static void __init kasan_early_populate_p4d(pgd_t *pgdp,
					    unsigned long vaddr,
					    unsigned long end)
{
	p4d_t *p4dp, *base_p4d;
	phys_addr_t phys_addr;
	unsigned long next;

	/*
	 * We can't use pgd_page_vaddr here as it would return a linear
	 * mapping address, but the linear mapping is not mapped yet:
	 * when populating early_pg_dir we need the physical address, and
	 * when populating swapper_pg_dir we need the kernel virtual
	 * address, so use the pt_ops facility for both.
	 * Note that this is then completely equivalent to
	 * p4dp = p4d_offset(pgdp, vaddr)
	 */
	if (!pgtable_l5_enabled) {
		p4dp = (p4d_t *)pgdp;
	} else {
		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
		p4dp = base_p4d + p4d_index(vaddr);
	}

	do {
		next = p4d_addr_end(vaddr, end);

		if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) &&
		    (next - vaddr) >= P4D_SIZE) {
			phys_addr = __pa((uintptr_t)kasan_early_shadow_pud);
			set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));
			continue;
		}

		kasan_early_populate_pud(p4dp, vaddr, next);
	} while (p4dp++, vaddr = next, vaddr != end);
}

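/*
 * Top level of the early walk: point whole PGDIR_SIZE chunks at
 * kasan_early_shadow_p4d and push everything else down to the P4D helper.
 */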
static void __init kasan_early_populate_pgd(pgd_t *pgdp,
					    unsigned long vaddr,
					    unsigned long end)
{
	phys_addr_t phys_addr;
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
		    (next - vaddr) >= PGDIR_SIZE) {
			phys_addr = __pa((uintptr_t)kasan_early_shadow_p4d);
			set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
			continue;
		}

		kasan_early_populate_p4d(pgdp, vaddr, next);
	} while (pgdp++, vaddr = next, vaddr != end);
}

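/*
 * Build the early shadow: every PTE of kasan_early_shadow_pte points at the
 * same kasan_early_shadow_page, every PMD at that PTE table, and so on for
 * each level that is actually enabled. The whole shadow region is then
 * mapped through early_pg_dir, so any shadow access hits the one shared
 * page until kasan_init() installs the real shadow.
 */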
asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;

	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));

	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL));

	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN
				(__pa((uintptr_t)kasan_early_shadow_pte)),
				PAGE_TABLE));

	if (pgtable_l4_enabled) {
		for (i = 0; i < PTRS_PER_PUD; ++i)
			set_pud(kasan_early_shadow_pud + i,
				pfn_pud(PFN_DOWN
					(__pa(((uintptr_t)kasan_early_shadow_pmd))),
					PAGE_TABLE));
	}

	if (pgtable_l5_enabled) {
		for (i = 0; i < PTRS_PER_P4D; ++i)
			set_p4d(kasan_early_shadow_p4d + i,
				pfn_p4d(PFN_DOWN
					(__pa(((uintptr_t)kasan_early_shadow_pud))),
					PAGE_TABLE));
	}

	kasan_early_populate_pgd(early_pg_dir + pgd_index(KASAN_SHADOW_START),
				 KASAN_SHADOW_START, KASAN_SHADOW_END);

	local_flush_tlb_all();
}

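/*
 * Mirror the early shadow into swapper_pg_dir so that the shadow region
 * stays mapped once the kernel switches away from early_pg_dir.
 */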
void __init kasan_swapper_init(void)
{
	kasan_early_populate_pgd(pgd_offset_k(KASAN_SHADOW_START),
				 KASAN_SHADOW_START, KASAN_SHADOW_END);

	local_flush_tlb_all();
}

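/* Populate real shadow for [start, end), page-aligned outwards. */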
static void __init kasan_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_populate_pgd(pgd_offset_k(vaddr), vaddr, vend);
}

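/*
 * "Shallow" populate: only allocate the intermediate levels of the vmalloc
 * shadow and leave the leaves empty; the shadow pages themselves are
 * provided on demand by the generic kasan_populate_vmalloc() as vmalloc
 * areas are created.
 */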
static void __init kasan_shallow_populate_pud(p4d_t *p4d,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	pud_t *pud_k = pud_offset(p4d, vaddr);

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(*pud_k)) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pud(pud_k, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
			continue;
		}

		BUG();
	} while (pud_k++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate_p4d(pgd_t *pgd,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	p4d_t *p4d_k = p4d_offset(pgd, vaddr);

	do {
		next = p4d_addr_end(vaddr, end);

		if (p4d_none(*p4d_k)) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_p4d(p4d_k, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
			continue;
		}

		kasan_shallow_populate_pud(p4d_k, vaddr, next);
	} while (p4d_k++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	pgd_t *pgd_k = pgd_offset_k(vaddr);

	do {
		next = pgd_addr_end(vaddr, end);

		if (pgd_none(*pgd_k)) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
			continue;
		}

		kasan_shallow_populate_p4d(pgd_k, vaddr, next);
	} while (pgd_k++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_shallow_populate_pgd(vaddr, vend);
}

static void __init create_tmp_mapping(void)
{
	void *ptr;
	p4d_t *base_p4d;

	/*
	 * We need to clean the early mapping: this is hard to achieve "in-place",
	 * so install a temporary mapping like arm64 and x86 do.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(pgd_t) * PTRS_PER_PGD);

	/* Copy the last p4d since it is shared with the kernel mapping. */
	if (pgtable_l5_enabled) {
		ptr = (p4d_t *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
		memcpy(tmp_p4d, ptr, sizeof(p4d_t) * PTRS_PER_P4D);
		set_pgd(&tmp_pg_dir[pgd_index(KASAN_SHADOW_END)],
			pfn_pgd(PFN_DOWN(__pa(tmp_p4d)), PAGE_TABLE));
		base_p4d = tmp_p4d;
	} else {
		base_p4d = (p4d_t *)tmp_pg_dir;
	}

	/* Copy the last pud since it is shared with the kernel mapping. */
	if (pgtable_l4_enabled) {
		ptr = (pud_t *)p4d_page_vaddr(*(base_p4d + p4d_index(KASAN_SHADOW_END)));
		memcpy(tmp_pud, ptr, sizeof(pud_t) * PTRS_PER_PUD);
		set_p4d(&base_p4d[p4d_index(KASAN_SHADOW_END)],
			pfn_p4d(PFN_DOWN(__pa(tmp_pud)), PAGE_TABLE));
	}
}

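/*
 * Switch from the early shadow to the real one. Everything runs on
 * tmp_pg_dir so that swapper_pg_dir can be rewritten while it is not live:
 * clear the early mapping, map zero shadow for the
 * FIXADDR_START..VMALLOC_START gap, shallow-populate (or zero-map) the
 * vmalloc shadow, back the linear mapping and the kernel region with real
 * shadow memory, then remap the early shadow page read-only for the
 * remaining zero-shadow users and switch back to swapper_pg_dir.
 */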
void __init kasan_init(void)
{
	phys_addr_t p_start, p_end;
	u64 i;

	create_tmp_mapping();
	csr_write(CSR_SATP, PFN_DOWN(__pa(tmp_pg_dir)) | satp_mode);

	kasan_early_clear_pgd(pgd_offset_k(KASAN_SHADOW_START),
			      KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_populate_early_shadow((void *)kasan_mem_to_shadow((void *)FIXADDR_START),
				    (void *)kasan_mem_to_shadow((void *)VMALLOC_START));

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
		/* Shallow populate modules and BPF which are vmalloc-allocated */
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)MODULES_VADDR),
			(void *)kasan_mem_to_shadow((void *)MODULES_END));
	} else {
		kasan_populate_early_shadow((void *)kasan_mem_to_shadow((void *)VMALLOC_START),
					    (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
	}

	/* Populate the linear mapping */
	for_each_mem_range(i, &p_start, &p_end) {
		void *start = (void *)__va(p_start);
		void *end = (void *)__va(p_end);

		if (start >= end)
			break;

		kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
	}

	/* Populate kernel */
	kasan_populate(kasan_mem_to_shadow((const void *)MODULES_END),
		       kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));

	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       __pgprot(_PAGE_PRESENT | _PAGE_READ |
					_PAGE_ACCESSED)));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	init_task.kasan_depth = 0;

	csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | satp_mode);
	local_flush_tlb_all();
}