/* SPDX-License-Identifier: GPL-2.0 */
/*
 * KFENCE support for LoongArch.
 *
 * Author: Enze Li <lienze@kylinos.cn>
 * Copyright (C) 2022-2023 KylinSoft Corporation.
 */

#ifndef _ASM_LOONGARCH_KFENCE_H
#define _ASM_LOONGARCH_KFENCE_H

#include <linux/kfence.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

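/*
 * On LoongArch the kernel's linear mapping normally goes through the
 * hardware Direct Mapped Windows (DMW), which bypass the page tables,
 * so protections cannot be changed for individual pages in that range.
 * KFENCE therefore remaps its pool into the page-table-backed region
 * between KFENCE_AREA_START and KFENCE_AREA_END, where
 * kfence_protect_page() below can toggle PTE bits per page.
 */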
static inline bool arch_kfence_init_pool(void)
{
	int err;
	char *kaddr, *vaddr;
	char *kfence_pool = __kfence_pool;
	struct vm_struct *area;

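	/* Reserve a virtual area for the pool inside the KFENCE region. */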
	area = __get_vm_area_caller(KFENCE_POOL_SIZE, VM_IOREMAP,
				    KFENCE_AREA_START, KFENCE_AREA_END,
				    __builtin_return_address(0));
	if (!area)
		return false;

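	/*
	 * Map the reserved area onto the physical pages backing the
	 * original pool, then publish the remapped address as the pool.
	 */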
	__kfence_pool = (char *)area->addr;
	err = ioremap_page_range((unsigned long)__kfence_pool,
				 (unsigned long)__kfence_pool + KFENCE_POOL_SIZE,
				 virt_to_phys((void *)kfence_pool), PAGE_KERNEL);
	if (err) {
		free_vm_area(area);
		__kfence_pool = kfence_pool;
		return false;
	}

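	/*
	 * Record the remapped alias for every page in the pool so that
	 * page_address() resolves to the protectable mapping instead of
	 * the DMW address.
	 */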
	kaddr = kfence_pool;
	vaddr = __kfence_pool;
	while (kaddr < kfence_pool + KFENCE_POOL_SIZE) {
		set_page_address(virt_to_page(kaddr), vaddr);
		kaddr += PAGE_SIZE;
		vaddr += PAGE_SIZE;
	}

	return true;
}

/* Protect or unprotect the given page, then flush the local TLB entry. */
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	pte_t *pte = virt_to_kpte(addr);

	if (WARN_ON(!pte) || pte_none(*pte))
		return false;

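	/*
	 * Clearing _PAGE_VALID and _PAGE_PRESENT makes any access to the
	 * page fault; setting them again restores normal access.
	 */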
	if (protect)
		set_pte(pte, __pte(pte_val(*pte) & ~(_PAGE_VALID | _PAGE_PRESENT)));
	else
		set_pte(pte, __pte(pte_val(*pte) | (_PAGE_VALID | _PAGE_PRESENT)));

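	/*
	 * A local flush suffices: KFENCE is a sampling-based, best-effort
	 * detector and tolerates stale TLB entries on other CPUs.
	 */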
	preempt_disable();
	local_flush_tlb_one(addr);
	preempt_enable();

	return true;
}

#endif /* _ASM_LOONGARCH_KFENCE_H */