/* SPDX-License-Identifier: GPL-2.0 */
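/*
 * KFENCE support for 32-bit ARM.
 *
 * KFENCE needs to protect and unprotect individual pages of its pool, so
 * the pool must be mapped at page (PTE) granularity. Any PMD section
 * mappings covering the pool are split into PTE mappings when the pool
 * is initialised.
 */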

#ifndef __ASM_ARM_KFENCE_H
#define __ASM_ARM_KFENCE_H

#include <linux/kfence.h>

#include <asm/pgalloc.h>
#include <asm/set_memory.h>

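/*
 * Replace the PMD section (leaf) mapping at @addr with a freshly allocated
 * PTE table mapping the same physical range at page granularity with
 * PAGE_KERNEL permissions, then flush the TLB for the affected range.
 * @addr must be PMD-aligned. Returns 0 on success, or -ENOMEM if the PTE
 * table cannot be allocated.
 */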
static inline int split_pmd_page(pmd_t *pmd, unsigned long addr)
{
	int i;
	unsigned long pfn = PFN_DOWN(__pa(addr));
	pte_t *pte = pte_alloc_one_kernel(&init_mm);

	if (!pte)
		return -ENOMEM;

	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte_ext(pte + i, pfn_pte(pfn + i, PAGE_KERNEL), 0);
	pmd_populate_kernel(&init_mm, pmd, pte);

	flush_tlb_kernel_range(addr, addr + PMD_SIZE);
	return 0;
}

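/*
 * Walk the KFENCE pool page by page and split any PMD section mappings
 * that cover it, so that every pool page is mapped by its own PTE and can
 * be individually protected. Returns false if a split fails.
 */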
static inline bool arch_kfence_init_pool(void)
{
	unsigned long addr;
	pmd_t *pmd;

	for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
	     addr += PAGE_SIZE) {
		pmd = pmd_off_k(addr);

		if (pmd_leaf(*pmd)) {
			if (split_pmd_page(pmd, addr & PMD_MASK))
				return false;
		}
	}

	return true;
}

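/*
 * Protect or unprotect a single page of the KFENCE pool by marking its
 * mapping invalid or valid; accesses to an invalid (protected) page fault
 * and are reported by KFENCE.
 */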
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	set_memory_valid(addr, 1, !protect);

	return true;
}

#endif /* __ASM_ARM_KFENCE_H */