/* SPDX-License-Identifier: GPL-2.0 */
/*
 * x86 KFENCE support.
 *
 * Copyright (C) 2020, Google LLC.
 */

#ifndef _ASM_X86_KFENCE_H
#define _ASM_X86_KFENCE_H

#include <linux/bug.h>
#include <linux/kfence.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>

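/*
 * KFENCE "protects" a guard page by clearing _PAGE_PRESENT in its PTE so
 * that any access faults, and "unprotects" it by setting the bit again.
 * Both operations require each pool page to be mapped by its own 4K PTE,
 * which arch_kfence_init_pool() below ensures.
 */
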
/* Force 4K pages for __kfence_pool. */
static inline bool arch_kfence_init_pool(void)
{
	unsigned long addr;

	for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
	     addr += PAGE_SIZE) {
		unsigned int level;

		if (!lookup_address(addr, &level))
			return false;

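		/*
		 * A page covered by a 2M/1G huge mapping shares one entry
		 * with its neighbours; split it so this page gets its own
		 * 4K PTE whose present bit can be toggled individually.
		 */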
		if (level != PG_LEVEL_4K)
			set_memory_4k(addr, 1);
	}

	return true;
}

/* Protect the given page and flush TLB. */
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	unsigned int level;
	pte_t *pte = lookup_address(addr, &level);

	if (WARN_ON(!pte || level != PG_LEVEL_4K))
		return false;

	/*
	 * We need to avoid IPIs, as we may get KFENCE allocations or faults
	 * with interrupts disabled. Therefore, the below is best-effort, and
	 * does not flush TLBs on all CPUs. We can tolerate some inaccuracy;
	 * lazy fault handling takes care of faults after the page is PRESENT.
	 */

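	/*
	 * Toggle _PAGE_PRESENT directly via set_pte() rather than through
	 * the CPA helpers (e.g. set_memory_np()), whose TLB flushing can
	 * involve IPIs to other CPUs.
	 */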
	if (protect)
		set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
	else
		set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));

	/*
	 * Flush this CPU's TLB, assuming whoever did the allocation/free is
	 * likely to continue running on this CPU.
	 */
	preempt_disable();
	flush_tlb_one_kernel(addr);
	preempt_enable();
	return true;
}
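
/*
 * Usage sketch (illustrative only, not part of this header): the KFENCE
 * core is expected to drive kfence_protect_page() through small wrappers,
 * roughly like the simplified, hypothetical pair below:
 *
 *	static bool kfence_protect(unsigned long addr)
 *	{
 *		// Make accesses to the page fault again:
 *		return kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true);
 *	}
 *
 *	static bool kfence_unprotect(unsigned long addr)
 *	{
 *		// Make the page accessible:
 *		return kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false);
 *	}
 */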

#endif /* _ASM_X86_KFENCE_H */