xref: /linux/arch/x86/include/asm/kfence.h (revision af0bc3ac9a9e830cb52b718ecb237c4e76a466be)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * x86 KFENCE support.
 *
 * Copyright (C) 2020, Google LLC.
 */

#ifndef _ASM_X86_KFENCE_H
#define _ASM_X86_KFENCE_H

#ifndef MODULE

#include <linux/bug.h>
#include <linux/kfence.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>

/* Force 4K pages for __kfence_pool. */
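/*
 * KFENCE toggles the present bit of individual pool pages, which requires
 * each page to be mapped by its own 4K PTE rather than being covered by a
 * huge mapping.
 */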
static inline bool arch_kfence_init_pool(void)
{
	unsigned long addr;

	for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
	     addr += PAGE_SIZE) {
		unsigned int level;

		if (!lookup_address(addr, &level))
			return false;

		if (level != PG_LEVEL_4K)
			set_memory_4k(addr, 1);
	}

	return true;
}

/* Protect or unprotect the given page, flushing the local TLB when protecting. */
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	unsigned int level;
	pte_t *pte = lookup_address(addr, &level);
	pteval_t val;

	if (WARN_ON(!pte || level != PG_LEVEL_4K))
		return false;

	val = pte_val(*pte);

	/*
	 * Protecting the page means making it not-present.  If the PTE is
	 * already in the requested state, there's nothing to do.
	 */
	if (protect != !!(val & _PAGE_PRESENT))
		return true;

	/*
	 * Otherwise, invert the entire PTE.  This avoids writing out an
	 * L1TF-vulnerable PTE (not present, without the high address bits
	 * set).
	 */
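	/*
	 * Illustrative (made-up) values: a present PTE of 0x8000000123456063
	 * is stored as ~val == 0x7ffffffedcba9f9c, which has _PAGE_PRESENT
	 * clear while the physical-address bits are mostly set, so the entry
	 * cannot be abused by an L1TF speculative load into cached memory.
	 */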
	set_pte(pte, __pte(~val));

	/*
	 * If the page was protected (non-present) and we're making it
	 * present, there is no need to flush the TLB at all: the TLB never
	 * caches non-present translations, so no stale entry can exist for
	 * this page.
	 */
	if (!protect)
		return true;

	/*
	 * We need to avoid IPIs, as we may get KFENCE allocations or faults
	 * with interrupts disabled. Therefore, the below is best-effort, and
	 * does not flush TLBs on all CPUs. We can tolerate some inaccuracy;
	 * lazy fault handling takes care of faults after the page is PRESENT.
	 */

	/*
	 * Flush this CPU's TLB, assuming whoever did the allocation/free is
	 * likely to continue running on this CPU.
	 */
	preempt_disable();
	flush_tlb_one_kernel(addr);
	preempt_enable();
	return true;
}
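
/*
 * For reference, a sketch of how the generic KFENCE code (mm/kfence/core.c)
 * is expected to invoke this hook; the wrapper names and details below are
 * illustrative and may differ between kernel versions:
 *
 *	static inline bool kfence_protect(unsigned long addr)
 *	{
 *		return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
 *	}
 *
 *	static inline bool kfence_unprotect(unsigned long addr)
 *	{
 *		return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
 *	}
 */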

#endif /* !MODULE */

#endif /* _ASM_X86_KFENCE_H */