/* SPDX-License-Identifier: GPL-2.0 */
/*
 * x86 KFENCE support.
 *
 * Copyright (C) 2020, Google LLC.
 */

#ifndef _ASM_X86_KFENCE_H
#define _ASM_X86_KFENCE_H

#ifndef MODULE

#include <linux/bug.h>
#include <linux/kfence.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>

/*
 * Force 4K pages for __kfence_pool.
 *
 * KFENCE toggles the Present bit of individual pool pages, which requires
 * each page to be mapped by its own 4K PTE. Walk the pool and split any
 * huge-page mapping down to 4K.
 *
 * Returns true on success, false if a pool address has no mapping at all
 * (in which case KFENCE initialization must be aborted).
 */
static inline bool arch_kfence_init_pool(void)
{
	unsigned long addr;

	for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
	     addr += PAGE_SIZE) {
		unsigned int level;

		/* No mapping at all for a pool page: cannot use the pool. */
		if (!lookup_address(addr, &level))
			return false;

		/* Split huge mappings so each pool page has its own PTE. */
		if (level != PG_LEVEL_4K)
			set_memory_4k(addr, 1);
	}

	return true;
}

/*
 * Protect (make non-present) or unprotect the page containing @addr, and
 * flush the TLB as needed.
 *
 * @addr:    kernel virtual address within the KFENCE pool page to toggle.
 * @protect: true to clear the Present bit, false to set it.
 *
 * Returns true on success; false (with a warning) if the address has no
 * 4K PTE — which should never happen after arch_kfence_init_pool().
 */
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	unsigned int level;
	pte_t *pte = lookup_address(addr, &level);
	pteval_t val, new;

	/* arch_kfence_init_pool() guarantees a 4K PTE per pool page. */
	if (WARN_ON(!pte || level != PG_LEVEL_4K))
		return false;

	val = pte_val(*pte);

	/*
	 * protect requires making the page not-present. If the PTE is
	 * already in the right state, there's nothing to do.
	 */
	if (protect != !!(val & _PAGE_PRESENT))
		return true;

	/*
	 * Otherwise, flip the Present bit, taking care to avoid writing an
	 * L1TF-vulnerable PTE (not present, without the high address bits
	 * set). flip_protnone_guard() inverts the PFN bits on the
	 * present -> not-present transition (and back) for that purpose.
	 */
	new = val ^ _PAGE_PRESENT;
	set_pte(pte, __pte(flip_protnone_guard(val, new, PTE_PFN_MASK)));

	/*
	 * If the page was protected (non-present) and we're making it
	 * present, there is no need to flush the TLB at all.
	 */
	if (!protect)
		return true;

	/*
	 * We need to avoid IPIs, as we may get KFENCE allocations or faults
	 * with interrupts disabled. Therefore, the below is best-effort, and
	 * does not flush TLBs on all CPUs. We can tolerate some inaccuracy;
	 * lazy fault handling takes care of faults after the page is PRESENT.
	 */

	/*
	 * Flush this CPU's TLB, assuming whoever did the allocation/free is
	 * likely to continue running on this CPU. preempt_disable() only
	 * pins us to the current CPU for the duration of the flush.
	 */
	preempt_disable();
	flush_tlb_one_kernel(addr);
	preempt_enable();
	return true;
}

#endif /* !MODULE */

#endif /* _ASM_X86_KFENCE_H */