/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/debugreg.h>
#include <asm/gsseg.h>

extern atomic64_t last_mm_ctx_id;

#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif /* !CONFIG_PARAVIRT_XXL */

#ifdef CONFIG_PERF_EVENTS
DECLARE_STATIC_KEY_FALSE(rdpmc_never_available_key);
DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);
void cr4_update_pce(void *ignored);
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct *entries;
	unsigned int nr_entries;

	/*
	 * If PTI is in use, then the entries array is not mapped while we're
	 * in user mode.  The whole array will be aliased at the address
	 * given by ldt_slot_va(slot).  We use two slots so that we can
	 * allocate, map, and enable a new LDT without invalidating the
	 * mapping of an older, still-in-use LDT.
	 *
	 * slot will be -1 if this LDT doesn't have an alias mapping.
	 * (See the sketch after the LDT declarations below for how the
	 * two slots are cycled.)
	 */
	int slot;
};

/*
 * Used for LDT copy/destruction.
 */
static inline void init_new_context_ldt(struct mm_struct *mm)
{
	mm->context.ldt = NULL;
	init_rwsem(&mm->context.ldt_usr_sem);
}
int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
void ldt_arch_exit_mmap(struct mm_struct *mm);
#else /* CONFIG_MODIFY_LDT_SYSCALL */
static inline void init_new_context_ldt(struct mm_struct *mm) { }
static inline int ldt_dup_context(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) { }
static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
extern void load_mm_ldt(struct mm_struct *mm);
extern void switch_ldt(struct mm_struct *prev, struct mm_struct *next);
#else
static inline void load_mm_ldt(struct mm_struct *mm)
{
	clear_LDT();
}
static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
	DEBUG_LOCKS_WARN_ON(preemptible());
}
#endif
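
/*
 * Illustrative sketch only, not a definition from this header: the
 * two-slot aliasing scheme described in ldt_struct above is consumed
 * roughly like this by the LDT installation path (helper names are
 * from arch/x86/kernel/ldt.c; see write_ldt()):
 *
 *	// Pick whichever slot the old LDT is NOT using, so the old
 *	// alias mapping stays valid while the new one is set up.
 *	error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0);
 *	...
 *	install_ldt(mm, new_ldt);	// CPUs start using the new alias
 *	unmap_ldt_struct(mm, old_ldt);	// only now tear down the old one
 */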

#define enter_lazy_tlb enter_lazy_tlb
extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);

/*
 * Init a new mm.  Used on mm copies, like at fork()
 * and on mm's that are brand-new, like at execve().
 */
#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mutex_init(&mm->context.lock);

	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
	atomic64_set(&mm->context.tlb_gen, 0);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		/* pkey 0 is the default and allocated implicitly */
		mm->context.pkey_allocation_map = 0x1;
		/* -1 means unallocated or invalid */
		mm->context.execute_only_pkey = -1;
	}
#endif
	init_new_context_ldt(mm);
	return 0;
}

#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	loadsegment(gs, 0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

static inline void arch_dup_pkeys(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
		return;

	/* Duplicate the oldmm pkey state in mm: */
	mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
	mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
#endif
}

static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	arch_dup_pkeys(oldmm, mm);
	paravirt_arch_dup_mmap(oldmm, mm);
	return ldt_dup_context(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
	ldt_arch_exit_mmap(mm);
}

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return !IS_ENABLED(CONFIG_IA32_EMULATION) ||
		!(mm->context.flags & MM_CONTEXT_UPROBE_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
}
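
/*
 * Illustrative sketch only, not a definition from this header: as set
 * up in init_new_context() above, context.pkey_allocation_map is a
 * plain bitmap with bit 0 pre-set for the implicit default pkey, so
 * allocating a key (see mm_pkey_alloc() in asm/pkeys.h) is roughly:
 *
 *	if (mm_pkey_allocation_map(mm) == (1 << arch_max_pkey()) - 1)
 *		return -1;				// all keys in use
 *	pkey = ffz(mm_pkey_allocation_map(mm));		// lowest clear bit
 *	mm_set_pkey_allocated(mm, pkey);		// mark it taken
 */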

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
					     bool write, bool execute,
					     bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;
	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;
	return __pkru_allows_pkey(vma_pkey(vma), write);
}

unsigned long __get_current_cr3_fast(void);

#include <asm-generic/mmu_context.h>

#endif /* _ASM_X86_MMU_CONTEXT_H */