#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;

static inline void load_mm_cr4(struct mm_struct *mm)
{
	if (static_key_false(&rdpmc_always_available) ||
	    atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif
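/*
 * Illustrative note: load_mm_cr4() is expected to run on the context
 * switch path (e.g. from switch_mm_irqs_off() in arch/x86/mm/tlb.c;
 * the exact caller varies by kernel version) so that CR4.PCE tracks
 * the incoming mm.  With PCE clear, a user-mode RDPMC raises #GP
 * instead of exposing the performance counters.  A sketch of such a
 * caller, not verbatim kernel code:
 *
 *	if (likely(prev != next))
 *		load_mm_cr4(next);
 */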
#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct *entries;
	int size;
};

/*
 * Used for LDT copy/destruction.
 */
int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
#else	/* CONFIG_MODIFY_LDT_SYSCALL */
static inline int init_new_context_ldt(struct task_struct *tsk,
				       struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) {}
#endif

static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	struct ldt_struct *ldt;

	/* lockless_dereference synchronizes with smp_store_release */
	ldt = lockless_dereference(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active.  The LDT will not be freed until
	 * after the IPI is handled by all such CPUs.  This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */

	if (unlikely(ldt))
		set_ldt(ldt->entries, ldt->size);
	else
		clear_LDT();
#else
	clear_LDT();
#endif

	DEBUG_LOCKS_WARN_ON(preemptible());
}
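/*
 * For reference, a sketch of the writer side that the comment above
 * pairs with (see install_ldt() in arch/x86/kernel/ldt.c; details may
 * differ by kernel version):
 *
 *	smp_store_release(&mm->context.ldt, new_ldt);
 *	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
 *
 * The smp_store_release() pairs with the lockless_dereference() in
 * load_mm_ldt(), and the IPI guarantees that no CPU is still using
 * the old ldt_struct by the time it is freed.
 */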
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}

static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		/* pkey 0 is the default and always allocated */
		mm->context.pkey_allocation_map = 0x1;
		/* -1 means unallocated or invalid */
		mm->context.execute_only_pkey = -1;
	}
#endif
	return init_new_context_ldt(tsk, mm);
}
static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}
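/*
 * Worked example (assuming the pkey helpers in asm/pkeys.h): the
 * allocation map is a bitmap with bit N set when pkey N is in use.
 * Starting from the 0x1 initialized above, two successful
 * mm_pkey_alloc() calls would hand out pkeys 1 and 2, leaving:
 *
 *	mm->context.pkey_allocation_map == 0x7
 *
 * If one key is later dedicated to execute-only mappings,
 * execute_only_pkey records its number in place of the -1 sentinel.
 */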
extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	paravirt_arch_dup_mmap(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
}

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!IS_ENABLED(CONFIG_IA32_EMULATION) ||
		!(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

static inline void arch_bprm_mm_init(struct mm_struct *mm,
		struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot
	 * cacheline in the mm_struct.  That can be expensive
	 * enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process.  Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline int vma_pkey(struct vm_area_struct *vma)
{
	unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
				      VM_PKEY_BIT2 | VM_PKEY_BIT3;

	return (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
}
#else
static inline int vma_pkey(struct vm_area_struct *vma)
{
	return 0;
}
#endif

static inline bool __pkru_allows_pkey(u16 pkey, bool write)
{
	u32 pkru = read_pkru();

	if (!__pkru_allows_read(pkru, pkey))
		return false;
	if (write && !__pkru_allows_write(pkru, pkey))
		return false;

	return true;
}
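/*
 * PKRU layout, for reference: each pkey owns two bits, AD (access
 * disable) at bit 2*pkey and WD (write disable) at bit 2*pkey + 1;
 * __pkru_allows_read()/__pkru_allows_write() (asm/pgtable.h) test
 * exactly those bits.  A worked example:
 *
 *	pkru == 0x0000000c	-> AD and WD set for pkey 1
 *	__pkru_allows_pkey(1, false) == false	(AD denies all access)
 *	__pkru_allows_pkey(0, true)  == true	(pkey 0 unrestricted)
 */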
/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
	if (!current->mm)
		return true;
	/*
	 * Should PKRU be enforced on the access to this VMA?  If
	 * the VMA is from another process, then PKRU has no
	 * relevance and should not be enforced.
	 */
	if (current->mm != vma->vm_mm)
		return true;

	return false;
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;
	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;
	return __pkru_allows_pkey(vma_pkey(vma), write);
}

static inline bool arch_pte_access_permitted(pte_t pte, bool write)
{
	return __pkru_allows_pkey(pte_flags_pkey(pte_flags(pte)), write);
}
#endif /* _ASM_X86_MMU_CONTEXT_H */