#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>

extern atomic64_t last_mm_ctx_id;

#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;

static inline void load_mm_cr4(struct mm_struct *mm)
{
	if (static_key_false(&rdpmc_always_available) ||
	    atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif
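
/*
 * Illustrative usage (a sketch, not a definition from this file): the
 * context-switch path is expected to call load_mm_cr4() for the incoming
 * mm so that CR4.PCE tracks whether user-space RDPMC is allowed there,
 * roughly:
 *
 *	load_mm_cr4(next);		(e.g. from switch_mm_irqs_off())
 *
 * rdpmc_always_available is the static key behind the global "always allow
 * RDPMC" switch; the per-mm perf_rdpmc_allowed count covers processes that
 * have a perf event mapped.
 */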
#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct *entries;
	unsigned int nr_entries;
};

/*
 * Used for LDT copy/destruction.
 */
int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
#else	/* CONFIG_MODIFY_LDT_SYSCALL */
static inline int init_new_context_ldt(struct task_struct *tsk,
				       struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) {}
#endif

static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	struct ldt_struct *ldt;

	/* lockless_dereference synchronizes with smp_store_release */
	ldt = lockless_dereference(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active.  The LDT will not be freed until
	 * after the IPI is handled by all such CPUs.  This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */

	if (unlikely(ldt))
		set_ldt(ldt->entries, ldt->nr_entries);
	else
		clear_LDT();
#else
	clear_LDT();
#endif
}
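
/*
 * A sketch of the publisher side that the lockless_dereference() above
 * pairs with (illustrative only; the real update lives in
 * arch/x86/kernel/ldt.c, and the snippet below is a sketch of that
 * pairing rather than code from this header):
 *
 *	smp_store_release(&mm->context.ldt, new_ldt);
 *	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
 *
 * The release store publishes the fully initialized ldt_struct, and the
 * IPI guarantees every CPU running this mm reloads the LDT before the old
 * one can be freed.
 */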
static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	/*
	 * Load the LDT if either the old or new mm had an LDT.
	 *
	 * An mm will never go from having an LDT to not having an LDT.  Two
	 * mms never share an LDT, so we don't gain anything by checking to
	 * see whether the LDT changed.  There's also no guarantee that
	 * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
	 * then prev->context.ldt will also be non-NULL.
	 *
	 * If we really cared, we could optimize the case where prev == next
	 * and we're exiting lazy mode.  Most of the time, if this happens,
	 * we don't actually need to reload LDTR, but modify_ldt() is mostly
	 * used by legacy code and emulators where we don't need this level of
	 * performance.
	 *
	 * This uses | instead of || because it generates better code.
	 */
	if (unlikely((unsigned long)prev->context.ldt |
		     (unsigned long)next->context.ldt))
		load_mm_ldt(next);
#endif

	DEBUG_LOCKS_WARN_ON(preemptible());
}

void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);

static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
	atomic64_set(&mm->context.tlb_gen, 0);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		/* pkey 0 is the default and always allocated */
		mm->context.pkey_allocation_map = 0x1;
		/* -1 means unallocated or invalid */
		mm->context.execute_only_pkey = -1;
	}
#endif
	return init_new_context_ldt(tsk, mm);
}
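
/*
 * Background sketch (an assumption about the surrounding TLB code, not
 * something defined in this header): ctx_id identifies this mm for its
 * entire lifetime and is never reused, while tlb_gen counts flush
 * requests.  A flush is requested by bumping the generation, roughly:
 *
 *	atomic64_inc_return(&mm->context.tlb_gen);
 *
 * and each CPU records the (ctx_id, tlb_gen) pair it has caught up to, so
 * the switch/flush paths can tell whether that CPU still needs a flush.
 */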
static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0);

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	paravirt_arch_dup_mmap(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
}

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!IS_ENABLED(CONFIG_IA32_EMULATION) ||
		!(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

static inline void arch_bprm_mm_init(struct mm_struct *mm,
		struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot
	 * cacheline in the mm_struct.  That can be expensive
	 * enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process.  Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline int vma_pkey(struct vm_area_struct *vma)
{
	unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
				      VM_PKEY_BIT2 | VM_PKEY_BIT3;

	return (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
}
#else
static inline int vma_pkey(struct vm_area_struct *vma)
{
	return 0;
}
#endif
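
/*
 * Worked example (illustrative only): the protection key is stored in four
 * consecutive VMA flag bits starting at VM_PKEY_SHIFT.  A VMA using pkey 5
 * (binary 0101) therefore has VM_PKEY_BIT0 and VM_PKEY_BIT2 set, and
 * vma_pkey() recovers the value as:
 *
 *	(vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT == 5
 */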
/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
	if (!current->mm)
		return true;
	/*
	 * Should PKRU be enforced on the access to this VMA?  If
	 * the VMA is from another process, then PKRU has no
	 * relevance and should not be enforced.
	 */
	if (current->mm != vma->vm_mm)
		return true;

	return false;
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;
	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;
	return __pkru_allows_pkey(vma_pkey(vma), write);
}

/*
 * If PCID is on, ASID-aware code paths put the ASID+1 into the PCID
 * bits.  This serves two purposes.  It prevents a nasty situation in
 * which PCID-unaware code saves CR3, loads some other value (with PCID
 * == 0), and then restores CR3, thus corrupting the TLB for ASID 0 if
 * the saved ASID was nonzero.  It also means that any bugs involving
 * loading a PCID-enabled CR3 with CR4.PCIDE off will trigger
 * deterministically.
 */

static inline unsigned long build_cr3(struct mm_struct *mm, u16 asid)
{
	if (static_cpu_has(X86_FEATURE_PCID)) {
		VM_WARN_ON_ONCE(asid > 4094);
		return __sme_pa(mm->pgd) | (asid + 1);
	} else {
		VM_WARN_ON_ONCE(asid != 0);
		return __sme_pa(mm->pgd);
	}
}

static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid)
{
	VM_WARN_ON_ONCE(asid > 4094);
	return __sme_pa(mm->pgd) | (asid + 1) | CR3_NOFLUSH;
}
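
/*
 * Illustrative layout of the CR3 value built above when PCID is enabled
 * (a sketch based on the architectural CR3 format, not on definitions in
 * this file):
 *
 *	bit  63    : CR3_NOFLUSH - ask the CPU not to flush the TLB
 *	bits 11..0 : PCID, stored as asid + 1, so ASIDs 0..4094 become
 *		     PCIDs 1..4095 and PCID 0 is left for PCID-unaware code
 *	remaining  : page-aligned physical address of mm->pgd (plus the SME
 *		     C-bit that __sme_pa() folds in when memory encryption
 *		     is active)
 */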
/*
 * This can be used from process context to figure out what the value of
 * CR3 is without needing to do a (slow) __read_cr3().
 *
 * It's intended to be used for code like KVM that sneakily changes CR3
 * and needs to restore it.  It needs to be used very carefully.
 */
static inline unsigned long __get_current_cr3_fast(void)
{
	unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm),
		this_cpu_read(cpu_tlbstate.loaded_mm_asid));

	/* For now, be very restrictive about when this can be called. */
	VM_WARN_ON(in_nmi() || preemptible());

	VM_BUG_ON(cr3 != __read_cr3());
	return cr3;
}

#endif /* _ASM_X86_MMU_CONTEXT_H */