#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>

extern atomic64_t last_mm_ctx_id;

#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;

static inline void load_mm_cr4(struct mm_struct *mm)
{
	if (static_key_false(&rdpmc_always_available) ||
	    atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions. This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates. On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct *entries;
	unsigned int nr_entries;
};

/*
 * Used for LDT copy/destruction.
 */
int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
#else	/* CONFIG_MODIFY_LDT_SYSCALL */
static inline int init_new_context_ldt(struct task_struct *tsk,
				       struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) {}
#endif

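/*
 * Install this mm's LDT into the LDT register, or load the null LDT if
 * the mm has no LDT (or if CONFIG_MODIFY_LDT_SYSCALL is compiled out).
 */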
static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	struct ldt_struct *ldt;

	/* lockless_dereference synchronizes with smp_store_release */
	ldt = lockless_dereference(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active. The LDT will not be freed until
	 * after the IPI is handled by all such CPUs. This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */

	if (unlikely(ldt))
		set_ldt(ldt->entries, ldt->nr_entries);
	else
		clear_LDT();
#else
	clear_LDT();
#endif
}

static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	/*
	 * Load the LDT if either the old or new mm had an LDT.
	 *
	 * An mm will never go from having an LDT to not having an LDT. Two
	 * mms never share an LDT, so we don't gain anything by checking to
	 * see whether the LDT changed. There's also no guarantee that
	 * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
	 * then prev->context.ldt will also be non-NULL.
	 *
	 * If we really cared, we could optimize the case where prev == next
	 * and we're exiting lazy mode. Most of the time, if this happens,
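/*
 * enter_lazy_tlb() runs when this CPU is about to keep using @mm's page
 * tables on behalf of a kernel thread (lazy TLB mode). Clearing the CPU
 * from mm_cpumask() means it will no longer be sent TLB shootdown IPIs
 * for this mm while it stays lazy.
 */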
	 * we don't actually need to reload LDTR, but modify_ldt() is mostly
	 * used by legacy code and emulators where we don't need this level of
	 * performance.
	 *
	 * This uses | instead of || because it generates better code.
	 */
	if (unlikely((unsigned long)prev->context.ldt |
		     (unsigned long)next->context.ldt))
		load_mm_ldt(next);
#endif

	DEBUG_LOCKS_WARN_ON(preemptible());
}

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	int cpu = smp_processor_id();

	if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
		cpumask_clear_cpu(cpu, mm_cpumask(mm));
}

static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
	atomic64_set(&mm->context.tlb_gen, 0);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		/* pkey 0 is the default and always allocated */
		mm->context.pkey_allocation_map = 0x1;
		/* -1 means unallocated or invalid */
		mm->context.execute_only_pkey = -1;
	}
#endif
	init_new_context_ldt(tsk, mm);

	return 0;
}
static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0);

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

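/*
 * arch_dup_mmap() and arch_exit_mmap() simply forward to the paravirt
 * hooks so a paravirtualized hypervisor can track when an address space
 * is duplicated (fork) or torn down; with no paravirt backend they are
 * no-ops.
 */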
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	paravirt_arch_dup_mmap(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
}

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!IS_ENABLED(CONFIG_IA32_EMULATION) ||
		!(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

static inline void arch_bprm_mm_init(struct mm_struct *mm,
		struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

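/*
 * arch_unmap() is the arch hook on the munmap() path. On x86 its only
 * job is to let MPX release bounds-table entries covering the range
 * being unmapped; see the cost discussion below for why the feature
 * check is wrapped in unlikely().
 */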
static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot
	 * cacheline in the mm_struct. That can be expensive
	 * enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process. Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline int vma_pkey(struct vm_area_struct *vma)
{
	unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
				      VM_PKEY_BIT2 | VM_PKEY_BIT3;

	return (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
}
#else
static inline int vma_pkey(struct vm_area_struct *vma)
{
	return 0;
}
#endif

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
	if (!current->mm)
		return true;
	/*
	 * Should PKRU be enforced on the access to this VMA? If
	 * the VMA is from another process, then PKRU has no
	 * relevance and should not be enforced.
	 */
	if (current->mm != vma->vm_mm)
		return true;

	return false;
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;
	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;
	return __pkru_allows_pkey(vma_pkey(vma), write);
}

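/*
 * Worked example for the pkey checks above (illustrative only): a VMA
 * whose vm_flags carry VM_PKEY_BIT0 | VM_PKEY_BIT2 is tagged with pkey
 * 0b0101 = 5, so a data write by the owning process is permitted only
 * if __pkru_allows_pkey(5, true) finds neither the access-disable nor
 * the write-disable bit set for key 5 in this thread's PKRU.
 */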

/*
 * This can be used from process context to figure out what the value of
 * CR3 is without needing to do a (slow) __read_cr3().
 *
 * It's intended to be used for code like KVM that sneakily changes CR3
 * and needs to restore it. It needs to be used very carefully.
 */
static inline unsigned long __get_current_cr3_fast(void)
{
	unsigned long cr3 = __pa(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd);

	if (static_cpu_has(X86_FEATURE_PCID))
		cr3 |= this_cpu_read(cpu_tlbstate.loaded_mm_asid);

	/* For now, be very restrictive about when this can be called. */
	VM_WARN_ON(in_nmi() || !in_atomic());

	VM_BUG_ON(cr3 != __read_cr3());
	return cr3;
}

#endif /* _ASM_X86_MMU_CONTEXT_H */