/* arch/x86/include/asm/mmu_context.h (revision 52a2af400c1075219b3f0ce5c96fc961da44018a) */
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>

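/*
 * Monotonic counter behind the per-mm ctx_id values handed out by
 * init_new_context() below; ctx_id values are effectively never reused.
 */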
extern atomic64_t last_mm_ctx_id;

#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;

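/*
 * CR4.PCE controls whether user space may execute RDPMC.  Set it if
 * rdpmc is globally allowed or if perf has granted this mm rdpmc access
 * (mm->context.perf_rdpmc_allowed); clear it otherwise.
 */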
static inline void load_mm_cr4(struct mm_struct *mm)
{
	if (static_key_false(&rdpmc_always_available) ||
	    atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct *entries;
	unsigned int nr_entries;
};

/*
 * Used for LDT copy/destruction.
 */
int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
#else	/* CONFIG_MODIFY_LDT_SYSCALL */
static inline int init_new_context_ldt(struct task_struct *tsk,
				       struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) {}
#endif

static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	struct ldt_struct *ldt;

	/* lockless_dereference synchronizes with smp_store_release */
	ldt = lockless_dereference(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active.  The LDT will not be freed until
	 * after the IPI is handled by all such CPUs.  This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */

	if (unlikely(ldt))
		set_ldt(ldt->entries, ldt->nr_entries);
	else
		clear_LDT();
#else
	clear_LDT();
#endif
}

static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	/*
	 * Load the LDT if either the old or new mm had an LDT.
	 *
	 * An mm will never go from having an LDT to not having an LDT.  Two
	 * mms never share an LDT, so we don't gain anything by checking to
	 * see whether the LDT changed.  There's also no guarantee that
	 * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
	 * then prev->context.ldt will also be non-NULL.
	 *
	 * If we really cared, we could optimize the case where prev == next
	 * and we're exiting lazy mode.  Most of the time, if this happens,
	 * we don't actually need to reload LDTR, but modify_ldt() is mostly
	 * used by legacy code and emulators where we don't need this level of
	 * performance.
	 *
	 * This uses | instead of || because it generates better code.
	 */
	if (unlikely((unsigned long)prev->context.ldt |
		     (unsigned long)next->context.ldt))
		load_mm_ldt(next);
#endif

	DEBUG_LOCKS_WARN_ON(preemptible());
}

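/*
 * Lazy TLB: when a CPU switches to a kernel thread it keeps the old mm
 * loaded but removes itself from mm_cpumask(), so it stops receiving TLB
 * flush IPIs for that mm.  switch_mm_irqs_off() (arch/x86/mm/tlb.c) later
 * re-adds the CPU and catches up using mm->context.tlb_gen.
 */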
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	int cpu = smp_processor_id();

	if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
		cpumask_clear_cpu(cpu, mm_cpumask(mm));
}

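/*
 * init_new_context() gives the new mm a globally unique ctx_id and a
 * tlb_gen of 0; tlb_gen is bumped whenever the mm's TLB entries need to
 * be invalidated (see asm/tlbflush.h).  Protection-key state and the
 * per-mm LDT are initialized here as well.
 */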
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
	atomic64_set(&mm->context.tlb_gen, 0);

	#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		/* pkey 0 is the default and always allocated */
		mm->context.pkey_allocation_map = 0x1;
		/* -1 means unallocated or invalid */
		mm->context.execute_only_pkey = -1;
	}
	#endif
	return init_new_context_ldt(tsk, mm);
}
static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

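/*
 * switch_mm_irqs_off() is the interrupts-off variant used on the context
 * switch path.  The self-referential define below tells the generic code
 * (include/linux/mmu_context.h) that this architecture provides it, so it
 * does not fall back to plain switch_mm().
 */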
extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif
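/*
 * activate_mm() installs the freshly created mm on the exec() path
 * (exec_mmap() in fs/exec.c); deactivate_mm() is invoked from mm_release()
 * and wipes the cached user FS/GS segment state so it does not leak into
 * whatever runs next.
 */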

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	paravirt_arch_dup_mmap(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
}

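/*
 * Note that is_64bit_mm() treats everything except a 32-bit compat mm
 * (ia32_compat == TIF_IA32) as 64-bit; in particular, x32 processes count
 * as 64-bit here.  Without CONFIG_IA32_EMULATION every mm is 64-bit.
 */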
#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!IS_ENABLED(CONFIG_IA32_EMULATION) ||
		!(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

static inline void arch_bprm_mm_init(struct mm_struct *mm,
		struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot
	 * cacheline in the mm_struct.  That can be expensive
	 * enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process.  Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline int vma_pkey(struct vm_area_struct *vma)
{
	unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
				      VM_PKEY_BIT2 | VM_PKEY_BIT3;

	return (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
}
#else
static inline int vma_pkey(struct vm_area_struct *vma)
{
	return 0;
}
#endif
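/*
 * Example: a VMA whose flags have VM_PKEY_BIT0 and VM_PKEY_BIT1 set (and
 * bits 2-3 clear) is tagged with protection key 3, since the four VM_PKEY
 * bits hold the key value shifted left by VM_PKEY_SHIFT.
 */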
2528f62c883SDave Hansen 
25333a709b2SDave Hansen /*
25433a709b2SDave Hansen  * We only want to enforce protection keys on the current process
25533a709b2SDave Hansen  * because we effectively have no access to PKRU for other
25633a709b2SDave Hansen  * processes or any way to tell *which * PKRU in a threaded
25733a709b2SDave Hansen  * process we could use.
25833a709b2SDave Hansen  *
25933a709b2SDave Hansen  * So do not enforce things if the VMA is not from the current
26033a709b2SDave Hansen  * mm, or if we are in a kernel thread.
26133a709b2SDave Hansen  */
static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
	if (!current->mm)
		return true;
	/*
	 * Should PKRU be enforced on the access to this VMA?  If
	 * the VMA is from another process, then PKRU has no
	 * relevance and should not be enforced.
	 */
	if (current->mm != vma->vm_mm)
		return true;

	return false;
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;
	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;
	return __pkru_allows_pkey(vma_pkey(vma), write);
}
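/*
 * Example: a read (write=false, execute=false, foreign=false) of a VMA
 * tagged with pkey 2 is permitted only if the current thread's PKRU does
 * not have the access-disable bit set for key 2; a write additionally
 * requires the write-disable bit for key 2 to be clear.
 */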

/*
 * If PCID is on, ASID-aware code paths put the ASID+1 into the PCID
 * bits.  This serves two purposes.  It prevents a nasty situation in
 * which PCID-unaware code saves CR3, loads some other value (with PCID
 * == 0), and then restores CR3, thus corrupting the TLB for ASID 0 if
 * the saved ASID was nonzero.  It also means that any bugs involving
 * loading a PCID-enabled CR3 with CR4.PCIDE off will trigger
 * deterministically.
 */

static inline unsigned long build_cr3(struct mm_struct *mm, u16 asid)
{
	if (static_cpu_has(X86_FEATURE_PCID)) {
		VM_WARN_ON_ONCE(asid > 4094);
		return __sme_pa(mm->pgd) | (asid + 1);
	} else {
		VM_WARN_ON_ONCE(asid != 0);
		return __sme_pa(mm->pgd);
	}
}

static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid)
{
	VM_WARN_ON_ONCE(asid > 4094);
	return __sme_pa(mm->pgd) | (asid + 1) | CR3_NOFLUSH;
}
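/*
 * Worked example: with PCID supported and asid == 3, build_cr3() returns
 * the SME-adjusted physical address of the pgd with PCID field 4 in the
 * low 12 bits of CR3; build_cr3_noflush() additionally sets CR3_NOFLUSH
 * (bit 63) so the TLB entries tagged with that PCID survive the load.
 */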

/*
 * This can be used from process context to figure out what the value of
 * CR3 is without needing to do a (slow) __read_cr3().
 *
 * It's intended to be used for code like KVM that sneakily changes CR3
 * and needs to restore it.  It needs to be used very carefully.
 */
static inline unsigned long __get_current_cr3_fast(void)
{
	unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm),
		this_cpu_read(cpu_tlbstate.loaded_mm_asid));

	/* For now, be very restrictive about when this can be called. */
	VM_WARN_ON(in_nmi() || preemptible());

	VM_BUG_ON(cr3 != __read_cr3());
	return cr3;
}

#endif /* _ASM_X86_MMU_CONTEXT_H */