#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;

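/*
 * CR4.PCE gates user-mode RDPMC: when it is clear, RDPMC at CPL > 0
 * faults.  Set it when switching to this mm iff rdpmc is globally
 * allowed or this mm has been granted access by perf
 * (context.perf_rdpmc_allowed); clear it otherwise.
 */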
static inline void load_mm_cr4(struct mm_struct *mm)
{
	if (static_key_false(&rdpmc_always_available) ||
	    atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct *entries;
	unsigned int nr_entries;
};
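
/*
 * Writer side (a simplified sketch of what arch/x86/kernel/ldt.c does,
 * not verbatim): publish the new table with a release store, IPI every
 * CPU running this mm so it reloads, and only then free the old table:
 *
 *	smp_store_release(&mm->context.ldt, new_ldt);
 *	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
 *	free_ldt_struct(old_ldt);
 *
 * This is what the lockless_dereference() in load_mm_ldt() below
 * pairs with.
 */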

/*
 * Used for LDT copy/destruction.
 */
int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
#else	/* CONFIG_MODIFY_LDT_SYSCALL */
static inline int init_new_context_ldt(struct task_struct *tsk,
				       struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) {}
#endif

static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	struct ldt_struct *ldt;

	/* lockless_dereference synchronizes with smp_store_release */
	ldt = lockless_dereference(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active.  The LDT will not be freed until
	 * after the IPI is handled by all such CPUs.  This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */

	if (unlikely(ldt))
		set_ldt(ldt->entries, ldt->nr_entries);
	else
		clear_LDT();
#else
	clear_LDT();
#endif
}

static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	/*
	 * Load the LDT if either the old or new mm had an LDT.
	 *
	 * An mm will never go from having an LDT to not having an LDT.  Two
	 * mms never share an LDT, so we don't gain anything by checking to
	 * see whether the LDT changed.  There's also no guarantee that
	 * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
	 * then prev->context.ldt will also be non-NULL.
	 *
	 * If we really cared, we could optimize the case where prev == next
	 * and we're exiting lazy mode.  Most of the time, if this happens,
	 * we don't actually need to reload LDTR, but modify_ldt() is mostly
	 * used by legacy code and emulators where we don't need this level of
	 * performance.
	 *
	 * This uses | instead of || because it generates better code.
	 */
	if (unlikely((unsigned long)prev->context.ldt |
		     (unsigned long)next->context.ldt))
		load_mm_ldt(next);
#endif

	DEBUG_LOCKS_WARN_ON(preemptible());
}

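/*
 * Scheduler hook: this CPU is switching to a kernel thread that will
 * run with the previous task's mm.  Dropping to TLBSTATE_LAZY lets the
 * TLB flush IPI handler leave_mm() instead of flushing, so the CPU
 * stops receiving flush IPIs for an mm it is no longer really using.
 */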
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
}

static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		/* pkey 0 is the default and always allocated */
		mm->context.pkey_allocation_map = 0x1;
		/* -1 means unallocated or invalid */
		mm->context.execute_only_pkey = -1;
	}
	#endif
	/*
	 * Propagate the LDT setup result; silently returning 0 on
	 * failure would leave callers running with a broken context.ldt.
	 */
	return init_new_context_ldt(tsk, mm);
}
static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)
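
/*
 * Illustrative call site (a simplified sketch of exec_mmap() in
 * fs/exec.c, not verbatim):
 *
 *	task_lock(tsk);
 *	active_mm = tsk->active_mm;
 *	tsk->mm = tsk->active_mm = mm;
 *	activate_mm(active_mm, mm);
 *	task_unlock(tsk);
 */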

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	paravirt_arch_dup_mmap(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
}

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!IS_ENABLED(CONFIG_IA32_EMULATION) ||
		mm->context.ia32_compat != TIF_IA32;
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif
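
/*
 * Note: MPX is one user of is_64bit_mm(); instruction-decoding code
 * like it needs to pick between 32-bit and 64-bit operand/address
 * semantics for the target mm rather than for current.
 */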

static inline void arch_bprm_mm_init(struct mm_struct *mm,
		struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot
	 * cacheline in the mm_struct.  That can be expensive
	 * enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process.  Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline int vma_pkey(struct vm_area_struct *vma)
{
	unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
				      VM_PKEY_BIT2 | VM_PKEY_BIT3;

	return (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
}
#else
static inline int vma_pkey(struct vm_area_struct *vma)
{
	return 0;
}
#endif
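
/*
 * Example (illustrative): the four VM_PKEY_BIT* flags hold the pkey as
 * a 4-bit field starting at VM_PKEY_SHIFT, so a VMA with VM_PKEY_BIT3
 * and VM_PKEY_BIT1 set in vm_flags extracts to 0b1010, i.e.
 * vma_pkey() == 10.
 */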

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
	if (!current->mm)
		return true;
	/*
	 * Should PKRU be enforced on the access to this VMA?  If
	 * the VMA is from another process, then PKRU has no
	 * relevance and should not be enforced.
	 */
	if (current->mm != vma->vm_mm)
		return true;

	return false;
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;
	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;
	return __pkru_allows_pkey(vma_pkey(vma), write);
}
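
/*
 * __pkru_allows_pkey() (see asm/pgtable.h) checks the per-key
 * access-disable bit in PKRU, and additionally the write-disable bit
 * when @write is true.
 */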

/*
 * This can be used from process context to figure out what the value of
 * CR3 is without needing to do a (slow) __read_cr3().
 *
 * It's intended to be used for code like KVM that sneakily changes CR3
 * and needs to restore it.  It needs to be used very carefully.
 */
static inline unsigned long __get_current_cr3_fast(void)
{
	unsigned long cr3 = __pa(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd);

	/* For now, be very restrictive about when this can be called. */
	VM_WARN_ON(in_nmi() || preemptible());

	VM_BUG_ON(cr3 != __read_cr3());
	return cr3;
}
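
/*
 * Typical use (an illustrative sketch of the save/restore pattern the
 * comment above describes, not a verbatim KVM excerpt):
 *
 *	unsigned long cr3 = __get_current_cr3_fast();
 *	...briefly run on a different CR3...
 *	write_cr3(cr3);
 */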

#endif /* _ASM_X86_MMU_CONTEXT_H */