xref: /linux/arch/x86/include/asm/mmu_context.h (revision 617a814f14b8914271f7a70366d72c6196d17663)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/debugreg.h>
#include <asm/gsseg.h>

extern atomic64_t last_mm_ctx_id;

#ifdef CONFIG_PERF_EVENTS
DECLARE_STATIC_KEY_FALSE(rdpmc_never_available_key);
DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);
void cr4_update_pce(void *ignored);
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct	*entries;
	unsigned int		nr_entries;

	/*
	 * If PTI is in use, then the entries array is not mapped while we're
	 * in user mode.  The whole array will be aliased at the address
	 * given by ldt_slot_va(slot).  We use two slots so that we can
	 * allocate and map, and enable a new LDT without invalidating the
	 * mapping of an older, still-in-use LDT.
	 *
	 * slot will be -1 if this LDT doesn't have an alias mapping.
	 */
	int			slot;
};
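
/*
 * Illustrative sketch (editor's addition, not kernel code): how the
 * two-slot scheme above is meant to be used.  ldt_slot_va() is the
 * helper named in the comment; the alloc/map/install/unmap helpers
 * below are hypothetical stand-ins for the real code in
 * arch/x86/kernel/ldt.c:
 *
 *	struct ldt_struct *new_ldt = alloc_ldt_sketch(...);
 *	int slot = old_ldt ? !old_ldt->slot : 0;   // pick the free slot
 *
 *	map_ldt_sketch(mm, new_ldt, slot);    // alias at ldt_slot_va(slot)
 *	install_ldt_sketch(mm, new_ldt);      // old alias is still live here
 *	unmap_ldt_sketch(mm, old_ldt);        // only now tear the old one down
 */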

/*
 * Used for LDT copy/destruction.
 */
static inline void init_new_context_ldt(struct mm_struct *mm)
{
	mm->context.ldt = NULL;
	init_rwsem(&mm->context.ldt_usr_sem);
}
int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
void ldt_arch_exit_mmap(struct mm_struct *mm);
#else	/* CONFIG_MODIFY_LDT_SYSCALL */
static inline void init_new_context_ldt(struct mm_struct *mm) { }
static inline int ldt_dup_context(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) { }
static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
extern void load_mm_ldt(struct mm_struct *mm);
extern void switch_ldt(struct mm_struct *prev, struct mm_struct *next);
#else
static inline void load_mm_ldt(struct mm_struct *mm)
{
	clear_LDT();
}
static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
	DEBUG_LOCKS_WARN_ON(preemptible());
}
#endif

#ifdef CONFIG_ADDRESS_MASKING
static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
{
	/*
	 * When switch_mm_irqs_off() is called for a kthread, it may race with
	 * LAM enablement. switch_mm_irqs_off() uses the LAM mask to do two
	 * things: populate CR3 and populate 'cpu_tlbstate.lam'. Make sure it
	 * reads a single value for both.
	 */
	return READ_ONCE(mm->context.lam_cr3_mask);
}
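
/*
 * Editor's sketch (hypothetical helper, not kernel code): a consumer
 * honoring the single-read rule above.  This mirrors what
 * switch_mm_irqs_off() does: snapshot the mask once and derive both
 * the CR3 bits and the cached per-CPU copy from that one snapshot, so
 * a racing LAM enablement cannot leave the two out of sync.
 */
static inline void lam_snapshot_sketch(struct mm_struct *mm,
				       unsigned long *cr3_bits,
				       unsigned long *cached_lam)
{
	unsigned long lam = mm_lam_cr3_mask(mm);	/* one READ_ONCE */

	*cr3_bits = lam;	/* folded into the new CR3 value */
	*cached_lam = lam;	/* what cpu_tlbstate.lam is derived from */
}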

static inline void dup_lam(struct mm_struct *oldmm, struct mm_struct *mm)
{
	mm->context.lam_cr3_mask = oldmm->context.lam_cr3_mask;
	mm->context.untag_mask = oldmm->context.untag_mask;
}

#define mm_untag_mask mm_untag_mask
static inline unsigned long mm_untag_mask(struct mm_struct *mm)
{
	return mm->context.untag_mask;
}

static inline void mm_reset_untag_mask(struct mm_struct *mm)
{
	mm->context.untag_mask = -1UL;
}
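
/*
 * Editor's sketch (hypothetical helper, not kernel code): what the
 * untag mask does in practice.  After mm_reset_untag_mask() the mask
 * is -1UL and the AND below is a no-op; once LAM is enabled, it
 * strips the metadata bits from a tagged user pointer.  The real
 * untagged_addr() in asm/uaccess_64.h is additionally careful not to
 * mangle kernel addresses.
 */
static inline unsigned long untag_addr_sketch(struct mm_struct *mm,
					      unsigned long addr)
{
	return addr & mm_untag_mask(mm);
}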

#define arch_pgtable_dma_compat arch_pgtable_dma_compat
static inline bool arch_pgtable_dma_compat(struct mm_struct *mm)
{
	return !mm_lam_cr3_mask(mm) ||
		test_bit(MM_CONTEXT_FORCE_TAGGED_SVA, &mm->context.flags);
}
#else

static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
{
	return 0;
}

static inline void dup_lam(struct mm_struct *oldmm, struct mm_struct *mm)
{
}

static inline void mm_reset_untag_mask(struct mm_struct *mm)
{
}
#endif

#define enter_lazy_tlb enter_lazy_tlb
extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);

/*
 * Init a new mm.  Used on mm copies, like at fork()
 * and on mm's that are brand-new, like at execve().
 */
#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mutex_init(&mm->context.lock);

	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
	atomic64_set(&mm->context.tlb_gen, 0);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		/* pkey 0 is the default and allocated implicitly */
		mm->context.pkey_allocation_map = 0x1;
		/* -1 means unallocated or invalid */
		mm->context.execute_only_pkey = -1;
	}
#endif
	mm_reset_untag_mask(mm);
	init_new_context_ldt(mm);
	return 0;
}
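
/*
 * Editor's note on the pkey initialization above: pkey_allocation_map
 * is a bitmap with one bit per protection key, so the 0x1 initializer
 * marks only pkey 0 (the default key every mapping starts with) as in
 * use.  A hypothetical allocator over that bitmap would look like:
 *
 *	if (mm->context.pkey_allocation_map == (1 << arch_max_pkey()) - 1)
 *		return -1;			// all keys taken
 *	pkey = ffz(mm->context.pkey_allocation_map);
 *	mm->context.pkey_allocation_map |= (1 << pkey);
 *
 * (the real mm_pkey_alloc() in asm/pkeys.h is the authoritative
 * version; execute_only_pkey starts at -1 above and is allocated
 * lazily for the first execute-only mapping)
 */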

#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)			\
do {						\
	paravirt_enter_mmap(next);		\
	switch_mm((prev), (next), NULL);	\
} while (0)

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	loadsegment(gs, 0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	shstk_free(tsk);			\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

static inline void arch_dup_pkeys(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
		return;

	/* Duplicate the oldmm pkey state in mm: */
	mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
	mm->context.execute_only_pkey   = oldmm->context.execute_only_pkey;
#endif
}

static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	arch_dup_pkeys(oldmm, mm);
	paravirt_enter_mmap(mm);
	dup_lam(oldmm, mm);
	return ldt_dup_context(oldmm, mm);
}
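
/*
 * Editor's note: arch_dup_mmap() runs at fork() time from dup_mmap()
 * in kernel/fork.c, after the child mm has already been through
 * init_new_context().  The ordering above is deliberate: the pkey and
 * LAM copies cannot fail, while ldt_dup_context() allocates memory
 * and can, so it goes last and supplies the return value.
 */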

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
	ldt_arch_exit_mmap(mm);
}

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!IS_ENABLED(CONFIG_IA32_EMULATION) ||
		!test_bit(MM_CONTEXT_UPROBE_IA32, &mm->context.flags);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;
	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;
	return __pkru_allows_pkey(vma_pkey(vma), write);
}
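
/*
 * Editor's sketch (hypothetical helper, not kernel code): the check
 * that __pkru_allows_pkey() boils down to.  Each pkey owns two bits
 * in PKRU: Access-Disable at bit 2*pkey and Write-Disable at bit
 * 2*pkey + 1.  Any access needs AD clear; a write also needs WD
 * clear.  See asm/pkru.h for the real helpers.
 */
static inline bool pkru_allows_sketch(u32 pkru, u16 pkey, bool write)
{
	bool ad = pkru & (1u << (pkey * 2));		/* access disabled */
	bool wd = pkru & (1u << (pkey * 2 + 1));	/* write disabled */

	return !ad && !(write && wd);
}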

unsigned long __get_current_cr3_fast(void);

#include <asm-generic/mmu_context.h>

#endif /* _ASM_X86_MMU_CONTEXT_H */