xref: /linux/arch/x86/include/asm/entry-common.h (revision 3a39d672e7f48b8d6b91a09afa4b55352773b4b5)
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _ASM_X86_ENTRY_COMMON_H
#define _ASM_X86_ENTRY_COMMON_H

#include <linux/randomize_kstack.h>
#include <linux/user-return-notifier.h>

#include <asm/nospec-branch.h>
#include <asm/io_bitmap.h>
#include <asm/fpu/api.h>
#include <asm/fred.h>

/* Check that the stack and regs on entry from user mode are sane. */
static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) {
		/*
		 * Make sure that the entry code gave us a sensible EFLAGS
		 * register.  Native because we want to check the actual CPU
		 * state, not the interrupt state as imagined by Xen.
		 */
		unsigned long flags = native_save_fl();
		unsigned long mask = X86_EFLAGS_DF | X86_EFLAGS_NT;

		/*
		 * For !SMAP hardware we patch out CLAC on entry.
		 */
		if (cpu_feature_enabled(X86_FEATURE_SMAP) ||
		    cpu_feature_enabled(X86_FEATURE_XENPV))
			mask |= X86_EFLAGS_AC;

		WARN_ON_ONCE(flags & mask);

		/* We think we came from user mode. Make sure pt_regs agrees. */
		WARN_ON_ONCE(!user_mode(regs));

		/*
		 * All entries from user mode (except #DF) should be on the
		 * normal thread stack and should have user pt_regs in the
		 * correct location.
		 */
		WARN_ON_ONCE(!on_thread_stack());
		WARN_ON_ONCE(regs != task_pt_regs(current));
	}
}
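/* The #define below tells the generic entry code that an arch override exists. */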
#define arch_enter_from_user_mode arch_enter_from_user_mode

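/*
 * Deferred exit work: run user-return notifiers (used e.g. by KVM to
 * restore host MSR values), refresh the TSS I/O bitmap, and reload the
 * FPU registers if they are not currently live.
 */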
static inline void arch_exit_work(unsigned long ti_work)
{
	if (ti_work & _TIF_USER_RETURN_NOTIFY)
		fire_user_return_notifiers();

	if (unlikely(ti_work & _TIF_IO_BITMAP))
		tss_update_io_bitmap();

	fpregs_assert_state_consistent();
	if (unlikely(ti_work & _TIF_NEED_FPU_LOAD))
		switch_fpu_return();
}

static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
						  unsigned long ti_work)
{
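	/*
	 * fpregs_assert_state_consistent() compiles to a no-op unless
	 * CONFIG_X86_DEBUG_FPU is enabled, so arch_exit_work() can be
	 * skipped entirely when no work is pending.
	 */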
	if (IS_ENABLED(CONFIG_X86_DEBUG_FPU) || unlikely(ti_work))
		arch_exit_work(ti_work);

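	/*
	 * On FRED-enabled CPUs, keep MSR_IA32_FRED_RSP0 pointing at the
	 * current task's kernel stack.
	 */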
	fred_update_rsp0();

#ifdef CONFIG_COMPAT
	/*
	 * Compat syscalls set TS_COMPAT.  Make sure we clear it before
	 * returning to user mode.  We need to clear it *after* signal
	 * handling, because syscall restart has a fixup for compat
	 * syscalls.  The fixup is exercised by the ptrace_syscall_32
	 * selftest.
	 *
	 * We also need to clear TS_REGS_POKED_I386: the 32-bit tracer
	 * special case only applies after poking regs and before the
	 * very next return to user mode.
	 */
	current_thread_info()->status &= ~(TS_COMPAT | TS_I386_REGS_POKED);
#endif

	/*
	 * This value will get limited by KSTACK_OFFSET_MAX(), which is 10
	 * bits. The actual entropy will be further reduced by the compiler
	 * when applying stack alignment constraints (see cc_stack_align4/8 in
	 * arch/x86/Makefile), which will remove the 3 (x86_64) or 2 (ia32)
	 * low bits from any entropy chosen here.
	 *
	 * Therefore, the final stack offset entropy will be 7 (x86_64) or
	 * 8 (ia32) bits.
	 */
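	/*
	 * rdtsc() serves as a fast entropy source here: the offset only
	 * needs to be hard to predict cheaply, not cryptographically
	 * strong.
	 */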
	choose_random_kstack_offset(rdtsc());
}
#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare

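/*
 * Called late in the noinstr exit path with interrupts disabled, hence
 * __always_inline and nothing instrumentable in here.
 */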
static __always_inline void arch_exit_to_user_mode(void)
{
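	/*
	 * Clear stale division results before returning to user space to
	 * mitigate the AMD "DIV0" speculation bug (CVE-2023-20588) on
	 * affected Zen1 CPUs.
	 */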
	amd_clear_divider();
}
#define arch_exit_to_user_mode arch_exit_to_user_mode

#endif