/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _ASM_X86_ENTRY_COMMON_H
#define _ASM_X86_ENTRY_COMMON_H

#include <linux/user-return-notifier.h>

#include <asm/nospec-branch.h>
#include <asm/io_bitmap.h>
#include <asm/fpu/api.h>

/* Check that the stack and regs on entry from user mode are sane. */
static __always_inline void arch_check_user_regs(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) {
		/*
		 * Make sure that the entry code gave us a sensible EFLAGS
		 * register. Native because we want to check the actual CPU
		 * state, not the interrupt state as imagined by Xen.
		 */
		unsigned long flags = native_save_fl();
		WARN_ON_ONCE(flags & (X86_EFLAGS_AC | X86_EFLAGS_DF |
				      X86_EFLAGS_NT));

		/* We think we came from user mode. Make sure pt_regs agrees. */
		WARN_ON_ONCE(!user_mode(regs));

		/*
		 * All entries from user mode (except #DF) should be on the
		 * normal thread stack and should have user pt_regs in the
		 * correct location.
		 */
		WARN_ON_ONCE(!on_thread_stack());
		WARN_ON_ONCE(regs != task_pt_regs(current));
	}
}
#define arch_check_user_regs arch_check_user_regs

#define ARCH_SYSCALL_EXIT_WORK		(_TIF_SINGLESTEP)

static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
						  unsigned long ti_work)
{
	if (ti_work & _TIF_USER_RETURN_NOTIFY)
		fire_user_return_notifiers();

	if (unlikely(ti_work & _TIF_IO_BITMAP))
		tss_update_io_bitmap();

	fpregs_assert_state_consistent();
	if (unlikely(ti_work & _TIF_NEED_FPU_LOAD))
		switch_fpu_return();

#ifdef CONFIG_COMPAT
	/*
	 * Compat syscalls set TS_COMPAT. Make sure we clear it before
	 * returning to user mode. We need to clear it *after* signal
	 * handling, because syscall restart has a fixup for compat
	 * syscalls. The fixup is exercised by the ptrace_syscall_32
	 * selftest.
	 *
	 * We also need to clear TS_I386_REGS_POKED: the 32-bit tracer
	 * special case only applies after poking regs and before the
	 * very next return to user mode.
	 */
	current_thread_info()->status &= ~(TS_COMPAT | TS_I386_REGS_POKED);
#endif
}
#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare

static __always_inline void arch_exit_to_user_mode(void)
{
	/* Clear CPU buffers (MDS mitigation) before returning to user space. */
	mds_user_clear_cpu_buffers();
}
#define arch_exit_to_user_mode arch_exit_to_user_mode

#endif