/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_ENTRYCOMMON_H
#define __LINUX_ENTRYCOMMON_H

#include <linux/static_call_types.h>
#include <linux/ptrace.h>
#include <linux/syscalls.h>
#include <linux/seccomp.h>
#include <linux/sched.h>
#include <linux/context_tracking.h>
#include <linux/livepatch.h>
#include <linux/resume_user_mode.h>
#include <linux/tick.h>
#include <linux/kmsan.h>

#include <asm/entry-common.h>

/*
 * Define dummy _TIF work flags if not defined by the architecture or for
 * disabled functionality.
 */
#ifndef _TIF_PATCH_PENDING
# define _TIF_PATCH_PENDING		(0)
#endif

#ifndef _TIF_UPROBE
# define _TIF_UPROBE			(0)
#endif

/*
 * SYSCALL_WORK flags handled in syscall_enter_from_user_mode()
 */
#ifndef ARCH_SYSCALL_WORK_ENTER
# define ARCH_SYSCALL_WORK_ENTER	(0)
#endif

/*
 * SYSCALL_WORK flags handled in syscall_exit_to_user_mode()
 */
#ifndef ARCH_SYSCALL_WORK_EXIT
# define ARCH_SYSCALL_WORK_EXIT	(0)
#endif

#define SYSCALL_WORK_ENTER	(SYSCALL_WORK_SECCOMP |			\
				 SYSCALL_WORK_SYSCALL_TRACEPOINT |	\
				 SYSCALL_WORK_SYSCALL_TRACE |		\
				 SYSCALL_WORK_SYSCALL_EMU |		\
				 SYSCALL_WORK_SYSCALL_AUDIT |		\
				 SYSCALL_WORK_SYSCALL_USER_DISPATCH |	\
				 ARCH_SYSCALL_WORK_ENTER)
#define SYSCALL_WORK_EXIT	(SYSCALL_WORK_SYSCALL_TRACEPOINT |	\
				 SYSCALL_WORK_SYSCALL_TRACE |		\
				 SYSCALL_WORK_SYSCALL_AUDIT |		\
				 SYSCALL_WORK_SYSCALL_USER_DISPATCH |	\
				 SYSCALL_WORK_SYSCALL_EXIT_TRAP |	\
				 ARCH_SYSCALL_WORK_EXIT)

/*
 * TIF flags handled in exit_to_user_mode_loop()
 */
#ifndef ARCH_EXIT_TO_USER_MODE_WORK
# define ARCH_EXIT_TO_USER_MODE_WORK	(0)
#endif

#define EXIT_TO_USER_MODE_WORK						\
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |		\
	 _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL |	\
	 ARCH_EXIT_TO_USER_MODE_WORK)
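
/*
 * Illustrative sketch, not part of the API: an architecture that has extra
 * exit work of its own can add it to the mask above from its
 * <asm/entry-common.h>, which is included before the #ifndef defaults take
 * effect. The flag name below is made up for the example:
 *
 *	#define ARCH_EXIT_TO_USER_MODE_WORK	(_TIF_ARCH_FOO)
 *
 * The matching handling then goes into the architecture's
 * arch_exit_to_user_mode_work() hook declared further down in this file.
 */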

/**
 * arch_enter_from_user_mode - Architecture specific sanity check for user mode regs
 * @regs: Pointer to current's pt_regs
 *
 * Defaults to an empty implementation. Can be replaced by architecture
 * specific code.
 *
 * Invoked from syscall_enter_from_user_mode() in the non-instrumentable
 * section. Use __always_inline so the compiler cannot push it out of line
 * and make it instrumentable.
 */
static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs);

#ifndef arch_enter_from_user_mode
static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) {}
#endif
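
/*
 * Illustrative sketch, not part of the API: an architecture overrides this
 * hook by defining both the function and a macro of the same name in its
 * <asm/entry-common.h>, which makes the empty default above drop out. The
 * sanity check shown is hypothetical:
 *
 *	static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs)
 *	{
 *		BUG_ON(!user_mode(regs));
 *	}
 *	#define arch_enter_from_user_mode arch_enter_from_user_mode
 *
 * The same #define-plus-function pattern applies to the other arch_*() and
 * local_irq_*_exit_to_user() hooks declared below.
 */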

/**
 * enter_from_user_mode - Establish state when coming from user mode
 * @regs: Pointer to current's pt_regs
 *
 * Syscall/interrupt entry disables interrupts, but user mode is traced as
 * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
 *
 * 1) Tell lockdep that interrupts are disabled
 * 2) Invoke context tracking if enabled to reactivate RCU
 * 3) Trace interrupts off state
 *
 * Invoked from architecture specific syscall entry code with interrupts
 * disabled. The calling code has to be non-instrumentable. When the
 * function returns all state is correct and interrupts are still
 * disabled. The subsequent functions can be instrumented.
 *
 * This is invoked when there is architecture specific functionality to be
 * done between establishing state and enabling interrupts. The caller must
 * enable interrupts before invoking syscall_enter_from_user_mode_work().
 */
static __always_inline void enter_from_user_mode(struct pt_regs *regs)
{
	arch_enter_from_user_mode(regs);
	lockdep_hardirqs_off(CALLER_ADDR0);

	CT_WARN_ON(__ct_state() != CONTEXT_USER);
	user_exit_irqoff();

	instrumentation_begin();
	kmsan_unpoison_entry_regs(regs);
	trace_hardirqs_off_finish();
	instrumentation_end();
}

/**
 * syscall_enter_from_user_mode_prepare - Establish state and enable interrupts
 * @regs: Pointer to current's pt_regs
 *
 * Invoked from architecture specific syscall entry code with interrupts
 * disabled. The calling code has to be non-instrumentable. When the
 * function returns all state is correct, interrupts are enabled and the
 * subsequent functions can be instrumented.
 *
 * This handles lockdep, RCU (context tracking) and tracing state, i.e.
 * the functionality provided by enter_from_user_mode().
 *
 * This is invoked when there is extra architecture specific functionality
 * to be done between establishing state and handling user mode entry work.
 */
void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);

/**
 * syscall_enter_from_user_mode_work - Check and handle work before invoking
 *				       a syscall
 * @regs: Pointer to current's pt_regs
 * @syscall: The syscall number
 *
 * Invoked from architecture specific syscall entry code with interrupts
 * enabled after invoking syscall_enter_from_user_mode_prepare() and extra
 * architecture specific work.
 *
 * Returns: The original or a modified syscall number
 *
 * If the returned syscall number is -1 then the syscall should be
 * skipped. In this case the caller may invoke syscall_set_error() or
 * syscall_set_return_value() first. If neither of those are called and -1
 * is returned, then the syscall will fail with ENOSYS.
 *
 * It handles the following work items:
 *
 *  1) syscall_work flag dependent invocations of
 *     ptrace_report_syscall_entry(), __secure_computing(), trace_sys_enter()
 *  2) Invocation of audit_syscall_entry()
 */
long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall);

/**
 * syscall_enter_from_user_mode - Establish state and check and handle work
 *				  before invoking a syscall
 * @regs: Pointer to current's pt_regs
 * @syscall: The syscall number
 *
 * Invoked from architecture specific syscall entry code with interrupts
 * disabled. The calling code has to be non-instrumentable. When the
 * function returns all state is correct, interrupts are enabled and the
 * subsequent functions can be instrumented.
 *
 * This is a combination of syscall_enter_from_user_mode_prepare() and
 * syscall_enter_from_user_mode_work().
 *
 * Returns: The original or a modified syscall number. See
 * syscall_enter_from_user_mode_work() for further explanation.
 */
long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall);
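
/*
 * Illustrative sketch, not part of the API: the intended shape of an
 * architecture's C syscall entry point using the combined helper. The
 * function name, the regs->ret register and the dispatch details are
 * hypothetical:
 *
 *	void noinstr do_syscall(struct pt_regs *regs)
 *	{
 *		long nr = syscall_get_nr(current, regs);
 *
 *		nr = syscall_enter_from_user_mode(regs, nr);
 *
 *		instrumentation_begin();
 *		if (nr >= 0 && nr < NR_syscalls)
 *			regs->ret = sys_call_table[nr](regs);
 *		else if (nr != -1)
 *			regs->ret = -ENOSYS;
 *		instrumentation_end();
 *
 *		syscall_exit_to_user_mode(regs);
 *	}
 *
 * syscall_enter_from_user_mode() returns with interrupts enabled and may
 * modify or skip the syscall; syscall_exit_to_user_mode() handles all exit
 * work and returns with interrupts disabled so the caller can drop straight
 * back to user mode.
 */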

/**
 * local_irq_enable_exit_to_user - Exit to user variant of local_irq_enable()
 * @ti_work: Cached TIF flags gathered with interrupts disabled
 *
 * Defaults to local_irq_enable(). Can be supplied by architecture specific
 * code.
 */
static inline void local_irq_enable_exit_to_user(unsigned long ti_work);

#ifndef local_irq_enable_exit_to_user
static inline void local_irq_enable_exit_to_user(unsigned long ti_work)
{
	local_irq_enable();
}
#endif

/**
 * local_irq_disable_exit_to_user - Exit to user variant of local_irq_disable()
 *
 * Defaults to local_irq_disable(). Can be supplied by architecture specific
 * code.
 */
static inline void local_irq_disable_exit_to_user(void);

#ifndef local_irq_disable_exit_to_user
static inline void local_irq_disable_exit_to_user(void)
{
	local_irq_disable();
}
#endif

/**
 * arch_exit_to_user_mode_work - Architecture specific TIF work for exit
 *				 to user mode.
 * @regs: Pointer to current's pt_regs
 * @ti_work: Cached TIF flags gathered with interrupts disabled
 *
 * Invoked from exit_to_user_mode_loop() with interrupts enabled.
 *
 * Defaults to NOOP. Can be supplied by architecture specific code.
 */
static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
					       unsigned long ti_work);

#ifndef arch_exit_to_user_mode_work
static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
					       unsigned long ti_work)
{
}
#endif
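
/*
 * Illustrative sketch, not part of the API: if an architecture added a flag
 * to ARCH_EXIT_TO_USER_MODE_WORK (see the example near the top of this
 * file), this hook is where it would be acted upon. _TIF_ARCH_FOO and
 * arch_handle_foo() are made up:
 *
 *	static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
 *						       unsigned long ti_work)
 *	{
 *		if (ti_work & _TIF_ARCH_FOO)
 *			arch_handle_foo(regs);
 *	}
 *	#define arch_exit_to_user_mode_work arch_exit_to_user_mode_work
 *
 * This runs inside exit_to_user_mode_loop() with interrupts enabled, so it
 * may do work that the later, interrupt-disabled hooks cannot.
 */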

/**
 * arch_exit_to_user_mode_prepare - Architecture specific preparation for
 *				    exit to user mode.
 * @regs: Pointer to current's pt_regs
 * @ti_work: Cached TIF flags gathered with interrupts disabled
 *
 * Invoked from exit_to_user_mode_prepare() with interrupts disabled as the
 * last function before return. Defaults to NOOP.
 */
static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
						  unsigned long ti_work);

#ifndef arch_exit_to_user_mode_prepare
static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
						  unsigned long ti_work)
{
}
#endif

/**
 * arch_exit_to_user_mode - Architecture specific final work before
 *			    exit to user mode.
 *
 * Invoked from exit_to_user_mode() with interrupts disabled as the last
 * function before return. Defaults to NOOP.
 *
 * This needs to be __always_inline because it is non-instrumentable code
 * invoked after context tracking switched to user mode.
 *
 * An architecture implementation must not do anything complex, no locking
 * etc. The main purpose is for speculation mitigations.
 */
static __always_inline void arch_exit_to_user_mode(void);

#ifndef arch_exit_to_user_mode
static __always_inline void arch_exit_to_user_mode(void) { }
#endif

/**
 * arch_do_signal_or_restart - Architecture specific signal delivery function
 * @regs: Pointer to current's pt_regs
 *
 * Invoked from exit_to_user_mode_loop().
 */
void arch_do_signal_or_restart(struct pt_regs *regs);

/**
 * exit_to_user_mode_loop - do any pending work before leaving to user space
 * @regs: Pointer to pt_regs on entry stack
 * @ti_work: Cached TIF flags gathered with interrupts disabled
 */
unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
				     unsigned long ti_work);

/**
 * exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
 * @regs: Pointer to pt_regs on entry stack
 *
 * 1) check that interrupts are disabled
 * 2) call tick_nohz_user_enter_prepare()
 * 3) call exit_to_user_mode_loop() if any flags from
 *    EXIT_TO_USER_MODE_WORK are set
 * 4) check that interrupts are still disabled
 */
static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long ti_work;

	lockdep_assert_irqs_disabled();

	/* Flush pending rcuog wakeup before the last need_resched() check */
	tick_nohz_user_enter_prepare();

	ti_work = read_thread_flags();
	if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
		ti_work = exit_to_user_mode_loop(regs, ti_work);

	arch_exit_to_user_mode_prepare(regs, ti_work);

	/* Ensure that kernel state is sane for a return to userspace */
	kmap_assert_nomap();
	lockdep_assert_irqs_disabled();
	lockdep_sys_exit();
}

/**
 * exit_to_user_mode - Fixup state when exiting to user mode
 *
 * Syscall/interrupt exit enables interrupts, but kernel state still has
 * interrupts disabled when this is invoked. Also tell RCU about it.
 *
 * 1) Trace interrupts on state
 * 2) Invoke context tracking if enabled to adjust RCU state
 * 3) Invoke architecture specific last minute exit code, e.g. speculation
 *    mitigations, etc.: arch_exit_to_user_mode()
 * 4) Tell lockdep that interrupts are enabled
 *
 * Invoked from architecture specific code when syscall_exit_to_user_mode()
 * is not suitable as the last step before returning to userspace. Must be
 * invoked with interrupts disabled and the caller must be
 * non-instrumentable.
 * The caller has to invoke syscall_exit_to_user_mode_work() before this.
 */
static __always_inline void exit_to_user_mode(void)
{
	instrumentation_begin();
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare();
	instrumentation_end();

	user_enter_irqoff();
	arch_exit_to_user_mode();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

/**
 * syscall_exit_to_user_mode_work - Handle work before returning to user mode
 * @regs: Pointer to current's pt_regs
 *
 * Same as steps 1 and 2 of syscall_exit_to_user_mode() but without calling
 * exit_to_user_mode() to perform the final transition to user mode.
 *
 * Calling convention is the same as for syscall_exit_to_user_mode() and it
 * returns with all work handled and interrupts disabled. The caller must
 * invoke exit_to_user_mode() before actually switching to user mode to
 * make the final state transitions. Interrupts must stay disabled between
 * return from this function and the invocation of exit_to_user_mode().
 */
void syscall_exit_to_user_mode_work(struct pt_regs *regs);
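
/*
 * Illustrative sketch, not part of the API: the split variant described
 * above, for an architecture that has to run a non-instrumentable step of
 * its own between the exit work and the final switch to user mode. The tail
 * of such a hypothetical noinstr syscall handler (arch_foo_fixup() is made
 * up) could look like:
 *
 *	instrumentation_begin();
 *	syscall_exit_to_user_mode_work(regs);
 *	instrumentation_end();
 *
 *	arch_foo_fixup(regs);
 *	exit_to_user_mode();
 *
 * Interrupts stay disabled from the return of syscall_exit_to_user_mode_work()
 * until the actual return to user space. Architectures without such an extra
 * step should use syscall_exit_to_user_mode() instead.
 */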

/**
 * syscall_exit_to_user_mode - Handle work before returning to user mode
 * @regs: Pointer to current's pt_regs
 *
 * Invoked with interrupts enabled and fully valid regs. Returns with all
 * work handled, interrupts disabled such that the caller can immediately
 * switch to user mode. Called from architecture specific syscall and ret
 * from fork code.
 *
 * The call order is:
 *  1) One-time syscall exit work:
 *	- rseq syscall exit
 *	- audit
 *	- syscall tracing
 *	- ptrace (single stepping)
 *
 *  2) Preparatory work
 *	- Exit to user mode loop (common TIF handling). Invokes
 *	  arch_exit_to_user_mode_work() for architecture specific TIF work
 *	- Architecture specific one time work arch_exit_to_user_mode_prepare()
 *	- Address limit and lockdep checks
 *
 *  3) Final transition (lockdep, tracing, context tracking, RCU), i.e. the
 *     functionality in exit_to_user_mode().
 *
 * This is a combination of syscall_exit_to_user_mode_work() (1,2) and
 * exit_to_user_mode(). This function is preferred unless there is a
 * compelling architectural reason to use the separate functions.
 */
void syscall_exit_to_user_mode(struct pt_regs *regs);

/**
 * irqentry_enter_from_user_mode - Establish state before invoking the irq handler
 * @regs: Pointer to current's pt_regs
 *
 * Invoked from architecture specific entry code with interrupts disabled.
 * Can only be called when the interrupt entry came from user mode. The
 * calling code must be non-instrumentable. When the function returns all
 * state is correct and the subsequent functions can be instrumented.
 *
 * The function establishes state (lockdep, RCU (context tracking), tracing)
 */
void irqentry_enter_from_user_mode(struct pt_regs *regs);

/**
 * irqentry_exit_to_user_mode - Interrupt exit work
 * @regs: Pointer to current's pt_regs
 *
 * Invoked with interrupts disabled and fully valid regs. Returns with all
 * work handled, interrupts disabled such that the caller can immediately
 * switch to user mode. Called from architecture specific interrupt
 * handling code.
 *
 * The call order is #2 and #3 as described in syscall_exit_to_user_mode().
 * Interrupt exit is not invoking #1 which is the syscall specific one time
 * work.
 */
void irqentry_exit_to_user_mode(struct pt_regs *regs);
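
/*
 * Illustrative sketch, not part of the API: a handler for an exception that
 * can only be taken from user mode pairs the two helpers above instead of
 * the irqentry_enter()/irqentry_exit() pair below. do_foo_trap() and
 * handle_foo_trap() are made up:
 *
 *	void noinstr do_foo_trap(struct pt_regs *regs)
 *	{
 *		irqentry_enter_from_user_mode(regs);
 *
 *		instrumentation_begin();
 *		handle_foo_trap(regs);
 *		instrumentation_end();
 *
 *		irqentry_exit_to_user_mode(regs);
 *	}
 */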

#ifndef irqentry_state
/**
 * struct irqentry_state - Opaque object for exception state storage
 * @exit_rcu: Used exclusively in the irqentry_*() calls; signals whether the
 *            exit path has to invoke ct_irq_exit().
 * @lockdep: Used exclusively in the irqentry_nmi_*() calls; ensures that
 *           lockdep state is restored correctly on exit from nmi.
 *
 * This opaque object is filled in by the irqentry_*_enter() functions and
 * must be passed back into the corresponding irqentry_*_exit() functions
 * when the exception is complete.
 *
 * Callers of irqentry_*_[enter|exit]() must consider this structure opaque
 * and all members private. Descriptions of the members are provided to aid in
 * the maintenance of the irqentry_*() functions.
 */
typedef struct irqentry_state {
	union {
		bool	exit_rcu;
		bool	lockdep;
	};
} irqentry_state_t;
#endif

/**
 * irqentry_enter - Handle state tracking on ordinary interrupt entries
 * @regs: Pointer to pt_regs of interrupted context
 *
 * Invokes:
 *  - lockdep irqflag state tracking as low level ASM entry disabled
 *    interrupts.
 *
 *  - Context tracking if the exception hit user mode.
 *
 *  - The hardirq tracer to keep the state consistent as low level ASM
 *    entry disabled interrupts.
 *
 * As a precondition, this requires that the entry came from user mode,
 * idle, or a kernel context in which RCU is watching.
 *
 * For kernel mode entries RCU handling is done conditionally. If RCU is
 * watching then the only RCU requirement is to check whether the tick has
 * to be restarted. If RCU is not watching then ct_irq_enter() has to be
 * invoked on entry and ct_irq_exit() on exit.
 *
 * Avoiding the ct_irq_enter/exit() calls is an optimization but also
 * solves the problem of kernel mode pagefaults which can schedule, which
 * is not possible after invoking ct_irq_enter() without undoing it.
 *
 * For user mode entries irqentry_enter_from_user_mode() is invoked to
 * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit
 * would not be possible.
 *
 * Returns: An opaque object that must be passed to irqentry_exit()
 */
irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs);

/**
 * irqentry_exit_cond_resched - Conditionally reschedule on return from interrupt
 *
 * Conditional reschedule with additional sanity checks.
 */
void raw_irqentry_exit_cond_resched(void);
#ifdef CONFIG_PREEMPT_DYNAMIC
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
#define irqentry_exit_cond_resched_dynamic_enabled	raw_irqentry_exit_cond_resched
#define irqentry_exit_cond_resched_dynamic_disabled	NULL
DECLARE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
#define irqentry_exit_cond_resched()	static_call(irqentry_exit_cond_resched)()
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
void dynamic_irqentry_exit_cond_resched(void);
#define irqentry_exit_cond_resched()	dynamic_irqentry_exit_cond_resched()
#endif
#else /* CONFIG_PREEMPT_DYNAMIC */
#define irqentry_exit_cond_resched()	raw_irqentry_exit_cond_resched()
#endif /* CONFIG_PREEMPT_DYNAMIC */

/**
 * irqentry_exit - Handle return from exception that used irqentry_enter()
 * @regs: Pointer to pt_regs (exception entry regs)
 * @state: Return value from matching call to irqentry_enter()
 *
 * Depending on the return target (kernel/user) this runs the necessary
 * preemption and work checks if possible and required and returns to
 * the caller with interrupts disabled and no further work pending.
 *
 * This is the last action before returning to the low level ASM code which
 * just needs to return to the appropriate context.
 *
 * Counterpart to irqentry_enter().
 */
void noinstr irqentry_exit(struct pt_regs *regs, irqentry_state_t state);
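
/*
 * Illustrative sketch, not part of the API: an exception or interrupt
 * handler that can be taken from either kernel or user mode brackets its
 * work with irqentry_enter()/irqentry_exit() and passes the opaque state
 * through. do_foo_irq() and handle_foo_irq() are made up:
 *
 *	void noinstr do_foo_irq(struct pt_regs *regs)
 *	{
 *		irqentry_state_t state = irqentry_enter(regs);
 *
 *		instrumentation_begin();
 *		handle_foo_irq(regs);
 *		instrumentation_end();
 *
 *		irqentry_exit(regs, state);
 *	}
 *
 * irqentry_nmi_enter()/irqentry_nmi_exit() below follow the same pattern
 * for NMI entry.
 */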

/**
 * irqentry_nmi_enter - Handle NMI entry
 * @regs: Pointer to current's pt_regs
 *
 * Similar to irqentry_enter() but taking care of the NMI constraints.
 */
irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs);

/**
 * irqentry_nmi_exit - Handle return from NMI handling
 * @regs: Pointer to pt_regs (NMI entry regs)
 * @irq_state: Return value from matching call to irqentry_nmi_enter()
 *
 * Last action before returning to the low level assembly code.
 *
 * Counterpart to irqentry_nmi_enter().
 */
void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state);

#endif