/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_ENTRYCOMMON_H
#define __LINUX_ENTRYCOMMON_H

#include <linux/static_call_types.h>
#include <linux/ptrace.h>
#include <linux/syscalls.h>
#include <linux/seccomp.h>
#include <linux/sched.h>

#include <asm/entry-common.h>

/*
 * Define dummy _TIF work flags if not defined by the architecture or for
 * disabled functionality.
 */
#ifndef _TIF_PATCH_PENDING
# define _TIF_PATCH_PENDING		(0)
#endif

#ifndef _TIF_UPROBE
# define _TIF_UPROBE			(0)
#endif

/*
 * SYSCALL_WORK flags handled in syscall_enter_from_user_mode()
 */
#ifndef ARCH_SYSCALL_WORK_ENTER
# define ARCH_SYSCALL_WORK_ENTER	(0)
#endif

/*
 * SYSCALL_WORK flags handled in syscall_exit_to_user_mode()
 */
#ifndef ARCH_SYSCALL_WORK_EXIT
# define ARCH_SYSCALL_WORK_EXIT		(0)
#endif

#define SYSCALL_WORK_ENTER	(SYSCALL_WORK_SECCOMP |			\
				 SYSCALL_WORK_SYSCALL_TRACEPOINT |	\
				 SYSCALL_WORK_SYSCALL_TRACE |		\
				 SYSCALL_WORK_SYSCALL_EMU |		\
				 SYSCALL_WORK_SYSCALL_AUDIT |		\
				 SYSCALL_WORK_SYSCALL_USER_DISPATCH |	\
				 ARCH_SYSCALL_WORK_ENTER)
#define SYSCALL_WORK_EXIT	(SYSCALL_WORK_SYSCALL_TRACEPOINT |	\
				 SYSCALL_WORK_SYSCALL_TRACE |		\
				 SYSCALL_WORK_SYSCALL_AUDIT |		\
				 SYSCALL_WORK_SYSCALL_USER_DISPATCH |	\
				 SYSCALL_WORK_SYSCALL_EXIT_TRAP |	\
				 ARCH_SYSCALL_WORK_EXIT)

/*
 * TIF flags handled in exit_to_user_mode_loop()
 */
#ifndef ARCH_EXIT_TO_USER_MODE_WORK
# define ARCH_EXIT_TO_USER_MODE_WORK	(0)
#endif

#define EXIT_TO_USER_MODE_WORK						\
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |		\
	 _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL |	\
	 ARCH_EXIT_TO_USER_MODE_WORK)

/**
 * arch_check_user_regs - Architecture specific sanity check for user mode regs
 * @regs: Pointer to current's pt_regs
 *
 * Defaults to an empty implementation. Can be replaced by architecture
 * specific code.
 *
 * Invoked from syscall_enter_from_user_mode() in the non-instrumentable
 * section. Use __always_inline so the compiler cannot push it out of line
 * and make it instrumentable.
 */
static __always_inline void arch_check_user_regs(struct pt_regs *regs);

#ifndef arch_check_user_regs
static __always_inline void arch_check_user_regs(struct pt_regs *regs) {}
#endif
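/*
 * Illustrative sketch, not taken from any architecture: the overridable
 * hooks in this header are replaced by implementing them in the
 * architecture's <asm/entry-common.h> and adding a matching #define so
 * that the generic fallback above is not emitted. The check below is a
 * made-up example:
 *
 *	static __always_inline void arch_check_user_regs(struct pt_regs *regs)
 *	{
 *		// e.g. verify that the saved register frame is plausible
 *		// for an entry from user mode (hypothetical check)
 *		...
 *	}
 *	#define arch_check_user_regs arch_check_user_regs
 */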
/**
 * enter_from_user_mode - Establish state when coming from user mode
 *
 * Syscall/interrupt entry disables interrupts, but user mode is traced as
 * interrupts enabled. Also, with NO_HZ_FULL, RCU might be idle.
 *
 * 1) Tell lockdep that interrupts are disabled
 * 2) Invoke context tracking if enabled to reactivate RCU
 * 3) Trace interrupts off state
 *
 * Invoked from architecture specific syscall entry code with interrupts
 * disabled. The calling code has to be non-instrumentable. When the
 * function returns all state is correct and interrupts are still
 * disabled. The subsequent functions can be instrumented.
 *
 * This is invoked when there is architecture specific functionality to be
 * done between establishing state and enabling interrupts. The caller must
 * enable interrupts before invoking syscall_enter_from_user_mode_work().
 */
void enter_from_user_mode(struct pt_regs *regs);

/**
 * syscall_enter_from_user_mode_prepare - Establish state and enable interrupts
 * @regs: Pointer to current's pt_regs
 *
 * Invoked from architecture specific syscall entry code with interrupts
 * disabled. The calling code has to be non-instrumentable. When the
 * function returns all state is correct, interrupts are enabled and the
 * subsequent functions can be instrumented.
 *
 * This handles lockdep, RCU (context tracking) and tracing state, i.e.
 * the functionality provided by enter_from_user_mode().
 *
 * This is invoked when there is extra architecture specific functionality
 * to be done between establishing state and handling user mode entry work.
 */
void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);
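/*
 * Illustrative sketch of the split entry API; arch_syscall_entry(),
 * arch_entry_work() and regs->syscall_nr are made-up names, not part of
 * this header:
 *
 *	noinstr void arch_syscall_entry(struct pt_regs *regs)
 *	{
 *		long nr;
 *
 *		syscall_enter_from_user_mode_prepare(regs);
 *
 *		instrumentation_begin();
 *		arch_entry_work(regs);			// arch specific, irqs on
 *		nr = syscall_enter_from_user_mode_work(regs, regs->syscall_nr);
 *		instrumentation_end();
 *		...
 *	}
 */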
/**
 * syscall_enter_from_user_mode_work - Check and handle work before invoking
 *				       a syscall
 * @regs: Pointer to current's pt_regs
 * @syscall: The syscall number
 *
 * Invoked from architecture specific syscall entry code with interrupts
 * enabled after invoking syscall_enter_from_user_mode_prepare() and extra
 * architecture specific work.
 *
 * Returns: The original or a modified syscall number
 *
 * If the returned syscall number is -1 then the syscall should be
 * skipped. In this case the caller may invoke syscall_set_error() or
 * syscall_set_return_value() first. If neither of those is called and -1
 * is returned, then the syscall will fail with ENOSYS.
 *
 * It handles the following work items:
 *
 *  1) syscall_work flag dependent invocations of
 *     ptrace_report_syscall_entry(), __secure_computing(), trace_sys_enter()
 *  2) Invocation of audit_syscall_entry()
 */
long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall);

/**
 * syscall_enter_from_user_mode - Establish state and check and handle work
 *				  before invoking a syscall
 * @regs: Pointer to current's pt_regs
 * @syscall: The syscall number
 *
 * Invoked from architecture specific syscall entry code with interrupts
 * disabled. The calling code has to be non-instrumentable. When the
 * function returns all state is correct, interrupts are enabled and the
 * subsequent functions can be instrumented.
 *
 * This is a combination of syscall_enter_from_user_mode_prepare() and
 * syscall_enter_from_user_mode_work().
 *
 * Returns: The original or a modified syscall number. See
 * syscall_enter_from_user_mode_work() for further explanation.
 */
long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall);

/**
 * local_irq_enable_exit_to_user - Exit to user variant of local_irq_enable()
 * @ti_work: Cached TIF flags gathered with interrupts disabled
 *
 * Defaults to local_irq_enable(). Can be supplied by architecture specific
 * code.
 */
static inline void local_irq_enable_exit_to_user(unsigned long ti_work);

#ifndef local_irq_enable_exit_to_user
static inline void local_irq_enable_exit_to_user(unsigned long ti_work)
{
	local_irq_enable();
}
#endif
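/*
 * Illustrative sketch of the combined API as an architecture might use it;
 * arch_do_syscall() and invoke_syscall() are made-up names:
 *
 *	noinstr void arch_do_syscall(struct pt_regs *regs, long nr)
 *	{
 *		nr = syscall_enter_from_user_mode(regs, nr);
 *
 *		instrumentation_begin();
 *		if (nr != -1)
 *			invoke_syscall(regs, nr);	// dispatch via syscall table
 *		instrumentation_end();
 *
 *		syscall_exit_to_user_mode(regs);
 *	}
 */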
/**
 * local_irq_disable_exit_to_user - Exit to user variant of local_irq_disable()
 *
 * Defaults to local_irq_disable(). Can be supplied by architecture specific
 * code.
 */
static inline void local_irq_disable_exit_to_user(void);

#ifndef local_irq_disable_exit_to_user
static inline void local_irq_disable_exit_to_user(void)
{
	local_irq_disable();
}
#endif

/**
 * arch_exit_to_user_mode_work - Architecture specific TIF work for exit
 *				 to user mode.
 * @regs: Pointer to current's pt_regs
 * @ti_work: Cached TIF flags gathered with interrupts disabled
 *
 * Invoked from exit_to_user_mode_loop() with interrupts enabled.
 *
 * Defaults to NOOP. Can be supplied by architecture specific code.
 */
static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
					       unsigned long ti_work);

#ifndef arch_exit_to_user_mode_work
static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
					       unsigned long ti_work)
{
}
#endif

/**
 * arch_exit_to_user_mode_prepare - Architecture specific preparation for
 *				    exit to user mode.
 * @regs: Pointer to current's pt_regs
 * @ti_work: Cached TIF flags gathered with interrupts disabled
 *
 * Invoked from exit_to_user_mode_prepare() with interrupts disabled as the
 * last function before return. Defaults to NOOP.
 */
static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
						  unsigned long ti_work);

#ifndef arch_exit_to_user_mode_prepare
static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
						  unsigned long ti_work)
{
}
#endif
/**
 * arch_exit_to_user_mode - Architecture specific final work before
 *			    exit to user mode.
 *
 * Invoked from exit_to_user_mode() with interrupts disabled as the last
 * function before return. Defaults to NOOP.
 *
 * This needs to be __always_inline because it is non-instrumentable code
 * invoked after context tracking switched to user mode.
 *
 * An architecture implementation must not do anything complex, no locking
 * etc. The main purpose is for speculation mitigations.
 */
static __always_inline void arch_exit_to_user_mode(void);

#ifndef arch_exit_to_user_mode
static __always_inline void arch_exit_to_user_mode(void) { }
#endif

/**
 * arch_do_signal_or_restart - Architecture specific signal delivery function
 * @regs: Pointer to current's pt_regs
 *
 * Invoked from exit_to_user_mode_loop().
 */
void arch_do_signal_or_restart(struct pt_regs *regs);

/**
 * exit_to_user_mode - Fixup state when exiting to user mode
 *
 * Syscall/interrupt exit enables interrupts, but the kernel state is
 * interrupts disabled when this is invoked. Also tell RCU about it.
 *
 * 1) Trace interrupts on state
 * 2) Invoke context tracking if enabled to adjust RCU state
 * 3) Invoke architecture specific last minute exit code, e.g. speculation
 *    mitigations, etc.: arch_exit_to_user_mode()
 * 4) Tell lockdep that interrupts are enabled
 *
 * Invoked from architecture specific code when syscall_exit_to_user_mode()
 * is not suitable as the last step before returning to userspace. Must be
 * invoked with interrupts disabled and the caller must be
 * non-instrumentable.
 * The caller has to invoke syscall_exit_to_user_mode_work() before this.
 */
void exit_to_user_mode(void);
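/*
 * Illustrative sketch of the split exit API; arch_syscall_exit() and
 * arch_exit_work() are made-up names and the low level return to user
 * space is architecture specific:
 *
 *	noinstr void arch_syscall_exit(struct pt_regs *regs)
 *	{
 *		instrumentation_begin();
 *		syscall_exit_to_user_mode_work(regs);
 *		arch_exit_work(regs);		// hypothetical, irqs stay off
 *		instrumentation_end();
 *
 *		exit_to_user_mode();
 *		// low level ASM return to user space follows
 *	}
 */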
/**
 * syscall_exit_to_user_mode_work - Handle work before returning to user mode
 * @regs: Pointer to current's pt_regs
 *
 * Same as steps 1 and 2 of syscall_exit_to_user_mode() but without calling
 * exit_to_user_mode() to perform the final transition to user mode.
 *
 * Calling convention is the same as for syscall_exit_to_user_mode() and it
 * returns with all work handled and interrupts disabled. The caller must
 * invoke exit_to_user_mode() before actually switching to user mode to
 * make the final state transitions. Interrupts must stay disabled between
 * return from this function and the invocation of exit_to_user_mode().
 */
void syscall_exit_to_user_mode_work(struct pt_regs *regs);

/**
 * syscall_exit_to_user_mode - Handle work before returning to user mode
 * @regs: Pointer to current's pt_regs
 *
 * Invoked with interrupts enabled and fully valid regs. Returns with all
 * work handled, interrupts disabled such that the caller can immediately
 * switch to user mode. Called from architecture specific syscall and ret
 * from fork code.
 *
 * The call order is:
 *  1) One-time syscall exit work:
 *	- rseq syscall exit
 *	- audit
 *	- syscall tracing
 *	- ptrace (single stepping)
 *
 *  2) Preparatory work
 *	- Exit to user mode loop (common TIF handling). Invokes
 *	  arch_exit_to_user_mode_work() for architecture specific TIF work
 *	- Architecture specific one time work arch_exit_to_user_mode_prepare()
 *	- Address limit and lockdep checks
 *
 *  3) Final transition (lockdep, tracing, context tracking, RCU), i.e. the
 *     functionality in exit_to_user_mode().
 *
 * This is a combination of syscall_exit_to_user_mode_work() (1,2) and
 * exit_to_user_mode(). This function is preferred unless there is a
 * compelling architectural reason to use the separate functions.
 */
void syscall_exit_to_user_mode(struct pt_regs *regs);

/**
 * irqentry_enter_from_user_mode - Establish state before invoking the irq handler
 * @regs: Pointer to current's pt_regs
 *
 * Invoked from architecture specific entry code with interrupts disabled.
 * Can only be called when the interrupt entry came from user mode. The
 * calling code must be non-instrumentable. When the function returns all
 * state is correct and the subsequent functions can be instrumented.
 *
 * The function establishes state (lockdep, RCU (context tracking), tracing).
 */
void irqentry_enter_from_user_mode(struct pt_regs *regs);
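/*
 * Illustrative sketch for an interrupt that is known to have hit user
 * mode, paired with irqentry_exit_to_user_mode() below;
 * arch_handle_user_irq() and handle_irq() are made-up names:
 *
 *	noinstr void arch_handle_user_irq(struct pt_regs *regs)
 *	{
 *		irqentry_enter_from_user_mode(regs);
 *
 *		instrumentation_begin();
 *		handle_irq(regs);		// instrumentable C handler
 *		instrumentation_end();
 *
 *		irqentry_exit_to_user_mode(regs);
 *	}
 */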
/**
 * irqentry_exit_to_user_mode - Interrupt exit work
 * @regs: Pointer to current's pt_regs
 *
 * Invoked with interrupts disabled and fully valid regs. Returns with all
 * work handled, interrupts disabled such that the caller can immediately
 * switch to user mode. Called from architecture specific interrupt
 * handling code.
 *
 * The call order is #2 and #3 as described in syscall_exit_to_user_mode().
 * Interrupt exit is not invoking #1 which is the syscall specific one time
 * work.
 */
void irqentry_exit_to_user_mode(struct pt_regs *regs);

#ifndef irqentry_state
/**
 * struct irqentry_state - Opaque object for exception state storage
 * @exit_rcu: Used exclusively in the irqentry_*() calls; signals whether the
 *            exit path has to invoke rcu_irq_exit().
 * @lockdep: Used exclusively in the irqentry_nmi_*() calls; ensures that
 *           lockdep state is restored correctly on exit from nmi.
 *
 * This opaque object is filled in by the irqentry_*_enter() functions and
 * must be passed back into the corresponding irqentry_*_exit() functions
 * when the exception is complete.
 *
 * Callers of irqentry_*_[enter|exit]() must consider this structure opaque
 * and all members private. Descriptions of the members are provided to aid in
 * the maintenance of the irqentry_*() functions.
 */
typedef struct irqentry_state {
	union {
		bool	exit_rcu;
		bool	lockdep;
	};
} irqentry_state_t;
#endif

/**
 * irqentry_enter - Handle state tracking on ordinary interrupt entries
 * @regs: Pointer to pt_regs of interrupted context
 *
 * Invokes:
 *  - lockdep irqflag state tracking as low level ASM entry disabled
 *    interrupts.
 *
 *  - Context tracking if the exception hit user mode.
 *
 *  - The hardirq tracer to keep the state consistent as low level ASM
 *    entry disabled interrupts.
 *
 * As a precondition, this requires that the entry came from user mode,
 * idle, or a kernel context in which RCU is watching.
 *
 * For kernel mode entries RCU handling is done conditionally. If RCU is
 * watching then the only RCU requirement is to check whether the tick has
 * to be restarted. If RCU is not watching then rcu_irq_enter() has to be
 * invoked on entry and rcu_irq_exit() on exit.
 *
 * Avoiding the rcu_irq_enter/exit() calls is an optimization but also
 * solves the problem of kernel mode pagefaults which can schedule, which
 * is not possible after invoking rcu_irq_enter() without undoing it.
 *
 * For user mode entries irqentry_enter_from_user_mode() is invoked to
 * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit
 * would not be possible.
 *
 * Returns: An opaque object that must be passed to irqentry_exit()
 */
irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs);
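/*
 * Illustrative sketch of an exception handler that can be entered from
 * both kernel and user mode; arch_handle_fault() and do_fault() are
 * made-up names:
 *
 *	noinstr void arch_handle_fault(struct pt_regs *regs)
 *	{
 *		irqentry_state_t state = irqentry_enter(regs);
 *
 *		instrumentation_begin();
 *		do_fault(regs);			// instrumentable C handler
 *		instrumentation_end();
 *
 *		irqentry_exit(regs, state);	// declared below
 *	}
 */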
/**
 * irqentry_exit_cond_resched - Conditionally reschedule on return from interrupt
 *
 * Conditional reschedule with additional sanity checks.
 */
void irqentry_exit_cond_resched(void);
#ifdef CONFIG_PREEMPT_DYNAMIC
DECLARE_STATIC_CALL(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
#endif

/**
 * irqentry_exit - Handle return from exception that used irqentry_enter()
 * @regs: Pointer to pt_regs (exception entry regs)
 * @state: Return value from matching call to irqentry_enter()
 *
 * Depending on the return target (kernel/user) this runs the necessary
 * preemption and work checks if possible and required and returns to
 * the caller with interrupts disabled and no further work pending.
 *
 * This is the last action before returning to the low level ASM code which
 * just needs to return to the appropriate context.
 *
 * Counterpart to irqentry_enter().
 */
void noinstr irqentry_exit(struct pt_regs *regs, irqentry_state_t state);
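/*
 * Illustrative sketch for the NMI variants declared below;
 * arch_handle_nmi() and do_nmi_work() are made-up names:
 *
 *	noinstr void arch_handle_nmi(struct pt_regs *regs)
 *	{
 *		irqentry_state_t irq_state = irqentry_nmi_enter(regs);
 *
 *		instrumentation_begin();
 *		do_nmi_work(regs);		// instrumentable NMI handler
 *		instrumentation_end();
 *
 *		irqentry_nmi_exit(regs, irq_state);
 *	}
 */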
/**
 * irqentry_nmi_enter - Handle NMI entry
 * @regs: Pointer to current's pt_regs
 *
 * Similar to irqentry_enter() but taking care of the NMI constraints.
 */
irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs);

/**
 * irqentry_nmi_exit - Handle return from NMI handling
 * @regs: Pointer to pt_regs (NMI entry regs)
 * @irq_state: Return value from matching call to irqentry_nmi_enter()
 *
 * Last action before returning to the low level assembly code.
 *
 * Counterpart to irqentry_nmi_enter().
 */
void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state);

#endif