/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_ENTRYCOMMON_H
#define __LINUX_ENTRYCOMMON_H

#include <linux/static_call_types.h>
#include <linux/tracehook.h>
#include <linux/syscalls.h>
#include <linux/seccomp.h>
#include <linux/sched.h>

#include <asm/entry-common.h>

/*
 * Define dummy _TIF work flags if not defined by the architecture or for
 * disabled functionality.
 */
#ifndef _TIF_PATCH_PENDING
# define _TIF_PATCH_PENDING		(0)
#endif

#ifndef _TIF_UPROBE
# define _TIF_UPROBE			(0)
#endif

/*
 * SYSCALL_WORK flags handled in syscall_enter_from_user_mode()
 */
#ifndef ARCH_SYSCALL_WORK_ENTER
# define ARCH_SYSCALL_WORK_ENTER	(0)
#endif

/*
 * SYSCALL_WORK flags handled in syscall_exit_to_user_mode()
 */
#ifndef ARCH_SYSCALL_WORK_EXIT
# define ARCH_SYSCALL_WORK_EXIT		(0)
#endif

#define SYSCALL_WORK_ENTER	(SYSCALL_WORK_SECCOMP |			\
				 SYSCALL_WORK_SYSCALL_TRACEPOINT |	\
				 SYSCALL_WORK_SYSCALL_TRACE |		\
				 SYSCALL_WORK_SYSCALL_EMU |		\
				 SYSCALL_WORK_SYSCALL_AUDIT |		\
				 SYSCALL_WORK_SYSCALL_USER_DISPATCH |	\
				 ARCH_SYSCALL_WORK_ENTER)
#define SYSCALL_WORK_EXIT	(SYSCALL_WORK_SYSCALL_TRACEPOINT |	\
				 SYSCALL_WORK_SYSCALL_TRACE |		\
				 SYSCALL_WORK_SYSCALL_AUDIT |		\
				 SYSCALL_WORK_SYSCALL_USER_DISPATCH |	\
				 SYSCALL_WORK_SYSCALL_EXIT_TRAP |	\
				 ARCH_SYSCALL_WORK_EXIT)

/*
 * TIF flags handled in exit_to_user_mode_loop()
 */
#ifndef ARCH_EXIT_TO_USER_MODE_WORK
# define ARCH_EXIT_TO_USER_MODE_WORK	(0)
#endif

#define EXIT_TO_USER_MODE_WORK						\
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |		\
	 _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL |	\
	 ARCH_EXIT_TO_USER_MODE_WORK)
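
/*
 * Illustrative sketch, not part of this header: an architecture can feed its
 * own work bits into the common masks above from its asm/entry-common.h,
 * which is included before the defaults are established. The TIF flag name
 * below is hypothetical.
 *
 *	#define ARCH_EXIT_TO_USER_MODE_WORK	(_TIF_MY_ARCH_RESUME_WORK)
 *
 * The bit then becomes part of EXIT_TO_USER_MODE_WORK and is handled by
 * arch_exit_to_user_mode_work() from exit_to_user_mode_loop().
 */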

/**
 * arch_check_user_regs - Architecture specific sanity check for user mode regs
 * @regs: Pointer to current's pt_regs
 *
 * Defaults to an empty implementation. Can be replaced by architecture
 * specific code.
 *
 * Invoked from syscall_enter_from_user_mode() in the non-instrumentable
 * section. Use __always_inline so the compiler cannot push it out of line
 * and make it instrumentable.
 */
static __always_inline void arch_check_user_regs(struct pt_regs *regs);

#ifndef arch_check_user_regs
static __always_inline void arch_check_user_regs(struct pt_regs *regs) {}
#endif

/**
 * arch_syscall_enter_tracehook - Wrapper around tracehook_report_syscall_entry()
 * @regs: Pointer to current's pt_regs
 *
 * Returns: 0 on success or an error code to skip the syscall.
 *
 * Defaults to tracehook_report_syscall_entry(). Can be replaced by
 * architecture specific code.
 *
 * Invoked from syscall_enter_from_user_mode()
 */
static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs);

#ifndef arch_syscall_enter_tracehook
static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs)
{
	return tracehook_report_syscall_entry(regs);
}
#endif
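
/*
 * Illustrative sketch, not part of this header: the arch_*() hooks above use
 * the #ifndef convention, so an architecture overrides one by providing its
 * own implementation in asm/entry-common.h and defining the name so that the
 * generic fallback is not emitted, e.g.:
 *
 *	static __always_inline void arch_check_user_regs(struct pt_regs *regs)
 *	{
 *		... architecture specific sanity checks on @regs ...
 *	}
 *	#define arch_check_user_regs arch_check_user_regs
 */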

/**
 * enter_from_user_mode - Establish state when coming from user mode
 *
 * Syscall/interrupt entry disables interrupts, but user mode is traced as
 * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
 *
 * 1) Tell lockdep that interrupts are disabled
 * 2) Invoke context tracking if enabled to reactivate RCU
 * 3) Trace interrupts off state
 *
 * Invoked from architecture specific syscall entry code with interrupts
 * disabled. The calling code has to be non-instrumentable. When the
 * function returns all state is correct and interrupts are still
 * disabled. The subsequent functions can be instrumented.
 *
 * This is invoked when there is architecture specific functionality to be
 * done between establishing state and enabling interrupts. The caller must
 * enable interrupts before invoking syscall_enter_from_user_mode_work().
 */
void enter_from_user_mode(struct pt_regs *regs);

/**
 * syscall_enter_from_user_mode_prepare - Establish state and enable interrupts
 * @regs: Pointer to current's pt_regs
 *
 * Invoked from architecture specific syscall entry code with interrupts
 * disabled. The calling code has to be non-instrumentable. When the
 * function returns all state is correct, interrupts are enabled and the
 * subsequent functions can be instrumented.
 *
 * This handles lockdep, RCU (context tracking) and tracing state, i.e.
 * the functionality provided by enter_from_user_mode().
 *
 * This is invoked when there is extra architecture specific functionality
 * to be done between establishing state and handling user mode entry work.
 */
void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);

/**
 * syscall_enter_from_user_mode_work - Check and handle work before invoking
 *				       a syscall
 * @regs: Pointer to current's pt_regs
 * @syscall: The syscall number
 *
 * Invoked from architecture specific syscall entry code with interrupts
 * enabled after invoking syscall_enter_from_user_mode_prepare() and extra
 * architecture specific work.
 *
 * Returns: The original or a modified syscall number
 *
 * If the returned syscall number is -1 then the syscall should be
 * skipped. In this case the caller may invoke syscall_set_error() or
 * syscall_set_return_value() first. If neither of those are called and -1
 * is returned, then the syscall will fail with ENOSYS.
 *
 * It handles the following work items:
 *
 *  1) syscall_work flag dependent invocations of
 *     arch_syscall_enter_tracehook(), __secure_computing(), trace_sys_enter()
 *  2) Invocation of audit_syscall_entry()
 */
long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall);
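
/*
 * Illustrative sketch, not part of this header, of the split entry sequence
 * described above for architectures which need extra work with interrupts
 * enabled before the generic entry work runs. The names arch_syscall_entry(),
 * regs->syscallno and arch_fixup_syscall_args() are hypothetical.
 *
 *	noinstr void arch_syscall_entry(struct pt_regs *regs)
 *	{
 *		long nr;
 *
 *		syscall_enter_from_user_mode_prepare(regs);
 *
 *		instrumentation_begin();
 *		arch_fixup_syscall_args(regs);
 *		nr = syscall_enter_from_user_mode_work(regs, regs->syscallno);
 *		...
 *		instrumentation_end();
 *	}
 */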

/**
 * syscall_enter_from_user_mode - Establish state and check and handle work
 *				  before invoking a syscall
 * @regs: Pointer to current's pt_regs
 * @syscall: The syscall number
 *
 * Invoked from architecture specific syscall entry code with interrupts
 * disabled. The calling code has to be non-instrumentable. When the
 * function returns all state is correct, interrupts are enabled and the
 * subsequent functions can be instrumented.
 *
 * This is a combination of syscall_enter_from_user_mode_prepare() and
 * syscall_enter_from_user_mode_work().
 *
 * Returns: The original or a modified syscall number. See
 * syscall_enter_from_user_mode_work() for further explanation.
 */
long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall);
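
/*
 * Illustrative sketch, not part of this header: typical use of the combined
 * helpers from a noinstr architecture syscall path. The names
 * arch_do_syscall(), regs->syscallno and invoke_syscall() are hypothetical;
 * handling of the skipped (-1) case is omitted.
 *
 *	noinstr void arch_do_syscall(struct pt_regs *regs)
 *	{
 *		long nr = syscall_enter_from_user_mode(regs, regs->syscallno);
 *
 *		instrumentation_begin();
 *		if (nr != -1)
 *			invoke_syscall(regs, nr);
 *		instrumentation_end();
 *
 *		syscall_exit_to_user_mode(regs);
 *	}
 */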

/**
 * local_irq_enable_exit_to_user - Exit to user variant of local_irq_enable()
 * @ti_work: Cached TIF flags gathered with interrupts disabled
 *
 * Defaults to local_irq_enable(). Can be supplied by architecture specific
 * code.
 */
static inline void local_irq_enable_exit_to_user(unsigned long ti_work);

#ifndef local_irq_enable_exit_to_user
static inline void local_irq_enable_exit_to_user(unsigned long ti_work)
{
	local_irq_enable();
}
#endif

/**
 * local_irq_disable_exit_to_user - Exit to user variant of local_irq_disable()
 *
 * Defaults to local_irq_disable(). Can be supplied by architecture specific
 * code.
 */
static inline void local_irq_disable_exit_to_user(void);

#ifndef local_irq_disable_exit_to_user
static inline void local_irq_disable_exit_to_user(void)
{
	local_irq_disable();
}
#endif

/**
 * arch_exit_to_user_mode_work - Architecture specific TIF work for exit
 *				 to user mode.
 * @regs: Pointer to current's pt_regs
 * @ti_work: Cached TIF flags gathered with interrupts disabled
 *
 * Invoked from exit_to_user_mode_loop() with interrupts enabled
 *
 * Defaults to NOOP. Can be supplied by architecture specific code.
 */
static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
					       unsigned long ti_work);

#ifndef arch_exit_to_user_mode_work
static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
					       unsigned long ti_work)
{
}
#endif

/**
 * arch_exit_to_user_mode_prepare - Architecture specific preparation for
 *				    exit to user mode.
 * @regs: Pointer to current's pt_regs
 * @ti_work: Cached TIF flags gathered with interrupts disabled
 *
 * Invoked from exit_to_user_mode_prepare() with interrupts disabled as the
 * last function before return. Defaults to NOOP.
 */
static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
						  unsigned long ti_work);

#ifndef arch_exit_to_user_mode_prepare
static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
						  unsigned long ti_work)
{
}
#endif

/**
 * arch_exit_to_user_mode - Architecture specific final work before
 *			    exit to user mode.
 *
 * Invoked from exit_to_user_mode() with interrupts disabled as the last
 * function before return. Defaults to NOOP.
 *
 * This needs to be __always_inline because it is non-instrumentable code
 * invoked after context tracking switched to user mode.
 *
 * An architecture implementation must not do anything complex, no locking
 * etc. The main purpose is for speculation mitigations.
 */
static __always_inline void arch_exit_to_user_mode(void);

#ifndef arch_exit_to_user_mode
static __always_inline void arch_exit_to_user_mode(void) { }
#endif

/**
 * arch_do_signal_or_restart - Architecture specific signal delivery function
 * @regs: Pointer to current's pt_regs
 * @has_signal: True if a signal is pending and has to be delivered
 *
 * Invoked from exit_to_user_mode_loop().
 */
void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal);

/**
 * arch_syscall_exit_tracehook - Wrapper around tracehook_report_syscall_exit()
 * @regs: Pointer to current's pt_regs
 * @step: Indicator for single step
 *
 * Defaults to tracehook_report_syscall_exit(). Can be replaced by
 * architecture specific code.
 *
 * Invoked from syscall_exit_to_user_mode()
 */
static inline void arch_syscall_exit_tracehook(struct pt_regs *regs, bool step);

#ifndef arch_syscall_exit_tracehook
static inline void arch_syscall_exit_tracehook(struct pt_regs *regs, bool step)
{
	tracehook_report_syscall_exit(regs, step);
}
#endif

/**
 * exit_to_user_mode - Fixup state when exiting to user mode
 *
 * Syscall/interrupt exit enables interrupts, but the kernel state is
 * interrupts disabled when this is invoked. Also tell RCU about it.
 *
 * 1) Trace interrupts on state
 * 2) Invoke context tracking if enabled to adjust RCU state
 * 3) Invoke architecture specific last minute exit code, e.g. speculation
 *    mitigations, etc.: arch_exit_to_user_mode()
 * 4) Tell lockdep that interrupts are enabled
 *
 * Invoked from architecture specific code when syscall_exit_to_user_mode()
 * is not suitable as the last step before returning to userspace. Must be
 * invoked with interrupts disabled and the caller must be
 * non-instrumentable.
 * The caller has to invoke syscall_exit_to_user_mode_work() before this.
 */
void exit_to_user_mode(void);

/**
 * syscall_exit_to_user_mode_work - Handle work before returning to user mode
 * @regs: Pointer to current's pt_regs
 *
 * Same as steps 1 and 2 of syscall_exit_to_user_mode() but without calling
 * exit_to_user_mode() to perform the final transition to user mode.
 *
 * Calling convention is the same as for syscall_exit_to_user_mode() and it
 * returns with all work handled and interrupts disabled. The caller must
 * invoke exit_to_user_mode() before actually switching to user mode to
 * make the final state transitions. Interrupts must stay disabled between
 * return from this function and the invocation of exit_to_user_mode().
 */
void syscall_exit_to_user_mode_work(struct pt_regs *regs);
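
/*
 * Illustrative sketch, not part of this header, of the split exit sequence
 * described above: handle the exit work, do architecture specific last
 * minute work with interrupts still disabled, then make the final
 * transition. arch_last_minute_exit_work() is a hypothetical placeholder.
 *
 *	syscall_exit_to_user_mode_work(regs);
 *	arch_last_minute_exit_work(regs);
 *	exit_to_user_mode();
 */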

/**
 * syscall_exit_to_user_mode - Handle work before returning to user mode
 * @regs: Pointer to current's pt_regs
 *
 * Invoked with interrupts enabled and fully valid regs. Returns with all
 * work handled, interrupts disabled such that the caller can immediately
 * switch to user mode. Called from architecture specific syscall and ret
 * from fork code.
 *
 * The call order is:
 *  1) One-time syscall exit work:
 *	- rseq syscall exit
 *	- audit
 *	- syscall tracing
 *	- tracehook (single stepping)
 *
 *  2) Preparatory work
 *	- Exit to user mode loop (common TIF handling). Invokes
 *	  arch_exit_to_user_mode_work() for architecture specific TIF work
 *	- Architecture specific one time work arch_exit_to_user_mode_prepare()
 *	- Address limit and lockdep checks
 *
 *  3) Final transition (lockdep, tracing, context tracking, RCU), i.e. the
 *     functionality in exit_to_user_mode().
 *
 * This is a combination of syscall_exit_to_user_mode_work() (1,2) and
 * exit_to_user_mode(). This function is preferred unless there is a
 * compelling architectural reason to use the separate functions.
 */
void syscall_exit_to_user_mode(struct pt_regs *regs);

/**
 * irqentry_enter_from_user_mode - Establish state before invoking the irq handler
 * @regs: Pointer to current's pt_regs
 *
 * Invoked from architecture specific entry code with interrupts disabled.
 * Can only be called when the interrupt entry came from user mode. The
 * calling code must be non-instrumentable. When the function returns all
 * state is correct and the subsequent functions can be instrumented.
 *
 * The function establishes state (lockdep, RCU (context tracking), tracing).
 */
void irqentry_enter_from_user_mode(struct pt_regs *regs);

/**
 * irqentry_exit_to_user_mode - Interrupt exit work
 * @regs: Pointer to current's pt_regs
 *
 * Invoked with interrupts disabled and fully valid regs. Returns with all
 * work handled, interrupts disabled such that the caller can immediately
 * switch to user mode. Called from architecture specific interrupt
 * handling code.
 *
 * The call order is #2 and #3 as described in syscall_exit_to_user_mode().
 * Interrupt exit is not invoking #1 which is the syscall specific one time
 * work.
 */
void irqentry_exit_to_user_mode(struct pt_regs *regs);
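
/*
 * Illustrative sketch, not part of this header: an interrupt handler entry
 * which is known to come from user mode might pair the two functions above
 * roughly like this. arch_handle_user_irq() and do_the_irq() are
 * hypothetical.
 *
 *	noinstr void arch_handle_user_irq(struct pt_regs *regs)
 *	{
 *		irqentry_enter_from_user_mode(regs);
 *
 *		instrumentation_begin();
 *		do_the_irq(regs);
 *		instrumentation_end();
 *
 *		irqentry_exit_to_user_mode(regs);
 *	}
 */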

#ifndef irqentry_state
/**
 * struct irqentry_state - Opaque object for exception state storage
 * @exit_rcu: Used exclusively in the irqentry_*() calls; signals whether the
 *            exit path has to invoke rcu_irq_exit().
 * @lockdep: Used exclusively in the irqentry_nmi_*() calls; ensures that
 *           lockdep state is restored correctly on exit from nmi.
 *
 * This opaque object is filled in by the irqentry_*_enter() functions and
 * must be passed back into the corresponding irqentry_*_exit() functions
 * when the exception is complete.
 *
 * Callers of irqentry_*_[enter|exit]() must consider this structure opaque
 * and all members private. Descriptions of the members are provided to aid in
 * the maintenance of the irqentry_*() functions.
 */
typedef struct irqentry_state {
	union {
		bool	exit_rcu;
		bool	lockdep;
	};
} irqentry_state_t;
#endif

/**
 * irqentry_enter - Handle state tracking on ordinary interrupt entries
 * @regs: Pointer to pt_regs of interrupted context
 *
 * Invokes:
 *  - lockdep irqflag state tracking as low level ASM entry disabled
 *    interrupts.
 *
 *  - Context tracking if the exception hit user mode.
 *
 *  - The hardirq tracer to keep the state consistent as low level ASM
 *    entry disabled interrupts.
 *
 * As a precondition, this requires that the entry came from user mode,
 * idle, or a kernel context in which RCU is watching.
 *
 * For kernel mode entries RCU handling is done conditionally. If RCU is
 * watching then the only RCU requirement is to check whether the tick has
 * to be restarted. If RCU is not watching then rcu_irq_enter() has to be
 * invoked on entry and rcu_irq_exit() on exit.
 *
 * Avoiding the rcu_irq_enter/exit() calls is an optimization but also
 * solves the problem of kernel mode pagefaults which can schedule, which
 * is not possible after invoking rcu_irq_enter() without undoing it.
 *
 * For user mode entries irqentry_enter_from_user_mode() is invoked to
 * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit
 * would not be possible.
 *
 * Returns: An opaque object that must be passed to irqentry_exit()
 */
irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs);

/**
 * irqentry_exit_cond_resched - Conditionally reschedule on return from interrupt
 *
 * Conditional reschedule with additional sanity checks.
 */
void raw_irqentry_exit_cond_resched(void);
#ifdef CONFIG_PREEMPT_DYNAMIC
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
#define irqentry_exit_cond_resched_dynamic_enabled	raw_irqentry_exit_cond_resched
#define irqentry_exit_cond_resched_dynamic_disabled	NULL
DECLARE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
#define irqentry_exit_cond_resched()	static_call(irqentry_exit_cond_resched)()
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
void dynamic_irqentry_exit_cond_resched(void);
#define irqentry_exit_cond_resched()	dynamic_irqentry_exit_cond_resched()
#endif
#else /* CONFIG_PREEMPT_DYNAMIC */
#define irqentry_exit_cond_resched()	raw_irqentry_exit_cond_resched()
#endif /* CONFIG_PREEMPT_DYNAMIC */

/**
 * irqentry_exit - Handle return from exception that used irqentry_enter()
 * @regs: Pointer to pt_regs (exception entry regs)
 * @state: Return value from matching call to irqentry_enter()
 *
 * Depending on the return target (kernel/user) this runs the necessary
 * preemption and work checks if possible and required and returns to
 * the caller with interrupts disabled and no further work pending.
 *
 * This is the last action before returning to the low level ASM code which
 * just needs to return to the appropriate context.
 *
 * Counterpart to irqentry_enter().
 */
void noinstr irqentry_exit(struct pt_regs *regs, irqentry_state_t state);
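
/*
 * Illustrative sketch, not part of this header: the common pattern for an
 * exception or interrupt handler which may hit either kernel or user mode,
 * using irqentry_enter()/irqentry_exit(). arch_exception_entry() and
 * handle_the_exception() are hypothetical.
 *
 *	noinstr void arch_exception_entry(struct pt_regs *regs)
 *	{
 *		irqentry_state_t state = irqentry_enter(regs);
 *
 *		instrumentation_begin();
 *		handle_the_exception(regs);
 *		instrumentation_end();
 *
 *		irqentry_exit(regs, state);
 *	}
 */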

/**
 * irqentry_nmi_enter - Handle NMI entry
 * @regs: Pointer to current's pt_regs
 *
 * Similar to irqentry_enter() but taking care of the NMI constraints.
 */
irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs);

/**
 * irqentry_nmi_exit - Handle return from NMI handling
 * @regs: Pointer to pt_regs (NMI entry regs)
 * @irq_state: Return value from matching call to irqentry_nmi_enter()
 *
 * Last action before returning to the low level assembly code.
 *
 * Counterpart to irqentry_nmi_enter().
 */
void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state);

#endif