xref: /linux/arch/arm64/include/asm/entry-common.h (revision feafee284579d29537a5a56ba8f23894f0463f3d)
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_ARM64_ENTRY_COMMON_H
#define _ASM_ARM64_ENTRY_COMMON_H

#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/mte.h>
#include <asm/stacktrace.h>

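/*
 * arm64-specific work flags checked on the exit-to-user path. The generic
 * entry code is expected to OR this mask into the set of _TIF flags it
 * tests before returning to userspace, so the handlers below run from its
 * exit-to-user work loop.
 */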
#define ARCH_EXIT_TO_USER_MODE_WORK (_TIF_MTE_ASYNC_FAULT | _TIF_FOREIGN_FPSTATE)

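/*
 * Handle the arm64-specific exit-to-user work selected by
 * ARCH_EXIT_TO_USER_MODE_WORK; invoked from the generic entry code's
 * exit-to-user handling with the pending flags in @ti_work.
 */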
static __always_inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
							unsigned long ti_work)
{
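	/*
	 * An asynchronous MTE tag check fault was recorded for this task.
	 * Asynchronous faults report no faulting address, hence the NULL
	 * si_addr in the delivered signal.
	 */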
	if (ti_work & _TIF_MTE_ASYNC_FAULT) {
		clear_thread_flag(TIF_MTE_ASYNC_FAULT);
		send_sig_fault(SIGSEGV, SEGV_MTEAERR, (void __user *)NULL, current);
	}

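	/*
	 * The CPU's FP/SIMD registers do not hold this task's user state
	 * (e.g. after kernel-mode NEON use); reload it before returning
	 * to userspace.
	 */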
	if (ti_work & _TIF_FOREIGN_FPSTATE)
		fpsimd_restore_current_state();
}

#define arch_exit_to_user_mode_work arch_exit_to_user_mode_work

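/*
 * Tell the generic irqentry-exit path whether it may preempt the
 * interrupted context: a false return vetoes preemption for this
 * interrupt (here: while handling a pseudo-NMI, or before cpufeatures
 * have been finalized).
 */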
static inline bool arch_irqentry_exit_need_resched(void)
{
	/*
	 * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
	 * priority masking is used the GIC irqchip driver will clear DAIF.IF
	 * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
	 * DAIF we must have handled an NMI, so skip preemption.
	 */
	if (system_uses_irq_prio_masking() && read_sysreg(daif))
		return false;

	/*
	 * Preempting a task from an IRQ means we leave copies of PSTATE
	 * on the stack. cpufeature's enable calls may modify PSTATE, but
	 * resuming one of these preempted tasks would undo those changes.
	 *
	 * Only allow a task to be preempted once cpufeatures have been
	 * enabled.
	 */
	if (!system_capabilities_finalized())
		return false;

	return true;
}

#define arch_irqentry_exit_need_resched arch_irqentry_exit_need_resched

#endif /* _ASM_ARM64_ENTRY_COMMON_H */