/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
#ifndef _LINUX_RSEQ_H
#define _LINUX_RSEQ_H

#ifdef CONFIG_RSEQ

#include <linux/preempt.h>
#include <linux/sched.h>

10 /*
11 * Map the event mask on the user-space ABI enum rseq_cs_flags
12 * for direct mask checks.
13 */
14 enum rseq_event_mask_bits {
15 RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
16 RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
17 RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
18 };
20 enum rseq_event_mask {
21 RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT),
22 RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT),
23 RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT),
24 };
rseq_set_notify_resume(struct task_struct * t)26 static inline void rseq_set_notify_resume(struct task_struct *t)
27 {
28 if (t->rseq)
29 set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
30 }

void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);

rseq_handle_notify_resume(struct ksignal * ksig,struct pt_regs * regs)34 static inline void rseq_handle_notify_resume(struct ksignal *ksig,
35 struct pt_regs *regs)
36 {
37 if (current->rseq)
38 __rseq_handle_notify_resume(ksig, regs);
39 }
rseq_signal_deliver(struct ksignal * ksig,struct pt_regs * regs)41 static inline void rseq_signal_deliver(struct ksignal *ksig,
42 struct pt_regs *regs)
43 {
44 preempt_disable();
45 __set_bit(RSEQ_EVENT_SIGNAL_BIT, ¤t->rseq_event_mask);
46 preempt_enable();
47 rseq_handle_notify_resume(ksig, regs);
48 }
50 /* rseq_preempt() requires preemption to be disabled. */
rseq_preempt(struct task_struct * t)51 static inline void rseq_preempt(struct task_struct *t)
52 {
53 __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
54 rseq_set_notify_resume(t);
55 }
57 /* rseq_migrate() requires preemption to be disabled. */
rseq_migrate(struct task_struct * t)58 static inline void rseq_migrate(struct task_struct *t)
59 {
60 __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
61 rseq_set_notify_resume(t);
62 }
64 /*
65 * If parent process has a registered restartable sequences area, the
66 * child inherits. Unregister rseq for a clone with CLONE_VM set.
67 */
rseq_fork(struct task_struct * t,unsigned long clone_flags)68 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
69 {
70 if (clone_flags & CLONE_VM) {
71 t->rseq = NULL;
72 t->rseq_len = 0;
73 t->rseq_sig = 0;
74 t->rseq_event_mask = 0;
75 } else {
76 t->rseq = current->rseq;
77 t->rseq_len = current->rseq_len;
78 t->rseq_sig = current->rseq_sig;
79 t->rseq_event_mask = current->rseq_event_mask;
80 }
81 }
rseq_execve(struct task_struct * t)83 static inline void rseq_execve(struct task_struct *t)
84 {
85 t->rseq = NULL;
86 t->rseq_len = 0;
87 t->rseq_sig = 0;
88 t->rseq_event_mask = 0;
89 }

#else

/* Stub for !CONFIG_RSEQ builds. */
static inline void rseq_set_notify_resume(struct task_struct *t)
{
}
/* Stub for !CONFIG_RSEQ builds. */
static inline void rseq_handle_notify_resume(struct ksignal *ksig,
					     struct pt_regs *regs)
{
}
/* Stub for !CONFIG_RSEQ builds. */
static inline void rseq_signal_deliver(struct ksignal *ksig,
				       struct pt_regs *regs)
{
}
/* Stub for !CONFIG_RSEQ builds. */
static inline void rseq_preempt(struct task_struct *t)
{
}
/* Stub for !CONFIG_RSEQ builds. */
static inline void rseq_migrate(struct task_struct *t)
{
}
/* Stub for !CONFIG_RSEQ builds. */
static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
}
/* Stub for !CONFIG_RSEQ builds. */
static inline void rseq_execve(struct task_struct *t)
{
}

#endif

#ifdef CONFIG_DEBUG_RSEQ

void rseq_syscall(struct pt_regs *regs);

#else

/* Stub for !CONFIG_DEBUG_RSEQ builds. */
static inline void rseq_syscall(struct pt_regs *regs)
{
}

#endif

#endif /* _LINUX_RSEQ_H */