xref: /linux/include/linux/thread_info.h (revision fcc79e1714e8c2b8e216dc3149812edd37884eef)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* thread_info.h: common low-level thread information accessors
3  *
4  * Copyright (C) 2002  David Howells (dhowells@redhat.com)
5  * - Incorporating suggestions made by Linus Torvalds
6  */
7 
8 #ifndef _LINUX_THREAD_INFO_H
9 #define _LINUX_THREAD_INFO_H
10 
11 #include <linux/types.h>
12 #include <linux/limits.h>
13 #include <linux/bug.h>
14 #include <linux/restart_block.h>
15 #include <linux/errno.h>
16 
#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
 * For CONFIG_THREAD_INFO_IN_TASK kernels we need <asm/current.h> for the
 * definition of current, but for !CONFIG_THREAD_INFO_IN_TASK kernels,
 * including <asm/current.h> can cause a circular dependency on some platforms.
 */
#include <asm/current.h>
/*
 * NOTE(review): this cast relies on thread_info being embedded at the very
 * start of struct task_struct when CONFIG_THREAD_INFO_IN_TASK is set.
 */
#define current_thread_info() ((struct thread_info *)current)
#endif
26 
27 #include <linux/bitops.h>
28 
/*
 * Return values for per-arch arch_within_stack_frames() implementations,
 * defined in asm/thread_info.h.
 */
enum {
	BAD_STACK = -1,	/* object straddles a frame boundary - reject */
	NOT_STACK = 0,	/* object is not on this task's stack (or not checked) */
	GOOD_FRAME,	/* object lies entirely within a valid stack frame */
	GOOD_STACK,	/* object is on the stack; frame bounds not examined */
};
39 
#ifdef CONFIG_GENERIC_ENTRY
/*
 * Bit numbers for thread_info::syscall_work, consumed by the generic
 * syscall entry/exit code.  Each SYSCALL_WORK_* below is the single-bit
 * mask for the matching SYSCALL_WORK_BIT_* position.
 */
enum syscall_work_bit {
	SYSCALL_WORK_BIT_SECCOMP,
	SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT,
	SYSCALL_WORK_BIT_SYSCALL_TRACE,
	SYSCALL_WORK_BIT_SYSCALL_EMU,
	SYSCALL_WORK_BIT_SYSCALL_AUDIT,
	SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH,
	SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP,
};

#define SYSCALL_WORK_SECCOMP		BIT(SYSCALL_WORK_BIT_SECCOMP)
#define SYSCALL_WORK_SYSCALL_TRACEPOINT	BIT(SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT)
#define SYSCALL_WORK_SYSCALL_TRACE	BIT(SYSCALL_WORK_BIT_SYSCALL_TRACE)
#define SYSCALL_WORK_SYSCALL_EMU	BIT(SYSCALL_WORK_BIT_SYSCALL_EMU)
#define SYSCALL_WORK_SYSCALL_AUDIT	BIT(SYSCALL_WORK_BIT_SYSCALL_AUDIT)
#define SYSCALL_WORK_SYSCALL_USER_DISPATCH BIT(SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH)
#define SYSCALL_WORK_SYSCALL_EXIT_TRAP	BIT(SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP)
#endif
59 
60 #include <asm/thread_info.h>
61 
/*
 * Architectures that do not define TIF_NEED_RESCHED_LAZY must not select
 * CONFIG_ARCH_HAS_PREEMPT_LAZY; for them the "lazy" reschedule flag simply
 * aliases the regular TIF_NEED_RESCHED.
 */
#ifndef TIF_NEED_RESCHED_LAZY
#ifdef CONFIG_ARCH_HAS_PREEMPT_LAZY
#error Inconsistent PREEMPT_LAZY
#endif
#define TIF_NEED_RESCHED_LAZY TIF_NEED_RESCHED
#define _TIF_NEED_RESCHED_LAZY _TIF_NEED_RESCHED
#endif
69 
70 #ifdef __KERNEL__
71 
/* Arch hook to stash extra state alongside restart->fn; no-op by default. */
#ifndef arch_set_restart_data
#define arch_set_restart_data(restart) do { } while (0)
#endif

/*
 * Install @fn as the syscall-restart callback in @restart and return the
 * -ERESTART_RESTARTBLOCK value the caller is expected to propagate so the
 * restart machinery invokes @fn later.
 */
static inline long set_restart_fn(struct restart_block *restart,
					long (*fn)(struct restart_block *))
{
	restart->fn = fn;
	arch_set_restart_data(restart);	/* must follow the ->fn assignment */
	return -ERESTART_RESTARTBLOCK;
}
83 
/* Stack alignment defaults to the stack size unless the arch overrides it. */
#ifndef THREAD_ALIGN
#define THREAD_ALIGN	THREAD_SIZE
#endif

/* GFP flags for kernel-stack allocation: memcg-accounted and zeroed. */
#define THREADINFO_GFP		(GFP_KERNEL_ACCOUNT | __GFP_ZERO)
89 
90 /*
91  * flag set/clear/test wrappers
92  * - pass TIF_xxxx constants to these functions
93  */
94 
95 static inline void set_ti_thread_flag(struct thread_info *ti, int flag)
96 {
97 	set_bit(flag, (unsigned long *)&ti->flags);
98 }
99 
100 static inline void clear_ti_thread_flag(struct thread_info *ti, int flag)
101 {
102 	clear_bit(flag, (unsigned long *)&ti->flags);
103 }
104 
105 static inline void update_ti_thread_flag(struct thread_info *ti, int flag,
106 					 bool value)
107 {
108 	if (value)
109 		set_ti_thread_flag(ti, flag);
110 	else
111 		clear_ti_thread_flag(ti, flag);
112 }
113 
114 static inline int test_and_set_ti_thread_flag(struct thread_info *ti, int flag)
115 {
116 	return test_and_set_bit(flag, (unsigned long *)&ti->flags);
117 }
118 
119 static inline int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag)
120 {
121 	return test_and_clear_bit(flag, (unsigned long *)&ti->flags);
122 }
123 
124 static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
125 {
126 	return test_bit(flag, (unsigned long *)&ti->flags);
127 }
128 
/*
 * Return a one-shot READ_ONCE() snapshot of all of @ti's flags.
 *
 * This may be used in noinstr code, and needs to be __always_inline to prevent
 * inadvertent instrumentation.
 */
static __always_inline unsigned long read_ti_thread_flags(struct thread_info *ti)
{
	return READ_ONCE(ti->flags);
}
137 
/*
 * Variants of the *_ti_thread_flag() helpers above that implicitly operate
 * on the current task's thread_info.
 */
#define set_thread_flag(flag) \
	set_ti_thread_flag(current_thread_info(), flag)
#define clear_thread_flag(flag) \
	clear_ti_thread_flag(current_thread_info(), flag)
#define update_thread_flag(flag, value) \
	update_ti_thread_flag(current_thread_info(), flag, value)
#define test_and_set_thread_flag(flag) \
	test_and_set_ti_thread_flag(current_thread_info(), flag)
#define test_and_clear_thread_flag(flag) \
	test_and_clear_ti_thread_flag(current_thread_info(), flag)
#define test_thread_flag(flag) \
	test_ti_thread_flag(current_thread_info(), flag)
#define read_thread_flags() \
	read_ti_thread_flags(current_thread_info())

/* Snapshot the thread flags of an arbitrary task @t. */
#define read_task_thread_flags(t) \
	read_ti_thread_flags(task_thread_info(t))
155 
#ifdef CONFIG_GENERIC_ENTRY
/*
 * With the generic entry code, syscall work items live in a dedicated
 * thread_info::syscall_work word, indexed by SYSCALL_WORK_BIT_##fl.
 */
#define set_syscall_work(fl) \
	set_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)
#define test_syscall_work(fl) \
	test_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)
#define clear_syscall_work(fl) \
	clear_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)

#define set_task_syscall_work(t, fl) \
	set_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
#define test_task_syscall_work(t, fl) \
	test_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
#define clear_task_syscall_work(t, fl) \
	clear_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)

#else /* CONFIG_GENERIC_ENTRY */

/*
 * Without the generic entry code, syscall work items are tracked as
 * ordinary TIF_##fl thread flags instead.
 */
#define set_syscall_work(fl)						\
	set_ti_thread_flag(current_thread_info(), TIF_##fl)
#define test_syscall_work(fl) \
	test_ti_thread_flag(current_thread_info(), TIF_##fl)
#define clear_syscall_work(fl) \
	clear_ti_thread_flag(current_thread_info(), TIF_##fl)

#define set_task_syscall_work(t, fl) \
	set_ti_thread_flag(task_thread_info(t), TIF_##fl)
#define test_task_syscall_work(t, fl) \
	test_ti_thread_flag(task_thread_info(t), TIF_##fl)
#define clear_task_syscall_work(t, fl) \
	clear_ti_thread_flag(task_thread_info(t), TIF_##fl)
#endif /* !CONFIG_GENERIC_ENTRY */
187 
#ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H

/*
 * The instrumented bitops header is in use: call the raw arch_test_bit()
 * directly, presumably so this stays safe for noinstr callers — the
 * instrumented test_bit() wrapper would add sanitizer hooks.
 * NOTE(review): inferred from the header guard; confirm against bitops docs.
 */
static __always_inline bool tif_test_bit(int bit)
{
	return arch_test_bit(bit,
			     (unsigned long *)(&current_thread_info()->flags));
}

#else

/* No instrumented bitops wrappers: plain test_bit() suffices. */
static __always_inline bool tif_test_bit(int bit)
{
	return test_bit(bit,
			(unsigned long *)(&current_thread_info()->flags));
}

#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */
205 
/* Return true if TIF_NEED_RESCHED is set for the current task. */
static __always_inline bool tif_need_resched(void)
{
	return tif_test_bit(TIF_NEED_RESCHED);
}
210 
#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
/*
 * Fallback for architectures that cannot walk stack frames: report 0
 * (i.e. NOT_STACK, see the enum above) for every object, meaning no
 * frame-level verdict is available.
 */
static inline int arch_within_stack_frames(const void * const stack,
					   const void * const stackend,
					   const void *obj, unsigned long len)
{
	return 0;
}
#endif
219 
#ifdef CONFIG_HARDENED_USERCOPY
extern void __check_object_size(const void *ptr, unsigned long n,
					bool to_user);

/*
 * Hardened-usercopy validation of an @n-byte copy at @ptr (@to_user gives
 * the direction).  Compile-time-constant sizes skip the runtime check:
 * those are already rejected at build time via check_copy_size() and the
 * __bad_copy_*() compile-time errors below.
 */
static __always_inline void check_object_size(const void *ptr, unsigned long n,
					      bool to_user)
{
	if (!__builtin_constant_p(n))
		__check_object_size(ptr, n, to_user);
}
#else
/* !CONFIG_HARDENED_USERCOPY: runtime object-size checking compiled out. */
static inline void check_object_size(const void *ptr, unsigned long n,
				     bool to_user)
{ }
#endif /* CONFIG_HARDENED_USERCOPY */
235 
/* Build-time diagnostics for copies whose overflow is provable statically. */
extern void __compiletime_error("copy source size is too small")
__bad_copy_from(void);
extern void __compiletime_error("copy destination size is too small")
__bad_copy_to(void);

void __copy_overflow(int size, unsigned long count);

/*
 * Report a runtime copy overflow (@count bytes into a @size-byte object);
 * compiles to nothing when CONFIG_BUG is disabled.
 */
static inline void copy_overflow(int size, unsigned long count)
{
	if (IS_ENABLED(CONFIG_BUG))
		__copy_overflow(size, count);
}
248 
/*
 * Verify that copying @bytes to/from @addr stays within the object's
 * compile-time-known size.  @is_source says whether @addr is the source
 * (copy-from) or destination (copy-to) of the copy.
 *
 * Returns true if the copy may proceed.  A provable overflow with a
 * constant @bytes becomes a build error via __bad_copy_from()/
 * __bad_copy_to(); a non-constant overflow is reported at runtime via
 * copy_overflow().  Sizes above INT_MAX are rejected with a one-shot
 * warning, and surviving copies are handed to the hardened-usercopy
 * checker.
 */
static __always_inline __must_check bool
check_copy_size(const void *addr, size_t bytes, bool is_source)
{
	int sz = __builtin_object_size(addr, 0);	/* -1 when unknown */
	if (unlikely(sz >= 0 && sz < bytes)) {
		if (!__builtin_constant_p(bytes))
			copy_overflow(sz, bytes);
		else if (is_source)
			__bad_copy_from();
		else
			__bad_copy_to();
		return false;
	}
	if (WARN_ON_ONCE(bytes > INT_MAX))
		return false;
	check_object_size(addr, bytes, is_source);
	return true;
}
267 
/* Arch hook run when a task starts executing a new binary; default no-op. */
#ifndef arch_setup_new_exec
static inline void arch_setup_new_exec(void) { }
#endif

/* Arch hooks for task_struct lifetime management. */
void arch_task_cache_init(void); /* for CONFIG_SH */
void arch_release_task_struct(struct task_struct *tsk);
int arch_dup_task_struct(struct task_struct *dst,
				struct task_struct *src);
276 
277 #endif	/* __KERNEL__ */
278 
279 #endif /* _LINUX_THREAD_INFO_H */
280