/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FUTEX_H
#define _FUTEX_H

#include <linux/futex.h>
#include <linux/rtmutex.h>
#include <linux/sched/wake_q.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/cleanup.h>

#ifdef CONFIG_PREEMPT_RT
#include <linux/rcuwait.h>
#endif

#include <asm/futex.h>

/*
 * Futex flags used to encode options to functions and preserve them across
 * restarts.
 */
#define FLAGS_SIZE_8		0x0000
#define FLAGS_SIZE_16		0x0001
#define FLAGS_SIZE_32		0x0002
#define FLAGS_SIZE_64		0x0003

#define FLAGS_SIZE_MASK		0x0003

#ifdef CONFIG_MMU
# define FLAGS_SHARED		0x0010
#else
/*
 * NOMMU does not have a per-process address space, so all futexes are
 * effectively private. Define FLAGS_SHARED as zero and let the compiler
 * optimize the shared paths away.
 */
# define FLAGS_SHARED		0x0000
#endif
#define FLAGS_CLOCKRT		0x0020
#define FLAGS_HAS_TIMEOUT	0x0040
#define FLAGS_NUMA		0x0080
#define FLAGS_STRICT		0x0100
#define FLAGS_MPOL		0x0200

/* FUTEX_ to FLAGS_ */
static inline unsigned int futex_to_flags(unsigned int op)
{
	unsigned int flags = FLAGS_SIZE_32;

	if (!(op & FUTEX_PRIVATE_FLAG))
		flags |= FLAGS_SHARED;

	if (op & FUTEX_CLOCK_REALTIME)
		flags |= FLAGS_CLOCKRT;

	return flags;
}
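
/*
 * Worked example (illustrative, not used by the code): a legacy
 * sys_futex() op of FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME, without
 * FUTEX_PRIVATE_FLAG, maps to:
 *
 *	FLAGS_SIZE_32 | FLAGS_SHARED | FLAGS_CLOCKRT == 0x0032
 *
 * Legacy futexes are always 32 bit, hence the unconditional FLAGS_SIZE_32.
 */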

#define FUTEX2_VALID_MASK (FUTEX2_SIZE_MASK | FUTEX2_NUMA | FUTEX2_MPOL | FUTEX2_PRIVATE)

/* FUTEX2_ to FLAGS_ */
static inline unsigned int futex2_to_flags(unsigned int flags2)
{
	unsigned int flags = flags2 & FUTEX2_SIZE_MASK;

	if (!(flags2 & FUTEX2_PRIVATE))
		flags |= FLAGS_SHARED;

	if (flags2 & FUTEX2_NUMA)
		flags |= FLAGS_NUMA;

	if (flags2 & FUTEX2_MPOL)
		flags |= FLAGS_MPOL;

	return flags;
}
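
/*
 * Worked example (illustrative): FUTEX2_SIZE_U32 | FUTEX2_PRIVATE from
 * sys_futex_waitv() becomes plain FLAGS_SIZE_32; the size bits pass
 * through unchanged because FLAGS_SIZE_* deliberately mirrors
 * FUTEX2_SIZE_*.
 */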

static inline unsigned int futex_size(unsigned int flags)
{
	return 1 << (flags & FLAGS_SIZE_MASK);
}
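
/*
 * Example (illustrative): FLAGS_SIZE_8..FLAGS_SIZE_64 decode to
 * 1 << 0 .. 1 << 3, i.e. futex words of 1, 2, 4 and 8 bytes.
 */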

static inline bool futex_flags_valid(unsigned int flags)
{
	/* Only 64bit futexes for 64bit code */
	if (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall()) {
		if ((flags & FLAGS_SIZE_MASK) == FLAGS_SIZE_64)
			return false;
	}

	/* Only 32bit futexes are implemented -- for now */
	if ((flags & FLAGS_SIZE_MASK) != FLAGS_SIZE_32)
		return false;

	/*
	 * Must be able to represent both FUTEX_NO_NODE and every valid nodeid
	 * in a futex word.
	 */
	if (flags & FLAGS_NUMA) {
		int bits = 8 * futex_size(flags);
		u64 max = ~0ULL;

		max >>= 64 - bits;
		if (nr_node_ids >= max)
			return false;
	}

	return true;
}
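
/*
 * Worked example (illustrative): for a 32-bit NUMA futex, bits == 32 and
 * max == 0xffffffff, which is exactly the FUTEX_NO_NODE sentinel in a
 * 32-bit node field, so nr_node_ids must stay strictly below it. An
 * 8-bit futex, were it enabled, could support at most 255 nodes.
 */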

static inline bool futex_validate_input(unsigned int flags, u64 val)
{
	int bits = 8 * futex_size(flags);

	if (bits < 64 && (val >> bits))
		return false;

	return true;
}
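
/*
 * Worked example (illustrative): with FLAGS_SIZE_32, bits == 32, so
 * val == 0x100000000ULL is rejected (val >> 32 != 0) while any value
 * that fits in the 32-bit futex word passes.
 */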

#ifdef CONFIG_FAIL_FUTEX
extern bool should_fail_futex(bool fshared);
#else
static inline bool should_fail_futex(bool fshared)
{
	return false;
}
#endif

/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location.  Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
	atomic_t waiters;
	spinlock_t lock;
	struct plist_head chain;
	struct futex_private_hash *priv;
} ____cacheline_aligned_in_smp;

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex_base pi_mutex;

	struct task_struct *owner;
	refcount_t refcount;

	union futex_key key;
} __randomize_layout;

struct futex_q;
typedef void (futex_wake_fn)(struct wake_q_head *wake_q, struct futex_q *q);

/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
 * @list:		priority-sorted list of tasks waiting on this futex
 * @task:		the task waiting on the futex
 * @lock_ptr:		the hash bucket lock
 * @wake:		the wake handler for this queue
 * @wake_data:		data associated with the wake handler
 * @key:		the key the futex is hashed on
 * @pi_state:		optional priority inheritance state
 * @rt_waiter:		rt_waiter storage for use with requeue_pi
 * @requeue_pi_key:	the requeue_pi target futex key
 * @bitset:		bitset for the optional bitmasked wakeup
 * @requeue_state:	State field for futex_requeue_pi()
 * @drop_hb_ref:	Waiter should drop the extra hash bucket reference if true
 * @requeue_wait:	RCU wait for futex_requeue_pi() (RT only)
 *
 * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash list via
 * the rt_mutex code. See futex_unqueue_pi().
 */
struct futex_q {
	struct plist_node list;

	struct task_struct *task;
	spinlock_t *lock_ptr;
	futex_wake_fn *wake;
	void *wake_data;
	union futex_key key;
	struct futex_pi_state *pi_state;
	struct rt_mutex_waiter *rt_waiter;
	union futex_key *requeue_pi_key;
	u32 bitset;
	atomic_t requeue_state;
	bool drop_hb_ref;
#ifdef CONFIG_PREEMPT_RT
	struct rcuwait requeue_wait;
#endif
} __randomize_layout;

extern const struct futex_q futex_q_init;

enum futex_access {
	FUTEX_READ,
	FUTEX_WRITE
};

extern int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key,
			 enum futex_access rw);
extern void futex_q_lockptr_lock(struct futex_q *q);
extern struct hrtimer_sleeper *
futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
		  int flags, u64 range_ns);

extern struct futex_hash_bucket *futex_hash(union futex_key *key);
#ifdef CONFIG_FUTEX_PRIVATE_HASH
extern void futex_hash_get(struct futex_hash_bucket *hb);
extern void futex_hash_put(struct futex_hash_bucket *hb);

extern struct futex_private_hash *futex_private_hash(void);
extern bool futex_private_hash_get(struct futex_private_hash *fph);
extern void futex_private_hash_put(struct futex_private_hash *fph);

#else /* !CONFIG_FUTEX_PRIVATE_HASH */
static inline void futex_hash_get(struct futex_hash_bucket *hb) { }
static inline void futex_hash_put(struct futex_hash_bucket *hb) { }
static inline struct futex_private_hash *futex_private_hash(void) { return NULL; }
static inline bool futex_private_hash_get(struct futex_private_hash *fph) { return false; }
static inline void futex_private_hash_put(struct futex_private_hash *fph) { }
#endif

DEFINE_CLASS(hb, struct futex_hash_bucket *,
	     if (_T) futex_hash_put(_T),
	     futex_hash(key), union futex_key *key);

DEFINE_CLASS(private_hash, struct futex_private_hash *,
	     if (_T) futex_private_hash_put(_T),
	     futex_private_hash(), void);

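/*
 * Example (illustrative): the cleanup.h classes above enable scope-based
 * reference management; the put runs automatically when the variable
 * leaves scope:
 *
 *	CLASS(hb, bucket)(&q->key);	// bucket = futex_hash(&q->key)
 *	...use bucket...
 *
 * On scope exit futex_hash_put(bucket) is invoked if bucket is non-NULL.
 */
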
/**
 * futex_match - Check whether two futex keys are equal
 * @key1:	Pointer to key1
 * @key2:	Pointer to key2
 *
 * Return: 1 if the two futex_keys are equal, 0 otherwise.
 */
static inline int futex_match(union futex_key *key1, union futex_key *key2)
{
	return (key1 && key2
		&& key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}

extern int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
			    struct futex_q *q, union futex_key *key2,
			    struct task_struct *task);
extern void futex_do_wait(struct futex_q *q, struct hrtimer_sleeper *timeout);
extern bool __futex_wake_mark(struct futex_q *q);
extern void futex_wake_mark(struct wake_q_head *wake_q, struct futex_q *q);

extern int fault_in_user_writeable(u32 __user *uaddr);
extern struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, union futex_key *key);

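/*
 * Atomic compare-and-exchange of the user space futex word with page
 * faults disabled: instead of sleeping on a fault, the operation fails
 * with -EFAULT and the caller is expected to fault the page in (e.g.
 * via fault_in_user_writeable()) and retry. That makes it safe to call
 * with a hash bucket lock held.
 */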
static inline int futex_cmpxchg_value_locked(u32 *curval, u32 __user *uaddr, u32 uval, u32 newval)
{
	int ret;

	pagefault_disable();
	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
	pagefault_enable();

	return ret;
}

/*
 * This does a plain atomic user space read, and the user pointer has
 * already been verified earlier by get_futex_key() to be both aligned
 * and actually in user space, just like futex_atomic_cmpxchg_inatomic().
 *
 * We still want to avoid any speculation, and while __get_user() is
 * the traditional model for this, it's actually slower than doing
 * this manually these days.
 *
 * We could just have a per-architecture special function for it,
 * the same way we do futex_atomic_cmpxchg_inatomic(), but rather
 * than force everybody to do that, write it out long-hand using
 * the low-level user-access infrastructure.
 *
 * This looks a bit overkill, but generally just results in a couple
 * of instructions.
 */
static __always_inline int futex_get_value(u32 *dest, u32 __user *from)
{
	u32 val;

	if (can_do_masked_user_access())
		from = masked_user_access_begin(from);
	else if (!user_read_access_begin(from, sizeof(*from)))
		return -EFAULT;
	unsafe_get_user(val, from, Efault);
	user_read_access_end();
	*dest = val;
	return 0;
Efault:
	user_read_access_end();
	return -EFAULT;
}

static __always_inline int futex_put_value(u32 val, u32 __user *to)
{
	if (can_do_masked_user_access())
		to = masked_user_access_begin(to);
	else if (!user_write_access_begin(to, sizeof(*to)))
		return -EFAULT;
	unsafe_put_user(val, to, Efault);
	user_write_access_end();
	return 0;
Efault:
	user_write_access_end();
	return -EFAULT;
}

static inline int futex_get_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = futex_get_value(dest, from);
	pagefault_enable();

	return ret;
}

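/*
 * Example (illustrative): the usual pattern for the _locked variants.
 * On -EFAULT the caller drops the hash bucket lock, faults the page in
 * with a sleeping access and retries, roughly as futex_wait_setup()
 * does:
 *
 *	ret = futex_get_value_locked(&uval, uaddr);
 *	if (ret) {
 *		futex_q_unlock(hb);
 *		if (get_user(uval, uaddr))
 *			return -EFAULT;
 *		goto retry;
 *	}
 */
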
extern void __futex_unqueue(struct futex_q *q);
extern void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb,
				struct task_struct *task);
extern int futex_unqueue(struct futex_q *q);

/**
 * futex_queue() - Enqueue the futex_q on the futex_hash_bucket
 * @q:	The futex_q to enqueue
 * @hb:	The destination hash bucket
 * @task: Task queueing this futex
 *
 * The hb->lock must be held by the caller, and is released here. A call to
 * futex_queue() is typically paired with exactly one call to futex_unqueue(). The
 * exceptions involve the PI related operations, which may use futex_unqueue_pi()
 * or nothing if the unqueue is done as part of the wake process and the unqueue
 * state is implicit in the state of the woken task (see futex_wait_requeue_pi()
 * for an example).
 *
 * Note that @task may be NULL, for async usage of futexes.
 */
static inline void futex_queue(struct futex_q *q, struct futex_hash_bucket *hb,
			       struct task_struct *task)
	__releases(&hb->lock)
{
	__futex_queue(q, hb, task);
	spin_unlock(&hb->lock);
}
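
/*
 * Sketch (illustrative) of how queueing fits into a wait, roughly as in
 * futex_wait():
 *
 *	futex_q_lock(&q, hb);		// hb->lock taken, waiters bumped
 *	futex_queue(&q, hb, current);	// enqueue, drops hb->lock
 *	schedule();			// until futex_wake() or timeout
 *	if (!futex_unqueue(&q))		// 0: already removed by the waker
 *		return 0;		// woken
 */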

extern void futex_unqueue_pi(struct futex_q *q);

extern void wait_for_owner_exiting(int ret, struct task_struct *exiting);

/*
 * Reflects a new waiter being added to the waitqueue.
 */
static inline void futex_hb_waiters_inc(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_inc(&hb->waiters);
	/*
	 * Full barrier (A), see the ordering guarantees comment in
	 * kernel/futex/core.c.
	 */
	smp_mb__after_atomic();
#endif
}

/*
 * Reflects a waiter being removed from the waitqueue by wakeup
 * paths.
 */
static inline void futex_hb_waiters_dec(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_dec(&hb->waiters);
#endif
}

static inline int futex_hb_waiters_pending(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	/*
	 * Full barrier (B), see the ordering guarantees comment in
	 * kernel/futex/core.c.
	 */
	smp_mb();
	return atomic_read(&hb->waiters);
#else
	return 1;
#endif
}
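
/*
 * Example (illustrative): the waiters count lets the wake side skip the
 * bucket lock entirely when nobody is queued:
 *
 *	if (!futex_hb_waiters_pending(hb))
 *		return 0;	// no waiters, avoid taking hb->lock
 *	spin_lock(&hb->lock);
 *	...
 */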

extern void futex_q_lock(struct futex_q *q, struct futex_hash_bucket *hb);
extern void futex_q_unlock(struct futex_hash_bucket *hb);


extern int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
				union futex_key *key,
				struct futex_pi_state **ps,
				struct task_struct *task,
				struct task_struct **exiting,
				int set_waiters);

extern int refill_pi_state_cache(void);
extern void get_pi_state(struct futex_pi_state *pi_state);
extern void put_pi_state(struct futex_pi_state *pi_state);
extern int fixup_pi_owner(u32 __user *uaddr, struct futex_q *q, int locked);

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 > hb2)
		swap(hb1, hb2);

	spin_lock(&hb1->lock);
	if (hb1 != hb2)
		spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
}

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
}
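
/*
 * Note (illustrative): taking both bucket locks in ascending address
 * order gives all requeue-style operations one global lock order, which
 * rules out ABBA deadlocks between, e.g., a requeue from hb1 to hb2
 * racing with one from hb2 to hb1.
 */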

/* syscalls */

extern int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, u32 val,
				 ktime_t *abs_time, u32 bitset, u32 __user *uaddr2);

extern int futex_requeue(u32 __user *uaddr1, unsigned int flags1,
			 u32 __user *uaddr2, unsigned int flags2,
			 int nr_wake, int nr_requeue,
			 u32 *cmpval, int requeue_pi);

extern int __futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
			struct hrtimer_sleeper *to, u32 bitset);

extern int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
		      ktime_t *abs_time, u32 bitset);

/**
 * struct futex_vector - Auxiliary struct for futex_waitv()
 * @w: Userspace provided data
 * @q: Kernel side data
 *
 * Struct used to build an array with all the data needed for futex_waitv().
 */
struct futex_vector {
	struct futex_waitv w;
	struct futex_q q;
};
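
/*
 * Sketch (illustrative) of the futex_waitv() flow built from the
 * helpers below:
 *
 *	struct futex_vector vs[FUTEX_WAITV_MAX];
 *
 *	ret = futex_parse_waitv(vs, uwaitv, nr_futexes, futex_wake_mark, NULL);
 *	if (!ret)
 *		ret = futex_wait_multiple(vs, nr_futexes, to);
 */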

extern int futex_parse_waitv(struct futex_vector *futexv,
			     struct futex_waitv __user *uwaitv,
			     unsigned int nr_futexes, futex_wake_fn *wake,
			     void *wake_data);

extern int futex_wait_multiple_setup(struct futex_vector *vs, int count,
				     int *woken);

extern int futex_unqueue_multiple(struct futex_vector *v, int count);

extern int futex_wait_multiple(struct futex_vector *vs, unsigned int count,
			       struct hrtimer_sleeper *to);

extern int futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset);

extern int futex_wake_op(u32 __user *uaddr1, unsigned int flags,
			 u32 __user *uaddr2, int nr_wake, int nr_wake2, int op);

extern int futex_unlock_pi(u32 __user *uaddr, unsigned int flags);

extern int futex_lock_pi(u32 __user *uaddr, unsigned int flags, ktime_t *time, int trylock);

#endif /* _FUTEX_H */