xref: /linux/kernel/futex/syscalls.c (revision 59fff63cc2b75dcfe08f9eeb4b2187d73e53843d)
// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/syscalls.h>
#include <linux/time_namespace.h>

#include "futex.h"

/*
 * Support for robust futexes: the kernel cleans up held futexes at
 * thread exit time.
 *
 * Implementation: user-space maintains a per-thread list of locks it
 * is holding. Upon do_exit(), the kernel carefully walks this list,
 * and marks all locks that are owned by this thread with the
 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
 * always manipulated with the lock held, so the list is private and
 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
 * field, to allow the kernel to clean up if the thread dies after
 * acquiring the lock, but just before it could have added itself to
 * the list. There can only be one such pending lock.
 */

/**
 * sys_set_robust_list() - Set the robust-futex list head of a task
 * @head:	pointer to the list-head
 * @len:	length of the list-head, as userspace expects
 */
SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
		size_t, len)
{
	/*
	 * The kernel knows only one size for now:
	 */
	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->robust_list = head;

	return 0;
}
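
/*
 * Illustrative userspace sketch (not part of this file): registering a
 * per-thread robust list with the raw syscall, keeping the empty-list
 * sentinel the exit path expects (head.list.next pointing back at the
 * head). A C library normally does this for every thread it creates, so
 * registering your own list replaces the library's. The helper name
 * register_robust_list() is made up for the example; error handling is
 * trimmed.
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static __thread struct robust_list_head robust_head;
 *
 *	static long register_robust_list(void)
 *	{
 *		// Empty list: the head points back at itself.
 *		robust_head.list.next = &robust_head.list;
 *		// Offset from a list entry to its 32-bit futex word.
 *		robust_head.futex_offset = 0;
 *		robust_head.list_op_pending = NULL;
 *		// The kernel accepts exactly sizeof(struct robust_list_head).
 *		return syscall(SYS_set_robust_list, &robust_head,
 *			       sizeof(robust_head));
 *	}
 */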

/**
 * sys_get_robust_list() - Get the robust-futex list head of a task
 * @pid:	pid of the process [zero for current task]
 * @head_ptr:	pointer to a list-head pointer, the kernel fills it in
 * @len_ptr:	pointer to a length field, the kernel fills in the header size
 */
SYSCALL_DEFINE3(get_robust_list, int, pid,
		struct robust_list_head __user * __user *, head_ptr,
		size_t __user *, len_ptr)
{
	struct robust_list_head __user *head;
	unsigned long ret;
	struct task_struct *p;

	rcu_read_lock();

	ret = -ESRCH;
	if (!pid)
		p = current;
	else {
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
	}

	ret = -EPERM;
	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
		goto err_unlock;

	head = p->robust_list;
	rcu_read_unlock();

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(head, head_ptr);

err_unlock:
	rcu_read_unlock();

	return ret;
}
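
/*
 * Illustrative userspace sketch (not part of this file): querying the
 * robust-list head another task registered, e.g. from a debugger-style
 * tool. The caller needs ptrace read access to the target; pid 0 means
 * the calling thread, and the returned pointer is an address in the
 * target's address space. Hypothetical helper, error handling trimmed.
 *
 *	#include <linux/futex.h>
 *	#include <stdio.h>
 *	#include <sys/syscall.h>
 *	#include <sys/types.h>
 *	#include <unistd.h>
 *
 *	static void dump_robust_list_head(pid_t tid)
 *	{
 *		struct robust_list_head *head = NULL;
 *		size_t len = 0;
 *
 *		if (syscall(SYS_get_robust_list, (int)tid, &head, &len) == 0)
 *			printf("tid %d: head=%p len=%zu\n",
 *			       (int)tid, (void *)head, len);
 *	}
 */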

long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
		u32 __user *uaddr2, u32 val2, u32 val3)
{
	unsigned int flags = futex_to_flags(op);
	int cmd = op & FUTEX_CMD_MASK;

	if (flags & FLAGS_CLOCKRT) {
		if (cmd != FUTEX_WAIT_BITSET &&
		    cmd != FUTEX_WAIT_REQUEUE_PI &&
		    cmd != FUTEX_LOCK_PI2)
			return -ENOSYS;
	}

	switch (cmd) {
	case FUTEX_WAIT:
		val3 = FUTEX_BITSET_MATCH_ANY;
		fallthrough;
	case FUTEX_WAIT_BITSET:
		return futex_wait(uaddr, flags, val, timeout, val3);
	case FUTEX_WAKE:
		val3 = FUTEX_BITSET_MATCH_ANY;
		fallthrough;
	case FUTEX_WAKE_BITSET:
		return futex_wake(uaddr, flags, val, val3);
	case FUTEX_REQUEUE:
		return futex_requeue(uaddr, flags, uaddr2, flags, val, val2, NULL, 0);
	case FUTEX_CMP_REQUEUE:
		return futex_requeue(uaddr, flags, uaddr2, flags, val, val2, &val3, 0);
	case FUTEX_WAKE_OP:
		return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
	case FUTEX_LOCK_PI:
		flags |= FLAGS_CLOCKRT;
		fallthrough;
	case FUTEX_LOCK_PI2:
		return futex_lock_pi(uaddr, flags, timeout, 0);
	case FUTEX_UNLOCK_PI:
		return futex_unlock_pi(uaddr, flags);
	case FUTEX_TRYLOCK_PI:
		return futex_lock_pi(uaddr, flags, NULL, 1);
	case FUTEX_WAIT_REQUEUE_PI:
		val3 = FUTEX_BITSET_MATCH_ANY;
		return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
					     uaddr2);
	case FUTEX_CMP_REQUEUE_PI:
		return futex_requeue(uaddr, flags, uaddr2, flags, val, val2, &val3, 1);
	}
	return -ENOSYS;
}
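
/*
 * Illustrative userspace sketch (not part of this file): the classic
 * FUTEX_WAIT/FUTEX_WAKE pairing served by the dispatch above. A waiter
 * only sleeps while the futex word still holds the expected value; a
 * waker changes the word and then wakes blocked threads. Hypothetical
 * helpers, error handling trimmed.
 *
 *	#include <limits.h>
 *	#include <linux/futex.h>
 *	#include <stdint.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static long futex_wait_on(uint32_t *word, uint32_t expected)
 *	{
 *		// Sleeps only while *word == expected; NULL timeout = forever.
 *		return syscall(SYS_futex, word, FUTEX_WAIT | FUTEX_PRIVATE_FLAG,
 *			       expected, NULL, NULL, 0);
 *	}
 *
 *	static long futex_wake_all(uint32_t *word)
 *	{
 *		// Wake up to INT_MAX waiters blocked on the word.
 *		return syscall(SYS_futex, word, FUTEX_WAKE | FUTEX_PRIVATE_FLAG,
 *			       INT_MAX, NULL, NULL, 0);
 *	}
 */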

static __always_inline bool futex_cmd_has_timeout(u32 cmd)
{
	switch (cmd) {
	case FUTEX_WAIT:
	case FUTEX_LOCK_PI:
	case FUTEX_LOCK_PI2:
	case FUTEX_WAIT_BITSET:
	case FUTEX_WAIT_REQUEUE_PI:
		return true;
	}
	return false;
}

static __always_inline int
futex_init_timeout(u32 cmd, u32 op, struct timespec64 *ts, ktime_t *t)
{
	if (!timespec64_valid(ts))
		return -EINVAL;

	*t = timespec64_to_ktime(*ts);
	if (cmd == FUTEX_WAIT)
		*t = ktime_add_safe(ktime_get(), *t);
	else if (cmd != FUTEX_LOCK_PI && !(op & FUTEX_CLOCK_REALTIME))
		*t = timens_ktime_to_host(CLOCK_MONOTONIC, *t);
	return 0;
}
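
/*
 * Note the asymmetry handled above: FUTEX_WAIT takes a relative timeout
 * (turned into a deadline here), while FUTEX_WAIT_BITSET and the PI lock
 * operations take an absolute one. Illustrative userspace sketch (not part
 * of this file) of building the absolute CLOCK_MONOTONIC deadline that
 * FUTEX_WAIT_BITSET expects; assumes a 64-bit libc timespec, hypothetical
 * helper, error handling trimmed.
 *
 *	#include <linux/futex.h>
 *	#include <stdint.h>
 *	#include <sys/syscall.h>
 *	#include <time.h>
 *	#include <unistd.h>
 *
 *	static long futex_wait_deadline(uint32_t *word, uint32_t expected,
 *					unsigned int timeout_ms)
 *	{
 *		struct timespec deadline;
 *
 *		clock_gettime(CLOCK_MONOTONIC, &deadline);
 *		deadline.tv_sec  += timeout_ms / 1000;
 *		deadline.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
 *		if (deadline.tv_nsec >= 1000000000L) {
 *			deadline.tv_sec++;
 *			deadline.tv_nsec -= 1000000000L;
 *		}
 *		// val3 is the wake bitset; match any waker.
 *		return syscall(SYS_futex, word,
 *			       FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG, expected,
 *			       &deadline, NULL, FUTEX_BITSET_MATCH_ANY);
 *	}
 */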

SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
		const struct __kernel_timespec __user *, utime,
		u32 __user *, uaddr2, u32, val3)
{
	int ret, cmd = op & FUTEX_CMD_MASK;
	ktime_t t, *tp = NULL;
	struct timespec64 ts;

	if (utime && futex_cmd_has_timeout(cmd)) {
		if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
			return -EFAULT;
		if (get_timespec64(&ts, utime))
			return -EFAULT;
		ret = futex_init_timeout(cmd, op, &ts, &t);
		if (ret)
			return ret;
		tp = &t;
	}

	return do_futex(uaddr, op, val, tp, uaddr2, (unsigned long)utime, val3);
}
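
/*
 * Commands without a timeout reuse the fourth futex(2) argument slot as the
 * integer val2 (for example the requeue limit), which is why do_futex() is
 * handed (unsigned long)utime above. Illustrative userspace sketch (not part
 * of this file) of FUTEX_CMP_REQUEUE using that slot; hypothetical helper,
 * error handling trimmed.
 *
 *	#include <limits.h>
 *	#include <linux/futex.h>
 *	#include <stdint.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static long requeue_waiters(uint32_t *from, uint32_t *to,
 *				    uint32_t expected)
 *	{
 *		// Wake one waiter on 'from' and move the rest to 'to'; fails
 *		// with EAGAIN if *from no longer equals 'expected'. The
 *		// "timeout" slot carries the requeue limit, not a pointer.
 *		return syscall(SYS_futex, from,
 *			       FUTEX_CMP_REQUEUE | FUTEX_PRIVATE_FLAG,
 *			       1, (unsigned long)INT_MAX, to, expected);
 *	}
 */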

#define FUTEX2_VALID_MASK (FUTEX2_SIZE_MASK | FUTEX2_PRIVATE)

/**
 * futex_parse_waitv - Parse a waitv array from userspace
 * @futexv:	Kernel side list of waiters to be filled
 * @uwaitv:     Userspace list to be parsed
 * @nr_futexes: Length of futexv
 *
 * Return: Error code on failure, 0 on success
 */
static int futex_parse_waitv(struct futex_vector *futexv,
			     struct futex_waitv __user *uwaitv,
			     unsigned int nr_futexes)
{
	struct futex_waitv aux;
	unsigned int i;

	for (i = 0; i < nr_futexes; i++) {
		unsigned int flags;

		if (copy_from_user(&aux, &uwaitv[i], sizeof(aux)))
			return -EFAULT;

		if ((aux.flags & ~FUTEX2_VALID_MASK) || aux.__reserved)
			return -EINVAL;

		flags = futex2_to_flags(aux.flags);
		if (!futex_flags_valid(flags))
			return -EINVAL;

		if (!futex_validate_input(flags, aux.val))
			return -EINVAL;

		futexv[i].w.flags = flags;
		futexv[i].w.val = aux.val;
		futexv[i].w.uaddr = aux.uaddr;
		futexv[i].q = futex_q_init;
	}

	return 0;
}
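
/*
 * Illustrative userspace sketch (not part of this file): filling a
 * struct futex_waitv entry that this parser accepts. Per entry only the
 * FUTEX2_SIZE_* and FUTEX2_PRIVATE flag bits are allowed, __reserved must
 * be 0, and for sizes below 64 bits the expected value must fit the futex
 * size. Assumes a uapi <linux/futex.h> new enough to provide struct
 * futex_waitv and the FUTEX2_* flags; hypothetical helper.
 *
 *	#include <linux/futex.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *
 *	static void fill_waiter(struct futex_waitv *w, uint32_t *addr,
 *				uint32_t expected)
 *	{
 *		memset(w, 0, sizeof(*w));	// keeps __reserved zeroed
 *		w->uaddr = (uintptr_t)addr;	// addresses are passed as u64
 *		w->val	 = expected;
 *		w->flags = FUTEX2_SIZE_U32 | FUTEX2_PRIVATE;
 *	}
 */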

static int futex2_setup_timeout(struct __kernel_timespec __user *timeout,
				clockid_t clockid, struct hrtimer_sleeper *to)
{
	int flag_clkid = 0, flag_init = 0;
	struct timespec64 ts;
	ktime_t time;
	int ret;

	if (!timeout)
		return 0;

	if (clockid == CLOCK_REALTIME) {
		flag_clkid = FLAGS_CLOCKRT;
		flag_init = FUTEX_CLOCK_REALTIME;
	}

	if (clockid != CLOCK_REALTIME && clockid != CLOCK_MONOTONIC)
		return -EINVAL;

	if (get_timespec64(&ts, timeout))
		return -EFAULT;

	/*
	 * Since there is no dedicated opcode for futex_waitv, reuse
	 * FUTEX_WAIT_BITSET, which also takes an absolute timeout.
	 */
	ret = futex_init_timeout(FUTEX_WAIT_BITSET, flag_init, &ts, &time);
	if (ret)
		return ret;

	futex_setup_timer(&time, to, flag_clkid, 0);
	return 0;
}

static inline void futex2_destroy_timeout(struct hrtimer_sleeper *to)
{
	hrtimer_cancel(&to->timer);
	destroy_hrtimer_on_stack(&to->timer);
}

/**
 * sys_futex_waitv - Wait on a list of futexes
 * @waiters:    List of futexes to wait on
 * @nr_futexes: Length of futexv
 * @flags:      Currently unused; must be 0
 * @timeout:	Optional absolute timeout.
 * @clockid:	Clock to be used for the timeout, realtime or monotonic.
 *
 * Given an array of `struct futex_waitv`, wait on each uaddr. The thread wakes
 * if a futex_wake() is performed at any uaddr. The syscall returns immediately
 * if any waiter has *uaddr != val. *timeout is an optional, absolute timeout
 * for the whole operation. Each waiter has individual flags. The `flags`
 * argument of the syscall is currently unused and must be 0; the timeout clock
 * is selected with @clockid instead. Flags for private futexes, sizes, etc.
 * should be set in the individual flags of each waiter.
 *
 * Returns the array index of one of the woken futexes. No further information
 * is provided: any number of other futexes may also have been woken by the
 * same event, and if more than one futex was woken, the returned index may
 * refer to any one of them. (It is not necessarily the futex with the
 * smallest index, nor the one most recently woken, nor...)
 */

SYSCALL_DEFINE5(futex_waitv, struct futex_waitv __user *, waiters,
		unsigned int, nr_futexes, unsigned int, flags,
		struct __kernel_timespec __user *, timeout, clockid_t, clockid)
{
	struct hrtimer_sleeper to;
	struct futex_vector *futexv;
	int ret;

	/* This syscall supports no flags for now */
	if (flags)
		return -EINVAL;

	if (!nr_futexes || nr_futexes > FUTEX_WAITV_MAX || !waiters)
		return -EINVAL;

	if (timeout && (ret = futex2_setup_timeout(timeout, clockid, &to)))
		return ret;

	futexv = kcalloc(nr_futexes, sizeof(*futexv), GFP_KERNEL);
	if (!futexv) {
		ret = -ENOMEM;
		goto destroy_timer;
	}

	ret = futex_parse_waitv(futexv, waiters, nr_futexes);
	if (!ret)
		ret = futex_wait_multiple(futexv, nr_futexes, timeout ? &to : NULL);

	kfree(futexv);

destroy_timer:
	if (timeout)
		futex2_destroy_timeout(&to);
	return ret;
}
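
/*
 * Illustrative userspace sketch (not part of this file): waiting on two
 * futex words at once and using the returned index to tell which waiter
 * was woken. Assumes a libc exposing SYS_futex_waitv (syscall 449 on most
 * architectures) and a uapi <linux/futex.h> with struct futex_waitv and the
 * FUTEX2_* flags; hypothetical helper, error handling trimmed.
 *
 *	#include <linux/futex.h>
 *	#include <stdint.h>
 *	#include <sys/syscall.h>
 *	#include <time.h>
 *	#include <unistd.h>
 *
 *	static long wait_for_either(uint32_t *a, uint32_t expect_a,
 *				    uint32_t *b, uint32_t expect_b)
 *	{
 *		struct futex_waitv waiters[2] = {
 *			{ .val = expect_a, .uaddr = (uintptr_t)a,
 *			  .flags = FUTEX2_SIZE_U32 | FUTEX2_PRIVATE },
 *			{ .val = expect_b, .uaddr = (uintptr_t)b,
 *			  .flags = FUTEX2_SIZE_U32 | FUTEX2_PRIVATE },
 *		};
 *		struct timespec deadline;
 *
 *		// Absolute deadline, one second from now on CLOCK_MONOTONIC.
 *		clock_gettime(CLOCK_MONOTONIC, &deadline);
 *		deadline.tv_sec += 1;
 *
 *		// On success the return value is the index of a woken waiter;
 *		// through syscall(2) a failure shows up as -1 with errno set.
 *		return syscall(SYS_futex_waitv, waiters, 2, 0,
 *			       &deadline, CLOCK_MONOTONIC);
 *	}
 */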

/*
 * sys_futex_wake - Wake a number of futexes
 * @uaddr:	Address of the futex(es) to wake
 * @mask:	bitmask
 * @nr:		Number of the futexes to wake
 * @flags:	FUTEX2 flags
 *
 * Identical to the traditional FUTEX_WAKE_BITSET op, except it is part of the
 * futex2 family of calls.
 */

SYSCALL_DEFINE4(futex_wake,
		void __user *, uaddr,
		unsigned long, mask,
		int, nr,
		unsigned int, flags)
{
	if (flags & ~FUTEX2_VALID_MASK)
		return -EINVAL;

	flags = futex2_to_flags(flags);
	if (!futex_flags_valid(flags))
		return -EINVAL;

	if (!futex_validate_input(flags, mask))
		return -EINVAL;

	return futex_wake(uaddr, FLAGS_STRICT | flags, nr, mask);
}
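
/*
 * Illustrative userspace sketch (not part of this file): the futex2 wake
 * call. Assumes __NR_futex_wake is available (added in kernel 6.7; syscall
 * 454 on most architectures). The mask must fit the declared futex size,
 * so an all-ones 32-bit mask (FUTEX_BITSET_MATCH_ANY) is used to match
 * every waiter. Hypothetical helper, error handling trimmed.
 *
 *	#include <limits.h>
 *	#include <linux/futex.h>
 *	#include <stdint.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static long futex2_wake_all(uint32_t *word)
 *	{
 *		return syscall(__NR_futex_wake, word,
 *			       (unsigned long)FUTEX_BITSET_MATCH_ANY, INT_MAX,
 *			       FUTEX2_SIZE_U32 | FUTEX2_PRIVATE);
 *	}
 */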

/*
 * sys_futex_wait - Wait on a futex
 * @uaddr:	Address of the futex to wait on
 * @val:	Expected value of @uaddr
 * @mask:	bitmask
 * @flags:	FUTEX2 flags
 * @timeout:	Optional absolute timeout
 * @clockid:	Clock to be used for the timeout, realtime or monotonic
 *
 * Identical to the traditional FUTEX_WAIT_BITSET op, except it is part of the
 * futex2 family of calls.
 */

SYSCALL_DEFINE6(futex_wait,
		void __user *, uaddr,
		unsigned long, val,
		unsigned long, mask,
		unsigned int, flags,
		struct __kernel_timespec __user *, timeout,
		clockid_t, clockid)
{
	struct hrtimer_sleeper to;
	int ret;

	if (flags & ~FUTEX2_VALID_MASK)
		return -EINVAL;

	flags = futex2_to_flags(flags);
	if (!futex_flags_valid(flags))
		return -EINVAL;

	if (!futex_validate_input(flags, val) ||
	    !futex_validate_input(flags, mask))
		return -EINVAL;

	if (timeout && (ret = futex2_setup_timeout(timeout, clockid, &to)))
		return ret;

	ret = __futex_wait(uaddr, flags, val, timeout ? &to : NULL, mask);

	if (timeout)
		futex2_destroy_timeout(&to);

	return ret;
}
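
/*
 * Illustrative userspace sketch (not part of this file): the futex2 wait
 * call pairing with the wake sketch above. Assumes __NR_futex_wait is
 * available (added in kernel 6.7; syscall 455 on most architectures). Both
 * the expected value and the mask must fit the declared futex size; a NULL
 * timeout means wait forever. Hypothetical helper, error handling trimmed.
 *
 *	#include <linux/futex.h>
 *	#include <stdint.h>
 *	#include <sys/syscall.h>
 *	#include <time.h>
 *	#include <unistd.h>
 *
 *	static long futex2_wait_u32(uint32_t *word, uint32_t expected)
 *	{
 *		return syscall(__NR_futex_wait, word, (unsigned long)expected,
 *			       (unsigned long)FUTEX_BITSET_MATCH_ANY,
 *			       FUTEX2_SIZE_U32 | FUTEX2_PRIVATE,
 *			       NULL, CLOCK_MONOTONIC);
 *	}
 */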

/*
 * sys_futex_requeue - Requeue a waiter from one futex to another
 * @waiters:	array describing the source and destination futex
 * @flags:	unused
 * @nr_wake:	number of futexes to wake
 * @nr_requeue:	number of futexes to requeue
 *
 * Identical to the traditional FUTEX_CMP_REQUEUE op, except it is part of the
 * futex2 family of calls.
 */

SYSCALL_DEFINE4(futex_requeue,
		struct futex_waitv __user *, waiters,
		unsigned int, flags,
		int, nr_wake,
		int, nr_requeue)
{
	struct futex_vector futexes[2];
	u32 cmpval;
	int ret;

	if (flags)
		return -EINVAL;

	if (!waiters)
		return -EINVAL;

	ret = futex_parse_waitv(futexes, waiters, 2);
	if (ret)
		return ret;

	cmpval = futexes[0].w.val;

	return futex_requeue(u64_to_user_ptr(futexes[0].w.uaddr), futexes[0].w.flags,
			     u64_to_user_ptr(futexes[1].w.uaddr), futexes[1].w.flags,
			     nr_wake, nr_requeue, &cmpval, 0);
}
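
/*
 * Illustrative userspace sketch (not part of this file): the futex2
 * requeue call. waiters[0] names the source futex and carries the value
 * to compare (the CMP part); waiters[1] names the destination, whose val
 * field is validated but not compared. Assumes __NR_futex_requeue is
 * available (added in kernel 6.7; syscall 456 on most architectures).
 * Hypothetical helper, error handling trimmed.
 *
 *	#include <limits.h>
 *	#include <linux/futex.h>
 *	#include <stdint.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static long futex2_requeue_all(uint32_t *from, uint32_t expected,
 *				       uint32_t *to)
 *	{
 *		struct futex_waitv waiters[2] = {
 *			{ .val = expected, .uaddr = (uintptr_t)from,
 *			  .flags = FUTEX2_SIZE_U32 | FUTEX2_PRIVATE },
 *			{ .val = 0, .uaddr = (uintptr_t)to,
 *			  .flags = FUTEX2_SIZE_U32 | FUTEX2_PRIVATE },
 *		};
 *
 *		// Wake one waiter on 'from', requeue the rest onto 'to'.
 *		return syscall(__NR_futex_requeue, waiters, 0, 1, INT_MAX);
 *	}
 */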

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(set_robust_list,
		struct compat_robust_list_head __user *, head,
		compat_size_t, len)
{
	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->compat_robust_list = head;

	return 0;
}

COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
			compat_uptr_t __user *, head_ptr,
			compat_size_t __user *, len_ptr)
{
	struct compat_robust_list_head __user *head;
	unsigned long ret;
	struct task_struct *p;

	rcu_read_lock();

	ret = -ESRCH;
	if (!pid)
		p = current;
	else {
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
	}

	ret = -EPERM;
	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
		goto err_unlock;

	head = p->compat_robust_list;
	rcu_read_unlock();

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(ptr_to_compat(head), head_ptr);

err_unlock:
	rcu_read_unlock();

	return ret;
}
#endif /* CONFIG_COMPAT */

#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
		const struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
		u32, val3)
{
	int ret, cmd = op & FUTEX_CMD_MASK;
	ktime_t t, *tp = NULL;
	struct timespec64 ts;

	if (utime && futex_cmd_has_timeout(cmd)) {
		if (get_old_timespec32(&ts, utime))
			return -EFAULT;
		ret = futex_init_timeout(cmd, op, &ts, &t);
		if (ret)
			return ret;
		tp = &t;
	}

	return do_futex(uaddr, op, val, tp, uaddr2, (unsigned long)utime, val3);
}
#endif /* CONFIG_COMPAT_32BIT_TIME */
