xref: /freebsd/sys/compat/linux/linux_futex.c (revision 9bd497b8354567454e075076d40c996e21bd6095)
/*	$NetBSD: linux_futex.c,v 1.7 2006/07/24 19:01:49 manu Exp $ */

/*-
 * Copyright (c) 2005 Emmanuel Dreyfus, all rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Emmanuel Dreyfus
 * 4. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#if 0
__KERNEL_RCSID(1, "$NetBSD: linux_futex.c,v 1.7 2006/07/24 19:01:49 manu Exp $");
#endif

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/sx.h>

#ifdef COMPAT_LINUX32
#include <machine/../linux32/linux.h>
#include <machine/../linux32/linux32_proto.h>
#else
#include <machine/../linux/linux.h>
#include <machine/../linux/linux_proto.h>
#endif
#include <compat/linux/linux_futex.h>
#include <compat/linux/linux_emul.h>
#include <compat/linux/linux_util.h>

MALLOC_DEFINE(M_FUTEX, "futex", "Linux futexes");
MALLOC_DEFINE(M_FUTEX_WP, "futex wp", "Linux futexes wp");

struct futex;

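/*
 * A waiting_proc represents a single thread sleeping on a futex.  It is
 * allocated by futex_get() when FUTEX_CREATE_WP is requested, linked into
 * the futex's f_waiting_proc queue and slept on via sx_sleep() in
 * futex_sleep().
 */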
struct waiting_proc {
	uint32_t	wp_flags;
	struct futex	*wp_futex;
	TAILQ_ENTRY(waiting_proc) wp_list;
};

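/*
 * In-kernel representation of a Linux futex, keyed by the user-space
 * address f_uaddr.  Futexes are reference counted and kept on the global
 * futex_list (protected by futex_mtx); the per-futex sx lock serializes
 * waiters and wakers.
 */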
struct futex {
	struct sx	f_lck;
	uint32_t	*f_uaddr;
	uint32_t	f_refcount;
	LIST_ENTRY(futex) f_list;
	TAILQ_HEAD(lf_waiting_proc, waiting_proc) f_waiting_proc;
};

struct futex_list futex_list;

#define FUTEX_LOCK(f)		sx_xlock(&(f)->f_lck)
#define FUTEX_UNLOCK(f)		sx_xunlock(&(f)->f_lck)
#define FUTEX_INIT(f)		sx_init_flags(&(f)->f_lck, "ftlk", 0)
#define FUTEX_DESTROY(f)	sx_destroy(&(f)->f_lck)
#define FUTEX_ASSERT_LOCKED(f)	sx_assert(&(f)->f_lck, SA_XLOCKED)

struct mtx futex_mtx;			/* protects the futex list */
#define FUTEXES_LOCK		mtx_lock(&futex_mtx)
#define FUTEXES_UNLOCK		mtx_unlock(&futex_mtx)

/* flags for futex_get() */
#define FUTEX_CREATE_WP		0x1	/* create waiting_proc */
#define FUTEX_DONTCREATE	0x2	/* don't create futex if it does not exist */
#define FUTEX_DONTEXISTS	0x4	/* return EINVAL if futex already exists */

/* wp_flags */
#define FUTEX_WP_REQUEUED	0x1	/* wp requeued - moved from the wp_list
					 * of the futex the thread slept on to
					 * the wp_list of another futex.
					 */
#define FUTEX_WP_REMOVED	0x2	/* wp was woken up and removed from the
					 * futex wp_list to prevent a double
					 * wakeup.
					 */

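/*
 * Atomic operations on user addresses, implemented in the machine-dependent
 * support.s and used by FUTEX_WAKE_OP.  Each applies its operation to
 * *uaddr with the given operand, reports the previous value through *oldval
 * and is expected to return 0 on success or a negative errno (e.g. -EFAULT)
 * if the user address cannot be accessed.
 */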
/* support.s */
int futex_xchgl(int oparg, uint32_t *uaddr, int *oldval);
int futex_addl(int oparg, uint32_t *uaddr, int *oldval);
int futex_orl(int oparg, uint32_t *uaddr, int *oldval);
int futex_andl(int oparg, uint32_t *uaddr, int *oldval);
int futex_xorl(int oparg, uint32_t *uaddr, int *oldval);

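/*
 * Drop a reference to futex f and release its lock; the futex is destroyed
 * and freed when the last reference goes away.  If wp is non-NULL the
 * waiting_proc is unlinked (unless a waker already removed it) and freed.
 */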
static void
futex_put(struct futex *f, struct waiting_proc *wp)
{

	FUTEX_ASSERT_LOCKED(f);
	if (wp != NULL) {
		if ((wp->wp_flags & FUTEX_WP_REMOVED) == 0)
			TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
		free(wp, M_FUTEX_WP);
	}

	FUTEXES_LOCK;
	if (--f->f_refcount == 0) {
		LIST_REMOVE(f, f_list);
		FUTEXES_UNLOCK;
		FUTEX_UNLOCK(f);

		LINUX_CTR2(sys_futex, "futex_put destroy uaddr %p ref %d",
		    f->f_uaddr, f->f_refcount);
		FUTEX_DESTROY(f);
		free(f, M_FUTEX);
		return;
	}

	LINUX_CTR2(sys_futex, "futex_put uaddr %p ref %d",
	    f->f_uaddr, f->f_refcount);
	FUTEXES_UNLOCK;
	FUTEX_UNLOCK(f);
}

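/*
 * Look up the futex associated with a user-space address, or create one if
 * it does not exist yet (unless FUTEX_DONTCREATE is given, in which case
 * *newf may be returned as NULL).  With FUTEX_DONTEXISTS, finding an
 * existing futex is an error (EINVAL).  On success the futex is returned
 * referenced and locked.
 */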
static int
futex_get0(uint32_t *uaddr, struct futex **newf, uint32_t flags)
{
	struct futex *f, *tmpf;

	*newf = tmpf = NULL;

retry:
	FUTEXES_LOCK;
	LIST_FOREACH(f, &futex_list, f_list) {
		if (f->f_uaddr == uaddr) {
			if (tmpf != NULL) {
				FUTEX_UNLOCK(tmpf);
				FUTEX_DESTROY(tmpf);
				free(tmpf, M_FUTEX);
			}
			if (flags & FUTEX_DONTEXISTS) {
				FUTEXES_UNLOCK;
				return (EINVAL);
			}

			/*
			 * Increment the refcount of the found futex to
			 * prevent it from being deallocated before
			 * FUTEX_LOCK().
			 */
			++f->f_refcount;
			FUTEXES_UNLOCK;

			FUTEX_LOCK(f);
			*newf = f;
			LINUX_CTR2(sys_futex, "futex_get uaddr %p ref %d",
			    uaddr, f->f_refcount);
			return (0);
		}
	}

	if (flags & FUTEX_DONTCREATE) {
		FUTEXES_UNLOCK;
		LINUX_CTR1(sys_futex, "futex_get uaddr %p null", uaddr);
		return (0);
	}

	if (tmpf == NULL) {
		FUTEXES_UNLOCK;
		tmpf = malloc(sizeof(*tmpf), M_FUTEX, M_WAITOK | M_ZERO);
		tmpf->f_uaddr = uaddr;
		tmpf->f_refcount = 1;
		FUTEX_INIT(tmpf);
		TAILQ_INIT(&tmpf->f_waiting_proc);

		/*
		 * Lock the new futex before inserting it into futex_list
		 * to prevent its use by others.
		 */
		FUTEX_LOCK(tmpf);
		goto retry;
	}

	LIST_INSERT_HEAD(&futex_list, tmpf, f_list);
	FUTEXES_UNLOCK;

	LINUX_CTR2(sys_futex, "futex_get uaddr %p ref %d new",
	    uaddr, tmpf->f_refcount);
	*newf = tmpf;
	return (0);
}

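/*
 * Wrapper around futex_get0() that, when FUTEX_CREATE_WP is given,
 * additionally allocates a waiting_proc and queues it on the futex before
 * returning.
 */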
static int
futex_get(uint32_t *uaddr, struct waiting_proc **wp, struct futex **f,
    uint32_t flags)
{
	int error;

	if (flags & FUTEX_CREATE_WP) {
		*wp = malloc(sizeof(struct waiting_proc), M_FUTEX_WP, M_WAITOK);
		(*wp)->wp_flags = 0;
	}
	error = futex_get0(uaddr, f, flags);
	if (error) {
		if (flags & FUTEX_CREATE_WP)
			free(*wp, M_FUTEX_WP);
		return (error);
	}
	if (flags & FUTEX_CREATE_WP) {
		TAILQ_INSERT_HEAD(&(*f)->f_waiting_proc, *wp, wp_list);
		(*wp)->wp_futex = *f;
	}

	return (error);
}

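/*
 * Sleep on wp until woken, interrupted by a signal or timed out (timeout is
 * in ticks, 0 means sleep forever).  If the waiter was requeued to another
 * futex while asleep, pick up that futex instead.  The futex reference(s)
 * and lock are always dropped before returning.
 */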
static int
futex_sleep(struct futex *f, struct waiting_proc *wp, unsigned long timeout)
{
	int error;

	FUTEX_ASSERT_LOCKED(f);
	LINUX_CTR4(sys_futex, "futex_sleep enter uaddr %p wp %p timo %ld ref %d",
	    f->f_uaddr, wp, timeout, f->f_refcount);
	error = sx_sleep(wp, &f->f_lck, PCATCH, "futex", timeout);
	if (wp->wp_flags & FUTEX_WP_REQUEUED) {
		KASSERT(f != wp->wp_futex, ("futex != wp_futex"));
		LINUX_CTR5(sys_futex, "futex_sleep out error %d uaddr %p w"
		    " %p requeued uaddr %p ref %d",
		    error, f->f_uaddr, wp, wp->wp_futex->f_uaddr,
		    wp->wp_futex->f_refcount);
		futex_put(f, NULL);
		f = wp->wp_futex;
		FUTEX_LOCK(f);
	} else
		LINUX_CTR3(sys_futex, "futex_sleep out error %d uaddr %p wp %p",
		    error, f->f_uaddr, wp);

	futex_put(f, wp);
	return (error);
}

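/*
 * Wake up to n threads sleeping on futex f; returns the number of threads
 * actually woken.
 */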
static int
futex_wake(struct futex *f, int n)
{
	struct waiting_proc *wp, *wpt;
	int count = 0;

	FUTEX_ASSERT_LOCKED(f);
	TAILQ_FOREACH_SAFE(wp, &f->f_waiting_proc, wp_list, wpt) {
		LINUX_CTR3(sys_futex, "futex_wake uaddr %p wp %p ref %d",
		    f->f_uaddr, wp, f->f_refcount);
		wp->wp_flags |= FUTEX_WP_REMOVED;
		TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
		wakeup_one(wp);
		if (++count == n)
			break;
	}

	return (count);
}

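/*
 * Wake up to n waiters of futex f and requeue up to n2 further waiters onto
 * futex f2; returns the total number of waiters woken or requeued.
 */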
static int
futex_requeue(struct futex *f, int n, struct futex *f2, int n2)
{
	struct waiting_proc *wp, *wpt;
	int count = 0;

	FUTEX_ASSERT_LOCKED(f);
	FUTEX_ASSERT_LOCKED(f2);

	TAILQ_FOREACH_SAFE(wp, &f->f_waiting_proc, wp_list, wpt) {
		if (++count <= n) {
			LINUX_CTR2(sys_futex, "futex_req_wake uaddr %p wp %p",
			    f->f_uaddr, wp);
			wp->wp_flags |= FUTEX_WP_REMOVED;
			TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
			wakeup_one(wp);
		} else {
			LINUX_CTR3(sys_futex, "futex_requeue uaddr %p wp %p to %p",
			    f->f_uaddr, wp, f2->f_uaddr);
			wp->wp_flags |= FUTEX_WP_REQUEUED;
			/* Move wp to the wp_list of the f2 futex. */
			TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
			TAILQ_INSERT_HEAD(&f2->f_waiting_proc, wp, wp_list);

			/*
			 * The thread sleeping on wp must acquire the f2
			 * lock after waking, so increment the refcount of
			 * f2 to prevent its premature deallocation.
			 */
			wp->wp_futex = f2;
			FUTEXES_LOCK;
			++f2->f_refcount;
			FUTEXES_UNLOCK;
			if (count - n >= n2)
				break;
		}
	}

	return (count);
}

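/*
 * Convert the (optional) Linux timespec into ticks and sleep on the futex.
 * EWOULDBLOCK from the sleep is mapped to ETIMEDOUT, which is what Linux
 * callers expect.
 */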
static int
futex_wait(struct futex *f, struct waiting_proc *wp, struct l_timespec *ts)
{
	struct l_timespec timeout = {0, 0};
	struct timeval tv = {0, 0};
	int timeout_hz;
	int error;

	if (ts != NULL) {
		error = copyin(ts, &timeout, sizeof(timeout));
		if (error)
			return (error);
	}

	tv.tv_usec = timeout.tv_sec * 1000000 + timeout.tv_nsec / 1000;
	timeout_hz = tvtohz(&tv);

	if (timeout.tv_sec == 0 && timeout.tv_nsec == 0)
		timeout_hz = 0;

	/*
	 * If the user process requests a non-zero timeout,
	 * make sure we do not turn it into an infinite
	 * timeout because timeout_hz rounds down to zero.
	 *
	 * We use a minimal timeout of 1/hz.  Maybe it would make
	 * more sense to just return ETIMEDOUT without sleeping.
	 */
	if (((timeout.tv_sec != 0) || (timeout.tv_nsec != 0)) &&
	    (timeout_hz == 0))
		timeout_hz = 1;

	error = futex_sleep(f, wp, timeout_hz);
	if (error == EWOULDBLOCK)
		error = ETIMEDOUT;

	return (error);
}

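/*
 * Decode and apply a FUTEX_WAKE_OP operation.  The encoded_op argument
 * follows the Linux FUTEX_OP() encoding: bits 28-31 hold the operation
 * (bit 31 meaning "oparg is a shift count", i.e. use 1 << oparg), bits
 * 24-27 the comparison, bits 12-23 the sign-extended operand oparg and
 * bits 0-11 the sign-extended comparison argument cmparg.  User space
 * builds encoded_op roughly as
 *
 *	((op & 0xf) << 28) | ((cmp & 0xf) << 24) |
 *	((oparg & 0xfff) << 12) | (cmparg & 0xfff)
 *
 * The old value of *uaddr is atomically combined with oparg, and the
 * return value is the result of comparing that old value against cmparg,
 * or a negative errno on failure.
 */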
static int
futex_atomic_op(struct thread *td, int encoded_op, uint32_t *uaddr)
{
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
	int oldval = 0, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

#ifdef DEBUG
	if (ldebug(sys_futex))
		printf("futex_atomic_op: op = %d, cmp = %d, oparg = %x, "
		       "cmparg = %x, uaddr = %p\n",
		       op, cmp, oparg, cmparg, uaddr);
#endif
	/* XXX: linux verifies access here and returns EFAULT */

	switch (op) {
	case FUTEX_OP_SET:
		ret = futex_xchgl(oparg, uaddr, &oldval);
		break;
	case FUTEX_OP_ADD:
		ret = futex_addl(oparg, uaddr, &oldval);
		break;
	case FUTEX_OP_OR:
		ret = futex_orl(oparg, uaddr, &oldval);
		break;
	case FUTEX_OP_ANDN:
		ret = futex_andl(~oparg, uaddr, &oldval);
		break;
	case FUTEX_OP_XOR:
		ret = futex_xorl(oparg, uaddr, &oldval);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret)
		return (ret);

	switch (cmp) {
	case FUTEX_OP_CMP_EQ:
		return (oldval == cmparg);
	case FUTEX_OP_CMP_NE:
		return (oldval != cmparg);
	case FUTEX_OP_CMP_LT:
		return (oldval < cmparg);
	case FUTEX_OP_CMP_GE:
		return (oldval >= cmparg);
	case FUTEX_OP_CMP_LE:
		return (oldval <= cmparg);
	case FUTEX_OP_CMP_GT:
		return (oldval > cmparg);
	default:
		return (-ENOSYS);
	}
}

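/*
 * Entry point for the Linux futex(2) system call.  A Linux application
 * typically reaches this through a library wrapper or a raw syscall; an
 * illustrative user-space sketch (not part of this file) would be:
 *
 *	syscall(SYS_futex, &futex_word, FUTEX_WAIT, expected_val, &ts, NULL, 0);
 *	syscall(SYS_futex, &futex_word, FUTEX_WAKE, 1, NULL, NULL, 0);
 *
 * Note that for FUTEX_CMP_REQUEUE and FUTEX_WAKE_OP the timeout argument
 * is reused by the ABI to carry a second wakeup count (val2), hence the
 * integer casts of args->timeout below.
 */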
int
linux_sys_futex(struct thread *td, struct linux_sys_futex_args *args)
{
	int op_ret, val, ret, nrwake;
	struct linux_emuldata *em;
	struct waiting_proc *wp;
	struct futex *f, *f2 = NULL;	/* f2 is set only when a second futex is used */
	int error = 0;

	/*
	 * Our implementation provides only private futexes.  Most
	 * applications should use private futexes but do not say so.
	 * Therefore we treat all futexes as private by clearing the
	 * FUTEX_PRIVATE_FLAG.  This works in most cases (i.e. when
	 * futexes are not shared through a file descriptor or between
	 * different processes).
	 */
	args->op = (args->op & ~LINUX_FUTEX_PRIVATE_FLAG);

	switch (args->op) {
	case LINUX_FUTEX_WAIT:

		LINUX_CTR2(sys_futex, "WAIT val %d uaddr %p",
		    args->val, args->uaddr);
#ifdef DEBUG
		if (ldebug(sys_futex))
			printf(ARGS(sys_futex, "futex_wait val %d uaddr %p"),
			    args->val, args->uaddr);
#endif
		error = futex_get(args->uaddr, &wp, &f, FUTEX_CREATE_WP);
		if (error)
			return (error);
		error = copyin(args->uaddr, &val, sizeof(val));
		if (error) {
			LINUX_CTR1(sys_futex, "WAIT copyin failed %d",
			    error);
			futex_put(f, wp);
			return (error);
		}
		if (val != args->val) {
			LINUX_CTR3(sys_futex, "WAIT uaddr %p val %d != uval %d",
			    args->uaddr, args->val, val);
			futex_put(f, wp);
			return (EWOULDBLOCK);
		}

		error = futex_wait(f, wp, args->timeout);
		break;

	case LINUX_FUTEX_WAKE:

		LINUX_CTR2(sys_futex, "WAKE val %d uaddr %p",
		    args->val, args->uaddr);

		/*
		 * XXX: Linux is able to cope with different addresses
		 * corresponding to the same mapped memory in the sleeping
		 * and waker process(es).
		 */
#ifdef DEBUG
		if (ldebug(sys_futex))
			printf(ARGS(sys_futex, "futex_wake val %d uaddr %p"),
			    args->val, args->uaddr);
#endif
		error = futex_get(args->uaddr, NULL, &f, FUTEX_DONTCREATE);
		if (error)
			return (error);
		if (f == NULL) {
			td->td_retval[0] = 0;
			return (error);
		}
		td->td_retval[0] = futex_wake(f, args->val);
		futex_put(f, NULL);
		break;

	case LINUX_FUTEX_CMP_REQUEUE:

		LINUX_CTR5(sys_futex, "CMP_REQUEUE uaddr %p "
		    "val %d val3 %d uaddr2 %p val2 %d",
		    args->uaddr, args->val, args->val3, args->uaddr2,
		    (int)(unsigned long)args->timeout);

#ifdef DEBUG
		if (ldebug(sys_futex))
			printf(ARGS(sys_futex, "futex_cmp_requeue uaddr %p "
			    "val %d val3 %d uaddr2 %p val2 %d"),
			    args->uaddr, args->val, args->val3, args->uaddr2,
			    (int)(unsigned long)args->timeout);
#endif

		/*
		 * Linux allows this, but we do not: it is an incorrect
		 * use of the declared ABI, so return EINVAL.
		 */
		if (args->uaddr == args->uaddr2)
			return (EINVAL);
		error = futex_get0(args->uaddr, &f, 0);
		if (error)
			return (error);

		/*
		 * To avoid deadlocks, return EINVAL if the second futex
		 * already exists at this time.  Otherwise create the new
		 * futex and ignore the false-positive LOR that results.
		 *
		 * Glibc falls back to FUTEX_WAKE in case of any error
		 * returned by FUTEX_CMP_REQUEUE.
		 */
		error = futex_get0(args->uaddr2, &f2, FUTEX_DONTEXISTS);
		if (error) {
			futex_put(f, NULL);
			return (error);
		}
		error = copyin(args->uaddr, &val, sizeof(val));
		if (error) {
			LINUX_CTR1(sys_futex, "CMP_REQUEUE copyin failed %d",
			    error);
			futex_put(f2, NULL);
			futex_put(f, NULL);
			return (error);
		}
		if (val != args->val3) {
			LINUX_CTR2(sys_futex, "CMP_REQUEUE val %d != uval %d",
			    args->val, val);
			futex_put(f2, NULL);
			futex_put(f, NULL);
			return (EAGAIN);
		}

		nrwake = (int)(unsigned long)args->timeout;
		td->td_retval[0] = futex_requeue(f, args->val, f2, nrwake);
		futex_put(f2, NULL);
		futex_put(f, NULL);
		break;

	case LINUX_FUTEX_WAKE_OP:

		LINUX_CTR5(sys_futex, "WAKE_OP "
		    "uaddr %p op %d val %x uaddr2 %p val3 %x",
		    args->uaddr, args->op, args->val,
		    args->uaddr2, args->val3);

#ifdef DEBUG
		if (ldebug(sys_futex))
			printf(ARGS(sys_futex, "futex_wake_op "
			    "uaddr %p op %d val %x uaddr2 %p val3 %x"),
			    args->uaddr, args->op, args->val,
			    args->uaddr2, args->val3);
#endif
		error = futex_get0(args->uaddr, &f, 0);
		if (error)
			return (error);
		if (args->uaddr != args->uaddr2)
			error = futex_get0(args->uaddr2, &f2, 0);
		if (error) {
			futex_put(f, NULL);
			return (error);
		}

		/*
		 * This function returns positive numbers as results and
		 * negative numbers as errors.
		 */
		op_ret = futex_atomic_op(td, args->val3, args->uaddr2);

		if (op_ret < 0) {
			/* XXX: We don't handle the EFAULT yet. */
			if (op_ret != -EFAULT) {
				if (f2 != NULL)
					futex_put(f2, NULL);
				futex_put(f, NULL);
				return (-op_ret);
			}
			if (f2 != NULL)
				futex_put(f2, NULL);
			futex_put(f, NULL);
			return (EFAULT);
		}

		ret = futex_wake(f, args->val);

		if (op_ret > 0) {
			op_ret = 0;
			nrwake = (int)(unsigned long)args->timeout;

			if (f2 != NULL)
				op_ret += futex_wake(f2, nrwake);
			else
				op_ret += futex_wake(f, nrwake);
			ret += op_ret;
		}
		if (f2 != NULL)
			futex_put(f2, NULL);
		futex_put(f, NULL);
		td->td_retval[0] = ret;
		break;

	case LINUX_FUTEX_LOCK_PI:
		/* not yet implemented */
		return (ENOSYS);

	case LINUX_FUTEX_UNLOCK_PI:
		/* not yet implemented */
		return (ENOSYS);

	case LINUX_FUTEX_TRYLOCK_PI:
		/* not yet implemented */
		return (ENOSYS);

	case LINUX_FUTEX_REQUEUE:

		/*
		 * Glibc does not use this operation since version 2.3.3,
		 * as it is racy and has been replaced by FUTEX_CMP_REQUEUE.
		 * Glibc versions prior to 2.3.3 fall back to FUTEX_WAKE
		 * when FUTEX_REQUEUE returns EINVAL.
		 */
		em = em_find(td->td_proc, EMUL_DONTLOCK);
		if (em->used_requeue == 0) {
			printf("linux(%s (%d)) sys_futex: "
			    "unsupported futex_requeue op\n",
			    td->td_proc->p_comm, td->td_proc->p_pid);
			em->used_requeue = 1;
		}
		return (EINVAL);

	default:
		printf("linux_sys_futex: unknown op %d\n", args->op);
		return (ENOSYS);
	}

	return (error);
}

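/*
 * Implementation of the Linux set_robust_list(2) system call: record the
 * user-space robust futex list head for the calling process so it can be
 * walked in release_futexes() on exit.
 */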
int
linux_set_robust_list(struct thread *td, struct linux_set_robust_list_args *args)
{
	struct linux_emuldata *em;

#ifdef DEBUG
	if (ldebug(set_robust_list))
		printf(ARGS(set_robust_list, "head %p len %d"),
		    args->head, args->len);
#endif

	if (args->len != sizeof(struct linux_robust_list_head))
		return (EINVAL);

	em = em_find(td->td_proc, EMUL_DOLOCK);
	em->robust_futexes = args->head;
	EMUL_UNLOCK(&emul_lock);

	return (0);
}

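/*
 * Implementation of the Linux get_robust_list(2) system call: return the
 * robust list head of the calling process or, given sufficient privilege,
 * of another process.
 */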
int
linux_get_robust_list(struct thread *td, struct linux_get_robust_list_args *args)
{
	struct linux_emuldata *em;
	struct linux_robust_list_head *head;
	l_size_t len = sizeof(struct linux_robust_list_head);
	int error = 0;

#ifdef	DEBUG
	if (ldebug(get_robust_list))
		printf(ARGS(get_robust_list, ""));
#endif

	if (!args->pid) {
		em = em_find(td->td_proc, EMUL_DONTLOCK);
		head = em->robust_futexes;
	} else {
		struct proc *p;

		p = pfind(args->pid);
		if (p == NULL)
			return (ESRCH);

		em = em_find(p, EMUL_DONTLOCK);
		/* XXX: ptrace? */
		if (priv_check(td, PRIV_CRED_SETUID) ||
		    priv_check(td, PRIV_CRED_SETEUID) ||
		    p_candebug(td, p)) {
			PROC_UNLOCK(p);
			return (EPERM);
		}
		head = em->robust_futexes;

		PROC_UNLOCK(p);
	}

	error = copyout(&len, args->len, sizeof(l_size_t));
	if (error)
		return (EFAULT);

	error = copyout(&head, args->head, sizeof(head));

	return (error);
}

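/*
 * Handle one robust futex whose owner is exiting: if the exiting process
 * owns the futex word, mark it FUTEX_OWNER_DIED (preserving FUTEX_WAITERS)
 * and wake one waiter so ownership can be recovered.
 */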
static int
handle_futex_death(struct proc *p, uint32_t *uaddr, int pi)
{
	uint32_t uval, nval, mval;
	struct futex *f;
	int error;

retry:
	if (copyin(uaddr, &uval, 4))
		return (EFAULT);
	if ((uval & FUTEX_TID_MASK) == p->p_pid) {
		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
		nval = casuword32(uaddr, uval, mval);

		if (nval == -1)
			return (EFAULT);

		if (nval != uval)
			goto retry;

		if (!pi && (uval & FUTEX_WAITERS)) {
			error = futex_get(uaddr, NULL, &f,
			    FUTEX_DONTCREATE);
			if (error)
				return (error);
			if (f != NULL) {
				futex_wake(f, 1);
				futex_put(f, NULL);
			}
		}
	}

	return (0);
}

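/*
 * Fetch one robust-list entry pointer from user space.  The low bit of the
 * stored value flags a PI futex and is returned separately through *pi.
 */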
static int
fetch_robust_entry(struct linux_robust_list **entry,
    struct linux_robust_list **head, int *pi)
{
	l_ulong uentry;

	if (copyin((const void *)head, &uentry, sizeof(l_ulong)))
		return (EFAULT);

	*entry = (void *)(uentry & ~1UL);
	*pi = uentry & 1;

	return (0);
}

/* This walks the list of robust futexes releasing them. */
void
release_futexes(struct proc *p)
{
	struct linux_robust_list_head *head = NULL;
	struct linux_robust_list *entry, *next_entry, *pending;
	unsigned int limit = 2048, pi, next_pi, pip;
	struct linux_emuldata *em;
	l_long futex_offset;
	int rc;

	em = em_find(p, EMUL_DONTLOCK);
	head = em->robust_futexes;

	if (head == NULL)
		return;

	if (fetch_robust_entry(&entry, PTRIN(&head->list.next), &pi))
		return;

	if (copyin(&head->futex_offset, &futex_offset, sizeof(futex_offset)))
		return;

	if (fetch_robust_entry(&pending, PTRIN(&head->pending_list), &pip))
		return;

	while (entry != &head->list) {
		rc = fetch_robust_entry(&next_entry, PTRIN(&entry->next), &next_pi);

		if (entry != pending)
			if (handle_futex_death(p,
			    (uint32_t *)((caddr_t)entry + futex_offset), pi))
				return;
		if (rc)
			return;

		entry = next_entry;
		pi = next_pi;

		if (!--limit)
			break;

		sched_relinquish(curthread);
	}

	if (pending)
		handle_futex_death(p,
		    (uint32_t *)((caddr_t)pending + futex_offset), pip);
}
825