/*	$NetBSD: linux_futex.c,v 1.7 2006/07/24 19:01:49 manu Exp $ */

/*-
 * Copyright (c) 2005 Emmanuel Dreyfus, all rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Emmanuel Dreyfus
 * 4. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#if 0
__KERNEL_RCSID(1, "$NetBSD: linux_futex.c,v 1.7 2006/07/24 19:01:49 manu Exp $");
#endif

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/sx.h>

#ifdef COMPAT_LINUX32
#include <machine/../linux32/linux.h>
#include <machine/../linux32/linux32_proto.h>
#else
#include <machine/../linux/linux.h>
#include <machine/../linux/linux_proto.h>
#endif
#include <compat/linux/linux_futex.h>
#include <compat/linux/linux_emul.h>
#include <compat/linux/linux_util.h>

MALLOC_DEFINE(M_FUTEX, "futex", "Linux futexes");
MALLOC_DEFINE(M_FUTEX_WP, "futex wp", "Linux futexes wp");

struct futex;

struct waiting_proc {
	uint32_t	wp_flags;
	struct futex	*wp_futex;
	TAILQ_ENTRY(waiting_proc) wp_list;
};

struct futex {
	struct sx	f_lck;
	uint32_t	*f_uaddr;
	uint32_t	f_refcount;
	LIST_ENTRY(futex) f_list;
	TAILQ_HEAD(lf_waiting_proc, waiting_proc) f_waiting_proc;
};

struct futex_list futex_list;

#define FUTEX_LOCK(f)		sx_xlock(&(f)->f_lck)
#define FUTEX_UNLOCK(f)		sx_xunlock(&(f)->f_lck)
#define FUTEX_INIT(f)		sx_init_flags(&(f)->f_lck, "ftlk", 0)
#define FUTEX_DESTROY(f)	sx_destroy(&(f)->f_lck)
#define FUTEX_ASSERT_LOCKED(f)	sx_assert(&(f)->f_lck, SA_XLOCKED)

struct mtx futex_mtx;			/* protects the futex list */
#define FUTEXES_LOCK		mtx_lock(&futex_mtx)
#define FUTEXES_UNLOCK		mtx_unlock(&futex_mtx)

/* flags for futex_get() */
#define FUTEX_CREATE_WP		0x1	/* create waiting_proc */
#define FUTEX_DONTCREATE	0x2	/* don't create the futex if it does not exist */
#define FUTEX_DONTEXISTS	0x4	/* return EINVAL if the futex already exists */

/* wp_flags */
#define FUTEX_WP_REQUEUED	0x1	/* wp requeued - wp was moved from the
					 * wp_list of the futex the thread slept
					 * on to the wp_list of another futex.
					 */
#define FUTEX_WP_REMOVED	0x2	/* wp was woken up and removed from the
					 * futex wp_list to prevent a double
					 * wakeup.
					 */

/* support.s */
int futex_xchgl(int oparg, uint32_t *uaddr, int *oldval);
int futex_addl(int oparg, uint32_t *uaddr, int *oldval);
int futex_orl(int oparg, uint32_t *uaddr, int *oldval);
int futex_andl(int oparg, uint32_t *uaddr, int *oldval);
int futex_xorl(int oparg, uint32_t *uaddr, int *oldval);

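/*
 * Drop a reference on futex f and release its lock.  If wp is given and
 * was not already removed by a wakeup, take it off the futex's wp_list;
 * wp is freed in any case.  The futex itself is destroyed and freed once
 * the last reference is dropped.
 */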
static void
futex_put(struct futex *f, struct waiting_proc *wp)
{

	FUTEX_ASSERT_LOCKED(f);
	if (wp != NULL) {
		if ((wp->wp_flags & FUTEX_WP_REMOVED) == 0)
			TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
		free(wp, M_FUTEX_WP);
	}

	FUTEXES_LOCK;
	if (--f->f_refcount == 0) {
		LIST_REMOVE(f, f_list);
		FUTEXES_UNLOCK;
		FUTEX_UNLOCK(f);

		LINUX_CTR2(sys_futex, "futex_put destroy uaddr %p ref %d",
		    f->f_uaddr, f->f_refcount);
		FUTEX_DESTROY(f);
		free(f, M_FUTEX);
		return;
	}

	LINUX_CTR2(sys_futex, "futex_put uaddr %p ref %d",
	    f->f_uaddr, f->f_refcount);
	FUTEXES_UNLOCK;
	FUTEX_UNLOCK(f);
}

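/*
 * Look up the futex for user address uaddr, taking a new reference and
 * its lock, or create and insert a new one unless FUTEX_DONTCREATE was
 * passed.  With FUTEX_DONTEXISTS, finding an existing futex is reported
 * as EINVAL.  A newly allocated futex is locked before it is published on
 * futex_list, which is why the lookup is retried after the allocation.
 */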
static int
futex_get0(uint32_t *uaddr, struct futex **newf, uint32_t flags)
{
	struct futex *f, *tmpf;

	*newf = tmpf = NULL;

retry:
	FUTEXES_LOCK;
	LIST_FOREACH(f, &futex_list, f_list) {
		if (f->f_uaddr == uaddr) {
			if (tmpf != NULL) {
				FUTEX_UNLOCK(tmpf);
				FUTEX_DESTROY(tmpf);
				free(tmpf, M_FUTEX);
			}
			if (flags & FUTEX_DONTEXISTS) {
				FUTEXES_UNLOCK;
				return (EINVAL);
			}

			/*
			 * Increment the refcount of the found futex to
			 * prevent it from being deallocated before
			 * FUTEX_LOCK() is taken.
			 */
			++f->f_refcount;
			FUTEXES_UNLOCK;

			FUTEX_LOCK(f);
			*newf = f;
			LINUX_CTR2(sys_futex, "futex_get uaddr %p ref %d",
			    uaddr, f->f_refcount);
			return (0);
		}
	}

	if (flags & FUTEX_DONTCREATE) {
		FUTEXES_UNLOCK;
		LINUX_CTR1(sys_futex, "futex_get uaddr %p null", uaddr);
		return (0);
	}

	if (tmpf == NULL) {
		FUTEXES_UNLOCK;
		tmpf = malloc(sizeof(*tmpf), M_FUTEX, M_WAITOK | M_ZERO);
		tmpf->f_uaddr = uaddr;
		tmpf->f_refcount = 1;
		FUTEX_INIT(tmpf);
		TAILQ_INIT(&tmpf->f_waiting_proc);

		/*
		 * Lock the new futex before inserting it into futex_list
		 * so that no one else can use it yet.
		 */
		FUTEX_LOCK(tmpf);
		goto retry;
	}

	LIST_INSERT_HEAD(&futex_list, tmpf, f_list);
	FUTEXES_UNLOCK;

	LINUX_CTR2(sys_futex, "futex_get uaddr %p ref %d new",
	    uaddr, tmpf->f_refcount);
	*newf = tmpf;
	return (0);
}

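/*
 * Wrapper around futex_get0().  With FUTEX_CREATE_WP it also allocates a
 * waiting_proc, links it to the futex and inserts it at the head of the
 * futex's wp_list, ready for futex_sleep().
 */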
static int
futex_get(uint32_t *uaddr, struct waiting_proc **wp, struct futex **f,
    uint32_t flags)
{
	int error;

	if (flags & FUTEX_CREATE_WP) {
		*wp = malloc(sizeof(struct waiting_proc), M_FUTEX_WP, M_WAITOK);
		(*wp)->wp_flags = 0;
	}
	error = futex_get0(uaddr, f, flags);
	if (error) {
		if (flags & FUTEX_CREATE_WP)
			free(*wp, M_FUTEX_WP);
		return (error);
	}
	if (flags & FUTEX_CREATE_WP) {
		TAILQ_INSERT_HEAD(&(*f)->f_waiting_proc, *wp, wp_list);
		(*wp)->wp_futex = *f;
	}

	return (error);
}

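/*
 * Sleep on wp until woken, interrupted, or the (optional) timeout in
 * ticks expires.  If the waiter was requeued to another futex in the
 * meantime, drop the old futex and take the lock of the new one before
 * returning.  The futex reference and wp are released in all cases.
 */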
static int
futex_sleep(struct futex *f, struct waiting_proc *wp, int timeout)
{
	int error;

	FUTEX_ASSERT_LOCKED(f);
	LINUX_CTR4(sys_futex, "futex_sleep enter uaddr %p wp %p timo %d ref %d",
	    f->f_uaddr, wp, timeout, f->f_refcount);
	error = sx_sleep(wp, &f->f_lck, PCATCH, "futex", timeout);
	if (wp->wp_flags & FUTEX_WP_REQUEUED) {
		KASSERT(f != wp->wp_futex, ("futex != wp_futex"));
		LINUX_CTR5(sys_futex, "futex_sleep out error %d uaddr %p wp"
		    " %p requeued uaddr %p ref %d",
		    error, f->f_uaddr, wp, wp->wp_futex->f_uaddr,
		    wp->wp_futex->f_refcount);
		futex_put(f, NULL);
		f = wp->wp_futex;
		FUTEX_LOCK(f);
	} else
		LINUX_CTR3(sys_futex, "futex_sleep out error %d uaddr %p wp %p",
		    error, f->f_uaddr, wp);

	futex_put(f, wp);
	return (error);
}

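/*
 * Wake up at most n threads waiting on futex f.  Each woken waiter is
 * marked FUTEX_WP_REMOVED and taken off the wp_list so it cannot be woken
 * twice.  Returns the number of threads woken.
 */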
static int
futex_wake(struct futex *f, int n)
{
	struct waiting_proc *wp, *wpt;
	int count = 0;

	FUTEX_ASSERT_LOCKED(f);
	TAILQ_FOREACH_SAFE(wp, &f->f_waiting_proc, wp_list, wpt) {
		LINUX_CTR3(sys_futex, "futex_wake uaddr %p wp %p ref %d",
		    f->f_uaddr, wp, f->f_refcount);
		wp->wp_flags |= FUTEX_WP_REMOVED;
		TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
		wakeup_one(wp);
		if (++count == n)
			break;
	}

	return (count);
}

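/*
 * Wake up at most n threads waiting on futex f and requeue at most n2 of
 * the remaining waiters onto futex f2.  Both futexes must be locked.
 * Returns the total number of threads woken or requeued.
 */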
static int
futex_requeue(struct futex *f, int n, struct futex *f2, int n2)
{
	struct waiting_proc *wp, *wpt;
	int count = 0;

	FUTEX_ASSERT_LOCKED(f);
	FUTEX_ASSERT_LOCKED(f2);

	TAILQ_FOREACH_SAFE(wp, &f->f_waiting_proc, wp_list, wpt) {
		if (++count <= n) {
			LINUX_CTR2(sys_futex, "futex_req_wake uaddr %p wp %p",
			    f->f_uaddr, wp);
			wp->wp_flags |= FUTEX_WP_REMOVED;
			TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
			wakeup_one(wp);
		} else {
			LINUX_CTR3(sys_futex, "futex_requeue uaddr %p wp %p to %p",
			    f->f_uaddr, wp, f2->f_uaddr);
			wp->wp_flags |= FUTEX_WP_REQUEUED;
			/* Move wp to the wp_list of the f2 futex. */
			TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
			TAILQ_INSERT_HEAD(&f2->f_waiting_proc, wp, wp_list);

			/*
			 * The thread sleeping on wp will acquire the f2
			 * lock after it wakes up, so increment the
			 * refcount of f2 to prevent its premature
			 * deallocation.
			 */
			wp->wp_futex = f2;
			FUTEXES_LOCK;
			++f2->f_refcount;
			FUTEXES_UNLOCK;
			if (count - n >= n2)
				break;
		}
	}

	return (count);
}

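/*
 * Convert the optional Linux timeout into ticks and put the thread to
 * sleep on wp.  A timeout is reported as ETIMEDOUT rather than
 * EWOULDBLOCK, matching Linux FUTEX_WAIT semantics.
 */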
static int
futex_wait(struct futex *f, struct waiting_proc *wp, struct l_timespec *ts)
{
	struct l_timespec timeout;
	struct timeval tv;
	int timeout_hz;
	int error;

	if (ts != NULL) {
		error = copyin(ts, &timeout, sizeof(timeout));
		if (error)
			return (error);
		TIMESPEC_TO_TIMEVAL(&tv, &timeout);
		error = itimerfix(&tv);
		if (error)
			return (error);
		timeout_hz = tvtohz(&tv);
	} else
		timeout_hz = 0;

	error = futex_sleep(f, wp, timeout_hz);
	if (error == EWOULDBLOCK)
		error = ETIMEDOUT;

	return (error);
}

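/*
 * Perform the FUTEX_WAKE_OP arithmetic on *uaddr and evaluate the encoded
 * comparison against the old value.  The encoded_op word packs the
 * operation in bits 28-31 (bit 31 set means oparg is used as a shift
 * count, i.e. 1 << oparg), the comparison in bits 24-27, a sign-extended
 * oparg in bits 12-23 and a sign-extended cmparg in bits 0-11.  For
 * illustration, an encoding equivalent to Linux's
 * FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0) atomically adds 1 to
 * *uaddr and reports whether the old value was greater than zero.  The
 * atomic primitives themselves are implemented in the MD support.s.
 */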
static int
futex_atomic_op(struct thread *td, int encoded_op, uint32_t *uaddr)
{
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
	int oldval = 0, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

#ifdef DEBUG
	if (ldebug(sys_futex))
		printf("futex_atomic_op: op = %d, cmp = %d, oparg = %x, "
		       "cmparg = %x, uaddr = %p\n",
		       op, cmp, oparg, cmparg, uaddr);
#endif
	/* XXX: linux verifies access here and returns EFAULT */

	switch (op) {
	case FUTEX_OP_SET:
		ret = futex_xchgl(oparg, uaddr, &oldval);
		break;
	case FUTEX_OP_ADD:
		ret = futex_addl(oparg, uaddr, &oldval);
		break;
	case FUTEX_OP_OR:
		ret = futex_orl(oparg, uaddr, &oldval);
		break;
	case FUTEX_OP_ANDN:
		ret = futex_andl(~oparg, uaddr, &oldval);
		break;
	case FUTEX_OP_XOR:
		ret = futex_xorl(oparg, uaddr, &oldval);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret)
		return (ret);

	switch (cmp) {
	case FUTEX_OP_CMP_EQ:
		return (oldval == cmparg);
	case FUTEX_OP_CMP_NE:
		return (oldval != cmparg);
	case FUTEX_OP_CMP_LT:
		return (oldval < cmparg);
	case FUTEX_OP_CMP_GE:
		return (oldval >= cmparg);
	case FUTEX_OP_CMP_LE:
		return (oldval <= cmparg);
	case FUTEX_OP_CMP_GT:
		return (oldval > cmparg);
	default:
		return (-ENOSYS);
	}
}

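/*
 * futex(2) system call entry point for the Linux emulation layer; the
 * requested operation is selected by args->op.
 */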
int
linux_sys_futex(struct thread *td, struct linux_sys_futex_args *args)
{
	int op_ret, val, ret, nrwake;
	struct linux_emuldata *em;
	struct waiting_proc *wp;
	struct futex *f, *f2 = NULL;
	int error = 0;

	/*
	 * Our implementation provides only private futexes.  Most
	 * applications should use private futexes but do not claim so.
	 * Therefore we treat all futexes as private by clearing the
	 * FUTEX_PRIVATE_FLAG.  This works in most cases (i.e. when futexes
	 * are not shared through a file descriptor or between different
	 * processes).
	 */
	args->op = (args->op & ~LINUX_FUTEX_PRIVATE_FLAG);

	switch (args->op) {
	case LINUX_FUTEX_WAIT:

		LINUX_CTR2(sys_futex, "WAIT val %d uaddr %p",
		    args->val, args->uaddr);
#ifdef DEBUG
		if (ldebug(sys_futex))
			printf(ARGS(sys_futex, "futex_wait val %d uaddr %p"),
			    args->val, args->uaddr);
#endif
		error = futex_get(args->uaddr, &wp, &f, FUTEX_CREATE_WP);
		if (error)
			return (error);
		error = copyin(args->uaddr, &val, sizeof(val));
		if (error) {
			LINUX_CTR1(sys_futex, "WAIT copyin failed %d",
			    error);
			futex_put(f, wp);
			return (error);
		}
		if (val != args->val) {
			LINUX_CTR3(sys_futex, "WAIT uaddr %p val %d != uval %d",
			    args->uaddr, args->val, val);
			futex_put(f, wp);
			return (EWOULDBLOCK);
		}

		error = futex_wait(f, wp, args->timeout);
		break;

	case LINUX_FUTEX_WAKE:

		LINUX_CTR2(sys_futex, "WAKE val %d uaddr %p",
		    args->val, args->uaddr);

		/*
		 * XXX: Linux is able to cope with different addresses
		 * corresponding to the same mapped memory in the sleeping
		 * and waker process(es).
		 */
#ifdef DEBUG
		if (ldebug(sys_futex))
			printf(ARGS(sys_futex, "futex_wake val %d uaddr %p"),
			    args->val, args->uaddr);
#endif
		error = futex_get(args->uaddr, NULL, &f, FUTEX_DONTCREATE);
		if (error)
			return (error);
		if (f == NULL) {
			td->td_retval[0] = 0;
			return (error);
		}
		td->td_retval[0] = futex_wake(f, args->val);
		futex_put(f, NULL);
		break;

	case LINUX_FUTEX_CMP_REQUEUE:

		LINUX_CTR5(sys_futex, "CMP_REQUEUE uaddr %p "
		    "val %d val3 %d uaddr2 %p val2 %d",
		    args->uaddr, args->val, args->val3, args->uaddr2,
		    (int)(unsigned long)args->timeout);

#ifdef DEBUG
		if (ldebug(sys_futex))
			printf(ARGS(sys_futex, "futex_cmp_requeue uaddr %p "
			    "val %d val3 %d uaddr2 %p val2 %d"),
			    args->uaddr, args->val, args->val3, args->uaddr2,
			    (int)(unsigned long)args->timeout);
#endif

		/*
		 * Linux allows this, but we do not: it is an incorrect
		 * use of the declared ABI, so return EINVAL.
		 */
		if (args->uaddr == args->uaddr2)
			return (EINVAL);
		error = futex_get0(args->uaddr, &f, 0);
		if (error)
			return (error);

		/*
		 * To avoid deadlocks, return EINVAL if the second futex
		 * already exists at this point.  Otherwise create the new
		 * futex and ignore the false-positive LOR which thus
		 * happens.
		 *
		 * Glibc falls back to FUTEX_WAKE in case of any error
		 * returned by FUTEX_CMP_REQUEUE.
		 */
		error = futex_get0(args->uaddr2, &f2, FUTEX_DONTEXISTS);
		if (error) {
			futex_put(f, NULL);
			return (error);
		}
		error = copyin(args->uaddr, &val, sizeof(val));
		if (error) {
			LINUX_CTR1(sys_futex, "CMP_REQUEUE copyin failed %d",
			    error);
			futex_put(f2, NULL);
			futex_put(f, NULL);
			return (error);
		}
		if (val != args->val3) {
			LINUX_CTR2(sys_futex, "CMP_REQUEUE val %d != uval %d",
			    args->val, val);
			futex_put(f2, NULL);
			futex_put(f, NULL);
			return (EAGAIN);
		}

		nrwake = (int)(unsigned long)args->timeout;
		td->td_retval[0] = futex_requeue(f, args->val, f2, nrwake);
		futex_put(f2, NULL);
		futex_put(f, NULL);
		break;

	case LINUX_FUTEX_WAKE_OP:

		LINUX_CTR5(sys_futex, "WAKE_OP "
		    "uaddr %p op %d val %x uaddr2 %p val3 %x",
		    args->uaddr, args->op, args->val,
		    args->uaddr2, args->val3);

#ifdef DEBUG
		if (ldebug(sys_futex))
			printf(ARGS(sys_futex, "futex_wake_op "
			    "uaddr %p op %d val %x uaddr2 %p val3 %x"),
			    args->uaddr, args->op, args->val,
			    args->uaddr2, args->val3);
#endif
		error = futex_get0(args->uaddr, &f, 0);
		if (error)
			return (error);
		if (args->uaddr != args->uaddr2)
			error = futex_get0(args->uaddr2, &f2, 0);
		if (error) {
			futex_put(f, NULL);
			return (error);
		}

		/*
		 * futex_atomic_op() returns a positive number as its
		 * result and a negative number on error.
		 */
		op_ret = futex_atomic_op(td, args->val3, args->uaddr2);

		if (op_ret < 0) {
			/* XXX: We don't handle the EFAULT yet. */
			if (op_ret != -EFAULT) {
				if (f2 != NULL)
					futex_put(f2, NULL);
				futex_put(f, NULL);
				return (-op_ret);
			}
			if (f2 != NULL)
				futex_put(f2, NULL);
			futex_put(f, NULL);
			return (EFAULT);
		}

		ret = futex_wake(f, args->val);

		if (op_ret > 0) {
			op_ret = 0;
			nrwake = (int)(unsigned long)args->timeout;

			if (f2 != NULL)
				op_ret += futex_wake(f2, nrwake);
			else
				op_ret += futex_wake(f, nrwake);
			ret += op_ret;
		}
		if (f2 != NULL)
			futex_put(f2, NULL);
		futex_put(f, NULL);
		td->td_retval[0] = ret;
		break;

	case LINUX_FUTEX_LOCK_PI:
		/* not yet implemented */
		return (ENOSYS);

	case LINUX_FUTEX_UNLOCK_PI:
		/* not yet implemented */
		return (ENOSYS);

	case LINUX_FUTEX_TRYLOCK_PI:
		/* not yet implemented */
		return (ENOSYS);

	case LINUX_FUTEX_REQUEUE:

		/*
		 * Glibc has not used this operation since version 2.3.3,
		 * as it is racy and was replaced by the FUTEX_CMP_REQUEUE
		 * operation.  Glibc versions prior to 2.3.3 fall back to
		 * FUTEX_WAKE when FUTEX_REQUEUE returns EINVAL.
		 */
		em = em_find(td->td_proc, EMUL_DONTLOCK);
		if (em->used_requeue == 0) {
			printf("linux(%s (%d)) sys_futex: "
			    "unsupported futex_requeue op\n",
			    td->td_proc->p_comm, td->td_proc->p_pid);
			em->used_requeue = 1;
		}
		return (EINVAL);

	default:
		printf("linux_sys_futex: unknown op %d\n", args->op);
		return (ENOSYS);
	}

	return (error);
}

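/*
 * Record the userland address of the robust futex list head for the
 * current process, so that the futexes it holds can be released on exit
 * (see release_futexes()).
 */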
int
linux_set_robust_list(struct thread *td, struct linux_set_robust_list_args *args)
{
	struct linux_emuldata *em;

#ifdef DEBUG
	if (ldebug(set_robust_list))
		printf(ARGS(set_robust_list, "head %p len %d"),
		    args->head, args->len);
#endif

	if (args->len != sizeof(struct linux_robust_list_head))
		return (EINVAL);

	em = em_find(td->td_proc, EMUL_DOLOCK);
	em->robust_futexes = args->head;
	EMUL_UNLOCK(&emul_lock);

	return (0);
}

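/*
 * Copy out the robust futex list head of the calling process, or of the
 * process named by args->pid if the caller has sufficient privilege,
 * along with its length.
 */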
int
linux_get_robust_list(struct thread *td, struct linux_get_robust_list_args *args)
{
	struct linux_emuldata *em;
	struct linux_robust_list_head *head;
	l_size_t len = sizeof(struct linux_robust_list_head);
	int error = 0;

#ifdef	DEBUG
	if (ldebug(get_robust_list))
		printf(ARGS(get_robust_list, ""));
#endif

	if (!args->pid) {
		em = em_find(td->td_proc, EMUL_DONTLOCK);
		head = em->robust_futexes;
	} else {
		struct proc *p;

		p = pfind(args->pid);
		if (p == NULL)
			return (ESRCH);

		em = em_find(p, EMUL_DONTLOCK);
		/* XXX: ptrace? */
		if (priv_check(td, PRIV_CRED_SETUID) ||
		    priv_check(td, PRIV_CRED_SETEUID) ||
		    p_candebug(td, p)) {
			PROC_UNLOCK(p);
			return (EPERM);
		}
		head = em->robust_futexes;

		PROC_UNLOCK(p);
	}

	error = copyout(&len, args->len, sizeof(l_size_t));
	if (error)
		return (EFAULT);

	error = copyout(head, args->head, sizeof(struct linux_robust_list_head));

	return (error);
}

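/*
 * Handle one robust futex held by the dying process p: if the futex word
 * is still owned by p, mark it FUTEX_OWNER_DIED (preserving FUTEX_WAITERS)
 * with a compare-and-swap and, for a non-PI futex with waiters, wake one
 * of them so the lock can be recovered.
 */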
static int
handle_futex_death(struct proc *p, uint32_t *uaddr, int pi)
{
	uint32_t uval, nval, mval;
	struct futex *f;
	int error;

retry:
	if (copyin(uaddr, &uval, 4))
		return (EFAULT);
	if ((uval & FUTEX_TID_MASK) == p->p_pid) {
		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
		nval = casuword32(uaddr, uval, mval);

		if (nval == -1)
			return (EFAULT);

		if (nval != uval)
			goto retry;

		if (!pi && (uval & FUTEX_WAITERS)) {
			error = futex_get(uaddr, NULL, &f,
			    FUTEX_DONTCREATE);
			if (error)
				return (error);
			if (f != NULL) {
				futex_wake(f, 1);
				futex_put(f, NULL);
			}
		}
	}

	return (0);
}

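/*
 * Fetch one robust-list entry pointer from userland.  The least
 * significant bit of the stored value flags a PI futex and is returned
 * separately in *pi.
 */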
static int
fetch_robust_entry(struct linux_robust_list **entry,
    struct linux_robust_list **head, int *pi)
{
	l_ulong uentry;

	if (copyin((const void *)head, &uentry, sizeof(l_ulong)))
		return (EFAULT);

	*entry = (void *)(uentry & ~1UL);
	*pi = uentry & 1;

	return (0);
}

/* Walk the list of robust futexes, releasing them. */
void
release_futexes(struct proc *p)
{
	struct linux_robust_list_head *head = NULL;
	struct linux_robust_list *entry, *next_entry, *pending;
	unsigned int limit = 2048, pi, next_pi, pip;
	struct linux_emuldata *em;
	l_long futex_offset;
	int rc;

	em = em_find(p, EMUL_DONTLOCK);
	head = em->robust_futexes;

	if (head == NULL)
		return;

	if (fetch_robust_entry(&entry, PTRIN(&head->list.next), &pi))
		return;

	if (copyin(&head->futex_offset, &futex_offset, sizeof(futex_offset)))
		return;

	if (fetch_robust_entry(&pending, PTRIN(&head->pending_list), &pip))
		return;

	while (entry != &head->list) {
		rc = fetch_robust_entry(&next_entry, PTRIN(&entry->next), &next_pi);

		if (entry != pending)
			if (handle_futex_death(p, (uint32_t *)entry + futex_offset, pi))
				return;
		if (rc)
			return;

		entry = next_entry;
		pi = next_pi;

		if (!--limit)
			break;

		sched_relinquish(curthread);
	}

	if (pending)
		handle_futex_death(p, (uint32_t *)pending + futex_offset, pip);
}
813