/* $NetBSD: linux_futex.c,v 1.7 2006/07/24 19:01:49 manu Exp $ */

/*-
 * Copyright (c) 2005 Emmanuel Dreyfus, all rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Emmanuel Dreyfus
 * 4. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#if 0
__KERNEL_RCSID(1, "$NetBSD: linux_futex.c,v 1.7 2006/07/24 19:01:49 manu Exp $");
#endif

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/umtx.h>

#ifdef COMPAT_LINUX32
#include <machine/../linux32/linux.h>
#include <machine/../linux32/linux32_proto.h>
#else
#include <machine/../linux/linux.h>
#include <machine/../linux/linux_proto.h>
#endif
#include <compat/linux/linux_emul.h>
#include <compat/linux/linux_futex.h>
#include <compat/linux/linux_util.h>

static MALLOC_DEFINE(M_FUTEX, "futex", "Linux futexes");
static MALLOC_DEFINE(M_FUTEX_WP, "futex wp", "Linux futexes wp");

struct futex;

struct waiting_proc {
        uint32_t        wp_flags;
        struct futex    *wp_futex;
        TAILQ_ENTRY(waiting_proc) wp_list;
};

struct futex {
        struct sx       f_lck;
        uint32_t        *f_uaddr;       /* user-supplied address, for debugging */
        struct umtx_key f_key;
        uint32_t        f_refcount;
        uint32_t        f_bitset;
        LIST_ENTRY(futex) f_list;
        TAILQ_HEAD(lf_waiting_proc, waiting_proc) f_waiting_proc;
};

struct futex_list futex_list;

#define FUTEX_LOCK(f)           sx_xlock(&(f)->f_lck)
#define FUTEX_UNLOCK(f)         sx_xunlock(&(f)->f_lck)
#define FUTEX_INIT(f)           sx_init_flags(&(f)->f_lck, "ftlk", SX_DUPOK)
#define FUTEX_DESTROY(f)        sx_destroy(&(f)->f_lck)
#define FUTEX_ASSERT_LOCKED(f)  sx_assert(&(f)->f_lck, SA_XLOCKED)

struct mtx futex_mtx;                   /* protects the futex list */
#define FUTEXES_LOCK            mtx_lock(&futex_mtx)
#define FUTEXES_UNLOCK          mtx_unlock(&futex_mtx)
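
/*
 * Locking and reference counting overview, as implemented below:
 *
 * - futex_mtx (FUTEXES_LOCK) protects futex_list and every futex's
 *   f_refcount.
 * - Each futex's f_lck (FUTEX_LOCK) protects its f_waiting_proc queue and
 *   serves as the interlock that waiters sleep on in futex_sleep().
 * - futex_get()/futex_get0() return the futex locked with a reference
 *   held; futex_put() drops a reference, unlocks, and destroys the futex
 *   when the last reference goes away.
 */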

/* flags for futex_get() */
#define FUTEX_CREATE_WP         0x1     /* create waiting_proc */
#define FUTEX_DONTCREATE        0x2     /* don't create futex if it does not exist */
#define FUTEX_DONTEXISTS        0x4     /* return EINVAL if futex exists */
#define FUTEX_SHARED            0x8     /* shared futex */

/* wp_flags */
#define FUTEX_WP_REQUEUED       0x1     /* wp requeued - wp moved from the
                                         * wp_list of the futex the thread
                                         * sleeps on to the wp_list of
                                         * another futex.
                                         */
#define FUTEX_WP_REMOVED        0x2     /* wp is woken up and removed from the
                                         * futex wp_list to prevent a double
                                         * wakeup.
                                         */

/* support.s */
int futex_xchgl(int oparg, uint32_t *uaddr, int *oldval);
int futex_addl(int oparg, uint32_t *uaddr, int *oldval);
int futex_orl(int oparg, uint32_t *uaddr, int *oldval);
int futex_andl(int oparg, uint32_t *uaddr, int *oldval);
int futex_xorl(int oparg, uint32_t *uaddr, int *oldval);

static void
futex_put(struct futex *f, struct waiting_proc *wp)
{

        FUTEX_ASSERT_LOCKED(f);
        if (wp != NULL) {
                if ((wp->wp_flags & FUTEX_WP_REMOVED) == 0)
                        TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
                free(wp, M_FUTEX_WP);
        }

        FUTEXES_LOCK;
        if (--f->f_refcount == 0) {
                LIST_REMOVE(f, f_list);
                FUTEXES_UNLOCK;
                FUTEX_UNLOCK(f);

                LINUX_CTR3(sys_futex, "futex_put destroy uaddr %p ref %d "
                    "shared %d", f->f_uaddr, f->f_refcount, f->f_key.shared);
                umtx_key_release(&f->f_key);
                FUTEX_DESTROY(f);
                free(f, M_FUTEX);
                return;
        }

        LINUX_CTR3(sys_futex, "futex_put uaddr %p ref %d shared %d",
            f->f_uaddr, f->f_refcount, f->f_key.shared);
        FUTEXES_UNLOCK;
        FUTEX_UNLOCK(f);
}

static int
futex_get0(uint32_t *uaddr, struct futex **newf, uint32_t flags)
{
        struct futex *f, *tmpf;
        struct umtx_key key;
        int error;

        *newf = tmpf = NULL;

        error = umtx_key_get(uaddr, TYPE_FUTEX, (flags & FUTEX_SHARED) ?
            AUTO_SHARE : THREAD_SHARE, &key);
        if (error)
                return (error);
retry:
        FUTEXES_LOCK;
        LIST_FOREACH(f, &futex_list, f_list) {
                if (umtx_key_match(&f->f_key, &key)) {
                        if (tmpf != NULL) {
                                FUTEX_UNLOCK(tmpf);
                                FUTEX_DESTROY(tmpf);
                                free(tmpf, M_FUTEX);
                        }
                        if (flags & FUTEX_DONTEXISTS) {
                                FUTEXES_UNLOCK;
                                umtx_key_release(&key);
                                return (EINVAL);
                        }

                        /*
                         * Increment the refcount of the found futex to
                         * prevent it from being deallocated before
                         * FUTEX_LOCK().
                         */
                        ++f->f_refcount;
                        FUTEXES_UNLOCK;
                        umtx_key_release(&key);

                        FUTEX_LOCK(f);
                        *newf = f;
                        LINUX_CTR3(sys_futex, "futex_get uaddr %p ref %d shared %d",
                            uaddr, f->f_refcount, f->f_key.shared);
                        return (0);
                }
        }

        if (flags & FUTEX_DONTCREATE) {
                FUTEXES_UNLOCK;
                umtx_key_release(&key);
                LINUX_CTR1(sys_futex, "futex_get uaddr %p null", uaddr);
                return (0);
        }

        if (tmpf == NULL) {
                FUTEXES_UNLOCK;
                tmpf = malloc(sizeof(*tmpf), M_FUTEX, M_WAITOK | M_ZERO);
                tmpf->f_uaddr = uaddr;
                tmpf->f_key = key;
                tmpf->f_refcount = 1;
                tmpf->f_bitset = FUTEX_BITSET_MATCH_ANY;
                FUTEX_INIT(tmpf);
                TAILQ_INIT(&tmpf->f_waiting_proc);

                /*
                 * Lock the new futex before inserting it into the futex
                 * list to prevent its use by others.
                 */
                FUTEX_LOCK(tmpf);
                goto retry;
        }

        LIST_INSERT_HEAD(&futex_list, tmpf, f_list);
        FUTEXES_UNLOCK;

        LINUX_CTR3(sys_futex, "futex_get uaddr %p ref %d shared %d new",
            uaddr, tmpf->f_refcount, tmpf->f_key.shared);
        *newf = tmpf;
        return (0);
}

static int
futex_get(uint32_t *uaddr, struct waiting_proc **wp, struct futex **f,
    uint32_t flags)
{
        int error;

        if (flags & FUTEX_CREATE_WP) {
                *wp = malloc(sizeof(struct waiting_proc), M_FUTEX_WP, M_WAITOK);
                (*wp)->wp_flags = 0;
        }
        error = futex_get0(uaddr, f, flags);
        if (error) {
                if (flags & FUTEX_CREATE_WP)
                        free(*wp, M_FUTEX_WP);
                return (error);
        }
        if (flags & FUTEX_CREATE_WP) {
                TAILQ_INSERT_HEAD(&(*f)->f_waiting_proc, *wp, wp_list);
                (*wp)->wp_futex = *f;
        }

        return (error);
}

static int
futex_sleep(struct futex *f, struct waiting_proc *wp, int timeout)
{
        int error;

        FUTEX_ASSERT_LOCKED(f);
        LINUX_CTR4(sys_futex, "futex_sleep enter uaddr %p wp %p timo %d ref %d",
            f->f_uaddr, wp, timeout, f->f_refcount);
        error = sx_sleep(wp, &f->f_lck, PCATCH, "futex", timeout);
        if (wp->wp_flags & FUTEX_WP_REQUEUED) {
                KASSERT(f != wp->wp_futex, ("futex != wp_futex"));
                LINUX_CTR5(sys_futex, "futex_sleep out error %d uaddr %p wp"
                    " %p requeued uaddr %p ref %d",
                    error, f->f_uaddr, wp, wp->wp_futex->f_uaddr,
                    wp->wp_futex->f_refcount);
                futex_put(f, NULL);
                f = wp->wp_futex;
                FUTEX_LOCK(f);
        } else
                LINUX_CTR3(sys_futex, "futex_sleep out error %d uaddr %p wp %p",
                    error, f->f_uaddr, wp);

        futex_put(f, wp);
        return (error);
}

static int
futex_wake(struct futex *f, int n, uint32_t bitset)
{
        struct waiting_proc *wp, *wpt;
        int count = 0;

        if (bitset == 0)
                return (EINVAL);

        FUTEX_ASSERT_LOCKED(f);
        TAILQ_FOREACH_SAFE(wp, &f->f_waiting_proc, wp_list, wpt) {
                LINUX_CTR3(sys_futex, "futex_wake uaddr %p wp %p ref %d",
                    f->f_uaddr, wp, f->f_refcount);
                /*
                 * Unless we find a matching bit in the bitset, continue
                 * searching.
                 */
                if (!(wp->wp_futex->f_bitset & bitset))
                        continue;

                wp->wp_flags |= FUTEX_WP_REMOVED;
                TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
                wakeup_one(wp);
                if (++count == n)
                        break;
        }

        return (count);
}

static int
futex_requeue(struct futex *f, int n, struct futex *f2, int n2)
{
        struct waiting_proc *wp, *wpt;
        int count = 0;

        FUTEX_ASSERT_LOCKED(f);
        FUTEX_ASSERT_LOCKED(f2);

        TAILQ_FOREACH_SAFE(wp, &f->f_waiting_proc, wp_list, wpt) {
                if (++count <= n) {
                        LINUX_CTR2(sys_futex, "futex_req_wake uaddr %p wp %p",
                            f->f_uaddr, wp);
                        wp->wp_flags |= FUTEX_WP_REMOVED;
                        TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
                        wakeup_one(wp);
                } else {
                        LINUX_CTR3(sys_futex, "futex_requeue uaddr %p wp %p to %p",
                            f->f_uaddr, wp, f2->f_uaddr);
                        wp->wp_flags |= FUTEX_WP_REQUEUED;
                        /* Move wp to the wp_list of the f2 futex. */
                        TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
                        TAILQ_INSERT_HEAD(&f2->f_waiting_proc, wp, wp_list);

                        /*
                         * A thread sleeping on wp must acquire the f2 lock
                         * after waking, so increment the refcount of f2 to
                         * prevent its premature deallocation.
                         */
                        wp->wp_futex = f2;
                        FUTEXES_LOCK;
                        ++f2->f_refcount;
                        FUTEXES_UNLOCK;
                        if (count - n >= n2)
                                break;
                }
        }

        return (count);
}

static int
futex_wait(struct futex *f, struct waiting_proc *wp, struct l_timespec *ts,
    uint32_t bitset)
{
        struct l_timespec timeout;
        struct timeval tv;
        int timeout_hz;
        int error;

        if (bitset == 0)
                return (EINVAL);
        f->f_bitset = bitset;

        if (ts != NULL) {
                error = copyin(ts, &timeout, sizeof(timeout));
                if (error)
                        return (error);
                TIMESPEC_TO_TIMEVAL(&tv, &timeout);
                error = itimerfix(&tv);
                if (error)
                        return (error);
                timeout_hz = tvtohz(&tv);
        } else
                timeout_hz = 0;

        error = futex_sleep(f, wp, timeout_hz);
        if (error == EWOULDBLOCK)
                error = ETIMEDOUT;

        return (error);
}
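
/*
 * Decode helper for FUTEX_WAKE_OP.  The encoded_op argument packs four
 * fields, matching the shifts used below:
 *
 *      bits 31-28      op      (operation; may include FUTEX_OP_OPARG_SHIFT)
 *      bits 27-24      cmp     (comparison)
 *      bits 23-12      oparg   (operand, sign-extended 12 bits)
 *      bits 11-0       cmparg  (comparison operand, sign-extended 12 bits)
 *
 * The operation is applied atomically to *uaddr and the function returns
 * the result of comparing the old value against cmparg, or a negative
 * error value on failure.
 */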
static int
futex_atomic_op(struct thread *td, int encoded_op, uint32_t *uaddr)
{
        int op = (encoded_op >> 28) & 7;
        int cmp = (encoded_op >> 24) & 15;
        int oparg = (encoded_op << 8) >> 20;
        int cmparg = (encoded_op << 20) >> 20;
        int oldval = 0, ret;

        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                oparg = 1 << oparg;

#ifdef DEBUG
        if (ldebug(sys_futex))
                printf("futex_atomic_op: op = %d, cmp = %d, oparg = %x, "
                    "cmparg = %x, uaddr = %p\n",
                    op, cmp, oparg, cmparg, uaddr);
#endif
        /* XXX: Linux verifies access here and returns EFAULT */

        switch (op) {
        case FUTEX_OP_SET:
                ret = futex_xchgl(oparg, uaddr, &oldval);
                break;
        case FUTEX_OP_ADD:
                ret = futex_addl(oparg, uaddr, &oldval);
                break;
        case FUTEX_OP_OR:
                ret = futex_orl(oparg, uaddr, &oldval);
                break;
        case FUTEX_OP_ANDN:
                ret = futex_andl(~oparg, uaddr, &oldval);
                break;
        case FUTEX_OP_XOR:
                ret = futex_xorl(oparg, uaddr, &oldval);
                break;
        default:
                ret = -ENOSYS;
                break;
        }

        if (ret)
                return (ret);

        switch (cmp) {
        case FUTEX_OP_CMP_EQ:
                return (oldval == cmparg);
        case FUTEX_OP_CMP_NE:
                return (oldval != cmparg);
        case FUTEX_OP_CMP_LT:
                return (oldval < cmparg);
        case FUTEX_OP_CMP_GE:
                return (oldval >= cmparg);
        case FUTEX_OP_CMP_LE:
                return (oldval <= cmparg);
        case FUTEX_OP_CMP_GT:
                return (oldval > cmparg);
        default:
                return (-ENOSYS);
        }
}
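
/*
 * Entry point for the Linux futex(2) system call.  As an illustrative
 * sketch only (not part of this file), a Linux userland waiter/waker pair
 * built on the raw syscall typically looks like:
 *
 *      while (flag == 0)       (waiter: sleep only while *uaddr == val)
 *              syscall(SYS_futex, &flag, FUTEX_WAIT, 0, NULL, NULL, 0);
 *
 *      flag = 1;               (waker: change the value, then wake one)
 *      syscall(SYS_futex, &flag, FUTEX_WAKE, 1, NULL, NULL, 0);
 *
 * The FUTEX_WAIT case below implements that check: it copies in the
 * current value of *uaddr and returns EWOULDBLOCK without sleeping if it
 * no longer equals args->val.
 */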
int
linux_sys_futex(struct thread *td, struct linux_sys_futex_args *args)
{
        int clockrt, nrwake, op_ret, ret, val;
        struct linux_emuldata *em;
        struct waiting_proc *wp;
        struct futex *f, *f2;
        int error;
        uint32_t flags;

        if (args->op & LINUX_FUTEX_PRIVATE_FLAG) {
                flags = 0;
                args->op &= ~LINUX_FUTEX_PRIVATE_FLAG;
        } else
                flags = FUTEX_SHARED;

        /*
         * Switching between CLOCK_MONOTONIC and CLOCK_REALTIME is not
         * currently supported.  However, Linux only allows
         * FUTEX_CLOCK_REALTIME to be used with the FUTEX_WAIT_BITSET and
         * FUTEX_WAIT_REQUEUE_PI operations.
         */
        clockrt = args->op & LINUX_FUTEX_CLOCK_REALTIME;
        args->op = args->op & ~LINUX_FUTEX_CLOCK_REALTIME;
        if (clockrt && args->op != LINUX_FUTEX_WAIT_BITSET &&
            args->op != LINUX_FUTEX_WAIT_REQUEUE_PI)
                return (ENOSYS);

        error = 0;
        f = f2 = NULL;

        switch (args->op) {
        case LINUX_FUTEX_WAIT:
                args->val3 = FUTEX_BITSET_MATCH_ANY;
                /* FALLTHROUGH */

        case LINUX_FUTEX_WAIT_BITSET:

                LINUX_CTR3(sys_futex, "WAIT uaddr %p val %d val3 %d",
                    args->uaddr, args->val, args->val3);
#ifdef DEBUG
                if (ldebug(sys_futex))
                        printf(ARGS(sys_futex,
                            "futex_wait uaddr %p val %d val3 %d"),
                            args->uaddr, args->val, args->val3);
#endif
                error = futex_get(args->uaddr, &wp, &f,
                    flags | FUTEX_CREATE_WP);
                if (error)
                        return (error);
                error = copyin(args->uaddr, &val, sizeof(val));
                if (error) {
                        LINUX_CTR1(sys_futex, "WAIT copyin failed %d",
                            error);
                        futex_put(f, wp);
                        return (error);
                }
                if (val != args->val) {
                        LINUX_CTR4(sys_futex,
                            "WAIT uaddr %p val %d != uval %d val3 %d",
                            args->uaddr, args->val, val, args->val3);
                        futex_put(f, wp);
                        return (EWOULDBLOCK);
                }

                error = futex_wait(f, wp, args->timeout, args->val3);
                break;

        case LINUX_FUTEX_WAKE:
                args->val3 = FUTEX_BITSET_MATCH_ANY;
                /* FALLTHROUGH */

        case LINUX_FUTEX_WAKE_BITSET:

                LINUX_CTR3(sys_futex, "WAKE uaddr %p val %d val3 %d",
                    args->uaddr, args->val, args->val3);

#ifdef DEBUG
                if (ldebug(sys_futex))
                        printf(ARGS(sys_futex, "futex_wake uaddr %p val %d val3 %d"),
                            args->uaddr, args->val, args->val3);
#endif
                error = futex_get(args->uaddr, NULL, &f,
                    flags | FUTEX_DONTCREATE);
                if (error)
                        return (error);
                if (f == NULL) {
                        td->td_retval[0] = 0;
                        return (error);
                }
                td->td_retval[0] = futex_wake(f, args->val, args->val3);
                futex_put(f, NULL);
                break;

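        /*
         * For FUTEX_CMP_REQUEUE the timeout argument does not carry a
         * timeout: following the Linux calling convention it is reused as
         * val2, the maximum number of waiters to requeue to uaddr2 after up
         * to val waiters have been woken.
         */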
        case LINUX_FUTEX_CMP_REQUEUE:

                LINUX_CTR5(sys_futex, "CMP_REQUEUE uaddr %p "
                    "val %d val3 %d uaddr2 %p val2 %d",
                    args->uaddr, args->val, args->val3, args->uaddr2,
                    (int)(unsigned long)args->timeout);

#ifdef DEBUG
                if (ldebug(sys_futex))
                        printf(ARGS(sys_futex, "futex_cmp_requeue uaddr %p "
                            "val %d val3 %d uaddr2 %p val2 %d"),
                            args->uaddr, args->val, args->val3, args->uaddr2,
                            (int)(unsigned long)args->timeout);
#endif

                /*
                 * Linux allows this; we do not, because it is an incorrect
                 * use of the declared ABI, so return EINVAL.
                 */
                if (args->uaddr == args->uaddr2)
                        return (EINVAL);
                error = futex_get(args->uaddr, NULL, &f, flags);
                if (error)
                        return (error);

                /*
                 * To avoid deadlocks, return EINVAL if the second futex
                 * already exists at this point.
                 *
                 * Glibc falls back to FUTEX_WAKE if FUTEX_CMP_REQUEUE
                 * returns any error.
                 */
                error = futex_get(args->uaddr2, NULL, &f2,
                    flags | FUTEX_DONTEXISTS);
                if (error) {
                        futex_put(f, NULL);
                        return (error);
                }
                error = copyin(args->uaddr, &val, sizeof(val));
                if (error) {
                        LINUX_CTR1(sys_futex, "CMP_REQUEUE copyin failed %d",
                            error);
                        futex_put(f2, NULL);
                        futex_put(f, NULL);
                        return (error);
                }
                if (val != args->val3) {
                        LINUX_CTR2(sys_futex, "CMP_REQUEUE val %d != uval %d",
                            args->val, val);
                        futex_put(f2, NULL);
                        futex_put(f, NULL);
                        return (EAGAIN);
                }

                nrwake = (int)(unsigned long)args->timeout;
                td->td_retval[0] = futex_requeue(f, args->val, f2, nrwake);
                futex_put(f2, NULL);
                futex_put(f, NULL);
                break;

        case LINUX_FUTEX_WAKE_OP:

                LINUX_CTR5(sys_futex, "WAKE_OP "
                    "uaddr %p op %d val %x uaddr2 %p val3 %x",
                    args->uaddr, args->op, args->val,
                    args->uaddr2, args->val3);

#ifdef DEBUG
                if (ldebug(sys_futex))
                        printf(ARGS(sys_futex, "futex_wake_op "
                            "uaddr %p op %d val %x uaddr2 %p val3 %x"),
                            args->uaddr, args->op, args->val,
                            args->uaddr2, args->val3);
#endif
                error = futex_get(args->uaddr, NULL, &f, flags);
                if (error)
                        return (error);
                if (args->uaddr != args->uaddr2)
                        error = futex_get(args->uaddr2, NULL, &f2, flags);
                if (error) {
                        futex_put(f, NULL);
                        return (error);
                }

                /*
                 * futex_atomic_op() returns a non-negative comparison
                 * result on success and a negative value on error.
                 */
                op_ret = futex_atomic_op(td, args->val3, args->uaddr2);

                if (op_ret < 0) {
                        /* XXX: We don't handle the EFAULT yet. */
                        if (op_ret != -EFAULT) {
                                if (f2 != NULL)
                                        futex_put(f2, NULL);
                                futex_put(f, NULL);
                                return (-op_ret);
                        }
                        if (f2 != NULL)
                                futex_put(f2, NULL);
                        futex_put(f, NULL);
                        return (EFAULT);
                }

                ret = futex_wake(f, args->val, args->val3);

                if (op_ret > 0) {
                        op_ret = 0;
                        nrwake = (int)(unsigned long)args->timeout;

                        if (f2 != NULL)
                                op_ret += futex_wake(f2, nrwake, args->val3);
                        else
                                op_ret += futex_wake(f, nrwake, args->val3);
                        ret += op_ret;
                }
                if (f2 != NULL)
                        futex_put(f2, NULL);
                futex_put(f, NULL);
                td->td_retval[0] = ret;
                break;

        case LINUX_FUTEX_LOCK_PI:
                /* not yet implemented */
                linux_msg(td,
                    "linux_sys_futex: "
                    "op LINUX_FUTEX_LOCK_PI not implemented\n");
                return (ENOSYS);

        case LINUX_FUTEX_UNLOCK_PI:
                /* not yet implemented */
                linux_msg(td,
                    "linux_sys_futex: "
                    "op LINUX_FUTEX_UNLOCK_PI not implemented\n");
                return (ENOSYS);

        case LINUX_FUTEX_TRYLOCK_PI:
                /* not yet implemented */
                linux_msg(td,
                    "linux_sys_futex: "
                    "op LINUX_FUTEX_TRYLOCK_PI not implemented\n");
                return (ENOSYS);

        case LINUX_FUTEX_REQUEUE:

                /*
                 * Glibc has not used this operation since version 2.3.3,
                 * as it is racy and was replaced by FUTEX_CMP_REQUEUE.
                 * Glibc versions prior to 2.3.3 fall back to FUTEX_WAKE
                 * when FUTEX_REQUEUE returns EINVAL.
                 */
                em = em_find(td->td_proc, EMUL_DONTLOCK);
                if ((em->flags & LINUX_XDEPR_REQUEUEOP) == 0) {
                        linux_msg(td,
                            "linux_sys_futex: "
                            "unsupported futex_requeue op\n");
                        em->flags |= LINUX_XDEPR_REQUEUEOP;
                }
                return (EINVAL);

        case LINUX_FUTEX_WAIT_REQUEUE_PI:
                /* not yet implemented */
                linux_msg(td,
                    "linux_sys_futex: "
                    "op FUTEX_WAIT_REQUEUE_PI not implemented\n");
                return (ENOSYS);

        case LINUX_FUTEX_CMP_REQUEUE_PI:
                /* not yet implemented */
                linux_msg(td,
                    "linux_sys_futex: "
                    "op LINUX_FUTEX_CMP_REQUEUE_PI not implemented\n");
                return (ENOSYS);

        default:
                linux_msg(td,
                    "linux_sys_futex: unknown op %d\n", args->op);
                return (ENOSYS);
        }

        return (error);
}
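
/*
 * Robust futex support: userland registers, via set_robust_list(), the
 * head of a list of robust futexes it holds so that the kernel can
 * release them on exit; release_futexes() below performs that walk.
 * The layout matches the Linux robust_list ABI consumed below: a
 * circular list of struct linux_robust_list entries, a futex_offset
 * giving the byte distance from an entry to its futex word, and an
 * optional pending_list entry for a lock currently being acquired or
 * released.
 */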
int
linux_set_robust_list(struct thread *td, struct linux_set_robust_list_args *args)
{
        struct linux_emuldata *em;

#ifdef DEBUG
        if (ldebug(set_robust_list))
                printf(ARGS(set_robust_list, "head %p len %d"),
                    args->head, args->len);
#endif

        if (args->len != sizeof(struct linux_robust_list_head))
                return (EINVAL);

        em = em_find(td->td_proc, EMUL_DOLOCK);
        em->robust_futexes = args->head;
        EMUL_UNLOCK(&emul_lock);

        return (0);
}

int
linux_get_robust_list(struct thread *td, struct linux_get_robust_list_args *args)
{
        struct linux_emuldata *em;
        struct linux_robust_list_head *head;
        l_size_t len = sizeof(struct linux_robust_list_head);
        int error = 0;

#ifdef DEBUG
        if (ldebug(get_robust_list))
                printf(ARGS(get_robust_list, ""));
#endif

        if (!args->pid) {
                em = em_find(td->td_proc, EMUL_DONTLOCK);
                head = em->robust_futexes;
        } else {
                struct proc *p;

                p = pfind(args->pid);
                if (p == NULL)
                        return (ESRCH);

                em = em_find(p, EMUL_DONTLOCK);
                /* XXX: ptrace? */
                if (priv_check(td, PRIV_CRED_SETUID) ||
                    priv_check(td, PRIV_CRED_SETEUID) ||
                    p_candebug(td, p)) {
                        PROC_UNLOCK(p);
                        return (EPERM);
                }
                head = em->robust_futexes;

                PROC_UNLOCK(p);
        }

        error = copyout(&len, args->len, sizeof(l_size_t));
        if (error)
                return (EFAULT);

        /* Copy out the head pointer itself, not the structure it names. */
        error = copyout(&head, args->head, sizeof(head));

        return (error);
}

static int
handle_futex_death(struct proc *p, uint32_t *uaddr, int pi)
{
        uint32_t uval, nval, mval;
        struct futex *f;
        int error;

retry:
        if (copyin(uaddr, &uval, 4))
                return (EFAULT);
        if ((uval & FUTEX_TID_MASK) == p->p_pid) {
                mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
                nval = casuword32(uaddr, uval, mval);

                if (nval == -1)
                        return (EFAULT);

                if (nval != uval)
                        goto retry;

                if (!pi && (uval & FUTEX_WAITERS)) {
                        error = futex_get(uaddr, NULL, &f,
                            FUTEX_DONTCREATE | FUTEX_SHARED);
                        if (error)
                                return (error);
                        if (f != NULL) {
                                futex_wake(f, 1, FUTEX_BITSET_MATCH_ANY);
                                futex_put(f, NULL);
                        }
                }
        }

        return (0);
}

static int
fetch_robust_entry(struct linux_robust_list **entry,
    struct linux_robust_list **head, unsigned int *pi)
{
        l_ulong uentry;

        if (copyin((const void *)head, &uentry, sizeof(l_ulong)))
                return (EFAULT);

        *entry = (void *)(uentry & ~1UL);
        *pi = uentry & 1;

        return (0);
}

/*
 * This walks the list of robust futexes, releasing them.
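 *
 * Each entry is a userland struct linux_robust_list; the futex word it
 * protects lives futex_offset bytes from the entry.  For every entry
 * owned by the exiting process, handle_futex_death() marks the futex
 * FUTEX_OWNER_DIED and wakes one waiter if any are present.  The walk is
 * capped at 2048 entries so a corrupted (circular) user list cannot keep
 * the kernel spinning, and the pending entry, if any, is handled last.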
 */
void
release_futexes(struct proc *p)
{
        struct linux_robust_list_head *head = NULL;
        struct linux_robust_list *entry, *next_entry, *pending;
        unsigned int limit = 2048, pi, next_pi, pip;
        struct linux_emuldata *em;
        l_long futex_offset;
        int rc;

        em = em_find(p, EMUL_DONTLOCK);
        head = em->robust_futexes;

        if (head == NULL)
                return;

        if (fetch_robust_entry(&entry, PTRIN(&head->list.next), &pi))
                return;

        if (copyin(&head->futex_offset, &futex_offset, sizeof(futex_offset)))
                return;

        if (fetch_robust_entry(&pending, PTRIN(&head->pending_list), &pip))
                return;

        while (entry != &head->list) {
                rc = fetch_robust_entry(&next_entry, PTRIN(&entry->next), &next_pi);

                /* futex_offset is a byte offset from the list entry. */
                if (entry != pending)
                        if (handle_futex_death(p,
                            (uint32_t *)((caddr_t)entry + futex_offset), pi))
                                return;
                if (rc)
                        return;

                entry = next_entry;
                pi = next_pi;

                if (!--limit)
                        break;

                sched_relinquish(curthread);
        }

        if (pending)
                handle_futex_death(p,
                    (uint32_t *)((caddr_t)pending + futex_offset), pip);
}