/* $NetBSD: linux_futex.c,v 1.7 2006/07/24 19:01:49 manu Exp $ */

/*-
 * Copyright (c) 2005 Emmanuel Dreyfus, all rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Emmanuel Dreyfus
 * 4. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
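
/*
 * Linux futex(2) emulation for the Linuxulator: FUTEX_WAIT/FUTEX_WAKE (and
 * their bitset variants), FUTEX_CMP_REQUEUE, FUTEX_WAKE_OP, and the robust
 * futex list cleanup performed at process exit (see release_futexes() below).
 */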

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#if 0
__KERNEL_RCSID(1, "$NetBSD: linux_futex.c,v 1.7 2006/07/24 19:01:49 manu Exp $");
#endif

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/sx.h>

#ifdef COMPAT_LINUX32
#include <machine/../linux32/linux.h>
#include <machine/../linux32/linux32_proto.h>
#else
#include <machine/../linux/linux.h>
#include <machine/../linux/linux_proto.h>
#endif
#include <compat/linux/linux_emul.h>
#include <compat/linux/linux_futex.h>
#include <compat/linux/linux_util.h>

MALLOC_DEFINE(M_FUTEX, "futex", "Linux futexes");
MALLOC_DEFINE(M_FUTEX_WP, "futex wp", "Linux futexes wp");

struct futex;

struct waiting_proc {
	uint32_t	wp_flags;
	struct futex	*wp_futex;
	TAILQ_ENTRY(waiting_proc) wp_list;
};

struct futex {
	struct sx	f_lck;
	uint32_t	*f_uaddr;
	uint32_t	f_refcount;
	uint32_t	f_bitset;
	LIST_ENTRY(futex) f_list;
	TAILQ_HEAD(lf_waiting_proc, waiting_proc) f_waiting_proc;
};

struct futex_list futex_list;

#define	FUTEX_LOCK(f)		sx_xlock(&(f)->f_lck)
#define	FUTEX_UNLOCK(f)		sx_xunlock(&(f)->f_lck)
#define	FUTEX_INIT(f)		sx_init_flags(&(f)->f_lck, "ftlk", SX_DUPOK)
#define	FUTEX_DESTROY(f)	sx_destroy(&(f)->f_lck)
#define	FUTEX_ASSERT_LOCKED(f)	sx_assert(&(f)->f_lck, SA_XLOCKED)

struct mtx futex_mtx;			/* protects the futex list */
#define	FUTEXES_LOCK		mtx_lock(&futex_mtx)
#define	FUTEXES_UNLOCK		mtx_unlock(&futex_mtx)

/* flags for futex_get() */
#define	FUTEX_CREATE_WP		0x1	/* create a waiting_proc */
#define	FUTEX_DONTCREATE	0x2	/* don't create the futex if it does not exist */
#define	FUTEX_DONTEXISTS	0x4	/* return EINVAL if the futex already exists */

/* wp_flags */
#define	FUTEX_WP_REQUEUED	0x1	/* wp requeued - moved from the wp_list
					 * of the futex the thread sleeps on to
					 * the wp_list of another futex.
					 */
#define	FUTEX_WP_REMOVED	0x2	/* wp has been woken up and removed from
					 * the futex wp_list to prevent a double
					 * wakeup.
					 */
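
/*
 * Lifecycle note (summary of the code below): futex_get() looks the user
 * address up on futex_list (or allocates a new entry), returning with the
 * futex referenced and its sx lock held; futex_put() later drops both the
 * reference and the lock and destroys the futex once the last reference is
 * gone.  An illustrative sketch of the pattern used by the callers:
 *
 *	error = futex_get(args->uaddr, &wp, &f, FUTEX_CREATE_WP);
 *	if (error)
 *		return (error);
 *	...
 *	futex_put(f, wp);
 */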

/* support.s */
int futex_xchgl(int oparg, uint32_t *uaddr, int *oldval);
int futex_addl(int oparg, uint32_t *uaddr, int *oldval);
int futex_orl(int oparg, uint32_t *uaddr, int *oldval);
int futex_andl(int oparg, uint32_t *uaddr, int *oldval);
int futex_xorl(int oparg, uint32_t *uaddr, int *oldval);

static void
futex_put(struct futex *f, struct waiting_proc *wp)
{

	FUTEX_ASSERT_LOCKED(f);
	if (wp != NULL) {
		if ((wp->wp_flags & FUTEX_WP_REMOVED) == 0)
			TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
		free(wp, M_FUTEX_WP);
	}

	FUTEXES_LOCK;
	if (--f->f_refcount == 0) {
		LIST_REMOVE(f, f_list);
		FUTEXES_UNLOCK;
		FUTEX_UNLOCK(f);

		LINUX_CTR2(sys_futex, "futex_put destroy uaddr %p ref %d",
		    f->f_uaddr, f->f_refcount);
		FUTEX_DESTROY(f);
		free(f, M_FUTEX);
		return;
	}

	LINUX_CTR2(sys_futex, "futex_put uaddr %p ref %d",
	    f->f_uaddr, f->f_refcount);
	FUTEXES_UNLOCK;
	FUTEX_UNLOCK(f);
}

static int
futex_get0(uint32_t *uaddr, struct futex **newf, uint32_t flags)
{
	struct futex *f, *tmpf;

	*newf = tmpf = NULL;

retry:
	FUTEXES_LOCK;
	LIST_FOREACH(f, &futex_list, f_list) {
		if (f->f_uaddr == uaddr) {
			if (tmpf != NULL) {
				/*
				 * Another thread created this futex while we
				 * were allocating one; discard ours.
				 */
				FUTEX_UNLOCK(tmpf);
				FUTEX_DESTROY(tmpf);
				free(tmpf, M_FUTEX);
			}
			if (flags & FUTEX_DONTEXISTS) {
				FUTEXES_UNLOCK;
				return (EINVAL);
			}

			/*
			 * Increment the refcount of the found futex to
			 * prevent it from being deallocated before
			 * FUTEX_LOCK().
			 */
			++f->f_refcount;
			FUTEXES_UNLOCK;

			FUTEX_LOCK(f);
			*newf = f;
			LINUX_CTR2(sys_futex, "futex_get uaddr %p ref %d",
			    uaddr, f->f_refcount);
			return (0);
		}
	}

	if (flags & FUTEX_DONTCREATE) {
		FUTEXES_UNLOCK;
		LINUX_CTR1(sys_futex, "futex_get uaddr %p null", uaddr);
		return (0);
	}

	if (tmpf == NULL) {
		FUTEXES_UNLOCK;
		tmpf = malloc(sizeof(*tmpf), M_FUTEX, M_WAITOK | M_ZERO);
		tmpf->f_uaddr = uaddr;
		tmpf->f_refcount = 1;
		tmpf->f_bitset = FUTEX_BITSET_MATCH_ANY;
		FUTEX_INIT(tmpf);
		TAILQ_INIT(&tmpf->f_waiting_proc);

		/*
		 * Lock the new futex before inserting it into futex_list
		 * to prevent its use by others.
		 */
		FUTEX_LOCK(tmpf);
		goto retry;
	}

	LIST_INSERT_HEAD(&futex_list, tmpf, f_list);
	FUTEXES_UNLOCK;

	LINUX_CTR2(sys_futex, "futex_get uaddr %p ref %d new",
	    uaddr, tmpf->f_refcount);
	*newf = tmpf;
	return (0);
}

static int
futex_get(uint32_t *uaddr, struct waiting_proc **wp, struct futex **f,
    uint32_t flags)
{
	int error;

	if (flags & FUTEX_CREATE_WP) {
		*wp = malloc(sizeof(struct waiting_proc), M_FUTEX_WP, M_WAITOK);
		(*wp)->wp_flags = 0;
	}
	error = futex_get0(uaddr, f, flags);
	if (error) {
		if (flags & FUTEX_CREATE_WP)
			free(*wp, M_FUTEX_WP);
		return (error);
	}
	if (flags & FUTEX_CREATE_WP) {
		TAILQ_INSERT_HEAD(&(*f)->f_waiting_proc, *wp, wp_list);
		(*wp)->wp_futex = *f;
	}

	return (error);
}

static int
futex_sleep(struct futex *f, struct waiting_proc *wp, int timeout)
{
	int error;

	FUTEX_ASSERT_LOCKED(f);
	LINUX_CTR4(sys_futex, "futex_sleep enter uaddr %p wp %p timo %d ref %d",
	    f->f_uaddr, wp, timeout, f->f_refcount);
	error = sx_sleep(wp, &f->f_lck, PCATCH, "futex", timeout);
	if (wp->wp_flags & FUTEX_WP_REQUEUED) {
		KASSERT(f != wp->wp_futex, ("futex != wp_futex"));
		LINUX_CTR5(sys_futex, "futex_sleep out error %d uaddr %p wp"
		    " %p requeued uaddr %p ref %d",
		    error, f->f_uaddr, wp, wp->wp_futex->f_uaddr,
		    wp->wp_futex->f_refcount);
		futex_put(f, NULL);
		f = wp->wp_futex;
		FUTEX_LOCK(f);
	} else
		LINUX_CTR3(sys_futex, "futex_sleep out error %d uaddr %p wp %p",
		    error, f->f_uaddr, wp);

	futex_put(f, wp);
	return (error);
}

static int
futex_wake(struct futex *f, int n, uint32_t bitset)
{
	struct waiting_proc *wp, *wpt;
	int count = 0;

	if (bitset == 0)
		return (EINVAL);

	FUTEX_ASSERT_LOCKED(f);
	TAILQ_FOREACH_SAFE(wp, &f->f_waiting_proc, wp_list, wpt) {
		LINUX_CTR3(sys_futex, "futex_wake uaddr %p wp %p ref %d",
		    f->f_uaddr, wp, f->f_refcount);
		/*
		 * Unless we find a matching bit in
		 * the bitset, continue searching.
		 */
		if (!(wp->wp_futex->f_bitset & bitset))
			continue;

		wp->wp_flags |= FUTEX_WP_REMOVED;
		TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
		wakeup_one(wp);
		if (++count == n)
			break;
	}

	return (count);
}

static int
futex_requeue(struct futex *f, int n, struct futex *f2, int n2)
{
	struct waiting_proc *wp, *wpt;
	int count = 0;

	FUTEX_ASSERT_LOCKED(f);
	FUTEX_ASSERT_LOCKED(f2);

	TAILQ_FOREACH_SAFE(wp, &f->f_waiting_proc, wp_list, wpt) {
		if (++count <= n) {
			LINUX_CTR2(sys_futex, "futex_req_wake uaddr %p wp %p",
			    f->f_uaddr, wp);
			wp->wp_flags |= FUTEX_WP_REMOVED;
			TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
			wakeup_one(wp);
		} else {
			LINUX_CTR3(sys_futex, "futex_requeue uaddr %p wp %p to %p",
			    f->f_uaddr, wp, f2->f_uaddr);
			wp->wp_flags |= FUTEX_WP_REQUEUED;
			/* Move wp to the wp_list of the f2 futex. */
			TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
			TAILQ_INSERT_HEAD(&f2->f_waiting_proc, wp, wp_list);

			/*
			 * A thread that sleeps on wp must acquire the f2 lock
			 * after waking, so increment the refcount of f2 to
			 * prevent its premature deallocation.
			 */
			wp->wp_futex = f2;
			FUTEXES_LOCK;
			++f2->f_refcount;
			FUTEXES_UNLOCK;
			if (count - n >= n2)
				break;
		}
	}

	return (count);
}
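
/*
 * Note on timeouts (summary of futex_wait() below): the optional Linux
 * timespec is converted to a tick count for sx_sleep(); a NULL timeout
 * sleeps until the thread is explicitly woken, and EWOULDBLOCK from the
 * sleep is reported to the Linux caller as ETIMEDOUT.
 */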

static int
futex_wait(struct futex *f, struct waiting_proc *wp, struct l_timespec *ts,
    uint32_t bitset)
{
	struct l_timespec timeout;
	struct timeval tv;
	int timeout_hz;
	int error;

	if (bitset == 0)
		return (EINVAL);
	f->f_bitset = bitset;

	if (ts != NULL) {
		error = copyin(ts, &timeout, sizeof(timeout));
		if (error)
			return (error);
		TIMESPEC_TO_TIMEVAL(&tv, &timeout);
		error = itimerfix(&tv);
		if (error)
			return (error);
		timeout_hz = tvtohz(&tv);
	} else
		timeout_hz = 0;

	error = futex_sleep(f, wp, timeout_hz);
	if (error == EWOULDBLOCK)
		error = ETIMEDOUT;

	return (error);
}

/*
 * Decode and apply a FUTEX_WAKE_OP operation: op and cmp select the atomic
 * operation and the comparison, while oparg and cmparg are their 12-bit
 * signed arguments packed into encoded_op.
 */
static int
futex_atomic_op(struct thread *td, int encoded_op, uint32_t *uaddr)
{
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
	int oldval = 0, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

#ifdef DEBUG
	if (ldebug(sys_futex))
		printf("futex_atomic_op: op = %d, cmp = %d, oparg = %x, "
		    "cmparg = %x, uaddr = %p\n",
		    op, cmp, oparg, cmparg, uaddr);
#endif
	/* XXX: Linux verifies access here and returns EFAULT. */

	switch (op) {
	case FUTEX_OP_SET:
		ret = futex_xchgl(oparg, uaddr, &oldval);
		break;
	case FUTEX_OP_ADD:
		ret = futex_addl(oparg, uaddr, &oldval);
		break;
	case FUTEX_OP_OR:
		ret = futex_orl(oparg, uaddr, &oldval);
		break;
	case FUTEX_OP_ANDN:
		ret = futex_andl(~oparg, uaddr, &oldval);
		break;
	case FUTEX_OP_XOR:
		ret = futex_xorl(oparg, uaddr, &oldval);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret)
		return (ret);

	switch (cmp) {
	case FUTEX_OP_CMP_EQ:
		return (oldval == cmparg);
	case FUTEX_OP_CMP_NE:
		return (oldval != cmparg);
	case FUTEX_OP_CMP_LT:
		return (oldval < cmparg);
	case FUTEX_OP_CMP_GE:
		return (oldval >= cmparg);
	case FUTEX_OP_CMP_LE:
		return (oldval <= cmparg);
	case FUTEX_OP_CMP_GT:
		return (oldval > cmparg);
	default:
		return (-ENOSYS);
	}
}

int
linux_sys_futex(struct thread *td, struct linux_sys_futex_args *args)
{
	int clockrt, nrwake, op_ret, ret, val;
	struct linux_emuldata *em;
	struct waiting_proc *wp;
	struct futex *f, *f2;
	int error;

	/*
	 * Our implementation provides only private futexes.  Most
	 * applications should use private futexes but don't claim so.
	 * Therefore we treat all futexes as private by clearing the
	 * FUTEX_PRIVATE_FLAG.  This works in most cases (i.e. when futexes
	 * are not shared through a file descriptor or between different
	 * processes).
	 */
	args->op = args->op & ~LINUX_FUTEX_PRIVATE_FLAG;

	/*
	 * Switching between CLOCK_MONOTONIC and CLOCK_REALTIME is not
	 * supported yet.  Linux, however, forbids the use of
	 * FUTEX_CLOCK_REALTIME with any op except FUTEX_WAIT_BITSET and
	 * FUTEX_WAIT_REQUEUE_PI.
	 */
	clockrt = args->op & LINUX_FUTEX_CLOCK_REALTIME;
	args->op = args->op & ~LINUX_FUTEX_CLOCK_REALTIME;
	if (clockrt && args->op != LINUX_FUTEX_WAIT_BITSET &&
	    args->op != LINUX_FUTEX_WAIT_REQUEUE_PI)
		return (ENOSYS);

	error = 0;
	f = f2 = NULL;

	switch (args->op) {
	case LINUX_FUTEX_WAIT:
		args->val3 = FUTEX_BITSET_MATCH_ANY;
		/* FALLTHROUGH */

	case LINUX_FUTEX_WAIT_BITSET:

		LINUX_CTR3(sys_futex, "WAIT uaddr %p val %d val3 %d",
		    args->uaddr, args->val, args->val3);
#ifdef DEBUG
		if (ldebug(sys_futex))
			printf(ARGS(sys_futex,
			    "futex_wait uaddr %p val %d val3 %d"),
			    args->uaddr, args->val, args->val3);
#endif
		error = futex_get(args->uaddr, &wp, &f, FUTEX_CREATE_WP);
		if (error)
			return (error);
		error = copyin(args->uaddr, &val, sizeof(val));
		if (error) {
			LINUX_CTR1(sys_futex, "WAIT copyin failed %d",
			    error);
			futex_put(f, wp);
			return (error);
		}
		if (val != args->val) {
			LINUX_CTR4(sys_futex,
			    "WAIT uaddr %p val %d != uval %d val3 %d",
			    args->uaddr, args->val, val, args->val3);
			futex_put(f, wp);
			return (EWOULDBLOCK);
		}

		error = futex_wait(f, wp, args->timeout, args->val3);
		break;

	case LINUX_FUTEX_WAKE:
		args->val3 = FUTEX_BITSET_MATCH_ANY;
		/* FALLTHROUGH */

	case LINUX_FUTEX_WAKE_BITSET:

		LINUX_CTR3(sys_futex, "WAKE uaddr %p val %d val3 %d",
		    args->uaddr, args->val, args->val3);

		/*
		 * XXX: Linux is able to cope with different addresses
		 * corresponding to the same mapped memory in the sleeping
		 * and waker process(es).
		 */
#ifdef DEBUG
		if (ldebug(sys_futex))
			printf(ARGS(sys_futex, "futex_wake uaddr %p val %d val3 %d"),
			    args->uaddr, args->val, args->val3);
#endif
		error = futex_get(args->uaddr, NULL, &f, FUTEX_DONTCREATE);
		if (error)
			return (error);
		if (f == NULL) {
			td->td_retval[0] = 0;
			return (error);
		}
		td->td_retval[0] = futex_wake(f, args->val, args->val3);
		futex_put(f, NULL);
		break;

	case LINUX_FUTEX_CMP_REQUEUE:

		LINUX_CTR5(sys_futex, "CMP_REQUEUE uaddr %p "
		    "val %d val3 %d uaddr2 %p val2 %d",
		    args->uaddr, args->val, args->val3, args->uaddr2,
		    (int)(unsigned long)args->timeout);

#ifdef DEBUG
		if (ldebug(sys_futex))
			printf(ARGS(sys_futex, "futex_cmp_requeue uaddr %p "
			    "val %d val3 %d uaddr2 %p val2 %d"),
			    args->uaddr, args->val, args->val3, args->uaddr2,
			    (int)(unsigned long)args->timeout);
#endif

		/*
		 * Linux allows this; we do not, as it is an incorrect
		 * use of the declared ABI, so return EINVAL.
		 */
		if (args->uaddr == args->uaddr2)
			return (EINVAL);
		error = futex_get0(args->uaddr, &f, 0);
		if (error)
			return (error);

		/*
		 * To avoid deadlocks, return EINVAL if the second futex
		 * already exists at this point.
		 *
		 * Glibc falls back to FUTEX_WAKE in case of any error
		 * returned by FUTEX_CMP_REQUEUE.
		 */
		error = futex_get0(args->uaddr2, &f2, FUTEX_DONTEXISTS);
		if (error) {
			futex_put(f, NULL);
			return (error);
		}
		error = copyin(args->uaddr, &val, sizeof(val));
		if (error) {
			LINUX_CTR1(sys_futex, "CMP_REQUEUE copyin failed %d",
			    error);
			futex_put(f2, NULL);
			futex_put(f, NULL);
			return (error);
		}
		if (val != args->val3) {
			LINUX_CTR2(sys_futex, "CMP_REQUEUE val %d != uval %d",
			    args->val, val);
			futex_put(f2, NULL);
			futex_put(f, NULL);
			return (EAGAIN);
		}

		nrwake = (int)(unsigned long)args->timeout;
		td->td_retval[0] = futex_requeue(f, args->val, f2, nrwake);
		futex_put(f2, NULL);
		futex_put(f, NULL);
		break;

	case LINUX_FUTEX_WAKE_OP:

		LINUX_CTR5(sys_futex, "WAKE_OP "
		    "uaddr %p op %d val %x uaddr2 %p val3 %x",
		    args->uaddr, args->op, args->val,
		    args->uaddr2, args->val3);

#ifdef DEBUG
		if (ldebug(sys_futex))
			printf(ARGS(sys_futex, "futex_wake_op "
			    "uaddr %p op %d val %x uaddr2 %p val3 %x"),
			    args->uaddr, args->op, args->val,
			    args->uaddr2, args->val3);
#endif
		error = futex_get0(args->uaddr, &f, 0);
		if (error)
			return (error);
		if (args->uaddr != args->uaddr2)
			error = futex_get0(args->uaddr2, &f2, 0);
		if (error) {
			futex_put(f, NULL);
			return (error);
		}

		/*
		 * futex_atomic_op() returns a positive number as its result
		 * and a negative number on error.
		 */
		op_ret = futex_atomic_op(td, args->val3, args->uaddr2);

		if (op_ret < 0) {
			/* XXX: We don't handle the EFAULT yet. */
			if (op_ret != -EFAULT) {
				if (f2 != NULL)
					futex_put(f2, NULL);
				futex_put(f, NULL);
				return (-op_ret);
			}
			if (f2 != NULL)
				futex_put(f2, NULL);
			futex_put(f, NULL);
			return (EFAULT);
		}

		ret = futex_wake(f, args->val, args->val3);

		if (op_ret > 0) {
			op_ret = 0;
			nrwake = (int)(unsigned long)args->timeout;

			if (f2 != NULL)
				op_ret += futex_wake(f2, nrwake, args->val3);
			else
				op_ret += futex_wake(f, nrwake, args->val3);
			ret += op_ret;
		}
		if (f2 != NULL)
			futex_put(f2, NULL);
		futex_put(f, NULL);
		td->td_retval[0] = ret;
		break;

	case LINUX_FUTEX_LOCK_PI:
		/* not yet implemented */
		linux_msg(td,
		    "linux_sys_futex: "
		    "op LINUX_FUTEX_LOCK_PI not implemented\n");
		return (ENOSYS);

	case LINUX_FUTEX_UNLOCK_PI:
		/* not yet implemented */
		linux_msg(td,
		    "linux_sys_futex: "
		    "op LINUX_FUTEX_UNLOCK_PI not implemented\n");
		return (ENOSYS);

	case LINUX_FUTEX_TRYLOCK_PI:
		/* not yet implemented */
		linux_msg(td,
		    "linux_sys_futex: "
		    "op LINUX_FUTEX_TRYLOCK_PI not implemented\n");
		return (ENOSYS);

	case LINUX_FUTEX_REQUEUE:

		/*
		 * Glibc does not use this operation since version 2.3.3,
		 * as it is racy and was replaced by FUTEX_CMP_REQUEUE.
		 * Glibc versions prior to 2.3.3 fall back to FUTEX_WAKE when
		 * FUTEX_REQUEUE returns EINVAL.
		 */
		em = em_find(td->td_proc, EMUL_DONTLOCK);
		if ((em->flags & LINUX_XDEPR_REQUEUEOP) == 0) {
			linux_msg(td,
			    "linux_sys_futex: "
			    "unsupported futex_requeue op\n");
			em->flags |= LINUX_XDEPR_REQUEUEOP;
		}
		return (EINVAL);

	case LINUX_FUTEX_WAIT_REQUEUE_PI:
		/* not yet implemented */
		linux_msg(td,
		    "linux_sys_futex: "
		    "op FUTEX_WAIT_REQUEUE_PI not implemented\n");
		return (ENOSYS);

	case LINUX_FUTEX_CMP_REQUEUE_PI:
		/* not yet implemented */
		linux_msg(td,
		    "linux_sys_futex: "
		    "op LINUX_FUTEX_CMP_REQUEUE_PI not implemented\n");
		return (ENOSYS);

	default:
		linux_msg(td,
		    "linux_sys_futex: unknown op %d\n", args->op);
		return (ENOSYS);
	}

	return (error);
}

int
linux_set_robust_list(struct thread *td, struct linux_set_robust_list_args *args)
{
	struct linux_emuldata *em;

#ifdef DEBUG
	if (ldebug(set_robust_list))
		printf(ARGS(set_robust_list, "head %p len %d"),
		    args->head, args->len);
#endif

	if (args->len != sizeof(struct linux_robust_list_head))
		return (EINVAL);

	em = em_find(td->td_proc, EMUL_DOLOCK);
	em->robust_futexes = args->head;
	EMUL_UNLOCK(&emul_lock);

	return (0);
}

int
linux_get_robust_list(struct thread *td, struct linux_get_robust_list_args *args)
{
	struct linux_emuldata *em;
	struct linux_robust_list_head *head;
	l_size_t len = sizeof(struct linux_robust_list_head);
	int error = 0;

#ifdef DEBUG
	if (ldebug(get_robust_list))
		printf(ARGS(get_robust_list, ""));
#endif

	if (!args->pid) {
		em = em_find(td->td_proc, EMUL_DONTLOCK);
		head = em->robust_futexes;
	} else {
		struct proc *p;

		p = pfind(args->pid);
		if (p == NULL)
			return (ESRCH);

		em = em_find(p, EMUL_DONTLOCK);
		/* XXX: ptrace? */
		if (priv_check(td, PRIV_CRED_SETUID) ||
		    priv_check(td, PRIV_CRED_SETEUID) ||
		    p_candebug(td, p)) {
			PROC_UNLOCK(p);
			return (EPERM);
		}
		head = em->robust_futexes;

		PROC_UNLOCK(p);
	}

	error = copyout(&len, args->len, sizeof(l_size_t));
	if (error)
		return (EFAULT);

	error = copyout(head, args->head, sizeof(struct linux_robust_list_head));

	return (error);
}

static int
handle_futex_death(struct proc *p, uint32_t *uaddr, int pi)
{
	uint32_t uval, nval, mval;
	struct futex *f;
	int error;

retry:
	if (copyin(uaddr, &uval, 4))
		return (EFAULT);
	if ((uval & FUTEX_TID_MASK) == p->p_pid) {
		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
		nval = casuword32(uaddr, uval, mval);

		if (nval == -1)
			return (EFAULT);

		if (nval != uval)
			goto retry;

		if (!pi && (uval & FUTEX_WAITERS)) {
			error = futex_get(uaddr, NULL, &f,
			    FUTEX_DONTCREATE);
			if (error)
				return (error);
			if (f != NULL) {
				futex_wake(f, 1, FUTEX_BITSET_MATCH_ANY);
				futex_put(f, NULL);
			}
		}
	}

	return (0);
}

static int
fetch_robust_entry(struct linux_robust_list **entry,
    struct linux_robust_list **head, int *pi)
{
	l_ulong uentry;

	if (copyin((const void *)head, &uentry, sizeof(l_ulong)))
		return (EFAULT);

	*entry = (void *)(uentry & ~1UL);
	*pi = uentry & 1;

	return (0);
}
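
/*
 * Robust futex cleanup (summary of the helpers above and the function
 * below): when a process exits, each entry on its user-space robust list
 * is examined; if the lock word still carries the exiting pid in
 * FUTEX_TID_MASK, handle_futex_death() atomically tags it with
 * FUTEX_OWNER_DIED, preserves FUTEX_WAITERS and wakes one waiter so user
 * space can recover the lock.  The low bit of each list pointer, extracted
 * by fetch_robust_entry(), flags a PI futex, for which no waiter is woken
 * here.
 */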

/* This walks the list of robust futexes, releasing them. */
void
release_futexes(struct proc *p)
{
	struct linux_robust_list_head *head = NULL;
	struct linux_robust_list *entry, *next_entry, *pending;
	unsigned int limit = 2048, pi, next_pi, pip;
	struct linux_emuldata *em;
	l_long futex_offset;
	int rc;

	em = em_find(p, EMUL_DONTLOCK);
	head = em->robust_futexes;

	if (head == NULL)
		return;

	if (fetch_robust_entry(&entry, PTRIN(&head->list.next), &pi))
		return;

	if (copyin(&head->futex_offset, &futex_offset, sizeof(futex_offset)))
		return;

	if (fetch_robust_entry(&pending, PTRIN(&head->pending_list), &pip))
		return;

	while (entry != &head->list) {
		rc = fetch_robust_entry(&next_entry, PTRIN(&entry->next), &next_pi);

		/* futex_offset is a byte offset from the list entry. */
		if (entry != pending)
			if (handle_futex_death(p,
			    (uint32_t *)((caddr_t)entry + futex_offset), pi))
				return;
		if (rc)
			return;

		entry = next_entry;
		pi = next_pi;

		if (!--limit)
			break;

		sched_relinquish(curthread);
	}

	if (pending)
		handle_futex_death(p,
		    (uint32_t *)((caddr_t)pending + futex_offset), pip);
}