/* $NetBSD: linux_futex.c,v 1.7 2006/07/24 19:01:49 manu Exp $ */

/*-
 * Copyright (c) 2005 Emmanuel Dreyfus, all rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Emmanuel Dreyfus
 * 4. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#if 0
__KERNEL_RCSID(1, "$NetBSD: linux_futex.c,v 1.7 2006/07/24 19:01:49 manu Exp $");
#endif

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/umtx.h>

#ifdef COMPAT_LINUX32
#include <machine/../linux32/linux.h>
#include <machine/../linux32/linux32_proto.h>
#else
#include <machine/../linux/linux.h>
#include <machine/../linux/linux_proto.h>
#endif
#include <compat/linux/linux_emul.h>
#include <compat/linux/linux_futex.h>
#include <compat/linux/linux_util.h>

MALLOC_DEFINE(M_FUTEX, "futex", "Linux futexes");
MALLOC_DEFINE(M_FUTEX_WP, "futex wp", "Linux futexes wp");

struct futex;

struct waiting_proc {
	uint32_t	wp_flags;
	struct futex	*wp_futex;
	TAILQ_ENTRY(waiting_proc) wp_list;
};

struct futex {
	struct sx	f_lck;
	uint32_t	*f_uaddr;	/* user-supplied value, for debug */
	struct umtx_key	f_key;
	uint32_t	f_refcount;
	uint32_t	f_bitset;
	LIST_ENTRY(futex) f_list;
	TAILQ_HEAD(lf_waiting_proc, waiting_proc) f_waiting_proc;
};

struct futex_list futex_list;

#define	FUTEX_LOCK(f)		sx_xlock(&(f)->f_lck)
#define	FUTEX_UNLOCK(f)		sx_xunlock(&(f)->f_lck)
#define	FUTEX_INIT(f)		sx_init_flags(&(f)->f_lck, "ftlk", SX_DUPOK)
#define	FUTEX_DESTROY(f)	sx_destroy(&(f)->f_lck)
#define	FUTEX_ASSERT_LOCKED(f)	sx_assert(&(f)->f_lck, SA_XLOCKED)

struct mtx futex_mtx;			/* protects the futex list */
#define	FUTEXES_LOCK		mtx_lock(&futex_mtx)
#define	FUTEXES_UNLOCK		mtx_unlock(&futex_mtx)

/* flags for futex_get() */
#define	FUTEX_CREATE_WP		0x1	/* create waiting_proc */
#define	FUTEX_DONTCREATE	0x2	/* don't create futex if it does not exist */
#define	FUTEX_DONTEXISTS	0x4	/* return EINVAL if futex exists */
#define	FUTEX_SHARED		0x8	/* shared futex */

/* wp_flags */
#define	FUTEX_WP_REQUEUED	0x1	/* wp requeued - wp moved from the wp_list
					 * of the futex where the thread sleeps to
					 * the wp_list of another futex.
					 */
#define	FUTEX_WP_REMOVED	0x2	/* wp is woken up and removed from the
					 * futex wp_list to prevent a double wakeup.
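					 * futex_put() checks this flag and skips
					 * the TAILQ_REMOVE() for a wp that was
					 * already taken off the list by a waker.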
					 */

/* support.s */
int futex_xchgl(int oparg, uint32_t *uaddr, int *oldval);
int futex_addl(int oparg, uint32_t *uaddr, int *oldval);
int futex_orl(int oparg, uint32_t *uaddr, int *oldval);
int futex_andl(int oparg, uint32_t *uaddr, int *oldval);
int futex_xorl(int oparg, uint32_t *uaddr, int *oldval);

static void
futex_put(struct futex *f, struct waiting_proc *wp)
{

	FUTEX_ASSERT_LOCKED(f);
	if (wp != NULL) {
		if ((wp->wp_flags & FUTEX_WP_REMOVED) == 0)
			TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
		free(wp, M_FUTEX_WP);
	}

	FUTEXES_LOCK;
	if (--f->f_refcount == 0) {
		LIST_REMOVE(f, f_list);
		FUTEXES_UNLOCK;
		FUTEX_UNLOCK(f);

		LINUX_CTR2(sys_futex, "futex_put destroy uaddr %p ref %d",
		    f->f_uaddr, f->f_refcount);
		umtx_key_release(&f->f_key);
		FUTEX_DESTROY(f);
		free(f, M_FUTEX);
		return;
	}

	LINUX_CTR2(sys_futex, "futex_put uaddr %p ref %d",
	    f->f_uaddr, f->f_refcount);
	FUTEXES_UNLOCK;
	FUTEX_UNLOCK(f);
}

static int
futex_get0(uint32_t *uaddr, struct futex **newf, uint32_t flags)
{
	struct futex *f, *tmpf;
	struct umtx_key key;
	int error;

	*newf = tmpf = NULL;

	error = umtx_key_get(uaddr, TYPE_FUTEX, (flags & FUTEX_SHARED) ?
	    PROCESS_SHARE : THREAD_SHARE, &key);
	if (error)
		return (error);
retry:
	FUTEXES_LOCK;
	LIST_FOREACH(f, &futex_list, f_list) {
		if (umtx_key_match(&f->f_key, &key)) {
			if (tmpf != NULL) {
				FUTEX_UNLOCK(tmpf);
				FUTEX_DESTROY(tmpf);
				free(tmpf, M_FUTEX);
			}
			if (flags & FUTEX_DONTEXISTS) {
				FUTEXES_UNLOCK;
				umtx_key_release(&key);
				return (EINVAL);
			}

			/*
			 * Increment the refcount of the found futex to
			 * prevent it from being deallocated before
			 * FUTEX_LOCK().
			 */
			++f->f_refcount;
			FUTEXES_UNLOCK;
			umtx_key_release(&key);

			FUTEX_LOCK(f);
			*newf = f;
			LINUX_CTR2(sys_futex, "futex_get uaddr %p ref %d",
			    uaddr, f->f_refcount);
			return (0);
		}
	}

	if (flags & FUTEX_DONTCREATE) {
		FUTEXES_UNLOCK;
		umtx_key_release(&key);
		LINUX_CTR1(sys_futex, "futex_get uaddr %p null", uaddr);
		return (0);
	}

	if (tmpf == NULL) {
		FUTEXES_UNLOCK;
		tmpf = malloc(sizeof(*tmpf), M_FUTEX, M_WAITOK | M_ZERO);
		tmpf->f_uaddr = uaddr;
		tmpf->f_key = key;
		tmpf->f_refcount = 1;
		tmpf->f_bitset = FUTEX_BITSET_MATCH_ANY;
		FUTEX_INIT(tmpf);
		TAILQ_INIT(&tmpf->f_waiting_proc);

		/*
		 * Lock the new futex before inserting it into the futex_list
		 * to prevent its use by others.
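		 * After the goto the list is rescanned, because the futex
		 * list mutex was dropped for the M_WAITOK allocation; if a
		 * matching futex was inserted in the meantime, tmpf is
		 * destroyed in the loop above and the existing one is used.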
		 */
		FUTEX_LOCK(tmpf);
		goto retry;
	}

	LIST_INSERT_HEAD(&futex_list, tmpf, f_list);
	FUTEXES_UNLOCK;

	LINUX_CTR2(sys_futex, "futex_get uaddr %p ref %d new",
	    uaddr, tmpf->f_refcount);
	*newf = tmpf;
	return (0);
}

static int
futex_get(uint32_t *uaddr, struct waiting_proc **wp, struct futex **f,
    uint32_t flags)
{
	int error;

	if (flags & FUTEX_CREATE_WP) {
		*wp = malloc(sizeof(struct waiting_proc), M_FUTEX_WP, M_WAITOK);
		(*wp)->wp_flags = 0;
	}
	error = futex_get0(uaddr, f, flags);
	if (error) {
		if (flags & FUTEX_CREATE_WP)
			free(*wp, M_FUTEX_WP);
		return (error);
	}
	if (flags & FUTEX_CREATE_WP) {
		TAILQ_INSERT_HEAD(&(*f)->f_waiting_proc, *wp, wp_list);
		(*wp)->wp_futex = *f;
	}

	return (error);
}

static int
futex_sleep(struct futex *f, struct waiting_proc *wp, int timeout)
{
	int error;

	FUTEX_ASSERT_LOCKED(f);
	LINUX_CTR4(sys_futex, "futex_sleep enter uaddr %p wp %p timo %d ref %d",
	    f->f_uaddr, wp, timeout, f->f_refcount);
	error = sx_sleep(wp, &f->f_lck, PCATCH, "futex", timeout);
	if (wp->wp_flags & FUTEX_WP_REQUEUED) {
		KASSERT(f != wp->wp_futex, ("futex != wp_futex"));
		LINUX_CTR5(sys_futex, "futex_sleep out error %d uaddr %p wp"
		    " %p requeued uaddr %p ref %d",
		    error, f->f_uaddr, wp, wp->wp_futex->f_uaddr,
		    wp->wp_futex->f_refcount);
		futex_put(f, NULL);
		f = wp->wp_futex;
		FUTEX_LOCK(f);
	} else
		LINUX_CTR3(sys_futex, "futex_sleep out error %d uaddr %p wp %p",
		    error, f->f_uaddr, wp);

	futex_put(f, wp);
	return (error);
}

static int
futex_wake(struct futex *f, int n, uint32_t bitset)
{
	struct waiting_proc *wp, *wpt;
	int count = 0;

	if (bitset == 0)
		return (EINVAL);

	FUTEX_ASSERT_LOCKED(f);
	TAILQ_FOREACH_SAFE(wp, &f->f_waiting_proc, wp_list, wpt) {
		LINUX_CTR3(sys_futex, "futex_wake uaddr %p wp %p ref %d",
		    f->f_uaddr, wp, f->f_refcount);
		/*
		 * Unless we find a matching bit in
		 * the bitset, continue searching.
		 */
		if (!(wp->wp_futex->f_bitset & bitset))
			continue;

		wp->wp_flags |= FUTEX_WP_REMOVED;
		TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
		wakeup_one(wp);
		if (++count == n)
			break;
	}

	return (count);
}

static int
futex_requeue(struct futex *f, int n, struct futex *f2, int n2)
{
	struct waiting_proc *wp, *wpt;
	int count = 0;

	FUTEX_ASSERT_LOCKED(f);
	FUTEX_ASSERT_LOCKED(f2);

	TAILQ_FOREACH_SAFE(wp, &f->f_waiting_proc, wp_list, wpt) {
		if (++count <= n) {
			LINUX_CTR2(sys_futex, "futex_req_wake uaddr %p wp %p",
			    f->f_uaddr, wp);
			wp->wp_flags |= FUTEX_WP_REMOVED;
			TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
			wakeup_one(wp);
		} else {
			LINUX_CTR3(sys_futex, "futex_requeue uaddr %p wp %p to %p",
			    f->f_uaddr, wp, f2->f_uaddr);
			wp->wp_flags |= FUTEX_WP_REQUEUED;
			/* Move wp to the wp_list of the f2 futex. */
			TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
			TAILQ_INSERT_HEAD(&f2->f_waiting_proc, wp, wp_list);

			/*
			 * A thread sleeping on wp must acquire the f2 lock
			 * after waking, so increment the refcount of f2 to
			 * prevent its premature deallocation.
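			 * futex_sleep() notices FUTEX_WP_REQUEUED, drops its
			 * reference on the original futex and relocks
			 * wp->wp_futex instead; futex_put() then releases the
			 * reference taken here.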
			 */
			wp->wp_futex = f2;
			FUTEXES_LOCK;
			++f2->f_refcount;
			FUTEXES_UNLOCK;
			if (count - n >= n2)
				break;
		}
	}

	return (count);
}

static int
futex_wait(struct futex *f, struct waiting_proc *wp, struct l_timespec *ts,
    uint32_t bitset)
{
	struct l_timespec timeout;
	struct timeval tv;
	int timeout_hz;
	int error;

	if (bitset == 0)
		return (EINVAL);
	f->f_bitset = bitset;

	if (ts != NULL) {
		error = copyin(ts, &timeout, sizeof(timeout));
		if (error)
			return (error);
		TIMESPEC_TO_TIMEVAL(&tv, &timeout);
		error = itimerfix(&tv);
		if (error)
			return (error);
		timeout_hz = tvtohz(&tv);
	} else
		timeout_hz = 0;

	error = futex_sleep(f, wp, timeout_hz);
	if (error == EWOULDBLOCK)
		error = ETIMEDOUT;

	return (error);
}

static int
futex_atomic_op(struct thread *td, int encoded_op, uint32_t *uaddr)
{
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
	int oldval = 0, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

#ifdef DEBUG
	if (ldebug(sys_futex))
		printf("futex_atomic_op: op = %d, cmp = %d, oparg = %x, "
		    "cmparg = %x, uaddr = %p\n",
		    op, cmp, oparg, cmparg, uaddr);
#endif
	/* XXX: Linux verifies access here and returns EFAULT. */

	switch (op) {
	case FUTEX_OP_SET:
		ret = futex_xchgl(oparg, uaddr, &oldval);
		break;
	case FUTEX_OP_ADD:
		ret = futex_addl(oparg, uaddr, &oldval);
		break;
	case FUTEX_OP_OR:
		ret = futex_orl(oparg, uaddr, &oldval);
		break;
	case FUTEX_OP_ANDN:
		ret = futex_andl(~oparg, uaddr, &oldval);
		break;
	case FUTEX_OP_XOR:
		ret = futex_xorl(oparg, uaddr, &oldval);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret)
		return (ret);

	switch (cmp) {
	case FUTEX_OP_CMP_EQ:
		return (oldval == cmparg);
	case FUTEX_OP_CMP_NE:
		return (oldval != cmparg);
	case FUTEX_OP_CMP_LT:
		return (oldval < cmparg);
	case FUTEX_OP_CMP_GE:
		return (oldval >= cmparg);
	case FUTEX_OP_CMP_LE:
		return (oldval <= cmparg);
	case FUTEX_OP_CMP_GT:
		return (oldval > cmparg);
	default:
		return (-ENOSYS);
	}
}

int
linux_sys_futex(struct thread *td, struct linux_sys_futex_args *args)
{
	int clockrt, nrwake, op_ret, ret, val;
	struct linux_emuldata *em;
	struct waiting_proc *wp;
	struct futex *f, *f2;
	int error;
	uint32_t flags;

	if (args->op & LINUX_FUTEX_PRIVATE_FLAG) {
		flags = 0;
		args->op &= ~LINUX_FUTEX_PRIVATE_FLAG;
	} else
		flags = FUTEX_SHARED;

	/*
	 * Support for switching between CLOCK_MONOTONIC and CLOCK_REALTIME
	 * is not yet present.  However, Linux forbids the use of
	 * FUTEX_CLOCK_REALTIME with any op except FUTEX_WAIT_BITSET and
	 * FUTEX_WAIT_REQUEUE_PI.
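	 * Consequently, FUTEX_CLOCK_REALTIME combined with any other op is
	 * rejected with ENOSYS below.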
	 */
	clockrt = args->op & LINUX_FUTEX_CLOCK_REALTIME;
	args->op = args->op & ~LINUX_FUTEX_CLOCK_REALTIME;
	if (clockrt && args->op != LINUX_FUTEX_WAIT_BITSET &&
	    args->op != LINUX_FUTEX_WAIT_REQUEUE_PI)
		return (ENOSYS);

	error = 0;
	f = f2 = NULL;

	switch (args->op) {
	case LINUX_FUTEX_WAIT:
		args->val3 = FUTEX_BITSET_MATCH_ANY;
		/* FALLTHROUGH */

	case LINUX_FUTEX_WAIT_BITSET:

		LINUX_CTR3(sys_futex, "WAIT uaddr %p val %d val3 %d",
		    args->uaddr, args->val, args->val3);
#ifdef DEBUG
		if (ldebug(sys_futex))
			printf(ARGS(sys_futex,
			    "futex_wait uaddr %p val %d val3 %d"),
			    args->uaddr, args->val, args->val3);
#endif
		error = futex_get(args->uaddr, &wp, &f,
		    flags | FUTEX_CREATE_WP);
		if (error)
			return (error);
		error = copyin(args->uaddr, &val, sizeof(val));
		if (error) {
			LINUX_CTR1(sys_futex, "WAIT copyin failed %d",
			    error);
			futex_put(f, wp);
			return (error);
		}
		if (val != args->val) {
			LINUX_CTR4(sys_futex,
			    "WAIT uaddr %p val %d != uval %d val3 %d",
			    args->uaddr, args->val, val, args->val3);
			futex_put(f, wp);
			return (EWOULDBLOCK);
		}

		error = futex_wait(f, wp, args->timeout, args->val3);
		break;

	case LINUX_FUTEX_WAKE:
		args->val3 = FUTEX_BITSET_MATCH_ANY;
		/* FALLTHROUGH */

	case LINUX_FUTEX_WAKE_BITSET:

		LINUX_CTR3(sys_futex, "WAKE uaddr %p val %d val3 %d",
		    args->uaddr, args->val, args->val3);

		/*
		 * XXX: Linux is able to cope with different addresses
		 * corresponding to the same mapped memory in the sleeping
		 * and waker process(es).
		 */
#ifdef DEBUG
		if (ldebug(sys_futex))
			printf(ARGS(sys_futex, "futex_wake uaddr %p val %d val3 %d"),
			    args->uaddr, args->val, args->val3);
#endif
		error = futex_get(args->uaddr, NULL, &f,
		    flags | FUTEX_DONTCREATE);
		if (error)
			return (error);
		if (f == NULL) {
			td->td_retval[0] = 0;
			return (error);
		}
		td->td_retval[0] = futex_wake(f, args->val, args->val3);
		futex_put(f, NULL);
		break;

	case LINUX_FUTEX_CMP_REQUEUE:

		LINUX_CTR5(sys_futex, "CMP_REQUEUE uaddr %p "
		    "val %d val3 %d uaddr2 %p val2 %d",
		    args->uaddr, args->val, args->val3, args->uaddr2,
		    (int)(unsigned long)args->timeout);

#ifdef DEBUG
		if (ldebug(sys_futex))
			printf(ARGS(sys_futex, "futex_cmp_requeue uaddr %p "
			    "val %d val3 %d uaddr2 %p val2 %d"),
			    args->uaddr, args->val, args->val3, args->uaddr2,
			    (int)(unsigned long)args->timeout);
#endif

		/*
		 * Linux allows this; we do not, as it is an incorrect
		 * use of the declared ABI, so return EINVAL.
		 */
		if (args->uaddr == args->uaddr2)
			return (EINVAL);
		error = futex_get(args->uaddr, NULL, &f, flags);
		if (error)
			return (error);

		/*
		 * To avoid deadlocks, return EINVAL if the second futex
		 * already exists at this time.
		 *
		 * Glibc falls back to FUTEX_WAKE in case of any error
		 * returned by FUTEX_CMP_REQUEUE.
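		 * FUTEX_DONTEXISTS makes futex_get() fail with EINVAL when
		 * a futex for uaddr2 is already present on the list.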
		 */
		error = futex_get(args->uaddr2, NULL, &f2,
		    flags | FUTEX_DONTEXISTS);
		if (error) {
			futex_put(f, NULL);
			return (error);
		}
		error = copyin(args->uaddr, &val, sizeof(val));
		if (error) {
			LINUX_CTR1(sys_futex, "CMP_REQUEUE copyin failed %d",
			    error);
			futex_put(f2, NULL);
			futex_put(f, NULL);
			return (error);
		}
		if (val != args->val3) {
			LINUX_CTR2(sys_futex, "CMP_REQUEUE val %d != uval %d",
			    args->val, val);
			futex_put(f2, NULL);
			futex_put(f, NULL);
			return (EAGAIN);
		}

		nrwake = (int)(unsigned long)args->timeout;
		td->td_retval[0] = futex_requeue(f, args->val, f2, nrwake);
		futex_put(f2, NULL);
		futex_put(f, NULL);
		break;

	case LINUX_FUTEX_WAKE_OP:

		LINUX_CTR5(sys_futex, "WAKE_OP "
		    "uaddr %p op %d val %x uaddr2 %p val3 %x",
		    args->uaddr, args->op, args->val,
		    args->uaddr2, args->val3);

#ifdef DEBUG
		if (ldebug(sys_futex))
			printf(ARGS(sys_futex, "futex_wake_op "
			    "uaddr %p op %d val %x uaddr2 %p val3 %x"),
			    args->uaddr, args->op, args->val,
			    args->uaddr2, args->val3);
#endif
		error = futex_get(args->uaddr, NULL, &f, flags);
		if (error)
			return (error);
		if (args->uaddr != args->uaddr2)
			error = futex_get(args->uaddr2, NULL, &f2, flags);
		if (error) {
			futex_put(f, NULL);
			return (error);
		}

		/*
		 * This function returns positive numbers as results and
		 * negative numbers as errors.
		 */
		op_ret = futex_atomic_op(td, args->val3, args->uaddr2);

		if (op_ret < 0) {
			/* XXX: We don't handle the EFAULT yet. */
			if (op_ret != -EFAULT) {
				if (f2 != NULL)
					futex_put(f2, NULL);
				futex_put(f, NULL);
				return (-op_ret);
			}
			if (f2 != NULL)
				futex_put(f2, NULL);
			futex_put(f, NULL);
			return (EFAULT);
		}

		ret = futex_wake(f, args->val, args->val3);

		if (op_ret > 0) {
			op_ret = 0;
			nrwake = (int)(unsigned long)args->timeout;

			if (f2 != NULL)
				op_ret += futex_wake(f2, nrwake, args->val3);
			else
				op_ret += futex_wake(f, nrwake, args->val3);
			ret += op_ret;
		}
		if (f2 != NULL)
			futex_put(f2, NULL);
		futex_put(f, NULL);
		td->td_retval[0] = ret;
		break;

	case LINUX_FUTEX_LOCK_PI:
		/* not yet implemented */
		linux_msg(td,
		    "linux_sys_futex: "
		    "op LINUX_FUTEX_LOCK_PI not implemented\n");
		return (ENOSYS);

	case LINUX_FUTEX_UNLOCK_PI:
		/* not yet implemented */
		linux_msg(td,
		    "linux_sys_futex: "
		    "op LINUX_FUTEX_UNLOCK_PI not implemented\n");
		return (ENOSYS);

	case LINUX_FUTEX_TRYLOCK_PI:
		/* not yet implemented */
		linux_msg(td,
		    "linux_sys_futex: "
		    "op LINUX_FUTEX_TRYLOCK_PI not implemented\n");
		return (ENOSYS);

	case LINUX_FUTEX_REQUEUE:

		/*
		 * Glibc has not used this operation since version 2.3.3,
		 * as it is racy and was replaced by the FUTEX_CMP_REQUEUE
		 * operation.  Glibc versions prior to 2.3.3 fall back to
		 * FUTEX_WAKE when FUTEX_REQUEUE returns EINVAL.
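		 * The warning below is printed only once per process,
		 * tracked by the LINUX_XDEPR_REQUEUEOP flag in the
		 * emulation data.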
		 */
		em = em_find(td->td_proc, EMUL_DONTLOCK);
		if ((em->flags & LINUX_XDEPR_REQUEUEOP) == 0) {
			linux_msg(td,
			    "linux_sys_futex: "
			    "unsupported futex_requeue op\n");
			em->flags |= LINUX_XDEPR_REQUEUEOP;
		}
		return (EINVAL);

	case LINUX_FUTEX_WAIT_REQUEUE_PI:
		/* not yet implemented */
		linux_msg(td,
		    "linux_sys_futex: "
		    "op FUTEX_WAIT_REQUEUE_PI not implemented\n");
		return (ENOSYS);

	case LINUX_FUTEX_CMP_REQUEUE_PI:
		/* not yet implemented */
		linux_msg(td,
		    "linux_sys_futex: "
		    "op LINUX_FUTEX_CMP_REQUEUE_PI not implemented\n");
		return (ENOSYS);

	default:
		linux_msg(td,
		    "linux_sys_futex: unknown op %d\n", args->op);
		return (ENOSYS);
	}

	return (error);
}

int
linux_set_robust_list(struct thread *td, struct linux_set_robust_list_args *args)
{
	struct linux_emuldata *em;

#ifdef DEBUG
	if (ldebug(set_robust_list))
		printf(ARGS(set_robust_list, "head %p len %d"),
		    args->head, args->len);
#endif

	if (args->len != sizeof(struct linux_robust_list_head))
		return (EINVAL);

	em = em_find(td->td_proc, EMUL_DOLOCK);
	em->robust_futexes = args->head;
	EMUL_UNLOCK(&emul_lock);

	return (0);
}

int
linux_get_robust_list(struct thread *td, struct linux_get_robust_list_args *args)
{
	struct linux_emuldata *em;
	struct linux_robust_list_head *head;
	l_size_t len = sizeof(struct linux_robust_list_head);
	int error = 0;

#ifdef DEBUG
	if (ldebug(get_robust_list))
		printf(ARGS(get_robust_list, ""));
#endif

	if (!args->pid) {
		em = em_find(td->td_proc, EMUL_DONTLOCK);
		head = em->robust_futexes;
	} else {
		struct proc *p;

		p = pfind(args->pid);
		if (p == NULL)
			return (ESRCH);

		em = em_find(p, EMUL_DONTLOCK);
		/* XXX: ptrace? */
		if (priv_check(td, PRIV_CRED_SETUID) ||
		    priv_check(td, PRIV_CRED_SETEUID) ||
		    p_candebug(td, p)) {
			PROC_UNLOCK(p);
			return (EPERM);
		}
		head = em->robust_futexes;

		PROC_UNLOCK(p);
	}

	error = copyout(&len, args->len, sizeof(l_size_t));
	if (error)
		return (EFAULT);

	error = copyout(head, args->head, sizeof(struct linux_robust_list_head));

	return (error);
}

static int
handle_futex_death(struct proc *p, uint32_t *uaddr, int pi)
{
	uint32_t uval, nval, mval;
	struct futex *f;
	int error;

retry:
	if (copyin(uaddr, &uval, 4))
		return (EFAULT);
	if ((uval & FUTEX_TID_MASK) == p->p_pid) {
		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
		nval = casuword32(uaddr, uval, mval);

		if (nval == -1)
			return (EFAULT);

		if (nval != uval)
			goto retry;

		if (!pi && (uval & FUTEX_WAITERS)) {
			error = futex_get(uaddr, NULL, &f,
			    FUTEX_DONTCREATE | FUTEX_SHARED);
			if (error)
				return (error);
			if (f != NULL) {
				futex_wake(f, 1, FUTEX_BITSET_MATCH_ANY);
				futex_put(f, NULL);
			}
		}
	}

	return (0);
}

static int
fetch_robust_entry(struct linux_robust_list **entry,
    struct linux_robust_list **head, int *pi)
{
	l_ulong uentry;

	if (copyin((const void *)head, &uentry, sizeof(l_ulong)))
		return (EFAULT);

	*entry = (void *)(uentry & ~1UL);
	*pi = uentry & 1;

	return (0);
}

/*
 * This walks the list of robust futexes releasing them.
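 * The list head registered via set_robust_list() lives in user space, so
 * every entry is fetched with fetch_robust_entry()/copyin().  The walk is
 * capped at 2048 entries to guard against a corrupted or circular list;
 * handle_futex_death() marks each futex owned by the exiting process with
 * FUTEX_OWNER_DIED and wakes one waiter if any are present.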
 */
void
release_futexes(struct proc *p)
{
	struct linux_robust_list_head *head = NULL;
	struct linux_robust_list *entry, *next_entry, *pending;
	unsigned int limit = 2048, pi, next_pi, pip;
	struct linux_emuldata *em;
	l_long futex_offset;
	int rc;

	em = em_find(p, EMUL_DONTLOCK);
	head = em->robust_futexes;

	if (head == NULL)
		return;

	if (fetch_robust_entry(&entry, PTRIN(&head->list.next), &pi))
		return;

	if (copyin(&head->futex_offset, &futex_offset, sizeof(futex_offset)))
		return;

	if (fetch_robust_entry(&pending, PTRIN(&head->pending_list), &pip))
		return;

	while (entry != &head->list) {
		rc = fetch_robust_entry(&next_entry, PTRIN(&entry->next), &next_pi);

		if (entry != pending)
			if (handle_futex_death(p, (uint32_t *)entry + futex_offset, pi))
				return;
		if (rc)
			return;

		entry = next_entry;
		pi = next_pi;

		if (!--limit)
			break;

		sched_relinquish(curthread);
	}

	if (pending)
		handle_futex_death(p, (uint32_t *)pending + futex_offset, pip);
}