1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2015, 2016 The FreeBSD Foundation 5 * Copyright (c) 2004, David Xu <davidxu@freebsd.org> 6 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org> 7 * All rights reserved. 8 * 9 * Portions of this software were developed by Konstantin Belousov 10 * under sponsorship from the FreeBSD Foundation. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice unmodified, this list of conditions, and the following 17 * disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD$"); 36 37 #include "opt_umtx_profiling.h" 38 39 #include <sys/param.h> 40 #include <sys/kernel.h> 41 #include <sys/fcntl.h> 42 #include <sys/file.h> 43 #include <sys/filedesc.h> 44 #include <sys/limits.h> 45 #include <sys/lock.h> 46 #include <sys/malloc.h> 47 #include <sys/mman.h> 48 #include <sys/mutex.h> 49 #include <sys/priv.h> 50 #include <sys/proc.h> 51 #include <sys/resource.h> 52 #include <sys/resourcevar.h> 53 #include <sys/rwlock.h> 54 #include <sys/sbuf.h> 55 #include <sys/sched.h> 56 #include <sys/smp.h> 57 #include <sys/sysctl.h> 58 #include <sys/sysent.h> 59 #include <sys/systm.h> 60 #include <sys/sysproto.h> 61 #include <sys/syscallsubr.h> 62 #include <sys/taskqueue.h> 63 #include <sys/time.h> 64 #include <sys/eventhandler.h> 65 #include <sys/umtx.h> 66 #include <sys/umtxvar.h> 67 68 #include <security/mac/mac_framework.h> 69 70 #include <vm/vm.h> 71 #include <vm/vm_param.h> 72 #include <vm/pmap.h> 73 #include <vm/vm_map.h> 74 #include <vm/vm_object.h> 75 76 #include <machine/atomic.h> 77 #include <machine/cpu.h> 78 79 #include <compat/freebsd32/freebsd32.h> 80 #ifdef COMPAT_FREEBSD32 81 #include <compat/freebsd32/freebsd32_proto.h> 82 #endif 83 84 #define _UMUTEX_TRY 1 85 #define _UMUTEX_WAIT 2 86 87 #ifdef UMTX_PROFILING 88 #define UPROF_PERC_BIGGER(w, f, sw, sf) \ 89 (((w) > (sw)) || ((w) == (sw) && (f) > (sf))) 90 #endif 91 92 #define UMTXQ_LOCKED_ASSERT(uc) mtx_assert(&(uc)->uc_lock, MA_OWNED) 93 #ifdef INVARIANTS 94 #define UMTXQ_ASSERT_LOCKED_BUSY(key) do { \ 95 struct umtxq_chain *uc; \ 96 \ 97 uc = umtxq_getchain(key); \ 98 mtx_assert(&uc->uc_lock, MA_OWNED); \ 99 KASSERT(uc->uc_busy != 0, ("umtx chain is not busy")); \ 100 } while (0) 101 #else 102 #define UMTXQ_ASSERT_LOCKED_BUSY(key) do {} while (0) 103 #endif 104 105 /* 106 * Don't propagate time-sharing priority, 
there is a security reason, 107 * a user can simply introduce PI-mutex, let thread A lock the mutex, 108 * and let another thread B block on the mutex, because B is 109 * sleeping, its priority will be boosted, this causes A's priority to 110 * be boosted via priority propagating too and will never be lowered even 111 * if it is using 100%CPU, this is unfair to other processes. 112 */ 113 114 #define UPRI(td) (((td)->td_user_pri >= PRI_MIN_TIMESHARE &&\ 115 (td)->td_user_pri <= PRI_MAX_TIMESHARE) ?\ 116 PRI_MAX_TIMESHARE : (td)->td_user_pri) 117 118 #define GOLDEN_RATIO_PRIME 2654404609U 119 #ifndef UMTX_CHAINS 120 #define UMTX_CHAINS 512 121 #endif 122 #define UMTX_SHIFTS (__WORD_BIT - 9) 123 124 #define GET_SHARE(flags) \ 125 (((flags) & USYNC_PROCESS_SHARED) == 0 ? THREAD_SHARE : PROCESS_SHARE) 126 127 #define BUSY_SPINS 200 128 129 struct umtx_copyops { 130 int (*copyin_timeout)(const void *uaddr, struct timespec *tsp); 131 int (*copyin_umtx_time)(const void *uaddr, size_t size, 132 struct _umtx_time *tp); 133 int (*copyin_robust_lists)(const void *uaddr, size_t size, 134 struct umtx_robust_lists_params *rbp); 135 int (*copyout_timeout)(void *uaddr, size_t size, 136 struct timespec *tsp); 137 const size_t timespec_sz; 138 const size_t umtx_time_sz; 139 const bool compat32; 140 }; 141 142 _Static_assert(sizeof(struct umutex) == sizeof(struct umutex32), "umutex32"); 143 _Static_assert(__offsetof(struct umutex, m_spare[0]) == 144 __offsetof(struct umutex32, m_spare[0]), "m_spare32"); 145 146 int umtx_shm_vnobj_persistent = 0; 147 SYSCTL_INT(_kern_ipc, OID_AUTO, umtx_vnode_persistent, CTLFLAG_RWTUN, 148 &umtx_shm_vnobj_persistent, 0, 149 "False forces destruction of umtx attached to file, on last close"); 150 static int umtx_max_rb = 1000; 151 SYSCTL_INT(_kern_ipc, OID_AUTO, umtx_max_robust, CTLFLAG_RWTUN, 152 &umtx_max_rb, 0, 153 "Maximum number of robust mutexes allowed for each thread"); 154 155 static uma_zone_t umtx_pi_zone; 156 static struct umtxq_chain umtxq_chains[2][UMTX_CHAINS]; 157 static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory"); 158 static int umtx_pi_allocated; 159 160 static SYSCTL_NODE(_debug, OID_AUTO, umtx, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 161 "umtx debug"); 162 SYSCTL_INT(_debug_umtx, OID_AUTO, umtx_pi_allocated, CTLFLAG_RD, 163 &umtx_pi_allocated, 0, "Allocated umtx_pi"); 164 static int umtx_verbose_rb = 1; 165 SYSCTL_INT(_debug_umtx, OID_AUTO, robust_faults_verbose, CTLFLAG_RWTUN, 166 &umtx_verbose_rb, 0, 167 ""); 168 169 #ifdef UMTX_PROFILING 170 static long max_length; 171 SYSCTL_LONG(_debug_umtx, OID_AUTO, max_length, CTLFLAG_RD, &max_length, 0, "max_length"); 172 static SYSCTL_NODE(_debug_umtx, OID_AUTO, chains, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 173 "umtx chain stats"); 174 #endif 175 176 static inline void umtx_abs_timeout_init2(struct umtx_abs_timeout *timo, 177 const struct _umtx_time *umtxtime); 178 179 static void umtx_shm_init(void); 180 static void umtxq_sysinit(void *); 181 static void umtxq_hash(struct umtx_key *key); 182 static int do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags, 183 bool rb); 184 static void umtx_thread_cleanup(struct thread *td); 185 SYSINIT(umtx, SI_SUB_EVENTHANDLER+1, SI_ORDER_MIDDLE, umtxq_sysinit, NULL); 186 187 #define umtxq_signal(key, nwake) umtxq_signal_queue((key), (nwake), UMTX_SHARED_QUEUE) 188 189 static struct mtx umtx_lock; 190 191 #ifdef UMTX_PROFILING 192 static void 193 umtx_init_profiling(void) 194 { 195 struct sysctl_oid *chain_oid; 196 char chain_name[10]; 197 int i; 198 199 for (i = 0; i < 
UMTX_CHAINS; ++i) { 200 snprintf(chain_name, sizeof(chain_name), "%d", i); 201 chain_oid = SYSCTL_ADD_NODE(NULL, 202 SYSCTL_STATIC_CHILDREN(_debug_umtx_chains), OID_AUTO, 203 chain_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 204 "umtx hash stats"); 205 SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO, 206 "max_length0", CTLFLAG_RD, &umtxq_chains[0][i].max_length, 0, NULL); 207 SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO, 208 "max_length1", CTLFLAG_RD, &umtxq_chains[1][i].max_length, 0, NULL); 209 } 210 } 211 212 static int 213 sysctl_debug_umtx_chains_peaks(SYSCTL_HANDLER_ARGS) 214 { 215 char buf[512]; 216 struct sbuf sb; 217 struct umtxq_chain *uc; 218 u_int fract, i, j, tot, whole; 219 u_int sf0, sf1, sf2, sf3, sf4; 220 u_int si0, si1, si2, si3, si4; 221 u_int sw0, sw1, sw2, sw3, sw4; 222 223 sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN); 224 for (i = 0; i < 2; i++) { 225 tot = 0; 226 for (j = 0; j < UMTX_CHAINS; ++j) { 227 uc = &umtxq_chains[i][j]; 228 mtx_lock(&uc->uc_lock); 229 tot += uc->max_length; 230 mtx_unlock(&uc->uc_lock); 231 } 232 if (tot == 0) 233 sbuf_printf(&sb, "%u) Empty ", i); 234 else { 235 sf0 = sf1 = sf2 = sf3 = sf4 = 0; 236 si0 = si1 = si2 = si3 = si4 = 0; 237 sw0 = sw1 = sw2 = sw3 = sw4 = 0; 238 for (j = 0; j < UMTX_CHAINS; j++) { 239 uc = &umtxq_chains[i][j]; 240 mtx_lock(&uc->uc_lock); 241 whole = uc->max_length * 100; 242 mtx_unlock(&uc->uc_lock); 243 fract = (whole % tot) * 100; 244 if (UPROF_PERC_BIGGER(whole, fract, sw0, sf0)) { 245 sf0 = fract; 246 si0 = j; 247 sw0 = whole; 248 } else if (UPROF_PERC_BIGGER(whole, fract, sw1, 249 sf1)) { 250 sf1 = fract; 251 si1 = j; 252 sw1 = whole; 253 } else if (UPROF_PERC_BIGGER(whole, fract, sw2, 254 sf2)) { 255 sf2 = fract; 256 si2 = j; 257 sw2 = whole; 258 } else if (UPROF_PERC_BIGGER(whole, fract, sw3, 259 sf3)) { 260 sf3 = fract; 261 si3 = j; 262 sw3 = whole; 263 } else if (UPROF_PERC_BIGGER(whole, fract, sw4, 264 sf4)) { 265 sf4 = fract; 266 si4 = j; 267 sw4 = whole; 268 } 269 } 270 sbuf_printf(&sb, "queue %u:\n", i); 271 sbuf_printf(&sb, "1st: %u.%u%% idx: %u\n", sw0 / tot, 272 sf0 / tot, si0); 273 sbuf_printf(&sb, "2nd: %u.%u%% idx: %u\n", sw1 / tot, 274 sf1 / tot, si1); 275 sbuf_printf(&sb, "3rd: %u.%u%% idx: %u\n", sw2 / tot, 276 sf2 / tot, si2); 277 sbuf_printf(&sb, "4th: %u.%u%% idx: %u\n", sw3 / tot, 278 sf3 / tot, si3); 279 sbuf_printf(&sb, "5th: %u.%u%% idx: %u\n", sw4 / tot, 280 sf4 / tot, si4); 281 } 282 } 283 sbuf_trim(&sb); 284 sbuf_finish(&sb); 285 sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); 286 sbuf_delete(&sb); 287 return (0); 288 } 289 290 static int 291 sysctl_debug_umtx_chains_clear(SYSCTL_HANDLER_ARGS) 292 { 293 struct umtxq_chain *uc; 294 u_int i, j; 295 int clear, error; 296 297 clear = 0; 298 error = sysctl_handle_int(oidp, &clear, 0, req); 299 if (error != 0 || req->newptr == NULL) 300 return (error); 301 302 if (clear != 0) { 303 for (i = 0; i < 2; ++i) { 304 for (j = 0; j < UMTX_CHAINS; ++j) { 305 uc = &umtxq_chains[i][j]; 306 mtx_lock(&uc->uc_lock); 307 uc->length = 0; 308 uc->max_length = 0; 309 mtx_unlock(&uc->uc_lock); 310 } 311 } 312 } 313 return (0); 314 } 315 316 SYSCTL_PROC(_debug_umtx_chains, OID_AUTO, clear, 317 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0, 318 sysctl_debug_umtx_chains_clear, "I", 319 "Clear umtx chains statistics"); 320 SYSCTL_PROC(_debug_umtx_chains, OID_AUTO, peaks, 321 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0, 322 sysctl_debug_umtx_chains_peaks, "A", 323 "Highest peaks in chains max length"); 324 #endif 325 
326 static void 327 umtxq_sysinit(void *arg __unused) 328 { 329 int i, j; 330 331 umtx_pi_zone = uma_zcreate("umtx pi", sizeof(struct umtx_pi), 332 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 333 for (i = 0; i < 2; ++i) { 334 for (j = 0; j < UMTX_CHAINS; ++j) { 335 mtx_init(&umtxq_chains[i][j].uc_lock, "umtxql", NULL, 336 MTX_DEF | MTX_DUPOK); 337 LIST_INIT(&umtxq_chains[i][j].uc_queue[0]); 338 LIST_INIT(&umtxq_chains[i][j].uc_queue[1]); 339 LIST_INIT(&umtxq_chains[i][j].uc_spare_queue); 340 TAILQ_INIT(&umtxq_chains[i][j].uc_pi_list); 341 umtxq_chains[i][j].uc_busy = 0; 342 umtxq_chains[i][j].uc_waiters = 0; 343 #ifdef UMTX_PROFILING 344 umtxq_chains[i][j].length = 0; 345 umtxq_chains[i][j].max_length = 0; 346 #endif 347 } 348 } 349 #ifdef UMTX_PROFILING 350 umtx_init_profiling(); 351 #endif 352 mtx_init(&umtx_lock, "umtx lock", NULL, MTX_DEF); 353 umtx_shm_init(); 354 } 355 356 struct umtx_q * 357 umtxq_alloc(void) 358 { 359 struct umtx_q *uq; 360 361 uq = malloc(sizeof(struct umtx_q), M_UMTX, M_WAITOK | M_ZERO); 362 uq->uq_spare_queue = malloc(sizeof(struct umtxq_queue), M_UMTX, 363 M_WAITOK | M_ZERO); 364 TAILQ_INIT(&uq->uq_spare_queue->head); 365 TAILQ_INIT(&uq->uq_pi_contested); 366 uq->uq_inherited_pri = PRI_MAX; 367 return (uq); 368 } 369 370 void 371 umtxq_free(struct umtx_q *uq) 372 { 373 374 MPASS(uq->uq_spare_queue != NULL); 375 free(uq->uq_spare_queue, M_UMTX); 376 free(uq, M_UMTX); 377 } 378 379 static inline void 380 umtxq_hash(struct umtx_key *key) 381 { 382 unsigned n; 383 384 n = (uintptr_t)key->info.both.a + key->info.both.b; 385 key->hash = ((n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS) % UMTX_CHAINS; 386 } 387 388 struct umtxq_chain * 389 umtxq_getchain(struct umtx_key *key) 390 { 391 392 if (key->type <= TYPE_SEM) 393 return (&umtxq_chains[1][key->hash]); 394 return (&umtxq_chains[0][key->hash]); 395 } 396 397 /* 398 * Set chain to busy state when following operation 399 * may be blocked (kernel mutex can not be used). 400 */ 401 void 402 umtxq_busy(struct umtx_key *key) 403 { 404 struct umtxq_chain *uc; 405 406 uc = umtxq_getchain(key); 407 mtx_assert(&uc->uc_lock, MA_OWNED); 408 if (uc->uc_busy) { 409 #ifdef SMP 410 if (smp_cpus > 1) { 411 int count = BUSY_SPINS; 412 if (count > 0) { 413 umtxq_unlock(key); 414 while (uc->uc_busy && --count > 0) 415 cpu_spinwait(); 416 umtxq_lock(key); 417 } 418 } 419 #endif 420 while (uc->uc_busy) { 421 uc->uc_waiters++; 422 msleep(uc, &uc->uc_lock, 0, "umtxqb", 0); 423 uc->uc_waiters--; 424 } 425 } 426 uc->uc_busy = 1; 427 } 428 429 /* 430 * Unbusy a chain. 
431 */ 432 void 433 umtxq_unbusy(struct umtx_key *key) 434 { 435 struct umtxq_chain *uc; 436 437 uc = umtxq_getchain(key); 438 mtx_assert(&uc->uc_lock, MA_OWNED); 439 KASSERT(uc->uc_busy != 0, ("not busy")); 440 uc->uc_busy = 0; 441 if (uc->uc_waiters) 442 wakeup_one(uc); 443 } 444 445 void 446 umtxq_unbusy_unlocked(struct umtx_key *key) 447 { 448 449 umtxq_lock(key); 450 umtxq_unbusy(key); 451 umtxq_unlock(key); 452 } 453 454 static struct umtxq_queue * 455 umtxq_queue_lookup(struct umtx_key *key, int q) 456 { 457 struct umtxq_queue *uh; 458 struct umtxq_chain *uc; 459 460 uc = umtxq_getchain(key); 461 UMTXQ_LOCKED_ASSERT(uc); 462 LIST_FOREACH(uh, &uc->uc_queue[q], link) { 463 if (umtx_key_match(&uh->key, key)) 464 return (uh); 465 } 466 467 return (NULL); 468 } 469 470 void 471 umtxq_insert_queue(struct umtx_q *uq, int q) 472 { 473 struct umtxq_queue *uh; 474 struct umtxq_chain *uc; 475 476 uc = umtxq_getchain(&uq->uq_key); 477 UMTXQ_LOCKED_ASSERT(uc); 478 KASSERT((uq->uq_flags & UQF_UMTXQ) == 0, ("umtx_q is already on queue")); 479 uh = umtxq_queue_lookup(&uq->uq_key, q); 480 if (uh != NULL) { 481 LIST_INSERT_HEAD(&uc->uc_spare_queue, uq->uq_spare_queue, link); 482 } else { 483 uh = uq->uq_spare_queue; 484 uh->key = uq->uq_key; 485 LIST_INSERT_HEAD(&uc->uc_queue[q], uh, link); 486 #ifdef UMTX_PROFILING 487 uc->length++; 488 if (uc->length > uc->max_length) { 489 uc->max_length = uc->length; 490 if (uc->max_length > max_length) 491 max_length = uc->max_length; 492 } 493 #endif 494 } 495 uq->uq_spare_queue = NULL; 496 497 TAILQ_INSERT_TAIL(&uh->head, uq, uq_link); 498 uh->length++; 499 uq->uq_flags |= UQF_UMTXQ; 500 uq->uq_cur_queue = uh; 501 return; 502 } 503 504 void 505 umtxq_remove_queue(struct umtx_q *uq, int q) 506 { 507 struct umtxq_chain *uc; 508 struct umtxq_queue *uh; 509 510 uc = umtxq_getchain(&uq->uq_key); 511 UMTXQ_LOCKED_ASSERT(uc); 512 if (uq->uq_flags & UQF_UMTXQ) { 513 uh = uq->uq_cur_queue; 514 TAILQ_REMOVE(&uh->head, uq, uq_link); 515 uh->length--; 516 uq->uq_flags &= ~UQF_UMTXQ; 517 if (TAILQ_EMPTY(&uh->head)) { 518 KASSERT(uh->length == 0, 519 ("inconsistent umtxq_queue length")); 520 #ifdef UMTX_PROFILING 521 uc->length--; 522 #endif 523 LIST_REMOVE(uh, link); 524 } else { 525 uh = LIST_FIRST(&uc->uc_spare_queue); 526 KASSERT(uh != NULL, ("uc_spare_queue is empty")); 527 LIST_REMOVE(uh, link); 528 } 529 uq->uq_spare_queue = uh; 530 uq->uq_cur_queue = NULL; 531 } 532 } 533 534 /* 535 * Check if there are multiple waiters 536 */ 537 int 538 umtxq_count(struct umtx_key *key) 539 { 540 struct umtxq_queue *uh; 541 542 UMTXQ_LOCKED_ASSERT(umtxq_getchain(key)); 543 uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE); 544 if (uh != NULL) 545 return (uh->length); 546 return (0); 547 } 548 549 /* 550 * Check if there are multiple PI waiters and returns first 551 * waiter. 552 */ 553 static int 554 umtxq_count_pi(struct umtx_key *key, struct umtx_q **first) 555 { 556 struct umtxq_queue *uh; 557 558 *first = NULL; 559 UMTXQ_LOCKED_ASSERT(umtxq_getchain(key)); 560 uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE); 561 if (uh != NULL) { 562 *first = TAILQ_FIRST(&uh->head); 563 return (uh->length); 564 } 565 return (0); 566 } 567 568 /* 569 * Wake up threads waiting on an userland object by a bit mask. 
570 */ 571 int 572 umtxq_signal_mask(struct umtx_key *key, int n_wake, u_int bitset) 573 { 574 struct umtxq_queue *uh; 575 struct umtx_q *uq, *uq_temp; 576 int ret; 577 578 ret = 0; 579 UMTXQ_LOCKED_ASSERT(umtxq_getchain(key)); 580 uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE); 581 if (uh == NULL) 582 return (0); 583 TAILQ_FOREACH_SAFE(uq, &uh->head, uq_link, uq_temp) { 584 if ((uq->uq_bitset & bitset) == 0) 585 continue; 586 umtxq_remove_queue(uq, UMTX_SHARED_QUEUE); 587 wakeup_one(uq); 588 if (++ret >= n_wake) 589 break; 590 } 591 return (ret); 592 } 593 594 /* 595 * Wake up threads waiting on an userland object. 596 */ 597 598 static int 599 umtxq_signal_queue(struct umtx_key *key, int n_wake, int q) 600 { 601 struct umtxq_queue *uh; 602 struct umtx_q *uq; 603 int ret; 604 605 ret = 0; 606 UMTXQ_LOCKED_ASSERT(umtxq_getchain(key)); 607 uh = umtxq_queue_lookup(key, q); 608 if (uh != NULL) { 609 while ((uq = TAILQ_FIRST(&uh->head)) != NULL) { 610 umtxq_remove_queue(uq, q); 611 wakeup(uq); 612 if (++ret >= n_wake) 613 return (ret); 614 } 615 } 616 return (ret); 617 } 618 619 /* 620 * Wake up specified thread. 621 */ 622 static inline void 623 umtxq_signal_thread(struct umtx_q *uq) 624 { 625 626 UMTXQ_LOCKED_ASSERT(umtxq_getchain(&uq->uq_key)); 627 umtxq_remove(uq); 628 wakeup(uq); 629 } 630 631 /* 632 * Wake up a maximum of n_wake threads that are waiting on an userland 633 * object identified by key. The remaining threads are removed from queue 634 * identified by key and added to the queue identified by key2 (requeued). 635 * The n_requeue specifies an upper limit on the number of threads that 636 * are requeued to the second queue. 637 */ 638 int 639 umtxq_requeue(struct umtx_key *key, int n_wake, struct umtx_key *key2, 640 int n_requeue) 641 { 642 struct umtxq_queue *uh; 643 struct umtx_q *uq, *uq_temp; 644 int ret; 645 646 ret = 0; 647 UMTXQ_LOCKED_ASSERT(umtxq_getchain(key)); 648 UMTXQ_LOCKED_ASSERT(umtxq_getchain(key2)); 649 uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE); 650 if (uh == NULL) 651 return (0); 652 TAILQ_FOREACH_SAFE(uq, &uh->head, uq_link, uq_temp) { 653 if (++ret <= n_wake) { 654 umtxq_remove(uq); 655 wakeup_one(uq); 656 } else { 657 umtxq_remove(uq); 658 uq->uq_key = *key2; 659 umtxq_insert(uq); 660 if (ret - n_wake == n_requeue) 661 break; 662 } 663 } 664 return (ret); 665 } 666 667 static inline int 668 tstohz(const struct timespec *tsp) 669 { 670 struct timeval tv; 671 672 TIMESPEC_TO_TIMEVAL(&tv, tsp); 673 return tvtohz(&tv); 674 } 675 676 void 677 umtx_abs_timeout_init(struct umtx_abs_timeout *timo, int clockid, 678 int absolute, const struct timespec *timeout) 679 { 680 681 timo->clockid = clockid; 682 if (!absolute) { 683 timo->is_abs_real = false; 684 kern_clock_gettime(curthread, timo->clockid, &timo->cur); 685 timespecadd(&timo->cur, timeout, &timo->end); 686 } else { 687 timo->end = *timeout; 688 timo->is_abs_real = clockid == CLOCK_REALTIME || 689 clockid == CLOCK_REALTIME_FAST || 690 clockid == CLOCK_REALTIME_PRECISE || 691 clockid == CLOCK_SECOND; 692 } 693 } 694 695 static void 696 umtx_abs_timeout_init2(struct umtx_abs_timeout *timo, 697 const struct _umtx_time *umtxtime) 698 { 699 700 umtx_abs_timeout_init(timo, umtxtime->_clockid, 701 (umtxtime->_flags & UMTX_ABSTIME) != 0, &umtxtime->_timeout); 702 } 703 704 static int 705 umtx_abs_timeout_getsbt(struct umtx_abs_timeout *timo, sbintime_t *sbt, 706 int *flags) 707 { 708 struct bintime bt, bbt; 709 struct timespec tts; 710 711 switch (timo->clockid) { 712 713 /* Clocks that can be converted into 
absolute time. */ 714 case CLOCK_REALTIME: 715 case CLOCK_REALTIME_PRECISE: 716 case CLOCK_REALTIME_FAST: 717 case CLOCK_MONOTONIC: 718 case CLOCK_MONOTONIC_PRECISE: 719 case CLOCK_MONOTONIC_FAST: 720 case CLOCK_UPTIME: 721 case CLOCK_UPTIME_PRECISE: 722 case CLOCK_UPTIME_FAST: 723 case CLOCK_SECOND: 724 timespec2bintime(&timo->end, &bt); 725 switch (timo->clockid) { 726 case CLOCK_REALTIME: 727 case CLOCK_REALTIME_PRECISE: 728 case CLOCK_REALTIME_FAST: 729 case CLOCK_SECOND: 730 getboottimebin(&bbt); 731 bintime_sub(&bt, &bbt); 732 break; 733 } 734 if (bt.sec < 0) 735 return (ETIMEDOUT); 736 if (bt.sec >= (SBT_MAX >> 32)) { 737 *sbt = 0; 738 *flags = 0; 739 return (0); 740 } 741 *sbt = bttosbt(bt); 742 switch (timo->clockid) { 743 case CLOCK_REALTIME_FAST: 744 case CLOCK_MONOTONIC_FAST: 745 case CLOCK_UPTIME_FAST: 746 *sbt += tc_tick_sbt; 747 break; 748 case CLOCK_SECOND: 749 *sbt += SBT_1S; 750 break; 751 } 752 *flags = C_ABSOLUTE; 753 return (0); 754 755 /* Clocks that has to be periodically polled. */ 756 case CLOCK_VIRTUAL: 757 case CLOCK_PROF: 758 case CLOCK_THREAD_CPUTIME_ID: 759 case CLOCK_PROCESS_CPUTIME_ID: 760 default: 761 kern_clock_gettime(curthread, timo->clockid, &timo->cur); 762 if (timespeccmp(&timo->end, &timo->cur, <=)) 763 return (ETIMEDOUT); 764 timespecsub(&timo->end, &timo->cur, &tts); 765 *sbt = tick_sbt * tstohz(&tts); 766 *flags = C_HARDCLOCK; 767 return (0); 768 } 769 } 770 771 static uint32_t 772 umtx_unlock_val(uint32_t flags, bool rb) 773 { 774 775 if (rb) 776 return (UMUTEX_RB_OWNERDEAD); 777 else if ((flags & UMUTEX_NONCONSISTENT) != 0) 778 return (UMUTEX_RB_NOTRECOV); 779 else 780 return (UMUTEX_UNOWNED); 781 782 } 783 784 /* 785 * Put thread into sleep state, before sleeping, check if 786 * thread was removed from umtx queue. 787 */ 788 int 789 umtxq_sleep(struct umtx_q *uq, const char *wmesg, 790 struct umtx_abs_timeout *timo) 791 { 792 struct umtxq_chain *uc; 793 sbintime_t sbt = 0; 794 int error, flags = 0; 795 796 uc = umtxq_getchain(&uq->uq_key); 797 UMTXQ_LOCKED_ASSERT(uc); 798 for (;;) { 799 if (!(uq->uq_flags & UQF_UMTXQ)) { 800 error = 0; 801 break; 802 } 803 if (timo != NULL) { 804 if (timo->is_abs_real) 805 curthread->td_rtcgen = 806 atomic_load_acq_int(&rtc_generation); 807 error = umtx_abs_timeout_getsbt(timo, &sbt, &flags); 808 if (error != 0) 809 break; 810 } 811 error = msleep_sbt(uq, &uc->uc_lock, PCATCH | PDROP, wmesg, 812 sbt, 0, flags); 813 uc = umtxq_getchain(&uq->uq_key); 814 mtx_lock(&uc->uc_lock); 815 if (error == EINTR || error == ERESTART) 816 break; 817 if (error == EWOULDBLOCK && (flags & C_ABSOLUTE) != 0) { 818 error = ETIMEDOUT; 819 break; 820 } 821 } 822 823 curthread->td_rtcgen = 0; 824 return (error); 825 } 826 827 /* 828 * Convert userspace address into unique logical address. 
829 */ 830 int 831 umtx_key_get(const void *addr, int type, int share, struct umtx_key *key) 832 { 833 struct thread *td = curthread; 834 vm_map_t map; 835 vm_map_entry_t entry; 836 vm_pindex_t pindex; 837 vm_prot_t prot; 838 boolean_t wired; 839 840 key->type = type; 841 if (share == THREAD_SHARE) { 842 key->shared = 0; 843 key->info.private.vs = td->td_proc->p_vmspace; 844 key->info.private.addr = (uintptr_t)addr; 845 } else { 846 MPASS(share == PROCESS_SHARE || share == AUTO_SHARE); 847 map = &td->td_proc->p_vmspace->vm_map; 848 if (vm_map_lookup(&map, (vm_offset_t)addr, VM_PROT_WRITE, 849 &entry, &key->info.shared.object, &pindex, &prot, 850 &wired) != KERN_SUCCESS) { 851 return (EFAULT); 852 } 853 854 if ((share == PROCESS_SHARE) || 855 (share == AUTO_SHARE && 856 VM_INHERIT_SHARE == entry->inheritance)) { 857 key->shared = 1; 858 key->info.shared.offset = (vm_offset_t)addr - 859 entry->start + entry->offset; 860 vm_object_reference(key->info.shared.object); 861 } else { 862 key->shared = 0; 863 key->info.private.vs = td->td_proc->p_vmspace; 864 key->info.private.addr = (uintptr_t)addr; 865 } 866 vm_map_lookup_done(map, entry); 867 } 868 869 umtxq_hash(key); 870 return (0); 871 } 872 873 /* 874 * Release key. 875 */ 876 void 877 umtx_key_release(struct umtx_key *key) 878 { 879 if (key->shared) 880 vm_object_deallocate(key->info.shared.object); 881 } 882 883 #ifdef COMPAT_FREEBSD10 884 /* 885 * Lock a umtx object. 886 */ 887 static int 888 do_lock_umtx(struct thread *td, struct umtx *umtx, u_long id, 889 const struct timespec *timeout) 890 { 891 struct umtx_abs_timeout timo; 892 struct umtx_q *uq; 893 u_long owner; 894 u_long old; 895 int error = 0; 896 897 uq = td->td_umtxq; 898 if (timeout != NULL) 899 umtx_abs_timeout_init(&timo, CLOCK_REALTIME, 0, timeout); 900 901 /* 902 * Care must be exercised when dealing with umtx structure. It 903 * can fault on any access. 904 */ 905 for (;;) { 906 /* 907 * Try the uncontested case. This should be done in userland. 908 */ 909 owner = casuword(&umtx->u_owner, UMTX_UNOWNED, id); 910 911 /* The acquire succeeded. */ 912 if (owner == UMTX_UNOWNED) 913 return (0); 914 915 /* The address was invalid. */ 916 if (owner == -1) 917 return (EFAULT); 918 919 /* If no one owns it but it is contested try to acquire it. */ 920 if (owner == UMTX_CONTESTED) { 921 owner = casuword(&umtx->u_owner, 922 UMTX_CONTESTED, id | UMTX_CONTESTED); 923 924 if (owner == UMTX_CONTESTED) 925 return (0); 926 927 /* The address was invalid. */ 928 if (owner == -1) 929 return (EFAULT); 930 931 error = thread_check_susp(td, false); 932 if (error != 0) 933 break; 934 935 /* If this failed the lock has changed, restart. */ 936 continue; 937 } 938 939 /* 940 * If we caught a signal, we have retried and now 941 * exit immediately. 942 */ 943 if (error != 0) 944 break; 945 946 if ((error = umtx_key_get(umtx, TYPE_SIMPLE_LOCK, 947 AUTO_SHARE, &uq->uq_key)) != 0) 948 return (error); 949 950 umtxq_lock(&uq->uq_key); 951 umtxq_busy(&uq->uq_key); 952 umtxq_insert(uq); 953 umtxq_unbusy(&uq->uq_key); 954 umtxq_unlock(&uq->uq_key); 955 956 /* 957 * Set the contested bit so that a release in user space 958 * knows to use the system call for unlock. If this fails 959 * either some one else has acquired the lock or it has been 960 * released. 961 */ 962 old = casuword(&umtx->u_owner, owner, owner | UMTX_CONTESTED); 963 964 /* The address was invalid. 
*/ 965 if (old == -1) { 966 umtxq_lock(&uq->uq_key); 967 umtxq_remove(uq); 968 umtxq_unlock(&uq->uq_key); 969 umtx_key_release(&uq->uq_key); 970 return (EFAULT); 971 } 972 973 /* 974 * We set the contested bit, sleep. Otherwise the lock changed 975 * and we need to retry or we lost a race to the thread 976 * unlocking the umtx. 977 */ 978 umtxq_lock(&uq->uq_key); 979 if (old == owner) 980 error = umtxq_sleep(uq, "umtx", timeout == NULL ? NULL : 981 &timo); 982 umtxq_remove(uq); 983 umtxq_unlock(&uq->uq_key); 984 umtx_key_release(&uq->uq_key); 985 986 if (error == 0) 987 error = thread_check_susp(td, false); 988 } 989 990 if (timeout == NULL) { 991 /* Mutex locking is restarted if it is interrupted. */ 992 if (error == EINTR) 993 error = ERESTART; 994 } else { 995 /* Timed-locking is not restarted. */ 996 if (error == ERESTART) 997 error = EINTR; 998 } 999 return (error); 1000 } 1001 1002 /* 1003 * Unlock a umtx object. 1004 */ 1005 static int 1006 do_unlock_umtx(struct thread *td, struct umtx *umtx, u_long id) 1007 { 1008 struct umtx_key key; 1009 u_long owner; 1010 u_long old; 1011 int error; 1012 int count; 1013 1014 /* 1015 * Make sure we own this mtx. 1016 */ 1017 owner = fuword(__DEVOLATILE(u_long *, &umtx->u_owner)); 1018 if (owner == -1) 1019 return (EFAULT); 1020 1021 if ((owner & ~UMTX_CONTESTED) != id) 1022 return (EPERM); 1023 1024 /* This should be done in userland */ 1025 if ((owner & UMTX_CONTESTED) == 0) { 1026 old = casuword(&umtx->u_owner, owner, UMTX_UNOWNED); 1027 if (old == -1) 1028 return (EFAULT); 1029 if (old == owner) 1030 return (0); 1031 owner = old; 1032 } 1033 1034 /* We should only ever be in here for contested locks */ 1035 if ((error = umtx_key_get(umtx, TYPE_SIMPLE_LOCK, AUTO_SHARE, 1036 &key)) != 0) 1037 return (error); 1038 1039 umtxq_lock(&key); 1040 umtxq_busy(&key); 1041 count = umtxq_count(&key); 1042 umtxq_unlock(&key); 1043 1044 /* 1045 * When unlocking the umtx, it must be marked as unowned if 1046 * there is zero or one thread only waiting for it. 1047 * Otherwise, it must be marked as contested. 1048 */ 1049 old = casuword(&umtx->u_owner, owner, 1050 count <= 1 ? UMTX_UNOWNED : UMTX_CONTESTED); 1051 umtxq_lock(&key); 1052 umtxq_signal(&key,1); 1053 umtxq_unbusy(&key); 1054 umtxq_unlock(&key); 1055 umtx_key_release(&key); 1056 if (old == -1) 1057 return (EFAULT); 1058 if (old != owner) 1059 return (EINVAL); 1060 return (0); 1061 } 1062 1063 #ifdef COMPAT_FREEBSD32 1064 1065 /* 1066 * Lock a umtx object. 1067 */ 1068 static int 1069 do_lock_umtx32(struct thread *td, uint32_t *m, uint32_t id, 1070 const struct timespec *timeout) 1071 { 1072 struct umtx_abs_timeout timo; 1073 struct umtx_q *uq; 1074 uint32_t owner; 1075 uint32_t old; 1076 int error = 0; 1077 1078 uq = td->td_umtxq; 1079 1080 if (timeout != NULL) 1081 umtx_abs_timeout_init(&timo, CLOCK_REALTIME, 0, timeout); 1082 1083 /* 1084 * Care must be exercised when dealing with umtx structure. It 1085 * can fault on any access. 1086 */ 1087 for (;;) { 1088 /* 1089 * Try the uncontested case. This should be done in userland. 1090 */ 1091 owner = casuword32(m, UMUTEX_UNOWNED, id); 1092 1093 /* The acquire succeeded. */ 1094 if (owner == UMUTEX_UNOWNED) 1095 return (0); 1096 1097 /* The address was invalid. */ 1098 if (owner == -1) 1099 return (EFAULT); 1100 1101 /* If no one owns it but it is contested try to acquire it. 
*/ 1102 if (owner == UMUTEX_CONTESTED) { 1103 owner = casuword32(m, 1104 UMUTEX_CONTESTED, id | UMUTEX_CONTESTED); 1105 if (owner == UMUTEX_CONTESTED) 1106 return (0); 1107 1108 /* The address was invalid. */ 1109 if (owner == -1) 1110 return (EFAULT); 1111 1112 error = thread_check_susp(td, false); 1113 if (error != 0) 1114 break; 1115 1116 /* If this failed the lock has changed, restart. */ 1117 continue; 1118 } 1119 1120 /* 1121 * If we caught a signal, we have retried and now 1122 * exit immediately. 1123 */ 1124 if (error != 0) 1125 return (error); 1126 1127 if ((error = umtx_key_get(m, TYPE_SIMPLE_LOCK, 1128 AUTO_SHARE, &uq->uq_key)) != 0) 1129 return (error); 1130 1131 umtxq_lock(&uq->uq_key); 1132 umtxq_busy(&uq->uq_key); 1133 umtxq_insert(uq); 1134 umtxq_unbusy(&uq->uq_key); 1135 umtxq_unlock(&uq->uq_key); 1136 1137 /* 1138 * Set the contested bit so that a release in user space 1139 * knows to use the system call for unlock. If this fails 1140 * either some one else has acquired the lock or it has been 1141 * released. 1142 */ 1143 old = casuword32(m, owner, owner | UMUTEX_CONTESTED); 1144 1145 /* The address was invalid. */ 1146 if (old == -1) { 1147 umtxq_lock(&uq->uq_key); 1148 umtxq_remove(uq); 1149 umtxq_unlock(&uq->uq_key); 1150 umtx_key_release(&uq->uq_key); 1151 return (EFAULT); 1152 } 1153 1154 /* 1155 * We set the contested bit, sleep. Otherwise the lock changed 1156 * and we need to retry or we lost a race to the thread 1157 * unlocking the umtx. 1158 */ 1159 umtxq_lock(&uq->uq_key); 1160 if (old == owner) 1161 error = umtxq_sleep(uq, "umtx", timeout == NULL ? 1162 NULL : &timo); 1163 umtxq_remove(uq); 1164 umtxq_unlock(&uq->uq_key); 1165 umtx_key_release(&uq->uq_key); 1166 1167 if (error == 0) 1168 error = thread_check_susp(td, false); 1169 } 1170 1171 if (timeout == NULL) { 1172 /* Mutex locking is restarted if it is interrupted. */ 1173 if (error == EINTR) 1174 error = ERESTART; 1175 } else { 1176 /* Timed-locking is not restarted. */ 1177 if (error == ERESTART) 1178 error = EINTR; 1179 } 1180 return (error); 1181 } 1182 1183 /* 1184 * Unlock a umtx object. 1185 */ 1186 static int 1187 do_unlock_umtx32(struct thread *td, uint32_t *m, uint32_t id) 1188 { 1189 struct umtx_key key; 1190 uint32_t owner; 1191 uint32_t old; 1192 int error; 1193 int count; 1194 1195 /* 1196 * Make sure we own this mtx. 1197 */ 1198 owner = fuword32(m); 1199 if (owner == -1) 1200 return (EFAULT); 1201 1202 if ((owner & ~UMUTEX_CONTESTED) != id) 1203 return (EPERM); 1204 1205 /* This should be done in userland */ 1206 if ((owner & UMUTEX_CONTESTED) == 0) { 1207 old = casuword32(m, owner, UMUTEX_UNOWNED); 1208 if (old == -1) 1209 return (EFAULT); 1210 if (old == owner) 1211 return (0); 1212 owner = old; 1213 } 1214 1215 /* We should only ever be in here for contested locks */ 1216 if ((error = umtx_key_get(m, TYPE_SIMPLE_LOCK, AUTO_SHARE, 1217 &key)) != 0) 1218 return (error); 1219 1220 umtxq_lock(&key); 1221 umtxq_busy(&key); 1222 count = umtxq_count(&key); 1223 umtxq_unlock(&key); 1224 1225 /* 1226 * When unlocking the umtx, it must be marked as unowned if 1227 * there is zero or one thread only waiting for it. 1228 * Otherwise, it must be marked as contested. 1229 */ 1230 old = casuword32(m, owner, 1231 count <= 1 ? 
UMUTEX_UNOWNED : UMUTEX_CONTESTED); 1232 umtxq_lock(&key); 1233 umtxq_signal(&key,1); 1234 umtxq_unbusy(&key); 1235 umtxq_unlock(&key); 1236 umtx_key_release(&key); 1237 if (old == -1) 1238 return (EFAULT); 1239 if (old != owner) 1240 return (EINVAL); 1241 return (0); 1242 } 1243 #endif /* COMPAT_FREEBSD32 */ 1244 #endif /* COMPAT_FREEBSD10 */ 1245 1246 /* 1247 * Fetch and compare value, sleep on the address if value is not changed. 1248 */ 1249 static int 1250 do_wait(struct thread *td, void *addr, u_long id, 1251 struct _umtx_time *timeout, int compat32, int is_private) 1252 { 1253 struct umtx_abs_timeout timo; 1254 struct umtx_q *uq; 1255 u_long tmp; 1256 uint32_t tmp32; 1257 int error = 0; 1258 1259 uq = td->td_umtxq; 1260 if ((error = umtx_key_get(addr, TYPE_SIMPLE_WAIT, 1261 is_private ? THREAD_SHARE : AUTO_SHARE, &uq->uq_key)) != 0) 1262 return (error); 1263 1264 if (timeout != NULL) 1265 umtx_abs_timeout_init2(&timo, timeout); 1266 1267 umtxq_lock(&uq->uq_key); 1268 umtxq_insert(uq); 1269 umtxq_unlock(&uq->uq_key); 1270 if (compat32 == 0) { 1271 error = fueword(addr, &tmp); 1272 if (error != 0) 1273 error = EFAULT; 1274 } else { 1275 error = fueword32(addr, &tmp32); 1276 if (error == 0) 1277 tmp = tmp32; 1278 else 1279 error = EFAULT; 1280 } 1281 umtxq_lock(&uq->uq_key); 1282 if (error == 0) { 1283 if (tmp == id) 1284 error = umtxq_sleep(uq, "uwait", timeout == NULL ? 1285 NULL : &timo); 1286 if ((uq->uq_flags & UQF_UMTXQ) == 0) 1287 error = 0; 1288 else 1289 umtxq_remove(uq); 1290 } else if ((uq->uq_flags & UQF_UMTXQ) != 0) { 1291 umtxq_remove(uq); 1292 } 1293 umtxq_unlock(&uq->uq_key); 1294 umtx_key_release(&uq->uq_key); 1295 if (error == ERESTART) 1296 error = EINTR; 1297 return (error); 1298 } 1299 1300 /* 1301 * Wake up threads sleeping on the specified address. 1302 */ 1303 int 1304 kern_umtx_wake(struct thread *td, void *uaddr, int n_wake, int is_private) 1305 { 1306 struct umtx_key key; 1307 int ret; 1308 1309 if ((ret = umtx_key_get(uaddr, TYPE_SIMPLE_WAIT, 1310 is_private ? THREAD_SHARE : AUTO_SHARE, &key)) != 0) 1311 return (ret); 1312 umtxq_lock(&key); 1313 umtxq_signal(&key, n_wake); 1314 umtxq_unlock(&key); 1315 umtx_key_release(&key); 1316 return (0); 1317 } 1318 1319 /* 1320 * Lock PTHREAD_PRIO_NONE protocol POSIX mutex. 1321 */ 1322 static int 1323 do_lock_normal(struct thread *td, struct umutex *m, uint32_t flags, 1324 struct _umtx_time *timeout, int mode) 1325 { 1326 struct umtx_abs_timeout timo; 1327 struct umtx_q *uq; 1328 uint32_t owner, old, id; 1329 int error, rv; 1330 1331 id = td->td_tid; 1332 uq = td->td_umtxq; 1333 error = 0; 1334 if (timeout != NULL) 1335 umtx_abs_timeout_init2(&timo, timeout); 1336 1337 /* 1338 * Care must be exercised when dealing with umtx structure. It 1339 * can fault on any access. 1340 */ 1341 for (;;) { 1342 rv = fueword32(&m->m_owner, &owner); 1343 if (rv == -1) 1344 return (EFAULT); 1345 if (mode == _UMUTEX_WAIT) { 1346 if (owner == UMUTEX_UNOWNED || 1347 owner == UMUTEX_CONTESTED || 1348 owner == UMUTEX_RB_OWNERDEAD || 1349 owner == UMUTEX_RB_NOTRECOV) 1350 return (0); 1351 } else { 1352 /* 1353 * Robust mutex terminated. Kernel duty is to 1354 * return EOWNERDEAD to the userspace. The 1355 * umutex.m_flags UMUTEX_NONCONSISTENT is set 1356 * by the common userspace code. 
1357 */ 1358 if (owner == UMUTEX_RB_OWNERDEAD) { 1359 rv = casueword32(&m->m_owner, 1360 UMUTEX_RB_OWNERDEAD, &owner, 1361 id | UMUTEX_CONTESTED); 1362 if (rv == -1) 1363 return (EFAULT); 1364 if (rv == 0) { 1365 MPASS(owner == UMUTEX_RB_OWNERDEAD); 1366 return (EOWNERDEAD); /* success */ 1367 } 1368 MPASS(rv == 1); 1369 rv = thread_check_susp(td, false); 1370 if (rv != 0) 1371 return (rv); 1372 continue; 1373 } 1374 if (owner == UMUTEX_RB_NOTRECOV) 1375 return (ENOTRECOVERABLE); 1376 1377 /* 1378 * Try the uncontested case. This should be 1379 * done in userland. 1380 */ 1381 rv = casueword32(&m->m_owner, UMUTEX_UNOWNED, 1382 &owner, id); 1383 /* The address was invalid. */ 1384 if (rv == -1) 1385 return (EFAULT); 1386 1387 /* The acquire succeeded. */ 1388 if (rv == 0) { 1389 MPASS(owner == UMUTEX_UNOWNED); 1390 return (0); 1391 } 1392 1393 /* 1394 * If no one owns it but it is contested try 1395 * to acquire it. 1396 */ 1397 MPASS(rv == 1); 1398 if (owner == UMUTEX_CONTESTED) { 1399 rv = casueword32(&m->m_owner, 1400 UMUTEX_CONTESTED, &owner, 1401 id | UMUTEX_CONTESTED); 1402 /* The address was invalid. */ 1403 if (rv == -1) 1404 return (EFAULT); 1405 if (rv == 0) { 1406 MPASS(owner == UMUTEX_CONTESTED); 1407 return (0); 1408 } 1409 if (rv == 1) { 1410 rv = thread_check_susp(td, false); 1411 if (rv != 0) 1412 return (rv); 1413 } 1414 1415 /* 1416 * If this failed the lock has 1417 * changed, restart. 1418 */ 1419 continue; 1420 } 1421 1422 /* rv == 1 but not contested, likely store failure */ 1423 rv = thread_check_susp(td, false); 1424 if (rv != 0) 1425 return (rv); 1426 } 1427 1428 if (mode == _UMUTEX_TRY) 1429 return (EBUSY); 1430 1431 /* 1432 * If we caught a signal, we have retried and now 1433 * exit immediately. 1434 */ 1435 if (error != 0) 1436 return (error); 1437 1438 if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, 1439 GET_SHARE(flags), &uq->uq_key)) != 0) 1440 return (error); 1441 1442 umtxq_lock(&uq->uq_key); 1443 umtxq_busy(&uq->uq_key); 1444 umtxq_insert(uq); 1445 umtxq_unlock(&uq->uq_key); 1446 1447 /* 1448 * Set the contested bit so that a release in user space 1449 * knows to use the system call for unlock. If this fails 1450 * either some one else has acquired the lock or it has been 1451 * released. 1452 */ 1453 rv = casueword32(&m->m_owner, owner, &old, 1454 owner | UMUTEX_CONTESTED); 1455 1456 /* The address was invalid or casueword failed to store. */ 1457 if (rv == -1 || rv == 1) { 1458 umtxq_lock(&uq->uq_key); 1459 umtxq_remove(uq); 1460 umtxq_unbusy(&uq->uq_key); 1461 umtxq_unlock(&uq->uq_key); 1462 umtx_key_release(&uq->uq_key); 1463 if (rv == -1) 1464 return (EFAULT); 1465 if (rv == 1) { 1466 rv = thread_check_susp(td, false); 1467 if (rv != 0) 1468 return (rv); 1469 } 1470 continue; 1471 } 1472 1473 /* 1474 * We set the contested bit, sleep. Otherwise the lock changed 1475 * and we need to retry or we lost a race to the thread 1476 * unlocking the umtx. 1477 */ 1478 umtxq_lock(&uq->uq_key); 1479 umtxq_unbusy(&uq->uq_key); 1480 MPASS(old == owner); 1481 error = umtxq_sleep(uq, "umtxn", timeout == NULL ? 1482 NULL : &timo); 1483 umtxq_remove(uq); 1484 umtxq_unlock(&uq->uq_key); 1485 umtx_key_release(&uq->uq_key); 1486 1487 if (error == 0) 1488 error = thread_check_susp(td, false); 1489 } 1490 1491 return (0); 1492 } 1493 1494 /* 1495 * Unlock PTHREAD_PRIO_NONE protocol POSIX mutex. 
1496 */ 1497 static int 1498 do_unlock_normal(struct thread *td, struct umutex *m, uint32_t flags, bool rb) 1499 { 1500 struct umtx_key key; 1501 uint32_t owner, old, id, newlock; 1502 int error, count; 1503 1504 id = td->td_tid; 1505 1506 again: 1507 /* 1508 * Make sure we own this mtx. 1509 */ 1510 error = fueword32(&m->m_owner, &owner); 1511 if (error == -1) 1512 return (EFAULT); 1513 1514 if ((owner & ~UMUTEX_CONTESTED) != id) 1515 return (EPERM); 1516 1517 newlock = umtx_unlock_val(flags, rb); 1518 if ((owner & UMUTEX_CONTESTED) == 0) { 1519 error = casueword32(&m->m_owner, owner, &old, newlock); 1520 if (error == -1) 1521 return (EFAULT); 1522 if (error == 1) { 1523 error = thread_check_susp(td, false); 1524 if (error != 0) 1525 return (error); 1526 goto again; 1527 } 1528 MPASS(old == owner); 1529 return (0); 1530 } 1531 1532 /* We should only ever be in here for contested locks */ 1533 if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags), 1534 &key)) != 0) 1535 return (error); 1536 1537 umtxq_lock(&key); 1538 umtxq_busy(&key); 1539 count = umtxq_count(&key); 1540 umtxq_unlock(&key); 1541 1542 /* 1543 * When unlocking the umtx, it must be marked as unowned if 1544 * there is zero or one thread only waiting for it. 1545 * Otherwise, it must be marked as contested. 1546 */ 1547 if (count > 1) 1548 newlock |= UMUTEX_CONTESTED; 1549 error = casueword32(&m->m_owner, owner, &old, newlock); 1550 umtxq_lock(&key); 1551 umtxq_signal(&key, 1); 1552 umtxq_unbusy(&key); 1553 umtxq_unlock(&key); 1554 umtx_key_release(&key); 1555 if (error == -1) 1556 return (EFAULT); 1557 if (error == 1) { 1558 if (old != owner) 1559 return (EINVAL); 1560 error = thread_check_susp(td, false); 1561 if (error != 0) 1562 return (error); 1563 goto again; 1564 } 1565 return (0); 1566 } 1567 1568 /* 1569 * Check if the mutex is available and wake up a waiter, 1570 * only for simple mutex. 
1571 */ 1572 static int 1573 do_wake_umutex(struct thread *td, struct umutex *m) 1574 { 1575 struct umtx_key key; 1576 uint32_t owner; 1577 uint32_t flags; 1578 int error; 1579 int count; 1580 1581 again: 1582 error = fueword32(&m->m_owner, &owner); 1583 if (error == -1) 1584 return (EFAULT); 1585 1586 if ((owner & ~UMUTEX_CONTESTED) != 0 && owner != UMUTEX_RB_OWNERDEAD && 1587 owner != UMUTEX_RB_NOTRECOV) 1588 return (0); 1589 1590 error = fueword32(&m->m_flags, &flags); 1591 if (error == -1) 1592 return (EFAULT); 1593 1594 /* We should only ever be in here for contested locks */ 1595 if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags), 1596 &key)) != 0) 1597 return (error); 1598 1599 umtxq_lock(&key); 1600 umtxq_busy(&key); 1601 count = umtxq_count(&key); 1602 umtxq_unlock(&key); 1603 1604 if (count <= 1 && owner != UMUTEX_RB_OWNERDEAD && 1605 owner != UMUTEX_RB_NOTRECOV) { 1606 error = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner, 1607 UMUTEX_UNOWNED); 1608 if (error == -1) { 1609 error = EFAULT; 1610 } else if (error == 1) { 1611 umtxq_lock(&key); 1612 umtxq_unbusy(&key); 1613 umtxq_unlock(&key); 1614 umtx_key_release(&key); 1615 error = thread_check_susp(td, false); 1616 if (error != 0) 1617 return (error); 1618 goto again; 1619 } 1620 } 1621 1622 umtxq_lock(&key); 1623 if (error == 0 && count != 0) { 1624 MPASS((owner & ~UMUTEX_CONTESTED) == 0 || 1625 owner == UMUTEX_RB_OWNERDEAD || 1626 owner == UMUTEX_RB_NOTRECOV); 1627 umtxq_signal(&key, 1); 1628 } 1629 umtxq_unbusy(&key); 1630 umtxq_unlock(&key); 1631 umtx_key_release(&key); 1632 return (error); 1633 } 1634 1635 /* 1636 * Check if the mutex has waiters and tries to fix contention bit. 1637 */ 1638 static int 1639 do_wake2_umutex(struct thread *td, struct umutex *m, uint32_t flags) 1640 { 1641 struct umtx_key key; 1642 uint32_t owner, old; 1643 int type; 1644 int error; 1645 int count; 1646 1647 switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT | 1648 UMUTEX_ROBUST)) { 1649 case 0: 1650 case UMUTEX_ROBUST: 1651 type = TYPE_NORMAL_UMUTEX; 1652 break; 1653 case UMUTEX_PRIO_INHERIT: 1654 type = TYPE_PI_UMUTEX; 1655 break; 1656 case (UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST): 1657 type = TYPE_PI_ROBUST_UMUTEX; 1658 break; 1659 case UMUTEX_PRIO_PROTECT: 1660 type = TYPE_PP_UMUTEX; 1661 break; 1662 case (UMUTEX_PRIO_PROTECT | UMUTEX_ROBUST): 1663 type = TYPE_PP_ROBUST_UMUTEX; 1664 break; 1665 default: 1666 return (EINVAL); 1667 } 1668 if ((error = umtx_key_get(m, type, GET_SHARE(flags), &key)) != 0) 1669 return (error); 1670 1671 owner = 0; 1672 umtxq_lock(&key); 1673 umtxq_busy(&key); 1674 count = umtxq_count(&key); 1675 umtxq_unlock(&key); 1676 1677 error = fueword32(&m->m_owner, &owner); 1678 if (error == -1) 1679 error = EFAULT; 1680 1681 /* 1682 * Only repair contention bit if there is a waiter, this means 1683 * the mutex is still being referenced by userland code, 1684 * otherwise don't update any memory. 
1685 */ 1686 while (error == 0 && (owner & UMUTEX_CONTESTED) == 0 && 1687 (count > 1 || (count == 1 && (owner & ~UMUTEX_CONTESTED) != 0))) { 1688 error = casueword32(&m->m_owner, owner, &old, 1689 owner | UMUTEX_CONTESTED); 1690 if (error == -1) { 1691 error = EFAULT; 1692 break; 1693 } 1694 if (error == 0) { 1695 MPASS(old == owner); 1696 break; 1697 } 1698 owner = old; 1699 error = thread_check_susp(td, false); 1700 } 1701 1702 umtxq_lock(&key); 1703 if (error == EFAULT) { 1704 umtxq_signal(&key, INT_MAX); 1705 } else if (count != 0 && ((owner & ~UMUTEX_CONTESTED) == 0 || 1706 owner == UMUTEX_RB_OWNERDEAD || owner == UMUTEX_RB_NOTRECOV)) 1707 umtxq_signal(&key, 1); 1708 umtxq_unbusy(&key); 1709 umtxq_unlock(&key); 1710 umtx_key_release(&key); 1711 return (error); 1712 } 1713 1714 struct umtx_pi * 1715 umtx_pi_alloc(int flags) 1716 { 1717 struct umtx_pi *pi; 1718 1719 pi = uma_zalloc(umtx_pi_zone, M_ZERO | flags); 1720 TAILQ_INIT(&pi->pi_blocked); 1721 atomic_add_int(&umtx_pi_allocated, 1); 1722 return (pi); 1723 } 1724 1725 void 1726 umtx_pi_free(struct umtx_pi *pi) 1727 { 1728 uma_zfree(umtx_pi_zone, pi); 1729 atomic_add_int(&umtx_pi_allocated, -1); 1730 } 1731 1732 /* 1733 * Adjust the thread's position on a pi_state after its priority has been 1734 * changed. 1735 */ 1736 static int 1737 umtx_pi_adjust_thread(struct umtx_pi *pi, struct thread *td) 1738 { 1739 struct umtx_q *uq, *uq1, *uq2; 1740 struct thread *td1; 1741 1742 mtx_assert(&umtx_lock, MA_OWNED); 1743 if (pi == NULL) 1744 return (0); 1745 1746 uq = td->td_umtxq; 1747 1748 /* 1749 * Check if the thread needs to be moved on the blocked chain. 1750 * It needs to be moved if either its priority is lower than 1751 * the previous thread or higher than the next thread. 1752 */ 1753 uq1 = TAILQ_PREV(uq, umtxq_head, uq_lockq); 1754 uq2 = TAILQ_NEXT(uq, uq_lockq); 1755 if ((uq1 != NULL && UPRI(td) < UPRI(uq1->uq_thread)) || 1756 (uq2 != NULL && UPRI(td) > UPRI(uq2->uq_thread))) { 1757 /* 1758 * Remove thread from blocked chain and determine where 1759 * it should be moved to. 1760 */ 1761 TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq); 1762 TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) { 1763 td1 = uq1->uq_thread; 1764 MPASS(td1->td_proc->p_magic == P_MAGIC); 1765 if (UPRI(td1) > UPRI(td)) 1766 break; 1767 } 1768 1769 if (uq1 == NULL) 1770 TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq); 1771 else 1772 TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq); 1773 } 1774 return (1); 1775 } 1776 1777 static struct umtx_pi * 1778 umtx_pi_next(struct umtx_pi *pi) 1779 { 1780 struct umtx_q *uq_owner; 1781 1782 if (pi->pi_owner == NULL) 1783 return (NULL); 1784 uq_owner = pi->pi_owner->td_umtxq; 1785 if (uq_owner == NULL) 1786 return (NULL); 1787 return (uq_owner->uq_pi_blocked); 1788 } 1789 1790 /* 1791 * Floyd's Cycle-Finding Algorithm. 1792 */ 1793 static bool 1794 umtx_pi_check_loop(struct umtx_pi *pi) 1795 { 1796 struct umtx_pi *pi1; /* fast iterator */ 1797 1798 mtx_assert(&umtx_lock, MA_OWNED); 1799 if (pi == NULL) 1800 return (false); 1801 pi1 = pi; 1802 for (;;) { 1803 pi = umtx_pi_next(pi); 1804 if (pi == NULL) 1805 break; 1806 pi1 = umtx_pi_next(pi1); 1807 if (pi1 == NULL) 1808 break; 1809 pi1 = umtx_pi_next(pi1); 1810 if (pi1 == NULL) 1811 break; 1812 if (pi == pi1) 1813 return (true); 1814 } 1815 return (false); 1816 } 1817 1818 /* 1819 * Propagate priority when a thread is blocked on POSIX 1820 * PI mutex. 
1821 */ 1822 static void 1823 umtx_propagate_priority(struct thread *td) 1824 { 1825 struct umtx_q *uq; 1826 struct umtx_pi *pi; 1827 int pri; 1828 1829 mtx_assert(&umtx_lock, MA_OWNED); 1830 pri = UPRI(td); 1831 uq = td->td_umtxq; 1832 pi = uq->uq_pi_blocked; 1833 if (pi == NULL) 1834 return; 1835 if (umtx_pi_check_loop(pi)) 1836 return; 1837 1838 for (;;) { 1839 td = pi->pi_owner; 1840 if (td == NULL || td == curthread) 1841 return; 1842 1843 MPASS(td->td_proc != NULL); 1844 MPASS(td->td_proc->p_magic == P_MAGIC); 1845 1846 thread_lock(td); 1847 if (td->td_lend_user_pri > pri) 1848 sched_lend_user_prio(td, pri); 1849 else { 1850 thread_unlock(td); 1851 break; 1852 } 1853 thread_unlock(td); 1854 1855 /* 1856 * Pick up the lock that td is blocked on. 1857 */ 1858 uq = td->td_umtxq; 1859 pi = uq->uq_pi_blocked; 1860 if (pi == NULL) 1861 break; 1862 /* Resort td on the list if needed. */ 1863 umtx_pi_adjust_thread(pi, td); 1864 } 1865 } 1866 1867 /* 1868 * Unpropagate priority for a PI mutex when a thread blocked on 1869 * it is interrupted by signal or resumed by others. 1870 */ 1871 static void 1872 umtx_repropagate_priority(struct umtx_pi *pi) 1873 { 1874 struct umtx_q *uq, *uq_owner; 1875 struct umtx_pi *pi2; 1876 int pri; 1877 1878 mtx_assert(&umtx_lock, MA_OWNED); 1879 1880 if (umtx_pi_check_loop(pi)) 1881 return; 1882 while (pi != NULL && pi->pi_owner != NULL) { 1883 pri = PRI_MAX; 1884 uq_owner = pi->pi_owner->td_umtxq; 1885 1886 TAILQ_FOREACH(pi2, &uq_owner->uq_pi_contested, pi_link) { 1887 uq = TAILQ_FIRST(&pi2->pi_blocked); 1888 if (uq != NULL) { 1889 if (pri > UPRI(uq->uq_thread)) 1890 pri = UPRI(uq->uq_thread); 1891 } 1892 } 1893 1894 if (pri > uq_owner->uq_inherited_pri) 1895 pri = uq_owner->uq_inherited_pri; 1896 thread_lock(pi->pi_owner); 1897 sched_lend_user_prio(pi->pi_owner, pri); 1898 thread_unlock(pi->pi_owner); 1899 if ((pi = uq_owner->uq_pi_blocked) != NULL) 1900 umtx_pi_adjust_thread(pi, uq_owner->uq_thread); 1901 } 1902 } 1903 1904 /* 1905 * Insert a PI mutex into owned list. 1906 */ 1907 static void 1908 umtx_pi_setowner(struct umtx_pi *pi, struct thread *owner) 1909 { 1910 struct umtx_q *uq_owner; 1911 1912 uq_owner = owner->td_umtxq; 1913 mtx_assert(&umtx_lock, MA_OWNED); 1914 MPASS(pi->pi_owner == NULL); 1915 pi->pi_owner = owner; 1916 TAILQ_INSERT_TAIL(&uq_owner->uq_pi_contested, pi, pi_link); 1917 } 1918 1919 /* 1920 * Disown a PI mutex, and remove it from the owned list. 1921 */ 1922 static void 1923 umtx_pi_disown(struct umtx_pi *pi) 1924 { 1925 1926 mtx_assert(&umtx_lock, MA_OWNED); 1927 TAILQ_REMOVE(&pi->pi_owner->td_umtxq->uq_pi_contested, pi, pi_link); 1928 pi->pi_owner = NULL; 1929 } 1930 1931 /* 1932 * Claim ownership of a PI mutex. 1933 */ 1934 int 1935 umtx_pi_claim(struct umtx_pi *pi, struct thread *owner) 1936 { 1937 struct umtx_q *uq; 1938 int pri; 1939 1940 mtx_lock(&umtx_lock); 1941 if (pi->pi_owner == owner) { 1942 mtx_unlock(&umtx_lock); 1943 return (0); 1944 } 1945 1946 if (pi->pi_owner != NULL) { 1947 /* 1948 * userland may have already messed the mutex, sigh. 
1949 */ 1950 mtx_unlock(&umtx_lock); 1951 return (EPERM); 1952 } 1953 umtx_pi_setowner(pi, owner); 1954 uq = TAILQ_FIRST(&pi->pi_blocked); 1955 if (uq != NULL) { 1956 pri = UPRI(uq->uq_thread); 1957 thread_lock(owner); 1958 if (pri < UPRI(owner)) 1959 sched_lend_user_prio(owner, pri); 1960 thread_unlock(owner); 1961 } 1962 mtx_unlock(&umtx_lock); 1963 return (0); 1964 } 1965 1966 /* 1967 * Adjust a thread's order position in its blocked PI mutex, 1968 * this may result new priority propagating process. 1969 */ 1970 void 1971 umtx_pi_adjust(struct thread *td, u_char oldpri) 1972 { 1973 struct umtx_q *uq; 1974 struct umtx_pi *pi; 1975 1976 uq = td->td_umtxq; 1977 mtx_lock(&umtx_lock); 1978 /* 1979 * Pick up the lock that td is blocked on. 1980 */ 1981 pi = uq->uq_pi_blocked; 1982 if (pi != NULL) { 1983 umtx_pi_adjust_thread(pi, td); 1984 umtx_repropagate_priority(pi); 1985 } 1986 mtx_unlock(&umtx_lock); 1987 } 1988 1989 /* 1990 * Sleep on a PI mutex. 1991 */ 1992 int 1993 umtxq_sleep_pi(struct umtx_q *uq, struct umtx_pi *pi, uint32_t owner, 1994 const char *wmesg, struct umtx_abs_timeout *timo, bool shared) 1995 { 1996 struct thread *td, *td1; 1997 struct umtx_q *uq1; 1998 int error, pri; 1999 #ifdef INVARIANTS 2000 struct umtxq_chain *uc; 2001 2002 uc = umtxq_getchain(&pi->pi_key); 2003 #endif 2004 error = 0; 2005 td = uq->uq_thread; 2006 KASSERT(td == curthread, ("inconsistent uq_thread")); 2007 UMTXQ_LOCKED_ASSERT(umtxq_getchain(&uq->uq_key)); 2008 KASSERT(uc->uc_busy != 0, ("umtx chain is not busy")); 2009 umtxq_insert(uq); 2010 mtx_lock(&umtx_lock); 2011 if (pi->pi_owner == NULL) { 2012 mtx_unlock(&umtx_lock); 2013 td1 = tdfind(owner, shared ? -1 : td->td_proc->p_pid); 2014 mtx_lock(&umtx_lock); 2015 if (td1 != NULL) { 2016 if (pi->pi_owner == NULL) 2017 umtx_pi_setowner(pi, td1); 2018 PROC_UNLOCK(td1->td_proc); 2019 } 2020 } 2021 2022 TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) { 2023 pri = UPRI(uq1->uq_thread); 2024 if (pri > UPRI(td)) 2025 break; 2026 } 2027 2028 if (uq1 != NULL) 2029 TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq); 2030 else 2031 TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq); 2032 2033 uq->uq_pi_blocked = pi; 2034 thread_lock(td); 2035 td->td_flags |= TDF_UPIBLOCKED; 2036 thread_unlock(td); 2037 umtx_propagate_priority(td); 2038 mtx_unlock(&umtx_lock); 2039 umtxq_unbusy(&uq->uq_key); 2040 2041 error = umtxq_sleep(uq, wmesg, timo); 2042 umtxq_remove(uq); 2043 2044 mtx_lock(&umtx_lock); 2045 uq->uq_pi_blocked = NULL; 2046 thread_lock(td); 2047 td->td_flags &= ~TDF_UPIBLOCKED; 2048 thread_unlock(td); 2049 TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq); 2050 umtx_repropagate_priority(pi); 2051 mtx_unlock(&umtx_lock); 2052 umtxq_unlock(&uq->uq_key); 2053 2054 return (error); 2055 } 2056 2057 /* 2058 * Add reference count for a PI mutex. 2059 */ 2060 void 2061 umtx_pi_ref(struct umtx_pi *pi) 2062 { 2063 2064 UMTXQ_LOCKED_ASSERT(umtxq_getchain(&pi->pi_key)); 2065 pi->pi_refcount++; 2066 } 2067 2068 /* 2069 * Decrease reference count for a PI mutex, if the counter 2070 * is decreased to zero, its memory space is freed. 
2071 */ 2072 void 2073 umtx_pi_unref(struct umtx_pi *pi) 2074 { 2075 struct umtxq_chain *uc; 2076 2077 uc = umtxq_getchain(&pi->pi_key); 2078 UMTXQ_LOCKED_ASSERT(uc); 2079 KASSERT(pi->pi_refcount > 0, ("invalid reference count")); 2080 if (--pi->pi_refcount == 0) { 2081 mtx_lock(&umtx_lock); 2082 if (pi->pi_owner != NULL) 2083 umtx_pi_disown(pi); 2084 KASSERT(TAILQ_EMPTY(&pi->pi_blocked), 2085 ("blocked queue not empty")); 2086 mtx_unlock(&umtx_lock); 2087 TAILQ_REMOVE(&uc->uc_pi_list, pi, pi_hashlink); 2088 umtx_pi_free(pi); 2089 } 2090 } 2091 2092 /* 2093 * Find a PI mutex in hash table. 2094 */ 2095 struct umtx_pi * 2096 umtx_pi_lookup(struct umtx_key *key) 2097 { 2098 struct umtxq_chain *uc; 2099 struct umtx_pi *pi; 2100 2101 uc = umtxq_getchain(key); 2102 UMTXQ_LOCKED_ASSERT(uc); 2103 2104 TAILQ_FOREACH(pi, &uc->uc_pi_list, pi_hashlink) { 2105 if (umtx_key_match(&pi->pi_key, key)) { 2106 return (pi); 2107 } 2108 } 2109 return (NULL); 2110 } 2111 2112 /* 2113 * Insert a PI mutex into hash table. 2114 */ 2115 void 2116 umtx_pi_insert(struct umtx_pi *pi) 2117 { 2118 struct umtxq_chain *uc; 2119 2120 uc = umtxq_getchain(&pi->pi_key); 2121 UMTXQ_LOCKED_ASSERT(uc); 2122 TAILQ_INSERT_TAIL(&uc->uc_pi_list, pi, pi_hashlink); 2123 } 2124 2125 /* 2126 * Drop a PI mutex and wakeup a top waiter. 2127 */ 2128 int 2129 umtx_pi_drop(struct thread *td, struct umtx_key *key, bool rb, int *count) 2130 { 2131 struct umtx_q *uq_first, *uq_first2, *uq_me; 2132 struct umtx_pi *pi, *pi2; 2133 int pri; 2134 2135 UMTXQ_ASSERT_LOCKED_BUSY(key); 2136 *count = umtxq_count_pi(key, &uq_first); 2137 if (uq_first != NULL) { 2138 mtx_lock(&umtx_lock); 2139 pi = uq_first->uq_pi_blocked; 2140 KASSERT(pi != NULL, ("pi == NULL?")); 2141 if (pi->pi_owner != td && !(rb && pi->pi_owner == NULL)) { 2142 mtx_unlock(&umtx_lock); 2143 /* userland messed the mutex */ 2144 return (EPERM); 2145 } 2146 uq_me = td->td_umtxq; 2147 if (pi->pi_owner == td) 2148 umtx_pi_disown(pi); 2149 /* get highest priority thread which is still sleeping. */ 2150 uq_first = TAILQ_FIRST(&pi->pi_blocked); 2151 while (uq_first != NULL && 2152 (uq_first->uq_flags & UQF_UMTXQ) == 0) { 2153 uq_first = TAILQ_NEXT(uq_first, uq_lockq); 2154 } 2155 pri = PRI_MAX; 2156 TAILQ_FOREACH(pi2, &uq_me->uq_pi_contested, pi_link) { 2157 uq_first2 = TAILQ_FIRST(&pi2->pi_blocked); 2158 if (uq_first2 != NULL) { 2159 if (pri > UPRI(uq_first2->uq_thread)) 2160 pri = UPRI(uq_first2->uq_thread); 2161 } 2162 } 2163 thread_lock(td); 2164 sched_lend_user_prio(td, pri); 2165 thread_unlock(td); 2166 mtx_unlock(&umtx_lock); 2167 if (uq_first) 2168 umtxq_signal_thread(uq_first); 2169 } else { 2170 pi = umtx_pi_lookup(key); 2171 /* 2172 * A umtx_pi can exist if a signal or timeout removed the 2173 * last waiter from the umtxq, but there is still 2174 * a thread in do_lock_pi() holding the umtx_pi. 2175 */ 2176 if (pi != NULL) { 2177 /* 2178 * The umtx_pi can be unowned, such as when a thread 2179 * has just entered do_lock_pi(), allocated the 2180 * umtx_pi, and unlocked the umtxq. 2181 * If the current thread owns it, it must disown it. 2182 */ 2183 mtx_lock(&umtx_lock); 2184 if (pi->pi_owner == td) 2185 umtx_pi_disown(pi); 2186 mtx_unlock(&umtx_lock); 2187 } 2188 } 2189 return (0); 2190 } 2191 2192 /* 2193 * Lock a PI mutex. 
/*
 * Lock a PI mutex.
 */
static int
do_lock_pi(struct thread *td, struct umutex *m, uint32_t flags,
    struct _umtx_time *timeout, int try)
{
    struct umtx_abs_timeout timo;
    struct umtx_q *uq;
    struct umtx_pi *pi, *new_pi;
    uint32_t id, old_owner, owner, old;
    int error, rv;

    id = td->td_tid;
    uq = td->td_umtxq;

    if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
        TYPE_PI_ROBUST_UMUTEX : TYPE_PI_UMUTEX, GET_SHARE(flags),
        &uq->uq_key)) != 0)
        return (error);

    if (timeout != NULL)
        umtx_abs_timeout_init2(&timo, timeout);

    umtxq_lock(&uq->uq_key);
    pi = umtx_pi_lookup(&uq->uq_key);
    if (pi == NULL) {
        new_pi = umtx_pi_alloc(M_NOWAIT);
        if (new_pi == NULL) {
            umtxq_unlock(&uq->uq_key);
            new_pi = umtx_pi_alloc(M_WAITOK);
            umtxq_lock(&uq->uq_key);
            pi = umtx_pi_lookup(&uq->uq_key);
            if (pi != NULL) {
                umtx_pi_free(new_pi);
                new_pi = NULL;
            }
        }
        if (new_pi != NULL) {
            new_pi->pi_key = uq->uq_key;
            umtx_pi_insert(new_pi);
            pi = new_pi;
        }
    }
    umtx_pi_ref(pi);
    umtxq_unlock(&uq->uq_key);

    /*
     * Care must be exercised when dealing with the umtx structure.
     * It can fault on any access.
     */
    for (;;) {
        /*
         * Try the uncontested case.  This should be done in userland.
         */
        rv = casueword32(&m->m_owner, UMUTEX_UNOWNED, &owner, id);
        /* The address was invalid. */
        if (rv == -1) {
            error = EFAULT;
            break;
        }
        /* The acquire succeeded. */
        if (rv == 0) {
            MPASS(owner == UMUTEX_UNOWNED);
            error = 0;
            break;
        }

        if (owner == UMUTEX_RB_NOTRECOV) {
            error = ENOTRECOVERABLE;
            break;
        }

        /*
         * Avoid overwriting a possible error from sleep due
         * to the pending signal with the suspension check result.
         */
        if (error == 0) {
            error = thread_check_susp(td, true);
            if (error != 0)
                break;
        }

        /* If no one owns it but it is contested, try to acquire it. */
        if (owner == UMUTEX_CONTESTED || owner == UMUTEX_RB_OWNERDEAD) {
            old_owner = owner;
            rv = casueword32(&m->m_owner, owner, &owner,
                id | UMUTEX_CONTESTED);
            /* The address was invalid. */
            if (rv == -1) {
                error = EFAULT;
                break;
            }
            if (rv == 1) {
                if (error == 0) {
                    error = thread_check_susp(td, true);
                    if (error != 0)
                        break;
                }

                /*
                 * If this failed, the lock may have
                 * changed; restart.
                 */
                continue;
            }

            MPASS(rv == 0);
            MPASS(owner == old_owner);
            umtxq_lock(&uq->uq_key);
            umtxq_busy(&uq->uq_key);
            error = umtx_pi_claim(pi, td);
            umtxq_unbusy(&uq->uq_key);
            umtxq_unlock(&uq->uq_key);
            if (error != 0) {
                /*
                 * Since we're going to return an
                 * error, restore the m_owner to its
                 * previous, unowned state to avoid
                 * compounding the problem.
                 */
                (void)casuword32(&m->m_owner,
                    id | UMUTEX_CONTESTED, old_owner);
            }
            if (error == 0 && old_owner == UMUTEX_RB_OWNERDEAD)
                error = EOWNERDEAD;
            break;
        }

        if ((owner & ~UMUTEX_CONTESTED) == id) {
            error = EDEADLK;
            break;
        }

        if (try != 0) {
            error = EBUSY;
            break;
        }
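        /*
         * The mutex is held by another, live thread and this is a
         * blocking request; the try-lock and self-deadlock cases
         * were handled above, so fall through to queue ourselves
         * on the chain and sleep.
         */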
        /*
         * If we caught a signal, we have retried and now
         * exit immediately.
         */
        if (error != 0)
            break;

        umtxq_lock(&uq->uq_key);
        umtxq_busy(&uq->uq_key);
        umtxq_unlock(&uq->uq_key);

        /*
         * Set the contested bit so that a release in user space
         * knows to use the system call for unlock.  If this fails,
         * either someone else has acquired the lock or it has been
         * released.
         */
        rv = casueword32(&m->m_owner, owner, &old, owner |
            UMUTEX_CONTESTED);

        /* The address was invalid. */
        if (rv == -1) {
            umtxq_unbusy_unlocked(&uq->uq_key);
            error = EFAULT;
            break;
        }
        if (rv == 1) {
            umtxq_unbusy_unlocked(&uq->uq_key);
            error = thread_check_susp(td, true);
            if (error != 0)
                break;

            /*
             * The lock changed and we need to retry, or we
             * lost a race to the thread unlocking the
             * umtx.  Note that the UMUTEX_RB_OWNERDEAD
             * value for owner is impossible here.
             */
            continue;
        }

        umtxq_lock(&uq->uq_key);

        /* We set the contested bit, sleep. */
        MPASS(old == owner);
        error = umtxq_sleep_pi(uq, pi, owner & ~UMUTEX_CONTESTED,
            "umtxpi", timeout == NULL ? NULL : &timo,
            (flags & USYNC_PROCESS_SHARED) != 0);
        if (error != 0)
            continue;

        error = thread_check_susp(td, false);
        if (error != 0)
            break;
    }

    umtxq_lock(&uq->uq_key);
    umtx_pi_unref(pi);
    umtxq_unlock(&uq->uq_key);

    umtx_key_release(&uq->uq_key);
    return (error);
}
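/*
 * Robust-mutex interaction, summarizing the paths above: when an
 * owner exits without unlocking, exit-time cleanup stores
 * UMUTEX_RB_OWNERDEAD in m_owner; the next do_lock_pi() caller then
 * takes ownership and gets EOWNERDEAD, telling userland that the
 * state protected by the mutex may be inconsistent.
 * UMUTEX_RB_NOTRECOV instead fails every subsequent lock attempt
 * with ENOTRECOVERABLE.
 */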
/*
 * Unlock a PI mutex.
 */
static int
do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
{
    struct umtx_key key;
    uint32_t id, new_owner, old, owner;
    int count, error;

    id = td->td_tid;

usrloop:
    /*
     * Make sure we own this mtx.
     */
    error = fueword32(&m->m_owner, &owner);
    if (error == -1)
        return (EFAULT);

    if ((owner & ~UMUTEX_CONTESTED) != id)
        return (EPERM);

    new_owner = umtx_unlock_val(flags, rb);

    /* This should be done in userland. */
    if ((owner & UMUTEX_CONTESTED) == 0) {
        error = casueword32(&m->m_owner, owner, &old, new_owner);
        if (error == -1)
            return (EFAULT);
        if (error == 1) {
            error = thread_check_susp(td, true);
            if (error != 0)
                return (error);
            goto usrloop;
        }
        if (old == owner)
            return (0);
        owner = old;
    }

    /* We should only ever be in here for contested locks. */
    if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
        TYPE_PI_ROBUST_UMUTEX : TYPE_PI_UMUTEX, GET_SHARE(flags),
        &key)) != 0)
        return (error);

    umtxq_lock(&key);
    umtxq_busy(&key);
    error = umtx_pi_drop(td, &key, rb, &count);
    if (error != 0) {
        umtxq_unbusy(&key);
        umtxq_unlock(&key);
        umtx_key_release(&key);
        /* userland has corrupted the mutex */
        return (error);
    }
    umtxq_unlock(&key);

    /*
     * When unlocking the umtx, it must be marked as unowned if
     * zero or one thread is waiting for it.  Otherwise, it must
     * be marked as contested.
     */

    if (count > 1)
        new_owner |= UMUTEX_CONTESTED;
again:
    error = casueword32(&m->m_owner, owner, &old, new_owner);
    if (error == 1) {
        error = thread_check_susp(td, false);
        if (error == 0)
            goto again;
    }
    umtxq_unbusy_unlocked(&key);
    umtx_key_release(&key);
    if (error == -1)
        return (EFAULT);
    if (error == 0 && old != owner)
        return (EINVAL);
    return (error);
}

/*
 * Lock a PP mutex.
 */
static int
do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags,
    struct _umtx_time *timeout, int try)
{
    struct umtx_abs_timeout timo;
    struct umtx_q *uq, *uq2;
    struct umtx_pi *pi;
    uint32_t ceiling;
    uint32_t owner, id;
    int error, pri, old_inherited_pri, su, rv;

    id = td->td_tid;
    uq = td->td_umtxq;
    if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
        TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags),
        &uq->uq_key)) != 0)
        return (error);

    if (timeout != NULL)
        umtx_abs_timeout_init2(&timo, timeout);

    su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);
    for (;;) {
        old_inherited_pri = uq->uq_inherited_pri;
        umtxq_lock(&uq->uq_key);
        umtxq_busy(&uq->uq_key);
        umtxq_unlock(&uq->uq_key);

        rv = fueword32(&m->m_ceilings[0], &ceiling);
        if (rv == -1) {
            error = EFAULT;
            goto out;
        }
        ceiling = RTP_PRIO_MAX - ceiling;
        if (ceiling > RTP_PRIO_MAX) {
            error = EINVAL;
            goto out;
        }

        mtx_lock(&umtx_lock);
        if (UPRI(td) < PRI_MIN_REALTIME + ceiling) {
            mtx_unlock(&umtx_lock);
            error = EINVAL;
            goto out;
        }
        if (su && PRI_MIN_REALTIME + ceiling < uq->uq_inherited_pri) {
            uq->uq_inherited_pri = PRI_MIN_REALTIME + ceiling;
            thread_lock(td);
            if (uq->uq_inherited_pri < UPRI(td))
                sched_lend_user_prio(td, uq->uq_inherited_pri);
            thread_unlock(td);
        }
        mtx_unlock(&umtx_lock);

        rv = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
            id | UMUTEX_CONTESTED);
        /* The address was invalid. */
        if (rv == -1) {
            error = EFAULT;
            break;
        }
        if (rv == 0) {
            MPASS(owner == UMUTEX_CONTESTED);
            error = 0;
            break;
        }
        /* rv == 1 */
        if (owner == UMUTEX_RB_OWNERDEAD) {
            rv = casueword32(&m->m_owner, UMUTEX_RB_OWNERDEAD,
                &owner, id | UMUTEX_CONTESTED);
            if (rv == -1) {
                error = EFAULT;
                break;
            }
            if (rv == 0) {
                MPASS(owner == UMUTEX_RB_OWNERDEAD);
                error = EOWNERDEAD; /* success */
                break;
            }

            /*
             * rv == 1, only check for suspension if we
             * have not already caught a signal.  If we
             * get an error from the check, the same
             * condition is checked by the umtxq_sleep()
             * call below, so we should clear the error
             * here to not skip the last loop iteration.
             */
            if (error == 0) {
                error = thread_check_susp(td, false);
                if (error == 0) {
                    if (try != 0)
                        error = EBUSY;
                    else
                        continue;
                }
                error = 0;
            }
        } else if (owner == UMUTEX_RB_NOTRECOV) {
            error = ENOTRECOVERABLE;
        }

        if (try != 0)
            error = EBUSY;

        /*
         * If we caught a signal, we have retried and now
         * exit immediately.
         */
        if (error != 0)
            break;

        umtxq_lock(&uq->uq_key);
        umtxq_insert(uq);
        umtxq_unbusy(&uq->uq_key);
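        /*
         * We are on the sleep queue and the chain is no longer
         * busy, so a concurrent do_unlock_pp() can now find us and
         * wake us with umtxq_signal().
         */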
        error = umtxq_sleep(uq, "umtxpp", timeout == NULL ?
            NULL : &timo);
        umtxq_remove(uq);
        umtxq_unlock(&uq->uq_key);

        mtx_lock(&umtx_lock);
        uq->uq_inherited_pri = old_inherited_pri;
        pri = PRI_MAX;
        TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
            uq2 = TAILQ_FIRST(&pi->pi_blocked);
            if (uq2 != NULL) {
                if (pri > UPRI(uq2->uq_thread))
                    pri = UPRI(uq2->uq_thread);
            }
        }
        if (pri > uq->uq_inherited_pri)
            pri = uq->uq_inherited_pri;
        thread_lock(td);
        sched_lend_user_prio(td, pri);
        thread_unlock(td);
        mtx_unlock(&umtx_lock);
    }

    if (error != 0 && error != EOWNERDEAD) {
        mtx_lock(&umtx_lock);
        uq->uq_inherited_pri = old_inherited_pri;
        pri = PRI_MAX;
        TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
            uq2 = TAILQ_FIRST(&pi->pi_blocked);
            if (uq2 != NULL) {
                if (pri > UPRI(uq2->uq_thread))
                    pri = UPRI(uq2->uq_thread);
            }
        }
        if (pri > uq->uq_inherited_pri)
            pri = uq->uq_inherited_pri;
        thread_lock(td);
        sched_lend_user_prio(td, pri);
        thread_unlock(td);
        mtx_unlock(&umtx_lock);
    }

out:
    umtxq_unbusy_unlocked(&uq->uq_key);
    umtx_key_release(&uq->uq_key);
    return (error);
}

/*
 * Unlock a PP mutex.
 */
static int
do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags, bool rb)
{
    struct umtx_key key;
    struct umtx_q *uq, *uq2;
    struct umtx_pi *pi;
    uint32_t id, owner, rceiling;
    int error, pri, new_inherited_pri, su;

    id = td->td_tid;
    uq = td->td_umtxq;
    su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0);

    /*
     * Make sure we own this mtx.
     */
    error = fueword32(&m->m_owner, &owner);
    if (error == -1)
        return (EFAULT);

    if ((owner & ~UMUTEX_CONTESTED) != id)
        return (EPERM);

    error = copyin(&m->m_ceilings[1], &rceiling, sizeof(uint32_t));
    if (error != 0)
        return (error);

    if (rceiling == -1)
        new_inherited_pri = PRI_MAX;
    else {
        rceiling = RTP_PRIO_MAX - rceiling;
        if (rceiling > RTP_PRIO_MAX)
            return (EINVAL);
        new_inherited_pri = PRI_MIN_REALTIME + rceiling;
    }

    if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
        TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags),
        &key)) != 0)
        return (error);
    umtxq_lock(&key);
    umtxq_busy(&key);
    umtxq_unlock(&key);
    /*
     * For a priority protected mutex, always set the unlocked state
     * to UMUTEX_CONTESTED, so that userland always enters the kernel
     * to lock the mutex.  This is necessary because thread priority
     * has to be adjusted for such a mutex.
 */
    error = suword32(&m->m_owner, umtx_unlock_val(flags, rb) |
        UMUTEX_CONTESTED);

    umtxq_lock(&key);
    if (error == 0)
        umtxq_signal(&key, 1);
    umtxq_unbusy(&key);
    umtxq_unlock(&key);

    if (error == -1)
        error = EFAULT;
    else {
        mtx_lock(&umtx_lock);
        if (su != 0)
            uq->uq_inherited_pri = new_inherited_pri;
        pri = PRI_MAX;
        TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) {
            uq2 = TAILQ_FIRST(&pi->pi_blocked);
            if (uq2 != NULL) {
                if (pri > UPRI(uq2->uq_thread))
                    pri = UPRI(uq2->uq_thread);
            }
        }
        if (pri > uq->uq_inherited_pri)
            pri = uq->uq_inherited_pri;
        thread_lock(td);
        sched_lend_user_prio(td, pri);
        thread_unlock(td);
        mtx_unlock(&umtx_lock);
    }
    umtx_key_release(&key);
    return (error);
}

static int
do_set_ceiling(struct thread *td, struct umutex *m, uint32_t ceiling,
    uint32_t *old_ceiling)
{
    struct umtx_q *uq;
    uint32_t flags, id, owner, save_ceiling;
    int error, rv, rv1;

    error = fueword32(&m->m_flags, &flags);
    if (error == -1)
        return (EFAULT);
    if ((flags & UMUTEX_PRIO_PROTECT) == 0)
        return (EINVAL);
    if (ceiling > RTP_PRIO_MAX)
        return (EINVAL);
    id = td->td_tid;
    uq = td->td_umtxq;
    if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ?
        TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags),
        &uq->uq_key)) != 0)
        return (error);
    for (;;) {
        umtxq_lock(&uq->uq_key);
        umtxq_busy(&uq->uq_key);
        umtxq_unlock(&uq->uq_key);

        rv = fueword32(&m->m_ceilings[0], &save_ceiling);
        if (rv == -1) {
            error = EFAULT;
            break;
        }

        rv = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner,
            id | UMUTEX_CONTESTED);
        if (rv == -1) {
            error = EFAULT;
            break;
        }

        if (rv == 0) {
            MPASS(owner == UMUTEX_CONTESTED);
            rv = suword32(&m->m_ceilings[0], ceiling);
            rv1 = suword32(&m->m_owner, UMUTEX_CONTESTED);
            error = (rv == 0 && rv1 == 0) ? 0 : EFAULT;
            break;
        }

        if ((owner & ~UMUTEX_CONTESTED) == id) {
            rv = suword32(&m->m_ceilings[0], ceiling);
            error = rv == 0 ? 0 : EFAULT;
            break;
        }

        if (owner == UMUTEX_RB_OWNERDEAD) {
            error = EOWNERDEAD;
            break;
        } else if (owner == UMUTEX_RB_NOTRECOV) {
            error = ENOTRECOVERABLE;
            break;
        }

        /*
         * If we caught a signal, we have retried and now
         * exit immediately.
         */
        if (error != 0)
            break;

        /*
         * Some other thread holds the mutex; sleep until it is
         * released and then retry the ceiling update.
         */
        umtxq_lock(&uq->uq_key);
        umtxq_insert(uq);
        umtxq_unbusy(&uq->uq_key);
        error = umtxq_sleep(uq, "umtxpp", NULL);
        umtxq_remove(uq);
        umtxq_unlock(&uq->uq_key);
    }
    umtxq_lock(&uq->uq_key);
    if (error == 0)
        umtxq_signal(&uq->uq_key, INT_MAX);
    umtxq_unbusy(&uq->uq_key);
    umtxq_unlock(&uq->uq_key);
    umtx_key_release(&uq->uq_key);
    if (error == 0 && old_ceiling != NULL) {
        rv = suword32(old_ceiling, save_ceiling);
        error = rv == 0 ? 0 : EFAULT;
    }
    return (error);
}

/*
 * Lock a userland POSIX mutex.
 */
static int
do_lock_umutex(struct thread *td, struct umutex *m,
    struct _umtx_time *timeout, int mode)
{
    uint32_t flags;
    int error;

    error = fueword32(&m->m_flags, &flags);
    if (error == -1)
        return (EFAULT);

    switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
    case 0:
        error = do_lock_normal(td, m, flags, timeout, mode);
        break;
    case UMUTEX_PRIO_INHERIT:
        error = do_lock_pi(td, m, flags, timeout, mode);
        break;
    case UMUTEX_PRIO_PROTECT:
        error = do_lock_pp(td, m, flags, timeout, mode);
        break;
    default:
        return (EINVAL);
    }
    if (timeout == NULL) {
        if (error == EINTR && mode != _UMUTEX_WAIT)
            error = ERESTART;
    } else {
        /* Timed locking is not restarted. */
        if (error == ERESTART)
            error = EINTR;
    }
    return (error);
}

/*
 * Unlock a userland POSIX mutex.
 */
static int
do_unlock_umutex(struct thread *td, struct umutex *m, bool rb)
{
    uint32_t flags;
    int error;

    error = fueword32(&m->m_flags, &flags);
    if (error == -1)
        return (EFAULT);

    switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) {
    case 0:
        return (do_unlock_normal(td, m, flags, rb));
    case UMUTEX_PRIO_INHERIT:
        return (do_unlock_pi(td, m, flags, rb));
    case UMUTEX_PRIO_PROTECT:
        return (do_unlock_pp(td, m, flags, rb));
    }

    return (EINVAL);
}

static int
do_cv_wait(struct thread *td, struct ucond *cv, struct umutex *m,
    struct timespec *timeout, u_long wflags)
{
    struct umtx_abs_timeout timo;
    struct umtx_q *uq;
    uint32_t flags, clockid, hasw;
    int error;

    uq = td->td_umtxq;
    error = fueword32(&cv->c_flags, &flags);
    if (error == -1)
        return (EFAULT);
    error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &uq->uq_key);
    if (error != 0)
        return (error);

    if ((wflags & CVWAIT_CLOCKID) != 0) {
        error = fueword32(&cv->c_clockid, &clockid);
        if (error == -1) {
            umtx_key_release(&uq->uq_key);
            return (EFAULT);
        }
        if (clockid < CLOCK_REALTIME ||
            clockid >= CLOCK_THREAD_CPUTIME_ID) {
            /* Only hardware clock ids are valid here. */
            umtx_key_release(&uq->uq_key);
            return (EINVAL);
        }
    } else {
        clockid = CLOCK_REALTIME;
    }

    umtxq_lock(&uq->uq_key);
    umtxq_busy(&uq->uq_key);
    umtxq_insert(uq);
    umtxq_unlock(&uq->uq_key);

    /*
     * Set c_has_waiters to 1 before releasing the user mutex, but
     * don't dirty the cache line unnecessarily.
     */
    error = fueword32(&cv->c_has_waiters, &hasw);
    if (error == 0 && hasw == 0)
        suword32(&cv->c_has_waiters, 1);

    umtxq_unbusy_unlocked(&uq->uq_key);

    error = do_unlock_umutex(td, m, false);

    if (timeout != NULL)
        umtx_abs_timeout_init(&timo, clockid,
            (wflags & CVWAIT_ABSTIME) != 0, timeout);

    umtxq_lock(&uq->uq_key);
    if (error == 0) {
        error = umtxq_sleep(uq, "ucond", timeout == NULL ?
            NULL : &timo);
    }

    if ((uq->uq_flags & UQF_UMTXQ) == 0)
        error = 0;
    else {
        /*
         * This must be a timeout, an interruption by a signal,
         * or a spurious wakeup; clear the c_has_waiters flag
         * when necessary.
         */
        umtxq_busy(&uq->uq_key);
        if ((uq->uq_flags & UQF_UMTXQ) != 0) {
            int oldlen = uq->uq_cur_queue->length;
            umtxq_remove(uq);
            if (oldlen == 1) {
                umtxq_unlock(&uq->uq_key);
                suword32(&cv->c_has_waiters, 0);
                umtxq_lock(&uq->uq_key);
            }
        }
        umtxq_unbusy(&uq->uq_key);
        if (error == ERESTART)
            error = EINTR;
    }

    umtxq_unlock(&uq->uq_key);
    umtx_key_release(&uq->uq_key);
    return (error);
}
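/*
 * For reference, a condition wait built on the primitive above runs
 * roughly as follows (a sketch, not the actual libthr source; "m" is
 * the struct umutex embedded in the pthread mutex):
 *
 *	lock m in userland;
 *	while (!predicate) {
 *		_umtx_op(cv, UMTX_OP_CV_WAIT, wflags, m, ts);
 *		re-lock m;	(the kernel released it before sleeping)
 *	}
 *	unlock m in userland;
 *
 * do_cv_wait() releases m via do_unlock_umutex() before sleeping,
 * which closes the window between testing the predicate and going
 * to sleep.
 */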
/*
 * Signal a userland condition variable.
 */
static int
do_cv_signal(struct thread *td, struct ucond *cv)
{
    struct umtx_key key;
    int error, cnt, nwake;
    uint32_t flags;

    error = fueword32(&cv->c_flags, &flags);
    if (error == -1)
        return (EFAULT);
    if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
        return (error);
    umtxq_lock(&key);
    umtxq_busy(&key);
    cnt = umtxq_count(&key);
    nwake = umtxq_signal(&key, 1);
    if (cnt <= nwake) {
        umtxq_unlock(&key);
        error = suword32(&cv->c_has_waiters, 0);
        if (error == -1)
            error = EFAULT;
        umtxq_lock(&key);
    }
    umtxq_unbusy(&key);
    umtxq_unlock(&key);
    umtx_key_release(&key);
    return (error);
}

static int
do_cv_broadcast(struct thread *td, struct ucond *cv)
{
    struct umtx_key key;
    int error;
    uint32_t flags;

    error = fueword32(&cv->c_flags, &flags);
    if (error == -1)
        return (EFAULT);
    if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0)
        return (error);

    umtxq_lock(&key);
    umtxq_busy(&key);
    umtxq_signal(&key, INT_MAX);
    umtxq_unlock(&key);

    error = suword32(&cv->c_has_waiters, 0);
    if (error == -1)
        error = EFAULT;

    umtxq_unbusy_unlocked(&key);

    umtx_key_release(&key);
    return (error);
}

static int
do_rw_rdlock(struct thread *td, struct urwlock *rwlock, long fflag,
    struct _umtx_time *timeout)
{
    struct umtx_abs_timeout timo;
    struct umtx_q *uq;
    uint32_t flags, wrflags;
    int32_t state, oldstate;
    int32_t blocked_readers;
    int error, error1, rv;

    uq = td->td_umtxq;
    error = fueword32(&rwlock->rw_flags, &flags);
    if (error == -1)
        return (EFAULT);
    error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags),
        &uq->uq_key);
    if (error != 0)
        return (error);

    if (timeout != NULL)
        umtx_abs_timeout_init2(&timo, timeout);

    wrflags = URWLOCK_WRITE_OWNER;
    if (!(fflag & URWLOCK_PREFER_READER) &&
        !(flags & URWLOCK_PREFER_READER))
        wrflags |= URWLOCK_WRITE_WAITERS;

    for (;;) {
        rv = fueword32(&rwlock->rw_state, &state);
        if (rv == -1) {
            umtx_key_release(&uq->uq_key);
            return (EFAULT);
        }

        /* try to lock it */
        while (!(state & wrflags)) {
            if (__predict_false(URWLOCK_READER_COUNT(state) ==
                URWLOCK_MAX_READERS)) {
                umtx_key_release(&uq->uq_key);
                return (EAGAIN);
            }
            rv = casueword32(&rwlock->rw_state, state,
                &oldstate, state + 1);
            if (rv == -1) {
                umtx_key_release(&uq->uq_key);
                return (EFAULT);
            }
            if (rv == 0) {
                MPASS(oldstate == state);
                umtx_key_release(&uq->uq_key);
                return (0);
            }
            error = thread_check_susp(td, true);
            if (error != 0)
                break;
            state = oldstate;
        }

        if (error)
            break;
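        /*
         * A read lock could not be taken directly; fall back to the
         * sleep queue.  The chain "busy" state acquired below
         * serializes us against do_rw_unlock(), so the re-read of
         * rw_state cannot miss a wakeup.
         */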
        /* grab monitor lock */
        umtxq_lock(&uq->uq_key);
        umtxq_busy(&uq->uq_key);
        umtxq_unlock(&uq->uq_key);

        /*
         * Re-read the state, in case it changed between the
         * try-lock above and the check below.
         */
        rv = fueword32(&rwlock->rw_state, &state);
        if (rv == -1)
            error = EFAULT;

        /* set read contention bit */
        while (error == 0 && (state & wrflags) &&
            !(state & URWLOCK_READ_WAITERS)) {
            rv = casueword32(&rwlock->rw_state, state,
                &oldstate, state | URWLOCK_READ_WAITERS);
            if (rv == -1) {
                error = EFAULT;
                break;
            }
            if (rv == 0) {
                MPASS(oldstate == state);
                goto sleep;
            }
            state = oldstate;
            error = thread_check_susp(td, false);
            if (error != 0)
                break;
        }
        if (error != 0) {
            umtxq_unbusy_unlocked(&uq->uq_key);
            break;
        }

        /* The state changed while we were setting the flags; restart. */
        if (!(state & wrflags)) {
            umtxq_unbusy_unlocked(&uq->uq_key);
            error = thread_check_susp(td, true);
            if (error != 0)
                break;
            continue;
        }

sleep:
        /*
         * The contention bit is set; increase the read waiter
         * count before sleeping.
         */
        rv = fueword32(&rwlock->rw_blocked_readers,
            &blocked_readers);
        if (rv == -1) {
            umtxq_unbusy_unlocked(&uq->uq_key);
            error = EFAULT;
            break;
        }
        suword32(&rwlock->rw_blocked_readers, blocked_readers + 1);

        while (state & wrflags) {
            umtxq_lock(&uq->uq_key);
            umtxq_insert(uq);
            umtxq_unbusy(&uq->uq_key);

            error = umtxq_sleep(uq, "urdlck", timeout == NULL ?
                NULL : &timo);

            umtxq_busy(&uq->uq_key);
            umtxq_remove(uq);
            umtxq_unlock(&uq->uq_key);
            if (error)
                break;
            rv = fueword32(&rwlock->rw_state, &state);
            if (rv == -1) {
                error = EFAULT;
                break;
            }
        }

        /*
         * Decrease the read waiter count, and possibly clear the
         * read contention bit.
         */
        rv = fueword32(&rwlock->rw_blocked_readers,
            &blocked_readers);
        if (rv == -1) {
            umtxq_unbusy_unlocked(&uq->uq_key);
            error = EFAULT;
            break;
        }
        suword32(&rwlock->rw_blocked_readers, blocked_readers - 1);
        if (blocked_readers == 1) {
            rv = fueword32(&rwlock->rw_state, &state);
            if (rv == -1) {
                umtxq_unbusy_unlocked(&uq->uq_key);
                error = EFAULT;
                break;
            }
            for (;;) {
                rv = casueword32(&rwlock->rw_state, state,
                    &oldstate, state & ~URWLOCK_READ_WAITERS);
                if (rv == -1) {
                    error = EFAULT;
                    break;
                }
                if (rv == 0) {
                    MPASS(oldstate == state);
                    break;
                }
                state = oldstate;
                error1 = thread_check_susp(td, false);
                if (error1 != 0) {
                    if (error == 0)
                        error = error1;
                    break;
                }
            }
        }

        umtxq_unbusy_unlocked(&uq->uq_key);
        if (error != 0)
            break;
    }
    umtx_key_release(&uq->uq_key);
    if (error == ERESTART)
        error = EINTR;
    return (error);
}

static int
do_rw_wrlock(struct thread *td, struct urwlock *rwlock,
    struct _umtx_time *timeout)
{
    struct umtx_abs_timeout timo;
    struct umtx_q *uq;
    uint32_t flags;
    int32_t state, oldstate;
    int32_t blocked_writers;
    int32_t blocked_readers;
    int error, error1, rv;

    uq = td->td_umtxq;
    error = fueword32(&rwlock->rw_flags, &flags);
    if (error == -1)
        return (EFAULT);
    error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags),
        &uq->uq_key);
    if (error != 0)
        return (error);

    if (timeout != NULL)
umtx_abs_timeout_init2(&timo, timeout); 3241 3242 blocked_readers = 0; 3243 for (;;) { 3244 rv = fueword32(&rwlock->rw_state, &state); 3245 if (rv == -1) { 3246 umtx_key_release(&uq->uq_key); 3247 return (EFAULT); 3248 } 3249 while ((state & URWLOCK_WRITE_OWNER) == 0 && 3250 URWLOCK_READER_COUNT(state) == 0) { 3251 rv = casueword32(&rwlock->rw_state, state, 3252 &oldstate, state | URWLOCK_WRITE_OWNER); 3253 if (rv == -1) { 3254 umtx_key_release(&uq->uq_key); 3255 return (EFAULT); 3256 } 3257 if (rv == 0) { 3258 MPASS(oldstate == state); 3259 umtx_key_release(&uq->uq_key); 3260 return (0); 3261 } 3262 state = oldstate; 3263 error = thread_check_susp(td, true); 3264 if (error != 0) 3265 break; 3266 } 3267 3268 if (error) { 3269 if ((state & (URWLOCK_WRITE_OWNER | 3270 URWLOCK_WRITE_WAITERS)) == 0 && 3271 blocked_readers != 0) { 3272 umtxq_lock(&uq->uq_key); 3273 umtxq_busy(&uq->uq_key); 3274 umtxq_signal_queue(&uq->uq_key, INT_MAX, 3275 UMTX_SHARED_QUEUE); 3276 umtxq_unbusy(&uq->uq_key); 3277 umtxq_unlock(&uq->uq_key); 3278 } 3279 3280 break; 3281 } 3282 3283 /* grab monitor lock */ 3284 umtxq_lock(&uq->uq_key); 3285 umtxq_busy(&uq->uq_key); 3286 umtxq_unlock(&uq->uq_key); 3287 3288 /* 3289 * Re-read the state, in case it changed between the 3290 * try-lock above and the check below. 3291 */ 3292 rv = fueword32(&rwlock->rw_state, &state); 3293 if (rv == -1) 3294 error = EFAULT; 3295 3296 while (error == 0 && ((state & URWLOCK_WRITE_OWNER) || 3297 URWLOCK_READER_COUNT(state) != 0) && 3298 (state & URWLOCK_WRITE_WAITERS) == 0) { 3299 rv = casueword32(&rwlock->rw_state, state, 3300 &oldstate, state | URWLOCK_WRITE_WAITERS); 3301 if (rv == -1) { 3302 error = EFAULT; 3303 break; 3304 } 3305 if (rv == 0) { 3306 MPASS(oldstate == state); 3307 goto sleep; 3308 } 3309 state = oldstate; 3310 error = thread_check_susp(td, false); 3311 if (error != 0) 3312 break; 3313 } 3314 if (error != 0) { 3315 umtxq_unbusy_unlocked(&uq->uq_key); 3316 break; 3317 } 3318 3319 if ((state & URWLOCK_WRITE_OWNER) == 0 && 3320 URWLOCK_READER_COUNT(state) == 0) { 3321 umtxq_unbusy_unlocked(&uq->uq_key); 3322 error = thread_check_susp(td, false); 3323 if (error != 0) 3324 break; 3325 continue; 3326 } 3327 sleep: 3328 rv = fueword32(&rwlock->rw_blocked_writers, 3329 &blocked_writers); 3330 if (rv == -1) { 3331 umtxq_unbusy_unlocked(&uq->uq_key); 3332 error = EFAULT; 3333 break; 3334 } 3335 suword32(&rwlock->rw_blocked_writers, blocked_writers + 1); 3336 3337 while ((state & URWLOCK_WRITE_OWNER) || 3338 URWLOCK_READER_COUNT(state) != 0) { 3339 umtxq_lock(&uq->uq_key); 3340 umtxq_insert_queue(uq, UMTX_EXCLUSIVE_QUEUE); 3341 umtxq_unbusy(&uq->uq_key); 3342 3343 error = umtxq_sleep(uq, "uwrlck", timeout == NULL ? 
3344 NULL : &timo); 3345 3346 umtxq_busy(&uq->uq_key); 3347 umtxq_remove_queue(uq, UMTX_EXCLUSIVE_QUEUE); 3348 umtxq_unlock(&uq->uq_key); 3349 if (error) 3350 break; 3351 rv = fueword32(&rwlock->rw_state, &state); 3352 if (rv == -1) { 3353 error = EFAULT; 3354 break; 3355 } 3356 } 3357 3358 rv = fueword32(&rwlock->rw_blocked_writers, 3359 &blocked_writers); 3360 if (rv == -1) { 3361 umtxq_unbusy_unlocked(&uq->uq_key); 3362 error = EFAULT; 3363 break; 3364 } 3365 suword32(&rwlock->rw_blocked_writers, blocked_writers-1); 3366 if (blocked_writers == 1) { 3367 rv = fueword32(&rwlock->rw_state, &state); 3368 if (rv == -1) { 3369 umtxq_unbusy_unlocked(&uq->uq_key); 3370 error = EFAULT; 3371 break; 3372 } 3373 for (;;) { 3374 rv = casueword32(&rwlock->rw_state, state, 3375 &oldstate, state & ~URWLOCK_WRITE_WAITERS); 3376 if (rv == -1) { 3377 error = EFAULT; 3378 break; 3379 } 3380 if (rv == 0) { 3381 MPASS(oldstate == state); 3382 break; 3383 } 3384 state = oldstate; 3385 error1 = thread_check_susp(td, false); 3386 /* 3387 * We are leaving the URWLOCK_WRITE_WAITERS 3388 * behind, but this should not harm the 3389 * correctness. 3390 */ 3391 if (error1 != 0) { 3392 if (error == 0) 3393 error = error1; 3394 break; 3395 } 3396 } 3397 rv = fueword32(&rwlock->rw_blocked_readers, 3398 &blocked_readers); 3399 if (rv == -1) { 3400 umtxq_unbusy_unlocked(&uq->uq_key); 3401 error = EFAULT; 3402 break; 3403 } 3404 } else 3405 blocked_readers = 0; 3406 3407 umtxq_unbusy_unlocked(&uq->uq_key); 3408 } 3409 3410 umtx_key_release(&uq->uq_key); 3411 if (error == ERESTART) 3412 error = EINTR; 3413 return (error); 3414 } 3415 3416 static int 3417 do_rw_unlock(struct thread *td, struct urwlock *rwlock) 3418 { 3419 struct umtx_q *uq; 3420 uint32_t flags; 3421 int32_t state, oldstate; 3422 int error, rv, q, count; 3423 3424 uq = td->td_umtxq; 3425 error = fueword32(&rwlock->rw_flags, &flags); 3426 if (error == -1) 3427 return (EFAULT); 3428 error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key); 3429 if (error != 0) 3430 return (error); 3431 3432 error = fueword32(&rwlock->rw_state, &state); 3433 if (error == -1) { 3434 error = EFAULT; 3435 goto out; 3436 } 3437 if (state & URWLOCK_WRITE_OWNER) { 3438 for (;;) { 3439 rv = casueword32(&rwlock->rw_state, state, 3440 &oldstate, state & ~URWLOCK_WRITE_OWNER); 3441 if (rv == -1) { 3442 error = EFAULT; 3443 goto out; 3444 } 3445 if (rv == 1) { 3446 state = oldstate; 3447 if (!(oldstate & URWLOCK_WRITE_OWNER)) { 3448 error = EPERM; 3449 goto out; 3450 } 3451 error = thread_check_susp(td, true); 3452 if (error != 0) 3453 goto out; 3454 } else 3455 break; 3456 } 3457 } else if (URWLOCK_READER_COUNT(state) != 0) { 3458 for (;;) { 3459 rv = casueword32(&rwlock->rw_state, state, 3460 &oldstate, state - 1); 3461 if (rv == -1) { 3462 error = EFAULT; 3463 goto out; 3464 } 3465 if (rv == 1) { 3466 state = oldstate; 3467 if (URWLOCK_READER_COUNT(oldstate) == 0) { 3468 error = EPERM; 3469 goto out; 3470 } 3471 error = thread_check_susp(td, true); 3472 if (error != 0) 3473 goto out; 3474 } else 3475 break; 3476 } 3477 } else { 3478 error = EPERM; 3479 goto out; 3480 } 3481 3482 count = 0; 3483 3484 if (!(flags & URWLOCK_PREFER_READER)) { 3485 if (state & URWLOCK_WRITE_WAITERS) { 3486 count = 1; 3487 q = UMTX_EXCLUSIVE_QUEUE; 3488 } else if (state & URWLOCK_READ_WAITERS) { 3489 count = INT_MAX; 3490 q = UMTX_SHARED_QUEUE; 3491 } 3492 } else { 3493 if (state & URWLOCK_READ_WAITERS) { 3494 count = INT_MAX; 3495 q = UMTX_SHARED_QUEUE; 3496 } else if (state & 
URWLOCK_WRITE_WAITERS) {
            count = 1;
            q = UMTX_EXCLUSIVE_QUEUE;
        }
    }

    if (count) {
        umtxq_lock(&uq->uq_key);
        umtxq_busy(&uq->uq_key);
        umtxq_signal_queue(&uq->uq_key, count, q);
        umtxq_unbusy(&uq->uq_key);
        umtxq_unlock(&uq->uq_key);
    }
out:
    umtx_key_release(&uq->uq_key);
    return (error);
}

#if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
static int
do_sem_wait(struct thread *td, struct _usem *sem, struct _umtx_time *timeout)
{
    struct umtx_abs_timeout timo;
    struct umtx_q *uq;
    uint32_t flags, count, count1;
    int error, rv, rv1;

    uq = td->td_umtxq;
    error = fueword32(&sem->_flags, &flags);
    if (error == -1)
        return (EFAULT);
    error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &uq->uq_key);
    if (error != 0)
        return (error);

    if (timeout != NULL)
        umtx_abs_timeout_init2(&timo, timeout);

again:
    umtxq_lock(&uq->uq_key);
    umtxq_busy(&uq->uq_key);
    umtxq_insert(uq);
    umtxq_unlock(&uq->uq_key);
    rv = casueword32(&sem->_has_waiters, 0, &count1, 1);
    if (rv == 0)
        rv1 = fueword32(&sem->_count, &count);
    if (rv == -1 || (rv == 0 && (rv1 == -1 || count != 0)) ||
        (rv == 1 && count1 == 0)) {
        umtxq_lock(&uq->uq_key);
        umtxq_unbusy(&uq->uq_key);
        umtxq_remove(uq);
        umtxq_unlock(&uq->uq_key);
        if (rv == 1) {
            rv = thread_check_susp(td, true);
            if (rv == 0)
                goto again;
            error = rv;
            goto out;
        }
        if (rv == 0)
            rv = rv1;
        error = rv == -1 ? EFAULT : 0;
        goto out;
    }
    umtxq_lock(&uq->uq_key);
    umtxq_unbusy(&uq->uq_key);

    error = umtxq_sleep(uq, "usem", timeout == NULL ? NULL : &timo);

    if ((uq->uq_flags & UQF_UMTXQ) == 0)
        error = 0;
    else {
        umtxq_remove(uq);
        /* A relative timeout cannot be restarted. */
        if (error == ERESTART && timeout != NULL &&
            (timeout->_flags & UMTX_ABSTIME) == 0)
            error = EINTR;
    }
    umtxq_unlock(&uq->uq_key);
out:
    umtx_key_release(&uq->uq_key);
    return (error);
}

/*
 * Signal a userland semaphore.
 */
static int
do_sem_wake(struct thread *td, struct _usem *sem)
{
    struct umtx_key key;
    int error, cnt;
    uint32_t flags;

    error = fueword32(&sem->_flags, &flags);
    if (error == -1)
        return (EFAULT);
    if ((error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &key)) != 0)
        return (error);
    umtxq_lock(&key);
    umtxq_busy(&key);
    cnt = umtxq_count(&key);
    if (cnt > 0) {
        /*
         * The count is greater than 0, which means the memory is
         * still referenced by user code, so it is safe to update
         * the _has_waiters flag.
3603 */ 3604 if (cnt == 1) { 3605 umtxq_unlock(&key); 3606 error = suword32(&sem->_has_waiters, 0); 3607 umtxq_lock(&key); 3608 if (error == -1) 3609 error = EFAULT; 3610 } 3611 umtxq_signal(&key, 1); 3612 } 3613 umtxq_unbusy(&key); 3614 umtxq_unlock(&key); 3615 umtx_key_release(&key); 3616 return (error); 3617 } 3618 #endif 3619 3620 static int 3621 do_sem2_wait(struct thread *td, struct _usem2 *sem, struct _umtx_time *timeout) 3622 { 3623 struct umtx_abs_timeout timo; 3624 struct umtx_q *uq; 3625 uint32_t count, flags; 3626 int error, rv; 3627 3628 uq = td->td_umtxq; 3629 flags = fuword32(&sem->_flags); 3630 if (timeout != NULL) 3631 umtx_abs_timeout_init2(&timo, timeout); 3632 3633 again: 3634 error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &uq->uq_key); 3635 if (error != 0) 3636 return (error); 3637 umtxq_lock(&uq->uq_key); 3638 umtxq_busy(&uq->uq_key); 3639 umtxq_insert(uq); 3640 umtxq_unlock(&uq->uq_key); 3641 rv = fueword32(&sem->_count, &count); 3642 if (rv == -1) { 3643 umtxq_lock(&uq->uq_key); 3644 umtxq_unbusy(&uq->uq_key); 3645 umtxq_remove(uq); 3646 umtxq_unlock(&uq->uq_key); 3647 umtx_key_release(&uq->uq_key); 3648 return (EFAULT); 3649 } 3650 for (;;) { 3651 if (USEM_COUNT(count) != 0) { 3652 umtxq_lock(&uq->uq_key); 3653 umtxq_unbusy(&uq->uq_key); 3654 umtxq_remove(uq); 3655 umtxq_unlock(&uq->uq_key); 3656 umtx_key_release(&uq->uq_key); 3657 return (0); 3658 } 3659 if (count == USEM_HAS_WAITERS) 3660 break; 3661 rv = casueword32(&sem->_count, 0, &count, USEM_HAS_WAITERS); 3662 if (rv == 0) 3663 break; 3664 umtxq_lock(&uq->uq_key); 3665 umtxq_unbusy(&uq->uq_key); 3666 umtxq_remove(uq); 3667 umtxq_unlock(&uq->uq_key); 3668 umtx_key_release(&uq->uq_key); 3669 if (rv == -1) 3670 return (EFAULT); 3671 rv = thread_check_susp(td, true); 3672 if (rv != 0) 3673 return (rv); 3674 goto again; 3675 } 3676 umtxq_lock(&uq->uq_key); 3677 umtxq_unbusy(&uq->uq_key); 3678 3679 error = umtxq_sleep(uq, "usem", timeout == NULL ? NULL : &timo); 3680 3681 if ((uq->uq_flags & UQF_UMTXQ) == 0) 3682 error = 0; 3683 else { 3684 umtxq_remove(uq); 3685 if (timeout != NULL && (timeout->_flags & UMTX_ABSTIME) == 0) { 3686 /* A relative timeout cannot be restarted. */ 3687 if (error == ERESTART) 3688 error = EINTR; 3689 if (error == EINTR) { 3690 kern_clock_gettime(curthread, timo.clockid, 3691 &timo.cur); 3692 timespecsub(&timo.end, &timo.cur, 3693 &timeout->_timeout); 3694 } 3695 } 3696 } 3697 umtxq_unlock(&uq->uq_key); 3698 umtx_key_release(&uq->uq_key); 3699 return (error); 3700 } 3701 3702 /* 3703 * Signal a userland semaphore. 3704 */ 3705 static int 3706 do_sem2_wake(struct thread *td, struct _usem2 *sem) 3707 { 3708 struct umtx_key key; 3709 int error, cnt, rv; 3710 uint32_t count, flags; 3711 3712 rv = fueword32(&sem->_flags, &flags); 3713 if (rv == -1) 3714 return (EFAULT); 3715 if ((error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &key)) != 0) 3716 return (error); 3717 umtxq_lock(&key); 3718 umtxq_busy(&key); 3719 cnt = umtxq_count(&key); 3720 if (cnt > 0) { 3721 /* 3722 * If this was the last sleeping thread, clear the waiters 3723 * flag in _count. 
3724 */ 3725 if (cnt == 1) { 3726 umtxq_unlock(&key); 3727 rv = fueword32(&sem->_count, &count); 3728 while (rv != -1 && count & USEM_HAS_WAITERS) { 3729 rv = casueword32(&sem->_count, count, &count, 3730 count & ~USEM_HAS_WAITERS); 3731 if (rv == 1) { 3732 rv = thread_check_susp(td, true); 3733 if (rv != 0) 3734 break; 3735 } 3736 } 3737 if (rv == -1) 3738 error = EFAULT; 3739 else if (rv > 0) { 3740 error = rv; 3741 } 3742 umtxq_lock(&key); 3743 } 3744 3745 umtxq_signal(&key, 1); 3746 } 3747 umtxq_unbusy(&key); 3748 umtxq_unlock(&key); 3749 umtx_key_release(&key); 3750 return (error); 3751 } 3752 3753 #ifdef COMPAT_FREEBSD10 3754 int 3755 freebsd10__umtx_lock(struct thread *td, struct freebsd10__umtx_lock_args *uap) 3756 { 3757 return (do_lock_umtx(td, uap->umtx, td->td_tid, 0)); 3758 } 3759 3760 int 3761 freebsd10__umtx_unlock(struct thread *td, 3762 struct freebsd10__umtx_unlock_args *uap) 3763 { 3764 return (do_unlock_umtx(td, uap->umtx, td->td_tid)); 3765 } 3766 #endif 3767 3768 inline int 3769 umtx_copyin_timeout(const void *uaddr, struct timespec *tsp) 3770 { 3771 int error; 3772 3773 error = copyin(uaddr, tsp, sizeof(*tsp)); 3774 if (error == 0) { 3775 if (tsp->tv_sec < 0 || 3776 tsp->tv_nsec >= 1000000000 || 3777 tsp->tv_nsec < 0) 3778 error = EINVAL; 3779 } 3780 return (error); 3781 } 3782 3783 static inline int 3784 umtx_copyin_umtx_time(const void *uaddr, size_t size, struct _umtx_time *tp) 3785 { 3786 int error; 3787 3788 if (size <= sizeof(tp->_timeout)) { 3789 tp->_clockid = CLOCK_REALTIME; 3790 tp->_flags = 0; 3791 error = copyin(uaddr, &tp->_timeout, sizeof(tp->_timeout)); 3792 } else 3793 error = copyin(uaddr, tp, sizeof(*tp)); 3794 if (error != 0) 3795 return (error); 3796 if (tp->_timeout.tv_sec < 0 || 3797 tp->_timeout.tv_nsec >= 1000000000 || tp->_timeout.tv_nsec < 0) 3798 return (EINVAL); 3799 return (0); 3800 } 3801 3802 static int 3803 umtx_copyin_robust_lists(const void *uaddr, size_t size, 3804 struct umtx_robust_lists_params *rb) 3805 { 3806 3807 if (size > sizeof(*rb)) 3808 return (EINVAL); 3809 return (copyin(uaddr, rb, size)); 3810 } 3811 3812 static int 3813 umtx_copyout_timeout(void *uaddr, size_t sz, struct timespec *tsp) 3814 { 3815 3816 /* 3817 * Should be guaranteed by the caller, sz == uaddr1 - sizeof(_umtx_time) 3818 * and we're only called if sz >= sizeof(timespec) as supplied in the 3819 * copyops. 3820 */ 3821 KASSERT(sz >= sizeof(*tsp), 3822 ("umtx_copyops specifies incorrect sizes")); 3823 3824 return (copyout(tsp, uaddr, sizeof(*tsp))); 3825 } 3826 3827 #ifdef COMPAT_FREEBSD10 3828 static int 3829 __umtx_op_lock_umtx(struct thread *td, struct _umtx_op_args *uap, 3830 const struct umtx_copyops *ops) 3831 { 3832 struct timespec *ts, timeout; 3833 int error; 3834 3835 /* Allow a null timespec (wait forever). 
*/ 3836 if (uap->uaddr2 == NULL) 3837 ts = NULL; 3838 else { 3839 error = ops->copyin_timeout(uap->uaddr2, &timeout); 3840 if (error != 0) 3841 return (error); 3842 ts = &timeout; 3843 } 3844 #ifdef COMPAT_FREEBSD32 3845 if (ops->compat32) 3846 return (do_lock_umtx32(td, uap->obj, uap->val, ts)); 3847 #endif 3848 return (do_lock_umtx(td, uap->obj, uap->val, ts)); 3849 } 3850 3851 static int 3852 __umtx_op_unlock_umtx(struct thread *td, struct _umtx_op_args *uap, 3853 const struct umtx_copyops *ops) 3854 { 3855 #ifdef COMPAT_FREEBSD32 3856 if (ops->compat32) 3857 return (do_unlock_umtx32(td, uap->obj, uap->val)); 3858 #endif 3859 return (do_unlock_umtx(td, uap->obj, uap->val)); 3860 } 3861 #endif /* COMPAT_FREEBSD10 */ 3862 3863 #if !defined(COMPAT_FREEBSD10) 3864 static int 3865 __umtx_op_unimpl(struct thread *td __unused, struct _umtx_op_args *uap __unused, 3866 const struct umtx_copyops *ops __unused) 3867 { 3868 return (EOPNOTSUPP); 3869 } 3870 #endif /* COMPAT_FREEBSD10 */ 3871 3872 static int 3873 __umtx_op_wait(struct thread *td, struct _umtx_op_args *uap, 3874 const struct umtx_copyops *ops) 3875 { 3876 struct _umtx_time timeout, *tm_p; 3877 int error; 3878 3879 if (uap->uaddr2 == NULL) 3880 tm_p = NULL; 3881 else { 3882 error = ops->copyin_umtx_time( 3883 uap->uaddr2, (size_t)uap->uaddr1, &timeout); 3884 if (error != 0) 3885 return (error); 3886 tm_p = &timeout; 3887 } 3888 return (do_wait(td, uap->obj, uap->val, tm_p, ops->compat32, 0)); 3889 } 3890 3891 static int 3892 __umtx_op_wait_uint(struct thread *td, struct _umtx_op_args *uap, 3893 const struct umtx_copyops *ops) 3894 { 3895 struct _umtx_time timeout, *tm_p; 3896 int error; 3897 3898 if (uap->uaddr2 == NULL) 3899 tm_p = NULL; 3900 else { 3901 error = ops->copyin_umtx_time( 3902 uap->uaddr2, (size_t)uap->uaddr1, &timeout); 3903 if (error != 0) 3904 return (error); 3905 tm_p = &timeout; 3906 } 3907 return (do_wait(td, uap->obj, uap->val, tm_p, 1, 0)); 3908 } 3909 3910 static int 3911 __umtx_op_wait_uint_private(struct thread *td, struct _umtx_op_args *uap, 3912 const struct umtx_copyops *ops) 3913 { 3914 struct _umtx_time *tm_p, timeout; 3915 int error; 3916 3917 if (uap->uaddr2 == NULL) 3918 tm_p = NULL; 3919 else { 3920 error = ops->copyin_umtx_time( 3921 uap->uaddr2, (size_t)uap->uaddr1, &timeout); 3922 if (error != 0) 3923 return (error); 3924 tm_p = &timeout; 3925 } 3926 return (do_wait(td, uap->obj, uap->val, tm_p, 1, 1)); 3927 } 3928 3929 static int 3930 __umtx_op_wake(struct thread *td, struct _umtx_op_args *uap, 3931 const struct umtx_copyops *ops __unused) 3932 { 3933 3934 return (kern_umtx_wake(td, uap->obj, uap->val, 0)); 3935 } 3936 3937 #define BATCH_SIZE 128 3938 static int 3939 __umtx_op_nwake_private_native(struct thread *td, struct _umtx_op_args *uap) 3940 { 3941 char *uaddrs[BATCH_SIZE], **upp; 3942 int count, error, i, pos, tocopy; 3943 3944 upp = (char **)uap->obj; 3945 error = 0; 3946 for (count = uap->val, pos = 0; count > 0; count -= tocopy, 3947 pos += tocopy) { 3948 tocopy = MIN(count, BATCH_SIZE); 3949 error = copyin(upp + pos, uaddrs, tocopy * sizeof(char *)); 3950 if (error != 0) 3951 break; 3952 for (i = 0; i < tocopy; ++i) { 3953 kern_umtx_wake(td, uaddrs[i], INT_MAX, 1); 3954 } 3955 maybe_yield(); 3956 } 3957 return (error); 3958 } 3959 3960 static int 3961 __umtx_op_nwake_private_compat32(struct thread *td, struct _umtx_op_args *uap) 3962 { 3963 uint32_t uaddrs[BATCH_SIZE], *upp; 3964 int count, error, i, pos, tocopy; 3965 3966 upp = (uint32_t *)uap->obj; 3967 error = 0; 3968 for (count = 
uap->val, pos = 0; count > 0; count -= tocopy, 3969 pos += tocopy) { 3970 tocopy = MIN(count, BATCH_SIZE); 3971 error = copyin(upp + pos, uaddrs, tocopy * sizeof(uint32_t)); 3972 if (error != 0) 3973 break; 3974 for (i = 0; i < tocopy; ++i) { 3975 kern_umtx_wake(td, (void *)(uintptr_t)uaddrs[i], 3976 INT_MAX, 1); 3977 } 3978 maybe_yield(); 3979 } 3980 return (error); 3981 } 3982 3983 static int 3984 __umtx_op_nwake_private(struct thread *td, struct _umtx_op_args *uap, 3985 const struct umtx_copyops *ops) 3986 { 3987 3988 if (ops->compat32) 3989 return (__umtx_op_nwake_private_compat32(td, uap)); 3990 return (__umtx_op_nwake_private_native(td, uap)); 3991 } 3992 3993 static int 3994 __umtx_op_wake_private(struct thread *td, struct _umtx_op_args *uap, 3995 const struct umtx_copyops *ops __unused) 3996 { 3997 3998 return (kern_umtx_wake(td, uap->obj, uap->val, 1)); 3999 } 4000 4001 static int 4002 __umtx_op_lock_umutex(struct thread *td, struct _umtx_op_args *uap, 4003 const struct umtx_copyops *ops) 4004 { 4005 struct _umtx_time *tm_p, timeout; 4006 int error; 4007 4008 /* Allow a null timespec (wait forever). */ 4009 if (uap->uaddr2 == NULL) 4010 tm_p = NULL; 4011 else { 4012 error = ops->copyin_umtx_time( 4013 uap->uaddr2, (size_t)uap->uaddr1, &timeout); 4014 if (error != 0) 4015 return (error); 4016 tm_p = &timeout; 4017 } 4018 return (do_lock_umutex(td, uap->obj, tm_p, 0)); 4019 } 4020 4021 static int 4022 __umtx_op_trylock_umutex(struct thread *td, struct _umtx_op_args *uap, 4023 const struct umtx_copyops *ops __unused) 4024 { 4025 4026 return (do_lock_umutex(td, uap->obj, NULL, _UMUTEX_TRY)); 4027 } 4028 4029 static int 4030 __umtx_op_wait_umutex(struct thread *td, struct _umtx_op_args *uap, 4031 const struct umtx_copyops *ops) 4032 { 4033 struct _umtx_time *tm_p, timeout; 4034 int error; 4035 4036 /* Allow a null timespec (wait forever). */ 4037 if (uap->uaddr2 == NULL) 4038 tm_p = NULL; 4039 else { 4040 error = ops->copyin_umtx_time( 4041 uap->uaddr2, (size_t)uap->uaddr1, &timeout); 4042 if (error != 0) 4043 return (error); 4044 tm_p = &timeout; 4045 } 4046 return (do_lock_umutex(td, uap->obj, tm_p, _UMUTEX_WAIT)); 4047 } 4048 4049 static int 4050 __umtx_op_wake_umutex(struct thread *td, struct _umtx_op_args *uap, 4051 const struct umtx_copyops *ops __unused) 4052 { 4053 4054 return (do_wake_umutex(td, uap->obj)); 4055 } 4056 4057 static int 4058 __umtx_op_unlock_umutex(struct thread *td, struct _umtx_op_args *uap, 4059 const struct umtx_copyops *ops __unused) 4060 { 4061 4062 return (do_unlock_umutex(td, uap->obj, false)); 4063 } 4064 4065 static int 4066 __umtx_op_set_ceiling(struct thread *td, struct _umtx_op_args *uap, 4067 const struct umtx_copyops *ops __unused) 4068 { 4069 4070 return (do_set_ceiling(td, uap->obj, uap->val, uap->uaddr1)); 4071 } 4072 4073 static int 4074 __umtx_op_cv_wait(struct thread *td, struct _umtx_op_args *uap, 4075 const struct umtx_copyops *ops) 4076 { 4077 struct timespec *ts, timeout; 4078 int error; 4079 4080 /* Allow a null timespec (wait forever). 
*/ 4081 if (uap->uaddr2 == NULL) 4082 ts = NULL; 4083 else { 4084 error = ops->copyin_timeout(uap->uaddr2, &timeout); 4085 if (error != 0) 4086 return (error); 4087 ts = &timeout; 4088 } 4089 return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val)); 4090 } 4091 4092 static int 4093 __umtx_op_cv_signal(struct thread *td, struct _umtx_op_args *uap, 4094 const struct umtx_copyops *ops __unused) 4095 { 4096 4097 return (do_cv_signal(td, uap->obj)); 4098 } 4099 4100 static int 4101 __umtx_op_cv_broadcast(struct thread *td, struct _umtx_op_args *uap, 4102 const struct umtx_copyops *ops __unused) 4103 { 4104 4105 return (do_cv_broadcast(td, uap->obj)); 4106 } 4107 4108 static int 4109 __umtx_op_rw_rdlock(struct thread *td, struct _umtx_op_args *uap, 4110 const struct umtx_copyops *ops) 4111 { 4112 struct _umtx_time timeout; 4113 int error; 4114 4115 /* Allow a null timespec (wait forever). */ 4116 if (uap->uaddr2 == NULL) { 4117 error = do_rw_rdlock(td, uap->obj, uap->val, 0); 4118 } else { 4119 error = ops->copyin_umtx_time(uap->uaddr2, 4120 (size_t)uap->uaddr1, &timeout); 4121 if (error != 0) 4122 return (error); 4123 error = do_rw_rdlock(td, uap->obj, uap->val, &timeout); 4124 } 4125 return (error); 4126 } 4127 4128 static int 4129 __umtx_op_rw_wrlock(struct thread *td, struct _umtx_op_args *uap, 4130 const struct umtx_copyops *ops) 4131 { 4132 struct _umtx_time timeout; 4133 int error; 4134 4135 /* Allow a null timespec (wait forever). */ 4136 if (uap->uaddr2 == NULL) { 4137 error = do_rw_wrlock(td, uap->obj, 0); 4138 } else { 4139 error = ops->copyin_umtx_time(uap->uaddr2, 4140 (size_t)uap->uaddr1, &timeout); 4141 if (error != 0) 4142 return (error); 4143 4144 error = do_rw_wrlock(td, uap->obj, &timeout); 4145 } 4146 return (error); 4147 } 4148 4149 static int 4150 __umtx_op_rw_unlock(struct thread *td, struct _umtx_op_args *uap, 4151 const struct umtx_copyops *ops __unused) 4152 { 4153 4154 return (do_rw_unlock(td, uap->obj)); 4155 } 4156 4157 #if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10) 4158 static int 4159 __umtx_op_sem_wait(struct thread *td, struct _umtx_op_args *uap, 4160 const struct umtx_copyops *ops) 4161 { 4162 struct _umtx_time *tm_p, timeout; 4163 int error; 4164 4165 /* Allow a null timespec (wait forever). */ 4166 if (uap->uaddr2 == NULL) 4167 tm_p = NULL; 4168 else { 4169 error = ops->copyin_umtx_time( 4170 uap->uaddr2, (size_t)uap->uaddr1, &timeout); 4171 if (error != 0) 4172 return (error); 4173 tm_p = &timeout; 4174 } 4175 return (do_sem_wait(td, uap->obj, tm_p)); 4176 } 4177 4178 static int 4179 __umtx_op_sem_wake(struct thread *td, struct _umtx_op_args *uap, 4180 const struct umtx_copyops *ops __unused) 4181 { 4182 4183 return (do_sem_wake(td, uap->obj)); 4184 } 4185 #endif 4186 4187 static int 4188 __umtx_op_wake2_umutex(struct thread *td, struct _umtx_op_args *uap, 4189 const struct umtx_copyops *ops __unused) 4190 { 4191 4192 return (do_wake2_umutex(td, uap->obj, uap->val)); 4193 } 4194 4195 static int 4196 __umtx_op_sem2_wait(struct thread *td, struct _umtx_op_args *uap, 4197 const struct umtx_copyops *ops) 4198 { 4199 struct _umtx_time *tm_p, timeout; 4200 size_t uasize; 4201 int error; 4202 4203 /* Allow a null timespec (wait forever). 
*/ 4204 if (uap->uaddr2 == NULL) { 4205 uasize = 0; 4206 tm_p = NULL; 4207 } else { 4208 uasize = (size_t)uap->uaddr1; 4209 error = ops->copyin_umtx_time(uap->uaddr2, uasize, &timeout); 4210 if (error != 0) 4211 return (error); 4212 tm_p = &timeout; 4213 } 4214 error = do_sem2_wait(td, uap->obj, tm_p); 4215 if (error == EINTR && uap->uaddr2 != NULL && 4216 (timeout._flags & UMTX_ABSTIME) == 0 && 4217 uasize >= ops->umtx_time_sz + ops->timespec_sz) { 4218 error = ops->copyout_timeout( 4219 (void *)((uintptr_t)uap->uaddr2 + ops->umtx_time_sz), 4220 uasize - ops->umtx_time_sz, &timeout._timeout); 4221 if (error == 0) { 4222 error = EINTR; 4223 } 4224 } 4225 4226 return (error); 4227 } 4228 4229 static int 4230 __umtx_op_sem2_wake(struct thread *td, struct _umtx_op_args *uap, 4231 const struct umtx_copyops *ops __unused) 4232 { 4233 4234 return (do_sem2_wake(td, uap->obj)); 4235 } 4236 4237 #define USHM_OBJ_UMTX(o) \ 4238 ((struct umtx_shm_obj_list *)(&(o)->umtx_data)) 4239 4240 #define USHMF_REG_LINKED 0x0001 4241 #define USHMF_OBJ_LINKED 0x0002 4242 struct umtx_shm_reg { 4243 TAILQ_ENTRY(umtx_shm_reg) ushm_reg_link; 4244 LIST_ENTRY(umtx_shm_reg) ushm_obj_link; 4245 struct umtx_key ushm_key; 4246 struct ucred *ushm_cred; 4247 struct shmfd *ushm_obj; 4248 u_int ushm_refcnt; 4249 u_int ushm_flags; 4250 }; 4251 4252 LIST_HEAD(umtx_shm_obj_list, umtx_shm_reg); 4253 TAILQ_HEAD(umtx_shm_reg_head, umtx_shm_reg); 4254 4255 static uma_zone_t umtx_shm_reg_zone; 4256 static struct umtx_shm_reg_head umtx_shm_registry[UMTX_CHAINS]; 4257 static struct mtx umtx_shm_lock; 4258 static struct umtx_shm_reg_head umtx_shm_reg_delfree = 4259 TAILQ_HEAD_INITIALIZER(umtx_shm_reg_delfree); 4260 4261 static void umtx_shm_free_reg(struct umtx_shm_reg *reg); 4262 4263 static void 4264 umtx_shm_reg_delfree_tq(void *context __unused, int pending __unused) 4265 { 4266 struct umtx_shm_reg_head d; 4267 struct umtx_shm_reg *reg, *reg1; 4268 4269 TAILQ_INIT(&d); 4270 mtx_lock(&umtx_shm_lock); 4271 TAILQ_CONCAT(&d, &umtx_shm_reg_delfree, ushm_reg_link); 4272 mtx_unlock(&umtx_shm_lock); 4273 TAILQ_FOREACH_SAFE(reg, &d, ushm_reg_link, reg1) { 4274 TAILQ_REMOVE(&d, reg, ushm_reg_link); 4275 umtx_shm_free_reg(reg); 4276 } 4277 } 4278 4279 static struct task umtx_shm_reg_delfree_task = 4280 TASK_INITIALIZER(0, umtx_shm_reg_delfree_tq, NULL); 4281 4282 static struct umtx_shm_reg * 4283 umtx_shm_find_reg_locked(const struct umtx_key *key) 4284 { 4285 struct umtx_shm_reg *reg; 4286 struct umtx_shm_reg_head *reg_head; 4287 4288 KASSERT(key->shared, ("umtx_p_find_rg: private key")); 4289 mtx_assert(&umtx_shm_lock, MA_OWNED); 4290 reg_head = &umtx_shm_registry[key->hash]; 4291 TAILQ_FOREACH(reg, reg_head, ushm_reg_link) { 4292 KASSERT(reg->ushm_key.shared, 4293 ("non-shared key on reg %p %d", reg, reg->ushm_key.shared)); 4294 if (reg->ushm_key.info.shared.object == 4295 key->info.shared.object && 4296 reg->ushm_key.info.shared.offset == 4297 key->info.shared.offset) { 4298 KASSERT(reg->ushm_key.type == TYPE_SHM, ("TYPE_USHM")); 4299 KASSERT(reg->ushm_refcnt > 0, 4300 ("reg %p refcnt 0 onlist", reg)); 4301 KASSERT((reg->ushm_flags & USHMF_REG_LINKED) != 0, 4302 ("reg %p not linked", reg)); 4303 reg->ushm_refcnt++; 4304 return (reg); 4305 } 4306 } 4307 return (NULL); 4308 } 4309 4310 static struct umtx_shm_reg * 4311 umtx_shm_find_reg(const struct umtx_key *key) 4312 { 4313 struct umtx_shm_reg *reg; 4314 4315 mtx_lock(&umtx_shm_lock); 4316 reg = umtx_shm_find_reg_locked(key); 4317 mtx_unlock(&umtx_shm_lock); 4318 return (reg); 4319 } 4320 
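/*
 * Registry reference-counting protocol, summarizing the code above
 * and below: umtx_shm_find_reg*() returns the entry with an extra
 * reference held, which callers drop with umtx_shm_unref_reg().  The
 * registry itself holds one reference for as long as the entry is
 * linked (USHMF_REG_LINKED), so an entry is freed only once it is
 * both unlinked and unreferenced.
 */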
static void
umtx_shm_free_reg(struct umtx_shm_reg *reg)
{

    chgumtxcnt(reg->ushm_cred->cr_ruidinfo, -1, 0);
    crfree(reg->ushm_cred);
    shm_drop(reg->ushm_obj);
    uma_zfree(umtx_shm_reg_zone, reg);
}

static bool
umtx_shm_unref_reg_locked(struct umtx_shm_reg *reg, bool force)
{
    bool res;

    mtx_assert(&umtx_shm_lock, MA_OWNED);
    KASSERT(reg->ushm_refcnt > 0, ("ushm_reg %p refcnt 0", reg));
    reg->ushm_refcnt--;
    res = reg->ushm_refcnt == 0;
    if (res || force) {
        if ((reg->ushm_flags & USHMF_REG_LINKED) != 0) {
            TAILQ_REMOVE(&umtx_shm_registry[reg->ushm_key.hash],
                reg, ushm_reg_link);
            reg->ushm_flags &= ~USHMF_REG_LINKED;
        }
        if ((reg->ushm_flags & USHMF_OBJ_LINKED) != 0) {
            LIST_REMOVE(reg, ushm_obj_link);
            reg->ushm_flags &= ~USHMF_OBJ_LINKED;
        }
    }
    return (res);
}

static void
umtx_shm_unref_reg(struct umtx_shm_reg *reg, bool force)
{
    vm_object_t object;
    bool dofree;

    if (force) {
        object = reg->ushm_obj->shm_object;
        VM_OBJECT_WLOCK(object);
        object->flags |= OBJ_UMTXDEAD;
        VM_OBJECT_WUNLOCK(object);
    }
    mtx_lock(&umtx_shm_lock);
    dofree = umtx_shm_unref_reg_locked(reg, force);
    mtx_unlock(&umtx_shm_lock);
    if (dofree)
        umtx_shm_free_reg(reg);
}

void
umtx_shm_object_init(vm_object_t object)
{

    LIST_INIT(USHM_OBJ_UMTX(object));
}

void
umtx_shm_object_terminated(vm_object_t object)
{
    struct umtx_shm_reg *reg, *reg1;
    bool dofree;

    if (LIST_EMPTY(USHM_OBJ_UMTX(object)))
        return;

    dofree = false;
    mtx_lock(&umtx_shm_lock);
    LIST_FOREACH_SAFE(reg, USHM_OBJ_UMTX(object), ushm_obj_link, reg1) {
        if (umtx_shm_unref_reg_locked(reg, true)) {
            TAILQ_INSERT_TAIL(&umtx_shm_reg_delfree, reg,
                ushm_reg_link);
            dofree = true;
        }
    }
    mtx_unlock(&umtx_shm_lock);
    if (dofree)
        taskqueue_enqueue(taskqueue_thread, &umtx_shm_reg_delfree_task);
}

static int
umtx_shm_create_reg(struct thread *td, const struct umtx_key *key,
    struct umtx_shm_reg **res)
{
    struct umtx_shm_reg *reg, *reg1;
    struct ucred *cred;
    int error;

    reg = umtx_shm_find_reg(key);
    if (reg != NULL) {
        *res = reg;
        return (0);
    }
    cred = td->td_ucred;
    if (!chgumtxcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_UMTXP)))
        return (ENOMEM);
    reg = uma_zalloc(umtx_shm_reg_zone, M_WAITOK | M_ZERO);
    reg->ushm_refcnt = 1;
    bcopy(key, &reg->ushm_key, sizeof(*key));
    reg->ushm_obj = shm_alloc(td->td_ucred, O_RDWR, false);
    reg->ushm_cred = crhold(cred);
    error = shm_dotruncate(reg->ushm_obj, PAGE_SIZE);
    if (error != 0) {
        umtx_shm_free_reg(reg);
        return (error);
    }
    mtx_lock(&umtx_shm_lock);
    reg1 = umtx_shm_find_reg_locked(key);
    if (reg1 != NULL) {
        mtx_unlock(&umtx_shm_lock);
        umtx_shm_free_reg(reg);
        *res = reg1;
        return (0);
    }
    reg->ushm_refcnt++;
    TAILQ_INSERT_TAIL(&umtx_shm_registry[key->hash], reg, ushm_reg_link);
    LIST_INSERT_HEAD(USHM_OBJ_UMTX(key->info.shared.object), reg,
        ushm_obj_link);
    reg->ushm_flags = USHMF_REG_LINKED | USHMF_OBJ_LINKED;
    mtx_unlock(&umtx_shm_lock);
    *res = reg;
    return (0);
}
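/*
 * Userland reaches this machinery through _umtx_op(2) with
 * UMTX_OP_SHM.  A minimal sketch of creating and mapping an anonymous
 * shared page keyed by an address (illustrative only; "key_addr" is a
 * hypothetical pointer into a process-shared mapping):
 *
 *	int fd = _umtx_op(NULL, UMTX_OP_SHM, UMTX_SHM_CREAT,
 *	    key_addr, NULL);
 *	void *p = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 *
 * The returned descriptor behaves like a POSIX shm descriptor; the
 * registry entry keeps the page alive while references remain.
 */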
4471 static void
4472 umtx_shm_init(void)
4473 {
4474	int i;
4475
4476	umtx_shm_reg_zone = uma_zcreate("umtx_shm", sizeof(struct umtx_shm_reg),
4477	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
4478	mtx_init(&umtx_shm_lock, "umtxshm", NULL, MTX_DEF);
4479	for (i = 0; i < nitems(umtx_shm_registry); i++)
4480		TAILQ_INIT(&umtx_shm_registry[i]);
4481 }
4482
4483 static int
4484 umtx_shm(struct thread *td, void *addr, u_int flags)
4485 {
4486	struct umtx_key key;
4487	struct umtx_shm_reg *reg;
4488	struct file *fp;
4489	int error, fd;
4490
4491	if (__bitcount(flags & (UMTX_SHM_CREAT | UMTX_SHM_LOOKUP |
4492	    UMTX_SHM_DESTROY | UMTX_SHM_ALIVE)) != 1)
4493		return (EINVAL);
4494	if ((flags & UMTX_SHM_ALIVE) != 0)
4495		return (umtx_shm_alive(td, addr));
4496	error = umtx_key_get(addr, TYPE_SHM, PROCESS_SHARE, &key);
4497	if (error != 0)
4498		return (error);
4499	KASSERT(key.shared == 1, ("non-shared key"));
4500	if ((flags & UMTX_SHM_CREAT) != 0) {
4501		error = umtx_shm_create_reg(td, &key, &reg);
4502	} else {
4503		reg = umtx_shm_find_reg(&key);
4504		if (reg == NULL)
4505			error = ESRCH;
4506	}
4507	umtx_key_release(&key);
4508	if (error != 0)
4509		return (error);
4510	KASSERT(reg != NULL, ("no reg"));
4511	if ((flags & UMTX_SHM_DESTROY) != 0) {
4512		umtx_shm_unref_reg(reg, true);
4513	} else {
4514 #if 0
4515 #ifdef MAC
4516		error = mac_posixshm_check_open(td->td_ucred,
4517		    reg->ushm_obj, FFLAGS(O_RDWR));
4518		if (error == 0)
4519 #endif
4520			error = shm_access(reg->ushm_obj, td->td_ucred,
4521			    FFLAGS(O_RDWR));
4522		if (error == 0)
4523 #endif
4524		error = falloc_caps(td, &fp, &fd, O_CLOEXEC, NULL);
4525		if (error == 0) {
4526			shm_hold(reg->ushm_obj);
4527			finit(fp, FFLAGS(O_RDWR), DTYPE_SHM, reg->ushm_obj,
4528			    &shm_ops);
4529			td->td_retval[0] = fd;
4530			fdrop(fp, td);
4531		}
4532	}
4533	umtx_shm_unref_reg(reg, false);
4534	return (error);
4535 }
4536
4537 static int
4538 __umtx_op_shm(struct thread *td, struct _umtx_op_args *uap,
4539     const struct umtx_copyops *ops __unused)
4540 {
4541
4542	return (umtx_shm(td, uap->uaddr1, uap->val));
4543 }
4544
4545 static int
4546 __umtx_op_robust_lists(struct thread *td, struct _umtx_op_args *uap,
4547     const struct umtx_copyops *ops)
4548 {
4549	struct umtx_robust_lists_params rb;
4550	int error;
4551
4552	if (ops->compat32) {
4553		if ((td->td_pflags2 & TDP2_COMPAT32RB) == 0 &&
4554		    (td->td_rb_list != 0 || td->td_rbp_list != 0 ||
4555		    td->td_rb_inact != 0))
4556			return (EBUSY);
4557	} else if ((td->td_pflags2 & TDP2_COMPAT32RB) != 0) {
4558		return (EBUSY);
4559	}
4560
4561	bzero(&rb, sizeof(rb));
4562	error = ops->copyin_robust_lists(uap->uaddr1, uap->val, &rb);
4563	if (error != 0)
4564		return (error);
4565
4566	if (ops->compat32)
4567		td->td_pflags2 |= TDP2_COMPAT32RB;
4568
4569	td->td_rb_list = rb.robust_list_offset;
4570	td->td_rbp_list = rb.robust_priv_list_offset;
4571	td->td_rb_inact = rb.robust_inact_offset;
4572	return (0);
4573 }
4574
4575 #if defined(__i386__) || defined(__amd64__)
4576 /*
4577  * Provide the standard 32-bit definitions for x86,
since native/compat32 use a 4578 * 32-bit time_t there. Other architectures just need the i386 definitions 4579 * along with their standard compat32. 4580 */ 4581 struct timespecx32 { 4582 int64_t tv_sec; 4583 int32_t tv_nsec; 4584 }; 4585 4586 struct umtx_timex32 { 4587 struct timespecx32 _timeout; 4588 uint32_t _flags; 4589 uint32_t _clockid; 4590 }; 4591 4592 #ifndef __i386__ 4593 #define timespeci386 timespec32 4594 #define umtx_timei386 umtx_time32 4595 #endif 4596 #else /* !__i386__ && !__amd64__ */ 4597 /* 32-bit architectures can emulate i386, so define these almost everywhere. */ 4598 struct timespeci386 { 4599 int32_t tv_sec; 4600 int32_t tv_nsec; 4601 }; 4602 4603 struct umtx_timei386 { 4604 struct timespeci386 _timeout; 4605 uint32_t _flags; 4606 uint32_t _clockid; 4607 }; 4608 4609 #if defined(__LP64__) 4610 #define timespecx32 timespec32 4611 #define umtx_timex32 umtx_time32 4612 #endif 4613 #endif 4614 4615 static int 4616 umtx_copyin_robust_lists32(const void *uaddr, size_t size, 4617 struct umtx_robust_lists_params *rbp) 4618 { 4619 struct umtx_robust_lists_params_compat32 rb32; 4620 int error; 4621 4622 if (size > sizeof(rb32)) 4623 return (EINVAL); 4624 bzero(&rb32, sizeof(rb32)); 4625 error = copyin(uaddr, &rb32, size); 4626 if (error != 0) 4627 return (error); 4628 CP(rb32, *rbp, robust_list_offset); 4629 CP(rb32, *rbp, robust_priv_list_offset); 4630 CP(rb32, *rbp, robust_inact_offset); 4631 return (0); 4632 } 4633 4634 #ifndef __i386__ 4635 static inline int 4636 umtx_copyin_timeouti386(const void *uaddr, struct timespec *tsp) 4637 { 4638 struct timespeci386 ts32; 4639 int error; 4640 4641 error = copyin(uaddr, &ts32, sizeof(ts32)); 4642 if (error == 0) { 4643 if (ts32.tv_sec < 0 || 4644 ts32.tv_nsec >= 1000000000 || 4645 ts32.tv_nsec < 0) 4646 error = EINVAL; 4647 else { 4648 CP(ts32, *tsp, tv_sec); 4649 CP(ts32, *tsp, tv_nsec); 4650 } 4651 } 4652 return (error); 4653 } 4654 4655 static inline int 4656 umtx_copyin_umtx_timei386(const void *uaddr, size_t size, struct _umtx_time *tp) 4657 { 4658 struct umtx_timei386 t32; 4659 int error; 4660 4661 t32._clockid = CLOCK_REALTIME; 4662 t32._flags = 0; 4663 if (size <= sizeof(t32._timeout)) 4664 error = copyin(uaddr, &t32._timeout, sizeof(t32._timeout)); 4665 else 4666 error = copyin(uaddr, &t32, sizeof(t32)); 4667 if (error != 0) 4668 return (error); 4669 if (t32._timeout.tv_sec < 0 || 4670 t32._timeout.tv_nsec >= 1000000000 || t32._timeout.tv_nsec < 0) 4671 return (EINVAL); 4672 TS_CP(t32, *tp, _timeout); 4673 CP(t32, *tp, _flags); 4674 CP(t32, *tp, _clockid); 4675 return (0); 4676 } 4677 4678 static int 4679 umtx_copyout_timeouti386(void *uaddr, size_t sz, struct timespec *tsp) 4680 { 4681 struct timespeci386 remain32 = { 4682 .tv_sec = tsp->tv_sec, 4683 .tv_nsec = tsp->tv_nsec, 4684 }; 4685 4686 /* 4687 * Should be guaranteed by the caller, sz == uaddr1 - sizeof(_umtx_time) 4688 * and we're only called if sz >= sizeof(timespec) as supplied in the 4689 * copyops. 
4690 */ 4691 KASSERT(sz >= sizeof(remain32), 4692 ("umtx_copyops specifies incorrect sizes")); 4693 4694 return (copyout(&remain32, uaddr, sizeof(remain32))); 4695 } 4696 #endif /* !__i386__ */ 4697 4698 #if defined(__i386__) || defined(__LP64__) 4699 static inline int 4700 umtx_copyin_timeoutx32(const void *uaddr, struct timespec *tsp) 4701 { 4702 struct timespecx32 ts32; 4703 int error; 4704 4705 error = copyin(uaddr, &ts32, sizeof(ts32)); 4706 if (error == 0) { 4707 if (ts32.tv_sec < 0 || 4708 ts32.tv_nsec >= 1000000000 || 4709 ts32.tv_nsec < 0) 4710 error = EINVAL; 4711 else { 4712 CP(ts32, *tsp, tv_sec); 4713 CP(ts32, *tsp, tv_nsec); 4714 } 4715 } 4716 return (error); 4717 } 4718 4719 static inline int 4720 umtx_copyin_umtx_timex32(const void *uaddr, size_t size, struct _umtx_time *tp) 4721 { 4722 struct umtx_timex32 t32; 4723 int error; 4724 4725 t32._clockid = CLOCK_REALTIME; 4726 t32._flags = 0; 4727 if (size <= sizeof(t32._timeout)) 4728 error = copyin(uaddr, &t32._timeout, sizeof(t32._timeout)); 4729 else 4730 error = copyin(uaddr, &t32, sizeof(t32)); 4731 if (error != 0) 4732 return (error); 4733 if (t32._timeout.tv_sec < 0 || 4734 t32._timeout.tv_nsec >= 1000000000 || t32._timeout.tv_nsec < 0) 4735 return (EINVAL); 4736 TS_CP(t32, *tp, _timeout); 4737 CP(t32, *tp, _flags); 4738 CP(t32, *tp, _clockid); 4739 return (0); 4740 } 4741 4742 static int 4743 umtx_copyout_timeoutx32(void *uaddr, size_t sz, struct timespec *tsp) 4744 { 4745 struct timespecx32 remain32 = { 4746 .tv_sec = tsp->tv_sec, 4747 .tv_nsec = tsp->tv_nsec, 4748 }; 4749 4750 /* 4751 * Should be guaranteed by the caller, sz == uaddr1 - sizeof(_umtx_time) 4752 * and we're only called if sz >= sizeof(timespec) as supplied in the 4753 * copyops. 4754 */ 4755 KASSERT(sz >= sizeof(remain32), 4756 ("umtx_copyops specifies incorrect sizes")); 4757 4758 return (copyout(&remain32, uaddr, sizeof(remain32))); 4759 } 4760 #endif /* __i386__ || __LP64__ */ 4761 4762 typedef int (*_umtx_op_func)(struct thread *td, struct _umtx_op_args *uap, 4763 const struct umtx_copyops *umtx_ops); 4764 4765 static const _umtx_op_func op_table[] = { 4766 #ifdef COMPAT_FREEBSD10 4767 [UMTX_OP_LOCK] = __umtx_op_lock_umtx, 4768 [UMTX_OP_UNLOCK] = __umtx_op_unlock_umtx, 4769 #else 4770 [UMTX_OP_LOCK] = __umtx_op_unimpl, 4771 [UMTX_OP_UNLOCK] = __umtx_op_unimpl, 4772 #endif 4773 [UMTX_OP_WAIT] = __umtx_op_wait, 4774 [UMTX_OP_WAKE] = __umtx_op_wake, 4775 [UMTX_OP_MUTEX_TRYLOCK] = __umtx_op_trylock_umutex, 4776 [UMTX_OP_MUTEX_LOCK] = __umtx_op_lock_umutex, 4777 [UMTX_OP_MUTEX_UNLOCK] = __umtx_op_unlock_umutex, 4778 [UMTX_OP_SET_CEILING] = __umtx_op_set_ceiling, 4779 [UMTX_OP_CV_WAIT] = __umtx_op_cv_wait, 4780 [UMTX_OP_CV_SIGNAL] = __umtx_op_cv_signal, 4781 [UMTX_OP_CV_BROADCAST] = __umtx_op_cv_broadcast, 4782 [UMTX_OP_WAIT_UINT] = __umtx_op_wait_uint, 4783 [UMTX_OP_RW_RDLOCK] = __umtx_op_rw_rdlock, 4784 [UMTX_OP_RW_WRLOCK] = __umtx_op_rw_wrlock, 4785 [UMTX_OP_RW_UNLOCK] = __umtx_op_rw_unlock, 4786 [UMTX_OP_WAIT_UINT_PRIVATE] = __umtx_op_wait_uint_private, 4787 [UMTX_OP_WAKE_PRIVATE] = __umtx_op_wake_private, 4788 [UMTX_OP_MUTEX_WAIT] = __umtx_op_wait_umutex, 4789 [UMTX_OP_MUTEX_WAKE] = __umtx_op_wake_umutex, 4790 #if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10) 4791 [UMTX_OP_SEM_WAIT] = __umtx_op_sem_wait, 4792 [UMTX_OP_SEM_WAKE] = __umtx_op_sem_wake, 4793 #else 4794 [UMTX_OP_SEM_WAIT] = __umtx_op_unimpl, 4795 [UMTX_OP_SEM_WAKE] = __umtx_op_unimpl, 4796 #endif 4797 [UMTX_OP_NWAKE_PRIVATE] = __umtx_op_nwake_private, 4798 [UMTX_OP_MUTEX_WAKE2] = 
__umtx_op_wake2_umutex,
4799	[UMTX_OP_SEM2_WAIT]	= __umtx_op_sem2_wait,
4800	[UMTX_OP_SEM2_WAKE]	= __umtx_op_sem2_wake,
4801	[UMTX_OP_SHM]		= __umtx_op_shm,
4802	[UMTX_OP_ROBUST_LISTS]	= __umtx_op_robust_lists,
4803 };
4804
4805 static const struct umtx_copyops umtx_native_ops = {
4806	.copyin_timeout = umtx_copyin_timeout,
4807	.copyin_umtx_time = umtx_copyin_umtx_time,
4808	.copyin_robust_lists = umtx_copyin_robust_lists,
4809	.copyout_timeout = umtx_copyout_timeout,
4810	.timespec_sz = sizeof(struct timespec),
4811	.umtx_time_sz = sizeof(struct _umtx_time),
4812 };
4813
4814 #ifndef __i386__
4815 static const struct umtx_copyops umtx_native_opsi386 = {
4816	.copyin_timeout = umtx_copyin_timeouti386,
4817	.copyin_umtx_time = umtx_copyin_umtx_timei386,
4818	.copyin_robust_lists = umtx_copyin_robust_lists32,
4819	.copyout_timeout = umtx_copyout_timeouti386,
4820	.timespec_sz = sizeof(struct timespeci386),
4821	.umtx_time_sz = sizeof(struct umtx_timei386),
4822	.compat32 = true,
4823 };
4824 #endif
4825
4826 #if defined(__i386__) || defined(__LP64__)
4827 /* i386 can emulate other 32-bit archs, too! */
4828 static const struct umtx_copyops umtx_native_opsx32 = {
4829	.copyin_timeout = umtx_copyin_timeoutx32,
4830	.copyin_umtx_time = umtx_copyin_umtx_timex32,
4831	.copyin_robust_lists = umtx_copyin_robust_lists32,
4832	.copyout_timeout = umtx_copyout_timeoutx32,
4833	.timespec_sz = sizeof(struct timespecx32),
4834	.umtx_time_sz = sizeof(struct umtx_timex32),
4835	.compat32 = true,
4836 };
4837
4838 #ifdef COMPAT_FREEBSD32
4839 #ifdef __amd64__
4840 #define	umtx_native_ops32	umtx_native_opsi386
4841 #else
4842 #define	umtx_native_ops32	umtx_native_opsx32
4843 #endif
4844 #endif /* COMPAT_FREEBSD32 */
4845 #endif /* __i386__ || __LP64__ */
4846
4847 #define	UMTX_OP__FLAGS	(UMTX_OP__32BIT | UMTX_OP__I386)
4848
4849 static int
4850 kern__umtx_op(struct thread *td, void *obj, int op, unsigned long val,
4851     void *uaddr1, void *uaddr2, const struct umtx_copyops *ops)
4852 {
4853	struct _umtx_op_args uap = {
4854		.obj = obj,
4855		.op = op & ~UMTX_OP__FLAGS,
4856		.val = val,
4857		.uaddr1 = uaddr1,
4858		.uaddr2 = uaddr2
4859	};
4860
4861	if (uap.op >= nitems(op_table))
4862		return (EINVAL);
4863	return ((*op_table[uap.op])(td, &uap, ops));
4864 }
4865
4866 int
4867 sys__umtx_op(struct thread *td, struct _umtx_op_args *uap)
4868 {
4869	const struct umtx_copyops *umtx_ops;
4870
4871	umtx_ops = &umtx_native_ops;
4872 #ifdef __LP64__
4873	if ((uap->op & (UMTX_OP__32BIT | UMTX_OP__I386)) != 0) {
4874		if ((uap->op & UMTX_OP__I386) != 0)
4875			umtx_ops = &umtx_native_opsi386;
4876		else
4877			umtx_ops = &umtx_native_opsx32;
4878	}
4879 #elif !defined(__i386__)
4880	/* We consider UMTX_OP__32BIT a nop on !i386 ILP32. */
4881	if ((uap->op & UMTX_OP__I386) != 0)
4882		umtx_ops = &umtx_native_opsi386;
4883 #else
4884	/* Likewise, UMTX_OP__I386 is a nop on i386. */
4885	if ((uap->op & UMTX_OP__32BIT) != 0)
4886		umtx_ops = &umtx_native_opsx32;
4887 #endif
4888	return (kern__umtx_op(td, uap->obj, uap->op, uap->val, uap->uaddr1,
4889	    uap->uaddr2, umtx_ops));
4890 }
4891
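/*
 * Illustrative sketch (not part of the kernel build): a 64-bit process
 * can request the 32-bit time_t timeout layout by or'ing UMTX_OP__I386
 * into op.  The flag only selects umtx_native_opsi386 above and is
 * stripped by kern__umtx_op() before the op_table dispatch.  This
 * assumes the _umtx_op(2) timeout convention (uaddr carries the size,
 * uaddr2 the timeout) and that the UMTX_OP__* flags are visible to
 * userland; the struct and function names here are hypothetical, with
 * the struct mirroring struct umtx_timei386 above.
 */
#if 0
#include <sys/types.h>
#include <sys/umtx.h>
#include <stdint.h>

struct my_umtx_timei386 {	/* same layout as struct umtx_timei386 */
	int32_t		tv_sec;
	int32_t		tv_nsec;
	uint32_t	flags;
	uint32_t	clockid;
};

static int
wait_uint_i386_timeout(uint32_t *word, uint32_t expected,
    struct my_umtx_timei386 *timo)
{
	return (_umtx_op(word, UMTX_OP_WAIT_UINT_PRIVATE | UMTX_OP__I386,
	    expected, (void *)(uintptr_t)sizeof(*timo), timo));
}
#endif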
4892 #ifdef COMPAT_FREEBSD32
4893 #ifdef COMPAT_FREEBSD10
4894 int
4895 freebsd10_freebsd32__umtx_lock(struct thread *td,
4896     struct freebsd10_freebsd32__umtx_lock_args *uap)
4897 {
4898	return (do_lock_umtx32(td, (uint32_t *)uap->umtx, td->td_tid, NULL));
4899 }
4900
4901 int
4902 freebsd10_freebsd32__umtx_unlock(struct thread *td,
4903     struct freebsd10_freebsd32__umtx_unlock_args *uap)
4904 {
4905	return (do_unlock_umtx32(td, (uint32_t *)uap->umtx, td->td_tid));
4906 }
4907 #endif /* COMPAT_FREEBSD10 */
4908
4909 int
4910 freebsd32__umtx_op(struct thread *td, struct freebsd32__umtx_op_args *uap)
4911 {
4912
4913	return (kern__umtx_op(td, uap->obj, uap->op, uap->val, uap->uaddr1,
4914	    uap->uaddr2, &umtx_native_ops32));
4915 }
4916 #endif /* COMPAT_FREEBSD32 */
4917
4918 void
4919 umtx_thread_init(struct thread *td)
4920 {
4921
4922	td->td_umtxq = umtxq_alloc();
4923	td->td_umtxq->uq_thread = td;
4924 }
4925
4926 void
4927 umtx_thread_fini(struct thread *td)
4928 {
4929
4930	umtxq_free(td->td_umtxq);
4931 }
4932
4933 /*
4934  * Called when a new thread is created, e.g. during fork().
4935  */
4936 void
4937 umtx_thread_alloc(struct thread *td)
4938 {
4939	struct umtx_q *uq;
4940
4941	uq = td->td_umtxq;
4942	uq->uq_inherited_pri = PRI_MAX;
4943
4944	KASSERT(uq->uq_flags == 0, ("uq_flags != 0"));
4945	KASSERT(uq->uq_thread == td, ("uq_thread != td"));
4946	KASSERT(uq->uq_pi_blocked == NULL, ("uq_pi_blocked != NULL"));
4947	KASSERT(TAILQ_EMPTY(&uq->uq_pi_contested), ("uq_pi_contested is not empty"));
4948 }
4949
4950 /*
4951  * exec() hook.
4952  *
4953  * Clear the robust lists for all of the process's threads, without
4954  * delaying the cleanup until thread exit, since the relevant address
4955  * space is being destroyed right now.
4956  */
4957 void
4958 umtx_exec(struct proc *p)
4959 {
4960	struct thread *td;
4961
4962	KASSERT(p == curproc, ("need curproc"));
4963	KASSERT((p->p_flag & P_HADTHREADS) == 0 ||
4964	    (p->p_flag & P_STOPPED_SINGLE) != 0,
4965	    ("curproc must be single-threaded"));
4966	/*
4967	 * There is no need to lock the list as only this thread can be
4968	 * running.
4969	 */
4970	FOREACH_THREAD_IN_PROC(p, td) {
4971		KASSERT(td == curthread ||
4972		    ((td->td_flags & TDF_BOUNDARY) != 0 && TD_IS_SUSPENDED(td)),
4973		    ("running thread %p %p", p, td));
4974		umtx_thread_cleanup(td);
4975		td->td_rb_list = td->td_rbp_list = td->td_rb_inact = 0;
4976	}
4977 }
4978
4979 /*
4980  * Thread exit hook.
4981 */ 4982 void 4983 umtx_thread_exit(struct thread *td) 4984 { 4985 4986 umtx_thread_cleanup(td); 4987 } 4988 4989 static int 4990 umtx_read_uptr(struct thread *td, uintptr_t ptr, uintptr_t *res, bool compat32) 4991 { 4992 u_long res1; 4993 uint32_t res32; 4994 int error; 4995 4996 if (compat32) { 4997 error = fueword32((void *)ptr, &res32); 4998 if (error == 0) 4999 res1 = res32; 5000 } else { 5001 error = fueword((void *)ptr, &res1); 5002 } 5003 if (error == 0) 5004 *res = res1; 5005 else 5006 error = EFAULT; 5007 return (error); 5008 } 5009 5010 static void 5011 umtx_read_rb_list(struct thread *td, struct umutex *m, uintptr_t *rb_list, 5012 bool compat32) 5013 { 5014 struct umutex32 m32; 5015 5016 if (compat32) { 5017 memcpy(&m32, m, sizeof(m32)); 5018 *rb_list = m32.m_rb_lnk; 5019 } else { 5020 *rb_list = m->m_rb_lnk; 5021 } 5022 } 5023 5024 static int 5025 umtx_handle_rb(struct thread *td, uintptr_t rbp, uintptr_t *rb_list, bool inact, 5026 bool compat32) 5027 { 5028 struct umutex m; 5029 int error; 5030 5031 KASSERT(td->td_proc == curproc, ("need current vmspace")); 5032 error = copyin((void *)rbp, &m, sizeof(m)); 5033 if (error != 0) 5034 return (error); 5035 if (rb_list != NULL) 5036 umtx_read_rb_list(td, &m, rb_list, compat32); 5037 if ((m.m_flags & UMUTEX_ROBUST) == 0) 5038 return (EINVAL); 5039 if ((m.m_owner & ~UMUTEX_CONTESTED) != td->td_tid) 5040 /* inact is cleared after unlock, allow the inconsistency */ 5041 return (inact ? 0 : EINVAL); 5042 return (do_unlock_umutex(td, (struct umutex *)rbp, true)); 5043 } 5044 5045 static void 5046 umtx_cleanup_rb_list(struct thread *td, uintptr_t rb_list, uintptr_t *rb_inact, 5047 const char *name, bool compat32) 5048 { 5049 int error, i; 5050 uintptr_t rbp; 5051 bool inact; 5052 5053 if (rb_list == 0) 5054 return; 5055 error = umtx_read_uptr(td, rb_list, &rbp, compat32); 5056 for (i = 0; error == 0 && rbp != 0 && i < umtx_max_rb; i++) { 5057 if (rbp == *rb_inact) { 5058 inact = true; 5059 *rb_inact = 0; 5060 } else 5061 inact = false; 5062 error = umtx_handle_rb(td, rbp, &rbp, inact, compat32); 5063 } 5064 if (i == umtx_max_rb && umtx_verbose_rb) { 5065 uprintf("comm %s pid %d: reached umtx %smax rb %d\n", 5066 td->td_proc->p_comm, td->td_proc->p_pid, name, umtx_max_rb); 5067 } 5068 if (error != 0 && umtx_verbose_rb) { 5069 uprintf("comm %s pid %d: handling %srb error %d\n", 5070 td->td_proc->p_comm, td->td_proc->p_pid, name, error); 5071 } 5072 } 5073 5074 /* 5075 * Clean up umtx data. 5076 */ 5077 static void 5078 umtx_thread_cleanup(struct thread *td) 5079 { 5080 struct umtx_q *uq; 5081 struct umtx_pi *pi; 5082 uintptr_t rb_inact; 5083 bool compat32; 5084 5085 /* 5086 * Disown pi mutexes. 5087 */ 5088 uq = td->td_umtxq; 5089 if (uq != NULL) { 5090 if (uq->uq_inherited_pri != PRI_MAX || 5091 !TAILQ_EMPTY(&uq->uq_pi_contested)) { 5092 mtx_lock(&umtx_lock); 5093 uq->uq_inherited_pri = PRI_MAX; 5094 while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) { 5095 pi->pi_owner = NULL; 5096 TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link); 5097 } 5098 mtx_unlock(&umtx_lock); 5099 } 5100 sched_lend_user_prio_cond(td, PRI_MAX); 5101 } 5102 5103 compat32 = (td->td_pflags2 & TDP2_COMPAT32RB) != 0; 5104 td->td_pflags2 &= ~TDP2_COMPAT32RB; 5105 5106 if (td->td_rb_inact == 0 && td->td_rb_list == 0 && td->td_rbp_list == 0) 5107 return; 5108 5109 /* 5110 * Handle terminated robust mutexes. Must be done after 5111 * robust pi disown, otherwise unlock could see unowned 5112 * entries. 
5113 */ 5114 rb_inact = td->td_rb_inact; 5115 if (rb_inact != 0) 5116 (void)umtx_read_uptr(td, rb_inact, &rb_inact, compat32); 5117 umtx_cleanup_rb_list(td, td->td_rb_list, &rb_inact, "", compat32); 5118 umtx_cleanup_rb_list(td, td->td_rbp_list, &rb_inact, "priv ", compat32); 5119 if (rb_inact != 0) 5120 (void)umtx_handle_rb(td, rb_inact, NULL, true, compat32); 5121 } 5122