1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2015, 2016 The FreeBSD Foundation 5 * Copyright (c) 2004, David Xu <davidxu@freebsd.org> 6 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org> 7 * All rights reserved. 8 * 9 * Portions of this software were developed by Konstantin Belousov 10 * under sponsorship from the FreeBSD Foundation. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice unmodified, this list of conditions, and the following 17 * disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD$"); 36 37 #include "opt_umtx_profiling.h" 38 39 #include <sys/param.h> 40 #include <sys/kernel.h> 41 #include <sys/fcntl.h> 42 #include <sys/file.h> 43 #include <sys/filedesc.h> 44 #include <sys/limits.h> 45 #include <sys/lock.h> 46 #include <sys/malloc.h> 47 #include <sys/mman.h> 48 #include <sys/mutex.h> 49 #include <sys/priv.h> 50 #include <sys/proc.h> 51 #include <sys/resource.h> 52 #include <sys/resourcevar.h> 53 #include <sys/rwlock.h> 54 #include <sys/sbuf.h> 55 #include <sys/sched.h> 56 #include <sys/smp.h> 57 #include <sys/sysctl.h> 58 #include <sys/systm.h> 59 #include <sys/sysproto.h> 60 #include <sys/syscallsubr.h> 61 #include <sys/taskqueue.h> 62 #include <sys/time.h> 63 #include <sys/eventhandler.h> 64 #include <sys/umtx.h> 65 #include <sys/umtxvar.h> 66 67 #include <security/mac/mac_framework.h> 68 69 #include <vm/vm.h> 70 #include <vm/vm_param.h> 71 #include <vm/pmap.h> 72 #include <vm/vm_map.h> 73 #include <vm/vm_object.h> 74 75 #include <machine/atomic.h> 76 #include <machine/cpu.h> 77 78 #include <compat/freebsd32/freebsd32.h> 79 #ifdef COMPAT_FREEBSD32 80 #include <compat/freebsd32/freebsd32_proto.h> 81 #endif 82 83 #define _UMUTEX_TRY 1 84 #define _UMUTEX_WAIT 2 85 86 #ifdef UMTX_PROFILING 87 #define UPROF_PERC_BIGGER(w, f, sw, sf) \ 88 (((w) > (sw)) || ((w) == (sw) && (f) > (sf))) 89 #endif 90 91 #define UMTXQ_LOCKED_ASSERT(uc) mtx_assert(&(uc)->uc_lock, MA_OWNED) 92 #ifdef INVARIANTS 93 #define UMTXQ_ASSERT_LOCKED_BUSY(key) do { \ 94 struct umtxq_chain *uc; \ 95 \ 96 uc = umtxq_getchain(key); \ 97 mtx_assert(&uc->uc_lock, MA_OWNED); \ 98 KASSERT(uc->uc_busy != 0, ("umtx chain is not busy")); \ 99 } while (0) 100 #else 101 #define UMTXQ_ASSERT_LOCKED_BUSY(key) do {} while (0) 102 #endif 103 104 /* 105 * Don't propagate time-sharing priority, there is a security reason, 
 * as a user could otherwise create a PI-mutex, have thread A lock it,
 * and have another thread B block on it.  Because B is sleeping, its
 * priority is boosted, which in turn boosts A's priority through
 * priority propagation; A's priority would then never be lowered even
 * if A kept using 100% CPU, which is unfair to other processes.
 */

#define UPRI(td)	(((td)->td_user_pri >= PRI_MIN_TIMESHARE &&\
			  (td)->td_user_pri <= PRI_MAX_TIMESHARE) ?\
			 PRI_MAX_TIMESHARE : (td)->td_user_pri)

#define	GOLDEN_RATIO_PRIME	2654404609U
#ifndef	UMTX_CHAINS
#define	UMTX_CHAINS		512
#endif
#define	UMTX_SHIFTS		(__WORD_BIT - 9)

#define	GET_SHARE(flags)	\
    (((flags) & USYNC_PROCESS_SHARED) == 0 ? THREAD_SHARE : PROCESS_SHARE)

#define	BUSY_SPINS		200

struct umtx_copyops {
	int	(*copyin_timeout)(const void *uaddr, struct timespec *tsp);
	int	(*copyin_umtx_time)(const void *uaddr, size_t size,
	    struct _umtx_time *tp);
	int	(*copyin_robust_lists)(const void *uaddr, size_t size,
	    struct umtx_robust_lists_params *rbp);
	int	(*copyout_timeout)(void *uaddr, size_t size,
	    struct timespec *tsp);
	const size_t	timespec_sz;
	const size_t	umtx_time_sz;
	const bool	compat32;
};

_Static_assert(sizeof(struct umutex) == sizeof(struct umutex32), "umutex32");
_Static_assert(__offsetof(struct umutex, m_spare[0]) ==
    __offsetof(struct umutex32, m_spare[0]), "m_spare32");

int umtx_shm_vnobj_persistent = 0;
SYSCTL_INT(_kern_ipc, OID_AUTO, umtx_vnode_persistent, CTLFLAG_RWTUN,
    &umtx_shm_vnobj_persistent, 0,
    "False forces destruction of umtx attached to file, on last close");
static int umtx_max_rb = 1000;
SYSCTL_INT(_kern_ipc, OID_AUTO, umtx_max_robust, CTLFLAG_RWTUN,
    &umtx_max_rb, 0,
    "Maximum number of robust mutexes allowed for each thread");

static uma_zone_t	umtx_pi_zone;
static struct umtxq_chain umtxq_chains[2][UMTX_CHAINS];
static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");
static int		umtx_pi_allocated;

static SYSCTL_NODE(_debug, OID_AUTO, umtx, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "umtx debug");
SYSCTL_INT(_debug_umtx, OID_AUTO, umtx_pi_allocated, CTLFLAG_RD,
    &umtx_pi_allocated, 0, "Allocated umtx_pi");
static int umtx_verbose_rb = 1;
SYSCTL_INT(_debug_umtx, OID_AUTO, robust_faults_verbose, CTLFLAG_RWTUN,
    &umtx_verbose_rb, 0,
    "");

#ifdef UMTX_PROFILING
static long max_length;
SYSCTL_LONG(_debug_umtx, OID_AUTO, max_length, CTLFLAG_RD, &max_length, 0, "max_length");
static SYSCTL_NODE(_debug_umtx, OID_AUTO, chains, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "umtx chain stats");
#endif

static inline void umtx_abs_timeout_init2(struct umtx_abs_timeout *timo,
    const struct _umtx_time *umtxtime);

static void umtx_shm_init(void);
static void umtxq_sysinit(void *);
static void umtxq_hash(struct umtx_key *key);
static int do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags,
    bool rb);
static void umtx_thread_cleanup(struct thread *td);
SYSINIT(umtx, SI_SUB_EVENTHANDLER+1, SI_ORDER_MIDDLE, umtxq_sysinit, NULL);

#define	umtxq_signal(key, nwake)	umtxq_signal_queue((key), (nwake), UMTX_SHARED_QUEUE)

static struct mtx umtx_lock;

#ifdef UMTX_PROFILING
static void
umtx_init_profiling(void)
{
	struct sysctl_oid *chain_oid;
	char chain_name[10];
	int i;

	for (i = 0; i < UMTX_CHAINS; ++i) {
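		/*
		 * Publish each chain's peak queue lengths for both hash
		 * tables as debug.umtx.chains.<i>.max_length{0,1}.
		 */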
snprintf(chain_name, sizeof(chain_name), "%d", i); 200 chain_oid = SYSCTL_ADD_NODE(NULL, 201 SYSCTL_STATIC_CHILDREN(_debug_umtx_chains), OID_AUTO, 202 chain_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 203 "umtx hash stats"); 204 SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO, 205 "max_length0", CTLFLAG_RD, &umtxq_chains[0][i].max_length, 0, NULL); 206 SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO, 207 "max_length1", CTLFLAG_RD, &umtxq_chains[1][i].max_length, 0, NULL); 208 } 209 } 210 211 static int 212 sysctl_debug_umtx_chains_peaks(SYSCTL_HANDLER_ARGS) 213 { 214 char buf[512]; 215 struct sbuf sb; 216 struct umtxq_chain *uc; 217 u_int fract, i, j, tot, whole; 218 u_int sf0, sf1, sf2, sf3, sf4; 219 u_int si0, si1, si2, si3, si4; 220 u_int sw0, sw1, sw2, sw3, sw4; 221 222 sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN); 223 for (i = 0; i < 2; i++) { 224 tot = 0; 225 for (j = 0; j < UMTX_CHAINS; ++j) { 226 uc = &umtxq_chains[i][j]; 227 mtx_lock(&uc->uc_lock); 228 tot += uc->max_length; 229 mtx_unlock(&uc->uc_lock); 230 } 231 if (tot == 0) 232 sbuf_printf(&sb, "%u) Empty ", i); 233 else { 234 sf0 = sf1 = sf2 = sf3 = sf4 = 0; 235 si0 = si1 = si2 = si3 = si4 = 0; 236 sw0 = sw1 = sw2 = sw3 = sw4 = 0; 237 for (j = 0; j < UMTX_CHAINS; j++) { 238 uc = &umtxq_chains[i][j]; 239 mtx_lock(&uc->uc_lock); 240 whole = uc->max_length * 100; 241 mtx_unlock(&uc->uc_lock); 242 fract = (whole % tot) * 100; 243 if (UPROF_PERC_BIGGER(whole, fract, sw0, sf0)) { 244 sf0 = fract; 245 si0 = j; 246 sw0 = whole; 247 } else if (UPROF_PERC_BIGGER(whole, fract, sw1, 248 sf1)) { 249 sf1 = fract; 250 si1 = j; 251 sw1 = whole; 252 } else if (UPROF_PERC_BIGGER(whole, fract, sw2, 253 sf2)) { 254 sf2 = fract; 255 si2 = j; 256 sw2 = whole; 257 } else if (UPROF_PERC_BIGGER(whole, fract, sw3, 258 sf3)) { 259 sf3 = fract; 260 si3 = j; 261 sw3 = whole; 262 } else if (UPROF_PERC_BIGGER(whole, fract, sw4, 263 sf4)) { 264 sf4 = fract; 265 si4 = j; 266 sw4 = whole; 267 } 268 } 269 sbuf_printf(&sb, "queue %u:\n", i); 270 sbuf_printf(&sb, "1st: %u.%u%% idx: %u\n", sw0 / tot, 271 sf0 / tot, si0); 272 sbuf_printf(&sb, "2nd: %u.%u%% idx: %u\n", sw1 / tot, 273 sf1 / tot, si1); 274 sbuf_printf(&sb, "3rd: %u.%u%% idx: %u\n", sw2 / tot, 275 sf2 / tot, si2); 276 sbuf_printf(&sb, "4th: %u.%u%% idx: %u\n", sw3 / tot, 277 sf3 / tot, si3); 278 sbuf_printf(&sb, "5th: %u.%u%% idx: %u\n", sw4 / tot, 279 sf4 / tot, si4); 280 } 281 } 282 sbuf_trim(&sb); 283 sbuf_finish(&sb); 284 sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); 285 sbuf_delete(&sb); 286 return (0); 287 } 288 289 static int 290 sysctl_debug_umtx_chains_clear(SYSCTL_HANDLER_ARGS) 291 { 292 struct umtxq_chain *uc; 293 u_int i, j; 294 int clear, error; 295 296 clear = 0; 297 error = sysctl_handle_int(oidp, &clear, 0, req); 298 if (error != 0 || req->newptr == NULL) 299 return (error); 300 301 if (clear != 0) { 302 for (i = 0; i < 2; ++i) { 303 for (j = 0; j < UMTX_CHAINS; ++j) { 304 uc = &umtxq_chains[i][j]; 305 mtx_lock(&uc->uc_lock); 306 uc->length = 0; 307 uc->max_length = 0; 308 mtx_unlock(&uc->uc_lock); 309 } 310 } 311 } 312 return (0); 313 } 314 315 SYSCTL_PROC(_debug_umtx_chains, OID_AUTO, clear, 316 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0, 317 sysctl_debug_umtx_chains_clear, "I", 318 "Clear umtx chains statistics"); 319 SYSCTL_PROC(_debug_umtx_chains, OID_AUTO, peaks, 320 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 0, 321 sysctl_debug_umtx_chains_peaks, "A", 322 "Highest peaks in chains max length"); 323 #endif 324 325 static void 326 
umtxq_sysinit(void *arg __unused) 327 { 328 int i, j; 329 330 umtx_pi_zone = uma_zcreate("umtx pi", sizeof(struct umtx_pi), 331 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 332 for (i = 0; i < 2; ++i) { 333 for (j = 0; j < UMTX_CHAINS; ++j) { 334 mtx_init(&umtxq_chains[i][j].uc_lock, "umtxql", NULL, 335 MTX_DEF | MTX_DUPOK); 336 LIST_INIT(&umtxq_chains[i][j].uc_queue[0]); 337 LIST_INIT(&umtxq_chains[i][j].uc_queue[1]); 338 LIST_INIT(&umtxq_chains[i][j].uc_spare_queue); 339 TAILQ_INIT(&umtxq_chains[i][j].uc_pi_list); 340 umtxq_chains[i][j].uc_busy = 0; 341 umtxq_chains[i][j].uc_waiters = 0; 342 #ifdef UMTX_PROFILING 343 umtxq_chains[i][j].length = 0; 344 umtxq_chains[i][j].max_length = 0; 345 #endif 346 } 347 } 348 #ifdef UMTX_PROFILING 349 umtx_init_profiling(); 350 #endif 351 mtx_init(&umtx_lock, "umtx lock", NULL, MTX_DEF); 352 umtx_shm_init(); 353 } 354 355 struct umtx_q * 356 umtxq_alloc(void) 357 { 358 struct umtx_q *uq; 359 360 uq = malloc(sizeof(struct umtx_q), M_UMTX, M_WAITOK | M_ZERO); 361 uq->uq_spare_queue = malloc(sizeof(struct umtxq_queue), M_UMTX, 362 M_WAITOK | M_ZERO); 363 TAILQ_INIT(&uq->uq_spare_queue->head); 364 TAILQ_INIT(&uq->uq_pi_contested); 365 uq->uq_inherited_pri = PRI_MAX; 366 return (uq); 367 } 368 369 void 370 umtxq_free(struct umtx_q *uq) 371 { 372 373 MPASS(uq->uq_spare_queue != NULL); 374 free(uq->uq_spare_queue, M_UMTX); 375 free(uq, M_UMTX); 376 } 377 378 static inline void 379 umtxq_hash(struct umtx_key *key) 380 { 381 unsigned n; 382 383 n = (uintptr_t)key->info.both.a + key->info.both.b; 384 key->hash = ((n * GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS) % UMTX_CHAINS; 385 } 386 387 struct umtxq_chain * 388 umtxq_getchain(struct umtx_key *key) 389 { 390 391 if (key->type <= TYPE_SEM) 392 return (&umtxq_chains[1][key->hash]); 393 return (&umtxq_chains[0][key->hash]); 394 } 395 396 /* 397 * Set chain to busy state when following operation 398 * may be blocked (kernel mutex can not be used). 399 */ 400 void 401 umtxq_busy(struct umtx_key *key) 402 { 403 struct umtxq_chain *uc; 404 405 uc = umtxq_getchain(key); 406 mtx_assert(&uc->uc_lock, MA_OWNED); 407 if (uc->uc_busy) { 408 #ifdef SMP 409 if (smp_cpus > 1) { 410 int count = BUSY_SPINS; 411 if (count > 0) { 412 umtxq_unlock(key); 413 while (uc->uc_busy && --count > 0) 414 cpu_spinwait(); 415 umtxq_lock(key); 416 } 417 } 418 #endif 419 while (uc->uc_busy) { 420 uc->uc_waiters++; 421 msleep(uc, &uc->uc_lock, 0, "umtxqb", 0); 422 uc->uc_waiters--; 423 } 424 } 425 uc->uc_busy = 1; 426 } 427 428 /* 429 * Unbusy a chain. 
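 * The caller must hold the chain lock and have marked the chain busy;
 * wake up one thread sleeping in umtxq_busy(), if any is waiting.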
430 */ 431 void 432 umtxq_unbusy(struct umtx_key *key) 433 { 434 struct umtxq_chain *uc; 435 436 uc = umtxq_getchain(key); 437 mtx_assert(&uc->uc_lock, MA_OWNED); 438 KASSERT(uc->uc_busy != 0, ("not busy")); 439 uc->uc_busy = 0; 440 if (uc->uc_waiters) 441 wakeup_one(uc); 442 } 443 444 void 445 umtxq_unbusy_unlocked(struct umtx_key *key) 446 { 447 448 umtxq_lock(key); 449 umtxq_unbusy(key); 450 umtxq_unlock(key); 451 } 452 453 static struct umtxq_queue * 454 umtxq_queue_lookup(struct umtx_key *key, int q) 455 { 456 struct umtxq_queue *uh; 457 struct umtxq_chain *uc; 458 459 uc = umtxq_getchain(key); 460 UMTXQ_LOCKED_ASSERT(uc); 461 LIST_FOREACH(uh, &uc->uc_queue[q], link) { 462 if (umtx_key_match(&uh->key, key)) 463 return (uh); 464 } 465 466 return (NULL); 467 } 468 469 void 470 umtxq_insert_queue(struct umtx_q *uq, int q) 471 { 472 struct umtxq_queue *uh; 473 struct umtxq_chain *uc; 474 475 uc = umtxq_getchain(&uq->uq_key); 476 UMTXQ_LOCKED_ASSERT(uc); 477 KASSERT((uq->uq_flags & UQF_UMTXQ) == 0, ("umtx_q is already on queue")); 478 uh = umtxq_queue_lookup(&uq->uq_key, q); 479 if (uh != NULL) { 480 LIST_INSERT_HEAD(&uc->uc_spare_queue, uq->uq_spare_queue, link); 481 } else { 482 uh = uq->uq_spare_queue; 483 uh->key = uq->uq_key; 484 LIST_INSERT_HEAD(&uc->uc_queue[q], uh, link); 485 #ifdef UMTX_PROFILING 486 uc->length++; 487 if (uc->length > uc->max_length) { 488 uc->max_length = uc->length; 489 if (uc->max_length > max_length) 490 max_length = uc->max_length; 491 } 492 #endif 493 } 494 uq->uq_spare_queue = NULL; 495 496 TAILQ_INSERT_TAIL(&uh->head, uq, uq_link); 497 uh->length++; 498 uq->uq_flags |= UQF_UMTXQ; 499 uq->uq_cur_queue = uh; 500 return; 501 } 502 503 void 504 umtxq_remove_queue(struct umtx_q *uq, int q) 505 { 506 struct umtxq_chain *uc; 507 struct umtxq_queue *uh; 508 509 uc = umtxq_getchain(&uq->uq_key); 510 UMTXQ_LOCKED_ASSERT(uc); 511 if (uq->uq_flags & UQF_UMTXQ) { 512 uh = uq->uq_cur_queue; 513 TAILQ_REMOVE(&uh->head, uq, uq_link); 514 uh->length--; 515 uq->uq_flags &= ~UQF_UMTXQ; 516 if (TAILQ_EMPTY(&uh->head)) { 517 KASSERT(uh->length == 0, 518 ("inconsistent umtxq_queue length")); 519 #ifdef UMTX_PROFILING 520 uc->length--; 521 #endif 522 LIST_REMOVE(uh, link); 523 } else { 524 uh = LIST_FIRST(&uc->uc_spare_queue); 525 KASSERT(uh != NULL, ("uc_spare_queue is empty")); 526 LIST_REMOVE(uh, link); 527 } 528 uq->uq_spare_queue = uh; 529 uq->uq_cur_queue = NULL; 530 } 531 } 532 533 /* 534 * Check if there are multiple waiters 535 */ 536 int 537 umtxq_count(struct umtx_key *key) 538 { 539 struct umtxq_queue *uh; 540 541 UMTXQ_LOCKED_ASSERT(umtxq_getchain(key)); 542 uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE); 543 if (uh != NULL) 544 return (uh->length); 545 return (0); 546 } 547 548 /* 549 * Check if there are multiple PI waiters and returns first 550 * waiter. 551 */ 552 static int 553 umtxq_count_pi(struct umtx_key *key, struct umtx_q **first) 554 { 555 struct umtxq_queue *uh; 556 557 *first = NULL; 558 UMTXQ_LOCKED_ASSERT(umtxq_getchain(key)); 559 uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE); 560 if (uh != NULL) { 561 *first = TAILQ_FIRST(&uh->head); 562 return (uh->length); 563 } 564 return (0); 565 } 566 567 /* 568 * Wake up threads waiting on an userland object by a bit mask. 
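 * Only waiters whose uq_bitset intersects the given bitset are woken,
 * at most n_wake of them; the number of threads actually woken is
 * returned.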
569 */ 570 int 571 umtxq_signal_mask(struct umtx_key *key, int n_wake, u_int bitset) 572 { 573 struct umtxq_queue *uh; 574 struct umtx_q *uq, *uq_temp; 575 int ret; 576 577 ret = 0; 578 UMTXQ_LOCKED_ASSERT(umtxq_getchain(key)); 579 uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE); 580 if (uh == NULL) 581 return (0); 582 TAILQ_FOREACH_SAFE(uq, &uh->head, uq_link, uq_temp) { 583 if ((uq->uq_bitset & bitset) == 0) 584 continue; 585 umtxq_remove_queue(uq, UMTX_SHARED_QUEUE); 586 wakeup_one(uq); 587 if (++ret >= n_wake) 588 break; 589 } 590 return (ret); 591 } 592 593 /* 594 * Wake up threads waiting on an userland object. 595 */ 596 597 static int 598 umtxq_signal_queue(struct umtx_key *key, int n_wake, int q) 599 { 600 struct umtxq_queue *uh; 601 struct umtx_q *uq; 602 int ret; 603 604 ret = 0; 605 UMTXQ_LOCKED_ASSERT(umtxq_getchain(key)); 606 uh = umtxq_queue_lookup(key, q); 607 if (uh != NULL) { 608 while ((uq = TAILQ_FIRST(&uh->head)) != NULL) { 609 umtxq_remove_queue(uq, q); 610 wakeup(uq); 611 if (++ret >= n_wake) 612 return (ret); 613 } 614 } 615 return (ret); 616 } 617 618 /* 619 * Wake up specified thread. 620 */ 621 static inline void 622 umtxq_signal_thread(struct umtx_q *uq) 623 { 624 625 UMTXQ_LOCKED_ASSERT(umtxq_getchain(&uq->uq_key)); 626 umtxq_remove(uq); 627 wakeup(uq); 628 } 629 630 /* 631 * Wake up a maximum of n_wake threads that are waiting on an userland 632 * object identified by key. The remaining threads are removed from queue 633 * identified by key and added to the queue identified by key2 (requeued). 634 * The n_requeue specifies an upper limit on the number of threads that 635 * are requeued to the second queue. 636 */ 637 int 638 umtxq_requeue(struct umtx_key *key, int n_wake, struct umtx_key *key2, 639 int n_requeue) 640 { 641 struct umtxq_queue *uh; 642 struct umtx_q *uq, *uq_temp; 643 int ret; 644 645 ret = 0; 646 UMTXQ_LOCKED_ASSERT(umtxq_getchain(key)); 647 UMTXQ_LOCKED_ASSERT(umtxq_getchain(key2)); 648 uh = umtxq_queue_lookup(key, UMTX_SHARED_QUEUE); 649 if (uh == NULL) 650 return (0); 651 TAILQ_FOREACH_SAFE(uq, &uh->head, uq_link, uq_temp) { 652 if (++ret <= n_wake) { 653 umtxq_remove(uq); 654 wakeup_one(uq); 655 } else { 656 umtxq_remove(uq); 657 uq->uq_key = *key2; 658 umtxq_insert(uq); 659 if (ret - n_wake == n_requeue) 660 break; 661 } 662 } 663 return (ret); 664 } 665 666 static inline int 667 tstohz(const struct timespec *tsp) 668 { 669 struct timeval tv; 670 671 TIMESPEC_TO_TIMEVAL(&tv, tsp); 672 return tvtohz(&tv); 673 } 674 675 void 676 umtx_abs_timeout_init(struct umtx_abs_timeout *timo, int clockid, 677 int absolute, const struct timespec *timeout) 678 { 679 680 timo->clockid = clockid; 681 if (!absolute) { 682 timo->is_abs_real = false; 683 kern_clock_gettime(curthread, timo->clockid, &timo->cur); 684 timespecadd(&timo->cur, timeout, &timo->end); 685 } else { 686 timo->end = *timeout; 687 timo->is_abs_real = clockid == CLOCK_REALTIME || 688 clockid == CLOCK_REALTIME_FAST || 689 clockid == CLOCK_REALTIME_PRECISE || 690 clockid == CLOCK_SECOND; 691 } 692 } 693 694 static void 695 umtx_abs_timeout_init2(struct umtx_abs_timeout *timo, 696 const struct _umtx_time *umtxtime) 697 { 698 699 umtx_abs_timeout_init(timo, umtxtime->_clockid, 700 (umtxtime->_flags & UMTX_ABSTIME) != 0, &umtxtime->_timeout); 701 } 702 703 static int 704 umtx_abs_timeout_getsbt(struct umtx_abs_timeout *timo, sbintime_t *sbt, 705 int *flags) 706 { 707 struct bintime bt, bbt; 708 struct timespec tts; 709 710 switch (timo->clockid) { 711 712 /* Clocks that can be converted into 
absolute time. */ 713 case CLOCK_REALTIME: 714 case CLOCK_REALTIME_PRECISE: 715 case CLOCK_REALTIME_FAST: 716 case CLOCK_MONOTONIC: 717 case CLOCK_MONOTONIC_PRECISE: 718 case CLOCK_MONOTONIC_FAST: 719 case CLOCK_UPTIME: 720 case CLOCK_UPTIME_PRECISE: 721 case CLOCK_UPTIME_FAST: 722 case CLOCK_SECOND: 723 timespec2bintime(&timo->end, &bt); 724 switch (timo->clockid) { 725 case CLOCK_REALTIME: 726 case CLOCK_REALTIME_PRECISE: 727 case CLOCK_REALTIME_FAST: 728 case CLOCK_SECOND: 729 getboottimebin(&bbt); 730 bintime_sub(&bt, &bbt); 731 break; 732 } 733 if (bt.sec < 0) 734 return (ETIMEDOUT); 735 if (bt.sec >= (SBT_MAX >> 32)) { 736 *sbt = 0; 737 *flags = 0; 738 return (0); 739 } 740 *sbt = bttosbt(bt); 741 switch (timo->clockid) { 742 case CLOCK_REALTIME_FAST: 743 case CLOCK_MONOTONIC_FAST: 744 case CLOCK_UPTIME_FAST: 745 *sbt += tc_tick_sbt; 746 break; 747 case CLOCK_SECOND: 748 *sbt += SBT_1S; 749 break; 750 } 751 *flags = C_ABSOLUTE; 752 return (0); 753 754 /* Clocks that has to be periodically polled. */ 755 case CLOCK_VIRTUAL: 756 case CLOCK_PROF: 757 case CLOCK_THREAD_CPUTIME_ID: 758 case CLOCK_PROCESS_CPUTIME_ID: 759 default: 760 kern_clock_gettime(curthread, timo->clockid, &timo->cur); 761 if (timespeccmp(&timo->end, &timo->cur, <=)) 762 return (ETIMEDOUT); 763 timespecsub(&timo->end, &timo->cur, &tts); 764 *sbt = tick_sbt * tstohz(&tts); 765 *flags = C_HARDCLOCK; 766 return (0); 767 } 768 } 769 770 static uint32_t 771 umtx_unlock_val(uint32_t flags, bool rb) 772 { 773 774 if (rb) 775 return (UMUTEX_RB_OWNERDEAD); 776 else if ((flags & UMUTEX_NONCONSISTENT) != 0) 777 return (UMUTEX_RB_NOTRECOV); 778 else 779 return (UMUTEX_UNOWNED); 780 781 } 782 783 /* 784 * Put thread into sleep state, before sleeping, check if 785 * thread was removed from umtx queue. 786 */ 787 int 788 umtxq_sleep(struct umtx_q *uq, const char *wmesg, 789 struct umtx_abs_timeout *timo) 790 { 791 struct umtxq_chain *uc; 792 sbintime_t sbt = 0; 793 int error, flags = 0; 794 795 uc = umtxq_getchain(&uq->uq_key); 796 UMTXQ_LOCKED_ASSERT(uc); 797 for (;;) { 798 if (!(uq->uq_flags & UQF_UMTXQ)) { 799 error = 0; 800 break; 801 } 802 if (timo != NULL) { 803 if (timo->is_abs_real) 804 curthread->td_rtcgen = 805 atomic_load_acq_int(&rtc_generation); 806 error = umtx_abs_timeout_getsbt(timo, &sbt, &flags); 807 if (error != 0) 808 break; 809 } 810 error = msleep_sbt(uq, &uc->uc_lock, PCATCH | PDROP, wmesg, 811 sbt, 0, flags); 812 uc = umtxq_getchain(&uq->uq_key); 813 mtx_lock(&uc->uc_lock); 814 if (error == EINTR || error == ERESTART) 815 break; 816 if (error == EWOULDBLOCK && (flags & C_ABSOLUTE) != 0) { 817 error = ETIMEDOUT; 818 break; 819 } 820 } 821 822 curthread->td_rtcgen = 0; 823 return (error); 824 } 825 826 /* 827 * Convert userspace address into unique logical address. 
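 * For thread-private objects the key is the owning vmspace plus the
 * virtual address; for process-shared objects it is the backing VM
 * object plus the offset into it, so that every mapping of the same
 * page resolves to the same key.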
828 */ 829 int 830 umtx_key_get(const void *addr, int type, int share, struct umtx_key *key) 831 { 832 struct thread *td = curthread; 833 vm_map_t map; 834 vm_map_entry_t entry; 835 vm_pindex_t pindex; 836 vm_prot_t prot; 837 boolean_t wired; 838 839 key->type = type; 840 if (share == THREAD_SHARE) { 841 key->shared = 0; 842 key->info.private.vs = td->td_proc->p_vmspace; 843 key->info.private.addr = (uintptr_t)addr; 844 } else { 845 MPASS(share == PROCESS_SHARE || share == AUTO_SHARE); 846 map = &td->td_proc->p_vmspace->vm_map; 847 if (vm_map_lookup(&map, (vm_offset_t)addr, VM_PROT_WRITE, 848 &entry, &key->info.shared.object, &pindex, &prot, 849 &wired) != KERN_SUCCESS) { 850 return (EFAULT); 851 } 852 853 if ((share == PROCESS_SHARE) || 854 (share == AUTO_SHARE && 855 VM_INHERIT_SHARE == entry->inheritance)) { 856 key->shared = 1; 857 key->info.shared.offset = (vm_offset_t)addr - 858 entry->start + entry->offset; 859 vm_object_reference(key->info.shared.object); 860 } else { 861 key->shared = 0; 862 key->info.private.vs = td->td_proc->p_vmspace; 863 key->info.private.addr = (uintptr_t)addr; 864 } 865 vm_map_lookup_done(map, entry); 866 } 867 868 umtxq_hash(key); 869 return (0); 870 } 871 872 /* 873 * Release key. 874 */ 875 void 876 umtx_key_release(struct umtx_key *key) 877 { 878 if (key->shared) 879 vm_object_deallocate(key->info.shared.object); 880 } 881 882 #ifdef COMPAT_FREEBSD10 883 /* 884 * Lock a umtx object. 885 */ 886 static int 887 do_lock_umtx(struct thread *td, struct umtx *umtx, u_long id, 888 const struct timespec *timeout) 889 { 890 struct umtx_abs_timeout timo; 891 struct umtx_q *uq; 892 u_long owner; 893 u_long old; 894 int error = 0; 895 896 uq = td->td_umtxq; 897 if (timeout != NULL) 898 umtx_abs_timeout_init(&timo, CLOCK_REALTIME, 0, timeout); 899 900 /* 901 * Care must be exercised when dealing with umtx structure. It 902 * can fault on any access. 903 */ 904 for (;;) { 905 /* 906 * Try the uncontested case. This should be done in userland. 907 */ 908 owner = casuword(&umtx->u_owner, UMTX_UNOWNED, id); 909 910 /* The acquire succeeded. */ 911 if (owner == UMTX_UNOWNED) 912 return (0); 913 914 /* The address was invalid. */ 915 if (owner == -1) 916 return (EFAULT); 917 918 /* If no one owns it but it is contested try to acquire it. */ 919 if (owner == UMTX_CONTESTED) { 920 owner = casuword(&umtx->u_owner, 921 UMTX_CONTESTED, id | UMTX_CONTESTED); 922 923 if (owner == UMTX_CONTESTED) 924 return (0); 925 926 /* The address was invalid. */ 927 if (owner == -1) 928 return (EFAULT); 929 930 error = thread_check_susp(td, false); 931 if (error != 0) 932 break; 933 934 /* If this failed the lock has changed, restart. */ 935 continue; 936 } 937 938 /* 939 * If we caught a signal, we have retried and now 940 * exit immediately. 941 */ 942 if (error != 0) 943 break; 944 945 if ((error = umtx_key_get(umtx, TYPE_SIMPLE_LOCK, 946 AUTO_SHARE, &uq->uq_key)) != 0) 947 return (error); 948 949 umtxq_lock(&uq->uq_key); 950 umtxq_busy(&uq->uq_key); 951 umtxq_insert(uq); 952 umtxq_unbusy(&uq->uq_key); 953 umtxq_unlock(&uq->uq_key); 954 955 /* 956 * Set the contested bit so that a release in user space 957 * knows to use the system call for unlock. If this fails 958 * either some one else has acquired the lock or it has been 959 * released. 960 */ 961 old = casuword(&umtx->u_owner, owner, owner | UMTX_CONTESTED); 962 963 /* The address was invalid. 
*/ 964 if (old == -1) { 965 umtxq_lock(&uq->uq_key); 966 umtxq_remove(uq); 967 umtxq_unlock(&uq->uq_key); 968 umtx_key_release(&uq->uq_key); 969 return (EFAULT); 970 } 971 972 /* 973 * We set the contested bit, sleep. Otherwise the lock changed 974 * and we need to retry or we lost a race to the thread 975 * unlocking the umtx. 976 */ 977 umtxq_lock(&uq->uq_key); 978 if (old == owner) 979 error = umtxq_sleep(uq, "umtx", timeout == NULL ? NULL : 980 &timo); 981 umtxq_remove(uq); 982 umtxq_unlock(&uq->uq_key); 983 umtx_key_release(&uq->uq_key); 984 985 if (error == 0) 986 error = thread_check_susp(td, false); 987 } 988 989 if (timeout == NULL) { 990 /* Mutex locking is restarted if it is interrupted. */ 991 if (error == EINTR) 992 error = ERESTART; 993 } else { 994 /* Timed-locking is not restarted. */ 995 if (error == ERESTART) 996 error = EINTR; 997 } 998 return (error); 999 } 1000 1001 /* 1002 * Unlock a umtx object. 1003 */ 1004 static int 1005 do_unlock_umtx(struct thread *td, struct umtx *umtx, u_long id) 1006 { 1007 struct umtx_key key; 1008 u_long owner; 1009 u_long old; 1010 int error; 1011 int count; 1012 1013 /* 1014 * Make sure we own this mtx. 1015 */ 1016 owner = fuword(__DEVOLATILE(u_long *, &umtx->u_owner)); 1017 if (owner == -1) 1018 return (EFAULT); 1019 1020 if ((owner & ~UMTX_CONTESTED) != id) 1021 return (EPERM); 1022 1023 /* This should be done in userland */ 1024 if ((owner & UMTX_CONTESTED) == 0) { 1025 old = casuword(&umtx->u_owner, owner, UMTX_UNOWNED); 1026 if (old == -1) 1027 return (EFAULT); 1028 if (old == owner) 1029 return (0); 1030 owner = old; 1031 } 1032 1033 /* We should only ever be in here for contested locks */ 1034 if ((error = umtx_key_get(umtx, TYPE_SIMPLE_LOCK, AUTO_SHARE, 1035 &key)) != 0) 1036 return (error); 1037 1038 umtxq_lock(&key); 1039 umtxq_busy(&key); 1040 count = umtxq_count(&key); 1041 umtxq_unlock(&key); 1042 1043 /* 1044 * When unlocking the umtx, it must be marked as unowned if 1045 * there is zero or one thread only waiting for it. 1046 * Otherwise, it must be marked as contested. 1047 */ 1048 old = casuword(&umtx->u_owner, owner, 1049 count <= 1 ? UMTX_UNOWNED : UMTX_CONTESTED); 1050 umtxq_lock(&key); 1051 umtxq_signal(&key,1); 1052 umtxq_unbusy(&key); 1053 umtxq_unlock(&key); 1054 umtx_key_release(&key); 1055 if (old == -1) 1056 return (EFAULT); 1057 if (old != owner) 1058 return (EINVAL); 1059 return (0); 1060 } 1061 1062 #ifdef COMPAT_FREEBSD32 1063 1064 /* 1065 * Lock a umtx object. 1066 */ 1067 static int 1068 do_lock_umtx32(struct thread *td, uint32_t *m, uint32_t id, 1069 const struct timespec *timeout) 1070 { 1071 struct umtx_abs_timeout timo; 1072 struct umtx_q *uq; 1073 uint32_t owner; 1074 uint32_t old; 1075 int error = 0; 1076 1077 uq = td->td_umtxq; 1078 1079 if (timeout != NULL) 1080 umtx_abs_timeout_init(&timo, CLOCK_REALTIME, 0, timeout); 1081 1082 /* 1083 * Care must be exercised when dealing with umtx structure. It 1084 * can fault on any access. 1085 */ 1086 for (;;) { 1087 /* 1088 * Try the uncontested case. This should be done in userland. 1089 */ 1090 owner = casuword32(m, UMUTEX_UNOWNED, id); 1091 1092 /* The acquire succeeded. */ 1093 if (owner == UMUTEX_UNOWNED) 1094 return (0); 1095 1096 /* The address was invalid. */ 1097 if (owner == -1) 1098 return (EFAULT); 1099 1100 /* If no one owns it but it is contested try to acquire it. 
*/ 1101 if (owner == UMUTEX_CONTESTED) { 1102 owner = casuword32(m, 1103 UMUTEX_CONTESTED, id | UMUTEX_CONTESTED); 1104 if (owner == UMUTEX_CONTESTED) 1105 return (0); 1106 1107 /* The address was invalid. */ 1108 if (owner == -1) 1109 return (EFAULT); 1110 1111 error = thread_check_susp(td, false); 1112 if (error != 0) 1113 break; 1114 1115 /* If this failed the lock has changed, restart. */ 1116 continue; 1117 } 1118 1119 /* 1120 * If we caught a signal, we have retried and now 1121 * exit immediately. 1122 */ 1123 if (error != 0) 1124 return (error); 1125 1126 if ((error = umtx_key_get(m, TYPE_SIMPLE_LOCK, 1127 AUTO_SHARE, &uq->uq_key)) != 0) 1128 return (error); 1129 1130 umtxq_lock(&uq->uq_key); 1131 umtxq_busy(&uq->uq_key); 1132 umtxq_insert(uq); 1133 umtxq_unbusy(&uq->uq_key); 1134 umtxq_unlock(&uq->uq_key); 1135 1136 /* 1137 * Set the contested bit so that a release in user space 1138 * knows to use the system call for unlock. If this fails 1139 * either some one else has acquired the lock or it has been 1140 * released. 1141 */ 1142 old = casuword32(m, owner, owner | UMUTEX_CONTESTED); 1143 1144 /* The address was invalid. */ 1145 if (old == -1) { 1146 umtxq_lock(&uq->uq_key); 1147 umtxq_remove(uq); 1148 umtxq_unlock(&uq->uq_key); 1149 umtx_key_release(&uq->uq_key); 1150 return (EFAULT); 1151 } 1152 1153 /* 1154 * We set the contested bit, sleep. Otherwise the lock changed 1155 * and we need to retry or we lost a race to the thread 1156 * unlocking the umtx. 1157 */ 1158 umtxq_lock(&uq->uq_key); 1159 if (old == owner) 1160 error = umtxq_sleep(uq, "umtx", timeout == NULL ? 1161 NULL : &timo); 1162 umtxq_remove(uq); 1163 umtxq_unlock(&uq->uq_key); 1164 umtx_key_release(&uq->uq_key); 1165 1166 if (error == 0) 1167 error = thread_check_susp(td, false); 1168 } 1169 1170 if (timeout == NULL) { 1171 /* Mutex locking is restarted if it is interrupted. */ 1172 if (error == EINTR) 1173 error = ERESTART; 1174 } else { 1175 /* Timed-locking is not restarted. */ 1176 if (error == ERESTART) 1177 error = EINTR; 1178 } 1179 return (error); 1180 } 1181 1182 /* 1183 * Unlock a umtx object. 1184 */ 1185 static int 1186 do_unlock_umtx32(struct thread *td, uint32_t *m, uint32_t id) 1187 { 1188 struct umtx_key key; 1189 uint32_t owner; 1190 uint32_t old; 1191 int error; 1192 int count; 1193 1194 /* 1195 * Make sure we own this mtx. 1196 */ 1197 owner = fuword32(m); 1198 if (owner == -1) 1199 return (EFAULT); 1200 1201 if ((owner & ~UMUTEX_CONTESTED) != id) 1202 return (EPERM); 1203 1204 /* This should be done in userland */ 1205 if ((owner & UMUTEX_CONTESTED) == 0) { 1206 old = casuword32(m, owner, UMUTEX_UNOWNED); 1207 if (old == -1) 1208 return (EFAULT); 1209 if (old == owner) 1210 return (0); 1211 owner = old; 1212 } 1213 1214 /* We should only ever be in here for contested locks */ 1215 if ((error = umtx_key_get(m, TYPE_SIMPLE_LOCK, AUTO_SHARE, 1216 &key)) != 0) 1217 return (error); 1218 1219 umtxq_lock(&key); 1220 umtxq_busy(&key); 1221 count = umtxq_count(&key); 1222 umtxq_unlock(&key); 1223 1224 /* 1225 * When unlocking the umtx, it must be marked as unowned if 1226 * there is zero or one thread only waiting for it. 1227 * Otherwise, it must be marked as contested. 1228 */ 1229 old = casuword32(m, owner, 1230 count <= 1 ? 
UMUTEX_UNOWNED : UMUTEX_CONTESTED); 1231 umtxq_lock(&key); 1232 umtxq_signal(&key,1); 1233 umtxq_unbusy(&key); 1234 umtxq_unlock(&key); 1235 umtx_key_release(&key); 1236 if (old == -1) 1237 return (EFAULT); 1238 if (old != owner) 1239 return (EINVAL); 1240 return (0); 1241 } 1242 #endif /* COMPAT_FREEBSD32 */ 1243 #endif /* COMPAT_FREEBSD10 */ 1244 1245 /* 1246 * Fetch and compare value, sleep on the address if value is not changed. 1247 */ 1248 static int 1249 do_wait(struct thread *td, void *addr, u_long id, 1250 struct _umtx_time *timeout, int compat32, int is_private) 1251 { 1252 struct umtx_abs_timeout timo; 1253 struct umtx_q *uq; 1254 u_long tmp; 1255 uint32_t tmp32; 1256 int error = 0; 1257 1258 uq = td->td_umtxq; 1259 if ((error = umtx_key_get(addr, TYPE_SIMPLE_WAIT, 1260 is_private ? THREAD_SHARE : AUTO_SHARE, &uq->uq_key)) != 0) 1261 return (error); 1262 1263 if (timeout != NULL) 1264 umtx_abs_timeout_init2(&timo, timeout); 1265 1266 umtxq_lock(&uq->uq_key); 1267 umtxq_insert(uq); 1268 umtxq_unlock(&uq->uq_key); 1269 if (compat32 == 0) { 1270 error = fueword(addr, &tmp); 1271 if (error != 0) 1272 error = EFAULT; 1273 } else { 1274 error = fueword32(addr, &tmp32); 1275 if (error == 0) 1276 tmp = tmp32; 1277 else 1278 error = EFAULT; 1279 } 1280 umtxq_lock(&uq->uq_key); 1281 if (error == 0) { 1282 if (tmp == id) 1283 error = umtxq_sleep(uq, "uwait", timeout == NULL ? 1284 NULL : &timo); 1285 if ((uq->uq_flags & UQF_UMTXQ) == 0) 1286 error = 0; 1287 else 1288 umtxq_remove(uq); 1289 } else if ((uq->uq_flags & UQF_UMTXQ) != 0) { 1290 umtxq_remove(uq); 1291 } 1292 umtxq_unlock(&uq->uq_key); 1293 umtx_key_release(&uq->uq_key); 1294 if (error == ERESTART) 1295 error = EINTR; 1296 return (error); 1297 } 1298 1299 /* 1300 * Wake up threads sleeping on the specified address. 1301 */ 1302 int 1303 kern_umtx_wake(struct thread *td, void *uaddr, int n_wake, int is_private) 1304 { 1305 struct umtx_key key; 1306 int ret; 1307 1308 if ((ret = umtx_key_get(uaddr, TYPE_SIMPLE_WAIT, 1309 is_private ? THREAD_SHARE : AUTO_SHARE, &key)) != 0) 1310 return (ret); 1311 umtxq_lock(&key); 1312 umtxq_signal(&key, n_wake); 1313 umtxq_unlock(&key); 1314 umtx_key_release(&key); 1315 return (0); 1316 } 1317 1318 /* 1319 * Lock PTHREAD_PRIO_NONE protocol POSIX mutex. 1320 */ 1321 static int 1322 do_lock_normal(struct thread *td, struct umutex *m, uint32_t flags, 1323 struct _umtx_time *timeout, int mode) 1324 { 1325 struct umtx_abs_timeout timo; 1326 struct umtx_q *uq; 1327 uint32_t owner, old, id; 1328 int error, rv; 1329 1330 id = td->td_tid; 1331 uq = td->td_umtxq; 1332 error = 0; 1333 if (timeout != NULL) 1334 umtx_abs_timeout_init2(&timo, timeout); 1335 1336 /* 1337 * Care must be exercised when dealing with umtx structure. It 1338 * can fault on any access. 1339 */ 1340 for (;;) { 1341 rv = fueword32(&m->m_owner, &owner); 1342 if (rv == -1) 1343 return (EFAULT); 1344 if (mode == _UMUTEX_WAIT) { 1345 if (owner == UMUTEX_UNOWNED || 1346 owner == UMUTEX_CONTESTED || 1347 owner == UMUTEX_RB_OWNERDEAD || 1348 owner == UMUTEX_RB_NOTRECOV) 1349 return (0); 1350 } else { 1351 /* 1352 * Robust mutex terminated. Kernel duty is to 1353 * return EOWNERDEAD to the userspace. The 1354 * umutex.m_flags UMUTEX_NONCONSISTENT is set 1355 * by the common userspace code. 
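			 * (The new owner is then expected to repair the
			 * protected state and mark the mutex consistent,
			 * e.g. via pthread_mutex_consistent(3); otherwise
			 * the mutex ends up in the UMUTEX_RB_NOTRECOV
			 * state.)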
1356 */ 1357 if (owner == UMUTEX_RB_OWNERDEAD) { 1358 rv = casueword32(&m->m_owner, 1359 UMUTEX_RB_OWNERDEAD, &owner, 1360 id | UMUTEX_CONTESTED); 1361 if (rv == -1) 1362 return (EFAULT); 1363 if (rv == 0) { 1364 MPASS(owner == UMUTEX_RB_OWNERDEAD); 1365 return (EOWNERDEAD); /* success */ 1366 } 1367 MPASS(rv == 1); 1368 rv = thread_check_susp(td, false); 1369 if (rv != 0) 1370 return (rv); 1371 continue; 1372 } 1373 if (owner == UMUTEX_RB_NOTRECOV) 1374 return (ENOTRECOVERABLE); 1375 1376 /* 1377 * Try the uncontested case. This should be 1378 * done in userland. 1379 */ 1380 rv = casueword32(&m->m_owner, UMUTEX_UNOWNED, 1381 &owner, id); 1382 /* The address was invalid. */ 1383 if (rv == -1) 1384 return (EFAULT); 1385 1386 /* The acquire succeeded. */ 1387 if (rv == 0) { 1388 MPASS(owner == UMUTEX_UNOWNED); 1389 return (0); 1390 } 1391 1392 /* 1393 * If no one owns it but it is contested try 1394 * to acquire it. 1395 */ 1396 MPASS(rv == 1); 1397 if (owner == UMUTEX_CONTESTED) { 1398 rv = casueword32(&m->m_owner, 1399 UMUTEX_CONTESTED, &owner, 1400 id | UMUTEX_CONTESTED); 1401 /* The address was invalid. */ 1402 if (rv == -1) 1403 return (EFAULT); 1404 if (rv == 0) { 1405 MPASS(owner == UMUTEX_CONTESTED); 1406 return (0); 1407 } 1408 if (rv == 1) { 1409 rv = thread_check_susp(td, false); 1410 if (rv != 0) 1411 return (rv); 1412 } 1413 1414 /* 1415 * If this failed the lock has 1416 * changed, restart. 1417 */ 1418 continue; 1419 } 1420 1421 /* rv == 1 but not contested, likely store failure */ 1422 rv = thread_check_susp(td, false); 1423 if (rv != 0) 1424 return (rv); 1425 } 1426 1427 if (mode == _UMUTEX_TRY) 1428 return (EBUSY); 1429 1430 /* 1431 * If we caught a signal, we have retried and now 1432 * exit immediately. 1433 */ 1434 if (error != 0) 1435 return (error); 1436 1437 if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, 1438 GET_SHARE(flags), &uq->uq_key)) != 0) 1439 return (error); 1440 1441 umtxq_lock(&uq->uq_key); 1442 umtxq_busy(&uq->uq_key); 1443 umtxq_insert(uq); 1444 umtxq_unlock(&uq->uq_key); 1445 1446 /* 1447 * Set the contested bit so that a release in user space 1448 * knows to use the system call for unlock. If this fails 1449 * either some one else has acquired the lock or it has been 1450 * released. 1451 */ 1452 rv = casueword32(&m->m_owner, owner, &old, 1453 owner | UMUTEX_CONTESTED); 1454 1455 /* The address was invalid or casueword failed to store. */ 1456 if (rv == -1 || rv == 1) { 1457 umtxq_lock(&uq->uq_key); 1458 umtxq_remove(uq); 1459 umtxq_unbusy(&uq->uq_key); 1460 umtxq_unlock(&uq->uq_key); 1461 umtx_key_release(&uq->uq_key); 1462 if (rv == -1) 1463 return (EFAULT); 1464 if (rv == 1) { 1465 rv = thread_check_susp(td, false); 1466 if (rv != 0) 1467 return (rv); 1468 } 1469 continue; 1470 } 1471 1472 /* 1473 * We set the contested bit, sleep. Otherwise the lock changed 1474 * and we need to retry or we lost a race to the thread 1475 * unlocking the umtx. 1476 */ 1477 umtxq_lock(&uq->uq_key); 1478 umtxq_unbusy(&uq->uq_key); 1479 MPASS(old == owner); 1480 error = umtxq_sleep(uq, "umtxn", timeout == NULL ? 1481 NULL : &timo); 1482 umtxq_remove(uq); 1483 umtxq_unlock(&uq->uq_key); 1484 umtx_key_release(&uq->uq_key); 1485 1486 if (error == 0) 1487 error = thread_check_susp(td, false); 1488 } 1489 1490 return (0); 1491 } 1492 1493 /* 1494 * Unlock PTHREAD_PRIO_NONE protocol POSIX mutex. 
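 *
 * For reference, a matching userspace unlock fast path looks roughly
 * like the sketch below (an illustrative sketch, not the libthr
 * implementation):
 *
 *	if (atomic_cmpset_rel_32(&m->m_owner, tid, UMUTEX_UNOWNED))
 *		return (0);		-- uncontested, nothing to wake
 *	return (_umtx_op(m, UMTX_OP_MUTEX_UNLOCK, 0, NULL, NULL));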
1495 */ 1496 static int 1497 do_unlock_normal(struct thread *td, struct umutex *m, uint32_t flags, bool rb) 1498 { 1499 struct umtx_key key; 1500 uint32_t owner, old, id, newlock; 1501 int error, count; 1502 1503 id = td->td_tid; 1504 1505 again: 1506 /* 1507 * Make sure we own this mtx. 1508 */ 1509 error = fueword32(&m->m_owner, &owner); 1510 if (error == -1) 1511 return (EFAULT); 1512 1513 if ((owner & ~UMUTEX_CONTESTED) != id) 1514 return (EPERM); 1515 1516 newlock = umtx_unlock_val(flags, rb); 1517 if ((owner & UMUTEX_CONTESTED) == 0) { 1518 error = casueword32(&m->m_owner, owner, &old, newlock); 1519 if (error == -1) 1520 return (EFAULT); 1521 if (error == 1) { 1522 error = thread_check_susp(td, false); 1523 if (error != 0) 1524 return (error); 1525 goto again; 1526 } 1527 MPASS(old == owner); 1528 return (0); 1529 } 1530 1531 /* We should only ever be in here for contested locks */ 1532 if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags), 1533 &key)) != 0) 1534 return (error); 1535 1536 umtxq_lock(&key); 1537 umtxq_busy(&key); 1538 count = umtxq_count(&key); 1539 umtxq_unlock(&key); 1540 1541 /* 1542 * When unlocking the umtx, it must be marked as unowned if 1543 * there is zero or one thread only waiting for it. 1544 * Otherwise, it must be marked as contested. 1545 */ 1546 if (count > 1) 1547 newlock |= UMUTEX_CONTESTED; 1548 error = casueword32(&m->m_owner, owner, &old, newlock); 1549 umtxq_lock(&key); 1550 umtxq_signal(&key, 1); 1551 umtxq_unbusy(&key); 1552 umtxq_unlock(&key); 1553 umtx_key_release(&key); 1554 if (error == -1) 1555 return (EFAULT); 1556 if (error == 1) { 1557 if (old != owner) 1558 return (EINVAL); 1559 error = thread_check_susp(td, false); 1560 if (error != 0) 1561 return (error); 1562 goto again; 1563 } 1564 return (0); 1565 } 1566 1567 /* 1568 * Check if the mutex is available and wake up a waiter, 1569 * only for simple mutex. 
1570 */ 1571 static int 1572 do_wake_umutex(struct thread *td, struct umutex *m) 1573 { 1574 struct umtx_key key; 1575 uint32_t owner; 1576 uint32_t flags; 1577 int error; 1578 int count; 1579 1580 again: 1581 error = fueword32(&m->m_owner, &owner); 1582 if (error == -1) 1583 return (EFAULT); 1584 1585 if ((owner & ~UMUTEX_CONTESTED) != 0 && owner != UMUTEX_RB_OWNERDEAD && 1586 owner != UMUTEX_RB_NOTRECOV) 1587 return (0); 1588 1589 error = fueword32(&m->m_flags, &flags); 1590 if (error == -1) 1591 return (EFAULT); 1592 1593 /* We should only ever be in here for contested locks */ 1594 if ((error = umtx_key_get(m, TYPE_NORMAL_UMUTEX, GET_SHARE(flags), 1595 &key)) != 0) 1596 return (error); 1597 1598 umtxq_lock(&key); 1599 umtxq_busy(&key); 1600 count = umtxq_count(&key); 1601 umtxq_unlock(&key); 1602 1603 if (count <= 1 && owner != UMUTEX_RB_OWNERDEAD && 1604 owner != UMUTEX_RB_NOTRECOV) { 1605 error = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner, 1606 UMUTEX_UNOWNED); 1607 if (error == -1) { 1608 error = EFAULT; 1609 } else if (error == 1) { 1610 umtxq_lock(&key); 1611 umtxq_unbusy(&key); 1612 umtxq_unlock(&key); 1613 umtx_key_release(&key); 1614 error = thread_check_susp(td, false); 1615 if (error != 0) 1616 return (error); 1617 goto again; 1618 } 1619 } 1620 1621 umtxq_lock(&key); 1622 if (error == 0 && count != 0) { 1623 MPASS((owner & ~UMUTEX_CONTESTED) == 0 || 1624 owner == UMUTEX_RB_OWNERDEAD || 1625 owner == UMUTEX_RB_NOTRECOV); 1626 umtxq_signal(&key, 1); 1627 } 1628 umtxq_unbusy(&key); 1629 umtxq_unlock(&key); 1630 umtx_key_release(&key); 1631 return (error); 1632 } 1633 1634 /* 1635 * Check if the mutex has waiters and tries to fix contention bit. 1636 */ 1637 static int 1638 do_wake2_umutex(struct thread *td, struct umutex *m, uint32_t flags) 1639 { 1640 struct umtx_key key; 1641 uint32_t owner, old; 1642 int type; 1643 int error; 1644 int count; 1645 1646 switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT | 1647 UMUTEX_ROBUST)) { 1648 case 0: 1649 case UMUTEX_ROBUST: 1650 type = TYPE_NORMAL_UMUTEX; 1651 break; 1652 case UMUTEX_PRIO_INHERIT: 1653 type = TYPE_PI_UMUTEX; 1654 break; 1655 case (UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST): 1656 type = TYPE_PI_ROBUST_UMUTEX; 1657 break; 1658 case UMUTEX_PRIO_PROTECT: 1659 type = TYPE_PP_UMUTEX; 1660 break; 1661 case (UMUTEX_PRIO_PROTECT | UMUTEX_ROBUST): 1662 type = TYPE_PP_ROBUST_UMUTEX; 1663 break; 1664 default: 1665 return (EINVAL); 1666 } 1667 if ((error = umtx_key_get(m, type, GET_SHARE(flags), &key)) != 0) 1668 return (error); 1669 1670 owner = 0; 1671 umtxq_lock(&key); 1672 umtxq_busy(&key); 1673 count = umtxq_count(&key); 1674 umtxq_unlock(&key); 1675 1676 error = fueword32(&m->m_owner, &owner); 1677 if (error == -1) 1678 error = EFAULT; 1679 1680 /* 1681 * Only repair contention bit if there is a waiter, this means 1682 * the mutex is still being referenced by userland code, 1683 * otherwise don't update any memory. 
1684 */ 1685 while (error == 0 && (owner & UMUTEX_CONTESTED) == 0 && 1686 (count > 1 || (count == 1 && (owner & ~UMUTEX_CONTESTED) != 0))) { 1687 error = casueword32(&m->m_owner, owner, &old, 1688 owner | UMUTEX_CONTESTED); 1689 if (error == -1) { 1690 error = EFAULT; 1691 break; 1692 } 1693 if (error == 0) { 1694 MPASS(old == owner); 1695 break; 1696 } 1697 owner = old; 1698 error = thread_check_susp(td, false); 1699 } 1700 1701 umtxq_lock(&key); 1702 if (error == EFAULT) { 1703 umtxq_signal(&key, INT_MAX); 1704 } else if (count != 0 && ((owner & ~UMUTEX_CONTESTED) == 0 || 1705 owner == UMUTEX_RB_OWNERDEAD || owner == UMUTEX_RB_NOTRECOV)) 1706 umtxq_signal(&key, 1); 1707 umtxq_unbusy(&key); 1708 umtxq_unlock(&key); 1709 umtx_key_release(&key); 1710 return (error); 1711 } 1712 1713 struct umtx_pi * 1714 umtx_pi_alloc(int flags) 1715 { 1716 struct umtx_pi *pi; 1717 1718 pi = uma_zalloc(umtx_pi_zone, M_ZERO | flags); 1719 TAILQ_INIT(&pi->pi_blocked); 1720 atomic_add_int(&umtx_pi_allocated, 1); 1721 return (pi); 1722 } 1723 1724 void 1725 umtx_pi_free(struct umtx_pi *pi) 1726 { 1727 uma_zfree(umtx_pi_zone, pi); 1728 atomic_add_int(&umtx_pi_allocated, -1); 1729 } 1730 1731 /* 1732 * Adjust the thread's position on a pi_state after its priority has been 1733 * changed. 1734 */ 1735 static int 1736 umtx_pi_adjust_thread(struct umtx_pi *pi, struct thread *td) 1737 { 1738 struct umtx_q *uq, *uq1, *uq2; 1739 struct thread *td1; 1740 1741 mtx_assert(&umtx_lock, MA_OWNED); 1742 if (pi == NULL) 1743 return (0); 1744 1745 uq = td->td_umtxq; 1746 1747 /* 1748 * Check if the thread needs to be moved on the blocked chain. 1749 * It needs to be moved if either its priority is lower than 1750 * the previous thread or higher than the next thread. 1751 */ 1752 uq1 = TAILQ_PREV(uq, umtxq_head, uq_lockq); 1753 uq2 = TAILQ_NEXT(uq, uq_lockq); 1754 if ((uq1 != NULL && UPRI(td) < UPRI(uq1->uq_thread)) || 1755 (uq2 != NULL && UPRI(td) > UPRI(uq2->uq_thread))) { 1756 /* 1757 * Remove thread from blocked chain and determine where 1758 * it should be moved to. 1759 */ 1760 TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq); 1761 TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) { 1762 td1 = uq1->uq_thread; 1763 MPASS(td1->td_proc->p_magic == P_MAGIC); 1764 if (UPRI(td1) > UPRI(td)) 1765 break; 1766 } 1767 1768 if (uq1 == NULL) 1769 TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq); 1770 else 1771 TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq); 1772 } 1773 return (1); 1774 } 1775 1776 static struct umtx_pi * 1777 umtx_pi_next(struct umtx_pi *pi) 1778 { 1779 struct umtx_q *uq_owner; 1780 1781 if (pi->pi_owner == NULL) 1782 return (NULL); 1783 uq_owner = pi->pi_owner->td_umtxq; 1784 if (uq_owner == NULL) 1785 return (NULL); 1786 return (uq_owner->uq_pi_blocked); 1787 } 1788 1789 /* 1790 * Floyd's Cycle-Finding Algorithm. 1791 */ 1792 static bool 1793 umtx_pi_check_loop(struct umtx_pi *pi) 1794 { 1795 struct umtx_pi *pi1; /* fast iterator */ 1796 1797 mtx_assert(&umtx_lock, MA_OWNED); 1798 if (pi == NULL) 1799 return (false); 1800 pi1 = pi; 1801 for (;;) { 1802 pi = umtx_pi_next(pi); 1803 if (pi == NULL) 1804 break; 1805 pi1 = umtx_pi_next(pi1); 1806 if (pi1 == NULL) 1807 break; 1808 pi1 = umtx_pi_next(pi1); 1809 if (pi1 == NULL) 1810 break; 1811 if (pi == pi1) 1812 return (true); 1813 } 1814 return (false); 1815 } 1816 1817 /* 1818 * Propagate priority when a thread is blocked on POSIX 1819 * PI mutex. 
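 * Starting from the mutex this thread is blocked on, walk the chain of
 * lock owners and lend this thread's priority to every owner currently
 * running at a worse (numerically higher) user priority.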
1820 */ 1821 static void 1822 umtx_propagate_priority(struct thread *td) 1823 { 1824 struct umtx_q *uq; 1825 struct umtx_pi *pi; 1826 int pri; 1827 1828 mtx_assert(&umtx_lock, MA_OWNED); 1829 pri = UPRI(td); 1830 uq = td->td_umtxq; 1831 pi = uq->uq_pi_blocked; 1832 if (pi == NULL) 1833 return; 1834 if (umtx_pi_check_loop(pi)) 1835 return; 1836 1837 for (;;) { 1838 td = pi->pi_owner; 1839 if (td == NULL || td == curthread) 1840 return; 1841 1842 MPASS(td->td_proc != NULL); 1843 MPASS(td->td_proc->p_magic == P_MAGIC); 1844 1845 thread_lock(td); 1846 if (td->td_lend_user_pri > pri) 1847 sched_lend_user_prio(td, pri); 1848 else { 1849 thread_unlock(td); 1850 break; 1851 } 1852 thread_unlock(td); 1853 1854 /* 1855 * Pick up the lock that td is blocked on. 1856 */ 1857 uq = td->td_umtxq; 1858 pi = uq->uq_pi_blocked; 1859 if (pi == NULL) 1860 break; 1861 /* Resort td on the list if needed. */ 1862 umtx_pi_adjust_thread(pi, td); 1863 } 1864 } 1865 1866 /* 1867 * Unpropagate priority for a PI mutex when a thread blocked on 1868 * it is interrupted by signal or resumed by others. 1869 */ 1870 static void 1871 umtx_repropagate_priority(struct umtx_pi *pi) 1872 { 1873 struct umtx_q *uq, *uq_owner; 1874 struct umtx_pi *pi2; 1875 int pri; 1876 1877 mtx_assert(&umtx_lock, MA_OWNED); 1878 1879 if (umtx_pi_check_loop(pi)) 1880 return; 1881 while (pi != NULL && pi->pi_owner != NULL) { 1882 pri = PRI_MAX; 1883 uq_owner = pi->pi_owner->td_umtxq; 1884 1885 TAILQ_FOREACH(pi2, &uq_owner->uq_pi_contested, pi_link) { 1886 uq = TAILQ_FIRST(&pi2->pi_blocked); 1887 if (uq != NULL) { 1888 if (pri > UPRI(uq->uq_thread)) 1889 pri = UPRI(uq->uq_thread); 1890 } 1891 } 1892 1893 if (pri > uq_owner->uq_inherited_pri) 1894 pri = uq_owner->uq_inherited_pri; 1895 thread_lock(pi->pi_owner); 1896 sched_lend_user_prio(pi->pi_owner, pri); 1897 thread_unlock(pi->pi_owner); 1898 if ((pi = uq_owner->uq_pi_blocked) != NULL) 1899 umtx_pi_adjust_thread(pi, uq_owner->uq_thread); 1900 } 1901 } 1902 1903 /* 1904 * Insert a PI mutex into owned list. 1905 */ 1906 static void 1907 umtx_pi_setowner(struct umtx_pi *pi, struct thread *owner) 1908 { 1909 struct umtx_q *uq_owner; 1910 1911 uq_owner = owner->td_umtxq; 1912 mtx_assert(&umtx_lock, MA_OWNED); 1913 MPASS(pi->pi_owner == NULL); 1914 pi->pi_owner = owner; 1915 TAILQ_INSERT_TAIL(&uq_owner->uq_pi_contested, pi, pi_link); 1916 } 1917 1918 /* 1919 * Disown a PI mutex, and remove it from the owned list. 1920 */ 1921 static void 1922 umtx_pi_disown(struct umtx_pi *pi) 1923 { 1924 1925 mtx_assert(&umtx_lock, MA_OWNED); 1926 TAILQ_REMOVE(&pi->pi_owner->td_umtxq->uq_pi_contested, pi, pi_link); 1927 pi->pi_owner = NULL; 1928 } 1929 1930 /* 1931 * Claim ownership of a PI mutex. 1932 */ 1933 int 1934 umtx_pi_claim(struct umtx_pi *pi, struct thread *owner) 1935 { 1936 struct umtx_q *uq; 1937 int pri; 1938 1939 mtx_lock(&umtx_lock); 1940 if (pi->pi_owner == owner) { 1941 mtx_unlock(&umtx_lock); 1942 return (0); 1943 } 1944 1945 if (pi->pi_owner != NULL) { 1946 /* 1947 * userland may have already messed the mutex, sigh. 
		 */
		mtx_unlock(&umtx_lock);
		return (EPERM);
	}
	umtx_pi_setowner(pi, owner);
	uq = TAILQ_FIRST(&pi->pi_blocked);
	if (uq != NULL) {
		pri = UPRI(uq->uq_thread);
		thread_lock(owner);
		if (pri < UPRI(owner))
			sched_lend_user_prio(owner, pri);
		thread_unlock(owner);
	}
	mtx_unlock(&umtx_lock);
	return (0);
}

/*
 * Adjust a thread's position on the blocked list of the PI mutex it is
 * blocked on; this may start a new round of priority propagation.
 */
void
umtx_pi_adjust(struct thread *td, u_char oldpri)
{
	struct umtx_q *uq;
	struct umtx_pi *pi;

	uq = td->td_umtxq;
	mtx_lock(&umtx_lock);
	/*
	 * Pick up the lock that td is blocked on.
	 */
	pi = uq->uq_pi_blocked;
	if (pi != NULL) {
		umtx_pi_adjust_thread(pi, td);
		umtx_repropagate_priority(pi);
	}
	mtx_unlock(&umtx_lock);
}

/*
 * Sleep on a PI mutex.
 */
int
umtxq_sleep_pi(struct umtx_q *uq, struct umtx_pi *pi, uint32_t owner,
    const char *wmesg, struct umtx_abs_timeout *timo, bool shared)
{
	struct thread *td, *td1;
	struct umtx_q *uq1;
	int error, pri;
#ifdef INVARIANTS
	struct umtxq_chain *uc;

	uc = umtxq_getchain(&pi->pi_key);
#endif
	error = 0;
	td = uq->uq_thread;
	KASSERT(td == curthread, ("inconsistent uq_thread"));
	UMTXQ_LOCKED_ASSERT(umtxq_getchain(&uq->uq_key));
	KASSERT(uc->uc_busy != 0, ("umtx chain is not busy"));
	umtxq_insert(uq);
	mtx_lock(&umtx_lock);
	if (pi->pi_owner == NULL) {
		mtx_unlock(&umtx_lock);
		td1 = tdfind(owner, shared ? -1 : td->td_proc->p_pid);
		mtx_lock(&umtx_lock);
		if (td1 != NULL) {
			if (pi->pi_owner == NULL)
				umtx_pi_setowner(pi, td1);
			PROC_UNLOCK(td1->td_proc);
		}
	}

	TAILQ_FOREACH(uq1, &pi->pi_blocked, uq_lockq) {
		pri = UPRI(uq1->uq_thread);
		if (pri > UPRI(td))
			break;
	}

	if (uq1 != NULL)
		TAILQ_INSERT_BEFORE(uq1, uq, uq_lockq);
	else
		TAILQ_INSERT_TAIL(&pi->pi_blocked, uq, uq_lockq);

	uq->uq_pi_blocked = pi;
	thread_lock(td);
	td->td_flags |= TDF_UPIBLOCKED;
	thread_unlock(td);
	umtx_propagate_priority(td);
	mtx_unlock(&umtx_lock);
	umtxq_unbusy(&uq->uq_key);

	error = umtxq_sleep(uq, wmesg, timo);
	umtxq_remove(uq);

	mtx_lock(&umtx_lock);
	uq->uq_pi_blocked = NULL;
	thread_lock(td);
	td->td_flags &= ~TDF_UPIBLOCKED;
	thread_unlock(td);
	TAILQ_REMOVE(&pi->pi_blocked, uq, uq_lockq);
	umtx_repropagate_priority(pi);
	mtx_unlock(&umtx_lock);
	umtxq_unlock(&uq->uq_key);

	return (error);
}

/*
 * Add a reference to a PI mutex.
 */
void
umtx_pi_ref(struct umtx_pi *pi)
{

	UMTXQ_LOCKED_ASSERT(umtxq_getchain(&pi->pi_key));
	pi->pi_refcount++;
}

/*
 * Decrease the reference count of a PI mutex; if the counter
 * drops to zero, its memory is freed.
2070 */ 2071 void 2072 umtx_pi_unref(struct umtx_pi *pi) 2073 { 2074 struct umtxq_chain *uc; 2075 2076 uc = umtxq_getchain(&pi->pi_key); 2077 UMTXQ_LOCKED_ASSERT(uc); 2078 KASSERT(pi->pi_refcount > 0, ("invalid reference count")); 2079 if (--pi->pi_refcount == 0) { 2080 mtx_lock(&umtx_lock); 2081 if (pi->pi_owner != NULL) 2082 umtx_pi_disown(pi); 2083 KASSERT(TAILQ_EMPTY(&pi->pi_blocked), 2084 ("blocked queue not empty")); 2085 mtx_unlock(&umtx_lock); 2086 TAILQ_REMOVE(&uc->uc_pi_list, pi, pi_hashlink); 2087 umtx_pi_free(pi); 2088 } 2089 } 2090 2091 /* 2092 * Find a PI mutex in hash table. 2093 */ 2094 struct umtx_pi * 2095 umtx_pi_lookup(struct umtx_key *key) 2096 { 2097 struct umtxq_chain *uc; 2098 struct umtx_pi *pi; 2099 2100 uc = umtxq_getchain(key); 2101 UMTXQ_LOCKED_ASSERT(uc); 2102 2103 TAILQ_FOREACH(pi, &uc->uc_pi_list, pi_hashlink) { 2104 if (umtx_key_match(&pi->pi_key, key)) { 2105 return (pi); 2106 } 2107 } 2108 return (NULL); 2109 } 2110 2111 /* 2112 * Insert a PI mutex into hash table. 2113 */ 2114 void 2115 umtx_pi_insert(struct umtx_pi *pi) 2116 { 2117 struct umtxq_chain *uc; 2118 2119 uc = umtxq_getchain(&pi->pi_key); 2120 UMTXQ_LOCKED_ASSERT(uc); 2121 TAILQ_INSERT_TAIL(&uc->uc_pi_list, pi, pi_hashlink); 2122 } 2123 2124 /* 2125 * Drop a PI mutex and wakeup a top waiter. 2126 */ 2127 int 2128 umtx_pi_drop(struct thread *td, struct umtx_key *key, bool rb, int *count) 2129 { 2130 struct umtx_q *uq_first, *uq_first2, *uq_me; 2131 struct umtx_pi *pi, *pi2; 2132 int pri; 2133 2134 UMTXQ_ASSERT_LOCKED_BUSY(key); 2135 *count = umtxq_count_pi(key, &uq_first); 2136 if (uq_first != NULL) { 2137 mtx_lock(&umtx_lock); 2138 pi = uq_first->uq_pi_blocked; 2139 KASSERT(pi != NULL, ("pi == NULL?")); 2140 if (pi->pi_owner != td && !(rb && pi->pi_owner == NULL)) { 2141 mtx_unlock(&umtx_lock); 2142 /* userland messed the mutex */ 2143 return (EPERM); 2144 } 2145 uq_me = td->td_umtxq; 2146 if (pi->pi_owner == td) 2147 umtx_pi_disown(pi); 2148 /* get highest priority thread which is still sleeping. */ 2149 uq_first = TAILQ_FIRST(&pi->pi_blocked); 2150 while (uq_first != NULL && 2151 (uq_first->uq_flags & UQF_UMTXQ) == 0) { 2152 uq_first = TAILQ_NEXT(uq_first, uq_lockq); 2153 } 2154 pri = PRI_MAX; 2155 TAILQ_FOREACH(pi2, &uq_me->uq_pi_contested, pi_link) { 2156 uq_first2 = TAILQ_FIRST(&pi2->pi_blocked); 2157 if (uq_first2 != NULL) { 2158 if (pri > UPRI(uq_first2->uq_thread)) 2159 pri = UPRI(uq_first2->uq_thread); 2160 } 2161 } 2162 thread_lock(td); 2163 sched_lend_user_prio(td, pri); 2164 thread_unlock(td); 2165 mtx_unlock(&umtx_lock); 2166 if (uq_first) 2167 umtxq_signal_thread(uq_first); 2168 } else { 2169 pi = umtx_pi_lookup(key); 2170 /* 2171 * A umtx_pi can exist if a signal or timeout removed the 2172 * last waiter from the umtxq, but there is still 2173 * a thread in do_lock_pi() holding the umtx_pi. 2174 */ 2175 if (pi != NULL) { 2176 /* 2177 * The umtx_pi can be unowned, such as when a thread 2178 * has just entered do_lock_pi(), allocated the 2179 * umtx_pi, and unlocked the umtxq. 2180 * If the current thread owns it, it must disown it. 2181 */ 2182 mtx_lock(&umtx_lock); 2183 if (pi->pi_owner == td) 2184 umtx_pi_disown(pi); 2185 mtx_unlock(&umtx_lock); 2186 } 2187 } 2188 return (0); 2189 } 2190 2191 /* 2192 * Lock a PI mutex. 
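 * On contention the caller sleeps in umtxq_sleep_pi(), which queues it
 * on the pi_blocked list in priority order and lends its priority to
 * the current owner of the mutex.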
2193 */ 2194 static int 2195 do_lock_pi(struct thread *td, struct umutex *m, uint32_t flags, 2196 struct _umtx_time *timeout, int try) 2197 { 2198 struct umtx_abs_timeout timo; 2199 struct umtx_q *uq; 2200 struct umtx_pi *pi, *new_pi; 2201 uint32_t id, old_owner, owner, old; 2202 int error, rv; 2203 2204 id = td->td_tid; 2205 uq = td->td_umtxq; 2206 2207 if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ? 2208 TYPE_PI_ROBUST_UMUTEX : TYPE_PI_UMUTEX, GET_SHARE(flags), 2209 &uq->uq_key)) != 0) 2210 return (error); 2211 2212 if (timeout != NULL) 2213 umtx_abs_timeout_init2(&timo, timeout); 2214 2215 umtxq_lock(&uq->uq_key); 2216 pi = umtx_pi_lookup(&uq->uq_key); 2217 if (pi == NULL) { 2218 new_pi = umtx_pi_alloc(M_NOWAIT); 2219 if (new_pi == NULL) { 2220 umtxq_unlock(&uq->uq_key); 2221 new_pi = umtx_pi_alloc(M_WAITOK); 2222 umtxq_lock(&uq->uq_key); 2223 pi = umtx_pi_lookup(&uq->uq_key); 2224 if (pi != NULL) { 2225 umtx_pi_free(new_pi); 2226 new_pi = NULL; 2227 } 2228 } 2229 if (new_pi != NULL) { 2230 new_pi->pi_key = uq->uq_key; 2231 umtx_pi_insert(new_pi); 2232 pi = new_pi; 2233 } 2234 } 2235 umtx_pi_ref(pi); 2236 umtxq_unlock(&uq->uq_key); 2237 2238 /* 2239 * Care must be exercised when dealing with umtx structure. It 2240 * can fault on any access. 2241 */ 2242 for (;;) { 2243 /* 2244 * Try the uncontested case. This should be done in userland. 2245 */ 2246 rv = casueword32(&m->m_owner, UMUTEX_UNOWNED, &owner, id); 2247 /* The address was invalid. */ 2248 if (rv == -1) { 2249 error = EFAULT; 2250 break; 2251 } 2252 /* The acquire succeeded. */ 2253 if (rv == 0) { 2254 MPASS(owner == UMUTEX_UNOWNED); 2255 error = 0; 2256 break; 2257 } 2258 2259 if (owner == UMUTEX_RB_NOTRECOV) { 2260 error = ENOTRECOVERABLE; 2261 break; 2262 } 2263 2264 /* 2265 * Nobody owns it, but the acquire failed. This can happen 2266 * with ll/sc atomics. 2267 */ 2268 if (owner == UMUTEX_UNOWNED) { 2269 error = thread_check_susp(td, true); 2270 if (error != 0) 2271 break; 2272 continue; 2273 } 2274 2275 /* 2276 * Avoid overwriting a possible error from sleep due 2277 * to the pending signal with suspension check result. 2278 */ 2279 if (error == 0) { 2280 error = thread_check_susp(td, true); 2281 if (error != 0) 2282 break; 2283 } 2284 2285 /* If no one owns it but it is contested try to acquire it. */ 2286 if (owner == UMUTEX_CONTESTED || owner == UMUTEX_RB_OWNERDEAD) { 2287 old_owner = owner; 2288 rv = casueword32(&m->m_owner, owner, &owner, 2289 id | UMUTEX_CONTESTED); 2290 /* The address was invalid. */ 2291 if (rv == -1) { 2292 error = EFAULT; 2293 break; 2294 } 2295 if (rv == 1) { 2296 if (error == 0) { 2297 error = thread_check_susp(td, true); 2298 if (error != 0) 2299 break; 2300 } 2301 2302 /* 2303 * If this failed the lock could 2304 * changed, restart. 2305 */ 2306 continue; 2307 } 2308 2309 MPASS(rv == 0); 2310 MPASS(owner == old_owner); 2311 umtxq_lock(&uq->uq_key); 2312 umtxq_busy(&uq->uq_key); 2313 error = umtx_pi_claim(pi, td); 2314 umtxq_unbusy(&uq->uq_key); 2315 umtxq_unlock(&uq->uq_key); 2316 if (error != 0) { 2317 /* 2318 * Since we're going to return an 2319 * error, restore the m_owner to its 2320 * previous, unowned state to avoid 2321 * compounding the problem. 
2322 */ 2323 (void)casuword32(&m->m_owner, 2324 id | UMUTEX_CONTESTED, old_owner); 2325 } 2326 if (error == 0 && old_owner == UMUTEX_RB_OWNERDEAD) 2327 error = EOWNERDEAD; 2328 break; 2329 } 2330 2331 if ((owner & ~UMUTEX_CONTESTED) == id) { 2332 error = EDEADLK; 2333 break; 2334 } 2335 2336 if (try != 0) { 2337 error = EBUSY; 2338 break; 2339 } 2340 2341 /* 2342 * If we caught a signal, we have retried and now 2343 * exit immediately. 2344 */ 2345 if (error != 0) 2346 break; 2347 2348 umtxq_lock(&uq->uq_key); 2349 umtxq_busy(&uq->uq_key); 2350 umtxq_unlock(&uq->uq_key); 2351 2352 /* 2353 * Set the contested bit so that a release in user space 2354 * knows to use the system call for unlock. If this fails 2355 * either some one else has acquired the lock or it has been 2356 * released. 2357 */ 2358 rv = casueword32(&m->m_owner, owner, &old, owner | 2359 UMUTEX_CONTESTED); 2360 2361 /* The address was invalid. */ 2362 if (rv == -1) { 2363 umtxq_unbusy_unlocked(&uq->uq_key); 2364 error = EFAULT; 2365 break; 2366 } 2367 if (rv == 1) { 2368 umtxq_unbusy_unlocked(&uq->uq_key); 2369 error = thread_check_susp(td, true); 2370 if (error != 0) 2371 break; 2372 2373 /* 2374 * The lock changed and we need to retry or we 2375 * lost a race to the thread unlocking the 2376 * umtx. Note that the UMUTEX_RB_OWNERDEAD 2377 * value for owner is impossible there. 2378 */ 2379 continue; 2380 } 2381 2382 umtxq_lock(&uq->uq_key); 2383 2384 /* We set the contested bit, sleep. */ 2385 MPASS(old == owner); 2386 error = umtxq_sleep_pi(uq, pi, owner & ~UMUTEX_CONTESTED, 2387 "umtxpi", timeout == NULL ? NULL : &timo, 2388 (flags & USYNC_PROCESS_SHARED) != 0); 2389 if (error != 0) 2390 continue; 2391 2392 error = thread_check_susp(td, false); 2393 if (error != 0) 2394 break; 2395 } 2396 2397 umtxq_lock(&uq->uq_key); 2398 umtx_pi_unref(pi); 2399 umtxq_unlock(&uq->uq_key); 2400 2401 umtx_key_release(&uq->uq_key); 2402 return (error); 2403 } 2404 2405 /* 2406 * Unlock a PI mutex. 2407 */ 2408 static int 2409 do_unlock_pi(struct thread *td, struct umutex *m, uint32_t flags, bool rb) 2410 { 2411 struct umtx_key key; 2412 uint32_t id, new_owner, old, owner; 2413 int count, error; 2414 2415 id = td->td_tid; 2416 2417 usrloop: 2418 /* 2419 * Make sure we own this mtx. 2420 */ 2421 error = fueword32(&m->m_owner, &owner); 2422 if (error == -1) 2423 return (EFAULT); 2424 2425 if ((owner & ~UMUTEX_CONTESTED) != id) 2426 return (EPERM); 2427 2428 new_owner = umtx_unlock_val(flags, rb); 2429 2430 /* This should be done in userland */ 2431 if ((owner & UMUTEX_CONTESTED) == 0) { 2432 error = casueword32(&m->m_owner, owner, &old, new_owner); 2433 if (error == -1) 2434 return (EFAULT); 2435 if (error == 1) { 2436 error = thread_check_susp(td, true); 2437 if (error != 0) 2438 return (error); 2439 goto usrloop; 2440 } 2441 if (old == owner) 2442 return (0); 2443 owner = old; 2444 } 2445 2446 /* We should only ever be in here for contested locks */ 2447 if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ? 2448 TYPE_PI_ROBUST_UMUTEX : TYPE_PI_UMUTEX, GET_SHARE(flags), 2449 &key)) != 0) 2450 return (error); 2451 2452 umtxq_lock(&key); 2453 umtxq_busy(&key); 2454 error = umtx_pi_drop(td, &key, rb, &count); 2455 if (error != 0) { 2456 umtxq_unbusy(&key); 2457 umtxq_unlock(&key); 2458 umtx_key_release(&key); 2459 /* userland messed the mutex */ 2460 return (error); 2461 } 2462 umtxq_unlock(&key); 2463 2464 /* 2465 * When unlocking the umtx, it must be marked as unowned if 2466 * there is zero or one thread only waiting for it. 
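 * (A single waiter has already been chosen and woken by umtx_pi_drop();
 * when it retries the lock it will set the contested bit again itself
 * if it observes further contention.)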
2467 * Otherwise, it must be marked as contested. 2468 */ 2469 2470 if (count > 1) 2471 new_owner |= UMUTEX_CONTESTED; 2472 again: 2473 error = casueword32(&m->m_owner, owner, &old, new_owner); 2474 if (error == 1) { 2475 error = thread_check_susp(td, false); 2476 if (error == 0) 2477 goto again; 2478 } 2479 umtxq_unbusy_unlocked(&key); 2480 umtx_key_release(&key); 2481 if (error == -1) 2482 return (EFAULT); 2483 if (error == 0 && old != owner) 2484 return (EINVAL); 2485 return (error); 2486 } 2487 2488 /* 2489 * Lock a PP mutex. 2490 */ 2491 static int 2492 do_lock_pp(struct thread *td, struct umutex *m, uint32_t flags, 2493 struct _umtx_time *timeout, int try) 2494 { 2495 struct umtx_abs_timeout timo; 2496 struct umtx_q *uq, *uq2; 2497 struct umtx_pi *pi; 2498 uint32_t ceiling; 2499 uint32_t owner, id; 2500 int error, pri, old_inherited_pri, su, rv; 2501 2502 id = td->td_tid; 2503 uq = td->td_umtxq; 2504 if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ? 2505 TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags), 2506 &uq->uq_key)) != 0) 2507 return (error); 2508 2509 if (timeout != NULL) 2510 umtx_abs_timeout_init2(&timo, timeout); 2511 2512 su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0); 2513 for (;;) { 2514 old_inherited_pri = uq->uq_inherited_pri; 2515 umtxq_lock(&uq->uq_key); 2516 umtxq_busy(&uq->uq_key); 2517 umtxq_unlock(&uq->uq_key); 2518 2519 rv = fueword32(&m->m_ceilings[0], &ceiling); 2520 if (rv == -1) { 2521 error = EFAULT; 2522 goto out; 2523 } 2524 ceiling = RTP_PRIO_MAX - ceiling; 2525 if (ceiling > RTP_PRIO_MAX) { 2526 error = EINVAL; 2527 goto out; 2528 } 2529 2530 mtx_lock(&umtx_lock); 2531 if (UPRI(td) < PRI_MIN_REALTIME + ceiling) { 2532 mtx_unlock(&umtx_lock); 2533 error = EINVAL; 2534 goto out; 2535 } 2536 if (su && PRI_MIN_REALTIME + ceiling < uq->uq_inherited_pri) { 2537 uq->uq_inherited_pri = PRI_MIN_REALTIME + ceiling; 2538 thread_lock(td); 2539 if (uq->uq_inherited_pri < UPRI(td)) 2540 sched_lend_user_prio(td, uq->uq_inherited_pri); 2541 thread_unlock(td); 2542 } 2543 mtx_unlock(&umtx_lock); 2544 2545 rv = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner, 2546 id | UMUTEX_CONTESTED); 2547 /* The address was invalid. */ 2548 if (rv == -1) { 2549 error = EFAULT; 2550 break; 2551 } 2552 if (rv == 0) { 2553 MPASS(owner == UMUTEX_CONTESTED); 2554 error = 0; 2555 break; 2556 } 2557 /* rv == 1 */ 2558 if (owner == UMUTEX_RB_OWNERDEAD) { 2559 rv = casueword32(&m->m_owner, UMUTEX_RB_OWNERDEAD, 2560 &owner, id | UMUTEX_CONTESTED); 2561 if (rv == -1) { 2562 error = EFAULT; 2563 break; 2564 } 2565 if (rv == 0) { 2566 MPASS(owner == UMUTEX_RB_OWNERDEAD); 2567 error = EOWNERDEAD; /* success */ 2568 break; 2569 } 2570 2571 /* 2572 * rv == 1, only check for suspension if we 2573 * did not already catched a signal. If we 2574 * get an error from the check, the same 2575 * condition is checked by the umtxq_sleep() 2576 * call below, so we should obliterate the 2577 * error to not skip the last loop iteration. 2578 */ 2579 if (error == 0) { 2580 error = thread_check_susp(td, false); 2581 if (error == 0) { 2582 if (try != 0) 2583 error = EBUSY; 2584 else 2585 continue; 2586 } 2587 error = 0; 2588 } 2589 } else if (owner == UMUTEX_RB_NOTRECOV) { 2590 error = ENOTRECOVERABLE; 2591 } 2592 2593 if (try != 0) 2594 error = EBUSY; 2595 2596 /* 2597 * If we caught a signal, we have retried and now 2598 * exit immediately. 
2599 */ 2600 if (error != 0) 2601 break; 2602 2603 umtxq_lock(&uq->uq_key); 2604 umtxq_insert(uq); 2605 umtxq_unbusy(&uq->uq_key); 2606 error = umtxq_sleep(uq, "umtxpp", timeout == NULL ? 2607 NULL : &timo); 2608 umtxq_remove(uq); 2609 umtxq_unlock(&uq->uq_key); 2610 2611 mtx_lock(&umtx_lock); 2612 uq->uq_inherited_pri = old_inherited_pri; 2613 pri = PRI_MAX; 2614 TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) { 2615 uq2 = TAILQ_FIRST(&pi->pi_blocked); 2616 if (uq2 != NULL) { 2617 if (pri > UPRI(uq2->uq_thread)) 2618 pri = UPRI(uq2->uq_thread); 2619 } 2620 } 2621 if (pri > uq->uq_inherited_pri) 2622 pri = uq->uq_inherited_pri; 2623 thread_lock(td); 2624 sched_lend_user_prio(td, pri); 2625 thread_unlock(td); 2626 mtx_unlock(&umtx_lock); 2627 } 2628 2629 if (error != 0 && error != EOWNERDEAD) { 2630 mtx_lock(&umtx_lock); 2631 uq->uq_inherited_pri = old_inherited_pri; 2632 pri = PRI_MAX; 2633 TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) { 2634 uq2 = TAILQ_FIRST(&pi->pi_blocked); 2635 if (uq2 != NULL) { 2636 if (pri > UPRI(uq2->uq_thread)) 2637 pri = UPRI(uq2->uq_thread); 2638 } 2639 } 2640 if (pri > uq->uq_inherited_pri) 2641 pri = uq->uq_inherited_pri; 2642 thread_lock(td); 2643 sched_lend_user_prio(td, pri); 2644 thread_unlock(td); 2645 mtx_unlock(&umtx_lock); 2646 } 2647 2648 out: 2649 umtxq_unbusy_unlocked(&uq->uq_key); 2650 umtx_key_release(&uq->uq_key); 2651 return (error); 2652 } 2653 2654 /* 2655 * Unlock a PP mutex. 2656 */ 2657 static int 2658 do_unlock_pp(struct thread *td, struct umutex *m, uint32_t flags, bool rb) 2659 { 2660 struct umtx_key key; 2661 struct umtx_q *uq, *uq2; 2662 struct umtx_pi *pi; 2663 uint32_t id, owner, rceiling; 2664 int error, pri, new_inherited_pri, su; 2665 2666 id = td->td_tid; 2667 uq = td->td_umtxq; 2668 su = (priv_check(td, PRIV_SCHED_RTPRIO) == 0); 2669 2670 /* 2671 * Make sure we own this mtx. 2672 */ 2673 error = fueword32(&m->m_owner, &owner); 2674 if (error == -1) 2675 return (EFAULT); 2676 2677 if ((owner & ~UMUTEX_CONTESTED) != id) 2678 return (EPERM); 2679 2680 error = copyin(&m->m_ceilings[1], &rceiling, sizeof(uint32_t)); 2681 if (error != 0) 2682 return (error); 2683 2684 if (rceiling == -1) 2685 new_inherited_pri = PRI_MAX; 2686 else { 2687 rceiling = RTP_PRIO_MAX - rceiling; 2688 if (rceiling > RTP_PRIO_MAX) 2689 return (EINVAL); 2690 new_inherited_pri = PRI_MIN_REALTIME + rceiling; 2691 } 2692 2693 if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ? 2694 TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags), 2695 &key)) != 0) 2696 return (error); 2697 umtxq_lock(&key); 2698 umtxq_busy(&key); 2699 umtxq_unlock(&key); 2700 /* 2701 * For priority protected mutex, always set unlocked state 2702 * to UMUTEX_CONTESTED, so that userland always enters kernel 2703 * to lock the mutex, it is necessary because thread priority 2704 * has to be adjusted for such mutex. 
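 * As a consequence a PP mutex, unlike the plain and PI flavours, has no
 * userland fast path: both lock and unlock always go through _umtx_op(2).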
2705 */ 2706 error = suword32(&m->m_owner, umtx_unlock_val(flags, rb) | 2707 UMUTEX_CONTESTED); 2708 2709 umtxq_lock(&key); 2710 if (error == 0) 2711 umtxq_signal(&key, 1); 2712 umtxq_unbusy(&key); 2713 umtxq_unlock(&key); 2714 2715 if (error == -1) 2716 error = EFAULT; 2717 else { 2718 mtx_lock(&umtx_lock); 2719 if (su != 0) 2720 uq->uq_inherited_pri = new_inherited_pri; 2721 pri = PRI_MAX; 2722 TAILQ_FOREACH(pi, &uq->uq_pi_contested, pi_link) { 2723 uq2 = TAILQ_FIRST(&pi->pi_blocked); 2724 if (uq2 != NULL) { 2725 if (pri > UPRI(uq2->uq_thread)) 2726 pri = UPRI(uq2->uq_thread); 2727 } 2728 } 2729 if (pri > uq->uq_inherited_pri) 2730 pri = uq->uq_inherited_pri; 2731 thread_lock(td); 2732 sched_lend_user_prio(td, pri); 2733 thread_unlock(td); 2734 mtx_unlock(&umtx_lock); 2735 } 2736 umtx_key_release(&key); 2737 return (error); 2738 } 2739 2740 static int 2741 do_set_ceiling(struct thread *td, struct umutex *m, uint32_t ceiling, 2742 uint32_t *old_ceiling) 2743 { 2744 struct umtx_q *uq; 2745 uint32_t flags, id, owner, save_ceiling; 2746 int error, rv, rv1; 2747 2748 error = fueword32(&m->m_flags, &flags); 2749 if (error == -1) 2750 return (EFAULT); 2751 if ((flags & UMUTEX_PRIO_PROTECT) == 0) 2752 return (EINVAL); 2753 if (ceiling > RTP_PRIO_MAX) 2754 return (EINVAL); 2755 id = td->td_tid; 2756 uq = td->td_umtxq; 2757 if ((error = umtx_key_get(m, (flags & UMUTEX_ROBUST) != 0 ? 2758 TYPE_PP_ROBUST_UMUTEX : TYPE_PP_UMUTEX, GET_SHARE(flags), 2759 &uq->uq_key)) != 0) 2760 return (error); 2761 for (;;) { 2762 umtxq_lock(&uq->uq_key); 2763 umtxq_busy(&uq->uq_key); 2764 umtxq_unlock(&uq->uq_key); 2765 2766 rv = fueword32(&m->m_ceilings[0], &save_ceiling); 2767 if (rv == -1) { 2768 error = EFAULT; 2769 break; 2770 } 2771 2772 rv = casueword32(&m->m_owner, UMUTEX_CONTESTED, &owner, 2773 id | UMUTEX_CONTESTED); 2774 if (rv == -1) { 2775 error = EFAULT; 2776 break; 2777 } 2778 2779 if (rv == 0) { 2780 MPASS(owner == UMUTEX_CONTESTED); 2781 rv = suword32(&m->m_ceilings[0], ceiling); 2782 rv1 = suword32(&m->m_owner, UMUTEX_CONTESTED); 2783 error = (rv == 0 && rv1 == 0) ? 0: EFAULT; 2784 break; 2785 } 2786 2787 if ((owner & ~UMUTEX_CONTESTED) == id) { 2788 rv = suword32(&m->m_ceilings[0], ceiling); 2789 error = rv == 0 ? 0 : EFAULT; 2790 break; 2791 } 2792 2793 if (owner == UMUTEX_RB_OWNERDEAD) { 2794 error = EOWNERDEAD; 2795 break; 2796 } else if (owner == UMUTEX_RB_NOTRECOV) { 2797 error = ENOTRECOVERABLE; 2798 break; 2799 } 2800 2801 /* 2802 * If we caught a signal, we have retried and now 2803 * exit immediately. 2804 */ 2805 if (error != 0) 2806 break; 2807 2808 /* 2809 * We set the contested bit, sleep. Otherwise the lock changed 2810 * and we need to retry or we lost a race to the thread 2811 * unlocking the umtx. 2812 */ 2813 umtxq_lock(&uq->uq_key); 2814 umtxq_insert(uq); 2815 umtxq_unbusy(&uq->uq_key); 2816 error = umtxq_sleep(uq, "umtxpp", NULL); 2817 umtxq_remove(uq); 2818 umtxq_unlock(&uq->uq_key); 2819 } 2820 umtxq_lock(&uq->uq_key); 2821 if (error == 0) 2822 umtxq_signal(&uq->uq_key, INT_MAX); 2823 umtxq_unbusy(&uq->uq_key); 2824 umtxq_unlock(&uq->uq_key); 2825 umtx_key_release(&uq->uq_key); 2826 if (error == 0 && old_ceiling != NULL) { 2827 rv = suword32(old_ceiling, save_ceiling); 2828 error = rv == 0 ? 0 : EFAULT; 2829 } 2830 return (error); 2831 } 2832 2833 /* 2834 * Lock a userland POSIX mutex. 
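 * The m_flags word selects the locking protocol: plain mutex
 * (do_lock_normal()), priority inheritance (do_lock_pi()) or priority
 * protection (do_lock_pp()).  For the plain and PI flavours the kernel
 * is entered only on contention; a minimal userland sketch of the
 * expected fast path (illustrative only, not the libthr code; "tid" is
 * the caller's thread id) would be:
 *
 *	if (atomic_cmpset_acq_32(&m->m_owner, UMUTEX_UNOWNED, tid))
 *		return (0);		(uncontested, no syscall needed)
 *	return (_umtx_op(m, UMTX_OP_MUTEX_LOCK, 0, NULL, NULL));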
2835 */ 2836 static int 2837 do_lock_umutex(struct thread *td, struct umutex *m, 2838 struct _umtx_time *timeout, int mode) 2839 { 2840 uint32_t flags; 2841 int error; 2842 2843 error = fueword32(&m->m_flags, &flags); 2844 if (error == -1) 2845 return (EFAULT); 2846 2847 switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) { 2848 case 0: 2849 error = do_lock_normal(td, m, flags, timeout, mode); 2850 break; 2851 case UMUTEX_PRIO_INHERIT: 2852 error = do_lock_pi(td, m, flags, timeout, mode); 2853 break; 2854 case UMUTEX_PRIO_PROTECT: 2855 error = do_lock_pp(td, m, flags, timeout, mode); 2856 break; 2857 default: 2858 return (EINVAL); 2859 } 2860 if (timeout == NULL) { 2861 if (error == EINTR && mode != _UMUTEX_WAIT) 2862 error = ERESTART; 2863 } else { 2864 /* Timed-locking is not restarted. */ 2865 if (error == ERESTART) 2866 error = EINTR; 2867 } 2868 return (error); 2869 } 2870 2871 /* 2872 * Unlock a userland POSIX mutex. 2873 */ 2874 static int 2875 do_unlock_umutex(struct thread *td, struct umutex *m, bool rb) 2876 { 2877 uint32_t flags; 2878 int error; 2879 2880 error = fueword32(&m->m_flags, &flags); 2881 if (error == -1) 2882 return (EFAULT); 2883 2884 switch (flags & (UMUTEX_PRIO_INHERIT | UMUTEX_PRIO_PROTECT)) { 2885 case 0: 2886 return (do_unlock_normal(td, m, flags, rb)); 2887 case UMUTEX_PRIO_INHERIT: 2888 return (do_unlock_pi(td, m, flags, rb)); 2889 case UMUTEX_PRIO_PROTECT: 2890 return (do_unlock_pp(td, m, flags, rb)); 2891 } 2892 2893 return (EINVAL); 2894 } 2895 2896 static int 2897 do_cv_wait(struct thread *td, struct ucond *cv, struct umutex *m, 2898 struct timespec *timeout, u_long wflags) 2899 { 2900 struct umtx_abs_timeout timo; 2901 struct umtx_q *uq; 2902 uint32_t flags, clockid, hasw; 2903 int error; 2904 2905 uq = td->td_umtxq; 2906 error = fueword32(&cv->c_flags, &flags); 2907 if (error == -1) 2908 return (EFAULT); 2909 error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &uq->uq_key); 2910 if (error != 0) 2911 return (error); 2912 2913 if ((wflags & CVWAIT_CLOCKID) != 0) { 2914 error = fueword32(&cv->c_clockid, &clockid); 2915 if (error == -1) { 2916 umtx_key_release(&uq->uq_key); 2917 return (EFAULT); 2918 } 2919 if (clockid < CLOCK_REALTIME || 2920 clockid >= CLOCK_THREAD_CPUTIME_ID) { 2921 /* hmm, only HW clock id will work. */ 2922 umtx_key_release(&uq->uq_key); 2923 return (EINVAL); 2924 } 2925 } else { 2926 clockid = CLOCK_REALTIME; 2927 } 2928 2929 umtxq_lock(&uq->uq_key); 2930 umtxq_busy(&uq->uq_key); 2931 umtxq_insert(uq); 2932 umtxq_unlock(&uq->uq_key); 2933 2934 /* 2935 * Set c_has_waiters to 1 before releasing user mutex, also 2936 * don't modify cache line when unnecessary. 2937 */ 2938 error = fueword32(&cv->c_has_waiters, &hasw); 2939 if (error == 0 && hasw == 0) 2940 suword32(&cv->c_has_waiters, 1); 2941 2942 umtxq_unbusy_unlocked(&uq->uq_key); 2943 2944 error = do_unlock_umutex(td, m, false); 2945 2946 if (timeout != NULL) 2947 umtx_abs_timeout_init(&timo, clockid, 2948 (wflags & CVWAIT_ABSTIME) != 0, timeout); 2949 2950 umtxq_lock(&uq->uq_key); 2951 if (error == 0) { 2952 error = umtxq_sleep(uq, "ucond", timeout == NULL ? 2953 NULL : &timo); 2954 } 2955 2956 if ((uq->uq_flags & UQF_UMTXQ) == 0) 2957 error = 0; 2958 else { 2959 /* 2960 * This must be timeout,interrupted by signal or 2961 * surprious wakeup, clear c_has_waiter flag when 2962 * necessary. 
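 * UQF_UMTXQ still being set means that no do_cv_signal() or
 * do_cv_broadcast() removed us from the queue; if we were the only
 * queued thread, c_has_waiters can be cleared safely.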
2963 */ 2964 umtxq_busy(&uq->uq_key); 2965 if ((uq->uq_flags & UQF_UMTXQ) != 0) { 2966 int oldlen = uq->uq_cur_queue->length; 2967 umtxq_remove(uq); 2968 if (oldlen == 1) { 2969 umtxq_unlock(&uq->uq_key); 2970 suword32(&cv->c_has_waiters, 0); 2971 umtxq_lock(&uq->uq_key); 2972 } 2973 } 2974 umtxq_unbusy(&uq->uq_key); 2975 if (error == ERESTART) 2976 error = EINTR; 2977 } 2978 2979 umtxq_unlock(&uq->uq_key); 2980 umtx_key_release(&uq->uq_key); 2981 return (error); 2982 } 2983 2984 /* 2985 * Signal a userland condition variable. 2986 */ 2987 static int 2988 do_cv_signal(struct thread *td, struct ucond *cv) 2989 { 2990 struct umtx_key key; 2991 int error, cnt, nwake; 2992 uint32_t flags; 2993 2994 error = fueword32(&cv->c_flags, &flags); 2995 if (error == -1) 2996 return (EFAULT); 2997 if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0) 2998 return (error); 2999 umtxq_lock(&key); 3000 umtxq_busy(&key); 3001 cnt = umtxq_count(&key); 3002 nwake = umtxq_signal(&key, 1); 3003 if (cnt <= nwake) { 3004 umtxq_unlock(&key); 3005 error = suword32(&cv->c_has_waiters, 0); 3006 if (error == -1) 3007 error = EFAULT; 3008 umtxq_lock(&key); 3009 } 3010 umtxq_unbusy(&key); 3011 umtxq_unlock(&key); 3012 umtx_key_release(&key); 3013 return (error); 3014 } 3015 3016 static int 3017 do_cv_broadcast(struct thread *td, struct ucond *cv) 3018 { 3019 struct umtx_key key; 3020 int error; 3021 uint32_t flags; 3022 3023 error = fueword32(&cv->c_flags, &flags); 3024 if (error == -1) 3025 return (EFAULT); 3026 if ((error = umtx_key_get(cv, TYPE_CV, GET_SHARE(flags), &key)) != 0) 3027 return (error); 3028 3029 umtxq_lock(&key); 3030 umtxq_busy(&key); 3031 umtxq_signal(&key, INT_MAX); 3032 umtxq_unlock(&key); 3033 3034 error = suword32(&cv->c_has_waiters, 0); 3035 if (error == -1) 3036 error = EFAULT; 3037 3038 umtxq_unbusy_unlocked(&key); 3039 3040 umtx_key_release(&key); 3041 return (error); 3042 } 3043 3044 static int 3045 do_rw_rdlock(struct thread *td, struct urwlock *rwlock, long fflag, 3046 struct _umtx_time *timeout) 3047 { 3048 struct umtx_abs_timeout timo; 3049 struct umtx_q *uq; 3050 uint32_t flags, wrflags; 3051 int32_t state, oldstate; 3052 int32_t blocked_readers; 3053 int error, error1, rv; 3054 3055 uq = td->td_umtxq; 3056 error = fueword32(&rwlock->rw_flags, &flags); 3057 if (error == -1) 3058 return (EFAULT); 3059 error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key); 3060 if (error != 0) 3061 return (error); 3062 3063 if (timeout != NULL) 3064 umtx_abs_timeout_init2(&timo, timeout); 3065 3066 wrflags = URWLOCK_WRITE_OWNER; 3067 if (!(fflag & URWLOCK_PREFER_READER) && !(flags & URWLOCK_PREFER_READER)) 3068 wrflags |= URWLOCK_WRITE_WAITERS; 3069 3070 for (;;) { 3071 rv = fueword32(&rwlock->rw_state, &state); 3072 if (rv == -1) { 3073 umtx_key_release(&uq->uq_key); 3074 return (EFAULT); 3075 } 3076 3077 /* try to lock it */ 3078 while (!(state & wrflags)) { 3079 if (__predict_false(URWLOCK_READER_COUNT(state) == 3080 URWLOCK_MAX_READERS)) { 3081 umtx_key_release(&uq->uq_key); 3082 return (EAGAIN); 3083 } 3084 rv = casueword32(&rwlock->rw_state, state, 3085 &oldstate, state + 1); 3086 if (rv == -1) { 3087 umtx_key_release(&uq->uq_key); 3088 return (EFAULT); 3089 } 3090 if (rv == 0) { 3091 MPASS(oldstate == state); 3092 umtx_key_release(&uq->uq_key); 3093 return (0); 3094 } 3095 error = thread_check_susp(td, true); 3096 if (error != 0) 3097 break; 3098 state = oldstate; 3099 } 3100 3101 if (error) 3102 break; 3103 3104 /* grab monitor lock */ 3105 umtxq_lock(&uq->uq_key); 
3106 umtxq_busy(&uq->uq_key); 3107 umtxq_unlock(&uq->uq_key); 3108 3109 /* 3110 * re-read the state, in case it changed between the try-lock above 3111 * and the check below 3112 */ 3113 rv = fueword32(&rwlock->rw_state, &state); 3114 if (rv == -1) 3115 error = EFAULT; 3116 3117 /* set read contention bit */ 3118 while (error == 0 && (state & wrflags) && 3119 !(state & URWLOCK_READ_WAITERS)) { 3120 rv = casueword32(&rwlock->rw_state, state, 3121 &oldstate, state | URWLOCK_READ_WAITERS); 3122 if (rv == -1) { 3123 error = EFAULT; 3124 break; 3125 } 3126 if (rv == 0) { 3127 MPASS(oldstate == state); 3128 goto sleep; 3129 } 3130 state = oldstate; 3131 error = thread_check_susp(td, false); 3132 if (error != 0) 3133 break; 3134 } 3135 if (error != 0) { 3136 umtxq_unbusy_unlocked(&uq->uq_key); 3137 break; 3138 } 3139 3140 /* state is changed while setting flags, restart */ 3141 if (!(state & wrflags)) { 3142 umtxq_unbusy_unlocked(&uq->uq_key); 3143 error = thread_check_susp(td, true); 3144 if (error != 0) 3145 break; 3146 continue; 3147 } 3148 3149 sleep: 3150 /* 3151 * Contention bit is set, before sleeping, increase 3152 * read waiter count. 3153 */ 3154 rv = fueword32(&rwlock->rw_blocked_readers, 3155 &blocked_readers); 3156 if (rv == -1) { 3157 umtxq_unbusy_unlocked(&uq->uq_key); 3158 error = EFAULT; 3159 break; 3160 } 3161 suword32(&rwlock->rw_blocked_readers, blocked_readers+1); 3162 3163 while (state & wrflags) { 3164 umtxq_lock(&uq->uq_key); 3165 umtxq_insert(uq); 3166 umtxq_unbusy(&uq->uq_key); 3167 3168 error = umtxq_sleep(uq, "urdlck", timeout == NULL ? 3169 NULL : &timo); 3170 3171 umtxq_busy(&uq->uq_key); 3172 umtxq_remove(uq); 3173 umtxq_unlock(&uq->uq_key); 3174 if (error) 3175 break; 3176 rv = fueword32(&rwlock->rw_state, &state); 3177 if (rv == -1) { 3178 error = EFAULT; 3179 break; 3180 } 3181 } 3182 3183 /* decrease read waiter count, and may clear read contention bit */ 3184 rv = fueword32(&rwlock->rw_blocked_readers, 3185 &blocked_readers); 3186 if (rv == -1) { 3187 umtxq_unbusy_unlocked(&uq->uq_key); 3188 error = EFAULT; 3189 break; 3190 } 3191 suword32(&rwlock->rw_blocked_readers, blocked_readers-1); 3192 if (blocked_readers == 1) { 3193 rv = fueword32(&rwlock->rw_state, &state); 3194 if (rv == -1) { 3195 umtxq_unbusy_unlocked(&uq->uq_key); 3196 error = EFAULT; 3197 break; 3198 } 3199 for (;;) { 3200 rv = casueword32(&rwlock->rw_state, state, 3201 &oldstate, state & ~URWLOCK_READ_WAITERS); 3202 if (rv == -1) { 3203 error = EFAULT; 3204 break; 3205 } 3206 if (rv == 0) { 3207 MPASS(oldstate == state); 3208 break; 3209 } 3210 state = oldstate; 3211 error1 = thread_check_susp(td, false); 3212 if (error1 != 0) { 3213 if (error == 0) 3214 error = error1; 3215 break; 3216 } 3217 } 3218 } 3219 3220 umtxq_unbusy_unlocked(&uq->uq_key); 3221 if (error != 0) 3222 break; 3223 } 3224 umtx_key_release(&uq->uq_key); 3225 if (error == ERESTART) 3226 error = EINTR; 3227 return (error); 3228 } 3229 3230 static int 3231 do_rw_wrlock(struct thread *td, struct urwlock *rwlock, struct _umtx_time *timeout) 3232 { 3233 struct umtx_abs_timeout timo; 3234 struct umtx_q *uq; 3235 uint32_t flags; 3236 int32_t state, oldstate; 3237 int32_t blocked_writers; 3238 int32_t blocked_readers; 3239 int error, error1, rv; 3240 3241 uq = td->td_umtxq; 3242 error = fueword32(&rwlock->rw_flags, &flags); 3243 if (error == -1) 3244 return (EFAULT); 3245 error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key); 3246 if (error != 0) 3247 return (error); 3248 3249 if (timeout != NULL) 3250 
umtx_abs_timeout_init2(&timo, timeout); 3251 3252 blocked_readers = 0; 3253 for (;;) { 3254 rv = fueword32(&rwlock->rw_state, &state); 3255 if (rv == -1) { 3256 umtx_key_release(&uq->uq_key); 3257 return (EFAULT); 3258 } 3259 while ((state & URWLOCK_WRITE_OWNER) == 0 && 3260 URWLOCK_READER_COUNT(state) == 0) { 3261 rv = casueword32(&rwlock->rw_state, state, 3262 &oldstate, state | URWLOCK_WRITE_OWNER); 3263 if (rv == -1) { 3264 umtx_key_release(&uq->uq_key); 3265 return (EFAULT); 3266 } 3267 if (rv == 0) { 3268 MPASS(oldstate == state); 3269 umtx_key_release(&uq->uq_key); 3270 return (0); 3271 } 3272 state = oldstate; 3273 error = thread_check_susp(td, true); 3274 if (error != 0) 3275 break; 3276 } 3277 3278 if (error) { 3279 if ((state & (URWLOCK_WRITE_OWNER | 3280 URWLOCK_WRITE_WAITERS)) == 0 && 3281 blocked_readers != 0) { 3282 umtxq_lock(&uq->uq_key); 3283 umtxq_busy(&uq->uq_key); 3284 umtxq_signal_queue(&uq->uq_key, INT_MAX, 3285 UMTX_SHARED_QUEUE); 3286 umtxq_unbusy(&uq->uq_key); 3287 umtxq_unlock(&uq->uq_key); 3288 } 3289 3290 break; 3291 } 3292 3293 /* grab monitor lock */ 3294 umtxq_lock(&uq->uq_key); 3295 umtxq_busy(&uq->uq_key); 3296 umtxq_unlock(&uq->uq_key); 3297 3298 /* 3299 * Re-read the state, in case it changed between the 3300 * try-lock above and the check below. 3301 */ 3302 rv = fueword32(&rwlock->rw_state, &state); 3303 if (rv == -1) 3304 error = EFAULT; 3305 3306 while (error == 0 && ((state & URWLOCK_WRITE_OWNER) || 3307 URWLOCK_READER_COUNT(state) != 0) && 3308 (state & URWLOCK_WRITE_WAITERS) == 0) { 3309 rv = casueword32(&rwlock->rw_state, state, 3310 &oldstate, state | URWLOCK_WRITE_WAITERS); 3311 if (rv == -1) { 3312 error = EFAULT; 3313 break; 3314 } 3315 if (rv == 0) { 3316 MPASS(oldstate == state); 3317 goto sleep; 3318 } 3319 state = oldstate; 3320 error = thread_check_susp(td, false); 3321 if (error != 0) 3322 break; 3323 } 3324 if (error != 0) { 3325 umtxq_unbusy_unlocked(&uq->uq_key); 3326 break; 3327 } 3328 3329 if ((state & URWLOCK_WRITE_OWNER) == 0 && 3330 URWLOCK_READER_COUNT(state) == 0) { 3331 umtxq_unbusy_unlocked(&uq->uq_key); 3332 error = thread_check_susp(td, false); 3333 if (error != 0) 3334 break; 3335 continue; 3336 } 3337 sleep: 3338 rv = fueword32(&rwlock->rw_blocked_writers, 3339 &blocked_writers); 3340 if (rv == -1) { 3341 umtxq_unbusy_unlocked(&uq->uq_key); 3342 error = EFAULT; 3343 break; 3344 } 3345 suword32(&rwlock->rw_blocked_writers, blocked_writers + 1); 3346 3347 while ((state & URWLOCK_WRITE_OWNER) || 3348 URWLOCK_READER_COUNT(state) != 0) { 3349 umtxq_lock(&uq->uq_key); 3350 umtxq_insert_queue(uq, UMTX_EXCLUSIVE_QUEUE); 3351 umtxq_unbusy(&uq->uq_key); 3352 3353 error = umtxq_sleep(uq, "uwrlck", timeout == NULL ? 
3354 NULL : &timo); 3355 3356 umtxq_busy(&uq->uq_key); 3357 umtxq_remove_queue(uq, UMTX_EXCLUSIVE_QUEUE); 3358 umtxq_unlock(&uq->uq_key); 3359 if (error) 3360 break; 3361 rv = fueword32(&rwlock->rw_state, &state); 3362 if (rv == -1) { 3363 error = EFAULT; 3364 break; 3365 } 3366 } 3367 3368 rv = fueword32(&rwlock->rw_blocked_writers, 3369 &blocked_writers); 3370 if (rv == -1) { 3371 umtxq_unbusy_unlocked(&uq->uq_key); 3372 error = EFAULT; 3373 break; 3374 } 3375 suword32(&rwlock->rw_blocked_writers, blocked_writers-1); 3376 if (blocked_writers == 1) { 3377 rv = fueword32(&rwlock->rw_state, &state); 3378 if (rv == -1) { 3379 umtxq_unbusy_unlocked(&uq->uq_key); 3380 error = EFAULT; 3381 break; 3382 } 3383 for (;;) { 3384 rv = casueword32(&rwlock->rw_state, state, 3385 &oldstate, state & ~URWLOCK_WRITE_WAITERS); 3386 if (rv == -1) { 3387 error = EFAULT; 3388 break; 3389 } 3390 if (rv == 0) { 3391 MPASS(oldstate == state); 3392 break; 3393 } 3394 state = oldstate; 3395 error1 = thread_check_susp(td, false); 3396 /* 3397 * We are leaving the URWLOCK_WRITE_WAITERS 3398 * behind, but this should not harm the 3399 * correctness. 3400 */ 3401 if (error1 != 0) { 3402 if (error == 0) 3403 error = error1; 3404 break; 3405 } 3406 } 3407 rv = fueword32(&rwlock->rw_blocked_readers, 3408 &blocked_readers); 3409 if (rv == -1) { 3410 umtxq_unbusy_unlocked(&uq->uq_key); 3411 error = EFAULT; 3412 break; 3413 } 3414 } else 3415 blocked_readers = 0; 3416 3417 umtxq_unbusy_unlocked(&uq->uq_key); 3418 } 3419 3420 umtx_key_release(&uq->uq_key); 3421 if (error == ERESTART) 3422 error = EINTR; 3423 return (error); 3424 } 3425 3426 static int 3427 do_rw_unlock(struct thread *td, struct urwlock *rwlock) 3428 { 3429 struct umtx_q *uq; 3430 uint32_t flags; 3431 int32_t state, oldstate; 3432 int error, rv, q, count; 3433 3434 uq = td->td_umtxq; 3435 error = fueword32(&rwlock->rw_flags, &flags); 3436 if (error == -1) 3437 return (EFAULT); 3438 error = umtx_key_get(rwlock, TYPE_RWLOCK, GET_SHARE(flags), &uq->uq_key); 3439 if (error != 0) 3440 return (error); 3441 3442 error = fueword32(&rwlock->rw_state, &state); 3443 if (error == -1) { 3444 error = EFAULT; 3445 goto out; 3446 } 3447 if (state & URWLOCK_WRITE_OWNER) { 3448 for (;;) { 3449 rv = casueword32(&rwlock->rw_state, state, 3450 &oldstate, state & ~URWLOCK_WRITE_OWNER); 3451 if (rv == -1) { 3452 error = EFAULT; 3453 goto out; 3454 } 3455 if (rv == 1) { 3456 state = oldstate; 3457 if (!(oldstate & URWLOCK_WRITE_OWNER)) { 3458 error = EPERM; 3459 goto out; 3460 } 3461 error = thread_check_susp(td, true); 3462 if (error != 0) 3463 goto out; 3464 } else 3465 break; 3466 } 3467 } else if (URWLOCK_READER_COUNT(state) != 0) { 3468 for (;;) { 3469 rv = casueword32(&rwlock->rw_state, state, 3470 &oldstate, state - 1); 3471 if (rv == -1) { 3472 error = EFAULT; 3473 goto out; 3474 } 3475 if (rv == 1) { 3476 state = oldstate; 3477 if (URWLOCK_READER_COUNT(oldstate) == 0) { 3478 error = EPERM; 3479 goto out; 3480 } 3481 error = thread_check_susp(td, true); 3482 if (error != 0) 3483 goto out; 3484 } else 3485 break; 3486 } 3487 } else { 3488 error = EPERM; 3489 goto out; 3490 } 3491 3492 count = 0; 3493 3494 if (!(flags & URWLOCK_PREFER_READER)) { 3495 if (state & URWLOCK_WRITE_WAITERS) { 3496 count = 1; 3497 q = UMTX_EXCLUSIVE_QUEUE; 3498 } else if (state & URWLOCK_READ_WAITERS) { 3499 count = INT_MAX; 3500 q = UMTX_SHARED_QUEUE; 3501 } 3502 } else { 3503 if (state & URWLOCK_READ_WAITERS) { 3504 count = INT_MAX; 3505 q = UMTX_SHARED_QUEUE; 3506 } else if (state & 
URWLOCK_WRITE_WAITERS) { 3507 count = 1; 3508 q = UMTX_EXCLUSIVE_QUEUE; 3509 } 3510 } 3511 3512 if (count) { 3513 umtxq_lock(&uq->uq_key); 3514 umtxq_busy(&uq->uq_key); 3515 umtxq_signal_queue(&uq->uq_key, count, q); 3516 umtxq_unbusy(&uq->uq_key); 3517 umtxq_unlock(&uq->uq_key); 3518 } 3519 out: 3520 umtx_key_release(&uq->uq_key); 3521 return (error); 3522 } 3523 3524 #if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10) 3525 static int 3526 do_sem_wait(struct thread *td, struct _usem *sem, struct _umtx_time *timeout) 3527 { 3528 struct umtx_abs_timeout timo; 3529 struct umtx_q *uq; 3530 uint32_t flags, count, count1; 3531 int error, rv, rv1; 3532 3533 uq = td->td_umtxq; 3534 error = fueword32(&sem->_flags, &flags); 3535 if (error == -1) 3536 return (EFAULT); 3537 error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &uq->uq_key); 3538 if (error != 0) 3539 return (error); 3540 3541 if (timeout != NULL) 3542 umtx_abs_timeout_init2(&timo, timeout); 3543 3544 again: 3545 umtxq_lock(&uq->uq_key); 3546 umtxq_busy(&uq->uq_key); 3547 umtxq_insert(uq); 3548 umtxq_unlock(&uq->uq_key); 3549 rv = casueword32(&sem->_has_waiters, 0, &count1, 1); 3550 if (rv == 0) 3551 rv1 = fueword32(&sem->_count, &count); 3552 if (rv == -1 || (rv == 0 && (rv1 == -1 || count != 0)) || 3553 (rv == 1 && count1 == 0)) { 3554 umtxq_lock(&uq->uq_key); 3555 umtxq_unbusy(&uq->uq_key); 3556 umtxq_remove(uq); 3557 umtxq_unlock(&uq->uq_key); 3558 if (rv == 1) { 3559 rv = thread_check_susp(td, true); 3560 if (rv == 0) 3561 goto again; 3562 error = rv; 3563 goto out; 3564 } 3565 if (rv == 0) 3566 rv = rv1; 3567 error = rv == -1 ? EFAULT : 0; 3568 goto out; 3569 } 3570 umtxq_lock(&uq->uq_key); 3571 umtxq_unbusy(&uq->uq_key); 3572 3573 error = umtxq_sleep(uq, "usem", timeout == NULL ? NULL : &timo); 3574 3575 if ((uq->uq_flags & UQF_UMTXQ) == 0) 3576 error = 0; 3577 else { 3578 umtxq_remove(uq); 3579 /* A relative timeout cannot be restarted. */ 3580 if (error == ERESTART && timeout != NULL && 3581 (timeout->_flags & UMTX_ABSTIME) == 0) 3582 error = EINTR; 3583 } 3584 umtxq_unlock(&uq->uq_key); 3585 out: 3586 umtx_key_release(&uq->uq_key); 3587 return (error); 3588 } 3589 3590 /* 3591 * Signal a userland semaphore. 3592 */ 3593 static int 3594 do_sem_wake(struct thread *td, struct _usem *sem) 3595 { 3596 struct umtx_key key; 3597 int error, cnt; 3598 uint32_t flags; 3599 3600 error = fueword32(&sem->_flags, &flags); 3601 if (error == -1) 3602 return (EFAULT); 3603 if ((error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &key)) != 0) 3604 return (error); 3605 umtxq_lock(&key); 3606 umtxq_busy(&key); 3607 cnt = umtxq_count(&key); 3608 if (cnt > 0) { 3609 /* 3610 * Check if count is greater than 0, this means the memory is 3611 * still being referenced by user code, so we can safely 3612 * update _has_waiters flag. 
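 * When waking the last waiter (cnt == 1), clear _has_waiters first so
 * that the userland post fast path stops entering the kernel once the
 * queue has drained.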
3613 */ 3614 if (cnt == 1) { 3615 umtxq_unlock(&key); 3616 error = suword32(&sem->_has_waiters, 0); 3617 umtxq_lock(&key); 3618 if (error == -1) 3619 error = EFAULT; 3620 } 3621 umtxq_signal(&key, 1); 3622 } 3623 umtxq_unbusy(&key); 3624 umtxq_unlock(&key); 3625 umtx_key_release(&key); 3626 return (error); 3627 } 3628 #endif 3629 3630 static int 3631 do_sem2_wait(struct thread *td, struct _usem2 *sem, struct _umtx_time *timeout) 3632 { 3633 struct umtx_abs_timeout timo; 3634 struct umtx_q *uq; 3635 uint32_t count, flags; 3636 int error, rv; 3637 3638 uq = td->td_umtxq; 3639 flags = fuword32(&sem->_flags); 3640 if (timeout != NULL) 3641 umtx_abs_timeout_init2(&timo, timeout); 3642 3643 again: 3644 error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &uq->uq_key); 3645 if (error != 0) 3646 return (error); 3647 umtxq_lock(&uq->uq_key); 3648 umtxq_busy(&uq->uq_key); 3649 umtxq_insert(uq); 3650 umtxq_unlock(&uq->uq_key); 3651 rv = fueword32(&sem->_count, &count); 3652 if (rv == -1) { 3653 umtxq_lock(&uq->uq_key); 3654 umtxq_unbusy(&uq->uq_key); 3655 umtxq_remove(uq); 3656 umtxq_unlock(&uq->uq_key); 3657 umtx_key_release(&uq->uq_key); 3658 return (EFAULT); 3659 } 3660 for (;;) { 3661 if (USEM_COUNT(count) != 0) { 3662 umtxq_lock(&uq->uq_key); 3663 umtxq_unbusy(&uq->uq_key); 3664 umtxq_remove(uq); 3665 umtxq_unlock(&uq->uq_key); 3666 umtx_key_release(&uq->uq_key); 3667 return (0); 3668 } 3669 if (count == USEM_HAS_WAITERS) 3670 break; 3671 rv = casueword32(&sem->_count, 0, &count, USEM_HAS_WAITERS); 3672 if (rv == 0) 3673 break; 3674 umtxq_lock(&uq->uq_key); 3675 umtxq_unbusy(&uq->uq_key); 3676 umtxq_remove(uq); 3677 umtxq_unlock(&uq->uq_key); 3678 umtx_key_release(&uq->uq_key); 3679 if (rv == -1) 3680 return (EFAULT); 3681 rv = thread_check_susp(td, true); 3682 if (rv != 0) 3683 return (rv); 3684 goto again; 3685 } 3686 umtxq_lock(&uq->uq_key); 3687 umtxq_unbusy(&uq->uq_key); 3688 3689 error = umtxq_sleep(uq, "usem", timeout == NULL ? NULL : &timo); 3690 3691 if ((uq->uq_flags & UQF_UMTXQ) == 0) 3692 error = 0; 3693 else { 3694 umtxq_remove(uq); 3695 if (timeout != NULL && (timeout->_flags & UMTX_ABSTIME) == 0) { 3696 /* A relative timeout cannot be restarted. */ 3697 if (error == ERESTART) 3698 error = EINTR; 3699 if (error == EINTR) { 3700 kern_clock_gettime(curthread, timo.clockid, 3701 &timo.cur); 3702 timespecsub(&timo.end, &timo.cur, 3703 &timeout->_timeout); 3704 } 3705 } 3706 } 3707 umtxq_unlock(&uq->uq_key); 3708 umtx_key_release(&uq->uq_key); 3709 return (error); 3710 } 3711 3712 /* 3713 * Signal a userland semaphore. 3714 */ 3715 static int 3716 do_sem2_wake(struct thread *td, struct _usem2 *sem) 3717 { 3718 struct umtx_key key; 3719 int error, cnt, rv; 3720 uint32_t count, flags; 3721 3722 rv = fueword32(&sem->_flags, &flags); 3723 if (rv == -1) 3724 return (EFAULT); 3725 if ((error = umtx_key_get(sem, TYPE_SEM, GET_SHARE(flags), &key)) != 0) 3726 return (error); 3727 umtxq_lock(&key); 3728 umtxq_busy(&key); 3729 cnt = umtxq_count(&key); 3730 if (cnt > 0) { 3731 /* 3732 * If this was the last sleeping thread, clear the waiters 3733 * flag in _count. 
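 * The CAS loop below strips USEM_HAS_WAITERS while preserving the count
 * bits, retrying if userland modifies _count concurrently.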
3734 */ 3735 if (cnt == 1) { 3736 umtxq_unlock(&key); 3737 rv = fueword32(&sem->_count, &count); 3738 while (rv != -1 && count & USEM_HAS_WAITERS) { 3739 rv = casueword32(&sem->_count, count, &count, 3740 count & ~USEM_HAS_WAITERS); 3741 if (rv == 1) { 3742 rv = thread_check_susp(td, true); 3743 if (rv != 0) 3744 break; 3745 } 3746 } 3747 if (rv == -1) 3748 error = EFAULT; 3749 else if (rv > 0) { 3750 error = rv; 3751 } 3752 umtxq_lock(&key); 3753 } 3754 3755 umtxq_signal(&key, 1); 3756 } 3757 umtxq_unbusy(&key); 3758 umtxq_unlock(&key); 3759 umtx_key_release(&key); 3760 return (error); 3761 } 3762 3763 #ifdef COMPAT_FREEBSD10 3764 int 3765 freebsd10__umtx_lock(struct thread *td, struct freebsd10__umtx_lock_args *uap) 3766 { 3767 return (do_lock_umtx(td, uap->umtx, td->td_tid, 0)); 3768 } 3769 3770 int 3771 freebsd10__umtx_unlock(struct thread *td, 3772 struct freebsd10__umtx_unlock_args *uap) 3773 { 3774 return (do_unlock_umtx(td, uap->umtx, td->td_tid)); 3775 } 3776 #endif 3777 3778 inline int 3779 umtx_copyin_timeout(const void *uaddr, struct timespec *tsp) 3780 { 3781 int error; 3782 3783 error = copyin(uaddr, tsp, sizeof(*tsp)); 3784 if (error == 0) { 3785 if (!timespecvalid_interval(tsp)) 3786 error = EINVAL; 3787 } 3788 return (error); 3789 } 3790 3791 static inline int 3792 umtx_copyin_umtx_time(const void *uaddr, size_t size, struct _umtx_time *tp) 3793 { 3794 int error; 3795 3796 if (size <= sizeof(tp->_timeout)) { 3797 tp->_clockid = CLOCK_REALTIME; 3798 tp->_flags = 0; 3799 error = copyin(uaddr, &tp->_timeout, sizeof(tp->_timeout)); 3800 } else 3801 error = copyin(uaddr, tp, sizeof(*tp)); 3802 if (error != 0) 3803 return (error); 3804 if (!timespecvalid_interval(&tp->_timeout)) 3805 return (EINVAL); 3806 return (0); 3807 } 3808 3809 static int 3810 umtx_copyin_robust_lists(const void *uaddr, size_t size, 3811 struct umtx_robust_lists_params *rb) 3812 { 3813 3814 if (size > sizeof(*rb)) 3815 return (EINVAL); 3816 return (copyin(uaddr, rb, size)); 3817 } 3818 3819 static int 3820 umtx_copyout_timeout(void *uaddr, size_t sz, struct timespec *tsp) 3821 { 3822 3823 /* 3824 * Should be guaranteed by the caller, sz == uaddr1 - sizeof(_umtx_time) 3825 * and we're only called if sz >= sizeof(timespec) as supplied in the 3826 * copyops. 3827 */ 3828 KASSERT(sz >= sizeof(*tsp), 3829 ("umtx_copyops specifies incorrect sizes")); 3830 3831 return (copyout(tsp, uaddr, sizeof(*tsp))); 3832 } 3833 3834 #ifdef COMPAT_FREEBSD10 3835 static int 3836 __umtx_op_lock_umtx(struct thread *td, struct _umtx_op_args *uap, 3837 const struct umtx_copyops *ops) 3838 { 3839 struct timespec *ts, timeout; 3840 int error; 3841 3842 /* Allow a null timespec (wait forever). 
*/ 3843 if (uap->uaddr2 == NULL) 3844 ts = NULL; 3845 else { 3846 error = ops->copyin_timeout(uap->uaddr2, &timeout); 3847 if (error != 0) 3848 return (error); 3849 ts = &timeout; 3850 } 3851 #ifdef COMPAT_FREEBSD32 3852 if (ops->compat32) 3853 return (do_lock_umtx32(td, uap->obj, uap->val, ts)); 3854 #endif 3855 return (do_lock_umtx(td, uap->obj, uap->val, ts)); 3856 } 3857 3858 static int 3859 __umtx_op_unlock_umtx(struct thread *td, struct _umtx_op_args *uap, 3860 const struct umtx_copyops *ops) 3861 { 3862 #ifdef COMPAT_FREEBSD32 3863 if (ops->compat32) 3864 return (do_unlock_umtx32(td, uap->obj, uap->val)); 3865 #endif 3866 return (do_unlock_umtx(td, uap->obj, uap->val)); 3867 } 3868 #endif /* COMPAT_FREEBSD10 */ 3869 3870 #if !defined(COMPAT_FREEBSD10) 3871 static int 3872 __umtx_op_unimpl(struct thread *td __unused, struct _umtx_op_args *uap __unused, 3873 const struct umtx_copyops *ops __unused) 3874 { 3875 return (EOPNOTSUPP); 3876 } 3877 #endif /* COMPAT_FREEBSD10 */ 3878 3879 static int 3880 __umtx_op_wait(struct thread *td, struct _umtx_op_args *uap, 3881 const struct umtx_copyops *ops) 3882 { 3883 struct _umtx_time timeout, *tm_p; 3884 int error; 3885 3886 if (uap->uaddr2 == NULL) 3887 tm_p = NULL; 3888 else { 3889 error = ops->copyin_umtx_time( 3890 uap->uaddr2, (size_t)uap->uaddr1, &timeout); 3891 if (error != 0) 3892 return (error); 3893 tm_p = &timeout; 3894 } 3895 return (do_wait(td, uap->obj, uap->val, tm_p, ops->compat32, 0)); 3896 } 3897 3898 static int 3899 __umtx_op_wait_uint(struct thread *td, struct _umtx_op_args *uap, 3900 const struct umtx_copyops *ops) 3901 { 3902 struct _umtx_time timeout, *tm_p; 3903 int error; 3904 3905 if (uap->uaddr2 == NULL) 3906 tm_p = NULL; 3907 else { 3908 error = ops->copyin_umtx_time( 3909 uap->uaddr2, (size_t)uap->uaddr1, &timeout); 3910 if (error != 0) 3911 return (error); 3912 tm_p = &timeout; 3913 } 3914 return (do_wait(td, uap->obj, uap->val, tm_p, 1, 0)); 3915 } 3916 3917 static int 3918 __umtx_op_wait_uint_private(struct thread *td, struct _umtx_op_args *uap, 3919 const struct umtx_copyops *ops) 3920 { 3921 struct _umtx_time *tm_p, timeout; 3922 int error; 3923 3924 if (uap->uaddr2 == NULL) 3925 tm_p = NULL; 3926 else { 3927 error = ops->copyin_umtx_time( 3928 uap->uaddr2, (size_t)uap->uaddr1, &timeout); 3929 if (error != 0) 3930 return (error); 3931 tm_p = &timeout; 3932 } 3933 return (do_wait(td, uap->obj, uap->val, tm_p, 1, 1)); 3934 } 3935 3936 static int 3937 __umtx_op_wake(struct thread *td, struct _umtx_op_args *uap, 3938 const struct umtx_copyops *ops __unused) 3939 { 3940 3941 return (kern_umtx_wake(td, uap->obj, uap->val, 0)); 3942 } 3943 3944 #define BATCH_SIZE 128 3945 static int 3946 __umtx_op_nwake_private_native(struct thread *td, struct _umtx_op_args *uap) 3947 { 3948 char *uaddrs[BATCH_SIZE], **upp; 3949 int count, error, i, pos, tocopy; 3950 3951 upp = (char **)uap->obj; 3952 error = 0; 3953 for (count = uap->val, pos = 0; count > 0; count -= tocopy, 3954 pos += tocopy) { 3955 tocopy = MIN(count, BATCH_SIZE); 3956 error = copyin(upp + pos, uaddrs, tocopy * sizeof(char *)); 3957 if (error != 0) 3958 break; 3959 for (i = 0; i < tocopy; ++i) { 3960 kern_umtx_wake(td, uaddrs[i], INT_MAX, 1); 3961 } 3962 maybe_yield(); 3963 } 3964 return (error); 3965 } 3966 3967 static int 3968 __umtx_op_nwake_private_compat32(struct thread *td, struct _umtx_op_args *uap) 3969 { 3970 uint32_t uaddrs[BATCH_SIZE], *upp; 3971 int count, error, i, pos, tocopy; 3972 3973 upp = (uint32_t *)uap->obj; 3974 error = 0; 3975 for (count = 
uap->val, pos = 0; count > 0; count -= tocopy, 3976 pos += tocopy) { 3977 tocopy = MIN(count, BATCH_SIZE); 3978 error = copyin(upp + pos, uaddrs, tocopy * sizeof(uint32_t)); 3979 if (error != 0) 3980 break; 3981 for (i = 0; i < tocopy; ++i) { 3982 kern_umtx_wake(td, (void *)(uintptr_t)uaddrs[i], 3983 INT_MAX, 1); 3984 } 3985 maybe_yield(); 3986 } 3987 return (error); 3988 } 3989 3990 static int 3991 __umtx_op_nwake_private(struct thread *td, struct _umtx_op_args *uap, 3992 const struct umtx_copyops *ops) 3993 { 3994 3995 if (ops->compat32) 3996 return (__umtx_op_nwake_private_compat32(td, uap)); 3997 return (__umtx_op_nwake_private_native(td, uap)); 3998 } 3999 4000 static int 4001 __umtx_op_wake_private(struct thread *td, struct _umtx_op_args *uap, 4002 const struct umtx_copyops *ops __unused) 4003 { 4004 4005 return (kern_umtx_wake(td, uap->obj, uap->val, 1)); 4006 } 4007 4008 static int 4009 __umtx_op_lock_umutex(struct thread *td, struct _umtx_op_args *uap, 4010 const struct umtx_copyops *ops) 4011 { 4012 struct _umtx_time *tm_p, timeout; 4013 int error; 4014 4015 /* Allow a null timespec (wait forever). */ 4016 if (uap->uaddr2 == NULL) 4017 tm_p = NULL; 4018 else { 4019 error = ops->copyin_umtx_time( 4020 uap->uaddr2, (size_t)uap->uaddr1, &timeout); 4021 if (error != 0) 4022 return (error); 4023 tm_p = &timeout; 4024 } 4025 return (do_lock_umutex(td, uap->obj, tm_p, 0)); 4026 } 4027 4028 static int 4029 __umtx_op_trylock_umutex(struct thread *td, struct _umtx_op_args *uap, 4030 const struct umtx_copyops *ops __unused) 4031 { 4032 4033 return (do_lock_umutex(td, uap->obj, NULL, _UMUTEX_TRY)); 4034 } 4035 4036 static int 4037 __umtx_op_wait_umutex(struct thread *td, struct _umtx_op_args *uap, 4038 const struct umtx_copyops *ops) 4039 { 4040 struct _umtx_time *tm_p, timeout; 4041 int error; 4042 4043 /* Allow a null timespec (wait forever). */ 4044 if (uap->uaddr2 == NULL) 4045 tm_p = NULL; 4046 else { 4047 error = ops->copyin_umtx_time( 4048 uap->uaddr2, (size_t)uap->uaddr1, &timeout); 4049 if (error != 0) 4050 return (error); 4051 tm_p = &timeout; 4052 } 4053 return (do_lock_umutex(td, uap->obj, tm_p, _UMUTEX_WAIT)); 4054 } 4055 4056 static int 4057 __umtx_op_wake_umutex(struct thread *td, struct _umtx_op_args *uap, 4058 const struct umtx_copyops *ops __unused) 4059 { 4060 4061 return (do_wake_umutex(td, uap->obj)); 4062 } 4063 4064 static int 4065 __umtx_op_unlock_umutex(struct thread *td, struct _umtx_op_args *uap, 4066 const struct umtx_copyops *ops __unused) 4067 { 4068 4069 return (do_unlock_umutex(td, uap->obj, false)); 4070 } 4071 4072 static int 4073 __umtx_op_set_ceiling(struct thread *td, struct _umtx_op_args *uap, 4074 const struct umtx_copyops *ops __unused) 4075 { 4076 4077 return (do_set_ceiling(td, uap->obj, uap->val, uap->uaddr1)); 4078 } 4079 4080 static int 4081 __umtx_op_cv_wait(struct thread *td, struct _umtx_op_args *uap, 4082 const struct umtx_copyops *ops) 4083 { 4084 struct timespec *ts, timeout; 4085 int error; 4086 4087 /* Allow a null timespec (wait forever). 
*/ 4088 if (uap->uaddr2 == NULL) 4089 ts = NULL; 4090 else { 4091 error = ops->copyin_timeout(uap->uaddr2, &timeout); 4092 if (error != 0) 4093 return (error); 4094 ts = &timeout; 4095 } 4096 return (do_cv_wait(td, uap->obj, uap->uaddr1, ts, uap->val)); 4097 } 4098 4099 static int 4100 __umtx_op_cv_signal(struct thread *td, struct _umtx_op_args *uap, 4101 const struct umtx_copyops *ops __unused) 4102 { 4103 4104 return (do_cv_signal(td, uap->obj)); 4105 } 4106 4107 static int 4108 __umtx_op_cv_broadcast(struct thread *td, struct _umtx_op_args *uap, 4109 const struct umtx_copyops *ops __unused) 4110 { 4111 4112 return (do_cv_broadcast(td, uap->obj)); 4113 } 4114 4115 static int 4116 __umtx_op_rw_rdlock(struct thread *td, struct _umtx_op_args *uap, 4117 const struct umtx_copyops *ops) 4118 { 4119 struct _umtx_time timeout; 4120 int error; 4121 4122 /* Allow a null timespec (wait forever). */ 4123 if (uap->uaddr2 == NULL) { 4124 error = do_rw_rdlock(td, uap->obj, uap->val, 0); 4125 } else { 4126 error = ops->copyin_umtx_time(uap->uaddr2, 4127 (size_t)uap->uaddr1, &timeout); 4128 if (error != 0) 4129 return (error); 4130 error = do_rw_rdlock(td, uap->obj, uap->val, &timeout); 4131 } 4132 return (error); 4133 } 4134 4135 static int 4136 __umtx_op_rw_wrlock(struct thread *td, struct _umtx_op_args *uap, 4137 const struct umtx_copyops *ops) 4138 { 4139 struct _umtx_time timeout; 4140 int error; 4141 4142 /* Allow a null timespec (wait forever). */ 4143 if (uap->uaddr2 == NULL) { 4144 error = do_rw_wrlock(td, uap->obj, 0); 4145 } else { 4146 error = ops->copyin_umtx_time(uap->uaddr2, 4147 (size_t)uap->uaddr1, &timeout); 4148 if (error != 0) 4149 return (error); 4150 4151 error = do_rw_wrlock(td, uap->obj, &timeout); 4152 } 4153 return (error); 4154 } 4155 4156 static int 4157 __umtx_op_rw_unlock(struct thread *td, struct _umtx_op_args *uap, 4158 const struct umtx_copyops *ops __unused) 4159 { 4160 4161 return (do_rw_unlock(td, uap->obj)); 4162 } 4163 4164 #if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10) 4165 static int 4166 __umtx_op_sem_wait(struct thread *td, struct _umtx_op_args *uap, 4167 const struct umtx_copyops *ops) 4168 { 4169 struct _umtx_time *tm_p, timeout; 4170 int error; 4171 4172 /* Allow a null timespec (wait forever). */ 4173 if (uap->uaddr2 == NULL) 4174 tm_p = NULL; 4175 else { 4176 error = ops->copyin_umtx_time( 4177 uap->uaddr2, (size_t)uap->uaddr1, &timeout); 4178 if (error != 0) 4179 return (error); 4180 tm_p = &timeout; 4181 } 4182 return (do_sem_wait(td, uap->obj, tm_p)); 4183 } 4184 4185 static int 4186 __umtx_op_sem_wake(struct thread *td, struct _umtx_op_args *uap, 4187 const struct umtx_copyops *ops __unused) 4188 { 4189 4190 return (do_sem_wake(td, uap->obj)); 4191 } 4192 #endif 4193 4194 static int 4195 __umtx_op_wake2_umutex(struct thread *td, struct _umtx_op_args *uap, 4196 const struct umtx_copyops *ops __unused) 4197 { 4198 4199 return (do_wake2_umutex(td, uap->obj, uap->val)); 4200 } 4201 4202 static int 4203 __umtx_op_sem2_wait(struct thread *td, struct _umtx_op_args *uap, 4204 const struct umtx_copyops *ops) 4205 { 4206 struct _umtx_time *tm_p, timeout; 4207 size_t uasize; 4208 int error; 4209 4210 /* Allow a null timespec (wait forever). 
*/ 4211 if (uap->uaddr2 == NULL) { 4212 uasize = 0; 4213 tm_p = NULL; 4214 } else { 4215 uasize = (size_t)uap->uaddr1; 4216 error = ops->copyin_umtx_time(uap->uaddr2, uasize, &timeout); 4217 if (error != 0) 4218 return (error); 4219 tm_p = &timeout; 4220 } 4221 error = do_sem2_wait(td, uap->obj, tm_p); 4222 if (error == EINTR && uap->uaddr2 != NULL && 4223 (timeout._flags & UMTX_ABSTIME) == 0 && 4224 uasize >= ops->umtx_time_sz + ops->timespec_sz) { 4225 error = ops->copyout_timeout( 4226 (void *)((uintptr_t)uap->uaddr2 + ops->umtx_time_sz), 4227 uasize - ops->umtx_time_sz, &timeout._timeout); 4228 if (error == 0) { 4229 error = EINTR; 4230 } 4231 } 4232 4233 return (error); 4234 } 4235 4236 static int 4237 __umtx_op_sem2_wake(struct thread *td, struct _umtx_op_args *uap, 4238 const struct umtx_copyops *ops __unused) 4239 { 4240 4241 return (do_sem2_wake(td, uap->obj)); 4242 } 4243 4244 #define USHM_OBJ_UMTX(o) \ 4245 ((struct umtx_shm_obj_list *)(&(o)->umtx_data)) 4246 4247 #define USHMF_REG_LINKED 0x0001 4248 #define USHMF_OBJ_LINKED 0x0002 4249 struct umtx_shm_reg { 4250 TAILQ_ENTRY(umtx_shm_reg) ushm_reg_link; 4251 LIST_ENTRY(umtx_shm_reg) ushm_obj_link; 4252 struct umtx_key ushm_key; 4253 struct ucred *ushm_cred; 4254 struct shmfd *ushm_obj; 4255 u_int ushm_refcnt; 4256 u_int ushm_flags; 4257 }; 4258 4259 LIST_HEAD(umtx_shm_obj_list, umtx_shm_reg); 4260 TAILQ_HEAD(umtx_shm_reg_head, umtx_shm_reg); 4261 4262 static uma_zone_t umtx_shm_reg_zone; 4263 static struct umtx_shm_reg_head umtx_shm_registry[UMTX_CHAINS]; 4264 static struct mtx umtx_shm_lock; 4265 static struct umtx_shm_reg_head umtx_shm_reg_delfree = 4266 TAILQ_HEAD_INITIALIZER(umtx_shm_reg_delfree); 4267 4268 static void umtx_shm_free_reg(struct umtx_shm_reg *reg); 4269 4270 static void 4271 umtx_shm_reg_delfree_tq(void *context __unused, int pending __unused) 4272 { 4273 struct umtx_shm_reg_head d; 4274 struct umtx_shm_reg *reg, *reg1; 4275 4276 TAILQ_INIT(&d); 4277 mtx_lock(&umtx_shm_lock); 4278 TAILQ_CONCAT(&d, &umtx_shm_reg_delfree, ushm_reg_link); 4279 mtx_unlock(&umtx_shm_lock); 4280 TAILQ_FOREACH_SAFE(reg, &d, ushm_reg_link, reg1) { 4281 TAILQ_REMOVE(&d, reg, ushm_reg_link); 4282 umtx_shm_free_reg(reg); 4283 } 4284 } 4285 4286 static struct task umtx_shm_reg_delfree_task = 4287 TASK_INITIALIZER(0, umtx_shm_reg_delfree_tq, NULL); 4288 4289 static struct umtx_shm_reg * 4290 umtx_shm_find_reg_locked(const struct umtx_key *key) 4291 { 4292 struct umtx_shm_reg *reg; 4293 struct umtx_shm_reg_head *reg_head; 4294 4295 KASSERT(key->shared, ("umtx_p_find_rg: private key")); 4296 mtx_assert(&umtx_shm_lock, MA_OWNED); 4297 reg_head = &umtx_shm_registry[key->hash]; 4298 TAILQ_FOREACH(reg, reg_head, ushm_reg_link) { 4299 KASSERT(reg->ushm_key.shared, 4300 ("non-shared key on reg %p %d", reg, reg->ushm_key.shared)); 4301 if (reg->ushm_key.info.shared.object == 4302 key->info.shared.object && 4303 reg->ushm_key.info.shared.offset == 4304 key->info.shared.offset) { 4305 KASSERT(reg->ushm_key.type == TYPE_SHM, ("TYPE_USHM")); 4306 KASSERT(reg->ushm_refcnt > 0, 4307 ("reg %p refcnt 0 onlist", reg)); 4308 KASSERT((reg->ushm_flags & USHMF_REG_LINKED) != 0, 4309 ("reg %p not linked", reg)); 4310 reg->ushm_refcnt++; 4311 return (reg); 4312 } 4313 } 4314 return (NULL); 4315 } 4316 4317 static struct umtx_shm_reg * 4318 umtx_shm_find_reg(const struct umtx_key *key) 4319 { 4320 struct umtx_shm_reg *reg; 4321 4322 mtx_lock(&umtx_shm_lock); 4323 reg = umtx_shm_find_reg_locked(key); 4324 mtx_unlock(&umtx_shm_lock); 4325 return (reg); 4326 } 4327 
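/*
 * Release the resources backing a shared-memory registration: return the
 * per-uid RLIMIT_UMTXP accounting, drop the credential and shm object
 * references, and free the zone allocation.
 */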
4328 static void
4329 umtx_shm_free_reg(struct umtx_shm_reg *reg)
4330 {
4331
4332 	chgumtxcnt(reg->ushm_cred->cr_ruidinfo, -1, 0);
4333 	crfree(reg->ushm_cred);
4334 	shm_drop(reg->ushm_obj);
4335 	uma_zfree(umtx_shm_reg_zone, reg);
4336 }
4337
4338 static bool
4339 umtx_shm_unref_reg_locked(struct umtx_shm_reg *reg, bool force)
4340 {
4341 	bool res;
4342
4343 	mtx_assert(&umtx_shm_lock, MA_OWNED);
4344 	KASSERT(reg->ushm_refcnt > 0, ("ushm_reg %p refcnt 0", reg));
4345 	reg->ushm_refcnt--;
4346 	res = reg->ushm_refcnt == 0;
4347 	if (res || force) {
4348 		if ((reg->ushm_flags & USHMF_REG_LINKED) != 0) {
4349 			TAILQ_REMOVE(&umtx_shm_registry[reg->ushm_key.hash],
4350 			    reg, ushm_reg_link);
4351 			reg->ushm_flags &= ~USHMF_REG_LINKED;
4352 		}
4353 		if ((reg->ushm_flags & USHMF_OBJ_LINKED) != 0) {
4354 			LIST_REMOVE(reg, ushm_obj_link);
4355 			reg->ushm_flags &= ~USHMF_OBJ_LINKED;
4356 		}
4357 	}
4358 	return (res);
4359 }
4360
4361 static void
4362 umtx_shm_unref_reg(struct umtx_shm_reg *reg, bool force)
4363 {
4364 	vm_object_t object;
4365 	bool dofree;
4366
4367 	if (force) {
4368 		object = reg->ushm_obj->shm_object;
4369 		VM_OBJECT_WLOCK(object);
4370 		vm_object_set_flag(object, OBJ_UMTXDEAD);
4371 		VM_OBJECT_WUNLOCK(object);
4372 	}
4373 	mtx_lock(&umtx_shm_lock);
4374 	dofree = umtx_shm_unref_reg_locked(reg, force);
4375 	mtx_unlock(&umtx_shm_lock);
4376 	if (dofree)
4377 		umtx_shm_free_reg(reg);
4378 }
4379
4380 void
4381 umtx_shm_object_init(vm_object_t object)
4382 {
4383
4384 	LIST_INIT(USHM_OBJ_UMTX(object));
4385 }
4386
4387 void
4388 umtx_shm_object_terminated(vm_object_t object)
4389 {
4390 	struct umtx_shm_reg *reg, *reg1;
4391 	bool dofree;
4392
4393 	if (LIST_EMPTY(USHM_OBJ_UMTX(object)))
4394 		return;
4395
4396 	dofree = false;
4397 	mtx_lock(&umtx_shm_lock);
4398 	LIST_FOREACH_SAFE(reg, USHM_OBJ_UMTX(object), ushm_obj_link, reg1) {
4399 		if (umtx_shm_unref_reg_locked(reg, true)) {
4400 			TAILQ_INSERT_TAIL(&umtx_shm_reg_delfree, reg,
4401 			    ushm_reg_link);
4402 			dofree = true;
4403 		}
4404 	}
4405 	mtx_unlock(&umtx_shm_lock);
4406 	if (dofree)
4407 		taskqueue_enqueue(taskqueue_thread, &umtx_shm_reg_delfree_task);
4408 }
4409
4410 static int
4411 umtx_shm_create_reg(struct thread *td, const struct umtx_key *key,
4412     struct umtx_shm_reg **res)
4413 {
4414 	struct umtx_shm_reg *reg, *reg1;
4415 	struct ucred *cred;
4416 	int error;
4417
4418 	reg = umtx_shm_find_reg(key);
4419 	if (reg != NULL) {
4420 		*res = reg;
4421 		return (0);
4422 	}
4423 	cred = td->td_ucred;
4424 	if (!chgumtxcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_UMTXP)))
4425 		return (ENOMEM);
4426 	reg = uma_zalloc(umtx_shm_reg_zone, M_WAITOK | M_ZERO);
4427 	reg->ushm_refcnt = 1;
4428 	bcopy(key, &reg->ushm_key, sizeof(*key));
4429 	reg->ushm_obj = shm_alloc(td->td_ucred, O_RDWR, false);
4430 	reg->ushm_cred = crhold(cred);
4431 	error = shm_dotruncate(reg->ushm_obj, PAGE_SIZE);
4432 	if (error != 0) {
4433 		umtx_shm_free_reg(reg);
4434 		return (error);
4435 	}
4436 	mtx_lock(&umtx_shm_lock);
4437 	reg1 = umtx_shm_find_reg_locked(key);
4438 	if (reg1 != NULL) {
4439 		mtx_unlock(&umtx_shm_lock);
4440 		umtx_shm_free_reg(reg);
4441 		*res = reg1;
4442 		return (0);
4443 	}
4444 	reg->ushm_refcnt++;
4445 	TAILQ_INSERT_TAIL(&umtx_shm_registry[key->hash], reg, ushm_reg_link);
4446 	LIST_INSERT_HEAD(USHM_OBJ_UMTX(key->info.shared.object), reg,
4447 	    ushm_obj_link);
4448 	reg->ushm_flags = USHMF_REG_LINKED | USHMF_OBJ_LINKED;
4449 	mtx_unlock(&umtx_shm_lock);
4450 	*res = reg;
4451 	return (0);
4452 }
4453
4454 static int
4455 umtx_shm_alive(struct thread *td, void *addr)
4456 {
4457 	vm_map_t map; 4458

static int
__umtx_op_shm(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops __unused)
{

	return (umtx_shm(td, uap->uaddr1, uap->val));
}

static int
__umtx_op_robust_lists(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *ops)
{
	struct umtx_robust_lists_params rb;
	int error;

	if (ops->compat32) {
		if ((td->td_pflags2 & TDP2_COMPAT32RB) == 0 &&
		    (td->td_rb_list != 0 || td->td_rbp_list != 0 ||
		    td->td_rb_inact != 0))
			return (EBUSY);
	} else if ((td->td_pflags2 & TDP2_COMPAT32RB) != 0) {
		return (EBUSY);
	}

	bzero(&rb, sizeof(rb));
	error = ops->copyin_robust_lists(uap->uaddr1, uap->val, &rb);
	if (error != 0)
		return (error);

	if (ops->compat32)
		td->td_pflags2 |= TDP2_COMPAT32RB;

	td->td_rb_list = rb.robust_list_offset;
	td->td_rbp_list = rb.robust_priv_list_offset;
	td->td_rb_inact = rb.robust_inact_offset;
	return (0);
}
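
/*
 * Sketch of how a threading library is expected to register its robust
 * mutex lists.  The field names on the userlevel thread structure
 * (thr->...) are illustrative; only the calling convention follows from
 * the handler above: uaddr1 points at the parameter block and val carries
 * its size, which allows the structure to grow compatibly:
 *
 *	struct umtx_robust_lists_params rb;
 *
 *	rb.robust_list_offset = (uintptr_t)&thr->robust_list;
 *	rb.robust_priv_list_offset = (uintptr_t)&thr->priv_robust_list;
 *	rb.robust_inact_offset = (uintptr_t)&thr->inact_mtx;
 *	if (_umtx_op(NULL, UMTX_OP_ROBUST_LISTS, sizeof(rb), &rb, NULL) == -1)
 *		err(1, "UMTX_OP_ROBUST_LISTS");
 */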

#if defined(__i386__) || defined(__amd64__)
/*
 * Provide the standard 32-bit definitions for x86, since native/compat32 use a
 * 32-bit time_t there.  Other architectures just need the i386 definitions
 * along with their standard compat32.
 */
struct timespecx32 {
	int64_t		tv_sec;
	int32_t		tv_nsec;
};

struct umtx_timex32 {
	struct timespecx32	_timeout;
	uint32_t		_flags;
	uint32_t		_clockid;
};

#ifndef __i386__
#define	timespeci386	timespec32
#define	umtx_timei386	umtx_time32
#endif
#else /* !__i386__ && !__amd64__ */
/* 32-bit architectures can emulate i386, so define these almost everywhere. */
struct timespeci386 {
	int32_t		tv_sec;
	int32_t		tv_nsec;
};

struct umtx_timei386 {
	struct timespeci386	_timeout;
	uint32_t		_flags;
	uint32_t		_clockid;
};

#if defined(__LP64__)
#define	timespecx32	timespec32
#define	umtx_timex32	umtx_time32
#endif
#endif

static int
umtx_copyin_robust_lists32(const void *uaddr, size_t size,
    struct umtx_robust_lists_params *rbp)
{
	struct umtx_robust_lists_params_compat32 rb32;
	int error;

	if (size > sizeof(rb32))
		return (EINVAL);
	bzero(&rb32, sizeof(rb32));
	error = copyin(uaddr, &rb32, size);
	if (error != 0)
		return (error);
	CP(rb32, *rbp, robust_list_offset);
	CP(rb32, *rbp, robust_priv_list_offset);
	CP(rb32, *rbp, robust_inact_offset);
	return (0);
}

#ifndef __i386__
static inline int
umtx_copyin_timeouti386(const void *uaddr, struct timespec *tsp)
{
	struct timespeci386 ts32;
	int error;

	error = copyin(uaddr, &ts32, sizeof(ts32));
	if (error == 0) {
		if (!timespecvalid_interval(&ts32))
			error = EINVAL;
		else {
			CP(ts32, *tsp, tv_sec);
			CP(ts32, *tsp, tv_nsec);
		}
	}
	return (error);
}

static inline int
umtx_copyin_umtx_timei386(const void *uaddr, size_t size, struct _umtx_time *tp)
{
	struct umtx_timei386 t32;
	int error;

	t32._clockid = CLOCK_REALTIME;
	t32._flags = 0;
	if (size <= sizeof(t32._timeout))
		error = copyin(uaddr, &t32._timeout, sizeof(t32._timeout));
	else
		error = copyin(uaddr, &t32, sizeof(t32));
	if (error != 0)
		return (error);
	if (!timespecvalid_interval(&t32._timeout))
		return (EINVAL);
	TS_CP(t32, *tp, _timeout);
	CP(t32, *tp, _flags);
	CP(t32, *tp, _clockid);
	return (0);
}

static int
umtx_copyout_timeouti386(void *uaddr, size_t sz, struct timespec *tsp)
{
	struct timespeci386 remain32 = {
		.tv_sec = tsp->tv_sec,
		.tv_nsec = tsp->tv_nsec,
	};

	/*
	 * Should be guaranteed by the caller, sz == uaddr1 - sizeof(_umtx_time)
	 * and we're only called if sz >= sizeof(timespec) as supplied in the
	 * copyops.
	 */
	KASSERT(sz >= sizeof(remain32),
	    ("umtx_copyops specifies incorrect sizes"));

	return (copyout(&remain32, uaddr, sizeof(remain32)));
}
#endif /* !__i386__ */

#if defined(__i386__) || defined(__LP64__)
static inline int
umtx_copyin_timeoutx32(const void *uaddr, struct timespec *tsp)
{
	struct timespecx32 ts32;
	int error;

	error = copyin(uaddr, &ts32, sizeof(ts32));
	if (error == 0) {
		if (!timespecvalid_interval(&ts32))
			error = EINVAL;
		else {
			CP(ts32, *tsp, tv_sec);
			CP(ts32, *tsp, tv_nsec);
		}
	}
	return (error);
}

static inline int
umtx_copyin_umtx_timex32(const void *uaddr, size_t size, struct _umtx_time *tp)
{
	struct umtx_timex32 t32;
	int error;

	t32._clockid = CLOCK_REALTIME;
	t32._flags = 0;
	if (size <= sizeof(t32._timeout))
		error = copyin(uaddr, &t32._timeout, sizeof(t32._timeout));
	else
		error = copyin(uaddr, &t32, sizeof(t32));
	if (error != 0)
		return (error);
	if (!timespecvalid_interval(&t32._timeout))
		return (EINVAL);
	TS_CP(t32, *tp, _timeout);
	CP(t32, *tp, _flags);
	CP(t32, *tp, _clockid);
	return (0);
}

static int
umtx_copyout_timeoutx32(void *uaddr, size_t sz, struct timespec *tsp)
{
	struct timespecx32 remain32 = {
		.tv_sec = tsp->tv_sec,
		.tv_nsec = tsp->tv_nsec,
	};

	/*
	 * Should be guaranteed by the caller, sz == uaddr1 - sizeof(_umtx_time)
	 * and we're only called if sz >= sizeof(timespec) as supplied in the
	 * copyops.
	 */
	KASSERT(sz >= sizeof(remain32),
	    ("umtx_copyops specifies incorrect sizes"));

	return (copyout(&remain32, uaddr, sizeof(remain32)));
}
#endif /* __i386__ || __LP64__ */

typedef int (*_umtx_op_func)(struct thread *td, struct _umtx_op_args *uap,
    const struct umtx_copyops *umtx_ops);

static const _umtx_op_func op_table[] = {
#ifdef COMPAT_FREEBSD10
	[UMTX_OP_LOCK]		= __umtx_op_lock_umtx,
	[UMTX_OP_UNLOCK]	= __umtx_op_unlock_umtx,
#else
	[UMTX_OP_LOCK]		= __umtx_op_unimpl,
	[UMTX_OP_UNLOCK]	= __umtx_op_unimpl,
#endif
	[UMTX_OP_WAIT]		= __umtx_op_wait,
	[UMTX_OP_WAKE]		= __umtx_op_wake,
	[UMTX_OP_MUTEX_TRYLOCK]	= __umtx_op_trylock_umutex,
	[UMTX_OP_MUTEX_LOCK]	= __umtx_op_lock_umutex,
	[UMTX_OP_MUTEX_UNLOCK]	= __umtx_op_unlock_umutex,
	[UMTX_OP_SET_CEILING]	= __umtx_op_set_ceiling,
	[UMTX_OP_CV_WAIT]	= __umtx_op_cv_wait,
	[UMTX_OP_CV_SIGNAL]	= __umtx_op_cv_signal,
	[UMTX_OP_CV_BROADCAST]	= __umtx_op_cv_broadcast,
	[UMTX_OP_WAIT_UINT]	= __umtx_op_wait_uint,
	[UMTX_OP_RW_RDLOCK]	= __umtx_op_rw_rdlock,
	[UMTX_OP_RW_WRLOCK]	= __umtx_op_rw_wrlock,
	[UMTX_OP_RW_UNLOCK]	= __umtx_op_rw_unlock,
	[UMTX_OP_WAIT_UINT_PRIVATE] = __umtx_op_wait_uint_private,
	[UMTX_OP_WAKE_PRIVATE]	= __umtx_op_wake_private,
	[UMTX_OP_MUTEX_WAIT]	= __umtx_op_wait_umutex,
	[UMTX_OP_MUTEX_WAKE]	= __umtx_op_wake_umutex,
#if defined(COMPAT_FREEBSD9) || defined(COMPAT_FREEBSD10)
	[UMTX_OP_SEM_WAIT]	= __umtx_op_sem_wait,
	[UMTX_OP_SEM_WAKE]	= __umtx_op_sem_wake,
#else
	[UMTX_OP_SEM_WAIT]	= __umtx_op_unimpl,
	[UMTX_OP_SEM_WAKE]	= __umtx_op_unimpl,
#endif
	[UMTX_OP_NWAKE_PRIVATE]	= __umtx_op_nwake_private,
	[UMTX_OP_MUTEX_WAKE2]	= __umtx_op_wake2_umutex,
	[UMTX_OP_SEM2_WAIT]	= __umtx_op_sem2_wait,
	[UMTX_OP_SEM2_WAKE]	= __umtx_op_sem2_wake,
	[UMTX_OP_SHM]		= __umtx_op_shm,
	[UMTX_OP_ROBUST_LISTS]	= __umtx_op_robust_lists,
};

static const struct umtx_copyops umtx_native_ops = {
	.copyin_timeout = umtx_copyin_timeout,
	.copyin_umtx_time = umtx_copyin_umtx_time,
	.copyin_robust_lists = umtx_copyin_robust_lists,
	.copyout_timeout = umtx_copyout_timeout,
	.timespec_sz = sizeof(struct timespec),
	.umtx_time_sz = sizeof(struct _umtx_time),
};

#ifndef __i386__
static const struct umtx_copyops umtx_native_opsi386 = {
	.copyin_timeout = umtx_copyin_timeouti386,
	.copyin_umtx_time = umtx_copyin_umtx_timei386,
	.copyin_robust_lists = umtx_copyin_robust_lists32,
	.copyout_timeout = umtx_copyout_timeouti386,
	.timespec_sz = sizeof(struct timespeci386),
	.umtx_time_sz = sizeof(struct umtx_timei386),
	.compat32 = true,
};
#endif

#if defined(__i386__) || defined(__LP64__)
/* i386 can emulate other 32-bit archs, too! */
static const struct umtx_copyops umtx_native_opsx32 = {
	.copyin_timeout = umtx_copyin_timeoutx32,
	.copyin_umtx_time = umtx_copyin_umtx_timex32,
	.copyin_robust_lists = umtx_copyin_robust_lists32,
	.copyout_timeout = umtx_copyout_timeoutx32,
	.timespec_sz = sizeof(struct timespecx32),
	.umtx_time_sz = sizeof(struct umtx_timex32),
	.compat32 = true,
};

#ifdef COMPAT_FREEBSD32
#ifdef __amd64__
#define	umtx_native_ops32	umtx_native_opsi386
#else
#define	umtx_native_ops32	umtx_native_opsx32
#endif
#endif /* COMPAT_FREEBSD32 */
#endif /* __i386__ || __LP64__ */

#define	UMTX_OP__FLAGS	(UMTX_OP__32BIT | UMTX_OP__I386)

static int
kern__umtx_op(struct thread *td, void *obj, int op, unsigned long val,
    void *uaddr1, void *uaddr2, const struct umtx_copyops *ops)
{
	struct _umtx_op_args uap = {
		.obj = obj,
		.op = op & ~UMTX_OP__FLAGS,
		.val = val,
		.uaddr1 = uaddr1,
		.uaddr2 = uaddr2
	};

	if (uap.op >= nitems(op_table))
		return (EINVAL);
	return ((*op_table[uap.op])(td, &uap, ops));
}

int
sys__umtx_op(struct thread *td, struct _umtx_op_args *uap)
{
	const struct umtx_copyops *umtx_ops;

	umtx_ops = &umtx_native_ops;
#ifdef __LP64__
	if ((uap->op & (UMTX_OP__32BIT | UMTX_OP__I386)) != 0) {
		if ((uap->op & UMTX_OP__I386) != 0)
			umtx_ops = &umtx_native_opsi386;
		else
			umtx_ops = &umtx_native_opsx32;
	}
#elif !defined(__i386__)
	/* We consider UMTX_OP__32BIT a nop on !i386 ILP32. */
	if ((uap->op & UMTX_OP__I386) != 0)
		umtx_ops = &umtx_native_opsi386;
#else
	/* Likewise, UMTX_OP__I386 is a nop on i386. */
	if ((uap->op & UMTX_OP__32BIT) != 0)
		umtx_ops = &umtx_native_opsx32;
#endif
	return (kern__umtx_op(td, uap->obj, uap->op, uap->val, uap->uaddr1,
	    uap->uaddr2, umtx_ops));
}
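
/*
 * The UMTX_OP__32BIT and UMTX_OP__I386 flags let a 64-bit caller request
 * the 32-bit copyin/copyout layouts without going through the
 * COMPAT_FREEBSD32 entry point, e.g. when it operates on structures that
 * live in a mapping shared with a 32-bit process.  An illustrative
 * userlevel sketch (the 32-bit time layout is spelled out as a local
 * struct rather than naming any particular header type):
 *
 *	struct { int32_t tv_sec, tv_nsec; } ts32 = { 1, 0 };
 *
 *	_umtx_op(futex32, UMTX_OP_WAIT_UINT_PRIVATE | UMTX_OP__I386,
 *	    expected, (void *)(uintptr_t)sizeof(ts32), &ts32);
 *
 * The flags are masked off before indexing op_table, so they never change
 * which handler runs, only which umtx_copyops it is passed.
 */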

#ifdef COMPAT_FREEBSD32
#ifdef COMPAT_FREEBSD10
int
freebsd10_freebsd32__umtx_lock(struct thread *td,
    struct freebsd10_freebsd32__umtx_lock_args *uap)
{
	return (do_lock_umtx32(td, (uint32_t *)uap->umtx, td->td_tid, NULL));
}

int
freebsd10_freebsd32__umtx_unlock(struct thread *td,
    struct freebsd10_freebsd32__umtx_unlock_args *uap)
{
	return (do_unlock_umtx32(td, (uint32_t *)uap->umtx, td->td_tid));
}
#endif /* COMPAT_FREEBSD10 */

int
freebsd32__umtx_op(struct thread *td, struct freebsd32__umtx_op_args *uap)
{

	return (kern__umtx_op(td, uap->obj, uap->op, uap->val, uap->uaddr1,
	    uap->uaddr2, &umtx_native_ops32));
}
#endif /* COMPAT_FREEBSD32 */

void
umtx_thread_init(struct thread *td)
{

	td->td_umtxq = umtxq_alloc();
	td->td_umtxq->uq_thread = td;
}

void
umtx_thread_fini(struct thread *td)
{

	umtxq_free(td->td_umtxq);
}

/*
 * Called when a new thread is created, e.g. by fork().
 */
void
umtx_thread_alloc(struct thread *td)
{
	struct umtx_q *uq;

	uq = td->td_umtxq;
	uq->uq_inherited_pri = PRI_MAX;

	KASSERT(uq->uq_flags == 0, ("uq_flags != 0"));
	KASSERT(uq->uq_thread == td, ("uq_thread != td"));
	KASSERT(uq->uq_pi_blocked == NULL, ("uq_pi_blocked != NULL"));
	KASSERT(TAILQ_EMPTY(&uq->uq_pi_contested), ("uq_pi_contested is not empty"));
}

/*
 * exec() hook.
 *
 * Clear the robust lists for all of the process's threads, not delaying
 * the cleanup to thread exit, since the relevant address space is
 * destroyed right now.
 */
void
umtx_exec(struct proc *p)
{
	struct thread *td;

	KASSERT(p == curproc, ("need curproc"));
	KASSERT((p->p_flag & P_HADTHREADS) == 0 ||
	    (p->p_flag & P_STOPPED_SINGLE) != 0,
	    ("curproc must be single-threaded"));
	/*
	 * There is no need to lock the list as only this thread can be
	 * running.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		KASSERT(td == curthread ||
		    ((td->td_flags & TDF_BOUNDARY) != 0 && TD_IS_SUSPENDED(td)),
		    ("running thread %p %p", p, td));
		umtx_thread_cleanup(td);
		td->td_rb_list = td->td_rbp_list = td->td_rb_inact = 0;
	}
}

/*
 * thread_exit() hook.
 */
void
umtx_thread_exit(struct thread *td)
{

	umtx_thread_cleanup(td);
}

static int
umtx_read_uptr(struct thread *td, uintptr_t ptr, uintptr_t *res, bool compat32)
{
	u_long res1;
	uint32_t res32;
	int error;

	if (compat32) {
		error = fueword32((void *)ptr, &res32);
		if (error == 0)
			res1 = res32;
	} else {
		error = fueword((void *)ptr, &res1);
	}
	if (error == 0)
		*res = res1;
	else
		error = EFAULT;
	return (error);
}

static void
umtx_read_rb_list(struct thread *td, struct umutex *m, uintptr_t *rb_list,
    bool compat32)
{
	struct umutex32 m32;

	if (compat32) {
		memcpy(&m32, m, sizeof(m32));
		*rb_list = m32.m_rb_lnk;
	} else {
		*rb_list = m->m_rb_lnk;
	}
}

static int
umtx_handle_rb(struct thread *td, uintptr_t rbp, uintptr_t *rb_list, bool inact,
    bool compat32)
{
	struct umutex m;
	int error;

	KASSERT(td->td_proc == curproc, ("need current vmspace"));
	error = copyin((void *)rbp, &m, sizeof(m));
	if (error != 0)
		return (error);
	if (rb_list != NULL)
		umtx_read_rb_list(td, &m, rb_list, compat32);
	if ((m.m_flags & UMUTEX_ROBUST) == 0)
		return (EINVAL);
	if ((m.m_owner & ~UMUTEX_CONTESTED) != td->td_tid)
		/* inact is cleared after unlock, allow the inconsistency */
		return (inact ? 0 : EINVAL);
	return (do_unlock_umutex(td, (struct umutex *)rbp, true));
}

static void
umtx_cleanup_rb_list(struct thread *td, uintptr_t rb_list, uintptr_t *rb_inact,
    const char *name, bool compat32)
{
	int error, i;
	uintptr_t rbp;
	bool inact;

	if (rb_list == 0)
		return;
	error = umtx_read_uptr(td, rb_list, &rbp, compat32);
	for (i = 0; error == 0 && rbp != 0 && i < umtx_max_rb; i++) {
		if (rbp == *rb_inact) {
			inact = true;
			*rb_inact = 0;
		} else
			inact = false;
		error = umtx_handle_rb(td, rbp, &rbp, inact, compat32);
	}
	if (i == umtx_max_rb && umtx_verbose_rb) {
		uprintf("comm %s pid %d: reached umtx %smax rb %d\n",
		    td->td_proc->p_comm, td->td_proc->p_pid, name, umtx_max_rb);
	}
	if (error != 0 && umtx_verbose_rb) {
		uprintf("comm %s pid %d: handling %srb error %d\n",
		    td->td_proc->p_comm, td->td_proc->p_pid, name, error);
	}
}

/*
 * Clean up umtx data.
 */
static void
umtx_thread_cleanup(struct thread *td)
{
	struct umtx_q *uq;
	struct umtx_pi *pi;
	uintptr_t rb_inact;
	bool compat32;

	/*
	 * Disown pi mutexes.
	 */
	uq = td->td_umtxq;
	if (uq != NULL) {
		if (uq->uq_inherited_pri != PRI_MAX ||
		    !TAILQ_EMPTY(&uq->uq_pi_contested)) {
			mtx_lock(&umtx_lock);
			uq->uq_inherited_pri = PRI_MAX;
			while ((pi = TAILQ_FIRST(&uq->uq_pi_contested)) != NULL) {
				pi->pi_owner = NULL;
				TAILQ_REMOVE(&uq->uq_pi_contested, pi, pi_link);
			}
			mtx_unlock(&umtx_lock);
		}
		sched_lend_user_prio_cond(td, PRI_MAX);
	}

	compat32 = (td->td_pflags2 & TDP2_COMPAT32RB) != 0;
	td->td_pflags2 &= ~TDP2_COMPAT32RB;

	if (td->td_rb_inact == 0 && td->td_rb_list == 0 && td->td_rbp_list == 0)
		return;

	/*
	 * Handle terminated robust mutexes.  Must be done after
	 * robust pi disown, otherwise unlock could see unowned
	 * entries.
	 */
	rb_inact = td->td_rb_inact;
	if (rb_inact != 0)
		(void)umtx_read_uptr(td, rb_inact, &rb_inact, compat32);
	umtx_cleanup_rb_list(td, td->td_rb_list, &rb_inact, "", compat32);
	umtx_cleanup_rb_list(td, td->td_rbp_list, &rb_inact, "priv ", compat32);
	if (rb_inact != 0)
		(void)umtx_handle_rb(td, rb_inact, NULL, true, compat32);
}
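
/*
 * For reference, an illustrative view of what the exit-time walk above
 * consumes from userspace (only the struct umutex m_rb_lnk field and the
 * three registered pointers are the actual contract):
 *
 *	owned robust mutexes form a singly-linked chain through
 *	umutex.m_rb_lnk, with the head stored in the variable whose
 *	address was registered as robust_list_offset (process-private
 *	mutexes use the robust_priv_list_offset chain);
 *
 *	the variable at robust_inact_offset names the mutex currently
 *	being acquired or released, so a thread that dies between
 *	updating the list and updating m_owner is still handled.
 *
 * The walk is bounded by the kern.ipc.umtx_max_robust sysctl to defend
 * against corrupted or circular user lists.
 */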