1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 #include <sys/sdt.h> 30 31 #include "lint.h" 32 #include "thr_uberdata.h" 33 34 /* 35 * This mutex is initialized to be held by lwp#1. 36 * It is used to block a thread that has returned from a mutex_lock() 37 * of a PTHREAD_PRIO_INHERIT mutex with an unrecoverable error. 38 */ 39 mutex_t stall_mutex = DEFAULTMUTEX; 40 41 static int shared_mutex_held(mutex_t *); 42 43 /* 44 * Lock statistics support functions. 45 */ 46 void 47 record_begin_hold(tdb_mutex_stats_t *msp) 48 { 49 tdb_incr(msp->mutex_lock); 50 msp->mutex_begin_hold = gethrtime(); 51 } 52 53 hrtime_t 54 record_hold_time(tdb_mutex_stats_t *msp) 55 { 56 hrtime_t now = gethrtime(); 57 58 if (msp->mutex_begin_hold) 59 msp->mutex_hold_time += now - msp->mutex_begin_hold; 60 msp->mutex_begin_hold = 0; 61 return (now); 62 } 63 64 /* 65 * Called once at library initialization. 66 */ 67 void 68 mutex_setup(void) 69 { 70 if (set_lock_byte(&stall_mutex.mutex_lockw)) 71 thr_panic("mutex_setup() cannot acquire stall_mutex"); 72 stall_mutex.mutex_owner = (uintptr_t)curthread; 73 } 74 75 /* 76 * The default spin counts of 1000 and 500 are experimentally determined. 77 * On sun4u machines with any number of processors they could be raised 78 * to 10,000 but that (experimentally) makes almost no difference. 79 * The environment variables: 80 * _THREAD_ADAPTIVE_SPIN=count 81 * _THREAD_RELEASE_SPIN=count 82 * can be used to override and set the counts in the range [0 .. 1,000,000]. 83 */ 84 int thread_adaptive_spin = 1000; 85 uint_t thread_max_spinners = 100; 86 int thread_release_spin = 500; 87 int thread_queue_verify = 0; 88 static int ncpus; 89 90 /* 91 * Distinguish spinning for queue locks from spinning for regular locks. 92 * The environment variable: 93 * _THREAD_QUEUE_SPIN=count 94 * can be used to override and set the count in the range [0 .. 1,000,000]. 95 * There is no release spin concept for queue locks. 96 */ 97 int thread_queue_spin = 1000; 98 99 /* 100 * Use the otherwise-unused 'mutex_ownerpid' field of a USYNC_THREAD 101 * mutex to be a count of adaptive spins in progress. 102 */ 103 #define mutex_spinners mutex_ownerpid 104 105 void 106 _mutex_set_typeattr(mutex_t *mp, int attr) 107 { 108 mp->mutex_type |= (uint8_t)attr; 109 } 110 111 /* 112 * 'type' can be one of USYNC_THREAD or USYNC_PROCESS, possibly 113 * augmented by the flags LOCK_RECURSIVE and/or LOCK_ERRORCHECK, 114 * or it can be USYNC_PROCESS_ROBUST with no extra flags. 
115 */ 116 #pragma weak _private_mutex_init = __mutex_init 117 #pragma weak mutex_init = __mutex_init 118 #pragma weak _mutex_init = __mutex_init 119 /* ARGSUSED2 */ 120 int 121 __mutex_init(mutex_t *mp, int type, void *arg) 122 { 123 int error; 124 125 switch (type & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) { 126 case USYNC_THREAD: 127 case USYNC_PROCESS: 128 (void) _memset(mp, 0, sizeof (*mp)); 129 mp->mutex_type = (uint8_t)type; 130 mp->mutex_flag = LOCK_INITED; 131 error = 0; 132 break; 133 case USYNC_PROCESS_ROBUST: 134 if (type & (LOCK_RECURSIVE|LOCK_ERRORCHECK)) 135 error = EINVAL; 136 else 137 error = ___lwp_mutex_init(mp, type); 138 break; 139 default: 140 error = EINVAL; 141 break; 142 } 143 if (error == 0) 144 mp->mutex_magic = MUTEX_MAGIC; 145 return (error); 146 } 147 148 /* 149 * Delete mp from list of ceil mutexes owned by curthread. 150 * Return 1 if the head of the chain was updated. 151 */ 152 int 153 _ceil_mylist_del(mutex_t *mp) 154 { 155 ulwp_t *self = curthread; 156 mxchain_t **mcpp; 157 mxchain_t *mcp; 158 159 mcpp = &self->ul_mxchain; 160 while ((*mcpp)->mxchain_mx != mp) 161 mcpp = &(*mcpp)->mxchain_next; 162 mcp = *mcpp; 163 *mcpp = mcp->mxchain_next; 164 lfree(mcp, sizeof (*mcp)); 165 return (mcpp == &self->ul_mxchain); 166 } 167 168 /* 169 * Add mp to head of list of ceil mutexes owned by curthread. 170 * Return ENOMEM if no memory could be allocated. 171 */ 172 int 173 _ceil_mylist_add(mutex_t *mp) 174 { 175 ulwp_t *self = curthread; 176 mxchain_t *mcp; 177 178 if ((mcp = lmalloc(sizeof (*mcp))) == NULL) 179 return (ENOMEM); 180 mcp->mxchain_mx = mp; 181 mcp->mxchain_next = self->ul_mxchain; 182 self->ul_mxchain = mcp; 183 return (0); 184 } 185 186 /* 187 * Inherit priority from ceiling. The inheritance impacts the effective 188 * priority, not the assigned priority. See _thread_setschedparam_main(). 189 */ 190 void 191 _ceil_prio_inherit(int ceil) 192 { 193 ulwp_t *self = curthread; 194 struct sched_param param; 195 196 (void) _memset(¶m, 0, sizeof (param)); 197 param.sched_priority = ceil; 198 if (_thread_setschedparam_main(self->ul_lwpid, 199 self->ul_policy, ¶m, PRIO_INHERIT)) { 200 /* 201 * Panic since unclear what error code to return. 202 * If we do return the error codes returned by above 203 * called routine, update the man page... 204 */ 205 thr_panic("_thread_setschedparam_main() fails"); 206 } 207 } 208 209 /* 210 * Waive inherited ceiling priority. Inherit from head of owned ceiling locks 211 * if holding at least one ceiling lock. If no ceiling locks are held at this 212 * point, disinherit completely, reverting back to assigned priority. 213 */ 214 void 215 _ceil_prio_waive(void) 216 { 217 ulwp_t *self = curthread; 218 struct sched_param param; 219 220 (void) _memset(¶m, 0, sizeof (param)); 221 if (self->ul_mxchain == NULL) { 222 /* 223 * No ceil locks held. Zero the epri, revert back to ul_pri. 224 * Since thread's hash lock is not held, one cannot just 225 * read ul_pri here...do it in the called routine... 226 */ 227 param.sched_priority = self->ul_pri; /* ignored */ 228 if (_thread_setschedparam_main(self->ul_lwpid, 229 self->ul_policy, ¶m, PRIO_DISINHERIT)) 230 thr_panic("_thread_setschedparam_main() fails"); 231 } else { 232 /* 233 * Set priority to that of the mutex at the head 234 * of the ceilmutex chain. 
235 */ 236 param.sched_priority = 237 self->ul_mxchain->mxchain_mx->mutex_ceiling; 238 if (_thread_setschedparam_main(self->ul_lwpid, 239 self->ul_policy, ¶m, PRIO_INHERIT)) 240 thr_panic("_thread_setschedparam_main() fails"); 241 } 242 } 243 244 /* 245 * Non-preemptive spin locks. Used by queue_lock(). 246 * No lock statistics are gathered for these locks. 247 */ 248 void 249 spin_lock_set(mutex_t *mp) 250 { 251 ulwp_t *self = curthread; 252 253 no_preempt(self); 254 if (set_lock_byte(&mp->mutex_lockw) == 0) { 255 mp->mutex_owner = (uintptr_t)self; 256 return; 257 } 258 /* 259 * Spin for a while, attempting to acquire the lock. 260 */ 261 if (self->ul_spin_lock_spin != UINT_MAX) 262 self->ul_spin_lock_spin++; 263 if (mutex_queuelock_adaptive(mp) == 0 || 264 set_lock_byte(&mp->mutex_lockw) == 0) { 265 mp->mutex_owner = (uintptr_t)self; 266 return; 267 } 268 /* 269 * Try harder if we were previously at a no premption level. 270 */ 271 if (self->ul_preempt > 1) { 272 if (self->ul_spin_lock_spin2 != UINT_MAX) 273 self->ul_spin_lock_spin2++; 274 if (mutex_queuelock_adaptive(mp) == 0 || 275 set_lock_byte(&mp->mutex_lockw) == 0) { 276 mp->mutex_owner = (uintptr_t)self; 277 return; 278 } 279 } 280 /* 281 * Give up and block in the kernel for the mutex. 282 */ 283 if (self->ul_spin_lock_sleep != UINT_MAX) 284 self->ul_spin_lock_sleep++; 285 (void) ___lwp_mutex_timedlock(mp, NULL); 286 mp->mutex_owner = (uintptr_t)self; 287 } 288 289 void 290 spin_lock_clear(mutex_t *mp) 291 { 292 ulwp_t *self = curthread; 293 294 mp->mutex_owner = 0; 295 if (atomic_swap_32(&mp->mutex_lockword, 0) & WAITERMASK) { 296 (void) ___lwp_mutex_wakeup(mp); 297 if (self->ul_spin_lock_wakeup != UINT_MAX) 298 self->ul_spin_lock_wakeup++; 299 } 300 preempt(self); 301 } 302 303 /* 304 * Allocate the sleep queue hash table. 305 */ 306 void 307 queue_alloc(void) 308 { 309 ulwp_t *self = curthread; 310 uberdata_t *udp = self->ul_uberdata; 311 void *data; 312 int i; 313 314 /* 315 * No locks are needed; we call here only when single-threaded. 316 */ 317 ASSERT(self == udp->ulwp_one); 318 ASSERT(!udp->uberflags.uf_mt); 319 if ((data = _private_mmap(NULL, 2 * QHASHSIZE * sizeof (queue_head_t), 320 PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0)) 321 == MAP_FAILED) 322 thr_panic("cannot allocate thread queue_head table"); 323 udp->queue_head = (queue_head_t *)data; 324 for (i = 0; i < 2 * QHASHSIZE; i++) 325 udp->queue_head[i].qh_lock.mutex_magic = MUTEX_MAGIC; 326 } 327 328 #if defined(THREAD_DEBUG) 329 330 /* 331 * Debugging: verify correctness of a sleep queue. 332 */ 333 void 334 QVERIFY(queue_head_t *qp) 335 { 336 ulwp_t *self = curthread; 337 uberdata_t *udp = self->ul_uberdata; 338 ulwp_t *ulwp; 339 ulwp_t *prev; 340 uint_t index; 341 uint32_t cnt = 0; 342 char qtype; 343 void *wchan; 344 345 ASSERT(qp >= udp->queue_head && (qp - udp->queue_head) < 2 * QHASHSIZE); 346 ASSERT(MUTEX_OWNED(&qp->qh_lock, self)); 347 ASSERT((qp->qh_head != NULL && qp->qh_tail != NULL) || 348 (qp->qh_head == NULL && qp->qh_tail == NULL)); 349 if (!thread_queue_verify) 350 return; 351 /* real expensive stuff, only for _THREAD_QUEUE_VERIFY */ 352 qtype = ((qp - udp->queue_head) < QHASHSIZE)? 
MX : CV; 353 for (prev = NULL, ulwp = qp->qh_head; ulwp != NULL; 354 prev = ulwp, ulwp = ulwp->ul_link, cnt++) { 355 ASSERT(ulwp->ul_qtype == qtype); 356 ASSERT(ulwp->ul_wchan != NULL); 357 ASSERT(ulwp->ul_sleepq == qp); 358 wchan = ulwp->ul_wchan; 359 index = QUEUE_HASH(wchan, qtype); 360 ASSERT(&udp->queue_head[index] == qp); 361 } 362 ASSERT(qp->qh_tail == prev); 363 ASSERT(qp->qh_qlen == cnt); 364 } 365 366 #else /* THREAD_DEBUG */ 367 368 #define QVERIFY(qp) 369 370 #endif /* THREAD_DEBUG */ 371 372 /* 373 * Acquire a queue head. 374 */ 375 queue_head_t * 376 queue_lock(void *wchan, int qtype) 377 { 378 uberdata_t *udp = curthread->ul_uberdata; 379 queue_head_t *qp; 380 381 ASSERT(qtype == MX || qtype == CV); 382 383 /* 384 * It is possible that we could be called while still single-threaded. 385 * If so, we call queue_alloc() to allocate the queue_head[] array. 386 */ 387 if ((qp = udp->queue_head) == NULL) { 388 queue_alloc(); 389 qp = udp->queue_head; 390 } 391 qp += QUEUE_HASH(wchan, qtype); 392 spin_lock_set(&qp->qh_lock); 393 /* 394 * At once per nanosecond, qh_lockcount will wrap after 512 years. 395 * Were we to care about this, we could peg the value at UINT64_MAX. 396 */ 397 qp->qh_lockcount++; 398 QVERIFY(qp); 399 return (qp); 400 } 401 402 /* 403 * Release a queue head. 404 */ 405 void 406 queue_unlock(queue_head_t *qp) 407 { 408 QVERIFY(qp); 409 spin_lock_clear(&qp->qh_lock); 410 } 411 412 /* 413 * For rwlock queueing, we must queue writers ahead of readers of the 414 * same priority. We do this by making writers appear to have a half 415 * point higher priority for purposes of priority comparisons below. 416 */ 417 #define CMP_PRIO(ulwp) ((real_priority(ulwp) << 1) + (ulwp)->ul_writer) 418 419 void 420 enqueue(queue_head_t *qp, ulwp_t *ulwp, void *wchan, int qtype) 421 { 422 ulwp_t **ulwpp; 423 ulwp_t *next; 424 int pri = CMP_PRIO(ulwp); 425 int force_fifo = (qtype & FIFOQ); 426 int do_fifo; 427 428 qtype &= ~FIFOQ; 429 ASSERT(qtype == MX || qtype == CV); 430 ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread)); 431 ASSERT(ulwp->ul_sleepq != qp); 432 433 /* 434 * LIFO queue ordering is unfair and can lead to starvation, 435 * but it gives better performance for heavily contended locks. 436 * We use thread_queue_fifo (range is 0..8) to determine 437 * the frequency of FIFO vs LIFO queuing: 438 * 0 : every 256th time (almost always LIFO) 439 * 1 : every 128th time 440 * 2 : every 64th time 441 * 3 : every 32nd time 442 * 4 : every 16th time (the default value, mostly LIFO) 443 * 5 : every 8th time 444 * 6 : every 4th time 445 * 7 : every 2nd time 446 * 8 : every time (never LIFO, always FIFO) 447 * Note that there is always some degree of FIFO ordering. 448 * This breaks live lock conditions that occur in applications 449 * that are written assuming (incorrectly) that threads acquire 450 * locks fairly, that is, in roughly round-robin order. 451 * In any event, the queue is maintained in priority order. 452 * 453 * If we are given the FIFOQ flag in qtype, fifo queueing is forced. 454 * SUSV3 requires this for semaphores. 455 */ 456 do_fifo = (force_fifo || 457 ((++qp->qh_qcnt << curthread->ul_queue_fifo) & 0xff) == 0); 458 459 if (qp->qh_head == NULL) { 460 /* 461 * The queue is empty. LIFO/FIFO doesn't matter. 462 */ 463 ASSERT(qp->qh_tail == NULL); 464 ulwpp = &qp->qh_head; 465 } else if (do_fifo) { 466 /* 467 * Enqueue after the last thread whose priority is greater 468 * than or equal to the priority of the thread being queued. 
469 * Attempt first to go directly onto the tail of the queue. 470 */ 471 if (pri <= CMP_PRIO(qp->qh_tail)) 472 ulwpp = &qp->qh_tail->ul_link; 473 else { 474 for (ulwpp = &qp->qh_head; (next = *ulwpp) != NULL; 475 ulwpp = &next->ul_link) 476 if (pri > CMP_PRIO(next)) 477 break; 478 } 479 } else { 480 /* 481 * Enqueue before the first thread whose priority is less 482 * than or equal to the priority of the thread being queued. 483 * Hopefully we can go directly onto the head of the queue. 484 */ 485 for (ulwpp = &qp->qh_head; (next = *ulwpp) != NULL; 486 ulwpp = &next->ul_link) 487 if (pri >= CMP_PRIO(next)) 488 break; 489 } 490 if ((ulwp->ul_link = *ulwpp) == NULL) 491 qp->qh_tail = ulwp; 492 *ulwpp = ulwp; 493 494 ulwp->ul_sleepq = qp; 495 ulwp->ul_wchan = wchan; 496 ulwp->ul_qtype = qtype; 497 if (qp->qh_qmax < ++qp->qh_qlen) 498 qp->qh_qmax = qp->qh_qlen; 499 } 500 501 /* 502 * Return a pointer to the queue slot of the 503 * highest priority thread on the queue. 504 * On return, prevp, if not NULL, will contain a pointer 505 * to the thread's predecessor on the queue 506 */ 507 static ulwp_t ** 508 queue_slot(queue_head_t *qp, void *wchan, int *more, ulwp_t **prevp) 509 { 510 ulwp_t **ulwpp; 511 ulwp_t *ulwp; 512 ulwp_t *prev = NULL; 513 ulwp_t **suspp = NULL; 514 ulwp_t *susprev; 515 516 ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread)); 517 518 /* 519 * Find a waiter on the sleep queue. 520 */ 521 for (ulwpp = &qp->qh_head; (ulwp = *ulwpp) != NULL; 522 prev = ulwp, ulwpp = &ulwp->ul_link) { 523 if (ulwp->ul_wchan == wchan) { 524 if (!ulwp->ul_stop) 525 break; 526 /* 527 * Try not to return a suspended thread. 528 * This mimics the old libthread's behavior. 529 */ 530 if (suspp == NULL) { 531 suspp = ulwpp; 532 susprev = prev; 533 } 534 } 535 } 536 537 if (ulwp == NULL && suspp != NULL) { 538 ulwp = *(ulwpp = suspp); 539 prev = susprev; 540 suspp = NULL; 541 } 542 if (ulwp == NULL) { 543 if (more != NULL) 544 *more = 0; 545 return (NULL); 546 } 547 548 if (prevp != NULL) 549 *prevp = prev; 550 if (more == NULL) 551 return (ulwpp); 552 553 /* 554 * Scan the remainder of the queue for another waiter. 555 */ 556 if (suspp != NULL) { 557 *more = 1; 558 return (ulwpp); 559 } 560 for (ulwp = ulwp->ul_link; ulwp != NULL; ulwp = ulwp->ul_link) { 561 if (ulwp->ul_wchan == wchan) { 562 *more = 1; 563 return (ulwpp); 564 } 565 } 566 567 *more = 0; 568 return (ulwpp); 569 } 570 571 ulwp_t * 572 queue_unlink(queue_head_t *qp, ulwp_t **ulwpp, ulwp_t *prev) 573 { 574 ulwp_t *ulwp; 575 576 ulwp = *ulwpp; 577 *ulwpp = ulwp->ul_link; 578 ulwp->ul_link = NULL; 579 if (qp->qh_tail == ulwp) 580 qp->qh_tail = prev; 581 qp->qh_qlen--; 582 ulwp->ul_sleepq = NULL; 583 ulwp->ul_wchan = NULL; 584 585 return (ulwp); 586 } 587 588 ulwp_t * 589 dequeue(queue_head_t *qp, void *wchan, int *more) 590 { 591 ulwp_t **ulwpp; 592 ulwp_t *prev; 593 594 if ((ulwpp = queue_slot(qp, wchan, more, &prev)) == NULL) 595 return (NULL); 596 return (queue_unlink(qp, ulwpp, prev)); 597 } 598 599 /* 600 * Return a pointer to the highest priority thread sleeping on wchan. 
601 */ 602 ulwp_t * 603 queue_waiter(queue_head_t *qp, void *wchan) 604 { 605 ulwp_t **ulwpp; 606 607 if ((ulwpp = queue_slot(qp, wchan, NULL, NULL)) == NULL) 608 return (NULL); 609 return (*ulwpp); 610 } 611 612 uint8_t 613 dequeue_self(queue_head_t *qp, void *wchan) 614 { 615 ulwp_t *self = curthread; 616 ulwp_t **ulwpp; 617 ulwp_t *ulwp; 618 ulwp_t *prev = NULL; 619 int found = 0; 620 int more = 0; 621 622 ASSERT(MUTEX_OWNED(&qp->qh_lock, self)); 623 624 /* find self on the sleep queue */ 625 for (ulwpp = &qp->qh_head; (ulwp = *ulwpp) != NULL; 626 prev = ulwp, ulwpp = &ulwp->ul_link) { 627 if (ulwp == self) { 628 /* dequeue ourself */ 629 ASSERT(self->ul_wchan == wchan); 630 (void) queue_unlink(qp, ulwpp, prev); 631 self->ul_cvmutex = NULL; 632 self->ul_cv_wake = 0; 633 found = 1; 634 break; 635 } 636 if (ulwp->ul_wchan == wchan) 637 more = 1; 638 } 639 640 if (!found) 641 thr_panic("dequeue_self(): curthread not found on queue"); 642 643 if (more) 644 return (1); 645 646 /* scan the remainder of the queue for another waiter */ 647 for (ulwp = *ulwpp; ulwp != NULL; ulwp = ulwp->ul_link) { 648 if (ulwp->ul_wchan == wchan) 649 return (1); 650 } 651 652 return (0); 653 } 654 655 /* 656 * Called from call_user_handler() and _thrp_suspend() to take 657 * ourself off of our sleep queue so we can grab locks. 658 */ 659 void 660 unsleep_self(void) 661 { 662 ulwp_t *self = curthread; 663 queue_head_t *qp; 664 665 /* 666 * Calling enter_critical()/exit_critical() here would lead 667 * to recursion. Just manipulate self->ul_critical directly. 668 */ 669 self->ul_critical++; 670 while (self->ul_sleepq != NULL) { 671 qp = queue_lock(self->ul_wchan, self->ul_qtype); 672 /* 673 * We may have been moved from a CV queue to a 674 * mutex queue while we were attempting queue_lock(). 675 * If so, just loop around and try again. 676 * dequeue_self() clears self->ul_sleepq. 677 */ 678 if (qp == self->ul_sleepq) { 679 (void) dequeue_self(qp, self->ul_wchan); 680 self->ul_writer = 0; 681 } 682 queue_unlock(qp); 683 } 684 self->ul_critical--; 685 } 686 687 /* 688 * Common code for calling the the ___lwp_mutex_timedlock() system call. 689 * Returns with mutex_owner and mutex_ownerpid set correctly. 690 */ 691 int 692 mutex_lock_kernel(mutex_t *mp, timespec_t *tsp, tdb_mutex_stats_t *msp) 693 { 694 ulwp_t *self = curthread; 695 uberdata_t *udp = self->ul_uberdata; 696 hrtime_t begin_sleep; 697 int error; 698 699 self->ul_sp = stkptr(); 700 self->ul_wchan = mp; 701 if (__td_event_report(self, TD_SLEEP, udp)) { 702 self->ul_td_evbuf.eventnum = TD_SLEEP; 703 self->ul_td_evbuf.eventdata = mp; 704 tdb_event(TD_SLEEP, udp); 705 } 706 if (msp) { 707 tdb_incr(msp->mutex_sleep); 708 begin_sleep = gethrtime(); 709 } 710 711 DTRACE_PROBE1(plockstat, mutex__block, mp); 712 713 for (;;) { 714 if ((error = ___lwp_mutex_timedlock(mp, tsp)) != 0) { 715 DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0); 716 DTRACE_PROBE2(plockstat, mutex__error, mp, error); 717 break; 718 } 719 720 if (mp->mutex_type & (USYNC_PROCESS | USYNC_PROCESS_ROBUST)) { 721 /* 722 * Defend against forkall(). We may be the child, 723 * in which case we don't actually own the mutex. 
724 */ 725 enter_critical(self); 726 if (mp->mutex_ownerpid == udp->pid) { 727 mp->mutex_owner = (uintptr_t)self; 728 exit_critical(self); 729 DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1); 730 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 731 0, 0); 732 break; 733 } 734 exit_critical(self); 735 } else { 736 mp->mutex_owner = (uintptr_t)self; 737 DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1); 738 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 739 break; 740 } 741 } 742 if (msp) 743 msp->mutex_sleep_time += gethrtime() - begin_sleep; 744 self->ul_wchan = NULL; 745 self->ul_sp = 0; 746 747 return (error); 748 } 749 750 /* 751 * Common code for calling the ___lwp_mutex_trylock() system call. 752 * Returns with mutex_owner and mutex_ownerpid set correctly. 753 */ 754 int 755 mutex_trylock_kernel(mutex_t *mp) 756 { 757 ulwp_t *self = curthread; 758 uberdata_t *udp = self->ul_uberdata; 759 int error; 760 761 for (;;) { 762 if ((error = ___lwp_mutex_trylock(mp)) != 0) { 763 if (error != EBUSY) { 764 DTRACE_PROBE2(plockstat, mutex__error, mp, 765 error); 766 } 767 break; 768 } 769 770 if (mp->mutex_type & (USYNC_PROCESS | USYNC_PROCESS_ROBUST)) { 771 /* 772 * Defend against forkall(). We may be the child, 773 * in which case we don't actually own the mutex. 774 */ 775 enter_critical(self); 776 if (mp->mutex_ownerpid == udp->pid) { 777 mp->mutex_owner = (uintptr_t)self; 778 exit_critical(self); 779 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 780 0, 0); 781 break; 782 } 783 exit_critical(self); 784 } else { 785 mp->mutex_owner = (uintptr_t)self; 786 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 787 break; 788 } 789 } 790 791 return (error); 792 } 793 794 volatile sc_shared_t * 795 setup_schedctl(void) 796 { 797 ulwp_t *self = curthread; 798 volatile sc_shared_t *scp; 799 sc_shared_t *tmp; 800 801 if ((scp = self->ul_schedctl) == NULL && /* no shared state yet */ 802 !self->ul_vfork && /* not a child of vfork() */ 803 !self->ul_schedctl_called) { /* haven't been called before */ 804 enter_critical(self); 805 self->ul_schedctl_called = &self->ul_uberdata->uberflags; 806 if ((tmp = __schedctl()) != (sc_shared_t *)(-1)) 807 self->ul_schedctl = scp = tmp; 808 exit_critical(self); 809 } 810 /* 811 * Unless the call to setup_schedctl() is surrounded 812 * by enter_critical()/exit_critical(), the address 813 * we are returning could be invalid due to a forkall() 814 * having occurred in another thread. 815 */ 816 return (scp); 817 } 818 819 /* 820 * Interfaces from libsched, incorporated into libc. 821 * libsched.so.1 is now a filter library onto libc. 822 */ 823 #pragma weak schedctl_lookup = _schedctl_init 824 #pragma weak _schedctl_lookup = _schedctl_init 825 #pragma weak schedctl_init = _schedctl_init 826 schedctl_t * 827 _schedctl_init(void) 828 { 829 volatile sc_shared_t *scp = setup_schedctl(); 830 return ((scp == NULL)? NULL : (schedctl_t *)&scp->sc_preemptctl); 831 } 832 833 #pragma weak schedctl_exit = _schedctl_exit 834 void 835 _schedctl_exit(void) 836 { 837 } 838 839 /* 840 * Contract private interface for java. 841 * Set up the schedctl data if it doesn't exist yet. 842 * Return a pointer to the pointer to the schedctl data. 843 */ 844 volatile sc_shared_t *volatile * 845 _thr_schedctl(void) 846 { 847 ulwp_t *self = curthread; 848 volatile sc_shared_t *volatile *ptr; 849 850 if (self->ul_vfork) 851 return (NULL); 852 if (*(ptr = &self->ul_schedctl) == NULL) 853 (void) setup_schedctl(); 854 return (ptr); 855 } 856 857 /* 858 * Block signals and attempt to block preemption. 
859 * no_preempt()/preempt() must be used in pairs but can be nested. 860 */ 861 void 862 no_preempt(ulwp_t *self) 863 { 864 volatile sc_shared_t *scp; 865 866 if (self->ul_preempt++ == 0) { 867 enter_critical(self); 868 if ((scp = self->ul_schedctl) != NULL || 869 (scp = setup_schedctl()) != NULL) { 870 /* 871 * Save the pre-existing preempt value. 872 */ 873 self->ul_savpreempt = scp->sc_preemptctl.sc_nopreempt; 874 scp->sc_preemptctl.sc_nopreempt = 1; 875 } 876 } 877 } 878 879 /* 880 * Undo the effects of no_preempt(). 881 */ 882 void 883 preempt(ulwp_t *self) 884 { 885 volatile sc_shared_t *scp; 886 887 ASSERT(self->ul_preempt > 0); 888 if (--self->ul_preempt == 0) { 889 if ((scp = self->ul_schedctl) != NULL) { 890 /* 891 * Restore the pre-existing preempt value. 892 */ 893 scp->sc_preemptctl.sc_nopreempt = self->ul_savpreempt; 894 if (scp->sc_preemptctl.sc_yield && 895 scp->sc_preemptctl.sc_nopreempt == 0) { 896 lwp_yield(); 897 if (scp->sc_preemptctl.sc_yield) { 898 /* 899 * Shouldn't happen. This is either 900 * a race condition or the thread 901 * just entered the real-time class. 902 */ 903 lwp_yield(); 904 scp->sc_preemptctl.sc_yield = 0; 905 } 906 } 907 } 908 exit_critical(self); 909 } 910 } 911 912 /* 913 * If a call to preempt() would cause the current thread to yield or to 914 * take deferred actions in exit_critical(), then unpark the specified 915 * lwp so it can run while we delay. Return the original lwpid if the 916 * unpark was not performed, else return zero. The tests are a repeat 917 * of some of the tests in preempt(), above. This is a statistical 918 * optimization solely for cond_sleep_queue(), below. 919 */ 920 static lwpid_t 921 preempt_unpark(ulwp_t *self, lwpid_t lwpid) 922 { 923 volatile sc_shared_t *scp = self->ul_schedctl; 924 925 ASSERT(self->ul_preempt == 1 && self->ul_critical > 0); 926 if ((scp != NULL && scp->sc_preemptctl.sc_yield) || 927 (self->ul_curplease && self->ul_critical == 1)) { 928 (void) __lwp_unpark(lwpid); 929 lwpid = 0; 930 } 931 return (lwpid); 932 } 933 934 /* 935 * Spin for a while, trying to grab the lock. We know that we 936 * failed set_lock_byte(&mp->mutex_lockw) once before coming here. 937 * If this fails, return EBUSY and let the caller deal with it. 938 * If this succeeds, return 0 with mutex_owner set to curthread. 939 */ 940 int 941 mutex_trylock_adaptive(mutex_t *mp) 942 { 943 ulwp_t *self = curthread; 944 ulwp_t *ulwp; 945 volatile sc_shared_t *scp; 946 volatile uint8_t *lockp; 947 volatile uint64_t *ownerp; 948 int count, max = self->ul_adaptive_spin; 949 950 ASSERT(!(mp->mutex_type & (USYNC_PROCESS | USYNC_PROCESS_ROBUST))); 951 952 if (max == 0 || (mp->mutex_spinners >= self->ul_max_spinners)) 953 return (EBUSY); 954 955 lockp = (volatile uint8_t *)&mp->mutex_lockw; 956 ownerp = (volatile uint64_t *)&mp->mutex_owner; 957 958 DTRACE_PROBE1(plockstat, mutex__spin, mp); 959 960 /* 961 * This spin loop is unfair to lwps that have already dropped into 962 * the kernel to sleep. They will starve on a highly-contended mutex. 963 * This is just too bad. The adaptive spin algorithm is intended 964 * to allow programs with highly-contended locks (that is, broken 965 * programs) to execute with reasonable speed despite their contention. 966 * Being fair would reduce the speed of such programs and well-written 967 * programs will not suffer in any case. 
968 */ 969 enter_critical(self); /* protects ul_schedctl */ 970 atomic_inc_32(&mp->mutex_spinners); 971 for (count = 0; count < max; count++) { 972 if (*lockp == 0 && set_lock_byte(lockp) == 0) { 973 *ownerp = (uintptr_t)self; 974 atomic_dec_32(&mp->mutex_spinners); 975 exit_critical(self); 976 DTRACE_PROBE2(plockstat, mutex__spun, 1, count); 977 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count); 978 return (0); 979 } 980 SMT_PAUSE(); 981 /* 982 * Stop spinning if the mutex owner is not running on 983 * a processor; it will not drop the lock any time soon 984 * and we would just be wasting time to keep spinning. 985 * 986 * Note that we are looking at another thread (ulwp_t) 987 * without ensuring that the other thread does not exit. 988 * The scheme relies on ulwp_t structures never being 989 * deallocated by the library (the library employs a free 990 * list of ulwp_t structs that are reused when new threads 991 * are created) and on schedctl shared memory never being 992 * deallocated once created via __schedctl(). 993 * 994 * Thus, the worst that can happen when the spinning thread 995 * looks at the owner's schedctl data is that it is looking 996 * at some other thread's schedctl data. This almost never 997 * happens and is benign when it does. 998 */ 999 if ((ulwp = (ulwp_t *)(uintptr_t)*ownerp) != NULL && 1000 ((scp = ulwp->ul_schedctl) == NULL || 1001 scp->sc_state != SC_ONPROC)) 1002 break; 1003 } 1004 atomic_dec_32(&mp->mutex_spinners); 1005 exit_critical(self); 1006 1007 DTRACE_PROBE2(plockstat, mutex__spun, 0, count); 1008 1009 return (EBUSY); 1010 } 1011 1012 /* 1013 * Same as mutex_trylock_adaptive(), except specifically for queue locks. 1014 * The owner field is not set here; the caller (spin_lock_set()) sets it. 1015 */ 1016 int 1017 mutex_queuelock_adaptive(mutex_t *mp) 1018 { 1019 ulwp_t *ulwp; 1020 volatile sc_shared_t *scp; 1021 volatile uint8_t *lockp; 1022 volatile uint64_t *ownerp; 1023 int count = curthread->ul_queue_spin; 1024 1025 ASSERT(mp->mutex_type == USYNC_THREAD); 1026 1027 if (count == 0) 1028 return (EBUSY); 1029 1030 lockp = (volatile uint8_t *)&mp->mutex_lockw; 1031 ownerp = (volatile uint64_t *)&mp->mutex_owner; 1032 while (--count >= 0) { 1033 if (*lockp == 0 && set_lock_byte(lockp) == 0) 1034 return (0); 1035 SMT_PAUSE(); 1036 if ((ulwp = (ulwp_t *)(uintptr_t)*ownerp) != NULL && 1037 ((scp = ulwp->ul_schedctl) == NULL || 1038 scp->sc_state != SC_ONPROC)) 1039 break; 1040 } 1041 1042 return (EBUSY); 1043 } 1044 1045 /* 1046 * Like mutex_trylock_adaptive(), but for process-shared mutexes. 1047 * Spin for a while, trying to grab the lock. We know that we 1048 * failed set_lock_byte(&mp->mutex_lockw) once before coming here. 1049 * If this fails, return EBUSY and let the caller deal with it. 1050 * If this succeeds, return 0 with mutex_owner set to curthread 1051 * and mutex_ownerpid set to the current pid. 1052 */ 1053 int 1054 mutex_trylock_process(mutex_t *mp) 1055 { 1056 ulwp_t *self = curthread; 1057 uberdata_t *udp = self->ul_uberdata; 1058 int count; 1059 volatile uint8_t *lockp; 1060 volatile uint64_t *ownerp; 1061 volatile int32_t *pidp; 1062 pid_t pid, newpid; 1063 uint64_t owner, newowner; 1064 1065 if ((count = ncpus) == 0) 1066 count = ncpus = (int)_sysconf(_SC_NPROCESSORS_ONLN); 1067 count = (count > 1)? 
self->ul_adaptive_spin : 0; 1068 1069 ASSERT((mp->mutex_type & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 1070 USYNC_PROCESS); 1071 1072 if (count == 0) 1073 return (EBUSY); 1074 1075 lockp = (volatile uint8_t *)&mp->mutex_lockw; 1076 ownerp = (volatile uint64_t *)&mp->mutex_owner; 1077 pidp = (volatile int32_t *)&mp->mutex_ownerpid; 1078 owner = *ownerp; 1079 pid = *pidp; 1080 /* 1081 * This is a process-shared mutex. 1082 * We cannot know if the owner is running on a processor. 1083 * We just spin and hope that it is on a processor. 1084 */ 1085 while (--count >= 0) { 1086 if (*lockp == 0) { 1087 enter_critical(self); 1088 if (set_lock_byte(lockp) == 0) { 1089 *ownerp = (uintptr_t)self; 1090 *pidp = udp->pid; 1091 exit_critical(self); 1092 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 1093 0, 0); 1094 return (0); 1095 } 1096 exit_critical(self); 1097 } else if ((newowner = *ownerp) == owner && 1098 (newpid = *pidp) == pid) { 1099 SMT_PAUSE(); 1100 continue; 1101 } 1102 /* 1103 * The owner of the lock changed; start the count over again. 1104 * This may be too aggressive; it needs testing. 1105 */ 1106 owner = newowner; 1107 pid = newpid; 1108 count = self->ul_adaptive_spin; 1109 } 1110 1111 return (EBUSY); 1112 } 1113 1114 /* 1115 * Mutex wakeup code for releasing a USYNC_THREAD mutex. 1116 * Returns the lwpid of the thread that was dequeued, if any. 1117 * The caller of mutex_wakeup() must call __lwp_unpark(lwpid) 1118 * to wake up the specified lwp. 1119 */ 1120 lwpid_t 1121 mutex_wakeup(mutex_t *mp) 1122 { 1123 lwpid_t lwpid = 0; 1124 queue_head_t *qp; 1125 ulwp_t *ulwp; 1126 int more; 1127 1128 /* 1129 * Dequeue a waiter from the sleep queue. Don't touch the mutex 1130 * waiters bit if no one was found on the queue because the mutex 1131 * might have been deallocated or reallocated for another purpose. 1132 */ 1133 qp = queue_lock(mp, MX); 1134 if ((ulwp = dequeue(qp, mp, &more)) != NULL) { 1135 lwpid = ulwp->ul_lwpid; 1136 mp->mutex_waiters = (more? 1 : 0); 1137 } 1138 queue_unlock(qp); 1139 return (lwpid); 1140 } 1141 1142 /* 1143 * Spin for a while, testing to see if the lock has been grabbed. 1144 * If this fails, call mutex_wakeup() to release a waiter. 1145 */ 1146 lwpid_t 1147 mutex_unlock_queue(mutex_t *mp) 1148 { 1149 ulwp_t *self = curthread; 1150 uint32_t *lockw = &mp->mutex_lockword; 1151 lwpid_t lwpid; 1152 volatile uint8_t *lockp; 1153 volatile uint32_t *spinp; 1154 int count; 1155 1156 /* 1157 * We use the swap primitive to clear the lock, but we must 1158 * atomically retain the waiters bit for the remainder of this 1159 * code to work. We first check to see if the waiters bit is 1160 * set and if so clear the lock by swapping in a word containing 1161 * only the waiters bit. This could produce a false positive test 1162 * for whether there are waiters that need to be waked up, but 1163 * this just causes an extra call to mutex_wakeup() to do nothing. 1164 * The opposite case is more delicate: If there are no waiters, 1165 * we swap in a zero lock byte and a zero waiters bit. The result 1166 * of the swap could indicate that there really was a waiter so in 1167 * this case we go directly to mutex_wakeup() without performing 1168 * any of the adaptive code because the waiter bit has been cleared 1169 * and the adaptive code is unreliable in this case. 
1170 */ 1171 if (!(*lockw & WAITERMASK)) { /* no waiter exists right now */ 1172 mp->mutex_owner = 0; 1173 DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 1174 if (!(atomic_swap_32(lockw, 0) & WAITERMASK)) 1175 return (0); /* still no waiters */ 1176 no_preempt(self); /* ensure a prompt wakeup */ 1177 lwpid = mutex_wakeup(mp); 1178 } else { 1179 no_preempt(self); /* ensure a prompt wakeup */ 1180 lockp = (volatile uint8_t *)&mp->mutex_lockw; 1181 spinp = (volatile uint32_t *)&mp->mutex_spinners; 1182 mp->mutex_owner = 0; 1183 DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 1184 /* clear lock, retain waiter */ 1185 (void) atomic_swap_32(lockw, WAITER); 1186 1187 /* 1188 * We spin here fewer times than mutex_trylock_adaptive(). 1189 * We are trying to balance two conflicting goals: 1190 * 1. Avoid waking up anyone if a spinning thread 1191 * grabs the lock. 1192 * 2. Wake up a sleeping thread promptly to get on 1193 * with useful work. 1194 * We don't spin at all if there is no acquiring spinner; 1195 * (mp->mutex_spinners is non-zero if there are spinners). 1196 */ 1197 for (count = self->ul_release_spin; 1198 *spinp && count > 0; count--) { 1199 /* 1200 * There is a waiter that we will have to wake 1201 * up unless someone else grabs the lock while 1202 * we are busy spinning. Like the spin loop in 1203 * mutex_trylock_adaptive(), this spin loop is 1204 * unfair to lwps that have already dropped into 1205 * the kernel to sleep. They will starve on a 1206 * highly-contended mutex. Too bad. 1207 */ 1208 if (*lockp != 0) { /* somebody grabbed the lock */ 1209 preempt(self); 1210 return (0); 1211 } 1212 SMT_PAUSE(); 1213 } 1214 1215 /* 1216 * No one grabbed the lock. 1217 * Wake up some lwp that is waiting for it. 1218 */ 1219 mp->mutex_waiters = 0; 1220 lwpid = mutex_wakeup(mp); 1221 } 1222 1223 if (lwpid == 0) 1224 preempt(self); 1225 return (lwpid); 1226 } 1227 1228 /* 1229 * Like mutex_unlock_queue(), but for process-shared mutexes. 1230 * We tested the waiters field before calling here and it was non-zero. 1231 */ 1232 void 1233 mutex_unlock_process(mutex_t *mp) 1234 { 1235 ulwp_t *self = curthread; 1236 int count; 1237 volatile uint8_t *lockp; 1238 1239 /* 1240 * See the comments in mutex_unlock_queue(), above. 1241 */ 1242 if ((count = ncpus) == 0) 1243 count = ncpus = (int)_sysconf(_SC_NPROCESSORS_ONLN); 1244 count = (count > 1)? self->ul_release_spin : 0; 1245 no_preempt(self); 1246 mp->mutex_owner = 0; 1247 mp->mutex_ownerpid = 0; 1248 DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 1249 if (count == 0) { 1250 /* clear lock, test waiter */ 1251 if (!(atomic_swap_32(&mp->mutex_lockword, 0) & WAITERMASK)) { 1252 /* no waiters now */ 1253 preempt(self); 1254 return; 1255 } 1256 } else { 1257 /* clear lock, retain waiter */ 1258 (void) atomic_swap_32(&mp->mutex_lockword, WAITER); 1259 lockp = (volatile uint8_t *)&mp->mutex_lockw; 1260 while (--count >= 0) { 1261 if (*lockp != 0) { 1262 /* somebody grabbed the lock */ 1263 preempt(self); 1264 return; 1265 } 1266 SMT_PAUSE(); 1267 } 1268 /* 1269 * We must clear the waiters field before going 1270 * to the kernel, else it could remain set forever. 1271 */ 1272 mp->mutex_waiters = 0; 1273 } 1274 (void) ___lwp_mutex_wakeup(mp); 1275 preempt(self); 1276 } 1277 1278 /* 1279 * Return the real priority of a thread. 1280 */ 1281 int 1282 real_priority(ulwp_t *ulwp) 1283 { 1284 if (ulwp->ul_epri == 0) 1285 return (ulwp->ul_mappedpri? ulwp->ul_mappedpri : ulwp->ul_pri); 1286 return (ulwp->ul_emappedpri? 
ulwp->ul_emappedpri : ulwp->ul_epri); 1287 } 1288 1289 void 1290 stall(void) 1291 { 1292 for (;;) 1293 (void) mutex_lock_kernel(&stall_mutex, NULL, NULL); 1294 } 1295 1296 /* 1297 * Acquire a USYNC_THREAD mutex via user-level sleep queues. 1298 * We failed set_lock_byte(&mp->mutex_lockw) before coming here. 1299 * Returns with mutex_owner set correctly. 1300 */ 1301 int 1302 mutex_lock_queue(ulwp_t *self, tdb_mutex_stats_t *msp, mutex_t *mp, 1303 timespec_t *tsp) 1304 { 1305 uberdata_t *udp = curthread->ul_uberdata; 1306 queue_head_t *qp; 1307 hrtime_t begin_sleep; 1308 int error = 0; 1309 1310 self->ul_sp = stkptr(); 1311 if (__td_event_report(self, TD_SLEEP, udp)) { 1312 self->ul_wchan = mp; 1313 self->ul_td_evbuf.eventnum = TD_SLEEP; 1314 self->ul_td_evbuf.eventdata = mp; 1315 tdb_event(TD_SLEEP, udp); 1316 } 1317 if (msp) { 1318 tdb_incr(msp->mutex_sleep); 1319 begin_sleep = gethrtime(); 1320 } 1321 1322 DTRACE_PROBE1(plockstat, mutex__block, mp); 1323 1324 /* 1325 * Put ourself on the sleep queue, and while we are 1326 * unable to grab the lock, go park in the kernel. 1327 * Take ourself off the sleep queue after we acquire the lock. 1328 * The waiter bit can be set/cleared only while holding the queue lock. 1329 */ 1330 qp = queue_lock(mp, MX); 1331 enqueue(qp, self, mp, MX); 1332 mp->mutex_waiters = 1; 1333 for (;;) { 1334 if (set_lock_byte(&mp->mutex_lockw) == 0) { 1335 mp->mutex_owner = (uintptr_t)self; 1336 DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1); 1337 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 1338 mp->mutex_waiters = dequeue_self(qp, mp); 1339 break; 1340 } 1341 set_parking_flag(self, 1); 1342 queue_unlock(qp); 1343 /* 1344 * __lwp_park() will return the residual time in tsp 1345 * if we are unparked before the timeout expires. 1346 */ 1347 if ((error = __lwp_park(tsp, 0)) == EINTR) 1348 error = 0; 1349 set_parking_flag(self, 0); 1350 /* 1351 * We could have taken a signal or suspended ourself. 1352 * If we did, then we removed ourself from the queue. 1353 * Someone else may have removed us from the queue 1354 * as a consequence of mutex_unlock(). We may have 1355 * gotten a timeout from __lwp_park(). Or we may still 1356 * be on the queue and this is just a spurious wakeup. 1357 */ 1358 qp = queue_lock(mp, MX); 1359 if (self->ul_sleepq == NULL) { 1360 if (error) { 1361 DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0); 1362 DTRACE_PROBE2(plockstat, mutex__error, mp, 1363 error); 1364 break; 1365 } 1366 if (set_lock_byte(&mp->mutex_lockw) == 0) { 1367 mp->mutex_owner = (uintptr_t)self; 1368 DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1); 1369 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 1370 0, 0); 1371 break; 1372 } 1373 enqueue(qp, self, mp, MX); 1374 mp->mutex_waiters = 1; 1375 } 1376 ASSERT(self->ul_sleepq == qp && 1377 self->ul_qtype == MX && 1378 self->ul_wchan == mp); 1379 if (error) { 1380 mp->mutex_waiters = dequeue_self(qp, mp); 1381 DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0); 1382 DTRACE_PROBE2(plockstat, mutex__error, mp, error); 1383 break; 1384 } 1385 } 1386 1387 ASSERT(self->ul_sleepq == NULL && self->ul_link == NULL && 1388 self->ul_wchan == NULL); 1389 self->ul_sp = 0; 1390 1391 queue_unlock(qp); 1392 if (msp) 1393 msp->mutex_sleep_time += gethrtime() - begin_sleep; 1394 1395 ASSERT(error == 0 || error == EINVAL || error == ETIME); 1396 return (error); 1397 } 1398 1399 /* 1400 * Returns with mutex_owner set correctly. 
1401 */ 1402 int 1403 mutex_lock_internal(mutex_t *mp, timespec_t *tsp, int try) 1404 { 1405 ulwp_t *self = curthread; 1406 uberdata_t *udp = self->ul_uberdata; 1407 int mtype = mp->mutex_type; 1408 tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp); 1409 int error = 0; 1410 1411 ASSERT(try == MUTEX_TRY || try == MUTEX_LOCK); 1412 1413 if (!self->ul_schedctl_called) 1414 (void) setup_schedctl(); 1415 1416 if (msp && try == MUTEX_TRY) 1417 tdb_incr(msp->mutex_try); 1418 1419 if ((mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK)) && mutex_is_held(mp)) { 1420 if (mtype & LOCK_RECURSIVE) { 1421 if (mp->mutex_rcount == RECURSION_MAX) { 1422 error = EAGAIN; 1423 } else { 1424 mp->mutex_rcount++; 1425 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 1426 1, 0); 1427 return (0); 1428 } 1429 } else if (try == MUTEX_TRY) { 1430 return (EBUSY); 1431 } else { 1432 DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK); 1433 return (EDEADLK); 1434 } 1435 } 1436 1437 if (self->ul_error_detection && try == MUTEX_LOCK && 1438 tsp == NULL && mutex_is_held(mp)) 1439 lock_error(mp, "mutex_lock", NULL, NULL); 1440 1441 if (mtype & 1442 (USYNC_PROCESS_ROBUST|PTHREAD_PRIO_INHERIT|PTHREAD_PRIO_PROTECT)) { 1443 uint8_t ceil; 1444 int myprio; 1445 1446 if (mtype & PTHREAD_PRIO_PROTECT) { 1447 ceil = mp->mutex_ceiling; 1448 ASSERT(_validate_rt_prio(SCHED_FIFO, ceil) == 0); 1449 myprio = real_priority(self); 1450 if (myprio > ceil) { 1451 DTRACE_PROBE2(plockstat, mutex__error, mp, 1452 EINVAL); 1453 return (EINVAL); 1454 } 1455 if ((error = _ceil_mylist_add(mp)) != 0) { 1456 DTRACE_PROBE2(plockstat, mutex__error, mp, 1457 error); 1458 return (error); 1459 } 1460 if (myprio < ceil) 1461 _ceil_prio_inherit(ceil); 1462 } 1463 1464 if (mtype & PTHREAD_PRIO_INHERIT) { 1465 /* go straight to the kernel */ 1466 if (try == MUTEX_TRY) 1467 error = mutex_trylock_kernel(mp); 1468 else /* MUTEX_LOCK */ 1469 error = mutex_lock_kernel(mp, tsp, msp); 1470 /* 1471 * The kernel never sets or clears the lock byte 1472 * for PTHREAD_PRIO_INHERIT mutexes. 1473 * Set it here for debugging consistency. 1474 */ 1475 switch (error) { 1476 case 0: 1477 case EOWNERDEAD: 1478 mp->mutex_lockw = LOCKSET; 1479 break; 1480 } 1481 } else if (mtype & USYNC_PROCESS_ROBUST) { 1482 /* go straight to the kernel */ 1483 if (try == MUTEX_TRY) 1484 error = mutex_trylock_kernel(mp); 1485 else /* MUTEX_LOCK */ 1486 error = mutex_lock_kernel(mp, tsp, msp); 1487 } else { /* PTHREAD_PRIO_PROTECT */ 1488 /* 1489 * Try once at user level before going to the kernel. 1490 * If this is a process shared mutex then protect 1491 * against forkall() while setting mp->mutex_ownerpid. 
1492 */ 1493 if (mtype & (USYNC_PROCESS | USYNC_PROCESS_ROBUST)) { 1494 enter_critical(self); 1495 if (set_lock_byte(&mp->mutex_lockw) == 0) { 1496 mp->mutex_owner = (uintptr_t)self; 1497 mp->mutex_ownerpid = udp->pid; 1498 exit_critical(self); 1499 DTRACE_PROBE3(plockstat, 1500 mutex__acquire, mp, 0, 0); 1501 } else { 1502 exit_critical(self); 1503 error = EBUSY; 1504 } 1505 } else { 1506 if (set_lock_byte(&mp->mutex_lockw) == 0) { 1507 mp->mutex_owner = (uintptr_t)self; 1508 DTRACE_PROBE3(plockstat, 1509 mutex__acquire, mp, 0, 0); 1510 } else { 1511 error = EBUSY; 1512 } 1513 } 1514 if (error && try == MUTEX_LOCK) 1515 error = mutex_lock_kernel(mp, tsp, msp); 1516 } 1517 1518 if (error) { 1519 if (mtype & PTHREAD_PRIO_INHERIT) { 1520 switch (error) { 1521 case EOWNERDEAD: 1522 case ENOTRECOVERABLE: 1523 if (mtype & PTHREAD_MUTEX_ROBUST_NP) 1524 break; 1525 if (error == EOWNERDEAD) { 1526 /* 1527 * We own the mutex; unlock it. 1528 * It becomes ENOTRECOVERABLE. 1529 * All waiters are waked up. 1530 */ 1531 mp->mutex_owner = 0; 1532 mp->mutex_ownerpid = 0; 1533 DTRACE_PROBE2(plockstat, 1534 mutex__release, mp, 0); 1535 mp->mutex_lockw = LOCKCLEAR; 1536 (void) ___lwp_mutex_unlock(mp); 1537 } 1538 /* FALLTHROUGH */ 1539 case EDEADLK: 1540 if (try == MUTEX_LOCK) 1541 stall(); 1542 error = EBUSY; 1543 break; 1544 } 1545 } 1546 if ((mtype & PTHREAD_PRIO_PROTECT) && 1547 error != EOWNERDEAD) { 1548 (void) _ceil_mylist_del(mp); 1549 if (myprio < ceil) 1550 _ceil_prio_waive(); 1551 } 1552 } 1553 } else if (mtype & USYNC_PROCESS) { 1554 /* 1555 * This is a process shared mutex. Protect against 1556 * forkall() while setting mp->mutex_ownerpid. 1557 */ 1558 enter_critical(self); 1559 if (set_lock_byte(&mp->mutex_lockw) == 0) { 1560 mp->mutex_owner = (uintptr_t)self; 1561 mp->mutex_ownerpid = udp->pid; 1562 exit_critical(self); 1563 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 1564 } else { 1565 /* try a little harder */ 1566 exit_critical(self); 1567 error = mutex_trylock_process(mp); 1568 } 1569 if (error && try == MUTEX_LOCK) 1570 error = mutex_lock_kernel(mp, tsp, msp); 1571 } else { /* USYNC_THREAD */ 1572 /* try once */ 1573 if (set_lock_byte(&mp->mutex_lockw) == 0) { 1574 mp->mutex_owner = (uintptr_t)self; 1575 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 1576 } else { 1577 /* try a little harder if we don't own the mutex */ 1578 error = EBUSY; 1579 if (MUTEX_OWNER(mp) != self) 1580 error = mutex_trylock_adaptive(mp); 1581 if (error && try == MUTEX_LOCK) /* go park */ 1582 error = mutex_lock_queue(self, msp, mp, tsp); 1583 } 1584 } 1585 1586 switch (error) { 1587 case EOWNERDEAD: 1588 case ELOCKUNMAPPED: 1589 mp->mutex_owner = (uintptr_t)self; 1590 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 1591 /* FALLTHROUGH */ 1592 case 0: 1593 if (msp) 1594 record_begin_hold(msp); 1595 break; 1596 default: 1597 if (try == MUTEX_TRY) { 1598 if (msp) 1599 tdb_incr(msp->mutex_try_fail); 1600 if (__td_event_report(self, TD_LOCK_TRY, udp)) { 1601 self->ul_td_evbuf.eventnum = TD_LOCK_TRY; 1602 tdb_event(TD_LOCK_TRY, udp); 1603 } 1604 } 1605 break; 1606 } 1607 1608 return (error); 1609 } 1610 1611 int 1612 fast_process_lock(mutex_t *mp, timespec_t *tsp, int mtype, int try) 1613 { 1614 ulwp_t *self = curthread; 1615 uberdata_t *udp = self->ul_uberdata; 1616 1617 /* 1618 * We know that USYNC_PROCESS is set in mtype and that 1619 * zero, one, or both of the flags LOCK_RECURSIVE and 1620 * LOCK_ERRORCHECK are set, and that no other flags are set. 
1621 */ 1622 enter_critical(self); 1623 if (set_lock_byte(&mp->mutex_lockw) == 0) { 1624 mp->mutex_owner = (uintptr_t)self; 1625 mp->mutex_ownerpid = udp->pid; 1626 exit_critical(self); 1627 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 1628 return (0); 1629 } 1630 exit_critical(self); 1631 1632 if ((mtype & ~USYNC_PROCESS) && shared_mutex_held(mp)) { 1633 if (mtype & LOCK_RECURSIVE) { 1634 if (mp->mutex_rcount == RECURSION_MAX) 1635 return (EAGAIN); 1636 mp->mutex_rcount++; 1637 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 1, 0); 1638 return (0); 1639 } 1640 if (try == MUTEX_LOCK) { 1641 DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK); 1642 return (EDEADLK); 1643 } 1644 return (EBUSY); 1645 } 1646 1647 /* try a little harder if we don't own the mutex */ 1648 if (!shared_mutex_held(mp) && mutex_trylock_process(mp) == 0) 1649 return (0); 1650 1651 if (try == MUTEX_LOCK) 1652 return (mutex_lock_kernel(mp, tsp, NULL)); 1653 1654 if (__td_event_report(self, TD_LOCK_TRY, udp)) { 1655 self->ul_td_evbuf.eventnum = TD_LOCK_TRY; 1656 tdb_event(TD_LOCK_TRY, udp); 1657 } 1658 return (EBUSY); 1659 } 1660 1661 static int 1662 slow_lock(ulwp_t *self, mutex_t *mp, timespec_t *tsp) 1663 { 1664 int error = 0; 1665 1666 if (MUTEX_OWNER(mp) == self || mutex_trylock_adaptive(mp) != 0) 1667 error = mutex_lock_queue(self, NULL, mp, tsp); 1668 return (error); 1669 } 1670 1671 int 1672 mutex_lock_impl(mutex_t *mp, timespec_t *tsp) 1673 { 1674 ulwp_t *self = curthread; 1675 uberdata_t *udp = self->ul_uberdata; 1676 uberflags_t *gflags; 1677 int mtype; 1678 1679 /* 1680 * Optimize the case of USYNC_THREAD, including 1681 * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases, 1682 * no error detection, no lock statistics, 1683 * and the process has only a single thread. 1684 * (Most likely a traditional single-threaded application.) 1685 */ 1686 if ((((mtype = mp->mutex_type) & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) | 1687 udp->uberflags.uf_all) == 0) { 1688 /* 1689 * Only one thread exists so we don't need an atomic operation. 1690 */ 1691 if (mp->mutex_lockw == 0) { 1692 mp->mutex_lockw = LOCKSET; 1693 mp->mutex_owner = (uintptr_t)self; 1694 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 1695 return (0); 1696 } 1697 if (mtype && MUTEX_OWNER(mp) == self) { 1698 /* 1699 * LOCK_RECURSIVE, LOCK_ERRORCHECK, or both. 1700 */ 1701 if (mtype & LOCK_RECURSIVE) { 1702 if (mp->mutex_rcount == RECURSION_MAX) 1703 return (EAGAIN); 1704 mp->mutex_rcount++; 1705 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 1706 1, 0); 1707 return (0); 1708 } 1709 DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK); 1710 return (EDEADLK); /* LOCK_ERRORCHECK */ 1711 } 1712 /* 1713 * We have reached a deadlock, probably because the 1714 * process is executing non-async-signal-safe code in 1715 * a signal handler and is attempting to acquire a lock 1716 * that it already owns. This is not surprising, given 1717 * bad programming practices over the years that has 1718 * resulted in applications calling printf() and such 1719 * in their signal handlers. Unless the user has told 1720 * us that the signal handlers are safe by setting: 1721 * export _THREAD_ASYNC_SAFE=1 1722 * we return EDEADLK rather than actually deadlocking. 1723 */ 1724 if (tsp == NULL && 1725 MUTEX_OWNER(mp) == self && !self->ul_async_safe) { 1726 DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK); 1727 return (EDEADLK); 1728 } 1729 } 1730 1731 /* 1732 * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS, 1733 * no error detection, and no lock statistics. 
1734 * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases. 1735 */ 1736 if ((gflags = self->ul_schedctl_called) != NULL && 1737 (gflags->uf_trs_ted | 1738 (mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK))) == 0) { 1739 1740 if (mtype & USYNC_PROCESS) 1741 return (fast_process_lock(mp, tsp, mtype, MUTEX_LOCK)); 1742 1743 if (set_lock_byte(&mp->mutex_lockw) == 0) { 1744 mp->mutex_owner = (uintptr_t)self; 1745 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 1746 return (0); 1747 } 1748 1749 if (mtype && MUTEX_OWNER(mp) == self) { 1750 if (mtype & LOCK_RECURSIVE) { 1751 if (mp->mutex_rcount == RECURSION_MAX) 1752 return (EAGAIN); 1753 mp->mutex_rcount++; 1754 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 1755 1, 0); 1756 return (0); 1757 } 1758 DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK); 1759 return (EDEADLK); /* LOCK_ERRORCHECK */ 1760 } 1761 1762 return (slow_lock(self, mp, tsp)); 1763 } 1764 1765 /* else do it the long way */ 1766 return (mutex_lock_internal(mp, tsp, MUTEX_LOCK)); 1767 } 1768 1769 #pragma weak _private_mutex_lock = __mutex_lock 1770 #pragma weak mutex_lock = __mutex_lock 1771 #pragma weak _mutex_lock = __mutex_lock 1772 #pragma weak pthread_mutex_lock = __mutex_lock 1773 #pragma weak _pthread_mutex_lock = __mutex_lock 1774 int 1775 __mutex_lock(mutex_t *mp) 1776 { 1777 ASSERT(!curthread->ul_critical || curthread->ul_bindflags); 1778 return (mutex_lock_impl(mp, NULL)); 1779 } 1780 1781 #pragma weak pthread_mutex_timedlock = _pthread_mutex_timedlock 1782 int 1783 _pthread_mutex_timedlock(mutex_t *mp, const timespec_t *abstime) 1784 { 1785 timespec_t tslocal; 1786 int error; 1787 1788 ASSERT(!curthread->ul_critical || curthread->ul_bindflags); 1789 abstime_to_reltime(CLOCK_REALTIME, abstime, &tslocal); 1790 error = mutex_lock_impl(mp, &tslocal); 1791 if (error == ETIME) 1792 error = ETIMEDOUT; 1793 return (error); 1794 } 1795 1796 #pragma weak pthread_mutex_reltimedlock_np = _pthread_mutex_reltimedlock_np 1797 int 1798 _pthread_mutex_reltimedlock_np(mutex_t *mp, const timespec_t *reltime) 1799 { 1800 timespec_t tslocal; 1801 int error; 1802 1803 ASSERT(!curthread->ul_critical || curthread->ul_bindflags); 1804 tslocal = *reltime; 1805 error = mutex_lock_impl(mp, &tslocal); 1806 if (error == ETIME) 1807 error = ETIMEDOUT; 1808 return (error); 1809 } 1810 1811 static int 1812 slow_trylock(mutex_t *mp, ulwp_t *self) 1813 { 1814 if (MUTEX_OWNER(mp) == self || 1815 mutex_trylock_adaptive(mp) != 0) { 1816 uberdata_t *udp = self->ul_uberdata; 1817 1818 if (__td_event_report(self, TD_LOCK_TRY, udp)) { 1819 self->ul_td_evbuf.eventnum = TD_LOCK_TRY; 1820 tdb_event(TD_LOCK_TRY, udp); 1821 } 1822 return (EBUSY); 1823 } 1824 return (0); 1825 } 1826 1827 #pragma weak _private_mutex_trylock = __mutex_trylock 1828 #pragma weak mutex_trylock = __mutex_trylock 1829 #pragma weak _mutex_trylock = __mutex_trylock 1830 #pragma weak pthread_mutex_trylock = __mutex_trylock 1831 #pragma weak _pthread_mutex_trylock = __mutex_trylock 1832 int 1833 __mutex_trylock(mutex_t *mp) 1834 { 1835 ulwp_t *self = curthread; 1836 uberdata_t *udp = self->ul_uberdata; 1837 uberflags_t *gflags; 1838 int mtype; 1839 1840 ASSERT(!curthread->ul_critical || curthread->ul_bindflags); 1841 /* 1842 * Optimize the case of USYNC_THREAD, including 1843 * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases, 1844 * no error detection, no lock statistics, 1845 * and the process has only a single thread. 1846 * (Most likely a traditional single-threaded application.) 
1847 */ 1848 if ((((mtype = mp->mutex_type) & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) | 1849 udp->uberflags.uf_all) == 0) { 1850 /* 1851 * Only one thread exists so we don't need an atomic operation. 1852 */ 1853 if (mp->mutex_lockw == 0) { 1854 mp->mutex_lockw = LOCKSET; 1855 mp->mutex_owner = (uintptr_t)self; 1856 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 1857 return (0); 1858 } 1859 if (mtype && MUTEX_OWNER(mp) == self) { 1860 if (mtype & LOCK_RECURSIVE) { 1861 if (mp->mutex_rcount == RECURSION_MAX) 1862 return (EAGAIN); 1863 mp->mutex_rcount++; 1864 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 1865 1, 0); 1866 return (0); 1867 } 1868 return (EDEADLK); /* LOCK_ERRORCHECK */ 1869 } 1870 return (EBUSY); 1871 } 1872 1873 /* 1874 * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS, 1875 * no error detection, and no lock statistics. 1876 * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases. 1877 */ 1878 if ((gflags = self->ul_schedctl_called) != NULL && 1879 (gflags->uf_trs_ted | 1880 (mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK))) == 0) { 1881 1882 if (mtype & USYNC_PROCESS) 1883 return (fast_process_lock(mp, NULL, mtype, MUTEX_TRY)); 1884 1885 if (set_lock_byte(&mp->mutex_lockw) == 0) { 1886 mp->mutex_owner = (uintptr_t)self; 1887 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 1888 return (0); 1889 } 1890 1891 if (mtype && MUTEX_OWNER(mp) == self) { 1892 if (mtype & LOCK_RECURSIVE) { 1893 if (mp->mutex_rcount == RECURSION_MAX) 1894 return (EAGAIN); 1895 mp->mutex_rcount++; 1896 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 1897 1, 0); 1898 return (0); 1899 } 1900 return (EBUSY); /* LOCK_ERRORCHECK */ 1901 } 1902 1903 return (slow_trylock(mp, self)); 1904 } 1905 1906 /* else do it the long way */ 1907 return (mutex_lock_internal(mp, NULL, MUTEX_TRY)); 1908 } 1909 1910 int 1911 mutex_unlock_internal(mutex_t *mp) 1912 { 1913 ulwp_t *self = curthread; 1914 uberdata_t *udp = self->ul_uberdata; 1915 int mtype = mp->mutex_type; 1916 tdb_mutex_stats_t *msp; 1917 int error; 1918 lwpid_t lwpid; 1919 1920 if ((mtype & LOCK_ERRORCHECK) && !mutex_is_held(mp)) 1921 return (EPERM); 1922 1923 if (self->ul_error_detection && !mutex_is_held(mp)) 1924 lock_error(mp, "mutex_unlock", NULL, NULL); 1925 1926 if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) { 1927 mp->mutex_rcount--; 1928 DTRACE_PROBE2(plockstat, mutex__release, mp, 1); 1929 return (0); 1930 } 1931 1932 if ((msp = MUTEX_STATS(mp, udp)) != NULL) 1933 (void) record_hold_time(msp); 1934 1935 if (mtype & 1936 (USYNC_PROCESS_ROBUST|PTHREAD_PRIO_INHERIT|PTHREAD_PRIO_PROTECT)) { 1937 no_preempt(self); 1938 mp->mutex_owner = 0; 1939 mp->mutex_ownerpid = 0; 1940 DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 1941 if (mtype & PTHREAD_PRIO_INHERIT) { 1942 mp->mutex_lockw = LOCKCLEAR; 1943 error = ___lwp_mutex_unlock(mp); 1944 } else if (mtype & USYNC_PROCESS_ROBUST) { 1945 error = ___lwp_mutex_unlock(mp); 1946 } else { 1947 if (atomic_swap_32(&mp->mutex_lockword, 0) & WAITERMASK) 1948 (void) ___lwp_mutex_wakeup(mp); 1949 error = 0; 1950 } 1951 if (mtype & PTHREAD_PRIO_PROTECT) { 1952 if (_ceil_mylist_del(mp)) 1953 _ceil_prio_waive(); 1954 } 1955 preempt(self); 1956 } else if (mtype & USYNC_PROCESS) { 1957 if (mp->mutex_lockword & WAITERMASK) 1958 mutex_unlock_process(mp); 1959 else { 1960 mp->mutex_owner = 0; 1961 mp->mutex_ownerpid = 0; 1962 DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 1963 if (atomic_swap_32(&mp->mutex_lockword, 0) & 1964 WAITERMASK) { 1965 no_preempt(self); 1966 (void) ___lwp_mutex_wakeup(mp); 1967 
preempt(self); 1968 } 1969 } 1970 error = 0; 1971 } else { /* USYNC_THREAD */ 1972 if ((lwpid = mutex_unlock_queue(mp)) != 0) { 1973 (void) __lwp_unpark(lwpid); 1974 preempt(self); 1975 } 1976 error = 0; 1977 } 1978 1979 return (error); 1980 } 1981 1982 #pragma weak _private_mutex_unlock = __mutex_unlock 1983 #pragma weak mutex_unlock = __mutex_unlock 1984 #pragma weak _mutex_unlock = __mutex_unlock 1985 #pragma weak pthread_mutex_unlock = __mutex_unlock 1986 #pragma weak _pthread_mutex_unlock = __mutex_unlock 1987 int 1988 __mutex_unlock(mutex_t *mp) 1989 { 1990 ulwp_t *self = curthread; 1991 uberdata_t *udp = self->ul_uberdata; 1992 uberflags_t *gflags; 1993 lwpid_t lwpid; 1994 int mtype; 1995 short el; 1996 1997 /* 1998 * Optimize the case of USYNC_THREAD, including 1999 * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases, 2000 * no error detection, no lock statistics, 2001 * and the process has only a single thread. 2002 * (Most likely a traditional single-threaded application.) 2003 */ 2004 if ((((mtype = mp->mutex_type) & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) | 2005 udp->uberflags.uf_all) == 0) { 2006 if (mtype) { 2007 /* 2008 * At this point we know that one or both of the 2009 * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set. 2010 */ 2011 if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self)) 2012 return (EPERM); 2013 if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) { 2014 mp->mutex_rcount--; 2015 DTRACE_PROBE2(plockstat, mutex__release, mp, 1); 2016 return (0); 2017 } 2018 } 2019 /* 2020 * Only one thread exists so we don't need an atomic operation. 2021 * Also, there can be no waiters. 2022 */ 2023 mp->mutex_owner = 0; 2024 mp->mutex_lockword = 0; 2025 DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 2026 return (0); 2027 } 2028 2029 /* 2030 * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS, 2031 * no error detection, and no lock statistics. 2032 * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases. 2033 */ 2034 if ((gflags = self->ul_schedctl_called) != NULL) { 2035 if (((el = gflags->uf_trs_ted) | mtype) == 0) { 2036 fast_unlock: 2037 if (!(mp->mutex_lockword & WAITERMASK)) { 2038 /* no waiter exists right now */ 2039 mp->mutex_owner = 0; 2040 DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 2041 if (atomic_swap_32(&mp->mutex_lockword, 0) & 2042 WAITERMASK) { 2043 /* a waiter suddenly appeared */ 2044 no_preempt(self); 2045 if ((lwpid = mutex_wakeup(mp)) != 0) 2046 (void) __lwp_unpark(lwpid); 2047 preempt(self); 2048 } 2049 } else if ((lwpid = mutex_unlock_queue(mp)) != 0) { 2050 (void) __lwp_unpark(lwpid); 2051 preempt(self); 2052 } 2053 return (0); 2054 } 2055 if (el) /* error detection or lock statistics */ 2056 goto slow_unlock; 2057 if ((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0) { 2058 /* 2059 * At this point we know that one or both of the 2060 * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set. 2061 */ 2062 if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self)) 2063 return (EPERM); 2064 if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) { 2065 mp->mutex_rcount--; 2066 DTRACE_PROBE2(plockstat, mutex__release, mp, 1); 2067 return (0); 2068 } 2069 goto fast_unlock; 2070 } 2071 if ((mtype & 2072 ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0) { 2073 /* 2074 * At this point we know that zero, one, or both of the 2075 * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set and 2076 * that the USYNC_PROCESS flag is set. 
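 *
 * For illustration, a hypothetical application lock created with the
 * public mutex_init() interface as
 *
 *	mutex_t	m;
 *	(void) mutex_init(&m, USYNC_PROCESS | LOCK_ERRORCHECK, NULL);
 *
 * has mutex_type == (USYNC_PROCESS | LOCK_ERRORCHECK), which passes
 * the mask test above; with no error detection and no lock statistics
 * enabled, its unlock is normally handled by the code below rather
 * than by falling back to mutex_unlock_internal().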
2077 */ 2078 if ((mtype & LOCK_ERRORCHECK) && !shared_mutex_held(mp)) 2079 return (EPERM); 2080 if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) { 2081 mp->mutex_rcount--; 2082 DTRACE_PROBE2(plockstat, mutex__release, mp, 1); 2083 return (0); 2084 } 2085 if (mp->mutex_lockword & WAITERMASK) 2086 mutex_unlock_process(mp); 2087 else { 2088 mp->mutex_owner = 0; 2089 mp->mutex_ownerpid = 0; 2090 DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 2091 if (atomic_swap_32(&mp->mutex_lockword, 0) & 2092 WAITERMASK) { 2093 no_preempt(self); 2094 (void) ___lwp_mutex_wakeup(mp); 2095 preempt(self); 2096 } 2097 } 2098 return (0); 2099 } 2100 } 2101 2102 /* else do it the long way */ 2103 slow_unlock: 2104 return (mutex_unlock_internal(mp)); 2105 } 2106 2107 /* 2108 * Internally to the library, almost all mutex lock/unlock actions 2109 * go through these lmutex_ functions, to protect critical regions. 2110 * We replicate a bit of code from __mutex_lock() and __mutex_unlock() 2111 * to make these functions faster since we know that the mutex type 2112 * of all internal locks is USYNC_THREAD. We also know that internal 2113 * locking can never fail, so we panic if it does. 2114 */ 2115 void 2116 lmutex_lock(mutex_t *mp) 2117 { 2118 ulwp_t *self = curthread; 2119 uberdata_t *udp = self->ul_uberdata; 2120 2121 ASSERT(mp->mutex_type == USYNC_THREAD); 2122 2123 enter_critical(self); 2124 /* 2125 * Optimize the case of no lock statistics and only a single thread. 2126 * (Most likely a traditional single-threaded application.) 2127 */ 2128 if (udp->uberflags.uf_all == 0) { 2129 /* 2130 * Only one thread exists; the mutex must be free. 2131 */ 2132 ASSERT(mp->mutex_lockw == 0); 2133 mp->mutex_lockw = LOCKSET; 2134 mp->mutex_owner = (uintptr_t)self; 2135 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 2136 } else { 2137 tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp); 2138 2139 if (!self->ul_schedctl_called) 2140 (void) setup_schedctl(); 2141 2142 if (set_lock_byte(&mp->mutex_lockw) == 0) { 2143 mp->mutex_owner = (uintptr_t)self; 2144 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 2145 } else if (mutex_trylock_adaptive(mp) != 0) { 2146 (void) mutex_lock_queue(self, msp, mp, NULL); 2147 } 2148 2149 if (msp) 2150 record_begin_hold(msp); 2151 } 2152 } 2153 2154 void 2155 lmutex_unlock(mutex_t *mp) 2156 { 2157 ulwp_t *self = curthread; 2158 uberdata_t *udp = self->ul_uberdata; 2159 2160 ASSERT(mp->mutex_type == USYNC_THREAD); 2161 2162 /* 2163 * Optimize the case of no lock statistics and only a single thread. 2164 * (Most likely a traditional single-threaded application.) 2165 */ 2166 if (udp->uberflags.uf_all == 0) { 2167 /* 2168 * Only one thread exists so there can be no waiters. 2169 */ 2170 mp->mutex_owner = 0; 2171 mp->mutex_lockword = 0; 2172 DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 2173 } else { 2174 tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp); 2175 lwpid_t lwpid; 2176 2177 if (msp) 2178 (void) record_hold_time(msp); 2179 if ((lwpid = mutex_unlock_queue(mp)) != 0) { 2180 (void) __lwp_unpark(lwpid); 2181 preempt(self); 2182 } 2183 } 2184 exit_critical(self); 2185 } 2186 2187 /* 2188 * For specialized code in libc, like the asynchronous i/o code, 2189 * the following sig_*() locking primitives are used in order 2190 * to make the code asynchronous signal safe. Signals are 2191 * deferred while locks acquired by these functions are held. 
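 *
 * A minimal usage sketch (the lock, condition variable and predicate
 * named here are hypothetical, not part of libc):
 *
 *	static mutex_t	work_lock = DEFAULTMUTEX;
 *	static cond_t	work_cv = DEFAULTCV;
 *	static int	work_ready;
 *
 *	sig_mutex_lock(&work_lock);
 *	while (!work_ready)
 *		(void) sig_cond_wait(&work_cv, &work_lock);
 *	work_ready = 0;
 *	sig_mutex_unlock(&work_lock);
 *
 * Any signal that arrives while work_lock is held remains deferred
 * until sig_mutex_unlock() calls sigon().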
2192 */ 2193 void 2194 sig_mutex_lock(mutex_t *mp) 2195 { 2196 sigoff(curthread); 2197 (void) _private_mutex_lock(mp); 2198 } 2199 2200 void 2201 sig_mutex_unlock(mutex_t *mp) 2202 { 2203 (void) _private_mutex_unlock(mp); 2204 sigon(curthread); 2205 } 2206 2207 int 2208 sig_mutex_trylock(mutex_t *mp) 2209 { 2210 int error; 2211 2212 sigoff(curthread); 2213 if ((error = _private_mutex_trylock(mp)) != 0) 2214 sigon(curthread); 2215 return (error); 2216 } 2217 2218 /* 2219 * sig_cond_wait() is a cancellation point. 2220 */ 2221 int 2222 sig_cond_wait(cond_t *cv, mutex_t *mp) 2223 { 2224 int error; 2225 2226 ASSERT(curthread->ul_sigdefer != 0); 2227 _private_testcancel(); 2228 error = _cond_wait(cv, mp); 2229 if (error == EINTR && curthread->ul_cursig) { 2230 sig_mutex_unlock(mp); 2231 /* take the deferred signal here */ 2232 sig_mutex_lock(mp); 2233 } 2234 _private_testcancel(); 2235 return (error); 2236 } 2237 2238 /* 2239 * sig_cond_reltimedwait() is a cancellation point. 2240 */ 2241 int 2242 sig_cond_reltimedwait(cond_t *cv, mutex_t *mp, const timespec_t *ts) 2243 { 2244 int error; 2245 2246 ASSERT(curthread->ul_sigdefer != 0); 2247 _private_testcancel(); 2248 error = _cond_reltimedwait(cv, mp, ts); 2249 if (error == EINTR && curthread->ul_cursig) { 2250 sig_mutex_unlock(mp); 2251 /* take the deferred signal here */ 2252 sig_mutex_lock(mp); 2253 } 2254 _private_testcancel(); 2255 return (error); 2256 } 2257 2258 static int 2259 shared_mutex_held(mutex_t *mparg) 2260 { 2261 /* 2262 * There is an inherent data race in the current ownership design. 2263 * The mutex_owner and mutex_ownerpid fields cannot be set or tested 2264 * atomically as a pair. The original implementation tested each 2265 * field just once. This was exposed to trivial false positives in 2266 * the case of multiple multithreaded processes with thread addresses 2267 * in common. To close the window to an acceptable level we now use a 2268 * sequence of five tests: pid-thr-pid-thr-pid. This ensures that any 2269 * single interruption will still leave one uninterrupted sequence of 2270 * pid-thr-pid tests intact. 2271 * 2272 * It is assumed that all updates are always ordered thr-pid and that 2273 * we have TSO hardware. 2274 */ 2275 volatile mutex_t *mp = (volatile mutex_t *)mparg; 2276 ulwp_t *self = curthread; 2277 uberdata_t *udp = self->ul_uberdata; 2278 2279 if (mp->mutex_ownerpid != udp->pid) 2280 return (0); 2281 2282 if (!MUTEX_OWNED(mp, self)) 2283 return (0); 2284 2285 if (mp->mutex_ownerpid != udp->pid) 2286 return (0); 2287 2288 if (!MUTEX_OWNED(mp, self)) 2289 return (0); 2290 2291 if (mp->mutex_ownerpid != udp->pid) 2292 return (0); 2293 2294 return (1); 2295 } 2296 2297 /* 2298 * Some crufty old programs define their own version of _mutex_held() 2299 * to be simply return(1). This breaks internal libc logic, so we 2300 * define a private version for exclusive use by libc, mutex_is_held(), 2301 * and also a new public function, __mutex_held(), to be used in new 2302 * code to circumvent these crufty old programs. 
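 *
 * For example, an old program carrying its own stub such as
 *
 *	int
 *	_mutex_held(mutex_t *mp)
 *	{
 *		return (1);
 *	}
 *
 * interposes on the weak libc symbol of the same name, so any internal
 * consistency check that called _mutex_held() would always appear to
 * succeed.  Calling the private name mutex_is_held() keeps libc
 * immune to such interposition.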
2303 */ 2304 #pragma weak mutex_held = mutex_is_held 2305 #pragma weak _mutex_held = mutex_is_held 2306 #pragma weak __mutex_held = mutex_is_held 2307 int 2308 mutex_is_held(mutex_t *mp) 2309 { 2310 if (mp->mutex_type & (USYNC_PROCESS | USYNC_PROCESS_ROBUST)) 2311 return (shared_mutex_held(mp)); 2312 return (MUTEX_OWNED(mp, curthread)); 2313 } 2314 2315 #pragma weak _private_mutex_destroy = __mutex_destroy 2316 #pragma weak mutex_destroy = __mutex_destroy 2317 #pragma weak _mutex_destroy = __mutex_destroy 2318 #pragma weak pthread_mutex_destroy = __mutex_destroy 2319 #pragma weak _pthread_mutex_destroy = __mutex_destroy 2320 int 2321 __mutex_destroy(mutex_t *mp) 2322 { 2323 mp->mutex_magic = 0; 2324 mp->mutex_flag &= ~LOCK_INITED; 2325 tdb_sync_obj_deregister(mp); 2326 return (0); 2327 } 2328 2329 /* 2330 * Spin locks are separate from ordinary mutexes, 2331 * but we use the same data structure for them. 2332 */ 2333 2334 #pragma weak pthread_spin_init = _pthread_spin_init 2335 int 2336 _pthread_spin_init(pthread_spinlock_t *lock, int pshared) 2337 { 2338 mutex_t *mp = (mutex_t *)lock; 2339 2340 (void) _memset(mp, 0, sizeof (*mp)); 2341 if (pshared == PTHREAD_PROCESS_SHARED) 2342 mp->mutex_type = USYNC_PROCESS; 2343 else 2344 mp->mutex_type = USYNC_THREAD; 2345 mp->mutex_flag = LOCK_INITED; 2346 mp->mutex_magic = MUTEX_MAGIC; 2347 return (0); 2348 } 2349 2350 #pragma weak pthread_spin_destroy = _pthread_spin_destroy 2351 int 2352 _pthread_spin_destroy(pthread_spinlock_t *lock) 2353 { 2354 (void) _memset(lock, 0, sizeof (*lock)); 2355 return (0); 2356 } 2357 2358 #pragma weak pthread_spin_trylock = _pthread_spin_trylock 2359 int 2360 _pthread_spin_trylock(pthread_spinlock_t *lock) 2361 { 2362 mutex_t *mp = (mutex_t *)lock; 2363 ulwp_t *self = curthread; 2364 int error = 0; 2365 2366 no_preempt(self); 2367 if (set_lock_byte(&mp->mutex_lockw) != 0) 2368 error = EBUSY; 2369 else { 2370 mp->mutex_owner = (uintptr_t)self; 2371 if (mp->mutex_type == USYNC_PROCESS) 2372 mp->mutex_ownerpid = self->ul_uberdata->pid; 2373 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 2374 } 2375 preempt(self); 2376 return (error); 2377 } 2378 2379 #pragma weak pthread_spin_lock = _pthread_spin_lock 2380 int 2381 _pthread_spin_lock(pthread_spinlock_t *lock) 2382 { 2383 volatile uint8_t *lockp = 2384 (volatile uint8_t *)&((mutex_t *)lock)->mutex_lockw; 2385 2386 ASSERT(!curthread->ul_critical || curthread->ul_bindflags); 2387 /* 2388 * We don't care whether the owner is running on a processor. 2389 * We just spin because that's what this interface requires. 
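 *
 * Typical application usage of this interface (a hypothetical caller;
 * the counter is illustrative only):
 *
 *	pthread_spinlock_t	lock;
 *	int			counter;
 *
 *	(void) pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
 *	(void) pthread_spin_lock(&lock);
 *	counter++;
 *	(void) pthread_spin_unlock(&lock);
 *	(void) pthread_spin_destroy(&lock);
 *
 * Since an owner that has been preempted leaves us spinning, these
 * locks are suited only to very short critical sections.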
2390 */ 2391 for (;;) { 2392 if (*lockp == 0) { /* lock byte appears to be clear */ 2393 if (_pthread_spin_trylock(lock) == 0) 2394 return (0); 2395 } 2396 SMT_PAUSE(); 2397 } 2398 } 2399 2400 #pragma weak pthread_spin_unlock = _pthread_spin_unlock 2401 int 2402 _pthread_spin_unlock(pthread_spinlock_t *lock) 2403 { 2404 mutex_t *mp = (mutex_t *)lock; 2405 ulwp_t *self = curthread; 2406 2407 no_preempt(self); 2408 mp->mutex_owner = 0; 2409 mp->mutex_ownerpid = 0; 2410 DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 2411 (void) atomic_swap_32(&mp->mutex_lockword, 0); 2412 preempt(self); 2413 return (0); 2414 } 2415 2416 #pragma weak cond_init = _cond_init 2417 /* ARGSUSED2 */ 2418 int 2419 _cond_init(cond_t *cvp, int type, void *arg) 2420 { 2421 if (type != USYNC_THREAD && type != USYNC_PROCESS) 2422 return (EINVAL); 2423 (void) _memset(cvp, 0, sizeof (*cvp)); 2424 cvp->cond_type = (uint16_t)type; 2425 cvp->cond_magic = COND_MAGIC; 2426 return (0); 2427 } 2428 2429 /* 2430 * cond_sleep_queue(): utility function for cond_wait_queue(). 2431 * 2432 * Go to sleep on a condvar sleep queue, expect to be waked up 2433 * by someone calling cond_signal() or cond_broadcast() or due 2434 * to receiving a UNIX signal or being cancelled, or just simply 2435 * due to a spurious wakeup (like someone calling forkall()). 2436 * 2437 * The associated mutex is *not* reacquired before returning. 2438 * That must be done by the caller of cond_sleep_queue(). 2439 */ 2440 int 2441 cond_sleep_queue(cond_t *cvp, mutex_t *mp, timespec_t *tsp) 2442 { 2443 ulwp_t *self = curthread; 2444 queue_head_t *qp; 2445 queue_head_t *mqp; 2446 lwpid_t lwpid; 2447 int signalled; 2448 int error; 2449 2450 /* 2451 * Put ourself on the CV sleep queue, unlock the mutex, then 2452 * park ourself and unpark a candidate lwp to grab the mutex. 2453 * We must go onto the CV sleep queue before dropping the 2454 * mutex in order to guarantee atomicity of the operation. 2455 */ 2456 self->ul_sp = stkptr(); 2457 qp = queue_lock(cvp, CV); 2458 enqueue(qp, self, cvp, CV); 2459 cvp->cond_waiters_user = 1; 2460 self->ul_cvmutex = mp; 2461 self->ul_cv_wake = (tsp != NULL); 2462 self->ul_signalled = 0; 2463 lwpid = mutex_unlock_queue(mp); 2464 for (;;) { 2465 set_parking_flag(self, 1); 2466 queue_unlock(qp); 2467 if (lwpid != 0) { 2468 lwpid = preempt_unpark(self, lwpid); 2469 preempt(self); 2470 } 2471 /* 2472 * We may have a deferred signal present, 2473 * in which case we should return EINTR. 2474 * Also, we may have received a SIGCANCEL; if so 2475 * and we are cancelable we should return EINTR. 2476 * We force an immediate EINTR return from 2477 * __lwp_park() by turning our parking flag off. 2478 */ 2479 if (self->ul_cursig != 0 || 2480 (self->ul_cancelable && self->ul_cancel_pending)) 2481 set_parking_flag(self, 0); 2482 /* 2483 * __lwp_park() will return the residual time in tsp 2484 * if we are unparked before the timeout expires. 2485 */ 2486 error = __lwp_park(tsp, lwpid); 2487 set_parking_flag(self, 0); 2488 lwpid = 0; /* unpark the other lwp only once */ 2489 /* 2490 * We were waked up by cond_signal(), cond_broadcast(), 2491 * by an interrupt or timeout (EINTR or ETIME), 2492 * or we may just have gotten a spurious wakeup. 2493 */ 2494 qp = queue_lock(cvp, CV); 2495 mqp = queue_lock(mp, MX); 2496 if (self->ul_sleepq == NULL) 2497 break; 2498 /* 2499 * We are on either the condvar sleep queue or the 2500 * mutex sleep queue. Break out of the sleep if we 2501 * were interrupted or we timed out (EINTR or ETIME). 
2502 * Else this is a spurious wakeup; continue the loop. 2503 */ 2504 if (self->ul_sleepq == mqp) { /* mutex queue */ 2505 if (error) { 2506 mp->mutex_waiters = dequeue_self(mqp, mp); 2507 break; 2508 } 2509 tsp = NULL; /* no more timeout */ 2510 } else if (self->ul_sleepq == qp) { /* condvar queue */ 2511 if (error) { 2512 cvp->cond_waiters_user = dequeue_self(qp, cvp); 2513 break; 2514 } 2515 /* 2516 * Else a spurious wakeup on the condvar queue. 2517 * __lwp_park() has already adjusted the timeout. 2518 */ 2519 } else { 2520 thr_panic("cond_sleep_queue(): thread not on queue"); 2521 } 2522 queue_unlock(mqp); 2523 } 2524 2525 self->ul_sp = 0; 2526 ASSERT(self->ul_cvmutex == NULL && self->ul_cv_wake == 0); 2527 ASSERT(self->ul_sleepq == NULL && self->ul_link == NULL && 2528 self->ul_wchan == NULL); 2529 2530 signalled = self->ul_signalled; 2531 self->ul_signalled = 0; 2532 queue_unlock(qp); 2533 queue_unlock(mqp); 2534 2535 /* 2536 * If we were concurrently cond_signal()d and any of: 2537 * received a UNIX signal, were cancelled, or got a timeout, 2538 * then perform another cond_signal() to avoid consuming it. 2539 */ 2540 if (error && signalled) 2541 (void) cond_signal_internal(cvp); 2542 2543 return (error); 2544 } 2545 2546 int 2547 cond_wait_queue(cond_t *cvp, mutex_t *mp, timespec_t *tsp, 2548 tdb_mutex_stats_t *msp) 2549 { 2550 ulwp_t *self = curthread; 2551 int error; 2552 2553 /* 2554 * The old thread library was programmed to defer signals 2555 * while in cond_wait() so that the associated mutex would 2556 * be guaranteed to be held when the application signal 2557 * handler was invoked. 2558 * 2559 * We do not behave this way by default; the state of the 2560 * associated mutex in the signal handler is undefined. 2561 * 2562 * To accommodate applications that depend on the old 2563 * behavior, the _THREAD_COND_WAIT_DEFER environment 2564 * variable can be set to 1 and we will behave in the 2565 * old way with respect to cond_wait(). 2566 */ 2567 if (self->ul_cond_wait_defer) 2568 sigoff(self); 2569 2570 error = cond_sleep_queue(cvp, mp, tsp); 2571 2572 /* 2573 * Reacquire the mutex. 2574 */ 2575 if (set_lock_byte(&mp->mutex_lockw) == 0) { 2576 mp->mutex_owner = (uintptr_t)self; 2577 DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 2578 } else if (mutex_trylock_adaptive(mp) != 0) { 2579 (void) mutex_lock_queue(self, msp, mp, NULL); 2580 } 2581 2582 if (msp) 2583 record_begin_hold(msp); 2584 2585 /* 2586 * Take any deferred signal now, after we have reacquired the mutex. 2587 */ 2588 if (self->ul_cond_wait_defer) 2589 sigon(self); 2590 2591 return (error); 2592 } 2593 2594 /* 2595 * cond_sleep_kernel(): utility function for cond_wait_kernel(). 2596 * See the comment ahead of cond_sleep_queue(), above. 2597 */ 2598 int 2599 cond_sleep_kernel(cond_t *cvp, mutex_t *mp, timespec_t *tsp) 2600 { 2601 int mtype = mp->mutex_type; 2602 ulwp_t *self = curthread; 2603 int error; 2604 2605 if (mtype & PTHREAD_PRIO_PROTECT) { 2606 if (_ceil_mylist_del(mp)) 2607 _ceil_prio_waive(); 2608 } 2609 2610 self->ul_sp = stkptr(); 2611 self->ul_wchan = cvp; 2612 mp->mutex_owner = 0; 2613 mp->mutex_ownerpid = 0; 2614 if (mtype & PTHREAD_PRIO_INHERIT) 2615 mp->mutex_lockw = LOCKCLEAR; 2616 /* 2617 * ___lwp_cond_wait() returns immediately with EINTR if 2618 * set_parking_flag(self,0) is called on this lwp before it 2619 * goes to sleep in the kernel. sigacthandler() calls this 2620 * when a deferred signal is noted. 
This assures that we don't 2621 * get stuck in ___lwp_cond_wait() with all signals blocked 2622 * due to taking a deferred signal before going to sleep. 2623 */ 2624 set_parking_flag(self, 1); 2625 if (self->ul_cursig != 0 || 2626 (self->ul_cancelable && self->ul_cancel_pending)) 2627 set_parking_flag(self, 0); 2628 error = ___lwp_cond_wait(cvp, mp, tsp, 1); 2629 set_parking_flag(self, 0); 2630 self->ul_sp = 0; 2631 self->ul_wchan = NULL; 2632 return (error); 2633 } 2634 2635 int 2636 cond_wait_kernel(cond_t *cvp, mutex_t *mp, timespec_t *tsp) 2637 { 2638 ulwp_t *self = curthread; 2639 int error; 2640 int merror; 2641 2642 /* 2643 * See the large comment in cond_wait_queue(), above. 2644 */ 2645 if (self->ul_cond_wait_defer) 2646 sigoff(self); 2647 2648 error = cond_sleep_kernel(cvp, mp, tsp); 2649 2650 /* 2651 * Override the return code from ___lwp_cond_wait() 2652 * with any non-zero return code from mutex_lock(). 2653 * This addresses robust lock failures in particular; 2654 * the caller must see the EOWNERDEAD or ENOTRECOVERABLE 2655 * errors in order to take corrective action. 2656 */ 2657 if ((merror = _private_mutex_lock(mp)) != 0) 2658 error = merror; 2659 2660 /* 2661 * Take any deferred signal now, after we have reacquired the mutex. 2662 */ 2663 if (self->ul_cond_wait_defer) 2664 sigon(self); 2665 2666 return (error); 2667 } 2668 2669 /* 2670 * Common code for _cond_wait() and _cond_timedwait() 2671 */ 2672 int 2673 cond_wait_common(cond_t *cvp, mutex_t *mp, timespec_t *tsp) 2674 { 2675 int mtype = mp->mutex_type; 2676 hrtime_t begin_sleep = 0; 2677 ulwp_t *self = curthread; 2678 uberdata_t *udp = self->ul_uberdata; 2679 tdb_cond_stats_t *csp = COND_STATS(cvp, udp); 2680 tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp); 2681 uint8_t rcount; 2682 int error = 0; 2683 2684 /* 2685 * The SUSV3 Posix spec for pthread_cond_timedwait() states: 2686 * Except in the case of [ETIMEDOUT], all these error checks 2687 * shall act as if they were performed immediately at the 2688 * beginning of processing for the function and shall cause 2689 * an error return, in effect, prior to modifying the state 2690 * of the mutex specified by mutex or the condition variable 2691 * specified by cond. 2692 * Therefore, we must return EINVAL now if the timeout is invalid. 
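 *
 * For example (a hypothetical caller), a timeout whose tv_nsec is
 * 1000000000 (== NANOSEC, one past the legal range) or whose tv_sec
 * is negative fails the check below and EINVAL is returned before
 * the state of either the mutex or the condvar has been modified.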
2693 */ 2694 if (tsp != NULL && 2695 (tsp->tv_sec < 0 || (ulong_t)tsp->tv_nsec >= NANOSEC)) 2696 return (EINVAL); 2697 2698 if (__td_event_report(self, TD_SLEEP, udp)) { 2699 self->ul_sp = stkptr(); 2700 self->ul_wchan = cvp; 2701 self->ul_td_evbuf.eventnum = TD_SLEEP; 2702 self->ul_td_evbuf.eventdata = cvp; 2703 tdb_event(TD_SLEEP, udp); 2704 self->ul_sp = 0; 2705 } 2706 if (csp) { 2707 if (tsp) 2708 tdb_incr(csp->cond_timedwait); 2709 else 2710 tdb_incr(csp->cond_wait); 2711 } 2712 if (msp) 2713 begin_sleep = record_hold_time(msp); 2714 else if (csp) 2715 begin_sleep = gethrtime(); 2716 2717 if (self->ul_error_detection) { 2718 if (!mutex_is_held(mp)) 2719 lock_error(mp, "cond_wait", cvp, NULL); 2720 if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) 2721 lock_error(mp, "recursive mutex in cond_wait", 2722 cvp, NULL); 2723 if (cvp->cond_type & USYNC_PROCESS) { 2724 if (!(mtype & (USYNC_PROCESS | USYNC_PROCESS_ROBUST))) 2725 lock_error(mp, "cond_wait", cvp, 2726 "condvar process-shared, " 2727 "mutex process-private"); 2728 } else { 2729 if (mtype & (USYNC_PROCESS | USYNC_PROCESS_ROBUST)) 2730 lock_error(mp, "cond_wait", cvp, 2731 "condvar process-private, " 2732 "mutex process-shared"); 2733 } 2734 } 2735 2736 /* 2737 * We deal with recursive mutexes by completely 2738 * dropping the lock and restoring the recursion 2739 * count after waking up. This is arguably wrong, 2740 * but it obeys the principle of least astonishment. 2741 */ 2742 rcount = mp->mutex_rcount; 2743 mp->mutex_rcount = 0; 2744 if ((mtype & (USYNC_PROCESS | USYNC_PROCESS_ROBUST | 2745 PTHREAD_PRIO_INHERIT | PTHREAD_PRIO_PROTECT)) | 2746 (cvp->cond_type & USYNC_PROCESS)) 2747 error = cond_wait_kernel(cvp, mp, tsp); 2748 else 2749 error = cond_wait_queue(cvp, mp, tsp, msp); 2750 mp->mutex_rcount = rcount; 2751 2752 if (csp) { 2753 hrtime_t lapse = gethrtime() - begin_sleep; 2754 if (tsp == NULL) 2755 csp->cond_wait_sleep_time += lapse; 2756 else { 2757 csp->cond_timedwait_sleep_time += lapse; 2758 if (error == ETIME) 2759 tdb_incr(csp->cond_timedwait_timeout); 2760 } 2761 } 2762 return (error); 2763 } 2764 2765 /* 2766 * cond_wait() is a cancellation point but _cond_wait() is not. 2767 * System libraries call the non-cancellation version. 2768 * It is expected that only applications call the cancellation version. 2769 */ 2770 int 2771 _cond_wait(cond_t *cvp, mutex_t *mp) 2772 { 2773 ulwp_t *self = curthread; 2774 uberdata_t *udp = self->ul_uberdata; 2775 uberflags_t *gflags; 2776 2777 /* 2778 * Optimize the common case of USYNC_THREAD plus 2779 * no error detection, no lock statistics, and no event tracing. 2780 */ 2781 if ((gflags = self->ul_schedctl_called) != NULL && 2782 (cvp->cond_type | mp->mutex_type | gflags->uf_trs_ted | 2783 self->ul_td_events_enable | 2784 udp->tdb.tdb_ev_global_mask.event_bits[0]) == 0) 2785 return (cond_wait_queue(cvp, mp, NULL, NULL)); 2786 2787 /* 2788 * Else do it the long way. 2789 */ 2790 return (cond_wait_common(cvp, mp, NULL)); 2791 } 2792 2793 int 2794 cond_wait(cond_t *cvp, mutex_t *mp) 2795 { 2796 int error; 2797 2798 _cancelon(); 2799 error = _cond_wait(cvp, mp); 2800 if (error == EINTR) 2801 _canceloff(); 2802 else 2803 _canceloff_nocancel(); 2804 return (error); 2805 } 2806 2807 #pragma weak pthread_cond_wait = _pthread_cond_wait 2808 int 2809 _pthread_cond_wait(cond_t *cvp, mutex_t *mp) 2810 { 2811 int error; 2812 2813 error = cond_wait(cvp, mp); 2814 return ((error == EINTR)? 
0 : error); 2815 } 2816 2817 /* 2818 * cond_timedwait() is a cancellation point but _cond_timedwait() is not. 2819 * System libraries call the non-cancellation version. 2820 * It is expected that only applications call the cancellation version. 2821 */ 2822 int 2823 _cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime) 2824 { 2825 clockid_t clock_id = cvp->cond_clockid; 2826 timespec_t reltime; 2827 int error; 2828 2829 if (clock_id != CLOCK_REALTIME && clock_id != CLOCK_HIGHRES) 2830 clock_id = CLOCK_REALTIME; 2831 abstime_to_reltime(clock_id, abstime, &reltime); 2832 error = cond_wait_common(cvp, mp, &reltime); 2833 if (error == ETIME && clock_id == CLOCK_HIGHRES) { 2834 /* 2835 * Don't return ETIME if we didn't really get a timeout. 2836 * This can happen if we return because someone resets 2837 * the system clock. Just return zero in this case, 2838 * giving a spurious wakeup but not a timeout. 2839 */ 2840 if ((hrtime_t)(uint32_t)abstime->tv_sec * NANOSEC + 2841 abstime->tv_nsec > gethrtime()) 2842 error = 0; 2843 } 2844 return (error); 2845 } 2846 2847 int 2848 cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime) 2849 { 2850 int error; 2851 2852 _cancelon(); 2853 error = _cond_timedwait(cvp, mp, abstime); 2854 if (error == EINTR) 2855 _canceloff(); 2856 else 2857 _canceloff_nocancel(); 2858 return (error); 2859 } 2860 2861 #pragma weak pthread_cond_timedwait = _pthread_cond_timedwait 2862 int 2863 _pthread_cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime) 2864 { 2865 int error; 2866 2867 error = cond_timedwait(cvp, mp, abstime); 2868 if (error == ETIME) 2869 error = ETIMEDOUT; 2870 else if (error == EINTR) 2871 error = 0; 2872 return (error); 2873 } 2874 2875 /* 2876 * cond_reltimedwait() is a cancellation point but _cond_reltimedwait() 2877 * is not. System libraries call the non-cancellation version. 2878 * It is expected that only applications call the cancellation version. 
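 *
 * A minimal usage sketch of the relative-timeout interface (the lock,
 * condvar and predicate named here are hypothetical):
 *
 *	static mutex_t	m = DEFAULTMUTEX;
 *	static cond_t	cv = DEFAULTCV;
 *	static int	ready;
 *	timespec_t	delay;
 *
 *	(void) mutex_lock(&m);
 *	while (!ready) {
 *		delay.tv_sec = 0;
 *		delay.tv_nsec = 500000000;
 *		if (cond_reltimedwait(&cv, &m, &delay) == ETIME)
 *			break;
 *	}
 *	(void) mutex_unlock(&m);
 *
 * The pthread_cond_reltimedwait_np() wrapper below maps the ETIME
 * return value to ETIMEDOUT for POSIX consumers.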
2879 */ 2880 int 2881 _cond_reltimedwait(cond_t *cvp, mutex_t *mp, const timespec_t *reltime) 2882 { 2883 timespec_t tslocal = *reltime; 2884 2885 return (cond_wait_common(cvp, mp, &tslocal)); 2886 } 2887 2888 #pragma weak cond_reltimedwait = _cond_reltimedwait_cancel 2889 int 2890 _cond_reltimedwait_cancel(cond_t *cvp, mutex_t *mp, const timespec_t *reltime) 2891 { 2892 int error; 2893 2894 _cancelon(); 2895 error = _cond_reltimedwait(cvp, mp, reltime); 2896 if (error == EINTR) 2897 _canceloff(); 2898 else 2899 _canceloff_nocancel(); 2900 return (error); 2901 } 2902 2903 #pragma weak pthread_cond_reltimedwait_np = _pthread_cond_reltimedwait_np 2904 int 2905 _pthread_cond_reltimedwait_np(cond_t *cvp, mutex_t *mp, 2906 const timespec_t *reltime) 2907 { 2908 int error; 2909 2910 error = _cond_reltimedwait_cancel(cvp, mp, reltime); 2911 if (error == ETIME) 2912 error = ETIMEDOUT; 2913 else if (error == EINTR) 2914 error = 0; 2915 return (error); 2916 } 2917 2918 #pragma weak pthread_cond_signal = cond_signal_internal 2919 #pragma weak _pthread_cond_signal = cond_signal_internal 2920 #pragma weak cond_signal = cond_signal_internal 2921 #pragma weak _cond_signal = cond_signal_internal 2922 int 2923 cond_signal_internal(cond_t *cvp) 2924 { 2925 ulwp_t *self = curthread; 2926 uberdata_t *udp = self->ul_uberdata; 2927 tdb_cond_stats_t *csp = COND_STATS(cvp, udp); 2928 int error = 0; 2929 queue_head_t *qp; 2930 mutex_t *mp; 2931 queue_head_t *mqp; 2932 ulwp_t **ulwpp; 2933 ulwp_t *ulwp; 2934 ulwp_t *prev = NULL; 2935 ulwp_t *next; 2936 ulwp_t **suspp = NULL; 2937 ulwp_t *susprev; 2938 2939 if (csp) 2940 tdb_incr(csp->cond_signal); 2941 2942 if (cvp->cond_waiters_kernel) /* someone sleeping in the kernel? */ 2943 error = __lwp_cond_signal(cvp); 2944 2945 if (!cvp->cond_waiters_user) /* no one sleeping at user-level */ 2946 return (error); 2947 2948 /* 2949 * Move someone from the condvar sleep queue to the mutex sleep 2950 * queue for the mutex that he will acquire on being waked up. 2951 * We can do this only if we own the mutex he will acquire. 2952 * If we do not own the mutex, or if his ul_cv_wake flag 2953 * is set, just dequeue and unpark him. 2954 */ 2955 qp = queue_lock(cvp, CV); 2956 for (ulwpp = &qp->qh_head; (ulwp = *ulwpp) != NULL; 2957 prev = ulwp, ulwpp = &ulwp->ul_link) { 2958 if (ulwp->ul_wchan == cvp) { 2959 if (!ulwp->ul_stop) 2960 break; 2961 /* 2962 * Try not to dequeue a suspended thread. 2963 * This mimics the old libthread's behavior. 2964 */ 2965 if (suspp == NULL) { 2966 suspp = ulwpp; 2967 susprev = prev; 2968 } 2969 } 2970 } 2971 if (ulwp == NULL && suspp != NULL) { 2972 ulwp = *(ulwpp = suspp); 2973 prev = susprev; 2974 suspp = NULL; 2975 } 2976 if (ulwp == NULL) { /* no one on the sleep queue */ 2977 cvp->cond_waiters_user = 0; 2978 queue_unlock(qp); 2979 return (error); 2980 } 2981 /* 2982 * Scan the remainder of the CV queue for another waiter. 2983 */ 2984 if (suspp != NULL) { 2985 next = *suspp; 2986 } else { 2987 for (next = ulwp->ul_link; next != NULL; next = next->ul_link) 2988 if (next->ul_wchan == cvp) 2989 break; 2990 } 2991 if (next == NULL) 2992 cvp->cond_waiters_user = 0; 2993 2994 /* 2995 * Inform the thread that he was the recipient of a cond_signal(). 2996 * This lets him deal with cond_signal() and, concurrently, 2997 * one or more of a cancellation, a UNIX signal, or a timeout. 2998 * These latter conditions must not consume a cond_signal(). 
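 *
 * (Should the chosen waiter nonetheless return with EINTR or ETIME,
 * cond_sleep_queue() above notices ul_signalled and reissues the
 * cond_signal() so that the wakeup is not lost.)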
2999 */ 3000 ulwp->ul_signalled = 1; 3001 3002 /* 3003 * Dequeue the waiter but leave his ul_sleepq non-NULL 3004 * while we move him to the mutex queue so that he can 3005 * deal properly with spurious wakeups. 3006 */ 3007 *ulwpp = ulwp->ul_link; 3008 if (qp->qh_tail == ulwp) 3009 qp->qh_tail = prev; 3010 qp->qh_qlen--; 3011 ulwp->ul_link = NULL; 3012 3013 mp = ulwp->ul_cvmutex; /* the mutex he will acquire */ 3014 ulwp->ul_cvmutex = NULL; 3015 ASSERT(mp != NULL); 3016 3017 if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) { 3018 lwpid_t lwpid = ulwp->ul_lwpid; 3019 3020 no_preempt(self); 3021 ulwp->ul_sleepq = NULL; 3022 ulwp->ul_wchan = NULL; 3023 ulwp->ul_cv_wake = 0; 3024 queue_unlock(qp); 3025 (void) __lwp_unpark(lwpid); 3026 preempt(self); 3027 } else { 3028 mqp = queue_lock(mp, MX); 3029 enqueue(mqp, ulwp, mp, MX); 3030 mp->mutex_waiters = 1; 3031 queue_unlock(mqp); 3032 queue_unlock(qp); 3033 } 3034 3035 return (error); 3036 } 3037 3038 /* 3039 * Utility function called from cond_broadcast() and rw_queue_release() 3040 * to (re)allocate a big buffer to hold the lwpids of all the threads 3041 * to be set running after they are removed from their sleep queues. 3042 * Since we are holding a queue lock, we cannot call any function 3043 * that might acquire a lock. mmap(), munmap() and lwp_unpark_all() 3044 * are simple system calls and are safe in this regard. 3045 */ 3046 lwpid_t * 3047 alloc_lwpids(lwpid_t *lwpid, int *nlwpid_ptr, int *maxlwps_ptr) 3048 { 3049 /* 3050 * Allocate NEWLWPS ids on the first overflow. 3051 * Double the allocation each time after that. 3052 */ 3053 int nlwpid = *nlwpid_ptr; 3054 int maxlwps = *maxlwps_ptr; 3055 int first_allocation; 3056 int newlwps; 3057 void *vaddr; 3058 3059 ASSERT(nlwpid == maxlwps); 3060 3061 first_allocation = (maxlwps == MAXLWPS); 3062 newlwps = first_allocation? NEWLWPS : 2 * maxlwps; 3063 vaddr = _private_mmap(NULL, newlwps * sizeof (lwpid_t), 3064 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, (off_t)0); 3065 3066 if (vaddr == MAP_FAILED) { 3067 /* 3068 * Let's hope this never happens. 3069 * If it does, then we have a terrible 3070 * thundering herd on our hands. 3071 */ 3072 (void) __lwp_unpark_all(lwpid, nlwpid); 3073 *nlwpid_ptr = 0; 3074 } else { 3075 (void) _memcpy(vaddr, lwpid, maxlwps * sizeof (lwpid_t)); 3076 if (!first_allocation) 3077 (void) _private_munmap(lwpid, 3078 maxlwps * sizeof (lwpid_t)); 3079 lwpid = vaddr; 3080 *maxlwps_ptr = newlwps; 3081 } 3082 3083 return (lwpid); 3084 } 3085 3086 #pragma weak pthread_cond_broadcast = cond_broadcast_internal 3087 #pragma weak _pthread_cond_broadcast = cond_broadcast_internal 3088 #pragma weak cond_broadcast = cond_broadcast_internal 3089 #pragma weak _cond_broadcast = cond_broadcast_internal 3090 int 3091 cond_broadcast_internal(cond_t *cvp) 3092 { 3093 ulwp_t *self = curthread; 3094 uberdata_t *udp = self->ul_uberdata; 3095 tdb_cond_stats_t *csp = COND_STATS(cvp, udp); 3096 int error = 0; 3097 queue_head_t *qp; 3098 mutex_t *mp; 3099 mutex_t *mp_cache = NULL; 3100 queue_head_t *mqp = NULL; 3101 ulwp_t **ulwpp; 3102 ulwp_t *ulwp; 3103 ulwp_t *prev = NULL; 3104 int nlwpid = 0; 3105 int maxlwps = MAXLWPS; 3106 lwpid_t buffer[MAXLWPS]; 3107 lwpid_t *lwpid = buffer; 3108 3109 if (csp) 3110 tdb_incr(csp->cond_broadcast); 3111 3112 if (cvp->cond_waiters_kernel) /* someone sleeping in the kernel? 
*/ 3113 error = __lwp_cond_broadcast(cvp); 3114 3115 if (!cvp->cond_waiters_user) /* no one sleeping at user-level */ 3116 return (error); 3117 3118 /* 3119 * Move everyone from the condvar sleep queue to the mutex sleep 3120 * queue for the mutex that they will acquire on being waked up. 3121 * We can do this only if we own the mutex they will acquire. 3122 * If we do not own the mutex, or if their ul_cv_wake flag 3123 * is set, just dequeue and unpark them. 3124 * 3125 * We keep track of lwpids that are to be unparked in lwpid[]. 3126 * __lwp_unpark_all() is called to unpark all of them after 3127 * they have been removed from the sleep queue and the sleep 3128 * queue lock has been dropped. If we run out of space in our 3129 * on-stack buffer, we need to allocate more but we can't call 3130 * lmalloc() because we are holding a queue lock when the overflow 3131 * occurs and lmalloc() acquires a lock. We can't use alloca() 3132 * either because the application may have allocated a small 3133 * stack and we don't want to overrun the stack. So we call 3134 * alloc_lwpids() to allocate a bigger buffer using the mmap() 3135 * system call directly since that path acquires no locks. 3136 */ 3137 qp = queue_lock(cvp, CV); 3138 cvp->cond_waiters_user = 0; 3139 ulwpp = &qp->qh_head; 3140 while ((ulwp = *ulwpp) != NULL) { 3141 if (ulwp->ul_wchan != cvp) { 3142 prev = ulwp; 3143 ulwpp = &ulwp->ul_link; 3144 continue; 3145 } 3146 *ulwpp = ulwp->ul_link; 3147 if (qp->qh_tail == ulwp) 3148 qp->qh_tail = prev; 3149 qp->qh_qlen--; 3150 ulwp->ul_link = NULL; 3151 mp = ulwp->ul_cvmutex; /* his mutex */ 3152 ulwp->ul_cvmutex = NULL; 3153 ASSERT(mp != NULL); 3154 if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) { 3155 ulwp->ul_sleepq = NULL; 3156 ulwp->ul_wchan = NULL; 3157 ulwp->ul_cv_wake = 0; 3158 if (nlwpid == maxlwps) 3159 lwpid = alloc_lwpids(lwpid, &nlwpid, &maxlwps); 3160 lwpid[nlwpid++] = ulwp->ul_lwpid; 3161 } else { 3162 if (mp != mp_cache) { 3163 mp_cache = mp; 3164 if (mqp != NULL) 3165 queue_unlock(mqp); 3166 mqp = queue_lock(mp, MX); 3167 } 3168 enqueue(mqp, ulwp, mp, MX); 3169 mp->mutex_waiters = 1; 3170 } 3171 } 3172 if (mqp != NULL) 3173 queue_unlock(mqp); 3174 if (nlwpid == 0) { 3175 queue_unlock(qp); 3176 } else { 3177 no_preempt(self); 3178 queue_unlock(qp); 3179 if (nlwpid == 1) 3180 (void) __lwp_unpark(lwpid[0]); 3181 else 3182 (void) __lwp_unpark_all(lwpid, nlwpid); 3183 preempt(self); 3184 } 3185 if (lwpid != buffer) 3186 (void) _private_munmap(lwpid, maxlwps * sizeof (lwpid_t)); 3187 return (error); 3188 } 3189 3190 #pragma weak pthread_cond_destroy = _cond_destroy 3191 #pragma weak _pthread_cond_destroy = _cond_destroy 3192 #pragma weak cond_destroy = _cond_destroy 3193 int 3194 _cond_destroy(cond_t *cvp) 3195 { 3196 cvp->cond_magic = 0; 3197 tdb_sync_obj_deregister(cvp); 3198 return (0); 3199 } 3200 3201 #if defined(THREAD_DEBUG) 3202 void 3203 assert_no_libc_locks_held(void) 3204 { 3205 ASSERT(!curthread->ul_critical || curthread->ul_bindflags); 3206 } 3207 #endif 3208 3209 /* protected by link_lock */ 3210 uint64_t spin_lock_spin; 3211 uint64_t spin_lock_spin2; 3212 uint64_t spin_lock_sleep; 3213 uint64_t spin_lock_wakeup; 3214 3215 /* 3216 * Record spin lock statistics. 3217 * Called by a thread exiting itself in thrp_exit(). 3218 * Also called via atexit() from the thread calling 3219 * exit() to do all the other threads as well. 
3220 */ 3221 void 3222 record_spin_locks(ulwp_t *ulwp) 3223 { 3224 spin_lock_spin += ulwp->ul_spin_lock_spin; 3225 spin_lock_spin2 += ulwp->ul_spin_lock_spin2; 3226 spin_lock_sleep += ulwp->ul_spin_lock_sleep; 3227 spin_lock_wakeup += ulwp->ul_spin_lock_wakeup; 3228 ulwp->ul_spin_lock_spin = 0; 3229 ulwp->ul_spin_lock_spin2 = 0; 3230 ulwp->ul_spin_lock_sleep = 0; 3231 ulwp->ul_spin_lock_wakeup = 0; 3232 } 3233 3234 /* 3235 * atexit function: dump the queue statistics to stderr. 3236 */ 3237 #if !defined(__lint) 3238 #define fprintf _fprintf 3239 #endif 3240 #include <stdio.h> 3241 void 3242 dump_queue_statistics(void) 3243 { 3244 uberdata_t *udp = curthread->ul_uberdata; 3245 queue_head_t *qp; 3246 int qn; 3247 uint64_t spin_lock_total = 0; 3248 3249 if (udp->queue_head == NULL || thread_queue_dump == 0) 3250 return; 3251 3252 if (fprintf(stderr, "\n%5d mutex queues:\n", QHASHSIZE) < 0 || 3253 fprintf(stderr, "queue# lockcount max qlen\n") < 0) 3254 return; 3255 for (qn = 0, qp = udp->queue_head; qn < QHASHSIZE; qn++, qp++) { 3256 if (qp->qh_lockcount == 0) 3257 continue; 3258 spin_lock_total += qp->qh_lockcount; 3259 if (fprintf(stderr, "%5d %12llu%12u\n", qn, 3260 (u_longlong_t)qp->qh_lockcount, qp->qh_qmax) < 0) 3261 return; 3262 } 3263 3264 if (fprintf(stderr, "\n%5d condvar queues:\n", QHASHSIZE) < 0 || 3265 fprintf(stderr, "queue# lockcount max qlen\n") < 0) 3266 return; 3267 for (qn = 0; qn < QHASHSIZE; qn++, qp++) { 3268 if (qp->qh_lockcount == 0) 3269 continue; 3270 spin_lock_total += qp->qh_lockcount; 3271 if (fprintf(stderr, "%5d %12llu%12u\n", qn, 3272 (u_longlong_t)qp->qh_lockcount, qp->qh_qmax) < 0) 3273 return; 3274 } 3275 3276 (void) fprintf(stderr, "\n spin_lock_total = %10llu\n", 3277 (u_longlong_t)spin_lock_total); 3278 (void) fprintf(stderr, " spin_lock_spin = %10llu\n", 3279 (u_longlong_t)spin_lock_spin); 3280 (void) fprintf(stderr, " spin_lock_spin2 = %10llu\n", 3281 (u_longlong_t)spin_lock_spin2); 3282 (void) fprintf(stderr, " spin_lock_sleep = %10llu\n", 3283 (u_longlong_t)spin_lock_sleep); 3284 (void) fprintf(stderr, " spin_lock_wakeup = %10llu\n", 3285 (u_longlong_t)spin_lock_wakeup); 3286 } 3287