/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Implementation of turnstiles used to hold queues of threads blocked on
 * non-sleepable locks.  Sleepable locks use condition variables to
 * implement their queues.  Turnstiles differ from sleep queues in that
 * turnstile queues are assigned to a lock held by an owning thread.  Thus,
 * when one thread is enqueued onto a turnstile, it can lend its priority
 * to the owning thread.
 *
 * We wish to avoid bloating locks with an embedded turnstile, and we do
 * not want to use back-pointers in the locks for the same reason.  Thus,
 * we use a similar approach to that of Solaris 7 as described in Solaris
 * Internals by Jim Mauro and Richard McDougall.  Turnstiles are looked up
 * in a hash table based on the address of the lock.  Each entry in the
 * hash table is a linked list of turnstiles and is called a turnstile
 * chain.  Each chain contains a spin mutex that protects all of the
 * turnstiles in the chain.
 *
 * Each time a thread is created, a turnstile is malloc'd and attached to
 * that thread.  When a thread blocks on a lock, if it is the first thread
 * to block, it lends its turnstile to the lock.  If the lock already has
 * a turnstile, then it adds its turnstile to the free list hung off of
 * the lock's turnstile.  When a thread is woken up, it takes a turnstile
 * from the free list if there are any other waiters.  If it is the only
 * thread blocked on the lock, then it reclaims the turnstile associated
 * with the lock and removes it from the hash table.
 */
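
/*
 * Illustrative worked example (not in the original sources): with the
 * constants defined below (TC_SHIFT == 8, TC_TABLESIZE == 128, so
 * TC_MASK == 0x7f), a lock at a hypothetical address 0xc40b1234 hashes
 * to chain
 *
 *	((uintptr_t)0xc40b1234 >> 8) & 0x7f == 0xc40b12 & 0x7f == 0x12
 *
 * so its turnstile hangs off turnstile_chains[0x12].  Any lock whose
 * address differs only in the low 8 bits, or by a multiple of
 * TC_TABLESIZE << TC_SHIFT, shares that chain and its spin mutex.
 */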

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_turnstile_profiling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * Constants for the hash table of turnstile chains.  TC_SHIFT is a magic
 * number chosen because the sleep queues use the same value for the
 * shift.  Basically, we ignore the lower 8 bits of the address.
 * TC_TABLESIZE must be a power of two for TC_MASK to work properly.
 */
#define	TC_TABLESIZE	128			/* Must be power of 2. */
#define	TC_MASK		(TC_TABLESIZE - 1)
#define	TC_SHIFT	8
#define	TC_HASH(lock)	(((uintptr_t)(lock) >> TC_SHIFT) & TC_MASK)
#define	TC_LOOKUP(lock)	&turnstile_chains[TC_HASH(lock)]

/*
 * There are three different lists of turnstiles as follows.  The list
 * connected by ts_link entries is a per-thread list of all the turnstiles
 * attached to locks that we own.  This is used to fix up our priority when
 * a lock is released.  The other two lists use the ts_hash entries.  The
 * first of these two is the turnstile chain list that a turnstile is on
 * when it is attached to a lock.  The second list to use ts_hash is the
 * free list hung off of a turnstile that is attached to a lock.
 *
 * Each turnstile contains three lists of threads.  The two ts_blocked
 * lists are linked lists of threads blocked on the turnstile's lock.  One
 * list is for exclusive waiters, and the other is for shared waiters.
 * The ts_pending list is a linked list of threads previously awakened by
 * turnstile_signal() or turnstile_broadcast() that are waiting to be put
 * on the run queue.
 *
 * Locking key:
 *  c - turnstile chain lock
 *  q - td_contested lock
 */
struct turnstile {
	struct threadqueue ts_blocked[2];	/* (c + q) Blocked threads. */
	struct threadqueue ts_pending;		/* (c) Pending threads. */
	LIST_ENTRY(turnstile) ts_hash;		/* (c) Chain and free list. */
	LIST_ENTRY(turnstile) ts_link;		/* (q) Contested locks. */
	LIST_HEAD(, turnstile) ts_free;		/* (c) Free turnstiles. */
	struct lock_object *ts_lockobj;		/* (c) Lock we reference. */
	struct thread *ts_owner;		/* (c + q) Who owns the lock. */
};

struct turnstile_chain {
	LIST_HEAD(, turnstile) tc_turnstiles;	/* List of turnstiles. */
	struct mtx tc_lock;			/* Spin lock for this chain. */
#ifdef TURNSTILE_PROFILING
	u_int	tc_depth;			/* Length of tc_queues. */
	u_int	tc_max_depth;			/* Max length of tc_queues. */
#endif
};

#ifdef TURNSTILE_PROFILING
u_int turnstile_max_depth;
SYSCTL_NODE(_debug, OID_AUTO, turnstile, CTLFLAG_RD, 0, "turnstile profiling");
SYSCTL_NODE(_debug_turnstile, OID_AUTO, chains, CTLFLAG_RD, 0,
    "turnstile chain stats");
SYSCTL_UINT(_debug_turnstile, OID_AUTO, max_depth, CTLFLAG_RD,
    &turnstile_max_depth, 0, "maximum depth achieved of a single chain");
#endif
static struct mtx td_contested_lock;
static struct turnstile_chain turnstile_chains[TC_TABLESIZE];

static MALLOC_DEFINE(M_TURNSTILE, "turnstiles", "turnstiles");
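
/*
 * Hypothetical sketch (not part of this file) of how a lock
 * implementation is expected to drive the interface below when an
 * acquire attempt finds the lock held.  The 'lock' and 'owner'
 * variables and the lk_object member are stand-ins for whatever the
 * real lock code uses:
 */
#if 0
	turnstile_lock(&lock->lk_object);	/* lock the chain */
	if (/* the lock was released meanwhile; just retry */ 0)
		turnstile_release(&lock->lk_object);	/* unlock chain */
	else
		/* Blocks and returns with the chain unlocked. */
		turnstile_wait(&lock->lk_object, owner, TS_EXCLUSIVE_QUEUE);
#endif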

/*
 * Prototypes for non-exported routines.
 */
static void	init_turnstile0(void *dummy);
#ifdef TURNSTILE_PROFILING
static void	init_turnstile_profiling(void *arg);
#endif
static void	propagate_priority(struct thread *td);
static int	turnstile_adjust_thread(struct turnstile *ts,
		    struct thread *td);
static struct thread *turnstile_first_waiter(struct turnstile *ts);
static void	turnstile_setowner(struct turnstile *ts, struct thread *owner);

/*
 * Walks the chain of turnstiles and their owners to propagate the priority
 * of the thread being blocked to all the threads holding locks that have to
 * release their locks before this thread can run again.
 */
static void
propagate_priority(struct thread *td)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	pri = td->td_priority;
	ts = td->td_blocked;
	for (;;) {
		td = ts->ts_owner;

		if (td == NULL) {
			/*
			 * This might be a read lock with no owner.  There's
			 * not much we can do, so just bail.
			 */
			return;
		}

		MPASS(td->td_proc != NULL);
		MPASS(td->td_proc->p_magic == P_MAGIC);

		/*
		 * XXX: The owner of a turnstile can be stale if it is the
		 * first thread to grab a rlock of a rw lock.  In that case
		 * it is possible for us to be at SSLEEP or some other
		 * weird state.  We should probably just return if the state
		 * isn't SRUN or SLOCK.
		 */
		KASSERT(!TD_IS_SLEEPING(td),
		    ("sleeping thread (tid %d) owns a non-sleepable lock",
		    td->td_tid));

		/*
		 * If this thread already has higher priority than the
		 * thread that is being blocked, we are finished.
		 */
		if (td->td_priority <= pri)
			return;

		/*
		 * Bump this thread's priority.
		 */
		sched_lend_prio(td, pri);

		/*
		 * If lock holder is actually running or on the run queue
		 * then we are done.
		 */
		if (TD_IS_RUNNING(td) || TD_ON_RUNQ(td)) {
			MPASS(td->td_blocked == NULL);
			return;
		}

#ifndef SMP
		/*
		 * For UP, we check to see if td is curthread (this shouldn't
		 * ever happen, however, as it would mean we are in a
		 * deadlock).
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If we aren't blocked on a lock, we should be.
		 */
		KASSERT(TD_ON_LOCK(td), (
		    "thread %d(%s):%d holds %s but isn't blocked on a lock\n",
		    td->td_tid, td->td_proc->p_comm, td->td_state,
		    ts->ts_lockobj->lo_name));

		/*
		 * Pick up the lock that td is blocked on.
		 */
		ts = td->td_blocked;
		MPASS(ts != NULL);
		tc = TC_LOOKUP(ts->ts_lockobj);
		mtx_lock_spin(&tc->tc_lock);

		/* Resort td on the list if needed. */
		if (!turnstile_adjust_thread(ts, td)) {
			mtx_unlock_spin(&tc->tc_lock);
			return;
		}
		mtx_unlock_spin(&tc->tc_lock);
	}
}
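
/*
 * Worked example (illustrative only): suppose T1 at priority 80 blocks
 * on mutex M1 owned by T2 at priority 120, and T2 is itself blocked on
 * M2 owned by T3 at priority 140 (lower numeric values are more
 * important).  propagate_priority(T1) first lends priority 80 to T2;
 * since T2 is still blocked, the loop follows T2->td_blocked to M2's
 * turnstile and lends 80 to T3 as well, re-sorting each thread on its
 * blocked queue along the way.  The walk stops as soon as it reaches a
 * thread that is running, is on a run queue, or is already at least as
 * important as priority 80.
 */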

/*
 * Adjust the thread's position on a turnstile after its priority has been
 * changed.
 */
static int
turnstile_adjust_thread(struct turnstile *ts, struct thread *td)
{
	struct turnstile_chain *tc;
	struct thread *td1, *td2;
	int queue;

	mtx_assert(&sched_lock, MA_OWNED);
	MPASS(TD_ON_LOCK(td));

	/*
	 * This thread may not be blocked on this turnstile anymore
	 * but instead might already be woken up on another CPU
	 * that is waiting on sched_lock in turnstile_unpend() to
	 * finish waking this thread up.  We can detect this case
	 * by checking to see if this thread has been given a
	 * turnstile by either turnstile_signal() or
	 * turnstile_broadcast().  In this case, treat the thread as
	 * if it was already running.
	 */
	if (td->td_turnstile != NULL)
		return (0);

	/*
	 * Check if the thread needs to be moved on the blocked chain.
	 * It needs to be moved if either its priority is lower than
	 * the previous thread or higher than the next thread.
	 */
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	td1 = TAILQ_PREV(td, threadqueue, td_lockq);
	td2 = TAILQ_NEXT(td, td_lockq);
	if ((td1 != NULL && td->td_priority < td1->td_priority) ||
	    (td2 != NULL && td->td_priority > td2->td_priority)) {

		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved to.
		 */
		queue = td->td_tsqueue;
		MPASS(queue == TS_EXCLUSIVE_QUEUE || queue == TS_SHARED_QUEUE);
		mtx_lock_spin(&td_contested_lock);
		TAILQ_REMOVE(&ts->ts_blocked[queue], td, td_lockq);
		TAILQ_FOREACH(td1, &ts->ts_blocked[queue], td_lockq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_priority > td->td_priority)
				break;
		}

		if (td1 == NULL)
			TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
		else
			TAILQ_INSERT_BEFORE(td1, td, td_lockq);
		mtx_unlock_spin(&td_contested_lock);
		if (td1 == NULL)
			CTR3(KTR_LOCK,
		    "turnstile_adjust_thread: td %d put at tail on [%p] %s",
			    td->td_tid, ts->ts_lockobj,
			    ts->ts_lockobj->lo_name);
		else
			CTR4(KTR_LOCK,
		    "turnstile_adjust_thread: td %d moved before %d on [%p] %s",
			    td->td_tid, td1->td_tid, ts->ts_lockobj,
			    ts->ts_lockobj->lo_name);
	}
	return (1);
}

/*
 * Early initialization of turnstiles.  This is not done via a SYSINIT()
 * since this needs to be initialized very early when mutexes are first
 * initialized.
 */
void
init_turnstiles(void)
{
	int i;

	for (i = 0; i < TC_TABLESIZE; i++) {
		LIST_INIT(&turnstile_chains[i].tc_turnstiles);
		mtx_init(&turnstile_chains[i].tc_lock, "turnstile chain",
		    NULL, MTX_SPIN);
	}
	mtx_init(&td_contested_lock, "td_contested", NULL, MTX_SPIN);
	LIST_INIT(&thread0.td_contested);
	thread0.td_turnstile = NULL;
}

#ifdef TURNSTILE_PROFILING
static void
init_turnstile_profiling(void *arg)
{
	struct sysctl_oid *chain_oid;
	char chain_name[10];
	int i;

	for (i = 0; i < TC_TABLESIZE; i++) {
		snprintf(chain_name, sizeof(chain_name), "%d", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_turnstile_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD, NULL, "turnstile chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &turnstile_chains[i].tc_depth, 0,
		    NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &turnstile_chains[i].tc_max_depth,
		    0, NULL);
	}
}
SYSINIT(turnstile_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
    init_turnstile_profiling, NULL);
#endif

static void
init_turnstile0(void *dummy)
{

	thread0.td_turnstile = turnstile_alloc();
}
SYSINIT(turnstile0, SI_SUB_LOCK, SI_ORDER_ANY, init_turnstile0, NULL);

/*
 * Update a thread on the turnstile list after its priority has been changed.
 * The old priority is passed in as an argument.
 */
void
turnstile_adjust(struct thread *td, u_char oldpri)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;

	mtx_assert(&sched_lock, MA_OWNED);
	MPASS(TD_ON_LOCK(td));

	/*
	 * Pick up the lock that td is blocked on.
	 */
	ts = td->td_blocked;
	MPASS(ts != NULL);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_lock_spin(&tc->tc_lock);

	/* Resort the turnstile on the list. */
	if (!turnstile_adjust_thread(ts, td)) {
		mtx_unlock_spin(&tc->tc_lock);
		return;
	}

	/*
	 * If our priority was lowered and we are at the head of the
	 * turnstile, then propagate our new priority up the chain.
	 * Note that we currently don't try to revoke lent priorities
	 * when our priority goes up.
	 */
	MPASS(td->td_tsqueue == TS_EXCLUSIVE_QUEUE ||
	    td->td_tsqueue == TS_SHARED_QUEUE);
	if (td == TAILQ_FIRST(&ts->ts_blocked[td->td_tsqueue]) &&
	    td->td_priority < oldpri) {
		mtx_unlock_spin(&tc->tc_lock);
		propagate_priority(td);
	} else
		mtx_unlock_spin(&tc->tc_lock);
}
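
/*
 * Hypothetical sketch of the expected caller (the real caller lives in
 * the scheduler, not in this file): when a blocked thread's priority
 * changes, the scheduler should re-sort it on its turnstile and
 * re-propagate, roughly as follows:
 */
#if 0
	oldpri = td->td_priority;
	td->td_priority = newpri;	/* however the scheduler does this */
	if (TD_ON_LOCK(td))
		turnstile_adjust(td, oldpri);
#endif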

/*
 * Set the owner of the lock this turnstile is attached to.
 */
static void
turnstile_setowner(struct turnstile *ts, struct thread *owner)
{

	mtx_assert(&td_contested_lock, MA_OWNED);
	MPASS(ts->ts_owner == NULL);

	/* A shared lock might not have an owner. */
	if (owner == NULL)
		return;

	MPASS(owner->td_proc->p_magic == P_MAGIC);
	ts->ts_owner = owner;
	LIST_INSERT_HEAD(&owner->td_contested, ts, ts_link);
}

/*
 * Malloc a turnstile for a new thread, initialize it and return it.
 */
struct turnstile *
turnstile_alloc(void)
{
	struct turnstile *ts;

	ts = malloc(sizeof(struct turnstile), M_TURNSTILE, M_WAITOK | M_ZERO);
	TAILQ_INIT(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]);
	TAILQ_INIT(&ts->ts_blocked[TS_SHARED_QUEUE]);
	TAILQ_INIT(&ts->ts_pending);
	LIST_INIT(&ts->ts_free);
	return (ts);
}

/*
 * Free a turnstile when a thread is destroyed.
 */
void
turnstile_free(struct turnstile *ts)
{

	MPASS(ts != NULL);
	MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]));
	MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]));
	MPASS(TAILQ_EMPTY(&ts->ts_pending));
	free(ts, M_TURNSTILE);
}

/*
 * Lock the turnstile chain associated with the specified lock.
 */
void
turnstile_lock(struct lock_object *lock)
{
	struct turnstile_chain *tc;

	tc = TC_LOOKUP(lock);
	mtx_lock_spin(&tc->tc_lock);
}

/*
 * Look up the turnstile for a lock in the hash table.  The turnstile
 * chain associated with the lock must already be locked.  If no turnstile
 * is found in the hash table, NULL is returned.
 */
struct turnstile *
turnstile_lookup(struct lock_object *lock)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;

	tc = TC_LOOKUP(lock);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
		if (ts->ts_lockobj == lock)
			return (ts);
	return (NULL);
}

/*
 * Unlock the turnstile chain associated with a given lock.
 */
void
turnstile_release(struct lock_object *lock)
{
	struct turnstile_chain *tc;

	tc = TC_LOOKUP(lock);
	mtx_unlock_spin(&tc->tc_lock);
}
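
/*
 * Sketch of the intended lifetime pairing (the actual calls are made
 * from the thread creation and destruction code, which is not part of
 * this file): every thread carries one spare turnstile for its entire
 * life, exactly as init_turnstile0() above does for thread0.
 */
#if 0
	td->td_turnstile = turnstile_alloc();	/* when td is created */
	/* ... */
	turnstile_free(td->td_turnstile);	/* when td is destroyed */
#endif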

/*
 * Return a pointer to the highest priority thread waiting on this
 * turnstile, or NULL if the turnstile has no waiters.
 */
static struct thread *
turnstile_first_waiter(struct turnstile *ts)
{
	struct thread *std, *xtd;

	std = TAILQ_FIRST(&ts->ts_blocked[TS_SHARED_QUEUE]);
	xtd = TAILQ_FIRST(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]);
	if (xtd == NULL || (std != NULL && std->td_priority < xtd->td_priority))
		return (std);
	return (xtd);
}

/*
 * Take ownership of a turnstile and adjust the priority of the new
 * owner appropriately.
 */
void
turnstile_claim(struct lock_object *lock)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;
	struct thread *td, *owner;

	tc = TC_LOOKUP(lock);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	ts = turnstile_lookup(lock);
	MPASS(ts != NULL);

	owner = curthread;
	mtx_lock_spin(&td_contested_lock);
	turnstile_setowner(ts, owner);
	mtx_unlock_spin(&td_contested_lock);

	td = turnstile_first_waiter(ts);
	MPASS(td != NULL);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	mtx_unlock_spin(&tc->tc_lock);

	/*
	 * Update the priority of the new owner if needed.
	 */
	mtx_lock_spin(&sched_lock);
	if (td->td_priority < owner->td_priority)
		sched_lend_prio(owner, td->td_priority);
	mtx_unlock_spin(&sched_lock);
}
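
/*
 * Illustrative example: if the first shared waiter is at priority 90
 * and the first exclusive waiter is at priority 110, then
 * turnstile_first_waiter() returns the shared waiter (lower numeric
 * priority is more important), so a thread claiming the turnstile at
 * priority 130 would be lent priority 90 by turnstile_claim() above.
 */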

/*
 * Block the current thread on the turnstile associated with 'lock'.  This
 * function will context switch and not return until this thread has been
 * woken back up.  This function must be called with the appropriate
 * turnstile chain locked and will return with it unlocked.
 */
void
turnstile_wait(struct lock_object *lock, struct thread *owner, int queue)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;
	struct thread *td, *td1;

	td = curthread;
	tc = TC_LOOKUP(lock);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	MPASS(td->td_turnstile != NULL);
	if (queue == TS_SHARED_QUEUE)
		MPASS(owner != NULL);
	if (owner)
		MPASS(owner->td_proc->p_magic == P_MAGIC);
	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);

	/* Look up the turnstile associated with the lock 'lock'. */
	ts = turnstile_lookup(lock);

	/*
	 * If the lock does not already have a turnstile, use this thread's
	 * turnstile.  Otherwise insert the current thread into the
	 * turnstile already in use by this lock.
	 */
	if (ts == NULL) {
#ifdef TURNSTILE_PROFILING
		tc->tc_depth++;
		if (tc->tc_depth > tc->tc_max_depth) {
			tc->tc_max_depth = tc->tc_depth;
			if (tc->tc_max_depth > turnstile_max_depth)
				turnstile_max_depth = tc->tc_max_depth;
		}
#endif
		ts = td->td_turnstile;
		LIST_INSERT_HEAD(&tc->tc_turnstiles, ts, ts_hash);
		KASSERT(TAILQ_EMPTY(&ts->ts_pending),
		    ("thread's turnstile has pending threads"));
		KASSERT(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]),
		    ("thread's turnstile has exclusive waiters"));
		KASSERT(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]),
		    ("thread's turnstile has shared waiters"));
		KASSERT(LIST_EMPTY(&ts->ts_free),
		    ("thread's turnstile has a non-empty free list"));
		KASSERT(ts->ts_lockobj == NULL, ("stale ts_lockobj pointer"));
		ts->ts_lockobj = lock;
		mtx_lock_spin(&td_contested_lock);
		TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
		turnstile_setowner(ts, owner);
		mtx_unlock_spin(&td_contested_lock);
	} else {
		TAILQ_FOREACH(td1, &ts->ts_blocked[queue], td_lockq)
			if (td1->td_priority > td->td_priority)
				break;
		mtx_lock_spin(&td_contested_lock);
		if (td1 != NULL)
			TAILQ_INSERT_BEFORE(td1, td, td_lockq);
		else
			TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
		MPASS(owner == ts->ts_owner);
		mtx_unlock_spin(&td_contested_lock);
		MPASS(td->td_turnstile != NULL);
		LIST_INSERT_HEAD(&ts->ts_free, td->td_turnstile, ts_hash);
	}
	td->td_turnstile = NULL;
	mtx_unlock_spin(&tc->tc_lock);

	mtx_lock_spin(&sched_lock);
	/*
	 * Handle the race condition where a thread on another CPU that owns
	 * lock 'lock' could have woken us up in between dropping the
	 * turnstile chain lock and acquiring sched_lock.
	 */
	if (td->td_flags & TDF_TSNOBLOCK) {
		td->td_flags &= ~TDF_TSNOBLOCK;
		mtx_unlock_spin(&sched_lock);
		return;
	}

#ifdef notyet
	/*
	 * If we're borrowing an interrupted thread's VM context, we
	 * must clean up before going to sleep.
	 */
	if (td->td_ithd != NULL) {
		struct ithd *it = td->td_ithd;

		if (it->it_interrupted) {
			if (LOCK_LOG_TEST(lock, 0))
				CTR3(KTR_LOCK, "%s: %p interrupted %p",
				    __func__, it, it->it_interrupted);
			intr_thd_fixup(it);
		}
	}
#endif

	/* Save who we are blocked on and switch. */
	td->td_tsqueue = queue;
	td->td_blocked = ts;
	td->td_lockname = lock->lo_name;
	TD_SET_LOCK(td);
	propagate_priority(td);

	if (LOCK_LOG_TEST(lock, 0))
		CTR4(KTR_LOCK, "%s: td %d blocked on [%p] %s", __func__,
		    td->td_tid, lock, lock->lo_name);

	mi_switch(SW_VOL, NULL);

	if (LOCK_LOG_TEST(lock, 0))
		CTR4(KTR_LOCK, "%s: td %d free from blocked on [%p] %s",
		    __func__, td->td_tid, lock, lock->lo_name);

	mtx_unlock_spin(&sched_lock);
}
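
/*
 * Illustrative timeline of the wakeup race handled above: CPU0 enqueues
 * itself in turnstile_wait() and drops the chain lock; before it takes
 * sched_lock and marks itself TD_ON_LOCK, CPU1 (the lock owner) runs
 * turnstile_signal() and turnstile_unpend(), finds the thread not yet
 * marked as blocked, and sets TDF_TSNOBLOCK instead of putting it on a
 * run queue.  When CPU0 finally acquires sched_lock it sees the flag,
 * clears it, and returns to retry the lock rather than switching out.
 */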

/*
 * Pick the highest priority thread on this turnstile and put it on the
 * pending list.  This must be called with the turnstile chain locked.
 */
int
turnstile_signal(struct turnstile *ts, int queue)
{
	struct turnstile_chain *tc;
	struct thread *td;
	int empty;

	MPASS(ts != NULL);
	MPASS(curthread->td_proc->p_magic == P_MAGIC);
	MPASS(ts->ts_owner == curthread ||
	    (queue == TS_EXCLUSIVE_QUEUE && ts->ts_owner == NULL));
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);

	/*
	 * Pick the highest priority thread blocked on this lock and
	 * move it to the pending list.
	 */
	td = TAILQ_FIRST(&ts->ts_blocked[queue]);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	mtx_lock_spin(&td_contested_lock);
	TAILQ_REMOVE(&ts->ts_blocked[queue], td, td_lockq);
	mtx_unlock_spin(&td_contested_lock);
	TAILQ_INSERT_TAIL(&ts->ts_pending, td, td_lockq);

	/*
	 * If the turnstile is now empty, remove it from its chain and
	 * give it to the about-to-be-woken thread.  Otherwise take a
	 * turnstile from the free list and give it to the thread.
	 */
	empty = TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) &&
	    TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]);
	if (empty) {
		MPASS(LIST_EMPTY(&ts->ts_free));
#ifdef TURNSTILE_PROFILING
		tc->tc_depth--;
#endif
	} else
		ts = LIST_FIRST(&ts->ts_free);
	MPASS(ts != NULL);
	LIST_REMOVE(ts, ts_hash);
	td->td_turnstile = ts;

	return (empty);
}

/*
 * Put all blocked threads on the pending list.  This must be called with
 * the turnstile chain locked.
 */
void
turnstile_broadcast(struct turnstile *ts, int queue)
{
	struct turnstile_chain *tc;
	struct turnstile *ts1;
	struct thread *td;

	MPASS(ts != NULL);
	MPASS(curthread->td_proc->p_magic == P_MAGIC);
	MPASS(ts->ts_owner == curthread ||
	    (queue == TS_EXCLUSIVE_QUEUE && ts->ts_owner == NULL));
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);

	/*
	 * Transfer the blocked list to the pending list.
	 */
	mtx_lock_spin(&td_contested_lock);
	TAILQ_CONCAT(&ts->ts_pending, &ts->ts_blocked[queue], td_lockq);
	mtx_unlock_spin(&td_contested_lock);

	/*
	 * Give a turnstile to each thread.  The last thread gets
	 * this turnstile if the turnstile is empty.
	 */
	TAILQ_FOREACH(td, &ts->ts_pending, td_lockq) {
		if (LIST_EMPTY(&ts->ts_free)) {
			MPASS(TAILQ_NEXT(td, td_lockq) == NULL);
			ts1 = ts;
#ifdef TURNSTILE_PROFILING
			tc->tc_depth--;
#endif
		} else
			ts1 = LIST_FIRST(&ts->ts_free);
		MPASS(ts1 != NULL);
		LIST_REMOVE(ts1, ts_hash);
		td->td_turnstile = ts1;
	}
}
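
/*
 * Usage note (illustrative sketch, not from this file): an unlock path
 * that hands the lock to a single exclusive waiter would pair the calls
 * roughly as below, assuming the TS_EXCLUSIVE_LOCK owner type from
 * <sys/turnstile.h>; waking all shared waiters would instead use
 * turnstile_broadcast(ts, TS_SHARED_QUEUE) before the unpend.
 */
#if 0
	turnstile_lock(&lock->lk_object);
	ts = turnstile_lookup(&lock->lk_object);
	if (turnstile_signal(ts, TS_EXCLUSIVE_QUEUE)) {
		/* Last waiter: clear any waiters flag in the lock here. */
	}
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);	/* drops chain lock */
#endif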

/*
 * Wake up all threads on the pending list and adjust the priority of the
 * current thread appropriately.  This must be called with the turnstile
 * chain locked.
 */
void
turnstile_unpend(struct turnstile *ts, int owner_type)
{
	TAILQ_HEAD(, thread) pending_threads;
	struct turnstile_chain *tc;
	struct thread *td;
	u_char cp, pri;

	MPASS(ts != NULL);
	MPASS(ts->ts_owner == curthread ||
	    (owner_type == TS_SHARED_LOCK && ts->ts_owner == NULL));
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	MPASS(!TAILQ_EMPTY(&ts->ts_pending));

	/*
	 * Move the list of pending threads out of the turnstile and
	 * into a local variable.
	 */
	TAILQ_INIT(&pending_threads);
	TAILQ_CONCAT(&pending_threads, &ts->ts_pending, td_lockq);
#ifdef INVARIANTS
	if (TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) &&
	    TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]))
		ts->ts_lockobj = NULL;
#endif

	/*
	 * Remove the turnstile from this thread's list of contested locks
	 * since this thread doesn't own it anymore.  New threads will
	 * not be blocking on the turnstile until it is claimed by a new
	 * owner.  There might not be a current owner if this is a shared
	 * lock.
	 */
	if (ts->ts_owner != NULL) {
		mtx_lock_spin(&td_contested_lock);
		ts->ts_owner = NULL;
		LIST_REMOVE(ts, ts_link);
		mtx_unlock_spin(&td_contested_lock);
	}
	critical_enter();
	mtx_unlock_spin(&tc->tc_lock);

	/*
	 * Adjust the priority of curthread based on other contested
	 * locks it owns.  Don't lower the priority below the base
	 * priority, however.
	 */
	td = curthread;
	pri = PRI_MAX;
	mtx_lock_spin(&sched_lock);
	mtx_lock_spin(&td_contested_lock);
	LIST_FOREACH(ts, &td->td_contested, ts_link) {
		cp = turnstile_first_waiter(ts)->td_priority;
		if (cp < pri)
			pri = cp;
	}
	mtx_unlock_spin(&td_contested_lock);
	sched_unlend_prio(td, pri);

	/*
	 * Wake up all the pending threads.  If a thread is not blocked
	 * on a lock, then it is currently executing on another CPU in
	 * turnstile_wait() or sitting on a run queue waiting to resume
	 * in turnstile_wait().  Set a flag to force it to try to acquire
	 * the lock again instead of blocking.
	 */
	while (!TAILQ_EMPTY(&pending_threads)) {
		td = TAILQ_FIRST(&pending_threads);
		TAILQ_REMOVE(&pending_threads, td, td_lockq);
		MPASS(td->td_proc->p_magic == P_MAGIC);
		if (TD_ON_LOCK(td)) {
			td->td_blocked = NULL;
			td->td_lockname = NULL;
#ifdef INVARIANTS
			td->td_tsqueue = 0xff;
#endif
			TD_CLR_LOCK(td);
			MPASS(TD_CAN_RUN(td));
			setrunqueue(td, SRQ_BORING);
		} else {
			td->td_flags |= TDF_TSNOBLOCK;
			MPASS(TD_IS_RUNNING(td) || TD_ON_RUNQ(td));
		}
	}
	critical_exit();
	mtx_unlock_spin(&sched_lock);
}

/*
 * Return the first thread in a turnstile.
 */
struct thread *
turnstile_head(struct turnstile *ts, int queue)
{
#ifdef INVARIANTS
	struct turnstile_chain *tc;

	MPASS(ts != NULL);
	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
#endif
	return (TAILQ_FIRST(&ts->ts_blocked[queue]));
}
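
/*
 * The DDB command below accepts either a lock address or a turnstile
 * address.  A hypothetical session (addresses and names invented for
 * illustration) would look like:
 *
 *	db> show turnstile 0xc40b1234
 *	Lock: 0xc40b1234 - (sleep mutex) example mutex
 *	Lock Owner: ...
 *	Shared Waiters:
 *		empty
 *	Exclusive Waiters:
 *		...
 *	Pending Threads:
 *		empty
 */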

#ifdef DDB
static void
print_thread(struct thread *td, const char *prefix)
{

	db_printf("%s%p (tid %d, pid %d, \"%s\")\n", prefix, td, td->td_tid,
	    td->td_proc->p_pid, td->td_proc->p_comm);
}

static void
print_queue(struct threadqueue *queue, const char *header, const char *prefix)
{
	struct thread *td;

	db_printf("%s:\n", header);
	if (TAILQ_EMPTY(queue)) {
		db_printf("%sempty\n", prefix);
		return;
	}
	TAILQ_FOREACH(td, queue, td_lockq) {
		print_thread(td, prefix);
	}
}

DB_SHOW_COMMAND(turnstile, db_show_turnstile)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;
	struct lock_object *lock;
	int i;

	if (!have_addr)
		return;

	/*
	 * First, see if there is an active turnstile for the lock indicated
	 * by the address.
	 */
	lock = (struct lock_object *)addr;
	tc = TC_LOOKUP(lock);
	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
		if (ts->ts_lockobj == lock)
			goto found;

	/*
	 * Second, see if there is an active turnstile at the address
	 * indicated.
	 */
	for (i = 0; i < TC_TABLESIZE; i++)
		LIST_FOREACH(ts, &turnstile_chains[i].tc_turnstiles, ts_hash) {
			if (ts == (struct turnstile *)addr)
				goto found;
		}

	db_printf("Unable to locate a turnstile via %p\n", (void *)addr);
	return;
found:
	lock = ts->ts_lockobj;
	db_printf("Lock: %p - (%s) %s\n", lock, LOCK_CLASS(lock)->lc_name,
	    lock->lo_name);
	if (ts->ts_owner)
		print_thread(ts->ts_owner, "Lock Owner: ");
	else
		db_printf("Lock Owner: none\n");
	print_queue(&ts->ts_blocked[TS_SHARED_QUEUE], "Shared Waiters", "\t");
	print_queue(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE], "Exclusive Waiters",
	    "\t");
	print_queue(&ts->ts_pending, "Pending Threads", "\t");
}
#endif