/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright 2019 Joyent, Inc.
 */

#include <sys/param.h>
#include <sys/thread.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/cpuvar.h>
#include <sys/sobject.h>
#include <sys/turnstile.h>
#include <sys/rwlock.h>
#include <sys/rwlock_impl.h>
#include <sys/atomic.h>
#include <sys/lockstat.h>

/*
 * Big Theory Statement for readers/writer locking primitives.
 *
 * An rwlock provides exclusive access to a single thread ("writer") or
 * concurrent access to multiple threads ("readers").  See rwlock(9F)
 * for a full description of the interfaces and programming model.
 * The rest of this comment describes the implementation.
 *
 * An rwlock is a single word with the following structure:
 *
 *      ---------------------------------------------------------------------
 *      | OWNER (writer) or HOLD COUNT (readers)   | WRLOCK | WRWANT | WAIT |
 *      ---------------------------------------------------------------------
 *                      63 / 31 .. 3                   2        1       0
 *
 * The waiters bit (0) indicates whether any threads are blocked waiting
 * for the lock.  The write-wanted bit (1) indicates whether any threads
 * are blocked waiting for write access.  The write-locked bit (2) indicates
 * whether the lock is held by a writer, which determines whether the upper
 * bits (3..31 in ILP32, 3..63 in LP64) should be interpreted as the owner
 * (thread pointer) or the hold count (number of readers).
 *
 * In the absence of any contention, a writer gets the lock by setting
 * this word to (curthread | RW_WRITE_LOCKED); a reader gets the lock
 * by incrementing the hold count (i.e. adding 8, aka RW_READ_LOCK).
 *
 * A writer will fail to acquire the lock if any other thread owns it.
 * A reader will fail if the lock is either owned (in the RW_READER and
 * RW_READER_STARVEWRITER cases) or wanted by a writer (in the RW_READER
 * case).  rw_tryenter() returns 0 in these cases; rw_enter() blocks until
 * the lock becomes available.
 *
 * When a thread blocks it acquires the rwlock's hashed turnstile lock and
 * attempts to set RW_HAS_WAITERS (and RW_WRITE_WANTED in the writer case)
 * atomically *only if the lock still appears busy*.  A thread must never
 * accidentally block for an available lock since there would be no owner
 * to awaken it.  casip() provides the required atomicity.  Once casip()
 * succeeds, the decision to block becomes final and irreversible.  The
 * thread will not become runnable again until it has been granted ownership
 * of the lock via direct handoff from a former owner as described below.
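 *
 * To make the encoding above concrete, here is a sketch of a few lock-word
 * values (illustrative only; it assumes the bit definitions described above
 * as provided by sys/rwlock_impl.h):
 *
 *      lp->rw_wwwh == 0                                lock is free
 *      lp->rw_wwwh == 3 * RW_READ_LOCK                 held by three readers
 *      lp->rw_wwwh == 3 * RW_READ_LOCK |
 *          RW_HAS_WAITERS | RW_WRITE_WANTED            three readers, with a
 *                                                      blocked writer
 *      lp->rw_wwwh == RW_WRITE_LOCK(owner)             held by "owner" as
 *                                                      writer, uncontended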
 *
 * In the absence of any waiters, rw_exit() just clears the lock (if it
 * is write-locked) or decrements the hold count (if it is read-locked).
 * Note that even if waiters are present, decrementing the hold count
 * to a non-zero value requires no special action since the lock is still
 * held by at least one other thread.
 *
 * On the "final exit" (transition to unheld state) of a lock with waiters,
 * rw_exit_wakeup() grabs the turnstile lock and transfers ownership directly
 * to the next writer or set of readers.  There are several advantages to this
 * approach: (1) it closes all windows for priority inversion (when a new
 * writer has grabbed the lock but has not yet inherited from blocked readers);
 * (2) it prevents starvation of equal-priority threads by granting the lock
 * in FIFO order; (3) it eliminates the need for a write-wanted count -- a
 * single bit suffices because the lock remains held until all waiting
 * writers are gone; (4) when we awaken N readers we can perform a single
 * "atomic_add(&x, N)" to set the total hold count rather than having all N
 * threads fight for the cache to perform an "atomic_add(&x, 1)" upon wakeup.
 *
 * The most interesting policy decision in rw_exit_wakeup() is which thread
 * to wake.  Starvation is always possible with priority-based scheduling,
 * but any sane wakeup policy should at least satisfy these requirements:
 *
 * (1) The highest-priority thread in the system should not starve.
 * (2) The highest-priority writer should not starve.
 * (3) No writer should starve due to lower-priority threads.
 * (4) No reader should starve due to lower-priority writers.
 * (5) If all threads have equal priority, none of them should starve.
 *
 * We used to employ a writers-always-win policy, which doesn't even
 * satisfy (1): a steady stream of low-priority writers can starve out
 * a real-time reader!  This is clearly a broken policy -- it violates
 * (1), (4), and (5) -- but it's how rwlocks always used to behave.
 *
 * A round-robin policy (exiting readers grant the lock to blocked writers
 * and vice versa) satisfies all but (3): a single high-priority writer
 * and many low-priority readers can starve out medium-priority writers.
 *
 * A strict priority policy (grant the lock to the highest priority blocked
 * thread) satisfies everything but (2): a steady stream of high-priority
 * readers can permanently starve the highest-priority writer.
 *
 * The reason we care about (2) is that it's important to process writers
 * reasonably quickly -- even if they're low priority -- because their very
 * presence causes all readers to take the slow (blocking) path through this
 * code.  There is also a general sense that writers deserve some degree of
 * deference because they're updating the data upon which all readers act.
 * Presumably this data should not be allowed to become arbitrarily stale
 * due to writer starvation.  Finally, it seems reasonable to level the
 * playing field a bit to compensate for the fact that it's so much harder
 * for a writer to get in when there are already many readers present.
 *
 * A hybrid of round-robin and strict priority can be made to satisfy
 * all five criteria.
 * In this "writer priority policy" exiting readers
 * always grant the lock to waiting writers, but exiting writers only
 * grant the lock to readers of the same or higher priority than the
 * highest-priority blocked writer.  Thus requirement (2) is satisfied,
 * necessarily, by a willful act of priority inversion: an exiting reader
 * will grant the lock to a blocked writer even if there are blocked
 * readers of higher priority.  The situation is mitigated by the fact
 * that writers always inherit priority from blocked readers, and the
 * writer will awaken those readers as soon as it exits the lock.
 *
 * Finally, note that this hybrid scheme -- and indeed, any scheme that
 * satisfies requirement (2) -- has an important consequence: if a lock is
 * held as reader and a writer subsequently becomes blocked, any further
 * readers must be blocked to avoid writer starvation.  This implementation
 * detail has ramifications for the semantics of rwlocks, as it prohibits
 * recursively acquiring an rwlock as reader: any writer that wishes to
 * acquire the lock after the first but before the second acquisition as
 * reader will block the second acquisition -- resulting in deadlock.  This
 * itself is not necessarily prohibitive, as it is often straightforward to
 * prevent a single thread from recursively acquiring an rwlock as reader.
 * However, a more subtle situation arises when both a traditional mutex and
 * a reader lock are acquired by two different threads in opposite order.
 * (That is, one thread first acquires the mutex and then the rwlock as
 * reader; the other acquires the rwlock as reader and then the mutex.)  As
 * with the single threaded case, this is fine absent a blocked writer: the
 * thread that acquires the mutex before acquiring the rwlock as reader will
 * be able to successfully acquire the rwlock -- even as/if the other thread
 * has the rwlock as reader and is blocked on the held mutex.  However, if
 * an unrelated writer (that is, a third thread) becomes blocked on the
 * rwlock after the first thread acquires the rwlock as reader but before
 * it's able to acquire the mutex, the second thread -- with the mutex held
 * -- will not be able to acquire the rwlock as reader due to the waiting
 * writer, deadlocking the three threads.  Unlike the single-threaded
 * (recursive) rwlock acquisition case, this case can be quite a bit
 * thornier to fix, especially as there is nothing inherently wrong in the
 * locking strategy: the deadlock is really induced by requirement (2), not
 * the consumers of the rwlock.  To permit such consumers, we allow rwlock
 * acquirers to explicitly opt out of requirement (2) by specifying
 * RW_READER_STARVEWRITER when acquiring the rwlock.  This (obviously) means
 * that infinite readers can starve writers, but it also allows for
 * multiple readers in the presence of other synchronization primitives
 * without regard for lock-ordering.  And while certainly odd (and perhaps
 * unwise), RW_READER_STARVEWRITER can be safely used alongside RW_READER on
 * the same lock -- RW_READER_STARVEWRITER describes only the act of lock
 * acquisition with respect to waiting writers, not the lock itself.
 *
 * rw_downgrade() follows the same wakeup policy as an exiting writer.
 *
 * rw_tryupgrade() has the same failure mode as rw_tryenter() for a
 * write lock.  Both honor the WRITE_WANTED bit by specification.
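 *
 * A common consumer idiom built on that failure mode (a sketch only, not
 * part of this file; "lock" and the revalidation step are hypothetical) is
 * to fall back to a full write acquisition when the upgrade fails:
 *
 *      if (!rw_tryupgrade(&lock)) {
 *              rw_exit(&lock);
 *              rw_enter(&lock, RW_WRITER);
 *              (revalidate state here -- it may have changed while unlocked)
 *      }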
 *
 * The following rules apply to manipulation of rwlock internal state:
 *
 * (1) The rwlock is only modified via the atomic primitives casip()
 *     and atomic_add_ip().
 *
 * (2) The waiters bit and write-wanted bit are only modified under
 *     turnstile_lookup().  This ensures that the turnstile is consistent
 *     with the rwlock.
 *
 * (3) Waiters receive the lock by direct handoff from the previous
 *     owner.  Therefore, waiters *always* wake up holding the lock.
 */

/*
 * The sobj_ops vector exports a set of functions needed when a thread
 * is asleep on a synchronization object of a given type.
 */
static sobj_ops_t rw_sobj_ops = {
        SOBJ_RWLOCK, rw_owner, turnstile_stay_asleep, turnstile_change_pri
};

/*
 * If the system panics on an rwlock, save the address of the offending
 * rwlock in panic_rwlock_addr, and save the contents in panic_rwlock.
 */
static rwlock_impl_t panic_rwlock;
static rwlock_impl_t *panic_rwlock_addr;

static void
rw_panic(char *msg, rwlock_impl_t *lp)
{
        if (panicstr)
                return;

        if (atomic_cas_ptr(&panic_rwlock_addr, NULL, lp) == NULL)
                panic_rwlock = *lp;

        panic("%s, lp=%p wwwh=%lx thread=%p",
            msg, (void *)lp, panic_rwlock.rw_wwwh, (void *)curthread);
}

/* ARGSUSED */
void
rw_init(krwlock_t *rwlp, char *name, krw_type_t type, void *arg)
{
        ((rwlock_impl_t *)rwlp)->rw_wwwh = 0;
}

void
rw_destroy(krwlock_t *rwlp)
{
        rwlock_impl_t *lp = (rwlock_impl_t *)rwlp;

        if (lp->rw_wwwh != 0) {
                if ((lp->rw_wwwh & RW_DOUBLE_LOCK) == RW_DOUBLE_LOCK)
                        rw_panic("rw_destroy: lock already destroyed", lp);
                else
                        rw_panic("rw_destroy: lock still active", lp);
        }

        lp->rw_wwwh = RW_DOUBLE_LOCK;
}

/*
 * Verify that an rwlock is held correctly.
 */
static int
rw_locked(rwlock_impl_t *lp, krw_t rw)
{
        uintptr_t old = lp->rw_wwwh;

        if (rw == RW_READER || rw == RW_READER_STARVEWRITER)
                return ((old & RW_LOCKED) && !(old & RW_WRITE_LOCKED));

        if (rw == RW_WRITER)
                return ((old & RW_OWNER) == (uintptr_t)curthread);

        return (0);
}

uint_t (*rw_lock_backoff)(uint_t) = NULL;
void (*rw_lock_delay)(uint_t) = NULL;

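/*
 * Illustrative consumer sketch (not part of this implementation): the
 * structure, lock, and functions below are hypothetical and simply show
 * the programming model described in rwlock(9F) -- concurrent readers,
 * exclusive writers.
 *
 *      typedef struct foo {
 *              krwlock_t       foo_lock;
 *              int             foo_value;
 *      } foo_t;
 *
 *      void
 *      foo_init(foo_t *fp)
 *      {
 *              rw_init(&fp->foo_lock, NULL, RW_DEFAULT, NULL);
 *      }
 *
 *      int
 *      foo_read(foo_t *fp)
 *      {
 *              int v;
 *
 *              rw_enter(&fp->foo_lock, RW_READER);
 *              v = fp->foo_value;
 *              rw_exit(&fp->foo_lock);
 *              return (v);
 *      }
 *
 *      void
 *      foo_write(foo_t *fp, int v)
 *      {
 *              rw_enter(&fp->foo_lock, RW_WRITER);
 *              fp->foo_value = v;
 *              rw_exit(&fp->foo_lock);
 *      }
 */
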
/*
 * Full-service implementation of rw_enter() to handle all the hard cases.
 * Called from the assembly version if anything complicated is going on.
 */
void
rw_enter_sleep(rwlock_impl_t *lp, krw_t rw)
{
        uintptr_t old, new, lock_value, lock_busy, lock_wait;
        hrtime_t sleep_time;
        turnstile_t *ts;
        uint_t backoff = 0;
        int loop_count = 0;

        if (rw == RW_READER) {
                lock_value = RW_READ_LOCK;
                lock_busy = RW_WRITE_CLAIMED;
                lock_wait = RW_HAS_WAITERS;
        } else if (rw == RW_READER_STARVEWRITER) {
                lock_value = RW_READ_LOCK;
                lock_busy = RW_WRITE_LOCKED;
                lock_wait = RW_HAS_WAITERS;
        } else {
                lock_value = RW_WRITE_LOCK(curthread);
                lock_busy = (uintptr_t)RW_LOCKED;
                lock_wait = RW_HAS_WAITERS | RW_WRITE_WANTED;
        }

        for (;;) {
                if (((old = lp->rw_wwwh) & lock_busy) == 0) {
                        if (casip(&lp->rw_wwwh, old, old + lock_value) != old) {
                                if (rw_lock_delay != NULL) {
                                        backoff = rw_lock_backoff(backoff);
                                        rw_lock_delay(backoff);
                                        if (++loop_count == ncpus_online) {
                                                backoff = 0;
                                                loop_count = 0;
                                        }
                                }
                                continue;
                        }
                        break;
                }

                if (panicstr)
                        return;

                if ((old & RW_DOUBLE_LOCK) == RW_DOUBLE_LOCK) {
                        rw_panic("rw_enter: bad rwlock", lp);
                        return;
                }

                if ((old & RW_OWNER) == (uintptr_t)curthread) {
                        rw_panic("recursive rw_enter", lp);
                        return;
                }

                ts = turnstile_lookup(lp);

                do {
                        if (((old = lp->rw_wwwh) & lock_busy) == 0)
                                break;
                        new = old | lock_wait;
                } while (old != new && casip(&lp->rw_wwwh, old, new) != old);

                if ((old & lock_busy) == 0) {
                        /*
                         * The lock appears free now; try the dance again
                         */
                        turnstile_exit(lp);
                        continue;
                }

                /*
                 * We really are going to block, so bump the stats.
                 */
                ASSERT(lp->rw_wwwh & lock_wait);
                ASSERT(lp->rw_wwwh & RW_LOCKED);

                sleep_time = -gethrtime();
                if (rw != RW_WRITER) {
                        CPU_STATS_ADDQ(CPU, sys, rw_rdfails, 1);
                        (void) turnstile_block(ts, TS_READER_Q, lp,
                            &rw_sobj_ops, NULL, NULL);
                } else {
                        CPU_STATS_ADDQ(CPU, sys, rw_wrfails, 1);
                        (void) turnstile_block(ts, TS_WRITER_Q, lp,
                            &rw_sobj_ops, NULL, NULL);
                }
                sleep_time += gethrtime();

                LOCKSTAT_RECORD4(LS_RW_ENTER_BLOCK, lp, sleep_time, rw,
                    (old & RW_WRITE_LOCKED) ? 1 : 0,
                    old >> RW_HOLD_COUNT_SHIFT);

                /*
                 * We wake up holding the lock via direct handoff from the
                 * previous owner.
                 */
                break;
        }

        ASSERT(rw_locked(lp, rw));

        membar_enter();

        LOCKSTAT_RECORD(LS_RW_ENTER_ACQUIRE, lp, rw);
}

/*
 * Return the number of readers to wake, or zero if we should wake a writer.
 * Called only by exiting/downgrading writers (readers don't wake readers).
 */
static int
rw_readers_to_wake(turnstile_t *ts)
{
        kthread_t *next_writer = ts->ts_sleepq[TS_WRITER_Q].sq_first;
        kthread_t *next_reader = ts->ts_sleepq[TS_READER_Q].sq_first;
        pri_t wpri = (next_writer != NULL) ? DISP_PRIO(next_writer) : -1;
        int count = 0;

        while (next_reader != NULL) {
                if (DISP_PRIO(next_reader) < wpri)
                        break;
                next_reader = next_reader->t_link;
                count++;
        }
        return (count);
}

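/*
 * Worked example for rw_readers_to_wake() (illustrative only; it assumes
 * the turnstile sleep queues are kept in descending priority order): if the
 * blocked readers have priorities 60, 60, and 30, and the highest-priority
 * blocked writer has priority 59, the loop above counts the two priority-60
 * readers and stops at the priority-30 reader, so two readers are awakened
 * while the writer and the remaining reader stay blocked.  With no blocked
 * writer at all, wpri is -1 and every blocked reader is counted.
 */
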
/*
 * Full-service implementation of rw_exit() to handle all the hard cases.
 * Called from the assembly version if anything complicated is going on.
 * There is no semantic difference between calling rw_exit() and calling
 * rw_exit_wakeup() directly.
 */
void
rw_exit_wakeup(rwlock_impl_t *lp)
{
        turnstile_t *ts;
        uintptr_t old, new, lock_value;
        kthread_t *next_writer;
        int nreaders;
        uint_t backoff = 0;
        int loop_count = 0;

        membar_exit();

        old = lp->rw_wwwh;
        if (old & RW_WRITE_LOCKED) {
                if ((old & RW_OWNER) != (uintptr_t)curthread) {
                        rw_panic("rw_exit: not owner", lp);
                        lp->rw_wwwh = 0;
                        return;
                }
                lock_value = RW_WRITE_LOCK(curthread);
        } else {
                if ((old & RW_LOCKED) == 0) {
                        rw_panic("rw_exit: lock not held", lp);
                        return;
                }
                lock_value = RW_READ_LOCK;
        }

        for (;;) {
                /*
                 * If this is *not* the final exit of a lock with waiters,
                 * just drop the lock -- there's nothing tricky going on.
                 */
                old = lp->rw_wwwh;
                new = old - lock_value;
                if ((new & (RW_LOCKED | RW_HAS_WAITERS)) != RW_HAS_WAITERS) {
                        if (casip(&lp->rw_wwwh, old, new) != old) {
                                if (rw_lock_delay != NULL) {
                                        backoff = rw_lock_backoff(backoff);
                                        rw_lock_delay(backoff);
                                        if (++loop_count == ncpus_online) {
                                                backoff = 0;
                                                loop_count = 0;
                                        }
                                }
                                continue;
                        }
                        break;
                }

                /*
                 * This appears to be the final exit of a lock with waiters.
                 * If we do not have the lock as writer (that is, if this is
                 * the last exit of a reader with waiting writers), we will
                 * grab the lock as writer to prevent additional readers.
                 * (This is required because a reader that is acquiring the
                 * lock via RW_READER_STARVEWRITER will not observe the
                 * RW_WRITE_WANTED bit -- and we could therefore be racing
                 * with such readers here.)
                 */
                if (!(old & RW_WRITE_LOCKED)) {
                        new = RW_WRITE_LOCK(curthread) |
                            RW_HAS_WAITERS | RW_WRITE_WANTED;

                        if (casip(&lp->rw_wwwh, old, new) != old)
                                continue;
                }

                /*
                 * Perform the final exit of a lock that has waiters.
                 */
                ts = turnstile_lookup(lp);

                next_writer = ts->ts_sleepq[TS_WRITER_Q].sq_first;

                if ((old & RW_WRITE_LOCKED) &&
                    (nreaders = rw_readers_to_wake(ts)) > 0) {
                        /*
                         * Don't drop the lock -- just set the hold count
                         * such that we grant the lock to all readers at once.
                         */
                        new = nreaders * RW_READ_LOCK;
                        if (ts->ts_waiters > nreaders)
                                new |= RW_HAS_WAITERS;
                        if (next_writer)
                                new |= RW_WRITE_WANTED;
                        lp->rw_wwwh = new;
                        membar_enter();
                        turnstile_wakeup(ts, TS_READER_Q, nreaders, NULL);
                } else {
                        /*
                         * Don't drop the lock -- just transfer ownership
                         * directly to next_writer.  Note that there must
                         * be at least one waiting writer, because we get
                         * here only if (A) the lock is read-locked or
                         * (B) there are no waiting readers.  In case (A),
                         * since the lock is read-locked there would be no
                         * reason for other readers to have blocked unless
                         * the RW_WRITE_WANTED bit was set.  In case (B),
                         * since there are waiters but no waiting readers,
                         * they must all be waiting writers.
                         */
                        ASSERT(lp->rw_wwwh & RW_WRITE_WANTED);
                        new = RW_WRITE_LOCK(next_writer);
                        if (ts->ts_waiters > 1)
                                new |= RW_HAS_WAITERS;
                        if (next_writer->t_link)
                                new |= RW_WRITE_WANTED;
                        lp->rw_wwwh = new;
                        membar_enter();
                        turnstile_wakeup(ts, TS_WRITER_Q, 1, next_writer);
                }
                break;
        }

        if (lock_value == RW_READ_LOCK) {
                LOCKSTAT_RECORD(LS_RW_EXIT_RELEASE, lp, RW_READER);
        } else {
                LOCKSTAT_RECORD(LS_RW_EXIT_RELEASE, lp, RW_WRITER);
        }
}

int
rw_tryenter(krwlock_t *rwlp, krw_t rw)
{
        rwlock_impl_t *lp = (rwlock_impl_t *)rwlp;
        uintptr_t old;

        if (rw != RW_WRITER) {
                uint_t backoff = 0;
                int loop_count = 0;
                for (;;) {
                        if ((old = lp->rw_wwwh) & (rw == RW_READER ?
                            RW_WRITE_CLAIMED : RW_WRITE_LOCKED)) {
                                return (0);
                        }
                        if (casip(&lp->rw_wwwh, old, old + RW_READ_LOCK) == old)
                                break;
                        if (rw_lock_delay != NULL) {
                                backoff = rw_lock_backoff(backoff);
                                rw_lock_delay(backoff);
                                if (++loop_count == ncpus_online) {
                                        backoff = 0;
                                        loop_count = 0;
                                }
                        }
                }
                LOCKSTAT_RECORD(LS_RW_TRYENTER_ACQUIRE, lp, rw);
        } else {
                if (casip(&lp->rw_wwwh, 0, RW_WRITE_LOCK(curthread)) != 0)
                        return (0);
                LOCKSTAT_RECORD(LS_RW_TRYENTER_ACQUIRE, lp, rw);
        }
        ASSERT(rw_locked(lp, rw));
        membar_enter();
        return (1);
}

void
rw_downgrade(krwlock_t *rwlp)
{
        rwlock_impl_t *lp = (rwlock_impl_t *)rwlp;

        membar_exit();

        if ((lp->rw_wwwh & RW_OWNER) != (uintptr_t)curthread) {
                rw_panic("rw_downgrade: not owner", lp);
                return;
        }

        if (atomic_add_ip_nv(&lp->rw_wwwh,
            RW_READ_LOCK - RW_WRITE_LOCK(curthread)) & RW_HAS_WAITERS) {
                turnstile_t *ts = turnstile_lookup(lp);
                int nreaders = rw_readers_to_wake(ts);
                if (nreaders > 0) {
                        uintptr_t delta = nreaders * RW_READ_LOCK;
                        if (ts->ts_waiters == nreaders)
                                delta -= RW_HAS_WAITERS;
                        atomic_add_ip(&lp->rw_wwwh, delta);
                }
                turnstile_wakeup(ts, TS_READER_Q, nreaders, NULL);
        }
        ASSERT(rw_locked(lp, RW_READER));
        LOCKSTAT_RECORD0(LS_RW_DOWNGRADE_DOWNGRADE, lp);
}

int
rw_tryupgrade(krwlock_t *rwlp)
{
        rwlock_impl_t *lp = (rwlock_impl_t *)rwlp;
        uintptr_t old, new;

        ASSERT(rw_locked(lp, RW_READER));

        do {
                if (((old = lp->rw_wwwh) & ~RW_HAS_WAITERS) != RW_READ_LOCK)
                        return (0);
                new = old + RW_WRITE_LOCK(curthread) - RW_READ_LOCK;
        } while (casip(&lp->rw_wwwh, old, new) != old);

        membar_enter();
        LOCKSTAT_RECORD0(LS_RW_TRYUPGRADE_UPGRADE, lp);
        ASSERT(rw_locked(lp, RW_WRITER));
        return (1);
}

int
rw_read_held(krwlock_t *rwlp)
{
        uintptr_t tmp;

        return (_RW_READ_HELD(rwlp, tmp));
}

int
rw_write_held(krwlock_t *rwlp)
{
        return (_RW_WRITE_HELD(rwlp));
}

int
rw_lock_held(krwlock_t *rwlp)
{
        return (_RW_LOCK_HELD(rwlp));
}

/*
 * Like rw_read_held(), but ASSERTs that the lock is currently held
 */
int
rw_read_locked(krwlock_t *rwlp)
{
        uintptr_t old = ((rwlock_impl_t *)rwlp)->rw_wwwh;

        ASSERT(old & RW_LOCKED);
        return ((old & RW_LOCKED) && !(old & RW_WRITE_LOCKED));
}

/*
 * Returns non-zero if the lock is either held or desired by a writer
 */
int
rw_iswriter(krwlock_t *rwlp)
{
        return (_RW_ISWRITER(rwlp));
}

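/*
 * Typical consumer use of the held predicates above (a sketch only; the
 * function and lock names are hypothetical, as in the earlier sketch):
 * internal routines commonly assert the caller's lock state rather than
 * taking the lock themselves.
 *
 *      static void
 *      foo_update_locked(foo_t *fp)
 *      {
 *              ASSERT(rw_write_held(&fp->foo_lock));
 *              ...
 *      }
 */
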
kthread_t *
rw_owner(krwlock_t *rwlp)
{
        uintptr_t old = ((rwlock_impl_t *)rwlp)->rw_wwwh;

        return ((old & RW_WRITE_LOCKED) ? (kthread_t *)(old & RW_OWNER) : NULL);
}