/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include "lint.h"
#include "thr_uberdata.h"
#include <sys/sdt.h>

#define	TRY_FLAG		0x10
#define	READ_LOCK		0
#define	WRITE_LOCK		1
#define	READ_LOCK_TRY		(READ_LOCK | TRY_FLAG)
#define	WRITE_LOCK_TRY		(WRITE_LOCK | TRY_FLAG)

#define	NLOCKS	4	/* initial number of readlock_t structs allocated */

#define	ASSERT_CONSISTENT_STATE(readers)		\
	ASSERT(!((readers) & URW_WRITE_LOCKED) ||	\
		((readers) & ~URW_HAS_WAITERS) == URW_WRITE_LOCKED)

/*
 * Find/allocate an entry for rwlp in our array of rwlocks held for reading.
 * We must be deferring signals for this to be safe.
 * Else if we are returning an entry with ul_rdlockcnt == 0,
 * it could be reassigned behind our back in a signal handler.
 */
static readlock_t *
rwl_entry(rwlock_t *rwlp)
{
	ulwp_t *self = curthread;
	readlock_t *remembered = NULL;
	readlock_t *readlockp;
	uint_t nlocks;

	/* we must be deferring signals */
	ASSERT((self->ul_critical + self->ul_sigdefer) != 0);

	if ((nlocks = self->ul_rdlockcnt) != 0)
		readlockp = self->ul_readlock.array;
	else {
		nlocks = 1;
		readlockp = &self->ul_readlock.single;
	}

	for (; nlocks; nlocks--, readlockp++) {
		if (readlockp->rd_rwlock == rwlp)
			return (readlockp);
		if (readlockp->rd_count == 0 && remembered == NULL)
			remembered = readlockp;
	}
	if (remembered != NULL) {
		remembered->rd_rwlock = rwlp;
		return (remembered);
	}

	/*
	 * No entry available.  Allocate more space, converting the single
	 * readlock_t entry into an array of readlock_t entries if necessary.
	 */
	if ((nlocks = self->ul_rdlockcnt) == 0) {
		/*
		 * Initial allocation of the readlock_t array.
		 * Convert the single entry into an array.
		 */
		self->ul_rdlockcnt = nlocks = NLOCKS;
		readlockp = lmalloc(nlocks * sizeof (readlock_t));
		/*
		 * The single readlock_t becomes the first entry in the array.
		 */
		*readlockp = self->ul_readlock.single;
		self->ul_readlock.single.rd_count = 0;
		self->ul_readlock.array = readlockp;
		/*
		 * Return the next available entry in the array.
		 */
		(++readlockp)->rd_rwlock = rwlp;
		return (readlockp);
	}
	/*
	 * Reallocate the array, double the size each time.
	 */
	readlockp = lmalloc(nlocks * 2 * sizeof (readlock_t));
	(void) memcpy(readlockp, self->ul_readlock.array,
	    nlocks * sizeof (readlock_t));
	lfree(self->ul_readlock.array, nlocks * sizeof (readlock_t));
	self->ul_readlock.array = readlockp;
	self->ul_rdlockcnt *= 2;
	/*
	 * Return the next available entry in the newly allocated array.
	 */
	(readlockp += nlocks)->rd_rwlock = rwlp;
	return (readlockp);
}

/*
 * Free the array of rwlocks held for reading.
 */
void
rwl_free(ulwp_t *ulwp)
{
	uint_t nlocks;

	if ((nlocks = ulwp->ul_rdlockcnt) != 0)
		lfree(ulwp->ul_readlock.array, nlocks * sizeof (readlock_t));
	ulwp->ul_rdlockcnt = 0;
	ulwp->ul_readlock.single.rd_rwlock = NULL;
	ulwp->ul_readlock.single.rd_count = 0;
}

/*
 * Check if a reader version of the lock is held by the current thread.
 */
#pragma weak _rw_read_held = rw_read_held
int
rw_read_held(rwlock_t *rwlp)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t readers;
	ulwp_t *self = curthread;
	readlock_t *readlockp;
	uint_t nlocks;
	int rval = 0;

	no_preempt(self);

	readers = *rwstate;
	ASSERT_CONSISTENT_STATE(readers);
	if (!(readers & URW_WRITE_LOCKED) &&
	    (readers & URW_READERS_MASK) != 0) {
		/*
		 * The lock is held for reading by some thread.
		 * Search our array of rwlocks held for reading for a match.
		 */
		if ((nlocks = self->ul_rdlockcnt) != 0)
			readlockp = self->ul_readlock.array;
		else {
			nlocks = 1;
			readlockp = &self->ul_readlock.single;
		}
		for (; nlocks; nlocks--, readlockp++) {
			if (readlockp->rd_rwlock == rwlp) {
				if (readlockp->rd_count)
					rval = 1;
				break;
			}
		}
	}

	preempt(self);
	return (rval);
}

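/*
 * Illustrative note (not part of the original source): rw_read_held() and
 * rw_write_held() (below) report only on locks held by the *calling* thread,
 * so they are suitable for self-checking assertions in code that requires a
 * lock to be held on entry, for example:
 *
 *	ASSERT(rw_read_held(&lock) || rw_write_held(&lock));
 */
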
/*
 * Check if a writer version of the lock is held by the current thread.
 */
#pragma weak _rw_write_held = rw_write_held
int
rw_write_held(rwlock_t *rwlp)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t readers;
	ulwp_t *self = curthread;
	int rval;

	no_preempt(self);

	readers = *rwstate;
	ASSERT_CONSISTENT_STATE(readers);
	rval = ((readers & URW_WRITE_LOCKED) &&
	    rwlp->rwlock_owner == (uintptr_t)self &&
	    (rwlp->rwlock_type == USYNC_THREAD ||
	    rwlp->rwlock_ownerpid == self->ul_uberdata->pid));

	preempt(self);
	return (rval);
}

#pragma weak _rwlock_init = rwlock_init
/* ARGSUSED2 */
int
rwlock_init(rwlock_t *rwlp, int type, void *arg)
{
	ulwp_t *self = curthread;

	if (type != USYNC_THREAD && type != USYNC_PROCESS)
		return (EINVAL);
	/*
	 * Once reinitialized, we can no longer be holding a read or write lock.
	 * We can do nothing about other threads that are holding read locks.
	 */
	sigoff(self);
	rwl_entry(rwlp)->rd_count = 0;
	sigon(self);
	(void) memset(rwlp, 0, sizeof (*rwlp));
	rwlp->rwlock_type = (uint16_t)type;
	rwlp->rwlock_magic = RWL_MAGIC;
	rwlp->mutex.mutex_type = (uint8_t)type;
	rwlp->mutex.mutex_flag = LOCK_INITED;
	rwlp->mutex.mutex_magic = MUTEX_MAGIC;

	/*
	 * This should be at the beginning of the function,
	 * but for the sake of old broken applications that
	 * do not have proper alignment for their rwlocks
	 * (and don't check the return code from rwlock_init),
	 * we put it here, after initializing the rwlock regardless.
	 */
	if (((uintptr_t)rwlp & (_LONG_LONG_ALIGNMENT - 1)) &&
	    self->ul_misaligned == 0)
		return (EINVAL);

	return (0);
}

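/*
 * Illustrative sketch (not part of the original source): typical application
 * use of the interfaces defined in this file, via their public names, with
 * error handling omitted for brevity:
 *
 *	rwlock_t lock;
 *	(void) rwlock_init(&lock, USYNC_THREAD, NULL);
 *	(void) rw_rdlock(&lock);	(several readers may hold the lock)
 *	(void) rw_unlock(&lock);
 *	(void) rw_wrlock(&lock);	(a writer has exclusive access)
 *	(void) rw_unlock(&lock);
 *	(void) rwlock_destroy(&lock);
 */
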
#pragma weak pthread_rwlock_destroy = rwlock_destroy
#pragma weak _rwlock_destroy = rwlock_destroy
int
rwlock_destroy(rwlock_t *rwlp)
{
	ulwp_t *self = curthread;

	/*
	 * Once destroyed, we can no longer be holding a read or write lock.
	 * We can do nothing about other threads that are holding read locks.
	 */
	sigoff(self);
	rwl_entry(rwlp)->rd_count = 0;
	sigon(self);
	rwlp->rwlock_magic = 0;
	tdb_sync_obj_deregister(rwlp);
	return (0);
}

/*
 * The following four functions:
 *	read_lock_try()
 *	read_unlock_try()
 *	write_lock_try()
 *	write_unlock_try()
 * lie at the heart of the fast-path code for rwlocks,
 * both process-private and process-shared.
 *
 * They are called once without recourse to any other locking primitives.
 * If they succeed, we are done and the fast-path code was successful.
 * If they fail, we have to deal with lock queues, either to enqueue
 * ourself and sleep or to dequeue and wake up someone else (slow paths).
 *
 * Unless 'ignore_waiters_flag' is true (a condition that applies only
 * when read_lock_try() or write_lock_try() is called from code that
 * is already in the slow path and has already acquired the queue lock),
 * these functions will always fail if the waiters flag, URW_HAS_WAITERS,
 * is set in the 'rwstate' word.  Thus, setting the waiters flag on the
 * rwlock and acquiring the queue lock guarantees exclusive access to
 * the rwlock (and is the only way to guarantee exclusive access).
 */

/*
 * Attempt to acquire a readers lock.  Return true on success.
 */
static int
read_lock_try(rwlock_t *rwlp, int ignore_waiters_flag)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t mask = ignore_waiters_flag?
	    URW_WRITE_LOCKED : (URW_HAS_WAITERS | URW_WRITE_LOCKED);
	uint32_t readers;
	ulwp_t *self = curthread;

	no_preempt(self);
	while (((readers = *rwstate) & mask) == 0) {
		if (atomic_cas_32(rwstate, readers, readers + 1) == readers) {
			preempt(self);
			return (1);
		}
	}
	preempt(self);
	return (0);
}

/*
 * Attempt to release a reader lock.  Return true on success.
 */
static int
read_unlock_try(rwlock_t *rwlp)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t readers;
	ulwp_t *self = curthread;

	no_preempt(self);
	while (((readers = *rwstate) & URW_HAS_WAITERS) == 0) {
		if (atomic_cas_32(rwstate, readers, readers - 1) == readers) {
			preempt(self);
			return (1);
		}
	}
	preempt(self);
	return (0);
}

/*
 * Attempt to acquire a writer lock.  Return true on success.
 */
static int
write_lock_try(rwlock_t *rwlp, int ignore_waiters_flag)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t mask = ignore_waiters_flag?
	    (URW_WRITE_LOCKED | URW_READERS_MASK) :
	    (URW_HAS_WAITERS | URW_WRITE_LOCKED | URW_READERS_MASK);
	ulwp_t *self = curthread;
	uint32_t readers;

	no_preempt(self);
	while (((readers = *rwstate) & mask) == 0) {
		if (atomic_cas_32(rwstate, readers, readers | URW_WRITE_LOCKED)
		    == readers) {
			preempt(self);
			return (1);
		}
	}
	preempt(self);
	return (0);
}

/*
 * Attempt to release a writer lock.  Return true on success.
 */
static int
write_unlock_try(rwlock_t *rwlp)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t readers;
	ulwp_t *self = curthread;

	no_preempt(self);
	while (((readers = *rwstate) & URW_HAS_WAITERS) == 0) {
		if (atomic_cas_32(rwstate, readers, 0) == readers) {
			preempt(self);
			return (1);
		}
	}
	preempt(self);
	return (0);
}

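/*
 * Illustrative summary (not part of the original source): the 'rwstate' word
 * (rwlp->rwlock_readers) encodes the entire fast-path state of the rwlock.
 * URW_READERS_MASK covers the count of current readers, URW_WRITE_LOCKED is
 * set while a writer holds the lock, and URW_HAS_WAITERS is set while any
 * thread is queued.  Each *_try() function above is a single
 * compare-and-swap loop over that word; for example, a read acquire with
 * waiters not ignored amounts to:
 *
 *	readers = *rwstate;
 *	if ((readers & (URW_HAS_WAITERS | URW_WRITE_LOCKED)) == 0 &&
 *	    atomic_cas_32(rwstate, readers, readers + 1) == readers)
 *		the lock is held for reading;
 */
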
/*
 * Release a process-private rwlock and wake up any thread(s) sleeping on it.
 * This is called when a thread releases a lock that appears to have waiters.
 */
static void
rw_queue_release(rwlock_t *rwlp)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	queue_head_t *qp;
	uint32_t readers;
	uint32_t writer;
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	ulwp_t *prev;
	int nlwpid = 0;
	int more;
	int maxlwps = MAXLWPS;
	lwpid_t buffer[MAXLWPS];
	lwpid_t *lwpid = buffer;

	qp = queue_lock(rwlp, MX);

	/*
	 * Here is where we actually drop the lock,
	 * but we retain the URW_HAS_WAITERS flag, if it is already set.
	 */
	readers = *rwstate;
	ASSERT_CONSISTENT_STATE(readers);
	if (readers & URW_WRITE_LOCKED)	/* drop the writer lock */
		atomic_and_32(rwstate, ~URW_WRITE_LOCKED);
	else				/* drop the readers lock */
		atomic_dec_32(rwstate);
	if (!(readers & URW_HAS_WAITERS)) {	/* no waiters */
		queue_unlock(qp);
		return;
	}

	/*
	 * The presence of the URW_HAS_WAITERS flag causes all rwlock
	 * code to go through the slow path, acquiring queue_lock(qp).
	 * Therefore, the rest of this code is safe because we are
	 * holding the queue lock and the URW_HAS_WAITERS flag is set.
	 */

	readers = *rwstate;		/* must fetch the value again */
	ASSERT_CONSISTENT_STATE(readers);
	ASSERT(readers & URW_HAS_WAITERS);
	readers &= URW_READERS_MASK;	/* count of current readers */
	writer = 0;			/* no current writer */

	/*
	 * Examine the queue of waiters in priority order and prepare
	 * to wake up as many readers as we encounter before encountering
	 * a writer.  If the highest priority thread on the queue is a
	 * writer, stop there and wake it up.
	 *
	 * We keep track of lwpids that are to be unparked in lwpid[].
	 * __lwp_unpark_all() is called to unpark all of them after
	 * they have been removed from the sleep queue and the sleep
	 * queue lock has been dropped.  If we run out of space in our
	 * on-stack buffer, we need to allocate more but we can't call
	 * lmalloc() because we are holding a queue lock when the overflow
	 * occurs and lmalloc() acquires a lock.  We can't use alloca()
	 * either because the application may have allocated a small
	 * stack and we don't want to overrun the stack.  So we call
	 * alloc_lwpids() to allocate a bigger buffer using the mmap()
	 * system call directly since that path acquires no locks.
	 */
	while ((ulwpp = queue_slot(qp, &prev, &more)) != NULL) {
		ulwp = *ulwpp;
		ASSERT(ulwp->ul_wchan == rwlp);
		if (ulwp->ul_writer) {
			if (writer != 0 || readers != 0)
				break;
			/* one writer to wake */
			writer++;
		} else {
			if (writer != 0)
				break;
			/* at least one reader to wake */
			readers++;
			if (nlwpid == maxlwps)
				lwpid = alloc_lwpids(lwpid, &nlwpid, &maxlwps);
		}
		queue_unlink(qp, ulwpp, prev);
		ulwp->ul_sleepq = NULL;
		ulwp->ul_wchan = NULL;
		if (writer) {
			/*
			 * Hand off the lock to the writer we will be waking.
			 */
			ASSERT((*rwstate & ~URW_HAS_WAITERS) == 0);
			atomic_or_32(rwstate, URW_WRITE_LOCKED);
			rwlp->rwlock_owner = (uintptr_t)ulwp;
		}
		lwpid[nlwpid++] = ulwp->ul_lwpid;
	}

	/*
	 * This modification of rwstate must be done last.
	 * The presence of the URW_HAS_WAITERS flag causes all rwlock
	 * code to go through the slow path, acquiring queue_lock(qp).
	 * Otherwise the read_lock_try() and write_lock_try() fast paths
	 * are effective.
	 */
	if (ulwpp == NULL)
		atomic_and_32(rwstate, ~URW_HAS_WAITERS);

	if (nlwpid == 0) {
		queue_unlock(qp);
	} else {
		ulwp_t *self = curthread;
		no_preempt(self);
		queue_unlock(qp);
		if (nlwpid == 1)
			(void) __lwp_unpark(lwpid[0]);
		else
			(void) __lwp_unpark_all(lwpid, nlwpid);
		preempt(self);
	}
	if (lwpid != buffer)
		(void) munmap((caddr_t)lwpid, maxlwps * sizeof (lwpid_t));
}

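/*
 * Illustrative worked example (not part of the original source): with the
 * policy above, if no readers currently hold the lock and the sleep queue in
 * priority order is reader, reader, writer, reader, then rw_queue_release()
 * wakes the two leading readers and stops at the writer.  If the highest
 * priority waiter is a writer, only that writer is awakened and the lock is
 * handed off to it (URW_WRITE_LOCKED set, rwlock_owner assigned) before it
 * ever runs.
 */
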
/*
 * Common code for rdlock, timedrdlock, wrlock, timedwrlock, tryrdlock,
 * and trywrlock for process-shared (USYNC_PROCESS) rwlocks.
 *
 * Note: if the lock appears to be contended we call __lwp_rwlock_rdlock()
 * or __lwp_rwlock_wrlock() holding the mutex.  These return with the mutex
 * released, and if they need to sleep will release the mutex first.  In the
 * event of a spurious wakeup, these will return EAGAIN (because it is much
 * easier for us to re-acquire the mutex here).
 */
int
shared_rwlock_lock(rwlock_t *rwlp, timespec_t *tsp, int rd_wr)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	mutex_t *mp = &rwlp->mutex;
	uint32_t readers;
	int try_flag;
	int error;

	try_flag = (rd_wr & TRY_FLAG);
	rd_wr &= ~TRY_FLAG;
	ASSERT(rd_wr == READ_LOCK || rd_wr == WRITE_LOCK);

	if (!try_flag) {
		DTRACE_PROBE2(plockstat, rw__block, rwlp, rd_wr);
	}

	do {
		if (try_flag && (*rwstate & URW_WRITE_LOCKED)) {
			error = EBUSY;
			break;
		}
		if ((error = mutex_lock(mp)) != 0)
			break;
		if (rd_wr == READ_LOCK) {
			if (read_lock_try(rwlp, 0)) {
				(void) mutex_unlock(mp);
				break;
			}
		} else {
			if (write_lock_try(rwlp, 0)) {
				(void) mutex_unlock(mp);
				break;
			}
		}
		atomic_or_32(rwstate, URW_HAS_WAITERS);
		readers = *rwstate;
		ASSERT_CONSISTENT_STATE(readers);
		/*
		 * The calls to __lwp_rwlock_*() below will release the mutex,
		 * so we need a dtrace probe here.  The owner field of the
		 * mutex is cleared in the kernel when the mutex is released,
		 * so we should not clear it here.
		 */
		DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
		/*
		 * The waiters bit may be inaccurate.
		 * Only the kernel knows for sure.
		 */
		if (rd_wr == READ_LOCK) {
			if (try_flag)
				error = __lwp_rwlock_tryrdlock(rwlp);
			else
				error = __lwp_rwlock_rdlock(rwlp, tsp);
		} else {
			if (try_flag)
				error = __lwp_rwlock_trywrlock(rwlp);
			else
				error = __lwp_rwlock_wrlock(rwlp, tsp);
		}
	} while (error == EAGAIN || error == EINTR);

	if (!try_flag) {
		DTRACE_PROBE3(plockstat, rw__blocked, rwlp, rd_wr, error == 0);
	}

	return (error);
}

/*
 * Common code for rdlock, timedrdlock, wrlock, timedwrlock, tryrdlock,
 * and trywrlock for process-private (USYNC_THREAD) rwlocks.
 */
int
rwlock_lock(rwlock_t *rwlp, timespec_t *tsp, int rd_wr)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t readers;
	ulwp_t *self = curthread;
	queue_head_t *qp;
	ulwp_t *ulwp;
	int try_flag;
	int ignore_waiters_flag;
	int error = 0;

	try_flag = (rd_wr & TRY_FLAG);
	rd_wr &= ~TRY_FLAG;
	ASSERT(rd_wr == READ_LOCK || rd_wr == WRITE_LOCK);

	if (!try_flag) {
		DTRACE_PROBE2(plockstat, rw__block, rwlp, rd_wr);
	}

	qp = queue_lock(rwlp, MX);
	/* initial attempt to acquire the lock fails if there are waiters */
	ignore_waiters_flag = 0;
	while (error == 0) {
		if (rd_wr == READ_LOCK) {
			if (read_lock_try(rwlp, ignore_waiters_flag))
				break;
		} else {
			if (write_lock_try(rwlp, ignore_waiters_flag))
				break;
		}
		/* subsequent attempts do not fail due to waiters */
		ignore_waiters_flag = 1;
		atomic_or_32(rwstate, URW_HAS_WAITERS);
		readers = *rwstate;
		ASSERT_CONSISTENT_STATE(readers);
		if ((readers & URW_WRITE_LOCKED) ||
		    (rd_wr == WRITE_LOCK &&
		    (readers & URW_READERS_MASK) != 0))
			/* EMPTY */;	/* somebody holds the lock */
		else if ((ulwp = queue_waiter(qp)) == NULL) {
			atomic_and_32(rwstate, ~URW_HAS_WAITERS);
			ignore_waiters_flag = 0;
			continue;	/* no queued waiters, start over */
		} else {
			/*
			 * Do a priority check on the queued waiter (the
			 * highest priority thread on the queue) to see
			 * if we should defer to him or just grab the lock.
			 */
			int our_pri = real_priority(self);
			int his_pri = real_priority(ulwp);

			if (rd_wr == WRITE_LOCK) {
				/*
				 * We defer to a queued thread that has
				 * a higher priority than ours.
				 */
				if (his_pri <= our_pri) {
					/*
					 * Don't defer, just grab the lock.
					 */
					continue;
				}
			} else {
				/*
				 * We defer to a queued thread that has
				 * a higher priority than ours or that
				 * is a writer whose priority equals ours.
				 */
				if (his_pri < our_pri ||
				    (his_pri == our_pri && !ulwp->ul_writer)) {
					/*
					 * Don't defer, just grab the lock.
					 */
					continue;
				}
			}
		}
		/*
		 * We are about to block.
		 * If we're doing a trylock, return EBUSY instead.
		 */
		if (try_flag) {
			error = EBUSY;
			break;
		}
		/*
		 * Enqueue writers ahead of readers.
		 */
		self->ul_writer = rd_wr;	/* *must* be 0 or 1 */
		enqueue(qp, self, 0);
		set_parking_flag(self, 1);
		queue_unlock(qp);
		if ((error = __lwp_park(tsp, 0)) == EINTR)
			error = 0;
		set_parking_flag(self, 0);
		qp = queue_lock(rwlp, MX);
		if (self->ul_sleepq && dequeue_self(qp) == 0) {
			atomic_and_32(rwstate, ~URW_HAS_WAITERS);
			ignore_waiters_flag = 0;
		}
		self->ul_writer = 0;
		if (rd_wr == WRITE_LOCK &&
		    (*rwstate & URW_WRITE_LOCKED) &&
		    rwlp->rwlock_owner == (uintptr_t)self) {
			/*
			 * We acquired the lock by hand-off
			 * from the previous owner.
			 */
			error = 0;	/* timedlock did not fail */
			break;
		}
	}

	/*
	 * Make one final check to see if there are any threads left
	 * on the rwlock queue.  Clear the URW_HAS_WAITERS flag if not.
	 */
	if (qp->qh_root == NULL || qp->qh_root->qr_head == NULL)
		atomic_and_32(rwstate, ~URW_HAS_WAITERS);

	queue_unlock(qp);

	if (!try_flag) {
		DTRACE_PROBE3(plockstat, rw__blocked, rwlp, rd_wr, error == 0);
	}

	return (error);
}

int
rw_rdlock_impl(rwlock_t *rwlp, timespec_t *tsp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	readlock_t *readlockp;
	tdb_rwlock_stats_t *rwsp = RWLOCK_STATS(rwlp, udp);
	int error;

	/*
	 * If we already hold a readers lock on this rwlock,
	 * just increment our reference count and return.
	 */
	sigoff(self);
	readlockp = rwl_entry(rwlp);
	if (readlockp->rd_count != 0) {
		if (readlockp->rd_count == READ_LOCK_MAX) {
			sigon(self);
			error = EAGAIN;
			goto out;
		}
		sigon(self);
		error = 0;
		goto out;
	}
	sigon(self);

	/*
	 * If we hold the writer lock, bail out.
	 */
	if (rw_write_held(rwlp)) {
		if (self->ul_error_detection)
			rwlock_error(rwlp, "rwlock_rdlock",
			    "calling thread owns the writer lock");
		error = EDEADLK;
		goto out;
	}

	if (read_lock_try(rwlp, 0))
		error = 0;
	else if (rwlp->rwlock_type == USYNC_PROCESS)	/* kernel-level */
		error = shared_rwlock_lock(rwlp, tsp, READ_LOCK);
	else						/* user-level */
		error = rwlock_lock(rwlp, tsp, READ_LOCK);

out:
	if (error == 0) {
		sigoff(self);
		rwl_entry(rwlp)->rd_count++;
		sigon(self);
		if (rwsp)
			tdb_incr(rwsp->rw_rdlock);
		DTRACE_PROBE2(plockstat, rw__acquire, rwlp, READ_LOCK);
	} else {
		DTRACE_PROBE3(plockstat, rw__error, rwlp, READ_LOCK, error);
	}

	return (error);
}

#pragma weak pthread_rwlock_rdlock = rw_rdlock
#pragma weak _rw_rdlock = rw_rdlock
int
rw_rdlock(rwlock_t *rwlp)
{
	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	return (rw_rdlock_impl(rwlp, NULL));
}

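/*
 * Illustrative sketch (not part of the original source): read locks are
 * recursive per thread, counted in the per-thread readlock_t entries
 * maintained by rwl_entry().  Each successful rw_rdlock() must therefore be
 * matched by an rw_unlock() from the same thread:
 *
 *	(void) rw_rdlock(&lock);	(this thread's rd_count becomes 1)
 *	(void) rw_rdlock(&lock);	(rd_count becomes 2)
 *	(void) rw_unlock(&lock);	(rd_count becomes 1; lock still held)
 *	(void) rw_unlock(&lock);	(rd_count becomes 0; lock released)
 */
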
void
lrw_rdlock(rwlock_t *rwlp)
{
	enter_critical(curthread);
	(void) rw_rdlock_impl(rwlp, NULL);
}

int
pthread_rwlock_reltimedrdlock_np(pthread_rwlock_t *_RESTRICT_KYWD rwlp,
    const struct timespec *_RESTRICT_KYWD reltime)
{
	timespec_t tslocal = *reltime;
	int error;

	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	error = rw_rdlock_impl((rwlock_t *)rwlp, &tslocal);
	if (error == ETIME)
		error = ETIMEDOUT;
	return (error);
}

int
pthread_rwlock_timedrdlock(pthread_rwlock_t *_RESTRICT_KYWD rwlp,
    const struct timespec *_RESTRICT_KYWD abstime)
{
	timespec_t tslocal;
	int error;

	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	abstime_to_reltime(CLOCK_REALTIME, abstime, &tslocal);
	error = rw_rdlock_impl((rwlock_t *)rwlp, &tslocal);
	if (error == ETIME)
		error = ETIMEDOUT;
	return (error);
}

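/*
 * Illustrative sketch (not part of the original source): the timed variants
 * above take an absolute CLOCK_REALTIME deadline, which is converted to a
 * relative timeout before calling the common lock code.  A caller might
 * construct the deadline like this (error handling omitted):
 *
 *	struct timespec ts;
 *	(void) clock_gettime(CLOCK_REALTIME, &ts);
 *	ts.tv_sec += 5;		(give up after roughly five seconds)
 *	error = pthread_rwlock_timedrdlock(&rwlock, &ts);
 *	if (error == ETIMEDOUT)
 *		the lock could not be acquired before the deadline;
 */
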
int
rw_wrlock_impl(rwlock_t *rwlp, timespec_t *tsp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_rwlock_stats_t *rwsp = RWLOCK_STATS(rwlp, udp);
	int error;

	/*
	 * If we hold a readers lock on this rwlock, bail out.
	 */
	if (rw_read_held(rwlp)) {
		if (self->ul_error_detection)
			rwlock_error(rwlp, "rwlock_wrlock",
			    "calling thread owns the readers lock");
		error = EDEADLK;
		goto out;
	}

	/*
	 * If we hold the writer lock, bail out.
	 */
	if (rw_write_held(rwlp)) {
		if (self->ul_error_detection)
			rwlock_error(rwlp, "rwlock_wrlock",
			    "calling thread owns the writer lock");
		error = EDEADLK;
		goto out;
	}

	if (write_lock_try(rwlp, 0))
		error = 0;
	else if (rwlp->rwlock_type == USYNC_PROCESS)	/* kernel-level */
		error = shared_rwlock_lock(rwlp, tsp, WRITE_LOCK);
	else						/* user-level */
		error = rwlock_lock(rwlp, tsp, WRITE_LOCK);

out:
	if (error == 0) {
		rwlp->rwlock_owner = (uintptr_t)self;
		if (rwlp->rwlock_type == USYNC_PROCESS)
			rwlp->rwlock_ownerpid = udp->pid;
		if (rwsp) {
			tdb_incr(rwsp->rw_wrlock);
			rwsp->rw_wrlock_begin_hold = gethrtime();
		}
		DTRACE_PROBE2(plockstat, rw__acquire, rwlp, WRITE_LOCK);
	} else {
		DTRACE_PROBE3(plockstat, rw__error, rwlp, WRITE_LOCK, error);
	}
	return (error);
}

#pragma weak pthread_rwlock_wrlock = rw_wrlock
#pragma weak _rw_wrlock = rw_wrlock
int
rw_wrlock(rwlock_t *rwlp)
{
	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	return (rw_wrlock_impl(rwlp, NULL));
}

void
lrw_wrlock(rwlock_t *rwlp)
{
	enter_critical(curthread);
	(void) rw_wrlock_impl(rwlp, NULL);
}

int
pthread_rwlock_reltimedwrlock_np(pthread_rwlock_t *_RESTRICT_KYWD rwlp,
    const struct timespec *_RESTRICT_KYWD reltime)
{
	timespec_t tslocal = *reltime;
	int error;

	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	error = rw_wrlock_impl((rwlock_t *)rwlp, &tslocal);
	if (error == ETIME)
		error = ETIMEDOUT;
	return (error);
}

int
pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlp, const timespec_t *abstime)
{
	timespec_t tslocal;
	int error;

	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	abstime_to_reltime(CLOCK_REALTIME, abstime, &tslocal);
	error = rw_wrlock_impl((rwlock_t *)rwlp, &tslocal);
	if (error == ETIME)
		error = ETIMEDOUT;
	return (error);
}

#pragma weak pthread_rwlock_tryrdlock = rw_tryrdlock
int
rw_tryrdlock(rwlock_t *rwlp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_rwlock_stats_t *rwsp = RWLOCK_STATS(rwlp, udp);
	readlock_t *readlockp;
	int error;

	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);

	if (rwsp)
		tdb_incr(rwsp->rw_rdlock_try);

	/*
	 * If we already hold a readers lock on this rwlock,
	 * just increment our reference count and return.
	 */
	sigoff(self);
	readlockp = rwl_entry(rwlp);
	if (readlockp->rd_count != 0) {
		if (readlockp->rd_count == READ_LOCK_MAX) {
			sigon(self);
			error = EAGAIN;
			goto out;
		}
		sigon(self);
		error = 0;
		goto out;
	}
	sigon(self);

	if (read_lock_try(rwlp, 0))
		error = 0;
	else if (rwlp->rwlock_type == USYNC_PROCESS)	/* kernel-level */
		error = shared_rwlock_lock(rwlp, NULL, READ_LOCK_TRY);
	else						/* user-level */
		error = rwlock_lock(rwlp, NULL, READ_LOCK_TRY);

out:
	if (error == 0) {
		sigoff(self);
		rwl_entry(rwlp)->rd_count++;
		sigon(self);
		DTRACE_PROBE2(plockstat, rw__acquire, rwlp, READ_LOCK);
	} else {
		if (rwsp)
			tdb_incr(rwsp->rw_rdlock_try_fail);
		if (error != EBUSY) {
			DTRACE_PROBE3(plockstat, rw__error, rwlp, READ_LOCK,
			    error);
		}
	}

	return (error);
}

#pragma weak pthread_rwlock_trywrlock = rw_trywrlock
int
rw_trywrlock(rwlock_t *rwlp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_rwlock_stats_t *rwsp = RWLOCK_STATS(rwlp, udp);
	int error;

	ASSERT(!self->ul_critical || self->ul_bindflags);

	if (rwsp)
		tdb_incr(rwsp->rw_wrlock_try);

	if (write_lock_try(rwlp, 0))
		error = 0;
	else if (rwlp->rwlock_type == USYNC_PROCESS)	/* kernel-level */
		error = shared_rwlock_lock(rwlp, NULL, WRITE_LOCK_TRY);
	else						/* user-level */
		error = rwlock_lock(rwlp, NULL, WRITE_LOCK_TRY);

	if (error == 0) {
		rwlp->rwlock_owner = (uintptr_t)self;
		if (rwlp->rwlock_type == USYNC_PROCESS)
			rwlp->rwlock_ownerpid = udp->pid;
		if (rwsp)
			rwsp->rw_wrlock_begin_hold = gethrtime();
		DTRACE_PROBE2(plockstat, rw__acquire, rwlp, WRITE_LOCK);
	} else {
		if (rwsp)
			tdb_incr(rwsp->rw_wrlock_try_fail);
		if (error != EBUSY) {
			DTRACE_PROBE3(plockstat, rw__error, rwlp, WRITE_LOCK,
			    error);
		}
	}
	return (error);
}

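/*
 * Illustrative sketch (not part of the original source): the try variants
 * never block; they return EBUSY when the lock cannot be acquired
 * immediately, so a caller can fall back to other work:
 *
 *	if ((error = rw_trywrlock(&lock)) == 0) {
 *		update the shared data;
 *		(void) rw_unlock(&lock);
 *	} else if (error == EBUSY) {
 *		the lock is busy; try again later;
 *	}
 */
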
#pragma weak pthread_rwlock_unlock = rw_unlock
#pragma weak _rw_unlock = rw_unlock
int
rw_unlock(rwlock_t *rwlp)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t readers;
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_rwlock_stats_t *rwsp;
	int rd_wr;

	readers = *rwstate;
	ASSERT_CONSISTENT_STATE(readers);
	if (readers & URW_WRITE_LOCKED) {
		rd_wr = WRITE_LOCK;
		readers = 0;
	} else {
		rd_wr = READ_LOCK;
		readers &= URW_READERS_MASK;
	}

	if (rd_wr == WRITE_LOCK) {
		/*
		 * Since the writer lock is held, we'd better be
		 * holding it, else we cannot legitimately be here.
		 */
		if (!rw_write_held(rwlp)) {
			if (self->ul_error_detection)
				rwlock_error(rwlp, "rwlock_unlock",
				    "writer lock held, "
				    "but not by the calling thread");
			return (EPERM);
		}
		if ((rwsp = RWLOCK_STATS(rwlp, udp)) != NULL) {
			if (rwsp->rw_wrlock_begin_hold)
				rwsp->rw_wrlock_hold_time +=
				    gethrtime() - rwsp->rw_wrlock_begin_hold;
			rwsp->rw_wrlock_begin_hold = 0;
		}
		rwlp->rwlock_owner = 0;
		rwlp->rwlock_ownerpid = 0;
	} else if (readers > 0) {
		/*
		 * A readers lock is held; if we don't hold one, bail out.
		 */
		readlock_t *readlockp;

		sigoff(self);
		readlockp = rwl_entry(rwlp);
		if (readlockp->rd_count == 0) {
			sigon(self);
			if (self->ul_error_detection)
				rwlock_error(rwlp, "rwlock_unlock",
				    "readers lock held, "
				    "but not by the calling thread");
			return (EPERM);
		}
		/*
		 * If we hold more than one readers lock on this rwlock,
		 * just decrement our reference count and return.
		 */
		if (--readlockp->rd_count != 0) {
			sigon(self);
			goto out;
		}
		sigon(self);
	} else {
		/*
		 * This is a usage error.
		 * No thread should release an unowned lock.
		 */
		if (self->ul_error_detection)
			rwlock_error(rwlp, "rwlock_unlock", "lock not owned");
		return (EPERM);
	}

	if (rd_wr == WRITE_LOCK && write_unlock_try(rwlp)) {
		/* EMPTY */;
	} else if (rd_wr == READ_LOCK && read_unlock_try(rwlp)) {
		/* EMPTY */;
	} else if (rwlp->rwlock_type == USYNC_PROCESS) {
		(void) mutex_lock(&rwlp->mutex);
		(void) __lwp_rwlock_unlock(rwlp);
		(void) mutex_unlock(&rwlp->mutex);
	} else {
		rw_queue_release(rwlp);
	}

out:
	DTRACE_PROBE2(plockstat, rw__release, rwlp, rd_wr);
	return (0);
}

void
lrw_unlock(rwlock_t *rwlp)
{
	(void) rw_unlock(rwlp);
	exit_critical(curthread);
}