/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */

#include "lint.h"
#include "thr_uberdata.h"
#include <sys/sdt.h>

#define	TRY_FLAG		0x10
#define	READ_LOCK		0
#define	WRITE_LOCK		1
#define	READ_LOCK_TRY		(READ_LOCK | TRY_FLAG)
#define	WRITE_LOCK_TRY		(WRITE_LOCK | TRY_FLAG)

#define	NLOCKS	4	/* initial number of readlock_t structs allocated */

#define	ASSERT_CONSISTENT_STATE(readers)		\
	ASSERT(!((readers) & URW_WRITE_LOCKED) ||	\
	    ((readers) & ~URW_HAS_WAITERS) == URW_WRITE_LOCKED)
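
/*
 * Illustration of the invariant asserted above: whenever URW_WRITE_LOCKED
 * is set, the only other bit that may be set in the state word is
 * URW_HAS_WAITERS; in particular the reader count must be zero.
 * For example, assuming the URW_* encoding used by libc
 * (URW_HAS_WAITERS 0x80000000, URW_WRITE_LOCKED 0x40000000,
 * URW_READERS_MASK 0x3fffffff), the states 0x40000000 (write-locked)
 * and 0xc0000000 (write-locked with waiters) are consistent, while
 * 0x40000001 (write-locked with one reader) is not.
 */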

/*
 * Find/allocate an entry for rwlp in our array of rwlocks held for reading.
 * We must be deferring signals for this to be safe.
 * Otherwise, if we return an entry with ul_rdlockcnt == 0,
 * it could be reassigned behind our back in a signal handler.
 */
static readlock_t *
rwl_entry(rwlock_t *rwlp)
{
	ulwp_t *self = curthread;
	readlock_t *remembered = NULL;
	readlock_t *readlockp;
	uint_t nlocks;

	/* we must be deferring signals */
	ASSERT((self->ul_critical + self->ul_sigdefer) != 0);

	if ((nlocks = self->ul_rdlockcnt) != 0)
		readlockp = self->ul_readlock.array;
	else {
		nlocks = 1;
		readlockp = &self->ul_readlock.single;
	}

	for (; nlocks; nlocks--, readlockp++) {
		if (readlockp->rd_rwlock == rwlp)
			return (readlockp);
		if (readlockp->rd_count == 0 && remembered == NULL)
			remembered = readlockp;
	}
	if (remembered != NULL) {
		remembered->rd_rwlock = rwlp;
		return (remembered);
	}

	/*
	 * No entry available.  Allocate more space, converting the single
	 * readlock_t entry into an array of readlock_t entries if necessary.
	 */
	if ((nlocks = self->ul_rdlockcnt) == 0) {
		/*
		 * Initial allocation of the readlock_t array.
		 * Convert the single entry into an array.
		 */
		self->ul_rdlockcnt = nlocks = NLOCKS;
		readlockp = lmalloc(nlocks * sizeof (readlock_t));
		/*
		 * The single readlock_t becomes the first entry in the array.
		 */
		*readlockp = self->ul_readlock.single;
		self->ul_readlock.single.rd_count = 0;
		self->ul_readlock.array = readlockp;
		/*
		 * Return the next available entry in the array.
		 */
		(++readlockp)->rd_rwlock = rwlp;
		return (readlockp);
	}
	/*
	 * Reallocate the array, double the size each time.
	 */
	readlockp = lmalloc(nlocks * 2 * sizeof (readlock_t));
	(void) memcpy(readlockp, self->ul_readlock.array,
	    nlocks * sizeof (readlock_t));
	lfree(self->ul_readlock.array, nlocks * sizeof (readlock_t));
	self->ul_readlock.array = readlockp;
	self->ul_rdlockcnt *= 2;
	/*
	 * Return the next available entry in the newly allocated array.
	 */
	(readlockp += nlocks)->rd_rwlock = rwlp;
	return (readlockp);
}
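
/*
 * Growth example (illustrative): the first time a thread needs an entry
 * beyond the single built-in readlock_t, the single entry is converted
 * into an array of NLOCKS (4) entries; holding read locks on a fifth
 * distinct rwlock doubles the array to 8 entries, then 16, and so on.
 */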

/*
 * Free the array of rwlocks held for reading.
 */
void
rwl_free(ulwp_t *ulwp)
{
	uint_t nlocks;

	if ((nlocks = ulwp->ul_rdlockcnt) != 0)
		lfree(ulwp->ul_readlock.array, nlocks * sizeof (readlock_t));
	ulwp->ul_rdlockcnt = 0;
	ulwp->ul_readlock.single.rd_rwlock = NULL;
	ulwp->ul_readlock.single.rd_count = 0;
}

/*
 * Check if a reader version of the lock is held by the current thread.
 */
#pragma weak _rw_read_held = rw_read_held
int
rw_read_held(rwlock_t *rwlp)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t readers;
	ulwp_t *self = curthread;
	readlock_t *readlockp;
	uint_t nlocks;
	int rval = 0;

	no_preempt(self);

	readers = *rwstate;
	ASSERT_CONSISTENT_STATE(readers);
	if (!(readers & URW_WRITE_LOCKED) &&
	    (readers & URW_READERS_MASK) != 0) {
		/*
		 * The lock is held for reading by some thread.
		 * Search our array of rwlocks held for reading for a match.
		 */
		if ((nlocks = self->ul_rdlockcnt) != 0)
			readlockp = self->ul_readlock.array;
		else {
			nlocks = 1;
			readlockp = &self->ul_readlock.single;
		}
		for (; nlocks; nlocks--, readlockp++) {
			if (readlockp->rd_rwlock == rwlp) {
				if (readlockp->rd_count)
					rval = 1;
				break;
			}
		}
	}

	preempt(self);
	return (rval);
}
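
/*
 * Usage sketch (hypothetical caller, for illustration): the held()
 * predicates answer only for the calling thread, so they are suited
 * to assertions rather than lock-state polling, e.g.:
 *
 *	ASSERT(rw_read_held(&db_lock));		(db_lock is hypothetical)
 *	... operate on data protected for reading ...
 */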

/*
 * Check if a writer version of the lock is held by the current thread.
 */
#pragma weak _rw_write_held = rw_write_held
int
rw_write_held(rwlock_t *rwlp)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t readers;
	ulwp_t *self = curthread;
	int rval;

	no_preempt(self);

	readers = *rwstate;
	ASSERT_CONSISTENT_STATE(readers);
	rval = ((readers & URW_WRITE_LOCKED) &&
	    rwlp->rwlock_owner == (uintptr_t)self &&
	    (rwlp->rwlock_type == USYNC_THREAD ||
	    rwlp->rwlock_ownerpid == self->ul_uberdata->pid));

	preempt(self);
	return (rval);
}

#pragma weak _rwlock_init = rwlock_init
/* ARGSUSED2 */
int
rwlock_init(rwlock_t *rwlp, int type, void *arg)
{
	ulwp_t *self = curthread;

	if (type != USYNC_THREAD && type != USYNC_PROCESS)
		return (EINVAL);
	/*
	 * Once reinitialized, we can no longer be holding a read or write lock.
	 * We can do nothing about other threads that are holding read locks.
	 */
	sigoff(self);
	rwl_entry(rwlp)->rd_count = 0;
	sigon(self);
	(void) memset(rwlp, 0, sizeof (*rwlp));
	rwlp->rwlock_type = (uint16_t)type;
	rwlp->rwlock_magic = RWL_MAGIC;
	rwlp->mutex.mutex_type = (uint8_t)type;
	rwlp->mutex.mutex_flag = LOCK_INITED;
	rwlp->mutex.mutex_magic = MUTEX_MAGIC;

	/*
	 * This should be at the beginning of the function,
	 * but for the sake of old broken applications that
	 * do not have proper alignment for their rwlocks
	 * (and don't check the return code from rwlock_init),
	 * we put it here, after initializing the rwlock regardless.
	 */
	if (((uintptr_t)rwlp & (_LONG_LONG_ALIGNMENT - 1)) &&
	    self->ul_misaligned == 0)
		return (EINVAL);

	return (0);
}
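
/*
 * Usage sketch (hypothetical application code): a process-shared rwlock
 * placed in mapped memory would be set up roughly as
 *
 *	rwlock_t *rwp = ...;		(properly aligned, shared mapping)
 *	int error = rwlock_init(rwp, USYNC_PROCESS, NULL);
 *
 * while USYNC_THREAD initializes a process-private lock.  Any other
 * type value, or a misaligned lock (absent the ul_misaligned
 * accommodation above), yields EINVAL.
 */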

#pragma weak pthread_rwlock_destroy = rwlock_destroy
#pragma weak _rwlock_destroy = rwlock_destroy
int
rwlock_destroy(rwlock_t *rwlp)
{
	ulwp_t *self = curthread;

	/*
	 * Once destroyed, we can no longer be holding a read or write lock.
	 * We can do nothing about other threads that are holding read locks.
	 */
	sigoff(self);
	rwl_entry(rwlp)->rd_count = 0;
	sigon(self);
	rwlp->rwlock_magic = 0;
	tdb_sync_obj_deregister(rwlp);
	return (0);
}

/*
 * The following four functions:
 *	read_lock_try()
 *	read_unlock_try()
 *	write_lock_try()
 *	write_unlock_try()
 * lie at the heart of the fast-path code for rwlocks,
 * both process-private and process-shared.
 *
 * They are called once without recourse to any other locking primitives.
 * If they succeed, we are done and the fast-path code was successful.
 * If they fail, we have to deal with lock queues, either to enqueue
 * ourself and sleep or to dequeue and wake up someone else (slow paths).
 *
 * Unless 'ignore_waiters_flag' is true (a condition that applies only
 * when read_lock_try() or write_lock_try() is called from code that
 * is already in the slow path and has already acquired the queue lock),
 * these functions will always fail if the waiters flag, URW_HAS_WAITERS,
 * is set in the 'rwstate' word.  Thus, setting the waiters flag on the
 * rwlock and acquiring the queue lock guarantees exclusive access to
 * the rwlock (and is the only way to guarantee exclusive access).
 */

/*
 * Attempt to acquire a readers lock.  Return true on success.
 */
static int
read_lock_try(rwlock_t *rwlp, int ignore_waiters_flag)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t mask = ignore_waiters_flag?
	    URW_WRITE_LOCKED : (URW_HAS_WAITERS | URW_WRITE_LOCKED);
	uint32_t readers;
	ulwp_t *self = curthread;

	no_preempt(self);
	while (((readers = *rwstate) & mask) == 0) {
		if (atomic_cas_32(rwstate, readers, readers + 1) == readers) {
			preempt(self);
			return (1);
		}
	}
	preempt(self);
	return (0);
}

/*
 * Attempt to release a reader lock.  Return true on success.
 */
static int
read_unlock_try(rwlock_t *rwlp)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t readers;
	ulwp_t *self = curthread;

	no_preempt(self);
	while (((readers = *rwstate) & URW_HAS_WAITERS) == 0) {
		if (atomic_cas_32(rwstate, readers, readers - 1) == readers) {
			preempt(self);
			return (1);
		}
	}
	preempt(self);
	return (0);
}

/*
 * Attempt to acquire a writer lock.  Return true on success.
 */
static int
write_lock_try(rwlock_t *rwlp, int ignore_waiters_flag)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t mask = ignore_waiters_flag?
	    (URW_WRITE_LOCKED | URW_READERS_MASK) :
	    (URW_HAS_WAITERS | URW_WRITE_LOCKED | URW_READERS_MASK);
	ulwp_t *self = curthread;
	uint32_t readers;

	no_preempt(self);
	while (((readers = *rwstate) & mask) == 0) {
		if (atomic_cas_32(rwstate, readers, readers | URW_WRITE_LOCKED)
		    == readers) {
			preempt(self);
			return (1);
		}
	}
	preempt(self);
	return (0);
}

/*
 * Attempt to release a writer lock.  Return true on success.
 */
static int
write_unlock_try(rwlock_t *rwlp)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t readers;
	ulwp_t *self = curthread;

	no_preempt(self);
	while (((readers = *rwstate) & URW_HAS_WAITERS) == 0) {
		if (atomic_cas_32(rwstate, readers, 0) == readers) {
			preempt(self);
			return (1);
		}
	}
	preempt(self);
	return (0);
}
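
/*
 * Summary of the fast paths above (illustrative): the reader count lives
 * in the low-order bits of 'rwstate' (URW_READERS_MASK), URW_WRITE_LOCKED
 * marks a writer holding the lock, and URW_HAS_WAITERS marks a non-empty
 * sleep queue.  read_lock_try() succeeds by atomically incrementing the
 * reader count (e.g. 0 -> 1, or 2 -> 3); write_lock_try() succeeds only
 * from a state with no readers and no writer, by setting URW_WRITE_LOCKED
 * in a single compare-and-swap.
 */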

/*
 * Release a process-private rwlock and wake up any thread(s) sleeping on it.
 * This is called when a thread releases a lock that appears to have waiters.
 */
static void
rw_queue_release(rwlock_t *rwlp)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	queue_head_t *qp;
	uint32_t readers;
	uint32_t writer;
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	ulwp_t *prev;
	int nlwpid = 0;
	int more;
	int maxlwps = MAXLWPS;
	lwpid_t buffer[MAXLWPS];
	lwpid_t *lwpid = buffer;

	qp = queue_lock(rwlp, MX);

	/*
	 * Here is where we actually drop the lock,
	 * but we retain the URW_HAS_WAITERS flag, if it is already set.
	 */
	readers = *rwstate;
	ASSERT_CONSISTENT_STATE(readers);
	if (readers & URW_WRITE_LOCKED)	/* drop the writer lock */
		atomic_and_32(rwstate, ~URW_WRITE_LOCKED);
	else				/* drop the readers lock */
		atomic_dec_32(rwstate);
	if (!(readers & URW_HAS_WAITERS)) {	/* no waiters */
		queue_unlock(qp);
		return;
	}

	/*
	 * The presence of the URW_HAS_WAITERS flag causes all rwlock
	 * code to go through the slow path, acquiring queue_lock(qp).
	 * Therefore, the rest of this code is safe because we are
	 * holding the queue lock and the URW_HAS_WAITERS flag is set.
	 */

	readers = *rwstate;		/* must fetch the value again */
	ASSERT_CONSISTENT_STATE(readers);
	ASSERT(readers & URW_HAS_WAITERS);
	readers &= URW_READERS_MASK;	/* count of current readers */
	writer = 0;			/* no current writer */

	/*
	 * Examine the queue of waiters in priority order and prepare
	 * to wake up as many readers as we encounter before encountering
	 * a writer.  If the highest priority thread on the queue is a
	 * writer, stop there and wake it up.
	 *
	 * We keep track of lwpids that are to be unparked in lwpid[].
	 * __lwp_unpark_all() is called to unpark all of them after
	 * they have been removed from the sleep queue and the sleep
	 * queue lock has been dropped.  If we run out of space in our
	 * on-stack buffer, we need to allocate more but we can't call
	 * lmalloc() because we are holding a queue lock when the overflow
	 * occurs and lmalloc() acquires a lock.  We can't use alloca()
	 * either because the application may have allocated a small
	 * stack and we don't want to overrun the stack.  So we call
	 * alloc_lwpids() to allocate a bigger buffer using the mmap()
	 * system call directly since that path acquires no locks.
	 */
	while ((ulwpp = queue_slot(qp, &prev, &more)) != NULL) {
		ulwp = *ulwpp;
		ASSERT(ulwp->ul_wchan == rwlp);
		if (ulwp->ul_writer) {
			if (writer != 0 || readers != 0)
				break;
			/* one writer to wake */
			writer++;
		} else {
			if (writer != 0)
				break;
			/* at least one reader to wake */
			readers++;
			if (nlwpid == maxlwps)
				lwpid = alloc_lwpids(lwpid, &nlwpid, &maxlwps);
		}
		queue_unlink(qp, ulwpp, prev);
		ulwp->ul_sleepq = NULL;
		ulwp->ul_wchan = NULL;
		if (writer) {
			/*
			 * Hand off the lock to the writer we will be waking.
			 */
			ASSERT((*rwstate & ~URW_HAS_WAITERS) == 0);
			atomic_or_32(rwstate, URW_WRITE_LOCKED);
			rwlp->rwlock_owner = (uintptr_t)ulwp;
		}
		lwpid[nlwpid++] = ulwp->ul_lwpid;
	}

	/*
	 * This modification of rwstate must be done last.
	 * The presence of the URW_HAS_WAITERS flag causes all rwlock
	 * code to go through the slow path, acquiring queue_lock(qp).
	 * Otherwise the read_lock_try() and write_lock_try() fast paths
	 * are effective.
	 */
	if (ulwpp == NULL)
		atomic_and_32(rwstate, ~URW_HAS_WAITERS);

	if (nlwpid == 0) {
		queue_unlock(qp);
	} else {
		ulwp_t *self = curthread;

		no_preempt(self);
		queue_unlock(qp);
		if (nlwpid == 1)
			(void) __lwp_unpark(lwpid[0]);
		else
			(void) __lwp_unpark_all(lwpid, nlwpid);
		preempt(self);
	}
	if (lwpid != buffer)
		(void) munmap((caddr_t)lwpid, maxlwps * sizeof (lwpid_t));
}
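
/*
 * Worked example of the wakeup loop above (illustrative): with no
 * remaining holders and a sleep queue that is, in priority order,
 * reader R1, reader R2, writer W1, reader R3, the loop unparks R1 and
 * R2 and then stops at W1, because readers will then hold the lock.
 * Had W1 been at the front of the queue, only W1 would be woken, with
 * the write lock handed off to it before it runs.
 */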

/*
 * Common code for rdlock, timedrdlock, wrlock, timedwrlock, tryrdlock,
 * and trywrlock for process-shared (USYNC_PROCESS) rwlocks.
 *
 * Note: if the lock appears to be contended, we call __lwp_rwlock_rdlock()
 * or __lwp_rwlock_wrlock() holding the mutex.  These return with the mutex
 * released; if they need to sleep, they release the mutex first.  In the
 * event of a spurious wakeup, these will return EAGAIN (because it is much
 * easier for us to re-acquire the mutex here).
 */
int
shared_rwlock_lock(rwlock_t *rwlp, timespec_t *tsp, int rd_wr)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	mutex_t *mp = &rwlp->mutex;
	uint32_t readers;
	int try_flag;
	int error;

	try_flag = (rd_wr & TRY_FLAG);
	rd_wr &= ~TRY_FLAG;
	ASSERT(rd_wr == READ_LOCK || rd_wr == WRITE_LOCK);

	if (!try_flag) {
		DTRACE_PROBE2(plockstat, rw__block, rwlp, rd_wr);
	}

	do {
		if (try_flag && (*rwstate & URW_WRITE_LOCKED)) {
			error = EBUSY;
			break;
		}
		if ((error = mutex_lock(mp)) != 0)
			break;
		if (rd_wr == READ_LOCK) {
			if (read_lock_try(rwlp, 0)) {
				(void) mutex_unlock(mp);
				break;
			}
		} else {
			if (write_lock_try(rwlp, 0)) {
				(void) mutex_unlock(mp);
				break;
			}
		}
		atomic_or_32(rwstate, URW_HAS_WAITERS);
		readers = *rwstate;
		ASSERT_CONSISTENT_STATE(readers);
		/*
		 * The calls to __lwp_rwlock_*() below will release the mutex,
		 * so we need a dtrace probe here.  The owner field of the
		 * mutex is cleared in the kernel when the mutex is released,
		 * so we should not clear it here.
		 */
		DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
		/*
		 * The waiters bit may be inaccurate.
		 * Only the kernel knows for sure.
		 */
		if (rd_wr == READ_LOCK) {
			if (try_flag)
				error = __lwp_rwlock_tryrdlock(rwlp);
			else
				error = __lwp_rwlock_rdlock(rwlp, tsp);
		} else {
			if (try_flag)
				error = __lwp_rwlock_trywrlock(rwlp);
			else
				error = __lwp_rwlock_wrlock(rwlp, tsp);
		}
	} while (error == EAGAIN || error == EINTR);

	if (!try_flag) {
		DTRACE_PROBE3(plockstat, rw__blocked, rwlp, rd_wr, error == 0);
	}

	return (error);
}

/*
 * Common code for rdlock, timedrdlock, wrlock, timedwrlock, tryrdlock,
 * and trywrlock for process-private (USYNC_THREAD) rwlocks.
 */
int
rwlock_lock(rwlock_t *rwlp, timespec_t *tsp, int rd_wr)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t readers;
	ulwp_t *self = curthread;
	queue_head_t *qp;
	ulwp_t *ulwp;
	int try_flag;
	int ignore_waiters_flag;
	int error = 0;

	try_flag = (rd_wr & TRY_FLAG);
	rd_wr &= ~TRY_FLAG;
	ASSERT(rd_wr == READ_LOCK || rd_wr == WRITE_LOCK);

	if (!try_flag) {
		DTRACE_PROBE2(plockstat, rw__block, rwlp, rd_wr);
	}

	qp = queue_lock(rwlp, MX);
	/* initial attempt to acquire the lock fails if there are waiters */
	ignore_waiters_flag = 0;
	while (error == 0) {
		if (rd_wr == READ_LOCK) {
			if (read_lock_try(rwlp, ignore_waiters_flag))
				break;
		} else {
			if (write_lock_try(rwlp, ignore_waiters_flag))
				break;
		}
		/* subsequent attempts do not fail due to waiters */
		ignore_waiters_flag = 1;
		atomic_or_32(rwstate, URW_HAS_WAITERS);
		readers = *rwstate;
		ASSERT_CONSISTENT_STATE(readers);
		if ((readers & URW_WRITE_LOCKED) ||
		    (rd_wr == WRITE_LOCK &&
		    (readers & URW_READERS_MASK) != 0))
			/* EMPTY */;	/* somebody holds the lock */
		else if ((ulwp = queue_waiter(qp)) == NULL) {
			atomic_and_32(rwstate, ~URW_HAS_WAITERS);
			ignore_waiters_flag = 0;
			continue;	/* no queued waiters, start over */
		} else {
			/*
			 * Do a priority check on the queued waiter (the
			 * highest priority thread on the queue) to see
			 * if we should defer to it or just grab the lock.
			 */
			int our_pri = real_priority(self);
			int his_pri = real_priority(ulwp);

			if (rd_wr == WRITE_LOCK) {
				/*
				 * We defer to a queued thread that has
				 * a higher priority than ours.
				 */
				if (his_pri <= our_pri) {
					/*
					 * Don't defer, just grab the lock.
					 */
					continue;
				}
			} else {
				/*
				 * We defer to a queued thread that has
				 * a higher priority than ours or that
				 * is a writer whose priority equals ours.
				 */
				if (his_pri < our_pri ||
				    (his_pri == our_pri && !ulwp->ul_writer)) {
					/*
					 * Don't defer, just grab the lock.
					 */
					continue;
				}
			}
		}
		/*
		 * We are about to block.
		 * If we're doing a trylock, return EBUSY instead.
		 */
		if (try_flag) {
			error = EBUSY;
			break;
		}
		/*
		 * Enqueue writers ahead of readers.
		 */
		self->ul_writer = rd_wr;	/* *must* be 0 or 1 */
		enqueue(qp, self, 0);
		set_parking_flag(self, 1);
		queue_unlock(qp);
		if ((error = __lwp_park(tsp, 0)) == EINTR)
			error = 0;
		set_parking_flag(self, 0);
		qp = queue_lock(rwlp, MX);
		if (self->ul_sleepq && dequeue_self(qp) == 0) {
			atomic_and_32(rwstate, ~URW_HAS_WAITERS);
			ignore_waiters_flag = 0;
		}
		self->ul_writer = 0;
		if (rd_wr == WRITE_LOCK &&
		    (*rwstate & URW_WRITE_LOCKED) &&
		    rwlp->rwlock_owner == (uintptr_t)self) {
			/*
			 * We acquired the lock by hand-off
			 * from the previous owner.
			 */
			error = 0;	/* timedlock did not fail */
			break;
		}
	}

	/*
	 * Make one final check to see if there are any threads left
	 * on the rwlock queue.  Clear the URW_HAS_WAITERS flag if not.
	 */
	if (qp->qh_root == NULL || qp->qh_root->qr_head == NULL)
		atomic_and_32(rwstate, ~URW_HAS_WAITERS);

	queue_unlock(qp);

	if (!try_flag) {
		DTRACE_PROBE3(plockstat, rw__blocked, rwlp, rd_wr, error == 0);
	}

	return (error);
}

int
rw_rdlock_impl(rwlock_t *rwlp, timespec_t *tsp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	readlock_t *readlockp;
	tdb_rwlock_stats_t *rwsp = RWLOCK_STATS(rwlp, udp);
	int error;

	/*
	 * If we already hold a readers lock on this rwlock,
	 * just increment our reference count and return.
	 */
	sigoff(self);
	readlockp = rwl_entry(rwlp);
	if (readlockp->rd_count != 0) {
		if (readlockp->rd_count == READ_LOCK_MAX) {
			sigon(self);
			error = EAGAIN;
			goto out;
		}
		sigon(self);
		error = 0;
		goto out;
	}
	sigon(self);

	/*
	 * If we hold the writer lock, bail out.
	 */
	if (rw_write_held(rwlp)) {
		if (self->ul_error_detection)
			rwlock_error(rwlp, "rwlock_rdlock",
			    "calling thread owns the writer lock");
		error = EDEADLK;
		goto out;
	}

	if (read_lock_try(rwlp, 0))
		error = 0;
	else if (rwlp->rwlock_type == USYNC_PROCESS)	/* kernel-level */
		error = shared_rwlock_lock(rwlp, tsp, READ_LOCK);
	else						/* user-level */
		error = rwlock_lock(rwlp, tsp, READ_LOCK);

out:
	if (error == 0) {
		sigoff(self);
		rwl_entry(rwlp)->rd_count++;
		sigon(self);
		if (rwsp)
			tdb_incr(rwsp->rw_rdlock);
		DTRACE_PROBE2(plockstat, rw__acquire, rwlp, READ_LOCK);
	} else {
		DTRACE_PROBE3(plockstat, rw__error, rwlp, READ_LOCK, error);
	}

	return (error);
}

#pragma weak pthread_rwlock_rdlock = rw_rdlock
#pragma weak _rw_rdlock = rw_rdlock
int
rw_rdlock(rwlock_t *rwlp)
{
	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	return (rw_rdlock_impl(rwlp, NULL));
}
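
/*
 * Usage sketch (hypothetical caller): read locks nest per thread, so
 *
 *	(void) rw_rdlock(&lock);	(first hold, rd_count == 1)
 *	(void) rw_rdlock(&lock);	(same thread, rd_count == 2)
 *	(void) rw_unlock(&lock);	(rd_count back to 1)
 *	(void) rw_unlock(&lock);	(lock released)
 *
 * Exceeding READ_LOCK_MAX recursive holds fails with EAGAIN, and
 * calling rw_rdlock() while holding the writer lock fails with EDEADLK.
 */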

void
lrw_rdlock(rwlock_t *rwlp)
{
	enter_critical(curthread);
	(void) rw_rdlock_impl(rwlp, NULL);
}

int
pthread_rwlock_reltimedrdlock_np(pthread_rwlock_t *_RESTRICT_KYWD rwlp,
    const struct timespec *_RESTRICT_KYWD reltime)
{
	timespec_t tslocal = *reltime;
	int error;

	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	error = rw_rdlock_impl((rwlock_t *)rwlp, &tslocal);
	if (error == ETIME)
		error = ETIMEDOUT;
	return (error);
}

int
pthread_rwlock_timedrdlock(pthread_rwlock_t *_RESTRICT_KYWD rwlp,
    const struct timespec *_RESTRICT_KYWD abstime)
{
	timespec_t tslocal;
	int error;

	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	abstime_to_reltime(CLOCK_REALTIME, abstime, &tslocal);
	error = rw_rdlock_impl((rwlock_t *)rwlp, &tslocal);
	if (error == ETIME)
		error = ETIMEDOUT;
	return (error);
}

int
rw_wrlock_impl(rwlock_t *rwlp, timespec_t *tsp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_rwlock_stats_t *rwsp = RWLOCK_STATS(rwlp, udp);
	int error;

	/*
	 * If we hold a readers lock on this rwlock, bail out.
	 */
	if (rw_read_held(rwlp)) {
		if (self->ul_error_detection)
			rwlock_error(rwlp, "rwlock_wrlock",
			    "calling thread owns the readers lock");
		error = EDEADLK;
		goto out;
	}

	/*
	 * If we hold the writer lock, bail out.
	 */
	if (rw_write_held(rwlp)) {
		if (self->ul_error_detection)
			rwlock_error(rwlp, "rwlock_wrlock",
			    "calling thread owns the writer lock");
		error = EDEADLK;
		goto out;
	}

	if (write_lock_try(rwlp, 0))
		error = 0;
	else if (rwlp->rwlock_type == USYNC_PROCESS)	/* kernel-level */
		error = shared_rwlock_lock(rwlp, tsp, WRITE_LOCK);
	else						/* user-level */
		error = rwlock_lock(rwlp, tsp, WRITE_LOCK);

out:
	if (error == 0) {
		rwlp->rwlock_owner = (uintptr_t)self;
		if (rwlp->rwlock_type == USYNC_PROCESS)
			rwlp->rwlock_ownerpid = udp->pid;
		if (rwsp) {
			tdb_incr(rwsp->rw_wrlock);
			rwsp->rw_wrlock_begin_hold = gethrtime();
		}
		DTRACE_PROBE2(plockstat, rw__acquire, rwlp, WRITE_LOCK);
	} else {
		DTRACE_PROBE3(plockstat, rw__error, rwlp, WRITE_LOCK, error);
	}
	return (error);
}

#pragma weak pthread_rwlock_wrlock = rw_wrlock
#pragma weak _rw_wrlock = rw_wrlock
int
rw_wrlock(rwlock_t *rwlp)
{
	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	return (rw_wrlock_impl(rwlp, NULL));
}

void
lrw_wrlock(rwlock_t *rwlp)
{
	enter_critical(curthread);
	(void) rw_wrlock_impl(rwlp, NULL);
}

int
pthread_rwlock_reltimedwrlock_np(pthread_rwlock_t *_RESTRICT_KYWD rwlp,
    const struct timespec *_RESTRICT_KYWD reltime)
{
	timespec_t tslocal = *reltime;
	int error;

	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	error = rw_wrlock_impl((rwlock_t *)rwlp, &tslocal);
	if (error == ETIME)
		error = ETIMEDOUT;
	return (error);
}
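
/*
 * Usage sketch (hypothetical caller): the timed variants take either a
 * relative timeout (_np) or an absolute CLOCK_REALTIME deadline, and
 * report expiry as ETIMEDOUT:
 *
 *	struct timespec ts = { 0, 50000000 };	(50 ms, illustrative)
 *	int error = pthread_rwlock_reltimedwrlock_np(&rwl, &ts);
 *	if (error == ETIMEDOUT)
 *		... back off, or take a recovery path ...
 */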

int
pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlp, const timespec_t *abstime)
{
	timespec_t tslocal;
	int error;

	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	abstime_to_reltime(CLOCK_REALTIME, abstime, &tslocal);
	error = rw_wrlock_impl((rwlock_t *)rwlp, &tslocal);
	if (error == ETIME)
		error = ETIMEDOUT;
	return (error);
}

#pragma weak pthread_rwlock_tryrdlock = rw_tryrdlock
int
rw_tryrdlock(rwlock_t *rwlp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_rwlock_stats_t *rwsp = RWLOCK_STATS(rwlp, udp);
	readlock_t *readlockp;
	int error;

	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);

	if (rwsp)
		tdb_incr(rwsp->rw_rdlock_try);

	/*
	 * If we already hold a readers lock on this rwlock,
	 * just increment our reference count and return.
	 */
	sigoff(self);
	readlockp = rwl_entry(rwlp);
	if (readlockp->rd_count != 0) {
		if (readlockp->rd_count == READ_LOCK_MAX) {
			sigon(self);
			error = EAGAIN;
			goto out;
		}
		sigon(self);
		error = 0;
		goto out;
	}
	sigon(self);

	if (read_lock_try(rwlp, 0))
		error = 0;
	else if (rwlp->rwlock_type == USYNC_PROCESS)	/* kernel-level */
		error = shared_rwlock_lock(rwlp, NULL, READ_LOCK_TRY);
	else						/* user-level */
		error = rwlock_lock(rwlp, NULL, READ_LOCK_TRY);

out:
	if (error == 0) {
		sigoff(self);
		rwl_entry(rwlp)->rd_count++;
		sigon(self);
		DTRACE_PROBE2(plockstat, rw__acquire, rwlp, READ_LOCK);
	} else {
		if (rwsp)
			tdb_incr(rwsp->rw_rdlock_try_fail);
		if (error != EBUSY) {
			DTRACE_PROBE3(plockstat, rw__error, rwlp, READ_LOCK,
			    error);
		}
	}

	return (error);
}
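
/*
 * Usage sketch (hypothetical caller): the try variants never block;
 * contention is reported as EBUSY (and deliberately not fired as a
 * plockstat rw__error probe):
 *
 *	if (rw_trywrlock(&lock) == 0) {
 *		... update shared data ...
 *		(void) rw_unlock(&lock);
 *	} else {
 *		... EBUSY: fall back to other work ...
 *	}
 */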

#pragma weak pthread_rwlock_trywrlock = rw_trywrlock
int
rw_trywrlock(rwlock_t *rwlp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_rwlock_stats_t *rwsp = RWLOCK_STATS(rwlp, udp);
	int error;

	ASSERT(!self->ul_critical || self->ul_bindflags);

	if (rwsp)
		tdb_incr(rwsp->rw_wrlock_try);

	if (write_lock_try(rwlp, 0))
		error = 0;
	else if (rwlp->rwlock_type == USYNC_PROCESS)	/* kernel-level */
		error = shared_rwlock_lock(rwlp, NULL, WRITE_LOCK_TRY);
	else						/* user-level */
		error = rwlock_lock(rwlp, NULL, WRITE_LOCK_TRY);

	if (error == 0) {
		rwlp->rwlock_owner = (uintptr_t)self;
		if (rwlp->rwlock_type == USYNC_PROCESS)
			rwlp->rwlock_ownerpid = udp->pid;
		if (rwsp)
			rwsp->rw_wrlock_begin_hold = gethrtime();
		DTRACE_PROBE2(plockstat, rw__acquire, rwlp, WRITE_LOCK);
	} else {
		if (rwsp)
			tdb_incr(rwsp->rw_wrlock_try_fail);
		if (error != EBUSY) {
			DTRACE_PROBE3(plockstat, rw__error, rwlp, WRITE_LOCK,
			    error);
		}
	}
	return (error);
}

#pragma weak pthread_rwlock_unlock = rw_unlock
#pragma weak _rw_unlock = rw_unlock
int
rw_unlock(rwlock_t *rwlp)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t readers;
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_rwlock_stats_t *rwsp;
	int rd_wr;

	readers = *rwstate;
	ASSERT_CONSISTENT_STATE(readers);
	if (readers & URW_WRITE_LOCKED) {
		rd_wr = WRITE_LOCK;
		readers = 0;
	} else {
		rd_wr = READ_LOCK;
		readers &= URW_READERS_MASK;
	}

	if (rd_wr == WRITE_LOCK) {
		/*
		 * Since the writer lock is held, we'd better be
		 * holding it, else we cannot legitimately be here.
		 */
		if (!rw_write_held(rwlp)) {
			if (self->ul_error_detection)
				rwlock_error(rwlp, "rwlock_unlock",
				    "writer lock held, "
				    "but not by the calling thread");
			return (EPERM);
		}
		if ((rwsp = RWLOCK_STATS(rwlp, udp)) != NULL) {
			if (rwsp->rw_wrlock_begin_hold)
				rwsp->rw_wrlock_hold_time +=
				    gethrtime() - rwsp->rw_wrlock_begin_hold;
			rwsp->rw_wrlock_begin_hold = 0;
		}
		rwlp->rwlock_owner = 0;
		rwlp->rwlock_ownerpid = 0;
	} else if (readers > 0) {
		/*
		 * A readers lock is held; if we don't hold one, bail out.
		 */
		readlock_t *readlockp;

		sigoff(self);
		readlockp = rwl_entry(rwlp);
		if (readlockp->rd_count == 0) {
			sigon(self);
			if (self->ul_error_detection)
				rwlock_error(rwlp, "rwlock_unlock",
				    "readers lock held, "
				    "but not by the calling thread");
			return (EPERM);
		}
		/*
		 * If we hold more than one readers lock on this rwlock,
		 * just decrement our reference count and return.
		 */
		if (--readlockp->rd_count != 0) {
			sigon(self);
			goto out;
		}
		sigon(self);
	} else {
		/*
		 * This is a usage error.
		 * No thread should release an unowned lock.
		 */
		if (self->ul_error_detection)
			rwlock_error(rwlp, "rwlock_unlock", "lock not owned");
		return (EPERM);
	}

	if (rd_wr == WRITE_LOCK && write_unlock_try(rwlp)) {
		/* EMPTY */;
	} else if (rd_wr == READ_LOCK && read_unlock_try(rwlp)) {
		/* EMPTY */;
	} else if (rwlp->rwlock_type == USYNC_PROCESS) {
		(void) mutex_lock(&rwlp->mutex);
		(void) __lwp_rwlock_unlock(rwlp);
		(void) mutex_unlock(&rwlp->mutex);
	} else {
		rw_queue_release(rwlp);
	}

out:
	DTRACE_PROBE2(plockstat, rw__release, rwlp, rd_wr);
	return (0);
}

void
lrw_unlock(rwlock_t *rwlp)
{
	(void) rw_unlock(rwlp);
	exit_critical(curthread);
}
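
/*
 * Pairing note (illustrative): lrw_rdlock() and lrw_wrlock() enter a
 * critical section before taking the lock, so each must be matched by
 * lrw_unlock(), which releases the lock and then exits the critical
 * section:
 *
 *	lrw_wrlock(rwlp);
 *	... critical work ...
 *	lrw_unlock(rwlp);
 */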