/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/refcount.h>
#include <sys/rrwlock.h>

/*
 * This file contains the implementation of a reader/writer lock
 * with re-entrant reads (aka "rrwlock").
 *
 * This is a normal reader/writer lock with the additional feature
 * of allowing threads that have already obtained a read lock to
 * re-enter another read lock (re-entrant read) - even if there are
 * waiting writers.
 *
 * Callers who have not obtained a read lock give waiting writers priority.
 *
 * The rrwlock_t lock does not allow re-entrant writers, nor does it
 * allow a re-entrant mix of reads and writes (that is, it does not
 * allow a caller who has already obtained a read lock to then grab
 * a write lock without first dropping all read locks, and vice versa).
 *
 * The rrwlock_t uses tsd (thread specific data) to keep a list of
 * nodes (rrw_node_t), where each node keeps track of which specific
 * lock (rrw_node_t::rn_rrl) the thread has grabbed. Since re-entering
 * should be rare, a thread that grabs multiple reads on the same rrwlock_t
 * will store multiple rrw_node_ts with the same 'rn_rrl'. Nodes on the
 * tsd list can represent different rrwlock_ts. This allows a thread
 * to enter multiple and unique rrwlock_ts for read locks at the same time.
 *
 * Since using tsd exposes some overhead, the rrwlock_t only needs to
 * keep tsd data when writers are waiting. If no writers are waiting, then
 * a reader just bumps the anonymous read count (rr_anon_rcount) - no tsd
 * is needed. Once a writer attempts to grab the lock, readers then
 * keep tsd data and bump the linked readers count (rr_linked_rcount).
 *
 * If there are waiting writers and there are anonymous readers, then a
 * reader doesn't know if it holds a re-entrant read lock. But since it
 * may be holding one, we allow the read to proceed (otherwise it could
 * deadlock). Since once waiting writers are active, readers no longer
 * bump the anonymous count, the anonymous readers will eventually flush
 * themselves out. At this point, readers will be able to tell if they
 * hold a re-entrant read lock (have a rrw_node_t entry for the lock) or
 * not. If they do, then we must let them proceed. If they do not, then
 * the reader blocks for the waiting writers. Hence, we do not starve
 * writers.
 */
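
/*
 * Typical usage, as an illustrative sketch (not part of this file's
 * compiled code; FTAG is the caller-identification tag conventionally
 * passed for reference counting in this codebase):
 *
 *	rrwlock_t rrl;
 *
 *	rrw_init(&rrl, B_FALSE);
 *	rrw_enter(&rrl, RW_READER, FTAG);	first read hold
 *	rrw_enter(&rrl, RW_READER, FTAG);	re-entrant read is allowed
 *	rrw_exit(&rrl, FTAG);
 *	rrw_exit(&rrl, FTAG);			all read holds dropped
 *	rrw_enter(&rrl, RW_WRITER, FTAG);	writers are never re-entrant
 *	rrw_exit(&rrl, FTAG);
 *	rrw_destroy(&rrl);
 */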

/* global key for TSD */
uint_t rrw_tsd_key;

typedef struct rrw_node {
	struct rrw_node *rn_next;
	rrwlock_t *rn_rrl;
	void *rn_tag;
} rrw_node_t;

static rrw_node_t *
rrn_find(rrwlock_t *rrl)
{
	rrw_node_t *rn;

	if (refcount_count(&rrl->rr_linked_rcount) == 0)
		return (NULL);

	for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
		if (rn->rn_rrl == rrl)
			return (rn);
	}
	return (NULL);
}

/*
 * Add a node to the head of the singly linked list.
 */
static void
rrn_add(rrwlock_t *rrl, void *tag)
{
	rrw_node_t *rn;

	rn = kmem_alloc(sizeof (*rn), KM_SLEEP);
	rn->rn_rrl = rrl;
	rn->rn_next = tsd_get(rrw_tsd_key);
	rn->rn_tag = tag;
	VERIFY(tsd_set(rrw_tsd_key, rn) == 0);
}

/*
 * If a node is found for 'rrl', then remove the node from this
 * thread's list and return TRUE; otherwise return FALSE.
 */
static boolean_t
rrn_find_and_remove(rrwlock_t *rrl, void *tag)
{
	rrw_node_t *rn;
	rrw_node_t *prev = NULL;

	if (refcount_count(&rrl->rr_linked_rcount) == 0)
		return (B_FALSE);

	for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
		if (rn->rn_rrl == rrl && rn->rn_tag == tag) {
			if (prev)
				prev->rn_next = rn->rn_next;
			else
				VERIFY(tsd_set(rrw_tsd_key, rn->rn_next) == 0);
			kmem_free(rn, sizeof (*rn));
			return (B_TRUE);
		}
		prev = rn;
	}
	return (B_FALSE);
}

void
rrw_init(rrwlock_t *rrl, boolean_t track_all)
{
	mutex_init(&rrl->rr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&rrl->rr_cv, NULL, CV_DEFAULT, NULL);
	rrl->rr_writer = NULL;
	refcount_create(&rrl->rr_anon_rcount);
	refcount_create(&rrl->rr_linked_rcount);
	rrl->rr_writer_wanted = B_FALSE;
	rrl->rr_track_all = track_all;
}

void
rrw_destroy(rrwlock_t *rrl)
{
	mutex_destroy(&rrl->rr_lock);
	cv_destroy(&rrl->rr_cv);
	ASSERT(rrl->rr_writer == NULL);
	refcount_destroy(&rrl->rr_anon_rcount);
	refcount_destroy(&rrl->rr_linked_rcount);
}

static void
rrw_enter_read_impl(rrwlock_t *rrl, boolean_t prio, void *tag)
{
	mutex_enter(&rrl->rr_lock);
#if !defined(DEBUG) && defined(_KERNEL)
	if (rrl->rr_writer == NULL && !rrl->rr_writer_wanted &&
	    !rrl->rr_track_all) {
		rrl->rr_anon_rcount.rc_count++;
		mutex_exit(&rrl->rr_lock);
		return;
	}
	DTRACE_PROBE(zfs__rrwfastpath__rdmiss);
#endif
	ASSERT(rrl->rr_writer != curthread);
	ASSERT(refcount_count(&rrl->rr_anon_rcount) >= 0);

	while (rrl->rr_writer != NULL || (rrl->rr_writer_wanted &&
	    refcount_is_zero(&rrl->rr_anon_rcount) && !prio &&
	    rrn_find(rrl) == NULL))
		cv_wait(&rrl->rr_cv, &rrl->rr_lock);

	if (rrl->rr_writer_wanted || rrl->rr_track_all) {
		/* may or may not be a re-entrant enter */
		rrn_add(rrl, tag);
		(void) refcount_add(&rrl->rr_linked_rcount, tag);
	} else {
		(void) refcount_add(&rrl->rr_anon_rcount, tag);
	}
	ASSERT(rrl->rr_writer == NULL);
	mutex_exit(&rrl->rr_lock);
}

void
rrw_enter_read(rrwlock_t *rrl, void *tag)
{
	rrw_enter_read_impl(rrl, B_FALSE, tag);
}

/*
 * Take a read lock even if there are pending write lock requests. If we
 * want to take the lock re-entrantly, but from different threads (that
 * have a relationship to each other), the normal detection mechanism to
 * overrule the pending writer does not work, so we have to give an
 * explicit hint here.
 */
void
rrw_enter_read_prio(rrwlock_t *rrl, void *tag)
{
	rrw_enter_read_impl(rrl, B_TRUE, tag);
}
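
/*
 * Illustrative sketch (not part of this file): a thread that already
 * holds the lock for read hands work off to a helper thread. The helper
 * has no rrw_node_t of its own, so without the priority hint it could
 * block behind a waiting writer while the parent waits on the helper,
 * deadlocking. The dispatch mechanism is left abstract here.
 *
 *	parent thread:
 *		rrw_enter_read(&rrl, FTAG);
 *		... dispatch work to the helper and wait for it ...
 *		rrw_exit(&rrl, FTAG);
 *
 *	helper thread:
 *		rrw_enter_read_prio(&rrl, FTAG);
 *		... do the work ...
 *		rrw_exit(&rrl, FTAG);
 */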

void
rrw_enter_write(rrwlock_t *rrl)
{
	mutex_enter(&rrl->rr_lock);
	ASSERT(rrl->rr_writer != curthread);

	while (refcount_count(&rrl->rr_anon_rcount) > 0 ||
	    refcount_count(&rrl->rr_linked_rcount) > 0 ||
	    rrl->rr_writer != NULL) {
		rrl->rr_writer_wanted = B_TRUE;
		cv_wait(&rrl->rr_cv, &rrl->rr_lock);
	}
	rrl->rr_writer_wanted = B_FALSE;
	rrl->rr_writer = curthread;
	mutex_exit(&rrl->rr_lock);
}

void
rrw_enter(rrwlock_t *rrl, krw_t rw, void *tag)
{
	if (rw == RW_READER)
		rrw_enter_read(rrl, tag);
	else
		rrw_enter_write(rrl);
}

void
rrw_exit(rrwlock_t *rrl, void *tag)
{
	mutex_enter(&rrl->rr_lock);
#if !defined(DEBUG) && defined(_KERNEL)
	if (!rrl->rr_writer && rrl->rr_linked_rcount.rc_count == 0) {
		rrl->rr_anon_rcount.rc_count--;
		if (rrl->rr_anon_rcount.rc_count == 0)
			cv_broadcast(&rrl->rr_cv);
		mutex_exit(&rrl->rr_lock);
		return;
	}
	DTRACE_PROBE(zfs__rrwfastpath__exitmiss);
#endif
	ASSERT(!refcount_is_zero(&rrl->rr_anon_rcount) ||
	    !refcount_is_zero(&rrl->rr_linked_rcount) ||
	    rrl->rr_writer != NULL);

	if (rrl->rr_writer == NULL) {
		int64_t count;
		if (rrn_find_and_remove(rrl, tag)) {
			count = refcount_remove(&rrl->rr_linked_rcount, tag);
		} else {
			ASSERT(!rrl->rr_track_all);
			count = refcount_remove(&rrl->rr_anon_rcount, tag);
		}
		if (count == 0)
			cv_broadcast(&rrl->rr_cv);
	} else {
		ASSERT(rrl->rr_writer == curthread);
		ASSERT(refcount_is_zero(&rrl->rr_anon_rcount) &&
		    refcount_is_zero(&rrl->rr_linked_rcount));
		rrl->rr_writer = NULL;
		cv_broadcast(&rrl->rr_cv);
	}
	mutex_exit(&rrl->rr_lock);
}

/*
 * If the lock was created with track_all, rrw_held(RW_READER) will return
 * B_TRUE iff the current thread has the lock for reader. Otherwise it may
 * return B_TRUE if any thread has the lock for reader.
 */
boolean_t
rrw_held(rrwlock_t *rrl, krw_t rw)
{
	boolean_t held;

	mutex_enter(&rrl->rr_lock);
	if (rw == RW_WRITER) {
		held = (rrl->rr_writer == curthread);
	} else {
		held = (!refcount_is_zero(&rrl->rr_anon_rcount) ||
		    rrn_find(rrl) != NULL);
	}
	mutex_exit(&rrl->rr_lock);

	return (held);
}
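
/*
 * Illustrative sketch (not part of this file): rrw_held() is mainly
 * useful in assertions. With a track_all lock the RW_READER check is
 * per-thread, so it can assert "this thread holds the lock for read";
 * the RW_WRITER check is always per-thread:
 *
 *	ASSERT(rrw_held(&rrl, RW_READER));
 *	ASSERT(rrw_held(&rrl, RW_WRITER));
 */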

void
rrw_tsd_destroy(void *arg)
{
	rrw_node_t *rn = arg;
	if (rn != NULL) {
		panic("thread %p terminating with rrw lock %p held",
		    (void *)curthread, (void *)rn->rn_rrl);
	}
}
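
/*
 * Illustrative sketch (not part of this file): the destructor above only
 * runs if it was registered when rrw_tsd_key was created, which the
 * consumer of this code is expected to do once, early in module init;
 * the exact call site lives outside this file:
 *
 *	tsd_create(&rrw_tsd_key, rrw_tsd_destroy);
 */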

/*
 * A reader-mostly lock implementation, layered on the reader/writer
 * locks above and tuned for highly parallel read acquisitions, while
 * pessimizing writes.
 *
 * The idea is to split a single busy lock into an array of locks, so
 * that each reader can lock only one of them for read, depending on the
 * result of a simple hash function. That proportionally reduces lock
 * congestion. A writer, on the other hand, has to sequentially acquire
 * a write lock on all the locks. That makes write acquisition
 * proportionally slower, but in the places where it is used (filesystem
 * unmount) performance is not critical.
 *
 * All the functions below are direct wrappers around the functions above.
 */
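
/*
 * Illustrative sketch (not part of this file): rrmlock_t usage mirrors
 * rrwlock_t, only the type and function prefix change.
 *
 *	rrmlock_t rrm;
 *
 *	rrm_init(&rrm, B_FALSE);
 *	rrm_enter(&rrm, RW_READER, FTAG);	locks one of the stripes
 *	rrm_exit(&rrm, FTAG);			same thread, same stripe
 *	rrm_enter(&rrm, RW_WRITER, FTAG);	locks all RRM_NUM_LOCKS stripes
 *	rrm_exit(&rrm, FTAG);
 *	rrm_destroy(&rrm);
 */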
void
rrm_init(rrmlock_t *rrl, boolean_t track_all)
{
	int i;

	for (i = 0; i < RRM_NUM_LOCKS; i++)
		rrw_init(&rrl->locks[i], track_all);
}

void
rrm_destroy(rrmlock_t *rrl)
{
	int i;

	for (i = 0; i < RRM_NUM_LOCKS; i++)
		rrw_destroy(&rrl->locks[i]);
}

void
rrm_enter(rrmlock_t *rrl, krw_t rw, void *tag)
{
	if (rw == RW_READER)
		rrm_enter_read(rrl, tag);
	else
		rrm_enter_write(rrl);
}

/*
 * This maps the current thread to a specific lock. Note that the lock
 * must be released by the same thread that acquired it. We do this
 * mapping by taking the thread pointer mod a prime number. We examine
 * only the low 32 bits of the thread pointer, because 32-bit division
 * is faster than 64-bit division, and the high 32 bits have little
 * entropy anyway.
 */
#define	RRM_TD_LOCK()	(((uint32_t)(uintptr_t)(curthread)) % RRM_NUM_LOCKS)
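
/*
 * Worked example (illustrative only; the thread pointer is made up and
 * RRM_NUM_LOCKS == 17 is assumed from sys/rrwlock.h): with
 * curthread == 0xffffff0000000100, the low 32 bits are 0x100 == 256,
 * and 256 % 17 == 1, so every acquisition by this thread maps to
 * locks[1].
 */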

void
rrm_enter_read(rrmlock_t *rrl, void *tag)
{
	rrw_enter_read(&rrl->locks[RRM_TD_LOCK()], tag);
}

void
rrm_enter_write(rrmlock_t *rrl)
{
	int i;

	for (i = 0; i < RRM_NUM_LOCKS; i++)
		rrw_enter_write(&rrl->locks[i]);
}

void
rrm_exit(rrmlock_t *rrl, void *tag)
{
	int i;

	if (rrl->locks[0].rr_writer == curthread) {
		for (i = 0; i < RRM_NUM_LOCKS; i++)
			rrw_exit(&rrl->locks[i], tag);
	} else {
		rrw_exit(&rrl->locks[RRM_TD_LOCK()], tag);
	}
}

boolean_t
rrm_held(rrmlock_t *rrl, krw_t rw)
{
	if (rw == RW_WRITER) {
		return (rrw_held(&rrl->locks[0], rw));
	} else {
		return (rrw_held(&rrl->locks[RRM_TD_LOCK()], rw));
	}
}