/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/rrwlock.h>
#include <sys/trace_zfs.h>

/*
 * This file contains the implementation of a reader/writer lock with
 * re-entrant reads (aka "rrwlock").
 *
 * This is a normal reader/writer lock with the additional feature
 * of allowing threads that have already obtained a read lock to
 * re-enter another read lock (re-entrant read) - even if there are
 * waiting writers.
 *
 * Callers who have not obtained a read lock give waiting writers priority.
 *
 * The rrwlock_t lock does not allow re-entrant writers, nor does it
 * allow a re-entrant mix of reads and writes (that is, it does not
 * allow a caller who has already obtained a read lock to then grab a
 * write lock without first dropping all read locks, and vice versa).
 *
 * The rrwlock_t uses tsd (thread specific data) to keep a list of
 * nodes (rrw_node_t), where each node keeps track of which specific
 * lock (rrw_node_t::rn_rrl) the thread has grabbed.  Since re-entering
 * should be rare, a thread that grabs multiple reads on the same rrwlock_t
 * will store multiple rrw_node_ts with the same 'rn_rrl'.  Nodes on the
 * tsd list can also refer to different rrwlock_ts, which allows a thread
 * to hold read locks on several distinct rrwlock_ts at the same time.
 *
 * Since using tsd exposes some overhead, the rrwlock_t only keeps tsd
 * data when writers are waiting.  If no writers are waiting, a reader
 * just bumps the anonymous read count (rr_anon_rcount) - no tsd data
 * is needed.  Once a writer attempts to grab the lock, readers then
 * keep tsd data and bump the linked readers count (rr_linked_rcount).
 *
 * If there are waiting writers and there are anonymous readers, then a
 * new reader cannot tell whether its acquisition is re-entrant.  But since
 * it may be, we allow the read to proceed (otherwise it could deadlock).
 * Once writers are waiting, readers no longer bump the anonymous count,
 * so the anonymous readers eventually drain out.  At that point, readers
 * can tell whether their acquisition is re-entrant (they have an
 * rrw_node_t entry for the lock) or not.  Re-entrant readers must be
 * allowed to proceed; all other readers block behind the waiting writers.
 * Hence, we do not starve writers.
 */
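
/*
 * Illustrative usage (a minimal sketch, not taken from this file's
 * callers; the lock variable is hypothetical and FTAG is the usual
 * ZFS tag macro).  The second rrw_enter() below is a re-entrant read
 * and will not block even if a writer is already waiting; the write
 * enter waits until every read hold has been dropped:
 *
 *	rrwlock_t lock;
 *	rrw_init(&lock, B_FALSE);
 *
 *	rrw_enter(&lock, RW_READER, FTAG);
 *	rrw_enter(&lock, RW_READER, FTAG);
 *	rrw_exit(&lock, FTAG);
 *	rrw_exit(&lock, FTAG);
 *
 *	rrw_enter(&lock, RW_WRITER, FTAG);
 *	rrw_exit(&lock, FTAG);
 *
 *	rrw_destroy(&lock);
 */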

/* global key for TSD */
uint_t rrw_tsd_key;

typedef struct rrw_node {
	struct rrw_node *rn_next;
	rrwlock_t *rn_rrl;
	const void *rn_tag;
} rrw_node_t;

static rrw_node_t *
rrn_find(rrwlock_t *rrl)
{
	rrw_node_t *rn;

	if (zfs_refcount_count(&rrl->rr_linked_rcount) == 0)
		return (NULL);

	for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
		if (rn->rn_rrl == rrl)
			return (rn);
	}
	return (NULL);
}

/*
 * Add a node to the head of the singly linked list.
 */
static void
rrn_add(rrwlock_t *rrl, const void *tag)
{
	rrw_node_t *rn;

	rn = kmem_alloc(sizeof (*rn), KM_SLEEP);
	rn->rn_rrl = rrl;
	rn->rn_next = tsd_get(rrw_tsd_key);
	rn->rn_tag = tag;
	VERIFY(tsd_set(rrw_tsd_key, rn) == 0);
}

/*
 * If a node is found for 'rrl', then remove the node from this
 * thread's list and return TRUE; otherwise return FALSE.
 */
static boolean_t
rrn_find_and_remove(rrwlock_t *rrl, const void *tag)
{
	rrw_node_t *rn;
	rrw_node_t *prev = NULL;

	if (zfs_refcount_count(&rrl->rr_linked_rcount) == 0)
		return (B_FALSE);

	for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
		if (rn->rn_rrl == rrl && rn->rn_tag == tag) {
			if (prev)
				prev->rn_next = rn->rn_next;
			else
				VERIFY(tsd_set(rrw_tsd_key, rn->rn_next) == 0);
			kmem_free(rn, sizeof (*rn));
			return (B_TRUE);
		}
		prev = rn;
	}
	return (B_FALSE);
}

void
rrw_init(rrwlock_t *rrl, boolean_t track_all)
{
	mutex_init(&rrl->rr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&rrl->rr_cv, NULL, CV_DEFAULT, NULL);
	rrl->rr_writer = NULL;
	zfs_refcount_create(&rrl->rr_anon_rcount);
	zfs_refcount_create(&rrl->rr_linked_rcount);
	rrl->rr_writer_wanted = B_FALSE;
	rrl->rr_track_all = track_all;
}

void
rrw_destroy(rrwlock_t *rrl)
{
	mutex_destroy(&rrl->rr_lock);
	cv_destroy(&rrl->rr_cv);
	ASSERT(rrl->rr_writer == NULL);
	zfs_refcount_destroy(&rrl->rr_anon_rcount);
	zfs_refcount_destroy(&rrl->rr_linked_rcount);
}

static void
rrw_enter_read_impl(rrwlock_t *rrl, boolean_t prio, const void *tag)
{
	mutex_enter(&rrl->rr_lock);
#if !defined(ZFS_DEBUG) && defined(_KERNEL)
	if (rrl->rr_writer == NULL && !rrl->rr_writer_wanted &&
	    !rrl->rr_track_all) {
		rrl->rr_anon_rcount.rc_count++;
		mutex_exit(&rrl->rr_lock);
		return;
	}
	DTRACE_PROBE(zfs__rrwfastpath__rdmiss);
#endif
	ASSERT(rrl->rr_writer != curthread);
	ASSERT(zfs_refcount_count(&rrl->rr_anon_rcount) >= 0);

	while (rrl->rr_writer != NULL || (rrl->rr_writer_wanted &&
	    zfs_refcount_is_zero(&rrl->rr_anon_rcount) && !prio &&
	    rrn_find(rrl) == NULL))
		cv_wait(&rrl->rr_cv, &rrl->rr_lock);

	if (rrl->rr_writer_wanted || rrl->rr_track_all) {
		/* may or may not be a re-entrant enter */
		rrn_add(rrl, tag);
		(void) zfs_refcount_add(&rrl->rr_linked_rcount, tag);
	} else {
		(void) zfs_refcount_add(&rrl->rr_anon_rcount, tag);
	}
	ASSERT(rrl->rr_writer == NULL);
	mutex_exit(&rrl->rr_lock);
}

void
rrw_enter_read(rrwlock_t *rrl, const void *tag)
{
	rrw_enter_read_impl(rrl, B_FALSE, tag);
}

/*
 * Take a read lock even if there are pending write lock requests.  If we
 * want to take the lock re-entrantly, but from different threads (that have
 * a relationship to each other), the normal detection mechanism to overrule
 * the pending writer does not work, so we have to give an explicit hint here.
 */
void
rrw_enter_read_prio(rrwlock_t *rrl, const void *tag)
{
	rrw_enter_read_impl(rrl, B_TRUE, tag);
}
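
/*
 * Illustrative pattern for rrw_enter_read_prio() (a hedged sketch; the
 * hand-off below is hypothetical): a thread that already holds the lock
 * for read delegates work to a helper thread and waits for it.  The
 * helper has no rrw_node_t of its own, so a plain rrw_enter_read()
 * could block behind a waiting writer, which in turn waits for the
 * parent's read hold, producing a deadlock.  The prio variant overrules
 * the pending writer:
 *
 *	parent thread:
 *		rrw_enter_read(&lock, FTAG);
 *		(dispatch helper and wait for it)
 *		rrw_exit(&lock, FTAG);
 *
 *	helper thread:
 *		rrw_enter_read_prio(&lock, FTAG);
 *		(do work)
 *		rrw_exit(&lock, FTAG);
 */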

void
rrw_enter_write(rrwlock_t *rrl)
{
	mutex_enter(&rrl->rr_lock);
	ASSERT(rrl->rr_writer != curthread);

	while (zfs_refcount_count(&rrl->rr_anon_rcount) > 0 ||
	    zfs_refcount_count(&rrl->rr_linked_rcount) > 0 ||
	    rrl->rr_writer != NULL) {
		rrl->rr_writer_wanted = B_TRUE;
		cv_wait(&rrl->rr_cv, &rrl->rr_lock);
	}
	rrl->rr_writer_wanted = B_FALSE;
	rrl->rr_writer = curthread;
	mutex_exit(&rrl->rr_lock);
}

void
rrw_enter(rrwlock_t *rrl, krw_t rw, const void *tag)
{
	if (rw == RW_READER)
		rrw_enter_read(rrl, tag);
	else
		rrw_enter_write(rrl);
}

void
rrw_exit(rrwlock_t *rrl, const void *tag)
{
	mutex_enter(&rrl->rr_lock);
#if !defined(ZFS_DEBUG) && defined(_KERNEL)
	if (!rrl->rr_writer && rrl->rr_linked_rcount.rc_count == 0) {
		rrl->rr_anon_rcount.rc_count--;
		if (rrl->rr_anon_rcount.rc_count == 0)
			cv_broadcast(&rrl->rr_cv);
		mutex_exit(&rrl->rr_lock);
		return;
	}
	DTRACE_PROBE(zfs__rrwfastpath__exitmiss);
#endif
	ASSERT(!zfs_refcount_is_zero(&rrl->rr_anon_rcount) ||
	    !zfs_refcount_is_zero(&rrl->rr_linked_rcount) ||
	    rrl->rr_writer != NULL);

	if (rrl->rr_writer == NULL) {
		int64_t count;
		if (rrn_find_and_remove(rrl, tag)) {
			count = zfs_refcount_remove(
			    &rrl->rr_linked_rcount, tag);
		} else {
			ASSERT(!rrl->rr_track_all);
			count = zfs_refcount_remove(&rrl->rr_anon_rcount, tag);
		}
		if (count == 0)
			cv_broadcast(&rrl->rr_cv);
	} else {
		ASSERT(rrl->rr_writer == curthread);
		ASSERT(zfs_refcount_is_zero(&rrl->rr_anon_rcount) &&
		    zfs_refcount_is_zero(&rrl->rr_linked_rcount));
		rrl->rr_writer = NULL;
		cv_broadcast(&rrl->rr_cv);
	}
	mutex_exit(&rrl->rr_lock);
}

/*
 * If the lock was created with track_all, rrw_held(RW_READER) will return
 * B_TRUE iff the current thread has the lock for reader.  Otherwise it may
 * return B_TRUE if any thread has the lock for reader.
 */
boolean_t
rrw_held(rrwlock_t *rrl, krw_t rw)
{
	boolean_t held;

	mutex_enter(&rrl->rr_lock);
	if (rw == RW_WRITER) {
		held = (rrl->rr_writer == curthread);
	} else {
		held = (!zfs_refcount_is_zero(&rrl->rr_anon_rcount) ||
		    rrn_find(rrl) != NULL);
	}
	mutex_exit(&rrl->rr_lock);

	return (held);
}
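
/*
 * Illustrative check (a hedged sketch; the lock below is hypothetical).
 * With track_all the RW_READER case is per-thread, so this is a
 * meaningful "does the current thread hold it?" assertion; without
 * track_all it only asserts that some thread holds a read lock:
 *
 *	ASSERT(rrw_held(&lock, RW_READER));
 */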

void
rrw_tsd_destroy(void *arg)
{
	rrw_node_t *rn = arg;
	if (rn != NULL) {
		panic("thread %p terminating with rrw lock %p held",
		    (void *)curthread, (void *)rn->rn_rrl);
	}
}

/*
 * A reader-mostly lock implementation, an optimization over plain
 * reader/writer locks for highly parallel read acquisitions, at the cost
 * of pessimizing writes.
 *
 * The idea is to split a single busy lock into an array of locks, so that
 * each reader locks only one of them for read, selected by a simple hash
 * function.  That proportionally reduces lock congestion.  A writer, in
 * turn, has to sequentially acquire write on all of the locks.  That makes
 * write acquisition proportionally slower, but in the places where this
 * lock is used (filesystem unmount) performance is not critical.
 *
 * All the functions below are direct wrappers around the functions above.
 */
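
/*
 * Illustrative usage (a minimal sketch; the lock variable is
 * hypothetical).  A read enter hashes the calling thread to one of the
 * RRM_NUM_LOCKS child locks, so the matching rrm_exit() must run on the
 * same thread; a write enter takes every child lock:
 *
 *	rrmlock_t rrm;
 *	rrm_init(&rrm, B_FALSE);
 *
 *	rrm_enter(&rrm, RW_READER, FTAG);
 *	rrm_exit(&rrm, FTAG);
 *
 *	rrm_enter(&rrm, RW_WRITER, FTAG);
 *	rrm_exit(&rrm, FTAG);
 *
 *	rrm_destroy(&rrm);
 */
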
void
rrm_init(rrmlock_t *rrl, boolean_t track_all)
{
	int i;

	for (i = 0; i < RRM_NUM_LOCKS; i++)
		rrw_init(&rrl->locks[i], track_all);
}

void
rrm_destroy(rrmlock_t *rrl)
{
	int i;

	for (i = 0; i < RRM_NUM_LOCKS; i++)
		rrw_destroy(&rrl->locks[i]);
}

void
rrm_enter(rrmlock_t *rrl, krw_t rw, const void *tag)
{
	if (rw == RW_READER)
		rrm_enter_read(rrl, tag);
	else
		rrm_enter_write(rrl);
}

/*
 * This maps the current thread to a specific lock.  Note that the lock
 * must be released by the same thread that acquired it.  We do this
 * mapping by taking the thread pointer mod a prime number.  We examine
 * only the low 32 bits of the thread pointer, because 32-bit division
 * is faster than 64-bit division, and the high 32 bits have little
 * entropy anyway.
 */
#define	RRM_TD_LOCK()	(((uint32_t)(uintptr_t)(curthread)) % RRM_NUM_LOCKS)

void
rrm_enter_read(rrmlock_t *rrl, const void *tag)
{
	rrw_enter_read(&rrl->locks[RRM_TD_LOCK()], tag);
}

void
rrm_enter_write(rrmlock_t *rrl)
{
	int i;

	for (i = 0; i < RRM_NUM_LOCKS; i++)
		rrw_enter_write(&rrl->locks[i]);
}

void
rrm_exit(rrmlock_t *rrl, const void *tag)
{
	int i;

	if (rrl->locks[0].rr_writer == curthread) {
		for (i = 0; i < RRM_NUM_LOCKS; i++)
			rrw_exit(&rrl->locks[i], tag);
	} else {
		rrw_exit(&rrl->locks[RRM_TD_LOCK()], tag);
	}
}

boolean_t
rrm_held(rrmlock_t *rrl, krw_t rw)
{
	if (rw == RW_WRITER) {
		return (rrw_held(&rrl->locks[0], rw));
	} else {
		return (rrw_held(&rrl->locks[RRM_TD_LOCK()], rw));
	}
}