/*
 * Copyright 2011-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_RWLOCK_H
#define CK_RWLOCK_H

#include <ck_elide.h>
#include <ck_pr.h>
#include <ck_stdbool.h>
#include <ck_stddef.h>

struct ck_rwlock {
	unsigned int writer;
	unsigned int n_readers;
};
typedef struct ck_rwlock ck_rwlock_t;

#define CK_RWLOCK_INITIALIZER {0, 0}
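
/*
 * Minimal usage sketch (the update()/observe() callers are illustrative
 * only): a statically initialized lock protecting shared state. Writers
 * exclude both readers and other writers; readers only exclude writers.
 *
 *	static ck_rwlock_t lock = CK_RWLOCK_INITIALIZER;
 *
 *	void
 *	update(void)
 *	{
 *
 *		ck_rwlock_write_lock(&lock);
 *		[ modify shared state ]
 *		ck_rwlock_write_unlock(&lock);
 *	}
 *
 *	void
 *	observe(void)
 *	{
 *
 *		ck_rwlock_read_lock(&lock);
 *		[ read shared state ]
 *		ck_rwlock_read_unlock(&lock);
 *	}
 */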

CK_CC_INLINE static void
ck_rwlock_init(struct ck_rwlock *rw)
{

	rw->writer = 0;
	rw->n_readers = 0;
	ck_pr_barrier();
	return;
}

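/*
 * Release the write lock. The unlock fence orders the critical section's
 * memory operations before the store that clears the writer flag.
 */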
CK_CC_INLINE static void
ck_rwlock_write_unlock(ck_rwlock_t *rw)
{

	ck_pr_fence_unlock();
	ck_pr_store_uint(&rw->writer, 0);
	return;
}

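/*
 * Returns true if the writer flag is set, i.e. a writer holds the lock or
 * is in the process of acquiring it. This is an advisory snapshot intended
 * for assertions and for the elision predicates below.
 */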
CK_CC_INLINE static bool
ck_rwlock_locked_writer(ck_rwlock_t *rw)
{
	bool r;

	r = ck_pr_load_uint(&rw->writer);
	ck_pr_fence_acquire();
	return r;
}

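/*
 * Convert a held write lock into a read lock: the caller is registered as
 * a reader before the writer flag is released, so the lock is never
 * observed as free during the transition.
 */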
CK_CC_INLINE static void
ck_rwlock_write_downgrade(ck_rwlock_t *rw)
{

	ck_pr_inc_uint(&rw->n_readers);
	ck_rwlock_write_unlock(rw);
	return;
}

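/*
 * Returns true if any reader or writer currently holds (or is acquiring)
 * the lock. Like ck_rwlock_locked_writer, this is an advisory snapshot.
 */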
CK_CC_INLINE static bool
ck_rwlock_locked(ck_rwlock_t *rw)
{
	bool l;

	l = ck_pr_load_uint(&rw->n_readers) |
	    ck_pr_load_uint(&rw->writer);
	ck_pr_fence_acquire();
	return l;
}

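/*
 * Attempt to acquire the write lock without blocking: claim the writer
 * flag with a fetch-and-store, then verify that no readers are active.
 * If readers are present, the claim is rolled back and false is returned.
 */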
CK_CC_INLINE static bool
ck_rwlock_write_trylock(ck_rwlock_t *rw)
{

	if (ck_pr_fas_uint(&rw->writer, 1) != 0)
		return false;

	ck_pr_fence_atomic_load();

	if (ck_pr_load_uint(&rw->n_readers) != 0) {
		ck_rwlock_write_unlock(rw);
		return false;
	}

	ck_pr_fence_lock();
	return true;
}

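/*
 * The CK_ELIDE_*PROTOTYPE macros from ck_elide.h generate lock-elision
 * wrappers around the operations below; on targets with hardware
 * transactional memory support they may elide the lock entirely, using
 * the supplied predicates to detect conflicting acquisitions.
 */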
CK_ELIDE_TRYLOCK_PROTOTYPE(ck_rwlock_write, ck_rwlock_t,
    ck_rwlock_locked, ck_rwlock_write_trylock)

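/*
 * Acquire the write lock: spin until the writer flag is claimed, then
 * wait for in-flight readers to drain before entering the critical
 * section.
 */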
CK_CC_INLINE static void
ck_rwlock_write_lock(ck_rwlock_t *rw)
{

	while (ck_pr_fas_uint(&rw->writer, 1) != 0)
		ck_pr_stall();

	ck_pr_fence_atomic_load();

	while (ck_pr_load_uint(&rw->n_readers) != 0)
		ck_pr_stall();

	ck_pr_fence_lock();
	return;
}

CK_ELIDE_PROTOTYPE(ck_rwlock_write, ck_rwlock_t,
    ck_rwlock_locked, ck_rwlock_write_lock,
    ck_rwlock_locked_writer, ck_rwlock_write_unlock)

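/*
 * Attempt to acquire a read lock without blocking: register as a reader
 * and then re-check the writer flag. If a writer slipped in concurrently,
 * the registration is undone and false is returned.
 */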
CK_CC_INLINE static bool
ck_rwlock_read_trylock(ck_rwlock_t *rw)
{

	if (ck_pr_load_uint(&rw->writer) != 0)
		return false;

	ck_pr_inc_uint(&rw->n_readers);

	/*
	 * Serialize with respect to concurrent write
	 * lock operation.
	 */
	ck_pr_fence_atomic_load();

	if (ck_pr_load_uint(&rw->writer) == 0) {
		ck_pr_fence_lock();
		return true;
	}

	ck_pr_dec_uint(&rw->n_readers);
	return false;
}

CK_ELIDE_TRYLOCK_PROTOTYPE(ck_rwlock_read, ck_rwlock_t,
    ck_rwlock_locked_writer, ck_rwlock_read_trylock)

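/*
 * Acquire a read lock: wait until no writer is present, register as a
 * reader and re-check for a racing writer, retrying if one has appeared
 * in the meantime.
 */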
CK_CC_INLINE static void
ck_rwlock_read_lock(ck_rwlock_t *rw)
{

	for (;;) {
		while (ck_pr_load_uint(&rw->writer) != 0)
			ck_pr_stall();

		ck_pr_inc_uint(&rw->n_readers);

		/*
		 * Serialize with respect to concurrent write
		 * lock operation.
		 */
		ck_pr_fence_atomic_load();

		if (ck_pr_load_uint(&rw->writer) == 0)
			break;

		ck_pr_dec_uint(&rw->n_readers);
	}

	/* Acquire semantics are necessary. */
	ck_pr_fence_load();
	return;
}

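/*
 * Returns true if at least one reader currently holds the lock.
 */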
CK_CC_INLINE static bool
ck_rwlock_locked_reader(ck_rwlock_t *rw)
{

	ck_pr_fence_load();
	return ck_pr_load_uint(&rw->n_readers);
}

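/*
 * Release a read lock. The fence orders the critical section's loads
 * before the decrement of the reader count.
 */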
CK_CC_INLINE static void
ck_rwlock_read_unlock(ck_rwlock_t *rw)
{

	ck_pr_fence_load_atomic();
	ck_pr_dec_uint(&rw->n_readers);
	return;
}

CK_ELIDE_PROTOTYPE(ck_rwlock_read, ck_rwlock_t,
    ck_rwlock_locked_writer, ck_rwlock_read_lock,
    ck_rwlock_locked_reader, ck_rwlock_read_unlock)

/*
 * Reader-writer lock with a recursive writer (only write-side
 * acquisitions may be nested).
 */
struct ck_rwlock_recursive {
	struct ck_rwlock rw;
	unsigned int wc;
};
typedef struct ck_rwlock_recursive ck_rwlock_recursive_t;

#define CK_RWLOCK_RECURSIVE_INITIALIZER {CK_RWLOCK_INITIALIZER, 0}
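
/*
 * The writer field stores the owning thread's identifier and zero means
 * unlocked, so callers must supply a unique, non-zero tid. Only write-side
 * acquisitions are recursive; each successful lock or trylock must be
 * paired with exactly one unlock. Illustrative sketch (the writer() caller
 * and its tid source are hypothetical):
 *
 *	static ck_rwlock_recursive_t lock = CK_RWLOCK_RECURSIVE_INITIALIZER;
 *
 *	void
 *	writer(unsigned int tid)
 *	{
 *
 *		ck_rwlock_recursive_write_lock(&lock, tid);
 *		ck_rwlock_recursive_write_lock(&lock, tid);
 *		[ modify shared state ]
 *		ck_rwlock_recursive_write_unlock(&lock);
 *		ck_rwlock_recursive_write_unlock(&lock);
 *	}
 */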

CK_CC_INLINE static void
ck_rwlock_recursive_write_lock(ck_rwlock_recursive_t *rw, unsigned int tid)
{
	unsigned int o;

	o = ck_pr_load_uint(&rw->rw.writer);
	if (o == tid)
		goto leave;

	while (ck_pr_cas_uint(&rw->rw.writer, 0, tid) == false)
		ck_pr_stall();

	ck_pr_fence_atomic_load();

	while (ck_pr_load_uint(&rw->rw.n_readers) != 0)
		ck_pr_stall();

	ck_pr_fence_lock();
leave:
	rw->wc++;
	return;
}

CK_CC_INLINE static bool
ck_rwlock_recursive_write_trylock(ck_rwlock_recursive_t *rw, unsigned int tid)
{
	unsigned int o;

	o = ck_pr_load_uint(&rw->rw.writer);
	if (o == tid)
		goto leave;

	if (ck_pr_cas_uint(&rw->rw.writer, 0, tid) == false)
		return false;

	ck_pr_fence_atomic_load();

	if (ck_pr_load_uint(&rw->rw.n_readers) != 0) {
		ck_pr_store_uint(&rw->rw.writer, 0);
		return false;
	}

	ck_pr_fence_lock();
leave:
	rw->wc++;
	return true;
}

CK_CC_INLINE static void
ck_rwlock_recursive_write_unlock(ck_rwlock_recursive_t *rw)
{

	if (--rw->wc == 0) {
		ck_pr_fence_unlock();
		ck_pr_store_uint(&rw->rw.writer, 0);
	}

	return;
}

CK_CC_INLINE static void
ck_rwlock_recursive_read_lock(ck_rwlock_recursive_t *rw)
{

	ck_rwlock_read_lock(&rw->rw);
	return;
}

CK_CC_INLINE static bool
ck_rwlock_recursive_read_trylock(ck_rwlock_recursive_t *rw)
{

	return ck_rwlock_read_trylock(&rw->rw);
}

CK_CC_INLINE static void
ck_rwlock_recursive_read_unlock(ck_rwlock_recursive_t *rw)
{

	ck_rwlock_read_unlock(&rw->rw);
	return;
}

#endif /* CK_RWLOCK_H */