/*
 * Copyright 2011-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_BRLOCK_H
#define CK_BRLOCK_H

/*
 * Big reader spinlocks provide cache-local, contention-free read
 * lock acquisition in the absence of writers. This comes at the
 * cost of O(n) write lock acquisition. They were first implemented
 * in the Linux kernel by Ingo Molnar and David S. Miller around the
 * year 2000.
 *
 * This implementation is thread-agnostic, which comes at the cost
 * of larger reader objects due to necessary linkage overhead. In
 * order to cut down on TLB pressure, it is recommended to allocate
 * these objects on the same page.
 */
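
/*
 * A minimal usage sketch (illustrative, not part of this header: the
 * lock and reader names and the thread bodies are assumptions). Each
 * thread registers its own reader object before taking the read lock
 * and unregisters it before the object goes away:
 *
 *	static ck_brlock_t lock = CK_BRLOCK_INITIALIZER;
 *
 *	void *
 *	reader_thread(void *arg)
 *	{
 *		ck_brlock_reader_t me = CK_BRLOCK_READER_INITIALIZER;
 *
 *		ck_brlock_read_register(&lock, &me);
 *		ck_brlock_read_lock(&lock, &me);
 *		// ... read shared state ...
 *		ck_brlock_read_unlock(&me);
 *		ck_brlock_read_unregister(&lock, &me);
 *		return NULL;
 *	}
 *
 *	void
 *	writer_thread(void)
 *	{
 *		ck_brlock_write_lock(&lock);
 *		// ... mutate shared state ...
 *		ck_brlock_write_unlock(&lock);
 *	}
 */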

#include <ck_pr.h>
#include <ck_stdbool.h>
#include <ck_stddef.h>

struct ck_brlock_reader {
	unsigned int n_readers;
	struct ck_brlock_reader *previous;
	struct ck_brlock_reader *next;
};
typedef struct ck_brlock_reader ck_brlock_reader_t;

#define CK_BRLOCK_READER_INITIALIZER {0}

struct ck_brlock {
	struct ck_brlock_reader *readers;
	unsigned int writer;
};
typedef struct ck_brlock ck_brlock_t;

#define CK_BRLOCK_INITIALIZER {NULL, false}

CK_CC_INLINE static void
ck_brlock_init(struct ck_brlock *br)
{

	br->readers = NULL;
	br->writer = false;
	ck_pr_barrier();
	return;
}
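
/*
 * A lock may be initialized statically with CK_BRLOCK_INITIALIZER, as
 * in the sketch above, or at run time with ck_brlock_init(). A minimal
 * sketch, assuming the caller provides the storage:
 *
 *	static ck_brlock_t dynamic_lock;
 *
 *	ck_brlock_init(&dynamic_lock);
 */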

CK_CC_INLINE static void
ck_brlock_write_lock(struct ck_brlock *br)
{
	struct ck_brlock_reader *cursor;

	/*
	 * As the frequency of write acquisitions should be low,
	 * there is no point to more advanced contention avoidance.
	 */
	while (ck_pr_fas_uint(&br->writer, true) == true)
		ck_pr_stall();

	ck_pr_fence_atomic_load();

	/* The reader list is protected by the write lock. */
	for (cursor = br->readers; cursor != NULL; cursor = cursor->next) {
		while (ck_pr_load_uint(&cursor->n_readers) != 0)
			ck_pr_stall();
	}

	ck_pr_fence_lock();
	return;
}

CK_CC_INLINE static void
ck_brlock_write_unlock(struct ck_brlock *br)
{

	ck_pr_fence_unlock();
	ck_pr_store_uint(&br->writer, false);
	return;
}

CK_CC_INLINE static bool
ck_brlock_write_trylock(struct ck_brlock *br, unsigned int factor)
{
	struct ck_brlock_reader *cursor;
	unsigned int steps = 0;

	while (ck_pr_fas_uint(&br->writer, true) == true) {
		if (++steps >= factor)
			return false;

		ck_pr_stall();
	}

	/*
	 * We do not require a strict fence here as atomic RMW operations
	 * are serializing.
	 */
	ck_pr_fence_atomic_load();

	for (cursor = br->readers; cursor != NULL; cursor = cursor->next) {
		while (ck_pr_load_uint(&cursor->n_readers) != 0) {
			if (++steps >= factor) {
				ck_brlock_write_unlock(br);
				return false;
			}

			ck_pr_stall();
		}
	}

	ck_pr_fence_lock();
	return true;
}
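
/*
 * A bounded write acquisition sketch (the bound of 100 iterations and
 * the fallback are assumptions). The factor argument caps the total
 * number of spin iterations before the attempt is abandoned:
 *
 *	if (ck_brlock_write_trylock(&lock, 100) == false) {
 *		// Failed to acquire within the bound; back off or
 *		// service other work before retrying.
 *	} else {
 *		// ... mutate shared state ...
 *		ck_brlock_write_unlock(&lock);
 *	}
 */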

CK_CC_INLINE static void
ck_brlock_read_register(struct ck_brlock *br, struct ck_brlock_reader *reader)
{

	reader->n_readers = 0;
	reader->previous = NULL;

	/* Implicit compiler barrier. */
	ck_brlock_write_lock(br);

	reader->next = ck_pr_load_ptr(&br->readers);
	if (reader->next != NULL)
		reader->next->previous = reader;
	ck_pr_store_ptr(&br->readers, reader);

	ck_brlock_write_unlock(br);
	return;
}

CK_CC_INLINE static void
ck_brlock_read_unregister(struct ck_brlock *br, struct ck_brlock_reader *reader)
{

	ck_brlock_write_lock(br);

	if (reader->next != NULL)
		reader->next->previous = reader->previous;

	if (reader->previous != NULL)
		reader->previous->next = reader->next;
	else
		br->readers = reader->next;

	ck_brlock_write_unlock(br);
	return;
}

CK_CC_INLINE static void
ck_brlock_read_lock(struct ck_brlock *br, struct ck_brlock_reader *reader)
{

	if (reader->n_readers >= 1) {
		ck_pr_store_uint(&reader->n_readers, reader->n_readers + 1);
		return;
	}

	for (;;) {
		while (ck_pr_load_uint(&br->writer) == true)
			ck_pr_stall();

#if defined(__x86__) || defined(__x86_64__)
		ck_pr_fas_uint(&reader->n_readers, 1);

		/*
		 * Serialize reader counter update with respect to load of
		 * writer.
		 */
		ck_pr_fence_atomic_load();
#else
		ck_pr_store_uint(&reader->n_readers, 1);

		/*
		 * Serialize reader counter update with respect to load of
		 * writer.
		 */
		ck_pr_fence_store_load();
#endif

		if (ck_pr_load_uint(&br->writer) == false)
			break;

		ck_pr_store_uint(&reader->n_readers, 0);
	}

	ck_pr_fence_lock();
	return;
}
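
/*
 * Read acquisitions are recursive: a reader that already holds the
 * lock increments its counter and returns without spinning, so nested
 * acquisitions cannot block. A sketch, reusing the names from the
 * usage example above:
 *
 *	ck_brlock_read_lock(&lock, &me);
 *	ck_brlock_read_lock(&lock, &me);	// nested; returns immediately
 *	ck_brlock_read_unlock(&me);
 *	ck_brlock_read_unlock(&me);
 */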

CK_CC_INLINE static bool
ck_brlock_read_trylock(struct ck_brlock *br,
		       struct ck_brlock_reader *reader,
		       unsigned int factor)
{
	unsigned int steps = 0;

	if (reader->n_readers >= 1) {
		ck_pr_store_uint(&reader->n_readers, reader->n_readers + 1);
		return true;
	}

	for (;;) {
		while (ck_pr_load_uint(&br->writer) == true) {
			if (++steps >= factor)
				return false;

			ck_pr_stall();
		}

#if defined(__x86__) || defined(__x86_64__)
		ck_pr_fas_uint(&reader->n_readers, 1);

		/*
		 * Serialize reader counter update with respect to load of
		 * writer.
		 */
		ck_pr_fence_atomic_load();
#else
		ck_pr_store_uint(&reader->n_readers, 1);

		/*
		 * Serialize reader counter update with respect to load of
		 * writer.
		 */
		ck_pr_fence_store_load();
#endif

		if (ck_pr_load_uint(&br->writer) == false)
			break;

		ck_pr_store_uint(&reader->n_readers, 0);

		if (++steps >= factor)
			return false;
	}

	ck_pr_fence_lock();
	return true;
}
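
/*
 * Like the write side, read acquisition may be bounded. A sketch (the
 * bound of 64 iterations and the fallback are assumptions):
 *
 *	if (ck_brlock_read_trylock(&lock, &me, 64) == false) {
 *		// A writer was active for the whole bound; fall back
 *		// to an alternate path or retry later.
 *	} else {
 *		// ... read shared state ...
 *		ck_brlock_read_unlock(&me);
 *	}
 */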

CK_CC_INLINE static void
ck_brlock_read_unlock(struct ck_brlock_reader *reader)
{

	ck_pr_fence_unlock();
	ck_pr_store_uint(&reader->n_readers, reader->n_readers - 1);
	return;
}

#endif /* CK_BRLOCK_H */