/*
 * Copyright 2010-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_SPINLOCK_TICKET_H
#define CK_SPINLOCK_TICKET_H

#include <ck_backoff.h>
#include <ck_cc.h>
#include <ck_elide.h>
#include <ck_md.h>
#include <ck_pr.h>
#include <ck_stdbool.h>

#ifndef CK_F_SPINLOCK_TICKET
#define CK_F_SPINLOCK_TICKET
/*
 * If a 16-bit or 32-bit increment is supported, implement trylock support
 * on top of the corresponding 32-bit or 64-bit fetch-and-add and
 * compare-and-swap. This code path is only applied to x86*.
 */
#if defined(CK_MD_TSO) && (defined(__x86__) || defined(__x86_64__))
#if defined(CK_F_PR_FAA_32) && defined(CK_F_PR_INC_16) && defined(CK_F_PR_CAS_32)
#define CK_SPINLOCK_TICKET_TYPE		uint32_t
#define CK_SPINLOCK_TICKET_TYPE_BASE	uint16_t
#define CK_SPINLOCK_TICKET_INC(x)	ck_pr_inc_16(x)
#define CK_SPINLOCK_TICKET_CAS(x, y, z) ck_pr_cas_32(x, y, z)
#define CK_SPINLOCK_TICKET_FAA(x, y)	ck_pr_faa_32(x, y)
#define CK_SPINLOCK_TICKET_LOAD(x)	ck_pr_load_32(x)
#define CK_SPINLOCK_TICKET_INCREMENT	(0x00010000UL)
#define CK_SPINLOCK_TICKET_MASK		(0xFFFFUL)
#define CK_SPINLOCK_TICKET_SHIFT	(16)
#elif defined(CK_F_PR_FAA_64) && defined(CK_F_PR_INC_32) && defined(CK_F_PR_CAS_64)
#define CK_SPINLOCK_TICKET_TYPE		uint64_t
#define CK_SPINLOCK_TICKET_TYPE_BASE	uint32_t
#define CK_SPINLOCK_TICKET_INC(x)	ck_pr_inc_32(x)
#define CK_SPINLOCK_TICKET_CAS(x, y, z) ck_pr_cas_64(x, y, z)
#define CK_SPINLOCK_TICKET_FAA(x, y)	ck_pr_faa_64(x, y)
#define CK_SPINLOCK_TICKET_LOAD(x)	ck_pr_load_64(x)
#define CK_SPINLOCK_TICKET_INCREMENT	(0x0000000100000000ULL)
#define CK_SPINLOCK_TICKET_MASK		(0xFFFFFFFFULL)
#define CK_SPINLOCK_TICKET_SHIFT	(32)
#endif
#endif /* CK_MD_TSO */
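
/*
 * Layout note (derived from the macros above): in this configuration the
 * lock is a single word whose low-order half (the CK_SPINLOCK_TICKET_MASK
 * bits) holds the ticket currently being served, while the high-order half
 * (the bits above CK_SPINLOCK_TICKET_SHIFT) holds the next ticket to hand
 * out. Adding CK_SPINLOCK_TICKET_INCREMENT therefore bumps only the high
 * half, and the lock is free exactly when the two halves are equal.
 */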

#if defined(CK_SPINLOCK_TICKET_TYPE)
#define CK_F_SPINLOCK_TICKET_TRYLOCK

struct ck_spinlock_ticket {
	CK_SPINLOCK_TICKET_TYPE value;
};
typedef struct ck_spinlock_ticket ck_spinlock_ticket_t;
#define CK_SPINLOCK_TICKET_INITIALIZER { .value = 0 }

CK_CC_INLINE static void
ck_spinlock_ticket_init(struct ck_spinlock_ticket *ticket)
{

	ticket->value = 0;
	ck_pr_barrier();
	return;
}

CK_CC_INLINE static bool
ck_spinlock_ticket_locked(struct ck_spinlock_ticket *ticket)
{
	CK_SPINLOCK_TICKET_TYPE request, position;

	request = CK_SPINLOCK_TICKET_LOAD(&ticket->value);
	position = request & CK_SPINLOCK_TICKET_MASK;
	request >>= CK_SPINLOCK_TICKET_SHIFT;

	ck_pr_fence_acquire();
	return request != position;
}

CK_CC_INLINE static void
ck_spinlock_ticket_lock(struct ck_spinlock_ticket *ticket)
{
	CK_SPINLOCK_TICKET_TYPE request, position;

	/* Get our ticket number and set next ticket number. */
	request = CK_SPINLOCK_TICKET_FAA(&ticket->value,
	    CK_SPINLOCK_TICKET_INCREMENT);

	position = request & CK_SPINLOCK_TICKET_MASK;
	request >>= CK_SPINLOCK_TICKET_SHIFT;

	while (request != position) {
		ck_pr_stall();
		position = CK_SPINLOCK_TICKET_LOAD(&ticket->value) &
		    CK_SPINLOCK_TICKET_MASK;
	}

	ck_pr_fence_lock();
	return;
}
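
/*
 * A worked example of the ticket encoding, assuming the 32-bit configuration
 * above (CK_SPINLOCK_TICKET_SHIFT == 16): starting from 0x00000000, the
 * first caller's fetch-and-add returns 0 (request 0, position 0) and leaves
 * 0x00010000 behind, so it acquires the lock immediately. A second caller
 * arriving now obtains request 1 but still observes position 0 and spins.
 * Once the holder's unlock increments the low-order half, position becomes 1
 * and the second caller proceeds.
 */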

CK_CC_INLINE static void
ck_spinlock_ticket_lock_pb(struct ck_spinlock_ticket *ticket, unsigned int c)
{
	CK_SPINLOCK_TICKET_TYPE request, position;
	ck_backoff_t backoff;

	/* Get our ticket number and set next ticket number. */
	request = CK_SPINLOCK_TICKET_FAA(&ticket->value,
	    CK_SPINLOCK_TICKET_INCREMENT);

	position = request & CK_SPINLOCK_TICKET_MASK;
	request >>= CK_SPINLOCK_TICKET_SHIFT;

	while (request != position) {
		ck_pr_stall();
		position = CK_SPINLOCK_TICKET_LOAD(&ticket->value) &
		    CK_SPINLOCK_TICKET_MASK;

		backoff = (request - position) & CK_SPINLOCK_TICKET_MASK;
		backoff <<= c;
		ck_backoff_eb(&backoff);
	}

	ck_pr_fence_lock();
	return;
}

CK_CC_INLINE static bool
ck_spinlock_ticket_trylock(struct ck_spinlock_ticket *ticket)
{
	CK_SPINLOCK_TICKET_TYPE snapshot, request, position;

	snapshot = CK_SPINLOCK_TICKET_LOAD(&ticket->value);
	position = snapshot & CK_SPINLOCK_TICKET_MASK;
	request = snapshot >> CK_SPINLOCK_TICKET_SHIFT;

	if (position != request)
		return false;

	if (CK_SPINLOCK_TICKET_CAS(&ticket->value,
	    snapshot, snapshot + CK_SPINLOCK_TICKET_INCREMENT) == false) {
		return false;
	}

	ck_pr_fence_lock();
	return true;
}
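
/*
 * A minimal trylock usage sketch; the lock variable and the fallback path
 * below are hypothetical and not part of this header:
 *
 *	static ck_spinlock_ticket_t lock = CK_SPINLOCK_TICKET_INITIALIZER;
 *
 *	if (ck_spinlock_ticket_trylock(&lock) == false)
 *		ck_spinlock_ticket_lock(&lock);	(or do other work first)
 *	...critical section...
 *	ck_spinlock_ticket_unlock(&lock);
 */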

CK_CC_INLINE static void
ck_spinlock_ticket_unlock(struct ck_spinlock_ticket *ticket)
{

	ck_pr_fence_unlock();
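	/*
	 * Only the low-order ("position") half is incremented here. The cast
	 * to the narrower base type relies on that half being first in
	 * memory, which holds on the little-endian x86 targets this code
	 * path is restricted to.
	 */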
	CK_SPINLOCK_TICKET_INC((CK_SPINLOCK_TICKET_TYPE_BASE *)(void *)&ticket->value);
	return;
}

#undef CK_SPINLOCK_TICKET_TYPE
#undef CK_SPINLOCK_TICKET_TYPE_BASE
#undef CK_SPINLOCK_TICKET_INC
#undef CK_SPINLOCK_TICKET_CAS
#undef CK_SPINLOCK_TICKET_FAA
#undef CK_SPINLOCK_TICKET_LOAD
#undef CK_SPINLOCK_TICKET_INCREMENT
#undef CK_SPINLOCK_TICKET_MASK
#undef CK_SPINLOCK_TICKET_SHIFT
#else
/*
 * MESI benefits from cacheline padding between next and current. This avoids
 * invalidation of current from the cache due to incoming lock requests.
 */
struct ck_spinlock_ticket {
	unsigned int next;
	unsigned int position;
};
typedef struct ck_spinlock_ticket ck_spinlock_ticket_t;

#define CK_SPINLOCK_TICKET_INITIALIZER {.next = 0, .position = 0}

CK_CC_INLINE static void
ck_spinlock_ticket_init(struct ck_spinlock_ticket *ticket)
{

	ticket->next = 0;
	ticket->position = 0;
	ck_pr_barrier();

	return;
}

CK_CC_INLINE static bool
ck_spinlock_ticket_locked(struct ck_spinlock_ticket *ticket)
{
	bool r;

	r = ck_pr_load_uint(&ticket->position) !=
	    ck_pr_load_uint(&ticket->next);
	ck_pr_fence_acquire();
	return r;
}

CK_CC_INLINE static void
ck_spinlock_ticket_lock(struct ck_spinlock_ticket *ticket)
{
	unsigned int request;

	/* Get our ticket number and set next ticket number. */
	request = ck_pr_faa_uint(&ticket->next, 1);

	/*
	 * Busy-wait until our ticket number is current.
	 * We can get away without a fence here assuming
	 * our position counter does not overflow.
	 */
	while (ck_pr_load_uint(&ticket->position) != request)
		ck_pr_stall();

	ck_pr_fence_lock();
	return;
}

CK_CC_INLINE static void
ck_spinlock_ticket_lock_pb(struct ck_spinlock_ticket *ticket, unsigned int c)
{
	ck_backoff_t backoff;
	unsigned int request, position;

	request = ck_pr_faa_uint(&ticket->next, 1);

	for (;;) {
		position = ck_pr_load_uint(&ticket->position);
		if (position == request)
			break;

		backoff = request - position;
		backoff <<= c;

		/*
		 * Ideally, back-off from generating cache traffic for at least
		 * the amount of time necessary for the number of pending lock
		 * acquisition and relinquish operations (assuming an empty
		 * critical section).
		 */
		ck_backoff_eb(&backoff);
	}

	ck_pr_fence_lock();
	return;
}
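
/*
 * In ck_spinlock_ticket_lock_pb above, the backoff is seeded with the number
 * of tickets still ahead of the caller and scaled by the caller-supplied
 * shift c; a larger c backs off for longer, trading acquisition latency for
 * less polling traffic. A minimal sketch (the lock variable is hypothetical):
 *
 *	static ck_spinlock_ticket_t lock = CK_SPINLOCK_TICKET_INITIALIZER;
 *
 *	ck_spinlock_ticket_lock_pb(&lock, 1);
 *	...critical section...
 *	ck_spinlock_ticket_unlock(&lock);
 */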

CK_CC_INLINE static void
ck_spinlock_ticket_unlock(struct ck_spinlock_ticket *ticket)
{
	unsigned int update;

	ck_pr_fence_unlock();

	/*
	 * Update current ticket value so next lock request can proceed.
	 * Overflow behavior is assumed to be roll-over, in which case,
	 * it is only an issue if there are 2^32 pending lock requests.
	 */
	update = ck_pr_load_uint(&ticket->position);
	ck_pr_store_uint(&ticket->position, update + 1);
	return;
}
#endif /* !CK_F_SPINLOCK_TICKET_TRYLOCK */

CK_ELIDE_PROTOTYPE(ck_spinlock_ticket, ck_spinlock_ticket_t,
    ck_spinlock_ticket_locked, ck_spinlock_ticket_lock,
    ck_spinlock_ticket_locked, ck_spinlock_ticket_unlock)

CK_ELIDE_TRYLOCK_PROTOTYPE(ck_spinlock_ticket, ck_spinlock_ticket_t,
    ck_spinlock_ticket_locked, ck_spinlock_ticket_trylock)
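
/*
 * A minimal end-to-end sketch of the common case; the lock variable and the
 * enclosing function are hypothetical and not part of this header:
 *
 *	static ck_spinlock_ticket_t lock = CK_SPINLOCK_TICKET_INITIALIZER;
 *
 *	void
 *	with_lock(void)
 *	{
 *
 *		ck_spinlock_ticket_lock(&lock);
 *		...critical section...
 *		ck_spinlock_ticket_unlock(&lock);
 *	}
 *
 * The CK_ELIDE_PROTOTYPE and CK_ELIDE_TRYLOCK_PROTOTYPE invocations above
 * additionally declare lock-elision wrappers built on these primitives; see
 * ck_elide.h for the generated interfaces.
 */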

#endif /* CK_F_SPINLOCK_TICKET */
#endif /* CK_SPINLOCK_TICKET_H */