/*
 * Copyright 2013-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_ELIDE_H
#define CK_ELIDE_H

/*
 * As RTM is currently only supported on TSO x86 architectures,
 * fences have been omitted. They will be necessary for other
 * non-TSO architectures with TM support.
 */

#include <ck_cc.h>
#include <ck_pr.h>
#include <ck_string.h>

/*
 * skip_-prefixed counters represent the number of consecutive
 * elisions to forfeit. retry_-prefixed counters represent the
 * number of elision retries to attempt before forfeiting.
 *
 *     _busy: Lock was busy
 *    _other: Unknown explicit abort
 * _conflict: Data conflict in elision section
 */
struct ck_elide_config {
	unsigned short skip_busy;
	short retry_busy;
	unsigned short skip_other;
	short retry_other;
	unsigned short skip_conflict;
	short retry_conflict;
};

#define CK_ELIDE_CONFIG_DEFAULT_INITIALIZER {	\
	.skip_busy = 5,				\
	.retry_busy = 256,			\
	.skip_other = 3,			\
	.retry_other = 3,			\
	.skip_conflict = 2,			\
	.retry_conflict = 5			\
}
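
/*
 * Example (illustrative sketch only): a single configuration object is
 * typically shared by every thread contending on a given lock and may be
 * seeded with the default tuning values above. The my_config name is
 * hypothetical.
 *
 *	static struct ck_elide_config my_config =
 *	    CK_ELIDE_CONFIG_DEFAULT_INITIALIZER;
 */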

struct ck_elide_stat {
	unsigned int n_fallback;
	unsigned int n_elide;
	unsigned short skip;
};
typedef struct ck_elide_stat ck_elide_stat_t;

#define CK_ELIDE_STAT_INITIALIZER { 0, 0, 0 }

CK_CC_INLINE static void
ck_elide_stat_init(ck_elide_stat_t *st)
{

	memset(st, 0, sizeof(*st));
	return;
}
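
/*
 * Example (illustrative sketch only): statistics are meant to be kept
 * per-thread. A thread may initialize its object statically or at run
 * time; the my_stat name and the use of __thread storage are assumptions
 * for illustration.
 *
 *	static __thread ck_elide_stat_t my_stat = CK_ELIDE_STAT_INITIALIZER;
 *
 * or, equivalently:
 *
 *	ck_elide_stat_t my_stat;
 *	ck_elide_stat_init(&my_stat);
 */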

#ifdef CK_F_PR_RTM
enum _ck_elide_hint {
	CK_ELIDE_HINT_RETRY = 0,
	CK_ELIDE_HINT_SPIN,
	CK_ELIDE_HINT_STOP
};

#define CK_ELIDE_LOCK_BUSY 0xFF

static enum _ck_elide_hint
_ck_elide_fallback(int *retry,
    struct ck_elide_stat *st,
    struct ck_elide_config *c,
    unsigned int status)
{

	st->n_fallback++;
	if (*retry > 0)
		return CK_ELIDE_HINT_RETRY;

	if (st->skip != 0)
		return CK_ELIDE_HINT_STOP;

	if (status & CK_PR_RTM_EXPLICIT) {
		if (CK_PR_RTM_CODE(status) == CK_ELIDE_LOCK_BUSY) {
			st->skip = c->skip_busy;
			*retry = c->retry_busy;
			return CK_ELIDE_HINT_SPIN;
		}

		st->skip = c->skip_other;
		return CK_ELIDE_HINT_STOP;
	}

	if ((status & CK_PR_RTM_RETRY) &&
	    (status & CK_PR_RTM_CONFLICT)) {
		st->skip = c->skip_conflict;
		*retry = c->retry_conflict;
		return CK_ELIDE_HINT_RETRY;
	}

	/*
	 * Capacity, debug, and nesting aborts are likely to be invariant
	 * conditions for the acquisition, so execute the regular path
	 * instead. If the retry bit is not set, then take the hint.
	 */
	st->skip = USHRT_MAX;
	return CK_ELIDE_HINT_STOP;
}

/*
 * Defines an elision implementation according to the following variables:
 *     N - Namespace of elision implementation.
 *     T - Typename of mutex.
 *   L_P - Lock predicate, returns false if resource is available.
 *     L - Function to call if resource is unavailable or the transaction
 *         aborts.
 *   U_P - Unlock predicate, returns false if the lock was successfully
 *         elided (true if the real lock was acquired).
 *     U - Function to call if the transaction failed.
 *
 * An example instantiation is sketched in a comment following these
 * prototypes.
 */
#define CK_ELIDE_PROTOTYPE(N, T, L_P, L, U_P, U)					\
	CK_CC_INLINE static void							\
	ck_elide_##N##_lock_adaptive(T *lock,						\
	    struct ck_elide_stat *st,							\
	    struct ck_elide_config *c)							\
	{										\
		enum _ck_elide_hint hint;						\
		int retry;								\
											\
		if (CK_CC_UNLIKELY(st->skip != 0)) {					\
			st->skip--;							\
			goto acquire;							\
		}									\
											\
		retry = c->retry_conflict;						\
		do {									\
			unsigned int status = ck_pr_rtm_begin();			\
			if (status == CK_PR_RTM_STARTED) {				\
				if (L_P(lock) == true)					\
					ck_pr_rtm_abort(CK_ELIDE_LOCK_BUSY);		\
											\
				return;							\
			}								\
											\
			hint = _ck_elide_fallback(&retry, st, c, status);		\
			if (hint == CK_ELIDE_HINT_RETRY)				\
				continue;						\
											\
			if (hint == CK_ELIDE_HINT_SPIN) {				\
				while (--retry != 0) {					\
					if (L_P(lock) == false)				\
						break;					\
											\
					ck_pr_stall();					\
				}							\
											\
				continue;						\
			}								\
											\
			if (hint == CK_ELIDE_HINT_STOP)					\
				break;							\
		} while (CK_CC_LIKELY(--retry > 0));					\
											\
	acquire:									\
		L(lock);								\
		return;									\
	}										\
	CK_CC_INLINE static void							\
	ck_elide_##N##_unlock_adaptive(struct ck_elide_stat *st, T *lock)		\
	{										\
											\
		if (U_P(lock) == false) {						\
			ck_pr_rtm_end();						\
			st->skip = 0;							\
			st->n_elide++;							\
		} else {								\
			U(lock);							\
		}									\
											\
		return;									\
	}										\
	CK_CC_INLINE static void							\
	ck_elide_##N##_lock(T *lock)							\
	{										\
											\
		if (ck_pr_rtm_begin() != CK_PR_RTM_STARTED) {				\
			L(lock);							\
			return;								\
		}									\
											\
		if (L_P(lock) == true)							\
			ck_pr_rtm_abort(CK_ELIDE_LOCK_BUSY);				\
											\
		return;									\
	}										\
	CK_CC_INLINE static void							\
	ck_elide_##N##_unlock(T *lock)							\
	{										\
											\
		if (U_P(lock) == false) {						\
			ck_pr_rtm_end();						\
		} else {								\
			U(lock);							\
		}									\
											\
		return;									\
	}

#define CK_ELIDE_TRYLOCK_PROTOTYPE(N, T, TL_P, TL)			\
	CK_CC_INLINE static bool					\
	ck_elide_##N##_trylock(T *lock)					\
	{								\
									\
		if (ck_pr_rtm_begin() != CK_PR_RTM_STARTED)		\
			return false;					\
									\
		if (TL_P(lock) == true)					\
			ck_pr_rtm_abort(CK_ELIDE_LOCK_BUSY);		\
									\
		return true;						\
	}
#else
/*
 * If RTM is not enabled on the target platform (CK_F_PR_RTM is undefined),
 * then these elision wrappers call directly into the user-specified lock
 * operations. Unfortunately, the storage cost of both ck_elide_config and
 * ck_elide_stat is still paid (typically a storage cost that is a function
 * of lock objects and thread count).
 */
#define CK_ELIDE_PROTOTYPE(N, T, L_P, L, U_P, U)			\
	CK_CC_INLINE static void					\
	ck_elide_##N##_lock_adaptive(T *lock,				\
	    struct ck_elide_stat *st,					\
	    struct ck_elide_config *c)					\
	{								\
									\
		(void)st;						\
		(void)c;						\
		L(lock);						\
		return;							\
	}								\
	CK_CC_INLINE static void					\
	ck_elide_##N##_unlock_adaptive(struct ck_elide_stat *st,	\
	    T *lock)							\
	{								\
									\
		(void)st;						\
		U(lock);						\
		return;							\
	}								\
	CK_CC_INLINE static void					\
	ck_elide_##N##_lock(T *lock)					\
	{								\
									\
		L(lock);						\
		return;							\
	}								\
	CK_CC_INLINE static void					\
	ck_elide_##N##_unlock(T *lock)					\
	{								\
									\
		U(lock);						\
		return;							\
	}

#define CK_ELIDE_TRYLOCK_PROTOTYPE(N, T, TL_P, TL)			\
	CK_CC_INLINE static bool					\
	ck_elide_##N##_trylock(T *lock)					\
	{								\
									\
		return TL_P(lock);					\
	}
#endif /* !CK_F_PR_RTM */
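
/*
 * Example instantiation (illustrative sketch only): a hypothetical
 * test-and-set lock elided through this interface. All my_* names are
 * assumptions; the lock predicate reports whether the lock is held so
 * that transactions abort with CK_ELIDE_LOCK_BUSY once the real lock is
 * taken.
 *
 *	struct my_lock {
 *		unsigned int value;
 *	};
 *
 *	CK_CC_INLINE static bool
 *	my_lock_locked(struct my_lock *l)
 *	{
 *
 *		return ck_pr_load_uint(&l->value) != 0;
 *	}
 *
 *	CK_CC_INLINE static void
 *	my_lock_acquire(struct my_lock *l)
 *	{
 *
 *		while (ck_pr_fas_uint(&l->value, 1) != 0)
 *			ck_pr_stall();
 *
 *		return;
 *	}
 *
 *	CK_CC_INLINE static void
 *	my_lock_release(struct my_lock *l)
 *	{
 *
 *		ck_pr_store_uint(&l->value, 0);
 *		return;
 *	}
 *
 *	CK_ELIDE_PROTOTYPE(my, struct my_lock,
 *	    my_lock_locked, my_lock_acquire,
 *	    my_lock_locked, my_lock_release)
 *
 * The same predicate serves as both L_P and U_P here: it is false while
 * the lock is elided and true once my_lock_acquire has been used.
 */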
296 
297 /*
298  * Best-effort elision lock operations. First argument is name (N)
299  * associated with implementation and the second is a pointer to
300  * the type specified above (T).
301  *
302  * Unlike the adaptive variant, this interface does not have any retry
303  * semantics. In environments where jitter is low, this may yield a tighter
304  * fast path.
305  */
306 #define CK_ELIDE_LOCK(NAME, LOCK)	ck_elide_##NAME##_lock(LOCK)
307 #define CK_ELIDE_UNLOCK(NAME, LOCK)	ck_elide_##NAME##_unlock(LOCK)
308 #define CK_ELIDE_TRYLOCK(NAME, LOCK)	ck_elide_##NAME##_trylock(LOCK)
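
/*
 * Example (illustrative sketch only), reusing the hypothetical "my"
 * namespace instantiated above:
 *
 *	static struct my_lock lock;
 *
 *	CK_ELIDE_LOCK(my, &lock);
 *	... critical section ...
 *	CK_ELIDE_UNLOCK(my, &lock);
 */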

/*
 * Adaptive elision lock operations. In addition to the name and a pointer
 * to the lock, you must pass in a pointer to an initialized ck_elide_config
 * structure along with a pointer to a per-thread ck_elide_stat structure.
 */
#define CK_ELIDE_LOCK_ADAPTIVE(NAME, STAT, CONFIG, LOCK) \
	ck_elide_##NAME##_lock_adaptive(LOCK, STAT, CONFIG)

#define CK_ELIDE_UNLOCK_ADAPTIVE(NAME, STAT, LOCK) \
	ck_elide_##NAME##_unlock_adaptive(STAT, LOCK)
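
/*
 * Example (illustrative sketch only), continuing the hypothetical "my"
 * namespace: the configuration may be shared, while the statistics object
 * must be private to the calling thread (__thread is used here purely for
 * illustration).
 *
 *	static struct my_lock lock;
 *	static struct ck_elide_config config =
 *	    CK_ELIDE_CONFIG_DEFAULT_INITIALIZER;
 *	static __thread ck_elide_stat_t stat = CK_ELIDE_STAT_INITIALIZER;
 *
 *	CK_ELIDE_LOCK_ADAPTIVE(my, &stat, &config, &lock);
 *	... critical section ...
 *	CK_ELIDE_UNLOCK_ADAPTIVE(my, &stat, &lock);
 */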

#endif /* CK_ELIDE_H */