/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2009 by Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <atomic.h>

/*
 * These are the void returning variants
 */
#define	ATOMIC_INC(name, type) \
	void atomic_inc_##name(volatile type *target) \
	{ \
		(void) __atomic_add_fetch(target, 1, __ATOMIC_SEQ_CST); \
	}

ATOMIC_INC(8, uint8_t)
ATOMIC_INC(16, uint16_t)
ATOMIC_INC(32, uint32_t)
ATOMIC_INC(64, uint64_t)
ATOMIC_INC(uchar, uchar_t)
ATOMIC_INC(ushort, ushort_t)
ATOMIC_INC(uint, uint_t)
ATOMIC_INC(ulong, ulong_t)
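
/*
 * Each ATOMIC_INC(name, type) line above expands to an out-of-line
 * function built on the GCC __atomic builtins; for example,
 * ATOMIC_INC(32, uint32_t) becomes:
 *
 *	void atomic_inc_32(volatile uint32_t *target)
 *	{
 *		(void) __atomic_add_fetch(target, 1, __ATOMIC_SEQ_CST);
 *	}
 */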


#define	ATOMIC_DEC(name, type) \
	void atomic_dec_##name(volatile type *target) \
	{ \
		(void) __atomic_sub_fetch(target, 1, __ATOMIC_SEQ_CST); \
	}

ATOMIC_DEC(8, uint8_t)
ATOMIC_DEC(16, uint16_t)
ATOMIC_DEC(32, uint32_t)
ATOMIC_DEC(64, uint64_t)
ATOMIC_DEC(uchar, uchar_t)
ATOMIC_DEC(ushort, ushort_t)
ATOMIC_DEC(uint, uint_t)
ATOMIC_DEC(ulong, ulong_t)


#define	ATOMIC_ADD(name, type1, type2) \
	void atomic_add_##name(volatile type1 *target, type2 bits) \
	{ \
		(void) __atomic_add_fetch(target, bits, __ATOMIC_SEQ_CST); \
	}

void
atomic_add_ptr(volatile void *target, ssize_t bits)
{
	(void) __atomic_add_fetch((void **)target, bits, __ATOMIC_SEQ_CST);
}
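
/*
 * Note: the __atomic builtins perform operations on pointer arguments
 * as if on uintptr_t, i.e. without scaling by the size of the
 * pointed-to type, so atomic_add_ptr() adjusts the pointer stored at
 * *target by `bits' bytes.
 */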

ATOMIC_ADD(8, uint8_t, int8_t)
ATOMIC_ADD(16, uint16_t, int16_t)
ATOMIC_ADD(32, uint32_t, int32_t)
ATOMIC_ADD(64, uint64_t, int64_t)
ATOMIC_ADD(char, uchar_t, signed char)
ATOMIC_ADD(short, ushort_t, short)
ATOMIC_ADD(int, uint_t, int)
ATOMIC_ADD(long, ulong_t, long)


#define	ATOMIC_SUB(name, type1, type2) \
	void atomic_sub_##name(volatile type1 *target, type2 bits) \
	{ \
		(void) __atomic_sub_fetch(target, bits, __ATOMIC_SEQ_CST); \
	}

void
atomic_sub_ptr(volatile void *target, ssize_t bits)
{
	(void) __atomic_sub_fetch((void **)target, bits, __ATOMIC_SEQ_CST);
}

ATOMIC_SUB(8, uint8_t, int8_t)
ATOMIC_SUB(16, uint16_t, int16_t)
ATOMIC_SUB(32, uint32_t, int32_t)
ATOMIC_SUB(64, uint64_t, int64_t)
ATOMIC_SUB(char, uchar_t, signed char)
ATOMIC_SUB(short, ushort_t, short)
ATOMIC_SUB(int, uint_t, int)
ATOMIC_SUB(long, ulong_t, long)


#define	ATOMIC_OR(name, type) \
	void atomic_or_##name(volatile type *target, type bits) \
	{ \
		(void) __atomic_or_fetch(target, bits, __ATOMIC_SEQ_CST); \
	}

ATOMIC_OR(8, uint8_t)
ATOMIC_OR(16, uint16_t)
ATOMIC_OR(32, uint32_t)
ATOMIC_OR(64, uint64_t)
ATOMIC_OR(uchar, uchar_t)
ATOMIC_OR(ushort, ushort_t)
ATOMIC_OR(uint, uint_t)
ATOMIC_OR(ulong, ulong_t)


#define	ATOMIC_AND(name, type) \
	void atomic_and_##name(volatile type *target, type bits) \
	{ \
		(void) __atomic_and_fetch(target, bits, __ATOMIC_SEQ_CST); \
	}

ATOMIC_AND(8, uint8_t)
ATOMIC_AND(16, uint16_t)
ATOMIC_AND(32, uint32_t)
ATOMIC_AND(64, uint64_t)
ATOMIC_AND(uchar, uchar_t)
ATOMIC_AND(ushort, ushort_t)
ATOMIC_AND(uint, uint_t)
ATOMIC_AND(ulong, ulong_t)
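
/*
 * The OR and AND variants are the usual way to set and clear flag
 * bits atomically; a minimal sketch, where `flags' and FLAG_BUSY are
 * hypothetical:
 *
 *	atomic_or_32(&flags, FLAG_BUSY);	(set the bit)
 *	atomic_and_32(&flags, ~FLAG_BUSY);	(clear the bit)
 */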


/*
 * New value returning variants
 */

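/*
 * The _nv variants return the value of *target after the operation,
 * which matters whenever the post-operation value drives a decision.
 * A minimal reference-count sketch, where `obj' and obj_free() are
 * hypothetical:
 *
 *	if (atomic_dec_32_nv(&obj->refcnt) == 0)
 *		obj_free(obj);
 */
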
#define	ATOMIC_INC_NV(name, type) \
	type atomic_inc_##name##_nv(volatile type *target) \
	{ \
		return (__atomic_add_fetch(target, 1, __ATOMIC_SEQ_CST)); \
	}

ATOMIC_INC_NV(8, uint8_t)
ATOMIC_INC_NV(16, uint16_t)
ATOMIC_INC_NV(32, uint32_t)
ATOMIC_INC_NV(64, uint64_t)
ATOMIC_INC_NV(uchar, uchar_t)
ATOMIC_INC_NV(ushort, ushort_t)
ATOMIC_INC_NV(uint, uint_t)
ATOMIC_INC_NV(ulong, ulong_t)


#define	ATOMIC_DEC_NV(name, type) \
	type atomic_dec_##name##_nv(volatile type *target) \
	{ \
		return (__atomic_sub_fetch(target, 1, __ATOMIC_SEQ_CST)); \
	}

ATOMIC_DEC_NV(8, uint8_t)
ATOMIC_DEC_NV(16, uint16_t)
ATOMIC_DEC_NV(32, uint32_t)
ATOMIC_DEC_NV(64, uint64_t)
ATOMIC_DEC_NV(uchar, uchar_t)
ATOMIC_DEC_NV(ushort, ushort_t)
ATOMIC_DEC_NV(uint, uint_t)
ATOMIC_DEC_NV(ulong, ulong_t)


#define	ATOMIC_ADD_NV(name, type1, type2) \
	type1 atomic_add_##name##_nv(volatile type1 *target, type2 bits) \
	{ \
		return (__atomic_add_fetch(target, bits, __ATOMIC_SEQ_CST)); \
	}

void *
atomic_add_ptr_nv(volatile void *target, ssize_t bits)
{
	return (__atomic_add_fetch((void **)target, bits, __ATOMIC_SEQ_CST));
}

ATOMIC_ADD_NV(8, uint8_t, int8_t)
ATOMIC_ADD_NV(16, uint16_t, int16_t)
ATOMIC_ADD_NV(32, uint32_t, int32_t)
ATOMIC_ADD_NV(64, uint64_t, int64_t)
ATOMIC_ADD_NV(char, uchar_t, signed char)
ATOMIC_ADD_NV(short, ushort_t, short)
ATOMIC_ADD_NV(int, uint_t, int)
ATOMIC_ADD_NV(long, ulong_t, long)


#define	ATOMIC_SUB_NV(name, type1, type2) \
	type1 atomic_sub_##name##_nv(volatile type1 *target, type2 bits) \
	{ \
		return (__atomic_sub_fetch(target, bits, __ATOMIC_SEQ_CST)); \
	}

void *
atomic_sub_ptr_nv(volatile void *target, ssize_t bits)
{
	return (__atomic_sub_fetch((void **)target, bits, __ATOMIC_SEQ_CST));
}

ATOMIC_SUB_NV(8, uint8_t, int8_t)
ATOMIC_SUB_NV(char, uchar_t, signed char)
ATOMIC_SUB_NV(16, uint16_t, int16_t)
ATOMIC_SUB_NV(short, ushort_t, short)
ATOMIC_SUB_NV(32, uint32_t, int32_t)
ATOMIC_SUB_NV(int, uint_t, int)
ATOMIC_SUB_NV(long, ulong_t, long)
ATOMIC_SUB_NV(64, uint64_t, int64_t)


#define	ATOMIC_OR_NV(name, type) \
	type atomic_or_##name##_nv(volatile type *target, type bits) \
	{ \
		return (__atomic_or_fetch(target, bits, __ATOMIC_SEQ_CST)); \
	}

ATOMIC_OR_NV(8, uint8_t)
ATOMIC_OR_NV(16, uint16_t)
ATOMIC_OR_NV(32, uint32_t)
ATOMIC_OR_NV(64, uint64_t)
ATOMIC_OR_NV(uchar, uchar_t)
ATOMIC_OR_NV(ushort, ushort_t)
ATOMIC_OR_NV(uint, uint_t)
ATOMIC_OR_NV(ulong, ulong_t)


#define	ATOMIC_AND_NV(name, type) \
	type atomic_and_##name##_nv(volatile type *target, type bits) \
	{ \
		return (__atomic_and_fetch(target, bits, __ATOMIC_SEQ_CST)); \
	}

ATOMIC_AND_NV(8, uint8_t)
ATOMIC_AND_NV(16, uint16_t)
ATOMIC_AND_NV(32, uint32_t)
ATOMIC_AND_NV(64, uint64_t)
ATOMIC_AND_NV(uchar, uchar_t)
ATOMIC_AND_NV(ushort, ushort_t)
ATOMIC_AND_NV(uint, uint_t)
ATOMIC_AND_NV(ulong, ulong_t)


/*
 * If *tgt == exp, set *tgt = des; return old value
 *
 * This may not look right on the first pass (or the sixteenth), but,
 * from https://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html:
 * > If they are not equal, the operation is a read
 * > and the current contents of *ptr are written into *expected.
 * And, in the converse case, exp is already *target by definition.
 */

#define	ATOMIC_CAS(name, type) \
	type atomic_cas_##name(volatile type *target, type exp, type des) \
	{ \
		__atomic_compare_exchange_n(target, &exp, des, B_FALSE, \
		    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \
		return (exp); \
	}

void *
atomic_cas_ptr(volatile void *target, void *exp, void *des)
{
	__atomic_compare_exchange_n((void **)target, &exp, des, B_FALSE,
	    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return (exp);
}

ATOMIC_CAS(8, uint8_t)
ATOMIC_CAS(16, uint16_t)
ATOMIC_CAS(32, uint32_t)
ATOMIC_CAS(64, uint64_t)
ATOMIC_CAS(uchar, uchar_t)
ATOMIC_CAS(ushort, ushort_t)
ATOMIC_CAS(uint, uint_t)
ATOMIC_CAS(ulong, ulong_t)
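
/*
 * Because atomic_cas_*() returns the old value, the usual
 * compare-and-swap retry loop can be built directly on it.  A minimal
 * sketch, where `counter' is a hypothetical shared uint32_t:
 *
 *	uint32_t oldval, newval;
 *	do {
 *		oldval = counter;
 *		newval = oldval * 2;
 *	} while (atomic_cas_32(&counter, oldval, newval) != oldval);
 */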


/*
 * Swap target and return old value
 */

#define	ATOMIC_SWAP(name, type) \
	type atomic_swap_##name(volatile type *target, type bits) \
	{ \
		return (__atomic_exchange_n(target, bits, __ATOMIC_SEQ_CST)); \
	}

ATOMIC_SWAP(8, uint8_t)
ATOMIC_SWAP(16, uint16_t)
ATOMIC_SWAP(32, uint32_t)
ATOMIC_SWAP(64, uint64_t)
ATOMIC_SWAP(uchar, uchar_t)
ATOMIC_SWAP(ushort, ushort_t)
ATOMIC_SWAP(uint, uint_t)
ATOMIC_SWAP(ulong, ulong_t)

void *
atomic_swap_ptr(volatile void *target, void *bits)
{
	return (__atomic_exchange_n((void **)target, bits, __ATOMIC_SEQ_CST));
}

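/*
 * atomic_swap_ptr() is commonly used to take exclusive ownership of a
 * shared pointer by swapping NULL into it; only the caller that gets
 * back the non-NULL old value proceeds.  A sketch, where `shared' and
 * consume() are hypothetical:
 *
 *	void *p = atomic_swap_ptr(&shared, NULL);
 *	if (p != NULL)
 *		consume(p);
 */
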
#ifndef _LP64
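/*
 * On 32-bit targets a plain 64-bit load or store may be split into two
 * instructions and can therefore tear; routing the access through the
 * __atomic builtins guarantees a single atomic access.  On _LP64
 * targets an aligned 64-bit load or store is already atomic, so these
 * out-of-line helpers are only built for 32-bit environments.
 */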
uint64_t
atomic_load_64(volatile uint64_t *target)
{
	return (__atomic_load_n(target, __ATOMIC_RELAXED));
}

void
atomic_store_64(volatile uint64_t *target, uint64_t bits)
{
	__atomic_store_n(target, bits, __ATOMIC_RELAXED);
}
#endif

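/*
 * atomic_set_long_excl() atomically sets bit number `value' in *target
 * and returns 0 if the bit was previously clear, or -1 if it was
 * already set; atomic_clear_long_excl() is the mirror image.  This
 * lets a caller claim or release a bit exclusively in one step, e.g.
 * (with a hypothetical `inuse' mask and `slot' index):
 *
 *	if (atomic_set_long_excl(&inuse, slot) == 0) {
 *		... the slot is ours ...
 *	}
 */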
int
atomic_set_long_excl(volatile ulong_t *target, uint_t value)
{
	ulong_t bit = 1UL << value;
	ulong_t old = __atomic_fetch_or(target, bit, __ATOMIC_SEQ_CST);
	return ((old & bit) ? -1 : 0);
}

int
atomic_clear_long_excl(volatile ulong_t *target, uint_t value)
{
	ulong_t bit = 1UL << value;
	ulong_t old = __atomic_fetch_and(target, ~bit, __ATOMIC_SEQ_CST);
	return ((old & bit) ? 0 : -1);
}

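/*
 * Memory barriers.  membar_enter(), membar_exit(), and membar_sync()
 * are all implemented as full (sequentially consistent) fences.
 * membar_producer() only has to order earlier stores before later
 * stores, for which a release fence suffices; membar_consumer() only
 * has to order earlier loads before later loads, for which an acquire
 * fence suffices.
 */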
void
membar_enter(void)
{
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
}

void
membar_exit(void)
{
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
}

void
membar_sync(void)
{
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
}

void
membar_producer(void)
{
	__atomic_thread_fence(__ATOMIC_RELEASE);
}

void
membar_consumer(void)
{
	__atomic_thread_fence(__ATOMIC_ACQUIRE);
}
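
/*
 * A typical use of the producer/consumer pair is a data-then-flag
 * handoff, sketched here with hypothetical shared variables `data'
 * and `ready':
 *
 *	producer:
 *		data = value;
 *		membar_producer();	(data visible before ready)
 *		ready = 1;
 *
 *	consumer:
 *		while (ready == 0)
 *			continue;
 *		membar_consumer();	(ready read before data)
 *		use(data);
 */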