/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
 */

#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <sys/ccompile.h>
#include <sys/types.h>

#ifdef __cplusplus
extern "C" {
#endif

#if !defined(__lint) && defined(__GNUC__)

/* BEGIN CSTYLED */
/*
 * This file contains a number of static inline functions implementing
 * various atomic variable functions. Note that these are *not* all of the
 * atomic_* functions as defined in usr/src/uts/common/sys/atomic.h. All
 * possible atomic_* functions are implemented in usr/src/common/atomic in
 * pure assembly. In the absence of an identically named function in this
 * header file, any use of the function will result in the compiler emitting
 * a function call as usual. On the other hand, if an identically named
 * function exists in this header as a static inline, the compiler will
 * inline its contents and the linker never sees the symbol reference. We
 * use this to avoid implementing some of the more complex and less used
 * functions, falling back to function calls instead. Note that in some
 * cases (e.g., atomic_inc_64) we implement a static inline only on AMD64
 * but not i386.
 */

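/*
 * Illustrative sketch (not part of this header's interface): given the
 * inlines below, a call such as
 *
 *	uint32_t cnt;
 *	atomic_inc_32(&cnt);
 *
 * compiles down to a single "lock; incl" instruction with no function
 * call, while a function with no inline here (e.g., atomic_inc_64() on
 * i386) compiles to an ordinary call into the assembly implementation
 * under usr/src/common/atomic.
 */
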
/*
 * Instruction suffixes for various operand sizes (assuming AMD64)
 */
#define SUF_8		"b"
#define SUF_16		"w"
#define SUF_32		"l"
#define SUF_64		"q"

#if defined(__amd64)
#define SUF_LONG	SUF_64
#define SUF_PTR		SUF_64
#define __ATOMIC_OP64(...)	__ATOMIC_OPXX(__VA_ARGS__)
#elif defined(__i386)
#define SUF_LONG	SUF_32
#define SUF_PTR		SUF_32
#define __ATOMIC_OP64(...)
#else
#error "port me"
#endif

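/*
 * For example, the string literals in "inc" SUF_LONG concatenate to
 * "incq" on AMD64 and to "incl" on i386, matching the width of a long
 * on each ISA.
 */
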
#if defined(__amd64) || defined(__i386)

#define __ATOMIC_OPXX(fxn, type, op)			\
	extern __GNU_INLINE void			\
	fxn(volatile type *target)			\
	{						\
		__asm__ __volatile__(			\
		    "lock; " op " %0"			\
		    : "+m" (*target));			\
	}

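/*
 * As a sketch of what the macro above produces, the invocation
 * __ATOMIC_OPXX(atomic_inc_8, uint8_t, "inc" SUF_8) below expands to
 * roughly:
 *
 *	extern __GNU_INLINE void
 *	atomic_inc_8(volatile uint8_t *target)
 *	{
 *		__asm__ __volatile__(
 *		    "lock; incb %0"
 *		    : "+m" (*target));
 *	}
 */
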
__ATOMIC_OPXX(atomic_inc_8, uint8_t, "inc" SUF_8)
__ATOMIC_OPXX(atomic_inc_16, uint16_t, "inc" SUF_16)
__ATOMIC_OPXX(atomic_inc_32, uint32_t, "inc" SUF_32)
__ATOMIC_OP64(atomic_inc_64, uint64_t, "inc" SUF_64)
__ATOMIC_OPXX(atomic_inc_uchar, uchar_t, "inc" SUF_8)
__ATOMIC_OPXX(atomic_inc_ushort, ushort_t, "inc" SUF_16)
__ATOMIC_OPXX(atomic_inc_uint, uint_t, "inc" SUF_32)
__ATOMIC_OPXX(atomic_inc_ulong, ulong_t, "inc" SUF_LONG)

__ATOMIC_OPXX(atomic_dec_8, uint8_t, "dec" SUF_8)
__ATOMIC_OPXX(atomic_dec_16, uint16_t, "dec" SUF_16)
__ATOMIC_OPXX(atomic_dec_32, uint32_t, "dec" SUF_32)
__ATOMIC_OP64(atomic_dec_64, uint64_t, "dec" SUF_64)
__ATOMIC_OPXX(atomic_dec_uchar, uchar_t, "dec" SUF_8)
__ATOMIC_OPXX(atomic_dec_ushort, ushort_t, "dec" SUF_16)
__ATOMIC_OPXX(atomic_dec_uint, uint_t, "dec" SUF_32)
__ATOMIC_OPXX(atomic_dec_ulong, ulong_t, "dec" SUF_LONG)

#undef __ATOMIC_OPXX

#define __ATOMIC_OPXX(fxn, type1, type2, op, reg)	\
	extern __GNU_INLINE void			\
	fxn(volatile type1 *target, type2 delta)	\
	{						\
		__asm__ __volatile__(			\
		    "lock; " op " %1,%0"		\
		    : "+m" (*target)			\
		    : "i" reg (delta));			\
	}

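/*
 * Sketch of an expansion: __ATOMIC_OPXX(atomic_add_32, uint32_t, int32_t,
 * "add" SUF_32, "r") below becomes roughly:
 *
 *	extern __GNU_INLINE void
 *	atomic_add_32(volatile uint32_t *target, int32_t delta)
 *	{
 *		__asm__ __volatile__(
 *		    "lock; addl %1,%0"
 *		    : "+m" (*target)
 *		    : "ir" (delta));
 *	}
 *
 * The "i" constraint lets the compiler encode a constant delta as an
 * immediate; the "q" used by the 8-bit variants restricts the value to a
 * register with an addressable low byte (%al, %bl, %cl, %dl on i386).
 */
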
__ATOMIC_OPXX(atomic_add_8, uint8_t, int8_t, "add" SUF_8, "q")
__ATOMIC_OPXX(atomic_add_16, uint16_t, int16_t, "add" SUF_16, "r")
__ATOMIC_OPXX(atomic_add_32, uint32_t, int32_t, "add" SUF_32, "r")
__ATOMIC_OP64(atomic_add_64, uint64_t, int64_t, "add" SUF_64, "r")
__ATOMIC_OPXX(atomic_add_char, uchar_t, signed char, "add" SUF_8, "q")
__ATOMIC_OPXX(atomic_add_short, ushort_t, short, "add" SUF_16, "r")
__ATOMIC_OPXX(atomic_add_int, uint_t, int, "add" SUF_32, "r")
__ATOMIC_OPXX(atomic_add_long, ulong_t, long, "add" SUF_LONG, "r")

/*
 * We don't use the above macro here because atomic_add_ptr has an
 * inconsistent type. The first argument should really be a
 * 'volatile void **'.
 */
extern __GNU_INLINE void
atomic_add_ptr(volatile void *target, ssize_t delta)
{
	volatile void **tmp = (volatile void **)target;

	__asm__ __volatile__(
	    "lock; add" SUF_PTR " %1,%0"
	    : "+m" (*tmp)
	    : "ir" (delta));
}

__ATOMIC_OPXX(atomic_or_8, uint8_t, uint8_t, "or" SUF_8, "q")
__ATOMIC_OPXX(atomic_or_16, uint16_t, uint16_t, "or" SUF_16, "r")
__ATOMIC_OPXX(atomic_or_32, uint32_t, uint32_t, "or" SUF_32, "r")
__ATOMIC_OP64(atomic_or_64, uint64_t, uint64_t, "or" SUF_64, "r")
__ATOMIC_OPXX(atomic_or_uchar, uchar_t, uchar_t, "or" SUF_8, "q")
__ATOMIC_OPXX(atomic_or_ushort, ushort_t, ushort_t, "or" SUF_16, "r")
__ATOMIC_OPXX(atomic_or_uint, uint_t, uint_t, "or" SUF_32, "r")
__ATOMIC_OPXX(atomic_or_ulong, ulong_t, ulong_t, "or" SUF_LONG, "r")

__ATOMIC_OPXX(atomic_and_8, uint8_t, uint8_t, "and" SUF_8, "q")
__ATOMIC_OPXX(atomic_and_16, uint16_t, uint16_t, "and" SUF_16, "r")
__ATOMIC_OPXX(atomic_and_32, uint32_t, uint32_t, "and" SUF_32, "r")
__ATOMIC_OP64(atomic_and_64, uint64_t, uint64_t, "and" SUF_64, "r")
__ATOMIC_OPXX(atomic_and_uchar, uchar_t, uchar_t, "and" SUF_8, "q")
__ATOMIC_OPXX(atomic_and_ushort, ushort_t, ushort_t, "and" SUF_16, "r")
__ATOMIC_OPXX(atomic_and_uint, uint_t, uint_t, "and" SUF_32, "r")
__ATOMIC_OPXX(atomic_and_ulong, ulong_t, ulong_t, "and" SUF_LONG, "r")

#undef __ATOMIC_OPXX

#define __ATOMIC_OPXX(fxn, type, op, reg)		\
	extern __GNU_INLINE type			\
	fxn(volatile type *target, type cmp, type new)	\
	{						\
		type ret;				\
		__asm__ __volatile__(			\
		    "lock; " op " %2,%0"		\
		    : "+m" (*target), "=a" (ret)	\
		    : reg (new), "1" (cmp)		\
		    : "cc");				\
		return (ret);				\
	}

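/*
 * The "=a"/"1" constraints bind both cmp and the returned value to
 * %eax/%rax, as cmpxchg requires. Typical usage (a sketch, not part of
 * this header): atomic_cas_* returns the value that was in *target, so
 * a retry loop looks like
 *
 *	uint32_t old, new;
 *	do {
 *		old = *ptr;
 *		new = old | flag;
 *	} while (atomic_cas_32(ptr, old, new) != old);
 */
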
__ATOMIC_OPXX(atomic_cas_8, uint8_t, "cmpxchg" SUF_8, "q")
__ATOMIC_OPXX(atomic_cas_16, uint16_t, "cmpxchg" SUF_16, "r")
__ATOMIC_OPXX(atomic_cas_32, uint32_t, "cmpxchg" SUF_32, "r")
__ATOMIC_OP64(atomic_cas_64, uint64_t, "cmpxchg" SUF_64, "r")
__ATOMIC_OPXX(atomic_cas_uchar, uchar_t, "cmpxchg" SUF_8, "q")
__ATOMIC_OPXX(atomic_cas_ushort, ushort_t, "cmpxchg" SUF_16, "r")
__ATOMIC_OPXX(atomic_cas_uint, uint_t, "cmpxchg" SUF_32, "r")
__ATOMIC_OPXX(atomic_cas_ulong, ulong_t, "cmpxchg" SUF_LONG, "r")

#undef __ATOMIC_OPXX

/*
 * We don't use the above macro here because atomic_cas_ptr has an
 * inconsistent type. The first argument should really be a
 * 'volatile void **'.
 */
extern __GNU_INLINE void *
atomic_cas_ptr(volatile void *target, void *cmp, void *new)
{
	volatile void **tmp = (volatile void **)target;
	void *ret;

	__asm__ __volatile__(
	    "lock; cmpxchg" SUF_PTR " %2,%0"
	    : "+m" (*tmp), "=a" (ret)
	    : "r" (new), "1" (cmp)
	    : "cc");

	return (ret);
}

#define __ATOMIC_OPXX(fxn, type, op, reg)		\
	extern __GNU_INLINE type			\
	fxn(volatile type *target, type val)		\
	{						\
		__asm__ __volatile__(			\
		    op " %1,%0"				\
		    : "+m" (*target), "+" reg (val));	\
		return (val);				\
	}

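/*
 * Note that xchg with a memory operand is implicitly locked, which is
 * why no "lock" prefix appears above. Sketch of an expansion:
 * __ATOMIC_OPXX(atomic_swap_32, uint32_t, "xchg" SUF_32, "r") below
 * becomes roughly:
 *
 *	extern __GNU_INLINE uint32_t
 *	atomic_swap_32(volatile uint32_t *target, uint32_t val)
 *	{
 *		__asm__ __volatile__(
 *		    "xchgl %1,%0"
 *		    : "+m" (*target), "+r" (val));
 *		return (val);
 *	}
 */
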
__ATOMIC_OPXX(atomic_swap_8, uint8_t, "xchg" SUF_8, "q")
__ATOMIC_OPXX(atomic_swap_16, uint16_t, "xchg" SUF_16, "r")
__ATOMIC_OPXX(atomic_swap_32, uint32_t, "xchg" SUF_32, "r")
__ATOMIC_OP64(atomic_swap_64, uint64_t, "xchg" SUF_64, "r")
__ATOMIC_OPXX(atomic_swap_uchar, uchar_t, "xchg" SUF_8, "q")
__ATOMIC_OPXX(atomic_swap_ushort, ushort_t, "xchg" SUF_16, "r")
__ATOMIC_OPXX(atomic_swap_uint, uint_t, "xchg" SUF_32, "r")
__ATOMIC_OPXX(atomic_swap_ulong, ulong_t, "xchg" SUF_LONG, "r")

#undef __ATOMIC_OPXX

/*
 * We don't use the above macro here because atomic_swap_ptr has an
 * inconsistent type. The first argument should really be a
 * 'volatile void **'.
 */
extern __GNU_INLINE void *
atomic_swap_ptr(volatile void *target, void *val)
{
	volatile void **tmp = (volatile void **)target;

	__asm__ __volatile__(
	    "xchg" SUF_PTR " %1,%0"
	    : "+m" (*tmp), "+r" (val));

	return (val);
}

#else
#error "port me"
#endif

#undef SUF_8
#undef SUF_16
#undef SUF_32
#undef SUF_64
#undef SUF_LONG
#undef SUF_PTR

#undef __ATOMIC_OP64

/* END CSTYLED */

#endif /* !__lint && __GNUC__ */

#ifdef __cplusplus
}
#endif

#endif /* _ASM_ATOMIC_H */