/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2018 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _ASM_ATOMIC_H_
#define	_ASM_ATOMIC_H_

#include <linux/compiler.h>
#include <sys/types.h>
#include <machine/atomic.h>

#define	ATOMIC_INIT(x)	{ .counter = (x) }

typedef struct {
	volatile int counter;
} atomic_t;
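
/*
 * Illustrative usage (an editor's sketch; "obj_release()" is a
 * hypothetical cleanup routine, not part of this header):
 *
 *	static atomic_t refs = ATOMIC_INIT(1);
 *
 *	atomic_inc(&refs);
 *	if (atomic_dec_and_test(&refs))
 *		obj_release();
 */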

/*------------------------------------------------------------------------*
 *	32-bit atomic operations
 *------------------------------------------------------------------------*/

#define	atomic_add(i, v)		atomic_add_return((i), (v))
#define	atomic_sub(i, v)		atomic_sub_return((i), (v))
#define	atomic_inc_return(v)		atomic_add_return(1, (v))
#define	atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)
#define	atomic_add_and_test(i, v)	(atomic_add_return((i), (v)) == 0)
#define	atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define	atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define	atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define	atomic_dec_return(v)		atomic_sub_return(1, (v))
#define	atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

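/*
 * Unlike FreeBSD's atomic_fetchadd_int(), which returns the previous
 * value, the Linux *_return() helpers return the value *after* the
 * update; hence the "+ i" / "- i" adjustments below.
 */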
static inline int
atomic_add_return(int i, atomic_t *v)
{
	return i + atomic_fetchadd_int(&v->counter, i);
}

static inline int
atomic_sub_return(int i, atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, -i) - i;
}

static inline void
atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

static inline void
atomic_set_release(atomic_t *v, int i)
{
	atomic_store_rel_int(&v->counter, i);
}

static inline void
atomic_set_mask(unsigned int mask, atomic_t *v)
{
	atomic_set_int(&v->counter, mask);
}

static inline int
atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);
}

static inline int
atomic_inc(atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, 1) + 1;
}

static inline int
atomic_dec(atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, -1) - 1;
}

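/*
 * Add "a" to the counter unless it currently holds "u"; returns
 * non-zero when the addition was performed.  atomic_fcmpset_int()
 * refreshes "c" with the observed value on failure, so the loop
 * re-tests against "u" on every iteration.
 */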
static inline int
atomic_add_unless(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);

	for (;;) {
		if (unlikely(c == u))
			break;
		if (likely(atomic_fcmpset_int(&v->counter, &c, c + a)))
			break;
	}
	return (c != u);
}

static inline void
atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	atomic_clear_int(&v->counter, mask);
}

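/*
 * MIPS lacks atomic_swap_int(), so the exchange is emulated there
 * with an atomic_fcmpset_int() loop; every other architecture uses
 * the native swap primitive directly.
 */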
static inline int
atomic_xchg(atomic_t *v, int i)
{
#if !defined(__mips__)
	return (atomic_swap_int(&v->counter, i));
#else
	int ret = atomic_read(v);

	while (!atomic_fcmpset_int(&v->counter, &ret, i))
		;
	return (ret);
#endif
}

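/*
 * Linux cmpxchg() semantics: return the value observed in the counter,
 * whether or not the swap happened.  atomic_fcmpset_int() stores the
 * observed value in "ret" on failure, so the loop only retries after a
 * spurious failure, i.e. when the observed value still equals "old".
 */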
static inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret = old;

	for (;;) {
		if (atomic_fcmpset_int(&v->counter, &ret, new))
			break;
		if (ret != old)
			break;
	}
	return (ret);
}

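/*
 * The LINUXKPI_ATOMIC_{8,16,64}() macros gate the sub-word and 64-bit
 * cases of cmpxchg()/xchg() below: they expand their arguments only on
 * architectures whose <machine/atomic.h> provides fcmpset/swap at that
 * width, and to nothing elsewhere, so an unsupported size trips the
 * compile-time assertion instead of referencing missing primitives.
 */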
#if defined(__amd64__) || defined(__arm64__) || defined(__i386__)
#define	LINUXKPI_ATOMIC_8(...) __VA_ARGS__
#define	LINUXKPI_ATOMIC_16(...) __VA_ARGS__
#else
#define	LINUXKPI_ATOMIC_8(...)
#define	LINUXKPI_ATOMIC_16(...)
#endif

#if !(defined(i386) || (defined(__mips__) && !(defined(__mips_n32) ||	\
    defined(__mips_n64))) || (defined(__powerpc__) &&			\
    !defined(__powerpc64__)))
#define	LINUXKPI_ATOMIC_64(...) __VA_ARGS__
#else
#define	LINUXKPI_ATOMIC_64(...)
#endif

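/*
 * Generic compare-and-swap on a pointer of any supported width.  The
 * anonymous union re-types the value as a fixed-width integer, and the
 * CTASSERT() rejects sizes the current architecture cannot handle.
 * Illustrative use (an editor's sketch; "word" is a hypothetical u32):
 *
 *	u32 seen = cmpxchg(&word, 0, 1);
 *	if (seen == 0)
 *		;	// we won the race and stored 1
 */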
#define	cmpxchg(ptr, old, new) ({					\
	union {								\
		__typeof(*(ptr)) val;					\
		u8 u8[0];						\
		u16 u16[0];						\
		u32 u32[0];						\
		u64 u64[0];						\
	} __ret = { .val = (old) }, __new = { .val = (new) };		\
									\
	CTASSERT(							\
	    LINUXKPI_ATOMIC_8(sizeof(__ret.val) == 1 ||)		\
	    LINUXKPI_ATOMIC_16(sizeof(__ret.val) == 2 ||)		\
	    LINUXKPI_ATOMIC_64(sizeof(__ret.val) == 8 ||)		\
	    sizeof(__ret.val) == 4);					\
									\
	switch (sizeof(__ret.val)) {					\
	LINUXKPI_ATOMIC_8(						\
	case 1:								\
		while (!atomic_fcmpset_8((volatile u8 *)(ptr),		\
		    __ret.u8, __new.u8[0]) && __ret.val == (old))	\
			;						\
		break;							\
	)								\
	LINUXKPI_ATOMIC_16(						\
	case 2:								\
		while (!atomic_fcmpset_16((volatile u16 *)(ptr),	\
		    __ret.u16, __new.u16[0]) && __ret.val == (old))	\
			;						\
		break;							\
	)								\
	case 4:								\
		while (!atomic_fcmpset_32((volatile u32 *)(ptr),	\
		    __ret.u32, __new.u32[0]) && __ret.val == (old))	\
			;						\
		break;							\
	LINUXKPI_ATOMIC_64(						\
	case 8:								\
		while (!atomic_fcmpset_64((volatile u64 *)(ptr),	\
		    __ret.u64, __new.u64[0]) && __ret.val == (old))	\
			;						\
		break;							\
	)								\
	}								\
	__ret.val;							\
})

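/*
 * The relaxed variant imposes no ordering requirements beyond
 * atomicity, so aliasing it to the (at least as strong) cmpxchg()
 * above is always permissible.
 */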
#define	cmpxchg_relaxed(...)	cmpxchg(__VA_ARGS__)

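/*
 * Unconditional exchange.  The 32- and 64-bit cases use the native
 * swap primitive; the sub-word cases, where enabled, fall back to an
 * atomic_fcmpset loop seeded with the current value.
 */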
#define	xchg(ptr, new) ({						\
	union {								\
		__typeof(*(ptr)) val;					\
		u8 u8[0];						\
		u16 u16[0];						\
		u32 u32[0];						\
		u64 u64[0];						\
	} __ret, __new = { .val = (new) };				\
									\
	CTASSERT(							\
	    LINUXKPI_ATOMIC_8(sizeof(__ret.val) == 1 ||)		\
	    LINUXKPI_ATOMIC_16(sizeof(__ret.val) == 2 ||)		\
	    LINUXKPI_ATOMIC_64(sizeof(__ret.val) == 8 ||)		\
	    sizeof(__ret.val) == 4);					\
									\
	switch (sizeof(__ret.val)) {					\
	LINUXKPI_ATOMIC_8(						\
	case 1:								\
		__ret.val = READ_ONCE(*(ptr));				\
		while (!atomic_fcmpset_8((volatile u8 *)(ptr),		\
		    __ret.u8, __new.u8[0]))				\
			;						\
		break;							\
	)								\
	LINUXKPI_ATOMIC_16(						\
	case 2:								\
		__ret.val = READ_ONCE(*(ptr));				\
		while (!atomic_fcmpset_16((volatile u16 *)(ptr),	\
		    __ret.u16, __new.u16[0]))				\
			;						\
		break;							\
	)								\
	case 4:								\
		__ret.u32[0] = atomic_swap_32((volatile u32 *)(ptr),	\
		    __new.u32[0]);					\
		break;							\
	LINUXKPI_ATOMIC_64(						\
	case 8:								\
		__ret.u64[0] = atomic_swap_64((volatile u64 *)(ptr),	\
		    __new.u64[0]);					\
		break;							\
	)								\
	}								\
	__ret.val;							\
})

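/*
 * Decrement the counter only if the result would be non-negative.
 * Returns the decremented value; a negative return means the counter
 * was left untouched.
 */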
static inline int
atomic_dec_if_positive(atomic_t *v)
{
	int retval;
	int old;

	old = atomic_read(v);
	for (;;) {
		retval = old - 1;
		if (unlikely(retval < 0))
			break;
		if (likely(atomic_fcmpset_int(&v->counter, &old, retval)))
			break;
	}
	return (retval);
}

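/*
 * Generate the bitwise read-modify-write helpers from an
 * atomic_cmpxchg() loop: LINUX_ATOMIC_OP() yields the void
 * atomic_<op>() form and LINUX_ATOMIC_FETCH_OP() the variant that
 * returns the previous value.
 */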
#define	LINUX_ATOMIC_OP(op, c_op)				\
static inline void atomic_##op(int i, atomic_t *v)		\
{								\
	int c, old;						\
								\
	c = v->counter;						\
	while ((old = atomic_cmpxchg(v, c, c c_op i)) != c)	\
		c = old;					\
}

#define	LINUX_ATOMIC_FETCH_OP(op, c_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{								\
	int c, old;						\
								\
	c = v->counter;						\
	while ((old = atomic_cmpxchg(v, c, c c_op i)) != c)	\
		c = old;					\
								\
	return (c);						\
}

LINUX_ATOMIC_OP(or, |)
LINUX_ATOMIC_OP(and, &)
LINUX_ATOMIC_OP(andnot, &~)
LINUX_ATOMIC_OP(xor, ^)

LINUX_ATOMIC_FETCH_OP(or, |)
LINUX_ATOMIC_FETCH_OP(and, &)
LINUX_ATOMIC_FETCH_OP(andnot, &~)
LINUX_ATOMIC_FETCH_OP(xor, ^)

#endif					/* _ASM_ATOMIC_H_ */