xref: /freebsd/sys/compat/linuxkpi/common/include/asm/atomic.h (revision 8ef24a0d4b28fe230e20637f56869cc4148cd2ca)
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef	_ASM_ATOMIC_H_
#define	_ASM_ATOMIC_H_

#include <sys/cdefs.h>
#include <sys/types.h>
#include <machine/atomic.h>

#define	ATOMIC_INIT(x)	{ .counter = (x) }

typedef struct {
	volatile int counter;
} atomic_t;
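
/*
 * Example (illustrative sketch, not part of the original header): a
 * consumer can initialize an atomic_t statically with ATOMIC_INIT() or
 * at run time with atomic_set(), defined below.  The names used here
 * are hypothetical.
 *
 *	static atomic_t hypothetical_refcount = ATOMIC_INIT(1);
 *
 *	static void
 *	hypothetical_reset(atomic_t *v)
 *	{
 *		atomic_set(v, 0);
 *	}
 */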

/*------------------------------------------------------------------------*
 *	32-bit atomic operations
 *------------------------------------------------------------------------*/

#define	atomic_add(i, v)		atomic_add_return((i), (v))
#define	atomic_sub(i, v)		atomic_sub_return((i), (v))
#define	atomic_inc_return(v)		atomic_add_return(1, (v))
#define	atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)
#define	atomic_add_and_test(i, v)	(atomic_add_return((i), (v)) == 0)
#define	atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define	atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define	atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define	atomic_dec_return(v)		atomic_sub_return(1, (v))
#define	atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
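
/*
 * Example (illustrative sketch, not part of the original header): the
 * aliases above are typically used for Linux-style reference counting.
 * The object type and free routine below are hypothetical.
 *
 *	static void
 *	hypothetical_obj_put(struct hypothetical_obj *obj)
 *	{
 *		if (atomic_dec_and_test(&obj->refs))
 *			hypothetical_obj_free(obj);
 *	}
 */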

static inline int
atomic_add_return(int i, atomic_t *v)
{
	return i + atomic_fetchadd_int(&v->counter, i);
}

static inline int
atomic_sub_return(int i, atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, -i) - i;
}
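
/*
 * Note on the mapping onto FreeBSD primitives: atomic_fetchadd_int()
 * returns the value *before* the addition, while the Linux *_return()
 * variants return the value *after* it, hence the "+ i" / "- i"
 * adjustments above.  A minimal sketch (hypothetical variable name):
 *
 *	atomic_t hypothetical_ctr = ATOMIC_INIT(0);
 *	int new = atomic_add_return(3, &hypothetical_ctr);	// new == 3
 *	new = atomic_sub_return(1, &hypothetical_ctr);		// new == 2
 */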

static inline void
atomic_set(atomic_t *v, int i)
{
	atomic_store_rel_int(&v->counter, i);
}

static inline void
atomic_set_mask(unsigned int mask, atomic_t *v)
{
	atomic_set_int(&v->counter, mask);
}

static inline int
atomic_read(atomic_t *v)
{
	return atomic_load_acq_int(&v->counter);
}
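
/*
 * Note (added observation, not from the original header): atomic_set()
 * and atomic_read() are implemented here with release/acquire accesses,
 * which is somewhat stronger than the relaxed semantics Linux documents
 * for these two helpers.  A minimal usage sketch (hypothetical name):
 *
 *	atomic_t hypothetical_state = ATOMIC_INIT(0);
 *
 *	atomic_set(&hypothetical_state, 1);
 *	if (atomic_read(&hypothetical_state) == 1)
 *		;	// observed the store
 */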

static inline int
atomic_inc(atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, 1) + 1;
}

static inline int
atomic_dec(atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, -1) - 1;
}

static inline int
atomic_add_unless(atomic_t *v, int a, int u)
{
	int c;

	for (;;) {
		c = atomic_read(v);
		if (unlikely(c == u))
			break;
		if (likely(atomic_cmpset_int(&v->counter, c, c + a)))
			break;
	}
	return (c != u);
}
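
/*
 * Example (illustrative sketch, not part of the original header):
 * atomic_add_unless() returns non-zero only if it performed the
 * addition, so atomic_inc_not_zero() is the usual way to take a
 * reference on an object whose count may already have reached zero.
 * The lookup helper and object type below are hypothetical.
 *
 *	struct hypothetical_obj *obj;
 *
 *	obj = hypothetical_lookup(key);
 *	if (obj != NULL && !atomic_inc_not_zero(&obj->refs))
 *		obj = NULL;	// count already hit zero; object is going away
 */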

static inline void
atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	atomic_clear_int(&v->counter, mask);
}
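
/*
 * Example (illustrative sketch, not part of the original header):
 * atomic_set_mask() and atomic_clear_mask() atomically OR bits into and
 * AND bits out of the counter.  The flag and variable names are
 * hypothetical.
 *
 *	#define	HYPOTHETICAL_FLAG_BUSY	0x01
 *	static atomic_t hypothetical_flags = ATOMIC_INIT(0);
 *
 *	atomic_set_mask(HYPOTHETICAL_FLAG_BUSY, &hypothetical_flags);
 *	...
 *	atomic_clear_mask(HYPOTHETICAL_FLAG_BUSY, &hypothetical_flags);
 */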

static inline int
atomic_xchg(atomic_t *v, int i)
{
#if defined(__i386__) || defined(__amd64__) || \
    defined(__arm__) || defined(__aarch64__)
	return (atomic_swap_int(&v->counter, i));
#else
	int ret;
	for (;;) {
		ret = atomic_load_acq_int(&v->counter);
		if (atomic_cmpset_int(&v->counter, ret, i))
			break;
	}
	return (ret);
#endif
}
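
/*
 * Example (illustrative sketch, not part of the original header):
 * atomic_xchg() unconditionally stores the new value and returns the old
 * one, e.g. to consume a pending flag exactly once.  The variable name
 * is hypothetical.
 *
 *	static atomic_t hypothetical_pending = ATOMIC_INIT(0);
 *
 *	if (atomic_xchg(&hypothetical_pending, 0) != 0)
 *		;	// we were the one to consume the pending event
 */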

static inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret = old;

	for (;;) {
		if (atomic_cmpset_int(&v->counter, old, new))
			break;
		ret = atomic_load_acq_int(&v->counter);
		if (ret != old)
			break;
	}
	return (ret);
}
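
/*
 * Example (illustrative sketch, not part of the original header):
 * atomic_cmpxchg() returns the value observed in the counter; the update
 * took effect only if that value equals the expected "old".  A typical
 * lock-free update loop (hypothetical variable name):
 *
 *	static atomic_t hypothetical_ctr = ATOMIC_INIT(1);
 *	int cur, prev;
 *
 *	cur = atomic_read(&hypothetical_ctr);
 *	for (;;) {
 *		prev = atomic_cmpxchg(&hypothetical_ctr, cur, cur * 2);
 *		if (prev == cur)
 *			break;		// update applied
 *		cur = prev;		// lost a race; retry with observed value
 *	}
 */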

#define	cmpxchg(ptr, old, new) ({				\
	__typeof(*(ptr)) __ret = (old);				\
	CTASSERT(sizeof(__ret) == 4 || sizeof(__ret) == 8);	\
	for (;;) {						\
		if (sizeof(__ret) == 4) {			\
			if (atomic_cmpset_int((volatile int *)	\
			    (ptr), (old), (new)))		\
				break;				\
			__ret = atomic_load_acq_int(		\
			    (volatile int *)(ptr));		\
			if (__ret != (old))			\
				break;				\
		} else {					\
			if (atomic_cmpset_64(			\
			    (volatile int64_t *)(ptr),		\
			    (old), (new)))			\
				break;				\
			__ret = atomic_load_acq_64(		\
			    (volatile int64_t *)(ptr));		\
			if (__ret != (old))			\
				break;				\
		}						\
	}							\
	__ret;							\
})
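
/*
 * Example (illustrative sketch, not part of the original header): the
 * cmpxchg() macro operates on plain 32-bit or 64-bit variables rather
 * than on atomic_t, and likewise evaluates to the previously observed
 * value.  The variable name is hypothetical.
 *
 *	uint64_t hypothetical_seq = 0;
 *
 *	if (cmpxchg(&hypothetical_seq, 0, 1) == 0)
 *		;	// we won the transition from 0 to 1
 */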

#define	LINUX_ATOMIC_OP(op, c_op)				\
static inline void atomic_##op(int i, atomic_t *v)		\
{								\
	int c, old;						\
								\
	c = v->counter;						\
	while ((old = atomic_cmpxchg(v, c, c c_op i)) != c)	\
		c = old;					\
}

LINUX_ATOMIC_OP(or, |)
LINUX_ATOMIC_OP(and, &)
LINUX_ATOMIC_OP(xor, ^)
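
/*
 * Example (illustrative sketch, not part of the original header): the
 * LINUX_ATOMIC_OP() expansions above provide atomic_or(), atomic_and()
 * and atomic_xor(), each built from an atomic_cmpxchg() retry loop.
 * The variable name and bit value are hypothetical.
 *
 *	static atomic_t hypothetical_flags = ATOMIC_INIT(0);
 *
 *	atomic_or(0x04, &hypothetical_flags);	// atomically set bit 2
 *	atomic_and(~0x04, &hypothetical_flags);	// atomically clear bit 2
 */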

#endif					/* _ASM_ATOMIC_H_ */