/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2018 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _ASM_ATOMIC_H_
#define _ASM_ATOMIC_H_

#include <linux/compiler.h>
#include <sys/types.h>
#include <machine/atomic.h>

#define ATOMIC_INIT(x)  { .counter = (x) }

typedef struct {
        volatile int counter;
} atomic_t;

/*------------------------------------------------------------------------*
 *      32-bit atomic operations
 *------------------------------------------------------------------------*/

#define atomic_add(i, v)                atomic_add_return((i), (v))
#define atomic_sub(i, v)                atomic_sub_return((i), (v))
#define atomic_inc_return(v)            atomic_add_return(1, (v))
#define atomic_add_negative(i, v)       (atomic_add_return((i), (v)) < 0)
#define atomic_add_and_test(i, v)       (atomic_add_return((i), (v)) == 0)
#define atomic_sub_and_test(i, v)       (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)          (atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)          (atomic_add_return(1, (v)) == 0)
#define atomic_dec_return(v)            atomic_sub_return(1, (v))
#define atomic_inc_not_zero(v)          atomic_add_unless((v), 1, 0)

static inline int
atomic_add_return(int i, atomic_t *v)
{
        return i + atomic_fetchadd_int(&v->counter, i);
}

static inline int
atomic_sub_return(int i, atomic_t *v)
{
        return atomic_fetchadd_int(&v->counter, -i) - i;
}

static inline void
atomic_set(atomic_t *v, int i)
{
        WRITE_ONCE(v->counter, i);
}

static inline void
atomic_set_release(atomic_t *v, int i)
{
        atomic_store_rel_int(&v->counter, i);
}

static inline void
atomic_set_mask(unsigned int mask, atomic_t *v)
{
        atomic_set_int(&v->counter, mask);
}

static inline int
atomic_read(const atomic_t *v)
{
        return READ_ONCE(v->counter);
}

static inline int
atomic_inc(atomic_t *v)
{
        return atomic_fetchadd_int(&v->counter, 1) + 1;
}

static inline int
atomic_dec(atomic_t *v)
{
        return atomic_fetchadd_int(&v->counter, -1) - 1;
}
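
/*
 * atomic_add_unless() adds "a" to the counter unless its current value
 * is "u", returning non-zero when the addition was performed.  The
 * retry loops below rely on atomic_fcmpset_int() updating the expected
 * value "c" on failure, so "c == u" is re-tested on every iteration.
 *
 * Illustrative use only (assuming a hypothetical object with an
 * atomic_t "refs" member):
 *
 *      if (atomic_inc_not_zero(&obj->refs) == 0)
 *              return (NULL);  -- object is already being torn down
 */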

static inline int
atomic_add_unless(atomic_t *v, int a, int u)
{
        int c = atomic_read(v);

        for (;;) {
                if (unlikely(c == u))
                        break;
                if (likely(atomic_fcmpset_int(&v->counter, &c, c + a)))
                        break;
        }
        return (c != u);
}

static inline int
atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
        int c = atomic_read(v);

        for (;;) {
                if (unlikely(c == u))
                        break;
                if (likely(atomic_fcmpset_int(&v->counter, &c, c + a)))
                        break;
        }
        return (c);
}

static inline void
atomic_clear_mask(unsigned int mask, atomic_t *v)
{
        atomic_clear_int(&v->counter, mask);
}

static inline int
atomic_xchg(atomic_t *v, int i)
{
        return (atomic_swap_int(&v->counter, i));
}

static inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
        int ret = old;

        for (;;) {
                if (atomic_fcmpset_int(&v->counter, &ret, new))
                        break;
                if (ret != old)
                        break;
        }
        return (ret);
}

#if defined(__amd64__) || defined(__arm64__) || defined(__i386__)
#define LINUXKPI_ATOMIC_8(...) __VA_ARGS__
#define LINUXKPI_ATOMIC_16(...) __VA_ARGS__
#else
#define LINUXKPI_ATOMIC_8(...)
#define LINUXKPI_ATOMIC_16(...)
#endif

#if !(defined(i386) || (defined(__mips__) && !(defined(__mips_n32) || \
    defined(__mips_n64))) || (defined(__powerpc__) && \
    !defined(__powerpc64__)))
#define LINUXKPI_ATOMIC_64(...) __VA_ARGS__
#else
#define LINUXKPI_ATOMIC_64(...)
#endif

#define cmpxchg(ptr, old, new) ({                                       \
        union {                                                         \
                __typeof(*(ptr)) val;                                   \
                u8 u8[0];                                               \
                u16 u16[0];                                             \
                u32 u32[0];                                             \
                u64 u64[0];                                             \
        } __ret = { .val = (old) }, __new = { .val = (new) };          \
                                                                        \
        CTASSERT(                                                       \
            LINUXKPI_ATOMIC_8(sizeof(__ret.val) == 1 ||)                \
            LINUXKPI_ATOMIC_16(sizeof(__ret.val) == 2 ||)               \
            LINUXKPI_ATOMIC_64(sizeof(__ret.val) == 8 ||)               \
            sizeof(__ret.val) == 4);                                    \
                                                                        \
        switch (sizeof(__ret.val)) {                                    \
        LINUXKPI_ATOMIC_8(                                              \
        case 1:                                                         \
                while (!atomic_fcmpset_8((volatile u8 *)(ptr),          \
                    __ret.u8, __new.u8[0]) && __ret.val == (old))       \
                        ;                                               \
                break;                                                  \
        )                                                               \
        LINUXKPI_ATOMIC_16(                                             \
        case 2:                                                         \
                while (!atomic_fcmpset_16((volatile u16 *)(ptr),        \
                    __ret.u16, __new.u16[0]) && __ret.val == (old))     \
                        ;                                               \
                break;                                                  \
        )                                                               \
        case 4:                                                         \
                while (!atomic_fcmpset_32((volatile u32 *)(ptr),        \
                    __ret.u32, __new.u32[0]) && __ret.val == (old))     \
                        ;                                               \
                break;                                                  \
        LINUXKPI_ATOMIC_64(                                             \
        case 8:                                                         \
                while (!atomic_fcmpset_64((volatile u64 *)(ptr),        \
                    __ret.u64, __new.u64[0]) && __ret.val == (old))     \
                        ;                                               \
                break;                                                  \
        )                                                               \
        }                                                               \
        __ret.val;                                                      \
})

#define cmpxchg_relaxed(...)    cmpxchg(__VA_ARGS__)
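
/*
 * cmpxchg() and xchg() accept 8-, 16-, 32- and 64-bit operands where
 * the architecture provides the matching primitives.  The anonymous
 * union aliases the operand through zero-length u8/u16/u32/u64 arrays
 * so a single macro can dispatch on sizeof() to the corresponding
 * atomic_fcmpset_*() or atomic_swap_*() routine, while CTASSERT()
 * rejects unsupported operand sizes at compile time.  The fcmpset
 * loops retry only on spurious failure, i.e. while the memory still
 * holds the expected "old" value; the last value observed is returned,
 * matching Linux semantics.
 *
 * Illustrative use only (hypothetical 32-bit variable "flags"):
 *
 *      static uint32_t flags;
 *
 *      if (cmpxchg(&flags, 0, 1) == 0)
 *              -- this caller performed the 0 -> 1 transition
 */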

#define xchg(ptr, new) ({                                               \
        union {                                                         \
                __typeof(*(ptr)) val;                                   \
                u8 u8[0];                                               \
                u16 u16[0];                                             \
                u32 u32[0];                                             \
                u64 u64[0];                                             \
        } __ret, __new = { .val = (new) };                              \
                                                                        \
        CTASSERT(                                                       \
            LINUXKPI_ATOMIC_8(sizeof(__ret.val) == 1 ||)                \
            LINUXKPI_ATOMIC_16(sizeof(__ret.val) == 2 ||)               \
            LINUXKPI_ATOMIC_64(sizeof(__ret.val) == 8 ||)               \
            sizeof(__ret.val) == 4);                                    \
                                                                        \
        switch (sizeof(__ret.val)) {                                    \
        LINUXKPI_ATOMIC_8(                                              \
        case 1:                                                         \
                __ret.val = READ_ONCE(*ptr);                            \
                while (!atomic_fcmpset_8((volatile u8 *)(ptr),          \
                    __ret.u8, __new.u8[0]))                             \
                        ;                                               \
                break;                                                  \
        )                                                               \
        LINUXKPI_ATOMIC_16(                                             \
        case 2:                                                         \
                __ret.val = READ_ONCE(*ptr);                            \
                while (!atomic_fcmpset_16((volatile u16 *)(ptr),        \
                    __ret.u16, __new.u16[0]))                           \
                        ;                                               \
                break;                                                  \
        )                                                               \
        case 4:                                                         \
                __ret.u32[0] = atomic_swap_32((volatile u32 *)(ptr),    \
                    __new.u32[0]);                                      \
                break;                                                  \
        LINUXKPI_ATOMIC_64(                                             \
        case 8:                                                         \
                __ret.u64[0] = atomic_swap_64((volatile u64 *)(ptr),    \
                    __new.u64[0]);                                      \
                break;                                                  \
        )                                                               \
        }                                                               \
        __ret.val;                                                      \
})

/*
 * Decrement "v" only when the result would remain non-negative.  The
 * return value is the new counter value, or a negative value when the
 * decrement was skipped.
 */
static inline int
atomic_dec_if_positive(atomic_t *v)
{
        int retval;
        int old;

        old = atomic_read(v);
        for (;;) {
                retval = old - 1;
                if (unlikely(retval < 0))
                        break;
                if (likely(atomic_fcmpset_int(&v->counter, &old, retval)))
                        break;
        }
        return (retval);
}

/*
 * Generate atomic_{or,and,andnot,xor}() and the corresponding
 * atomic_fetch_*() variants as atomic_cmpxchg() retry loops; the
 * fetch variants return the counter value observed before the
 * operation was applied.
 */
#define LINUX_ATOMIC_OP(op, c_op)                               \
static inline void atomic_##op(int i, atomic_t *v)              \
{                                                               \
        int c, old;                                             \
                                                                \
        c = v->counter;                                         \
        while ((old = atomic_cmpxchg(v, c, c c_op i)) != c)     \
                c = old;                                        \
}

#define LINUX_ATOMIC_FETCH_OP(op, c_op)                         \
static inline int atomic_fetch_##op(int i, atomic_t *v)         \
{                                                               \
        int c, old;                                             \
                                                                \
        c = v->counter;                                         \
        while ((old = atomic_cmpxchg(v, c, c c_op i)) != c)     \
                c = old;                                        \
                                                                \
        return (c);                                             \
}

LINUX_ATOMIC_OP(or, |)
LINUX_ATOMIC_OP(and, &)
LINUX_ATOMIC_OP(andnot, &~)
LINUX_ATOMIC_OP(xor, ^)

LINUX_ATOMIC_FETCH_OP(or, |)
LINUX_ATOMIC_FETCH_OP(and, &)
LINUX_ATOMIC_FETCH_OP(andnot, &~)
LINUX_ATOMIC_FETCH_OP(xor, ^)

#endif /* _ASM_ATOMIC_H_ */