/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef	_ASM_ATOMIC_H_
#define	_ASM_ATOMIC_H_

#include <sys/cdefs.h>
#include <sys/types.h>

#include <machine/atomic.h>

#include <linux/compiler.h>	/* likely()/unlikely(), used below */

#define	ATOMIC_INIT(x)	{ .counter = (x) }

typedef struct {
	volatile int counter;
} atomic_t;

/*------------------------------------------------------------------------*
 *	32-bit atomic operations
 *------------------------------------------------------------------------*/

#define	atomic_add(i, v)		atomic_add_return((i), (v))
#define	atomic_sub(i, v)		atomic_sub_return((i), (v))
#define	atomic_inc_return(v)		atomic_add_return(1, (v))
#define	atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)
#define	atomic_add_and_test(i, v)	(atomic_add_return((i), (v)) == 0)
#define	atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define	atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define	atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define	atomic_dec_return(v)		atomic_sub_return(1, (v))
#define	atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

static inline int
atomic_add_return(int i, atomic_t *v)
{
	return i + atomic_fetchadd_int(&v->counter, i);
}

static inline int
atomic_sub_return(int i, atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, -i) - i;
}

static inline void
atomic_set(atomic_t *v, int i)
{
	atomic_store_rel_int(&v->counter, i);
}

static inline void
atomic_set_release(atomic_t *v, int i)
{
	atomic_store_rel_int(&v->counter, i);
}

static inline void
atomic_set_mask(unsigned int mask, atomic_t *v)
{
	atomic_set_int(&v->counter, mask);
}

static inline int
atomic_read(const atomic_t *v)
{
	return atomic_load_acq_int(&__DECONST(atomic_t *, v)->counter);
}

static inline int
atomic_inc(atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, 1) + 1;
}

static inline int
atomic_dec(atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, -1) - 1;
}
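/*
 * Conditionally add "a" to "v", but only if the counter does not
 * currently hold "u".  Returns non-zero if the addition was performed.
 * This is the building block for atomic_inc_not_zero() above.
 */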
static inline int
atomic_add_unless(atomic_t *v, int a, int u)
{
	int c;

	for (;;) {
		c = atomic_read(v);
		if (unlikely(c == u))
			break;
		if (likely(atomic_cmpset_int(&v->counter, c, c + a)))
			break;
	}
	return (c != u);
}

static inline void
atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	atomic_clear_int(&v->counter, mask);
}

static inline int
atomic_xchg(atomic_t *v, int i)
{
#if defined(__i386__) || defined(__amd64__) || \
    defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__)
	return (atomic_swap_int(&v->counter, i));
#else
	int ret;
	for (;;) {
		ret = atomic_load_acq_int(&v->counter);
		if (atomic_cmpset_int(&v->counter, ret, i))
			break;
	}
	return (ret);
#endif
}

/*
 * Returns the value found in "v"; the exchange succeeded iff the
 * returned value equals "old".
 */
static inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret = old;

	for (;;) {
		if (atomic_cmpset_int(&v->counter, old, new))
			break;
		ret = atomic_load_acq_int(&v->counter);
		if (ret != old)
			break;
	}
	return (ret);
}

#define	cmpxchg(ptr, old, new) ({					\
	__typeof(*(ptr)) __ret;						\
									\
	CTASSERT(sizeof(__ret) == 1 || sizeof(__ret) == 2 ||		\
	    sizeof(__ret) == 4 || sizeof(__ret) == 8);			\
									\
	__ret = (old);							\
	switch (sizeof(__ret)) {					\
	case 1:								\
		while (!atomic_fcmpset_8((volatile int8_t *)(ptr),	\
		    (int8_t *)&__ret, (new)) && __ret == (old))		\
			;						\
		break;							\
	case 2:								\
		while (!atomic_fcmpset_16((volatile int16_t *)(ptr),	\
		    (int16_t *)&__ret, (new)) && __ret == (old))	\
			;						\
		break;							\
	case 4:								\
		while (!atomic_fcmpset_32((volatile int32_t *)(ptr),	\
		    (int32_t *)&__ret, (new)) && __ret == (old))	\
			;						\
		break;							\
	case 8:								\
		while (!atomic_fcmpset_64((volatile int64_t *)(ptr),	\
		    (int64_t *)&__ret, (new)) && __ret == (old))	\
			;						\
		break;							\
	}								\
	__ret;								\
})

#define	cmpxchg_relaxed(...)	cmpxchg(__VA_ARGS__)

/*
 * XXX: this is a plain load/store pair; unlike Linux's xchg() it
 * provides no atomicity with respect to other CPUs.
 */
#define	xchg(ptr, v) ({			\
	__typeof(*(ptr)) __ret;		\
					\
	__ret = *(ptr);			\
	*(ptr) = v;			\
	__ret;				\
})

/*
 * Generate atomic_<op>() and atomic_fetch_<op>() for the bitwise
 * operations by looping on atomic_cmpxchg() until the update sticks.
 * The fetch variants return the value observed before the update.
 */
#define	LINUX_ATOMIC_OP(op, c_op)				\
static inline void atomic_##op(int i, atomic_t *v)		\
{								\
	int c, old;						\
								\
	c = v->counter;						\
	while ((old = atomic_cmpxchg(v, c, c c_op i)) != c)	\
		c = old;					\
}

#define	LINUX_ATOMIC_FETCH_OP(op, c_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)	\
{								\
	int c, old;						\
								\
	c = v->counter;						\
	while ((old = atomic_cmpxchg(v, c, c c_op i)) != c)	\
		c = old;					\
								\
	return (c);						\
}

LINUX_ATOMIC_OP(or, |)
LINUX_ATOMIC_OP(and, &)
LINUX_ATOMIC_OP(andnot, &~)
LINUX_ATOMIC_OP(xor, ^)

LINUX_ATOMIC_FETCH_OP(or, |)
LINUX_ATOMIC_FETCH_OP(and, &)
LINUX_ATOMIC_FETCH_OP(andnot, &~)
LINUX_ATOMIC_FETCH_OP(xor, ^)

#endif					/* _ASM_ATOMIC_H_ */
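/*
 * Usage sketch (illustrative only, not part of the original header):
 * a refcounted object in the Linux style, built from the primitives
 * above.  The struct and the obj_free() destructor are hypothetical.
 *
 *	struct obj {
 *		atomic_t refcount;	// initialize with ATOMIC_INIT(1)
 *	};
 *
 *	static int
 *	obj_get(struct obj *o)
 *	{
 *		// Take a reference only while the object is still live;
 *		// fails (returns 0) once the count has dropped to zero.
 *		return (atomic_inc_not_zero(&o->refcount));
 *	}
 *
 *	static void
 *	obj_put(struct obj *o)
 *	{
 *		// The thread that drops the last reference frees the object.
 *		if (atomic_dec_and_test(&o->refcount))
 *			obj_free(o);
 *	}
 */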