/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef	_ASM_ATOMIC_H_
#define	_ASM_ATOMIC_H_

#include <sys/cdefs.h>
#include <sys/types.h>
#include <machine/atomic.h>

#include <linux/compiler.h>	/* for likely() and unlikely(), used below */

#define	ATOMIC_INIT(x)	{ .counter = (x) }

typedef struct {
	volatile int counter;
} atomic_t;

/*------------------------------------------------------------------------*
 *	32-bit atomic operations
 *------------------------------------------------------------------------*/

#define	atomic_add(i, v)		atomic_add_return((i), (v))
#define	atomic_sub(i, v)		atomic_sub_return((i), (v))
#define	atomic_inc_return(v)		atomic_add_return(1, (v))
#define	atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)
#define	atomic_add_and_test(i, v)	(atomic_add_return((i), (v)) == 0)
#define	atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define	atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define	atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define	atomic_dec_return(v)		atomic_sub_return(1, (v))
#define	atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

static inline int
atomic_add_return(int i, atomic_t *v)
{
	return i + atomic_fetchadd_int(&v->counter, i);
}

static inline int
atomic_sub_return(int i, atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, -i) - i;
}

static inline void
atomic_set(atomic_t *v, int i)
{
	atomic_store_rel_int(&v->counter, i);
}

static inline void
atomic_set_release(atomic_t *v, int i)
{
	atomic_store_rel_int(&v->counter, i);
}

static inline void
atomic_set_mask(unsigned int mask, atomic_t *v)
{
	atomic_set_int(&v->counter, mask);
}

static inline int
atomic_read(const atomic_t *v)
{
	return atomic_load_acq_int(&__DECONST(atomic_t *, v)->counter);
}

static inline int
atomic_inc(atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, 1) + 1;
}

static inline int
atomic_dec(atomic_t *v)
{
	return atomic_fetchadd_int(&v->counter, -1) - 1;
}
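
/*
 * Usage sketch (illustrative only, not part of the original header):
 * the operations above follow Linux atomic_t semantics, so a typical
 * Linux-style reference count carries over unchanged.  The "struct obj",
 * obj_hold(), obj_put() and obj_free() names below are hypothetical:
 *
 *	struct obj {
 *		atomic_t refcount;	// initialize with ATOMIC_INIT(1)
 *	};
 *
 *	static void
 *	obj_hold(struct obj *o)
 *	{
 *		atomic_inc(&o->refcount);
 *	}
 *
 *	static void
 *	obj_put(struct obj *o)
 *	{
 *		// atomic_dec_and_test() is true once the counter
 *		// hits zero, i.e. on release of the last reference.
 *		if (atomic_dec_and_test(&o->refcount))
 *			obj_free(o);
 *	}
 */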

/*
 * Add "a" to "v" unless "v" is equal to "u".  Following Linux
 * semantics, the return value is non-zero if the addition was
 * performed and zero otherwise.
 */
static inline int
atomic_add_unless(atomic_t *v, int a, int u)
{
	int c;

	for (;;) {
		c = atomic_read(v);
		if (unlikely(c == u))
			break;
		if (likely(atomic_cmpset_int(&v->counter, c, c + a)))
			break;
	}
	return (c != u);
}

static inline void
atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	atomic_clear_int(&v->counter, mask);
}

static inline int
atomic_xchg(atomic_t *v, int i)
{
#if defined(__i386__) || defined(__amd64__) || \
    defined(__arm__) || defined(__aarch64__)
	return (atomic_swap_int(&v->counter, i));
#else
	int ret;

	/* Emulate the swap with a compare-and-set loop. */
	for (;;) {
		ret = atomic_load_acq_int(&v->counter);
		if (atomic_cmpset_int(&v->counter, ret, i))
			break;
	}
	return (ret);
#endif
}

static inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret = old;

	/*
	 * Loop until the compare-and-set succeeds, or until the
	 * counter is observed to differ from "old", in which case
	 * the observed value is returned.
	 */
	for (;;) {
		if (atomic_cmpset_int(&v->counter, old, new))
			break;
		ret = atomic_load_acq_int(&v->counter);
		if (ret != old)
			break;
	}
	return (ret);
}

/* The generic cmpxchg() below handles 32- and 64-bit operands only. */
#define	cmpxchg(ptr, old, new) ({					\
	__typeof(*(ptr)) __ret = (old);					\
	CTASSERT(sizeof(__ret) == 4 || sizeof(__ret) == 8);		\
	for (;;) {							\
		if (sizeof(__ret) == 4) {				\
			if (atomic_cmpset_int((volatile int *)		\
			    (ptr), (old), (new)))			\
				break;					\
			__ret = atomic_load_acq_int(			\
			    (volatile int *)(ptr));			\
			if (__ret != (old))				\
				break;					\
		} else {						\
			if (atomic_cmpset_64(				\
			    (volatile int64_t *)(ptr),			\
			    (old), (new)))				\
				break;					\
			__ret = atomic_load_acq_64(			\
			    (volatile int64_t *)(ptr));			\
			if (__ret != (old))				\
				break;					\
		}							\
	}								\
	__ret;								\
})

/* Generate atomic_{or,and,andnot,xor}() ... */
#define	LINUX_ATOMIC_OP(op, c_op)				\
static inline void atomic_##op(int i, atomic_t *v)		\
{								\
	int c, old;						\
								\
	c = v->counter;						\
	while ((old = atomic_cmpxchg(v, c, c c_op i)) != c)	\
		c = old;					\
}

/* ... and their fetch variants, which return the previous value. */
#define	LINUX_ATOMIC_FETCH_OP(op, c_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)	\
{								\
	int c, old;						\
								\
	c = v->counter;						\
	while ((old = atomic_cmpxchg(v, c, c c_op i)) != c)	\
		c = old;					\
								\
	return (c);						\
}

LINUX_ATOMIC_OP(or, |)
LINUX_ATOMIC_OP(and, &)
LINUX_ATOMIC_OP(andnot, &~)
LINUX_ATOMIC_OP(xor, ^)

LINUX_ATOMIC_FETCH_OP(or, |)
LINUX_ATOMIC_FETCH_OP(and, &)
LINUX_ATOMIC_FETCH_OP(andnot, &~)
LINUX_ATOMIC_FETCH_OP(xor, ^)

#endif	/* _ASM_ATOMIC_H_ */