/*-
 * Copyright (c) 2016-2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef	_ASM_ATOMIC64_H_
#define	_ASM_ATOMIC64_H_

#include <linux/compiler.h>
#include <sys/types.h>
#include <machine/atomic.h>

typedef struct {
	volatile int64_t counter;
} atomic64_t;

#define	ATOMIC64_INIT(x)	{ .counter = (x) }

/*------------------------------------------------------------------------*
 *	64-bit atomic operations
 *------------------------------------------------------------------------*/

#define	atomic64_add(i, v)		atomic64_add_return((i), (v))
#define	atomic64_sub(i, v)		atomic64_sub_return((i), (v))
#define	atomic64_inc_return(v)		atomic64_add_return(1, (v))
#define	atomic64_add_negative(i, v)	(atomic64_add_return((i), (v)) < 0)
#define	atomic64_add_and_test(i, v)	(atomic64_add_return((i), (v)) == 0)
#define	atomic64_sub_and_test(i, v)	(atomic64_sub_return((i), (v)) == 0)
#define	atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
#define	atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)
#define	atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define	atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
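
/*
 * Illustrative sketch only, not part of the original header: a typical
 * consumer of the macros above is a 64-bit reference count.  The names
 * "obj", "obj_get", "obj_put" and "obj_free" below are hypothetical and
 * exist purely to show how the operations are meant to be combined.
 *
 *	struct obj {
 *		atomic64_t refcount;	// initialized with ATOMIC64_INIT(1)
 *	};
 *
 *	// Take a reference only if the object is still live (count != 0).
 *	static inline int
 *	obj_get(struct obj *o)
 *	{
 *		return (atomic64_inc_not_zero(&o->refcount));
 *	}
 *
 *	// Drop a reference; the caller that drops the last one frees.
 *	static inline void
 *	obj_put(struct obj *o)
 *	{
 *		if (atomic64_dec_and_test(&o->refcount))
 *			obj_free(o);	// hypothetical destructor
 *	}
 */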

static inline int64_t
atomic64_add_return(int64_t i, atomic64_t *v)
{
	return i + atomic_fetchadd_64(&v->counter, i);
}

static inline int64_t
atomic64_sub_return(int64_t i, atomic64_t *v)
{
	return atomic_fetchadd_64(&v->counter, -i) - i;
}

static inline void
atomic64_set(atomic64_t *v, int64_t i)
{
	atomic_store_rel_64(&v->counter, i);
}

static inline int64_t
atomic64_read(atomic64_t *v)
{
	return READ_ONCE(v->counter);
}

static inline int64_t
atomic64_inc(atomic64_t *v)
{
	return atomic_fetchadd_64(&v->counter, 1) + 1;
}

static inline int64_t
atomic64_dec(atomic64_t *v)
{
	return atomic_fetchadd_64(&v->counter, -1) - 1;
}

/*
 * Add 'a' to the counter unless it currently equals 'u'; returns non-zero
 * if the addition was performed.
 */
static inline int64_t
atomic64_add_unless(atomic64_t *v, int64_t a, int64_t u)
{
	int64_t c;

	for (;;) {
		c = atomic64_read(v);
		if (unlikely(c == u))
			break;
		if (likely(atomic_cmpset_64(&v->counter, c, c + a)))
			break;
	}
	return (c != u);
}

/* Atomically replace the counter with 'i' and return the previous value. */
static inline int64_t
atomic64_xchg(atomic64_t *v, int64_t i)
{
#if defined(__i386__) || defined(__amd64__) || \
    defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc64__)
	return (atomic_swap_64(&v->counter, i));
#else
	int64_t ret;

	for (;;) {
		ret = READ_ONCE(v->counter);
		if (atomic_cmpset_64(&v->counter, ret, i))
			break;
	}
	return (ret);
#endif
}

/*
 * Compare-and-swap: if the counter equals 'old', store 'new' and return
 * 'old'; otherwise return the value that was observed instead.
 */
static inline int64_t
atomic64_cmpxchg(atomic64_t *v, int64_t old, int64_t new)
{
	int64_t ret = old;

	for (;;) {
		if (atomic_cmpset_64(&v->counter, old, new))
			break;
		ret = READ_ONCE(v->counter);
		if (ret != old)
			break;
	}
	return (ret);
}

#endif	/* _ASM_ATOMIC64_H_ */