/*-
 * Copyright (c) 2016-2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _LINUXKPI_ASM_ATOMIC64_H_
#define _LINUXKPI_ASM_ATOMIC64_H_

#include <linux/compiler.h>
#include <sys/types.h>
#include <machine/atomic.h>

typedef struct {
        volatile int64_t counter;
} atomic64_t;
#define ATOMIC64_INIT(x) { .counter = (x) }

/*------------------------------------------------------------------------*
 * 64-bit atomic operations
 *------------------------------------------------------------------------*/

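/*
 * The plain and predicate forms of the Linux atomic64 API are expressed
 * in terms of the value-returning helpers defined below.
 */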
#define atomic64_add(i, v) atomic64_add_return((i), (v))
#define atomic64_sub(i, v) atomic64_sub_return((i), (v))
#define atomic64_inc_return(v) atomic64_add_return(1, (v))
#define atomic64_add_negative(i, v) (atomic64_add_return((i), (v)) < 0)
#define atomic64_add_and_test(i, v) (atomic64_add_return((i), (v)) == 0)
#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)
#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

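/* Atomically add i to v and return the value v held before the addition. */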
static inline int64_t
atomic64_fetch_add(int64_t i, atomic64_t *v)
{
        return (atomic_fetchadd_64(&v->counter, i));
}

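/* Atomically add i to v and return the resulting (new) value. */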
static inline int64_t
atomic64_add_return(int64_t i, atomic64_t *v)
{
        return i + atomic_fetchadd_64(&v->counter, i);
}

static inline int64_t
atomic64_sub_return(int64_t i, atomic64_t *v)
{
        return atomic_fetchadd_64(&v->counter, -i) - i;
}

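/* Store i into v with release semantics. */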
static inline void
atomic64_set(atomic64_t *v, int64_t i)
{
        atomic_store_rel_64(&v->counter, i);
}

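/*
 * Read the current value of v.  READ_ONCE() keeps the compiler from
 * eliding or refetching the load.
 */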
static inline int64_t
atomic64_read(atomic64_t *v)
{
        return READ_ONCE(v->counter);
}

static inline int64_t
atomic64_inc(atomic64_t *v)
{
        return atomic_fetchadd_64(&v->counter, 1) + 1;
}

static inline int64_t
atomic64_dec(atomic64_t *v)
{
        return atomic_fetchadd_64(&v->counter, -1) - 1;
}

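/*
 * Add a to v unless v is currently equal to u.  Returns non-zero if the
 * addition was performed and zero otherwise.
 */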
static inline int64_t
atomic64_add_unless(atomic64_t *v, int64_t a, int64_t u)
{
        int64_t c = atomic64_read(v);

        for (;;) {
                if (unlikely(c == u))
                        break;
                if (likely(atomic_fcmpset_64(&v->counter, &c, c + a)))
                        break;
        }
        return (c != u);
}

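/*
 * Same as atomic64_add_unless(), except that the value v held before the
 * attempt is returned (which equals u when no addition took place).
 */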
static inline int64_t
atomic64_fetch_add_unless(atomic64_t *v, int64_t a, int64_t u)
{
        int64_t c = atomic64_read(v);

        for (;;) {
                if (unlikely(c == u))
                        break;
                if (likely(atomic_fcmpset_64(&v->counter, &c, c + a)))
                        break;
        }
        return (c);
}

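/*
 * Atomically replace v with i and return the previous value.  32-bit
 * powerpc lacks atomic_swap_64(), so it falls back to a compare-and-set
 * loop.
 */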
static inline int64_t
atomic64_xchg(atomic64_t *v, int64_t i)
{
#if !(defined(__powerpc__) && !defined(__powerpc64__))
        return (atomic_swap_64(&v->counter, i));
#else
        int64_t ret = atomic64_read(v);

        while (!atomic_fcmpset_64(&v->counter, &ret, i))
                ;
        return (ret);
#endif
}

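/*
 * If v equals old, atomically replace it with new.  Returns the value
 * observed in v, which is old on success.  atomic_fcmpset_64() may fail
 * spuriously on some architectures, so the loop retries as long as the
 * observed value still matches old.
 */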
static inline int64_t
atomic64_cmpxchg(atomic64_t *v, int64_t old, int64_t new)
{
        int64_t ret = old;

        for (;;) {
                if (atomic_fcmpset_64(&v->counter, &ret, new))
                        break;
                if (ret != old)
                        break;
        }
        return (ret);
}

#endif /* _LINUXKPI_ASM_ATOMIC64_H_ */