/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_ATOMIC_H
#define __ASM_CSKY_ATOMIC_H

#ifdef CONFIG_SMP
#include <asm-generic/atomic64.h>

#include <asm/cmpxchg.h>
#include <asm/barrier.h>

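/*
 * The acquire/release fences below map to C-SKY "bar" instructions from
 * <asm/barrier.h>.  Going by the mnemonic convention used there
 * (b = before, a = after, r = read, w = write):
 *
 *   __bar_brarw(): earlier reads complete before later reads and
 *                  writes -- a load-acquire style fence.
 *   __bar_brwaw(): earlier reads and writes complete before later
 *                  writes -- a store-release style fence.
 *
 * The generic atomic layer combines these with the _relaxed atomics
 * defined below to build the _acquire/_release variants.
 */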
#define __atomic_acquire_fence()	__bar_brarw()

#define __atomic_release_fence()	__bar_brwaw()

static __always_inline int arch_atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);
}
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

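/*
 * ldex.w/stex.w are C-SKY's load-exclusive/store-conditional pair.
 * stex.w writes a status back into its source register: 1 on success,
 * 0 if the reservation was lost, so "bez ..., 1b" retries until the
 * read-modify-write has happened atomically.  This also means the
 * register holding the computed value is clobbered by the status.
 */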
#define ATOMIC_OP(op)							\
static __always_inline							\
void arch_atomic_##op(int i, atomic_t *v)				\
{									\
	unsigned long tmp;						\
	__asm__ __volatile__ (						\
	"1:	ldex.w		%0, (%2)	\n"			\
	"	" #op "		%0, %1		\n"			\
	"	stex.w		%0, (%2)	\n"			\
	"	bez		%0, 1b		\n"			\
	: "=&r" (tmp)							\
	: "r" (i), "r" (&v->counter)					\
	: "memory");							\
}

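/*
 * ATOMIC_OP(add) expands to
 *
 *	static __always_inline void arch_atomic_add(int i, atomic_t *v);
 *
 * and likewise for sub/and/or/xor.  These ops return nothing, so no
 * ordering beyond atomicity is required and no fences are emitted.
 */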
ATOMIC_OP(add)
ATOMIC_OP(sub)
ATOMIC_OP(and)
ATOMIC_OP( or)
ATOMIC_OP(xor)

#undef ATOMIC_OP

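/*
 * Fetch variants return the value the counter held *before* the
 * operation.  The old value is saved with "mov %1, %0" before the op
 * destroys it; the stex.w status lands in %0 (tmp), leaving %1 (ret)
 * intact across a successful store.  These are the _relaxed forms;
 * the generic layer adds the fences for the stronger orderings.
 */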
#define ATOMIC_FETCH_OP(op)						\
static __always_inline							\
int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)		\
{									\
	register int ret, tmp;						\
	__asm__ __volatile__ (						\
	"1:	ldex.w		%0, (%3) \n"				\
	"	mov		%1, %0   \n"				\
	"	" #op "		%0, %2   \n"				\
	"	stex.w		%0, (%3) \n"				\
	"	bez		%0, 1b   \n"				\
		: "=&r" (tmp), "=&r" (ret)				\
		: "r" (i), "r" (&v->counter)				\
		: "memory");						\
	return ret;							\
}

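/*
 * The _return forms are derived from the fetch forms in plain C, e.g.
 *
 *	arch_atomic_add_return_relaxed(i, v)
 *		== arch_atomic_fetch_add_relaxed(i, v) + i;
 *
 * i.e. new value = old value <op> i.
 */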
#define ATOMIC_OP_RETURN(op, c_op)					\
static __always_inline							\
int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)		\
{									\
	return arch_atomic_fetch_##op##_relaxed(i, v) c_op i;		\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_FETCH_OP(op)						\
	ATOMIC_OP_RETURN(op, c_op)

ATOMIC_OPS(add, +)
ATOMIC_OPS(sub, -)

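/*
 * Defining a macro with the same name as the function tells the
 * fallback machinery behind <linux/atomic.h> that this arch provides
 * the _relaxed variant, so the fenced versions are built from it
 * instead of from a generic cmpxchg-based fallback.
 */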
#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed

#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN

#define ATOMIC_OPS(op)							\
	ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS( or)
ATOMIC_OPS(xor)

#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed

#undef ATOMIC_OPS

#undef ATOMIC_FETCH_OP

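/*
 * The conditional operations below are fully ordered on success:
 * RELEASE_FENCE (an asm fence string provided with the other C-SKY
 * barrier definitions) before the ll/sc loop orders prior accesses
 * against the store, and FULL_FENCE after a successful stex.w orders
 * it against later accesses.  The early-exit branch to 2f skips
 * FULL_FENCE, so a failed attempt stays relaxed.
 */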
static __always_inline int
arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int prev, tmp;

	__asm__ __volatile__ (
		RELEASE_FENCE
		"1:	ldex.w		%0, (%3)	\n"
		"	cmpne		%0, %4		\n"
		"	bf		2f		\n"
		"	mov		%1, %0		\n"
		"	add		%1, %2		\n"
		"	stex.w		%1, (%3)	\n"
		"	bez		%1, 1b		\n"
		FULL_FENCE
		"2:\n"
		: "=&r" (prev), "=&r" (tmp)
		: "r" (a), "r" (&v->counter), "r" (u)
		: "memory");

	return prev;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless

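/*
 * For the inc/dec-unless helpers, %1 (rc) carries the boolean result:
 * it is preset to 0 and set to 1 only once the guard condition allows
 * the update to go ahead.  %0 (tmp) cannot hold the result because a
 * successful stex.w overwrites it with the status value.
 */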
static __always_inline bool
arch_atomic_inc_unless_negative(atomic_t *v)
{
	int rc, tmp;

	__asm__ __volatile__ (
		RELEASE_FENCE
		"1:	ldex.w		%0, (%2)	\n"
		"	movi		%1, 0		\n"
		"	blz		%0, 2f		\n"
		"	movi		%1, 1		\n"
		"	addi		%0, 1		\n"
		"	stex.w		%0, (%2)	\n"
		"	bez		%0, 1b		\n"
		FULL_FENCE
		"2:\n"
		: "=&r" (tmp), "=&r" (rc)
		: "r" (&v->counter)
		: "memory");

	return rc ? true : false;
}
#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative

static __always_inline bool
arch_atomic_dec_unless_positive(atomic_t *v)
{
	int rc, tmp;

	__asm__ __volatile__ (
		RELEASE_FENCE
		"1:	ldex.w		%0, (%2)	\n"
		"	movi		%1, 0		\n"
		"	bhz		%0, 2f		\n"
		"	movi		%1, 1		\n"
		"	subi		%0, 1		\n"
		"	stex.w		%0, (%2)	\n"
		"	bez		%0, 1b		\n"
		FULL_FENCE
		"2:\n"
		: "=&r" (tmp), "=&r" (rc)
		: "r" (&v->counter)
		: "memory");

	return rc ? true : false;
}
#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive

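/*
 * arch_atomic_dec_if_positive() must return the new value even when it
 * is negative and nothing was stored.  Here the decremented value goes
 * through %1 (tmp), which is what stex.w clobbers, while %0 (dec) keeps
 * the loaded value, so "dec - 1" reconstructs the result on both paths.
 */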
static __always_inline int
arch_atomic_dec_if_positive(atomic_t *v)
{
	int dec, tmp;

	__asm__ __volatile__ (
		RELEASE_FENCE
		"1:	ldex.w		%0, (%2)	\n"
		"	subi		%1, %0, 1	\n"
		"	blz		%1, 2f		\n"
		"	stex.w		%1, (%2)	\n"
		"	bez		%1, 1b		\n"
		FULL_FENCE
		"2:\n"
		: "=&r" (dec), "=&r" (tmp)
		: "r" (&v->counter)
		: "memory");

	return dec - 1;
}
#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive

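/*
 * xchg/cmpxchg on the 32-bit counter are thin wrappers around the
 * size-dispatching helpers in <asm/cmpxchg.h> (the trailing 4 is
 * sizeof(int)).  No _release variant of cmpxchg is wired up here;
 * the generic layer derives it from the _relaxed form plus
 * __atomic_release_fence().
 */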
#define ATOMIC_OP()							\
static __always_inline							\
int arch_atomic_xchg_relaxed(atomic_t *v, int n)			\
{									\
	return __xchg_relaxed(n, &(v->counter), 4);			\
}									\
static __always_inline							\
int arch_atomic_cmpxchg_relaxed(atomic_t *v, int o, int n)		\
{									\
	return __cmpxchg_relaxed(&(v->counter), o, n, 4);		\
}									\
static __always_inline							\
int arch_atomic_cmpxchg_acquire(atomic_t *v, int o, int n)		\
{									\
	return __cmpxchg_acquire(&(v->counter), o, n, 4);		\
}									\
static __always_inline							\
int arch_atomic_cmpxchg(atomic_t *v, int o, int n)			\
{									\
	return __cmpxchg(&(v->counter), o, n, 4);			\
}

#define ATOMIC_OPS()							\
	ATOMIC_OP()

ATOMIC_OPS()

#define arch_atomic_xchg_relaxed	arch_atomic_xchg_relaxed
#define arch_atomic_cmpxchg_relaxed	arch_atomic_cmpxchg_relaxed
#define arch_atomic_cmpxchg_acquire	arch_atomic_cmpxchg_acquire
#define arch_atomic_cmpxchg		arch_atomic_cmpxchg

#undef ATOMIC_OPS
#undef ATOMIC_OP

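/* Non-SMP builds: the generic UP-safe implementations suffice. */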
#else
#include <asm-generic/atomic.h>
#endif /* CONFIG_SMP */

#endif /* __ASM_CSKY_ATOMIC_H */