/*
 * include/asm-xtensa/atomic.h
 *
 * Atomic operations that C can't guarantee us.  Useful for resource counting.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2008 Tensilica Inc.
 */

#ifndef _XTENSA_ATOMIC_H
#define _XTENSA_ATOMIC_H

#include <linux/stringify.h>
#include <linux/types.h>

#ifdef __KERNEL__
#include <asm/processor.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)	{ (i) }

/*
 * This Xtensa implementation assumes that the right mechanism
 * for exclusion is to lock interrupts to level EXCM_LEVEL.
 *
 * Locking interrupts looks like this:
 *
 *    rsil a15, LOCKLEVEL
 *    <code>
 *    wsr  a15, PS
 *    rsync
 *
 * Note that a15 is used here because the register allocation
 * done by the compiler is not guaranteed, and a window overflow
 * must not occur between the rsil and wsr instructions. By using
 * a15 in the rsil, the machine is guaranteed to be in a state
 * where no register reference will cause an overflow.
 */
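
/*
 * In C terms, the interrupt-locking fallbacks below (used when the
 * core lacks S32C1I) all follow this sketch; disable_to_locklevel()
 * and restore_ps() are hypothetical helpers shown for exposition only:
 *
 *	ps = disable_to_locklevel();	// rsil a15, LOCKLEVEL
 *	tmp = v->counter;		// l32i
 *	tmp = <op>(tmp, i);		// add/sub/and/or
 *	v->counter = tmp;		// s32i
 *	restore_ps(ps);			// wsr a15, ps; rsync
 */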

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		(*(volatile int *)&(v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v,i)		((v)->counter = (i))
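
/*
 * Example usage (illustrative only; 'users' is a made-up counter):
 *
 *	static atomic_t users = ATOMIC_INIT(0);
 *
 *	atomic_set(&users, 5);
 *	BUG_ON(atomic_read(&users) != 5);
 *
 * Note that atomic_set() is a plain store and atomic_read() a plain
 * volatile load; neither implies a memory barrier.
 */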

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"
			"       wsr     %1, scompare1\n"
			"       add     %0, %1, %2\n"
			"       s32c1i  %0, %3, 0\n"
			"       bne     %0, %1, 1b\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (i), "a" (v)
			: "memory"
			);
#else
	unsigned int vval;

	__asm__ __volatile__(
			"       rsil    a15, "__stringify(LOCKLEVEL)"\n"
			"       l32i    %0, %2, 0\n"
			"       add     %0, %0, %1\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"
			"       rsync\n"
			: "=&a" (vval)
			: "a" (i), "a" (v)
			: "a15", "memory"
			);
#endif
}
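
/*
 * The S32C1I path above is, in effect, a compare-and-swap loop.  An
 * illustrative C rendering (exposition only, not the real code path):
 *
 *	do {
 *		tmp = v->counter;		// l32i + wsr scompare1
 *		result = tmp + i;		// add
 *	} while (cmpxchg(&v->counter, tmp, result) != tmp);  // s32c1i, bne
 *
 * s32c1i stores 'result' only if the word still equals SCOMPARE1, and
 * always returns the word's previous value in the source register.
 */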

/**
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static inline void atomic_sub(int i, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"
			"       wsr     %1, scompare1\n"
			"       sub     %0, %1, %2\n"
			"       s32c1i  %0, %3, 0\n"
			"       bne     %0, %1, 1b\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (i), "a" (v)
			: "memory"
			);
#else
	unsigned int vval;

	__asm__ __volatile__(
			"       rsil    a15, "__stringify(LOCKLEVEL)"\n"
			"       l32i    %0, %2, 0\n"
			"       sub     %0, %0, %1\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"
			"       rsync\n"
			: "=&a" (vval)
			: "a" (i), "a" (v)
			: "a15", "memory"
			);
#endif
}

/*
 * We use atomic_{add|sub}_return to define other functions.
 */

static inline int atomic_add_return(int i, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"
			"       wsr     %1, scompare1\n"
			"       add     %0, %1, %2\n"
			"       s32c1i  %0, %3, 0\n"
			"       bne     %0, %1, 1b\n"
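			/*
			 * A successful s32c1i leaves the old value in %0,
			 * so recompute old + %2 to return the new value.
			 */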
			"       add     %0, %0, %2\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (i), "a" (v)
			: "memory"
			);

	return result;
#else
	unsigned int vval;

	__asm__ __volatile__(
			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
			"       l32i    %0, %2, 0\n"
			"       add     %0, %0, %1\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"
			"       rsync\n"
			: "=&a" (vval)
			: "a" (i), "a" (v)
			: "a15", "memory"
			);

	return vval;
#endif
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"
			"       wsr     %1, scompare1\n"
			"       sub     %0, %1, %2\n"
			"       s32c1i  %0, %3, 0\n"
			"       bne     %0, %1, 1b\n"
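			/*
			 * As in atomic_add_return(): redo the subtraction
			 * because s32c1i left the old value in %0.
			 */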
			"       sub     %0, %0, %2\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (i), "a" (v)
			: "memory"
			);

	return result;
#else
	unsigned int vval;

	__asm__ __volatile__(
			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
			"       l32i    %0, %2, 0\n"
			"       sub     %0, %0, %1\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"
			"       rsync\n"
			: "=&a" (vval)
			: "a" (i), "a" (v)
			: "a15", "memory"
			);

	return vval;
#endif
}

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
#define atomic_inc(v) atomic_add(1,(v))

/**
 * atomic_inc_return - increment atomic variable and return result
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns the new value.
 */
#define atomic_inc_return(v) atomic_add_return(1,(v))

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
#define atomic_dec(v) atomic_sub(1,(v))

/**
 * atomic_dec_return - decrement atomic variable and return result
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and returns the new value.
 */
#define atomic_dec_return(v) atomic_sub_return(1,(v))

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
#define atomic_dec_and_test(v) (atomic_sub_return(1,(v)) == 0)

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_add_return(1,(v)) == 0)

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)
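
/*
 * Typical (illustrative) use of the test variants for reference
 * counting; 'obj', 'refcnt' and release() are made-up names:
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		release(obj);
 */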

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
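
/*
 * The generic helpers in <linux/atomic.h> build on this primitive,
 * roughly:
 *
 *	atomic_add_unless(v, a, u)	=> __atomic_add_unless(v, a, u) != u
 *	atomic_inc_not_zero(v)		=> atomic_add_unless(v, 1, 0)
 */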


static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"
			"       wsr     %1, scompare1\n"
			"       and     %0, %1, %2\n"
			"       s32c1i  %0, %3, 0\n"
			"       bne     %0, %1, 1b\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (~mask), "a" (v)
			: "memory"
			);
#else
	unsigned int all_f = -1;
	unsigned int vval;

	__asm__ __volatile__(
			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
			"       l32i    %0, %2, 0\n"
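			/*
			 * %1 and %4 are tied to the same register ("1"
			 * constraint): flip mask to ~mask via the all-ones
			 * value in %3, then clear those bits from the word.
			 */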
			"       xor     %1, %4, %3\n"
			"       and     %0, %0, %4\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"
			"       rsync\n"
			: "=&a" (vval), "=a" (mask)
			: "a" (v), "a" (all_f), "1" (mask)
			: "a15", "memory"
			);
#endif
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"
			"       wsr     %1, scompare1\n"
			"       or      %0, %1, %2\n"
			"       s32c1i  %0, %3, 0\n"
			"       bne     %0, %1, 1b\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (mask), "a" (v)
			: "memory"
			);
#else
	unsigned int vval;

	__asm__ __volatile__(
			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
			"       l32i    %0, %2, 0\n"
			"       or      %0, %0, %1\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"
			"       rsync\n"
			: "=&a" (vval)
			: "a" (mask), "a" (v)
			: "a15", "memory"
			);
#endif
}
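
/*
 * Example (illustrative) use of the mask helpers, with 'flags' being
 * some atomic_t:
 *
 *	atomic_set_mask(0x4, &flags);	// set bit 2 atomically
 *	atomic_clear_mask(0x4, &flags);	// clear it again
 */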

#endif /* __KERNEL__ */

#endif /* _XTENSA_ATOMIC_H */