/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#define atomic_read(v)  ((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) (((v)->counter) = (i))

#ifdef CONFIG_ARC_STAR_9000923308
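
/*
 * With this workaround enabled, a failing SCOND is retried only after an
 * exponentially growing busy-wait delay.  A rough C sketch of the retry
 * path the macros below splice into every LLOCK/SCOND loop (the names
 * here are illustrative only):
 *
 *	unsigned int delay = 1, tmp;
 * again:
 *	val = LLOCK(&v->counter);
 *	val = <op>(val, i);
 *	if (SCOND(&v->counter, val) succeeded)	// Z flag set
 *		goto done;
 *	for (tmp = delay; tmp; tmp--)		// spin for 'delay' iterations
 *		;
 *	delay *= 2;				// rol: back off harder next time
 *	goto again;
 * done:
 */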

#define SCOND_FAIL_RETRY_VAR_DEF						\
	unsigned int delay = 1, tmp;						\

#define SCOND_FAIL_RETRY_ASM							\
	"	bz	4f			\n"				\
	"   ; --- scond fail delay ---		\n"				\
	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */	\
	"2: 	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */	\
	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */		\
	"	rol	%[delay], %[delay]	\n"	/* delay *= 2 */	\
	"	b	1b			\n"	/* start over */	\
	"4: ; --- success ---			\n"				\

#define SCOND_FAIL_RETRY_VARS							\
	  ,[delay] "+&r" (delay),[tmp] "=&r"	(tmp)				\

#else	/* !CONFIG_ARC_STAR_9000923308 */

#define SCOND_FAIL_RETRY_VAR_DEF

#define SCOND_FAIL_RETRY_ASM							\
	"	bnz     1b			\n"				\

#define SCOND_FAIL_RETRY_VARS

#endif

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
	SCOND_FAIL_RETRY_VAR_DEF					\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"						\n"		\
	SCOND_FAIL_RETRY_ASM						\
									\
	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
	  SCOND_FAIL_RETRY_VARS						\
	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
	  [i]	"ir"	(i)						\
	: "cc");							\
}									\

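/*
 * For illustration, with the plain retry path (CONFIG_ARC_STAR_9000923308
 * disabled), ATOMIC_OP(add, +=, add) expands to roughly:
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned int val;
 *
 *		__asm__ __volatile__(
 *		"1:	llock   %[val], [%[ctr]]	\n"
 *		"	add     %[val], %[val], %[i]	\n"
 *		"	scond   %[val], [%[ctr]]	\n"
 *		"	bnz     1b			\n"
 *		: [val] "=&r" (val)
 *		: [ctr] "r"   (&v->counter),
 *		  [i]   "ir"  (i)
 *		: "cc");
 *	}
 */
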
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int val;						\
	SCOND_FAIL_RETRY_VAR_DEF					\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"						\n"		\
	SCOND_FAIL_RETRY_ASM						\
									\
	: [val]	"=&r"	(val)						\
	  SCOND_FAIL_RETRY_VARS						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return val;							\
}

#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

/* violating atomic_xxx API locking protocol in UP for optimization's sake */
#define atomic_set(v, i) (((v)->counter) = (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. the single ST of atomic_set()) doesn't clobber an
	 * "emulated" (lock-protected read-modify-write) atomic insn sequence.
	 *
	 * Thus atomic_set(), despite being 1 insn (and seemingly atomic),
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	v->counter = i;
	atomic_ops_unlock(flags);
}
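
/*
 * Concretely, if atomic_set() used a plain unlocked store here, that store
 * could be lost to an emulated read-modify-write running concurrently on
 * another CPU, e.g.:
 *
 *	CPU0: atomic_add() under lock		CPU1: unlocked atomic_set()
 *	temp = v->counter;
 *						v->counter = i;
 *	v->counter = temp + a;			// CPU1's store is overwritten
 */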

#endif

/*
 * Non-hardware-assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}
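
/*
 * For illustration, ATOMIC_OP_RETURN(add, +=, add) expands to roughly:
 *
 *	static inline int atomic_add_return(int i, atomic_t *v)
 *	{
 *		unsigned long flags;
 *		unsigned long temp;
 *
 *		atomic_ops_lock(flags);
 *		temp = v->counter;
 *		temp += i;
 *		v->counter = temp;
 *		atomic_ops_unlock(flags);
 *
 *		return temp;
 *	}
 */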

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

ATOMIC_OP(and, &=, and)
ATOMIC_OP(andnot, &= ~, bic)
ATOMIC_OP(or, |=, or)
ATOMIC_OP(xor, ^=, xor)
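
/*
 * The instantiations above generate atomic_add(), atomic_sub(),
 * atomic_add_return(), atomic_sub_return(), atomic_and(), atomic_andnot(),
 * atomic_or() and atomic_xor(), using whichever ATOMIC_OP/ATOMIC_OP_RETURN
 * flavour (LLOCK/SCOND or lock-based) was selected above.
 */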

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
#undef SCOND_FAIL_RETRY_VAR_DEF
#undef SCOND_FAIL_RETRY_ASM
#undef SCOND_FAIL_RETRY_VARS

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
#define __atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
		c = old;						\
									\
	smp_mb();							\
									\
	c;								\
})
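
/*
 * Typically reached via the generic atomic_add_unless() wrapper or the
 * atomic_inc_not_zero() helper below, e.g. to take a reference only while
 * an object still has users ('obj'/'refcnt' are illustrative names):
 *
 *	if (atomic_inc_not_zero(&obj->refcnt))
 *		use(obj);	// reference successfully taken
 */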

#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

#define atomic_inc(v)			atomic_add(1, v)
#define atomic_dec(v)			atomic_sub(1, v)

#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)

#define ATOMIC_INIT(i)			{ (i) }

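/* 64-bit atomics fall back to the generic software implementation (lib/atomic64.c) */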
#include <asm-generic/atomic64.h>

#endif	/* !__ASSEMBLY__ */

#endif	/* _ASM_ARC_ATOMIC_H */