xref: /linux/arch/parisc/include/asm/atomic.h (revision 93df8a1ed6231727c5db94a80b1a6bd5ee67cec3)
/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
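
/*
 * Illustrative note (editor's sketch, not part of the original header):
 * the hash divides the address by L1_CACHE_BYTES before masking, so all
 * atomics that share a cacheline map to the same lock.  Assuming, say,
 * L1_CACHE_BYTES == 64 and the hypothetical addresses below,
 *
 *	atomic_t *a = (atomic_t *) 0x1000;
 *	atomic_t *b = (atomic_t *) 0x1020;	// same 64-byte line
 *
 * ATOMIC_HASH(a) and ATOMIC_HASH(b) both pick slot (0x1000/64) & 3, so
 * updates to neighbouring counters serialise on one arch_spinlock_t.
 */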

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);			\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
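/* On a uniprocessor build there is no other CPU to race with, so
 * masking local interrupts is enough on its own (editor's note). */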
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
	return ACCESS_ONCE((v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
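
/*
 * Usage note (editor's sketch, not from the original header): the generic
 * <linux/atomic.h> of this era is expected to wrap the helper above
 * roughly as
 *
 *	static inline int atomic_add_unless(atomic_t *v, int a, int u)
 *	{
 *		return __atomic_add_unless(v, a, u) != u;
 *	}
 *
 * i.e. callers see "did the add happen?", while the old value stays
 * internal to this helper.
 */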

#define ATOMIC_OP(op, c_op)						\
static __inline__ void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static __inline__ int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
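
/*
 * For reference (derived from the macros above, not new API), the two
 * ATOMIC_OPS() expansions generate:
 *
 *	void atomic_add(int i, atomic_t *v);
 *	int  atomic_add_return(int i, atomic_t *v);
 *	void atomic_sub(int i, atomic_t *v);
 *	int  atomic_sub_return(int i, atomic_t *v);
 *
 * each one a read-modify-write protected by the hashed spinlock.
 */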

#define atomic_inc(v)	(atomic_add(   1,(v)))
#define atomic_dec(v)	(atomic_add(  -1,(v)))

#define atomic_inc_return(v)	(atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)

#define ATOMIC_INIT(i)	{ (i) }
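
/*
 * Minimal usage sketch (editor's example; object_release() is a
 * hypothetical cleanup hook, not part of this header):
 *
 *	static atomic_t users = ATOMIC_INIT(1);
 *
 *	atomic_inc(&users);			// take a reference
 *	if (atomic_dec_and_test(&users))	// drop it; true when count hits 0
 *		object_release();
 */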

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

#define ATOMIC64_OP(op, c_op)						\
static __inline__ void atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC64_OP_RETURN(op, c_op)					\
static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_OPS(op, c_op) ATOMIC64_OP(op, c_op) ATOMIC64_OP_RETURN(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return ACCESS_ONCE((v)->counter);
}

#define atomic64_inc(v)		(atomic64_add(   1,(v)))
#define atomic64_dec(v)		(atomic64_add(  -1,(v)))

#define atomic64_inc_return(v)		(atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not @u.
 * Returns non-zero if the add was performed, zero if @v was already @u.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;
	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
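
/*
 * Behaviour example (editor's note, derived from the loop above):
 * starting from v == 1, dec becomes 0, the cmpxchg stores it and 0 is
 * returned; starting from v == 0, dec is -1, nothing is stored and -1
 * is returned, so a negative result means "was not decremented".
 */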

#endif /* CONFIG_64BIT */


#endif /* _ASM_PARISC_ATOMIC_H_ */