xref: /linux/arch/parisc/include/asm/atomic.h (revision 9ffc93f203c18a70623f21950f1dd473c9ec48cd)
/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different spinlock.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
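
/* Illustrative sketch (comment only, not compiled): how ATOMIC_HASH maps an
 * address to a lock, assuming L1_CACHE_BYTES == 64; the example addresses
 * are hypothetical.  Objects in the same cacheline share a lock, and the
 * slot index wraps modulo ATOMIC_HASH_SIZE:
 *
 *	atomic_t a;	// say &a == 0x1000 -> (0x1000/64) & 3 == slot 0
 *	atomic_t b;	// say &b == 0x1004 -> same cacheline, slot 0 again
 *	atomic_t c;	// say &c == 0x1040 -> next cacheline,  slot 1
 */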

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);			\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

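/* Usage sketch (comment only, not compiled): every atomic read-modify-write
 * below follows the same pattern -- hash the object's address to a lock,
 * disable local interrupts, perform the plain C operation, then unlock.
 * "v" stands for any atomic_t pointer:
 *
 *	unsigned long flags;
 *	_atomic_spin_lock_irqsave(v, flags);
 *	v->counter += i;
 *	_atomic_spin_unlock_irqrestore(v, flags);
 */
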
/* This should get optimized out since it's never called.
** Or get a link error if xchg is used "wrong".
*/
extern void __xchg_called_with_bad_pointer(void);


/* __xchg8/32/64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __xchg8(char, char *);
extern unsigned long __xchg32(int, int *);
#ifdef CONFIG_64BIT
extern unsigned long __xchg64(unsigned long, unsigned long *);
#endif

/* The optimizer should get rid of this switch, since size is a constant. */
static __inline__ unsigned long
__xchg(unsigned long x, __volatile__ void *ptr, int size)
{
	switch(size) {
#ifdef CONFIG_64BIT
	case 8: return __xchg64(x, (unsigned long *) ptr);
#endif
	case 4: return __xchg32((int) x, (int *) ptr);
	case 1: return __xchg8((char) x, (char *) ptr);
	}
	__xchg_called_with_bad_pointer();
	return x;
}


/*
** REVISIT - Abandoned use of LDCW in xchg() for now:
** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
** o and while we are at it, could CONFIG_64BIT code use LDCD too?
**
**	if (__builtin_constant_p(x) && (x == NULL))
**		if (((unsigned long)p & 0xf) == 0)
**			return __ldcw(p);
*/
#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

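/* Usage sketch (comment only, not compiled): xchg() atomically stores a new
 * value and returns the old one, picking the operand width from the
 * pointed-to type.  "lock_var" is a hypothetical example variable:
 *
 *	int lock_var = 0;
 *	int was_set = xchg(&lock_var, 1);	// was_set == 0, lock_var == 1
 */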

#define __HAVE_ARCH_CMPXCHG	1

/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);

/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_);
extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_);

/* don't worry... the optimizer will get rid of most of this */
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
	switch(size) {
#ifdef CONFIG_64BIT
	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
	case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })

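/* Usage sketch (comment only, not compiled): the canonical compare-and-swap
 * retry loop built on cmpxchg().  "counter" is a hypothetical variable:
 *
 *	int old, new_val;
 *	do {
 *		old = counter;
 *		new_val = old + 1;
 *	} while (cmpxchg(&counter, old, new_val) != old);
 */
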
#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
				      unsigned long old,
				      unsigned long new_, int size)
{
	switch (size) {
#ifdef CONFIG_64BIT
	case 8:	return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
	case 4:	return __cmpxchg_u32(ptr, old, new_);
	default:
		return __cmpxchg_local_generic(ptr, old, new_, size);
	}
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic with respect to the
 * current CPU.  Always make them available.
 */
#define cmpxchg_local(ptr, o, n)				  	\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
			(unsigned long)(n), sizeof(*(ptr))))
#ifdef CONFIG_64BIT
#define cmpxchg64_local(ptr, o, n)					\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
  })
#else
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif

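/* Usage sketch (comment only, not compiled): cmpxchg_local() is only
 * guaranteed atomic against code running on the same CPU, e.g. for
 * per-CPU data; "pcnt" is a hypothetical pointer to a per-CPU counter:
 *
 *	long old = *pcnt;
 *	while (cmpxchg_local(pcnt, old, old + 1) != old)
 *		old = *pcnt;
 */
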
/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

/* It's possible to reduce all atomic operations to __atomic_add_return,
 * atomic_set and atomic_read (the latter is there only for consistency).
 */

static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
	return (*(volatile int *)&(v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}

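/* Usage sketch (comment only, not compiled): the classic "increment unless
 * zero" reference-count pattern; "refs" is a hypothetical atomic_t.  The
 * add happens only if refs was not 0, and the old value is returned:
 *
 *	if (__atomic_add_unless(&refs, 1, 0) != 0)
 *		... got a reference ...
 */
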
#define atomic_add(i,v)	((void)(__atomic_add_return( (i),(v))))
#define atomic_sub(i,v)	((void)(__atomic_add_return(-(i),(v))))
#define atomic_inc(v)	((void)(__atomic_add_return(   1,(v))))
#define atomic_dec(v)	((void)(__atomic_add_return(  -1,(v))))

#define atomic_add_return(i,v)	(__atomic_add_return( (i),(v)))
#define atomic_sub_return(i,v)	(__atomic_add_return(-(i),(v)))
#define atomic_inc_return(v)	(__atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(__atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)

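/* Usage sketch (comment only, not compiled): the usual "put" side of a
 * reference count; "refs" and "release_object" are hypothetical:
 *
 *	if (atomic_dec_and_test(&refs))
 *		release_object();	// we dropped the last reference
 */
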
#define ATOMIC_INIT(i)	((atomic_t) { (i) })

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

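/* Usage sketch (comment only, not compiled): callers that need memory
 * ordering around an inc/dec pair the operation with these barrier
 * macros; "v" is a hypothetical atomic_t:
 *
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&v);		// prior accesses are ordered before the dec
 */
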
#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })

static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
{
	s64 ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return (*(volatile long *)&(v)->counter);
}

#define atomic64_add(i,v)	((void)(__atomic64_add_return( ((s64)(i)),(v))))
#define atomic64_sub(i,v)	((void)(__atomic64_add_return(-((s64)(i)),(v))))
#define atomic64_inc(v)		((void)(__atomic64_add_return(   1,(v))))
#define atomic64_dec(v)		((void)(__atomic64_add_return(  -1,(v))))

#define atomic64_add_return(i,v)	(__atomic64_add_return( ((s64)(i)),(v)))
#define atomic64_sub_return(i,v)	(__atomic64_add_return(-((s64)(i)),(v)))
#define atomic64_inc_return(v)		(__atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(__atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v) 	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

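/* Usage sketch (comment only, not compiled): atomic64_inc_not_zero() is the
 * 64-bit "take a reference unless already dead" primitive; "refs64" is a
 * hypothetical atomic64_t:
 *
 *	if (atomic64_inc_not_zero(&refs64))
 *		... object is guaranteed live, reference taken ...
 */
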
#endif /* CONFIG_64BIT */


#endif /* _ASM_PARISC_ATOMIC_H_ */