xref: /linux/arch/parisc/include/asm/atomic.h (revision 3932b9ca55b0be314a36d3e84faff3e823c081f5)
/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
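
/*
 * A sketch of the hashing, assuming L1_CACHE_BYTES == 64 (it is
 * configuration dependent): counters on the same cacheline share a lock,
 * while counters on different cachelines usually hash to different locks.
 *
 *	ATOMIC_HASH(0x1000) == ATOMIC_HASH(0x1038)	- same cacheline, same lock
 *	ATOMIC_HASH(0x1000) != ATOMIC_HASH(0x1040)	- next cacheline, next lock
 */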

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);			\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)
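
/*
 * A sketch of the pattern the helpers below follow (v is any atomic_t
 * or atomic64_t pointer):
 *
 *	unsigned long flags;
 *
 *	_atomic_spin_lock_irqsave(v, flags);
 *	... read-modify-write v->counter ...
 *	_atomic_spin_unlock_irqrestore(v, flags);
 */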


#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

/* It's possible to reduce all atomic operations to
 * __atomic_add_return, atomic_set and atomic_read (the latter
 * is there only for consistency).
 */

static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
	return (*(volatile int *)&(v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
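
/*
 * Example sketch for atomic_cmpxchg(): take ownership of a slot only if
 * it is still free.  SLOT_FREE, SLOT_BUSY and 'slot' are assumed names,
 * not defined by this header.
 *
 *	if (atomic_cmpxchg(&slot->state, SLOT_FREE, SLOT_BUSY) == SLOT_FREE)
 *		... we now own the slot ...
 */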

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
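
/*
 * A sketch of how the generic wrappers in <linux/atomic.h> are built on
 * this helper (wrapper bodies shown for illustration only):
 *
 *	atomic_add_unless(v, a, u)  ->  __atomic_add_unless(v, a, u) != u
 *	atomic_inc_not_zero(v)      ->  atomic_add_unless(v, 1, 0)
 */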


#define atomic_add(i,v)	((void)(__atomic_add_return(        (i),(v))))
#define atomic_sub(i,v)	((void)(__atomic_add_return(-((int) (i)),(v))))
#define atomic_inc(v)	((void)(__atomic_add_return(   1,(v))))
#define atomic_dec(v)	((void)(__atomic_add_return(  -1,(v))))

#define atomic_add_return(i,v)	(__atomic_add_return( (i),(v)))
#define atomic_sub_return(i,v)	(__atomic_add_return(-(i),(v)))
#define atomic_inc_return(v)	(__atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(__atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)
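
/*
 * Example sketch: the usual reference-put built on atomic_dec_and_test()
 * above ('obj', 'refcnt' and free_obj() are assumed names):
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		free_obj(obj);
 */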

#define ATOMIC_INIT(i)	{ (i) }
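
/*
 * Example sketch: compile-time initialization ('nr_users' is an assumed
 * name):
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 */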

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
{
	s64 ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return (*(volatile long *)&(v)->counter);
}

#define atomic64_add(i,v)	((void)(__atomic64_add_return( ((s64)(i)),(v))))
#define atomic64_sub(i,v)	((void)(__atomic64_add_return(-((s64)(i)),(v))))
#define atomic64_inc(v)		((void)(__atomic64_add_return(   1,(v))))
#define atomic64_dec(v)		((void)(__atomic64_add_return(  -1,(v))))

#define atomic64_add_return(i,v)	(__atomic64_add_return( ((s64)(i)),(v)))
#define atomic64_sub_return(i,v)	(__atomic64_add_return(-((s64)(i)),(v)))
#define atomic64_inc_return(v)		(__atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(__atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v) 	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the add was done (that is, @v was not @u),
 * zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
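
/*
 * Example sketch: take a reference only while the object is still live
 * ('obj' and its 'refcnt' member are assumed names):
 *
 *	if (!atomic64_inc_not_zero(&obj->refcnt))
 *		return NULL;	... object already being torn down ...
 */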

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;
	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
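
/*
 * Example sketch: consume one token from a 64-bit budget without letting
 * it go negative ('tokens' is an assumed variable name):
 *
 *	if (atomic64_dec_if_positive(&tokens) < 0)
 *		... nothing left; the counter was not decremented ...
 */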

#endif /* CONFIG_64BIT */


#endif /* _ASM_PARISC_ATOMIC_H_ */