/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
	[0 ... (NR_LOCKS - 1)] = {
		.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
	},
};

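/*
 * Illustration (assuming L1_CACHE_BYTES == 64; it is arch-dependent):
 * the padding makes each array entry a full cache line, so the table
 * costs 16 * 64 = 1024 bytes and no two locks share a line.  CPUs
 * spinning on different locks therefore never bounce the same cache
 * line between them (no false sharing).
 */
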
static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}

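/*
 * Worked example (assuming L1_CACHE_SHIFT == 6): for an atomic64_t at
 * address 0x1040, lock_addr() computes
 *
 *	addr = 0x1040 >> 6;			// 0x41, the cache-line number
 *	addr ^= (addr >> 8) ^ (addr >> 16);	// still 0x41, high bits zero
 *	index = addr & (NR_LOCKS - 1);		// 1
 *
 * Variables in the same cache line always map to the same lock, while
 * variables in different lines are spread across all 16 locks, so
 * contention stays low when few CPUs touch few variables.
 */
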
long long atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_read);

void atomic64_set(atomic64_t *v, long long i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);

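/*
 * Note why even a plain read or write takes the lock: on a 32-bit
 * machine a 64-bit access is two separate word accesses, so an
 * unlocked reader could observe a torn value, e.g. the new low word
 * of a concurrent atomic64_set() paired with the old high word.
 */
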
#define ATOMIC64_OP(op, c_op)						\
void atomic64_##op(long long a, atomic64_t *v)				\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
}									\
EXPORT_SYMBOL(atomic64_##op);

#define ATOMIC64_OP_RETURN(op, c_op)					\
long long atomic64_##op##_return(long long a, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	long long val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = (v->counter c_op a);					\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(atomic64_##op##_return);

#define ATOMIC64_FETCH_OP(op, c_op)					\
long long atomic64_fetch_##op(long long a, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	long long val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = v->counter;						\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(atomic64_fetch_##op);

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

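/*
 * For reference, ATOMIC64_OPS(add, +=) above expands to three exported
 * functions: atomic64_add(), atomic64_add_return() and
 * atomic64_fetch_add().  The generated atomic64_add_return(), for
 * example, is equivalent to:
 *
 *	long long atomic64_add_return(long long a, atomic64_t *v)
 *	{
 *		unsigned long flags;
 *		raw_spinlock_t *lock = lock_addr(v);
 *		long long val;
 *
 *		raw_spin_lock_irqsave(lock, flags);
 *		val = (v->counter += a);
 *		raw_spin_unlock_irqrestore(lock, flags);
 *		return val;
 *	}
 */
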
#undef ATOMIC64_OPS
/*
 * The bitwise ops have no *_return variants in the atomic64 API, so
 * this group only generates the void and fetch forms.
 */
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

long long atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);

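/*
 * Usage sketch (hypothetical caller; "budget" is illustrative):
 *
 *	if (atomic64_dec_if_positive(&budget) < 0)
 *		return -EBUSY;		// was already 0, nothing consumed
 *
 * The return value is the counter minus one even when the store is
 * skipped, so a negative result means the decrement did not happen.
 */
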
long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);

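/*
 * Minimal sketch of the usual compare-and-swap retry loop built on
 * atomic64_cmpxchg() (hypothetical caller; "v" and "CEILING" are
 * illustrative):
 *
 *	long long old, new;
 *
 *	do {
 *		old = atomic64_read(&v);
 *		new = min(old + 1LL, CEILING);
 *	} while (atomic64_cmpxchg(&v, old, new) != old);
 *
 * Because cmpxchg returns the value it found, the loop retries exactly
 * when another CPU modified the counter between the read and the
 * compare-and-swap.
 */
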
long long atomic64_xchg(atomic64_t *v, long long new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_xchg);

int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	int ret = 0;

	raw_spin_lock_irqsave(lock, flags);
	if (v->counter != u) {
		v->counter += a;
		ret = 1;
	}
	raw_spin_unlock_irqrestore(lock, flags);
	return ret;
}
EXPORT_SYMBOL(atomic64_add_unless);
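
/*
 * atomic64_add_unless() underlies the conditional-increment helpers:
 * the generic header defines atomic64_inc_not_zero(v) as
 * atomic64_add_unless((v), 1LL, 0LL).  Typical use (hypothetical
 * caller; "obj->refs" is illustrative):
 *
 *	if (!atomic64_inc_not_zero(&obj->refs))
 *		return NULL;	// object already being torn down
 */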