// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	arch_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
	[0 ... (NR_LOCKS - 1)] = {
		.lock = __ARCH_SPIN_LOCK_UNLOCKED,
	},
};

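/*
 * Hash the atomic64_t's address down to one of the NR_LOCKS locks.
 * The low L1_CACHE_SHIFT bits are discarded first, so variables that
 * share a cacheline also share a lock; higher-order address bits are
 * then XOR-folded in to spread allocations across the lock array.
 */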
static inline arch_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}

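/*
 * Even a plain read must take the lock: without 64-bit atomic loads,
 * an unlocked read could observe a torn value while another CPU is
 * part-way through updating the two halves of the counter.
 */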
s64 generic_atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	arch_spinlock_t *lock = lock_addr(v);
	s64 val;

	local_irq_save(flags);
	arch_spin_lock(lock);
	val = v->counter;
	arch_spin_unlock(lock);
	local_irq_restore(flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_read);

void generic_atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	arch_spinlock_t *lock = lock_addr(v);

	local_irq_save(flags);
	arch_spin_lock(lock);
	v->counter = i;
	arch_spin_unlock(lock);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(generic_atomic64_set);

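/*
 * Template for the void read-modify-write operations: "c_op" is a C
 * compound-assignment operator (e.g. +=) applied to the counter
 * under the hashed lock, with interrupts disabled.
 */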
#define ATOMIC64_OP(op, c_op)						\
void generic_atomic64_##op(s64 a, atomic64_t *v)			\
{									\
	unsigned long flags;						\
	arch_spinlock_t *lock = lock_addr(v);				\
									\
	local_irq_save(flags);						\
	arch_spin_lock(lock);						\
	v->counter c_op a;						\
	arch_spin_unlock(lock);						\
	local_irq_restore(flags);					\
}									\
EXPORT_SYMBOL(generic_atomic64_##op);

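/*
 * As ATOMIC64_OP, but the generated function returns the new value
 * of the counter after the operation has been applied.
 */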
#define ATOMIC64_OP_RETURN(op, c_op)					\
s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	arch_spinlock_t *lock = lock_addr(v);				\
	s64 val;							\
									\
	local_irq_save(flags);						\
	arch_spin_lock(lock);						\
	val = (v->counter c_op a);					\
	arch_spin_unlock(lock);						\
	local_irq_restore(flags);					\
	return val;							\
}									\
EXPORT_SYMBOL(generic_atomic64_##op##_return);

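/*
 * As ATOMIC64_OP, but the generated function returns the value the
 * counter held before the operation was applied.
 */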
#define ATOMIC64_FETCH_OP(op, c_op)					\
s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v)			\
{									\
	unsigned long flags;						\
	arch_spinlock_t *lock = lock_addr(v);				\
	s64 val;							\
									\
	local_irq_save(flags);						\
	arch_spin_lock(lock);						\
	val = v->counter;						\
	v->counter c_op a;						\
	arch_spin_unlock(lock);						\
	local_irq_restore(flags);					\
	return val;							\
}									\
EXPORT_SYMBOL(generic_atomic64_fetch_##op);

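/*
 * add and sub are instantiated with all three variants; the bitwise
 * and/or/xor ops below have no _return form in the atomic64 API, so
 * they get only the base and fetch variants.
 */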
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP

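/*
 * Decrement the counter only if the result would be non-negative.
 * The decremented value is returned in either case, so a negative
 * return tells the caller the counter was left unmodified.
 */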
s64 generic_atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	arch_spinlock_t *lock = lock_addr(v);
	s64 val;

	local_irq_save(flags);
	arch_spin_lock(lock);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	arch_spin_unlock(lock);
	local_irq_restore(flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_dec_if_positive);

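/*
 * Set the counter to @n only if it currently equals @o.  The value
 * observed under the lock is returned either way; callers compare
 * the return value against @o to detect success.
 */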
s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
	unsigned long flags;
	arch_spinlock_t *lock = lock_addr(v);
	s64 val;

	local_irq_save(flags);
	arch_spin_lock(lock);
	val = v->counter;
	if (val == o)
		v->counter = n;
	arch_spin_unlock(lock);
	local_irq_restore(flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_cmpxchg);

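/*
 * Unconditionally replace the counter with @new, returning the value
 * it held beforehand.
 */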
s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
{
	unsigned long flags;
	arch_spinlock_t *lock = lock_addr(v);
	s64 val;

	local_irq_save(flags);
	arch_spin_lock(lock);
	val = v->counter;
	v->counter = new;
	arch_spin_unlock(lock);
	local_irq_restore(flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_xchg);

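/*
 * Add @a to the counter unless it currently equals @u.  Returns the
 * old value, so the addition took place iff the return value != @u.
 */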
s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	unsigned long flags;
	arch_spinlock_t *lock = lock_addr(v);
	s64 val;

	local_irq_save(flags);
	arch_spin_lock(lock);
	val = v->counter;
	if (val != u)
		v->counter += a;
	arch_spin_unlock(lock);
	local_irq_restore(flags);

	return val;
}
EXPORT_SYMBOL(generic_atomic64_fetch_add_unless);