xref: /linux/lib/atomic64.c (revision 8bc7c5e525584903ea83332e18a2118ed3b1985e)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
	[0 ... (NR_LOCKS - 1)] = {
		.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
	},
};

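/*
 * Hash an atomic64_t's address to one of the spinlocks above: drop the
 * within-cacheline bits, fold in higher address bits, and mask the result
 * into the NR_LOCKS-entry array.
 */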
static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}

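/*
 * Read the 64-bit value under the hashed lock, so the load cannot observe
 * a torn update from a concurrent writer on a 32-bit machine.
 */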
s64 generic_atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_read);

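/*
 * Store a new 64-bit value under the same hashed lock, so readers using
 * generic_atomic64_read() see either the old or the new value, never a mix.
 */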
void generic_atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(generic_atomic64_set);

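/*
 * Template for the void read-modify-write operations: apply c_op to the
 * counter under the hashed lock and discard the result.
 */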
#define ATOMIC64_OP(op, c_op)						\
void generic_atomic64_##op(s64 a, atomic64_t *v)			\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
}									\
EXPORT_SYMBOL(generic_atomic64_##op);

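/*
 * Template for the *_return variants: apply c_op under the lock and hand
 * back the new value of the counter.
 */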
#define ATOMIC64_OP_RETURN(op, c_op)					\
s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	s64 val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = (v->counter c_op a);					\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(generic_atomic64_##op##_return);

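/*
 * Template for the fetch_* variants: apply c_op under the lock and hand
 * back the value the counter held before the operation.
 */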
#define ATOMIC64_FETCH_OP(op, c_op)					\
s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v)			\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	s64 val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = v->counter;						\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(generic_atomic64_fetch_##op);

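/*
 * add and sub get all three forms: the void op, the *_return op and the
 * fetch_* op.
 */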
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

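/*
 * The bitwise ops have no *_return form, so regenerate ATOMIC64_OPS with
 * only the void and fetch_* templates.
 */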
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP

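/*
 * Decrement the counter only if the result stays non-negative.  Returns
 * the decremented value, which is negative when no store was done.
 */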
s64 generic_atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_dec_if_positive);

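/*
 * Compare-and-exchange: store @n only if the counter still equals @o.
 * Returns the value found, so success is indicated by a return equal to @o.
 */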
s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_cmpxchg);
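/*
 * Illustrative sketch, not part of this file: callers typically wrap
 * cmpxchg in a retry loop.  For example, "decrement only if non-zero"
 * could be built like this (the names and flow below are an assumption
 * about caller code, not something this file provides):
 *
 *	s64 old = generic_atomic64_read(v);
 *
 *	for (;;) {
 *		s64 prev;
 *
 *		if (old == 0)
 *			break;
 *		prev = generic_atomic64_cmpxchg(v, old, old - 1);
 *		if (prev == old)
 *			break;
 *		old = prev;
 *	}
 */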

/*
 * Unconditionally replace the counter with @new and return the previous
 * value.
 */
s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_xchg);

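/*
 * Add @a to the counter unless it currently equals @u.  Returns the old
 * value either way, so the caller can tell whether the add happened.
 */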
s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val != u)
		v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);

	return val;
}
EXPORT_SYMBOL(generic_atomic64_fetch_add_unless);