xref: /linux/arch/x86/include/asm/atomic64_64.h (revision 8bf705d130396e69c04cd8e6e010244ad2ce71f4)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC64_64_H
#define _ASM_X86_ATOMIC64_64_H

#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>

/* The 64-bit atomic type */

#define ATOMIC64_INIT(i)	{ (i) }

/**
 * arch_atomic64_read - read atomic64 variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 * Doesn't imply a read memory barrier.
 */
static inline long arch_atomic64_read(const atomic64_t *v)
{
	return READ_ONCE((v)->counter);
}

/**
 * arch_atomic64_set - set atomic64 variable
 * @v: pointer to type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static inline void arch_atomic64_set(atomic64_t *v, long i)
{
	WRITE_ONCE(v->counter, i);
}
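
/*
 * Note (added for clarity): READ_ONCE()/WRITE_ONCE() only guarantee that the
 * 64-bit access itself is a single, untorn load or store (plain aligned
 * 64-bit accesses are atomic on x86-64); they add no memory barriers, as the
 * kernel-doc above says.  Callers that need ordering must add it themselves.
 */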

/**
 * arch_atomic64_add - add integer to atomic64 variable
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static __always_inline void arch_atomic64_add(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "addq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter));
}
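
/*
 * A note on the asm above (and the similar RMW ops below): LOCK_PREFIX from
 * <asm/alternative.h> emits the x86 "lock" prefix so the read-modify-write is
 * atomic with respect to other CPUs, and the "er" constraint allows @i to be
 * either a register or a 32-bit sign-extended immediate, which is what the
 * ADDQ/SUBQ encodings accept.
 */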

/**
 * arch_atomic64_sub - subtract integer from atomic64 variable
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */
static inline void arch_atomic64_sub(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "subq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter));
}

/**
 * arch_atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
}

/**
 * arch_atomic64_inc - increment atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1.
 */
static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "incq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter));
}

/**
 * arch_atomic64_dec - decrement atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "decq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter));
}

/**
 * arch_atomic64_dec_and_test - decrement and test
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
{
	GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
}
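
/*
 * Illustrative use, not part of this header: dec_and_test is the classic
 * refcount-release idiom.  Assuming a hypothetical object holding an
 * atomic64_t refcount, the thread that drops the last reference sees the
 * counter hit zero and is the one that frees the object:
 *
 *	if (arch_atomic64_dec_and_test(&obj->refcnt))
 *		free_obj(obj);
 *
 * (Kernel code would normally go through the generic atomic64_dec_and_test()
 * wrapper rather than the arch_ variant directly.)
 */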

/**
 * arch_atomic64_inc_and_test - increment and test
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
{
	GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
}

/**
 * arch_atomic64_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static inline bool arch_atomic64_add_negative(long i, atomic64_t *v)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
}

/**
 * arch_atomic64_add_return - add and return
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __always_inline long arch_atomic64_add_return(long i, atomic64_t *v)
{
	return i + xadd(&v->counter, i);
}

static inline long arch_atomic64_sub_return(long i, atomic64_t *v)
{
	return arch_atomic64_add_return(-i, v);
}

static inline long arch_atomic64_fetch_add(long i, atomic64_t *v)
{
	return xadd(&v->counter, i);
}

static inline long arch_atomic64_fetch_sub(long i, atomic64_t *v)
{
	return xadd(&v->counter, -i);
}

#define arch_atomic64_inc_return(v)  (arch_atomic64_add_return(1, (v)))
#define arch_atomic64_dec_return(v)  (arch_atomic64_sub_return(1, (v)))
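
/*
 * Return-value convention, sketched with hypothetical values (v starts at 1):
 *
 *	arch_atomic64_add_return(2, &v);	returns 3 (the new value)
 *	arch_atomic64_fetch_add(2, &v);		returns 3 (the old value), v is now 5
 *
 * i.e. the *_return operations hand back the updated counter, while the
 * fetch_* operations hand back the value the counter had before the update.
 */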

static inline long arch_atomic64_cmpxchg(atomic64_t *v, long old, long new)
{
	return arch_cmpxchg(&v->counter, old, new);
}

#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, long new)
{
	return try_cmpxchg(&v->counter, old, new);
}
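
/*
 * try_cmpxchg() returns true when the compare-and-exchange succeeded and, on
 * failure, stores the value it actually found into *old.  That is why the
 * loops below (add_unless, dec_if_positive, fetch_and/or/xor) never re-read
 * the counter themselves: a failed attempt already refreshes the expected
 * value for the next iteration.
 */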

static inline long arch_atomic64_xchg(atomic64_t *v, long new)
{
	return xchg(&v->counter, new);
}

/**
 * arch_atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns true if the addition was done, or false if @v was already @u.
 */
static inline bool arch_atomic64_add_unless(atomic64_t *v, long a, long u)
{
	s64 c = arch_atomic64_read(v);
	do {
		if (unlikely(c == u))
			return false;
	} while (!arch_atomic64_try_cmpxchg(v, &c, c + a));
	return true;
}

#define arch_atomic64_inc_not_zero(v) arch_atomic64_add_unless((v), 1, 0)
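
/*
 * Illustrative use, with a hypothetical lookup path: inc_not_zero is the
 * usual way to take a reference on an object whose refcount may concurrently
 * drop to zero.  The caller may only touch the object if the increment
 * actually happened:
 *
 *	if (!arch_atomic64_inc_not_zero(&obj->refcnt))
 *		return NULL;		object is already being torn down
 */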

/*
 * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 dec, c = arch_atomic64_read(v);
	do {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
	} while (!arch_atomic64_try_cmpxchg(v, &c, dec));
	return dec;
}
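
/*
 * Illustrative use (hypothetical, semaphore-like "take a slot but never go
 * negative"): a negative return value means the counter was already 0 and
 * was left untouched.
 *
 *	if (arch_atomic64_dec_if_positive(&slots) < 0)
 *		wait_for_free_slot();	nothing was taken
 */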

static inline void arch_atomic64_and(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "andq %1,%0"
			: "+m" (v->counter)
			: "er" (i)
			: "memory");
}

static inline long arch_atomic64_fetch_and(long i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
	return val;
}
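
/*
 * The fetch_and/fetch_or/fetch_xor variants fall back to a try_cmpxchg loop
 * because, unlike XADD, the x86 LOCK AND/OR/XOR instructions only update
 * memory and flags; they do not return the previous value, so cmpxchg is the
 * way to get "old value plus bitwise op" as one atomic step.
 */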

static inline void arch_atomic64_or(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "orq %1,%0"
			: "+m" (v->counter)
			: "er" (i)
			: "memory");
}

static inline long arch_atomic64_fetch_or(long i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
	return val;
}

static inline void arch_atomic64_xor(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "xorq %1,%0"
			: "+m" (v->counter)
			: "er" (i)
			: "memory");
}

static inline long arch_atomic64_fetch_xor(long i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
	return val;
}

#endif /* _ASM_X86_ATOMIC64_64_H */