/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC64_64_H
#define _ASM_X86_ATOMIC64_64_H

#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>

/* The 64-bit atomic type */

#define ATOMIC64_INIT(i)	{ (i) }

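/* Atomically read the 64-bit counter: a plain volatile load, no barrier implied. */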
static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
	return __READ_ONCE((v)->counter);
}

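/* Atomically set the counter to @i: a plain volatile store, no barrier implied. */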
static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	__WRITE_ONCE(v->counter, i);
}

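/*
 * Atomically add @i to @v; no return value.  The "er" constraint allows
 * either a register or a sign-extended 32-bit immediate.
 */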
static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "addq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter) : "memory");
}

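/* Atomically subtract @i from @v; no return value. */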
static __always_inline void arch_atomic64_sub(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "subq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter) : "memory");
}

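/* Atomically subtract @i from @v and return true iff the result is zero. */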
static __always_inline bool arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, e, "er", i);
}
#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test

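/* Atomically increment @v by 1. */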
static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "incq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter) : "memory");
}
#define arch_atomic64_inc arch_atomic64_inc

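/* Atomically decrement @v by 1. */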
static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "decq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter) : "memory");
}
#define arch_atomic64_dec arch_atomic64_dec

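/* Atomically decrement @v and return true iff the result is zero. */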
static __always_inline bool arch_atomic64_dec_and_test(atomic64_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, e);
}
#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test

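/* Atomically increment @v and return true iff the result is zero. */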
static __always_inline bool arch_atomic64_inc_and_test(atomic64_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, e);
}
#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test

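/* Atomically add @i to @v and return true iff the result is negative. */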
static __always_inline bool arch_atomic64_add_negative(s64 i, atomic64_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, s, "er", i);
}
#define arch_atomic64_add_negative arch_atomic64_add_negative

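/* Atomically add @i to @v and return the new value, using XADD. */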
static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
	return i + xadd(&v->counter, i);
}
#define arch_atomic64_add_return arch_atomic64_add_return

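/* Subtract-and-return is an add-and-return of the negated value. */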
#define arch_atomic64_sub_return(i, v) arch_atomic64_add_return(-(i), v)

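/* Atomically add @i to @v and return the old value. */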
static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
	return xadd(&v->counter, i);
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add

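/* Fetch-and-subtract is a fetch-and-add of the negated value. */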
#define arch_atomic64_fetch_sub(i, v) arch_atomic64_fetch_add(-(i), v)

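/* If @v equals @old, atomically set it to @new; return the value that was read. */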
static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg

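/*
 * Like cmpxchg, but return true on success; on failure, *@old is updated
 * with the value actually found in @v.
 */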
static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
	return arch_try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg

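/* Unconditionally exchange @v with @new and return the old value. */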
static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
{
	return arch_xchg(&v->counter, new);
}
#define arch_atomic64_xchg arch_atomic64_xchg

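/* Atomically AND @i into @v; no return value. */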
static __always_inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "andq %1,%0"
			: "+m" (v->counter)
			: "er" (i)
			: "memory");
}

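/* Atomically AND @i into @v and return the old value (try_cmpxchg loop). */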
static __always_inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
	return val;
}
#define arch_atomic64_fetch_and arch_atomic64_fetch_and

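/* Atomically OR @i into @v; no return value. */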
static __always_inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "orq %1,%0"
			: "+m" (v->counter)
			: "er" (i)
			: "memory");
}

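/* Atomically OR @i into @v and return the old value (try_cmpxchg loop). */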
static __always_inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
	return val;
}
#define arch_atomic64_fetch_or arch_atomic64_fetch_or

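/* Atomically XOR @i into @v; no return value. */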
static __always_inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "xorq %1,%0"
			: "+m" (v->counter)
			: "er" (i)
			: "memory");
}

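/* Atomically XOR @i into @v and return the old value (try_cmpxchg loop). */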
static __always_inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
	return val;
}
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor

#endif /* _ASM_X86_ATOMIC64_64_H */