/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants,
 *       you need to test for the cmpxchg8b (X86_FEATURE_CX8) feature
 *       in boot_cpu_data.
 */

/*
 * CMPXCHG8B only writes to the target if we had the previous
 * value in registers; otherwise it acts as a read and gives us the
 * "new previous" value.  That is why there is a loop.  Preloading
 * EDX:EAX is a performance optimization: in the common case it means
 * we need only one locked operation.
 *
 * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
 * least an FPU save and/or %cr0.ts manipulation.
 *
 * cmpxchg8b must be used with the lock prefix here to allow the
 * instruction to be executed atomically.  We need the reader side to
 * see a coherent 64-bit value.
 */
static inline void set_64bit(volatile u64 *ptr, u64 value)
{
	u32 low  = value;
	u32 high = value >> 32;
	u64 prev = *ptr;

	asm volatile("\n1:\t"
		     LOCK_PREFIX "cmpxchg8b %0\n\t"
		     "jnz 1b"
		     : "=m" (*ptr), "+A" (prev)
		     : "b" (low), "c" (high)
		     : "memory");
}
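
/*
 * Illustrative sketch, not part of the original header: publishing a
 * 64-bit value that a lockless reader may load concurrently.  A plain
 * 64-bit store is split into two 32-bit stores on these CPUs, so a
 * racing reader could observe a torn value; set_64bit() prevents
 * that.  'example_publish_count' is a hypothetical helper.
 */
static inline void example_publish_count(volatile u64 *counter, u64 val)
{
	set_64bit(counter, val);	/* single atomic 64-bit store */
}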

#ifdef CONFIG_X86_CMPXCHG64
#define arch_cmpxchg64(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
					 (unsigned long long)(n)))
#define arch_cmpxchg64_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
					       (unsigned long long)(n)))
#define arch_try_cmpxchg64(ptr, po, n)					\
	__try_cmpxchg64((ptr), (unsigned long long *)(po), \
			(unsigned long long)(n))
#endif
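
/*
 * Illustrative sketch, not part of the original header: callers
 * typically loop until the compare-and-exchange succeeds, e.g.:
 *
 *	u64 old, new;
 *
 *	do {
 *		old = READ_ONCE(*ptr);
 *		new = old + 1;
 *	} while (arch_cmpxchg64(ptr, old, new) != old);
 *
 * arch_cmpxchg64() returns the prior value of *ptr, so equality with
 * 'old' signals success.
 */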

/*
 * Compare *ptr against old; iff they match, store new.  Returns the
 * value found at *ptr in either case, so success == (return == old).
 */
static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile(LOCK_PREFIX "cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}

/*
 * Like __cmpxchg64(), but without the lock prefix: atomic only with
 * respect to the local CPU (e.g. vs. interrupts), for data that no
 * other CPU touches.
 */
static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile("cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}

/*
 * Returns true if the exchange happened.  On failure, *pold is
 * updated with the value currently in memory, ready for a retry.
 */
static inline bool __try_cmpxchg64(volatile u64 *ptr, u64 *pold, u64 new)
{
	bool success;
	u64 old = *pold;
	asm volatile(LOCK_PREFIX "cmpxchg8b %[ptr]"
		     CC_SET(z)
		     : CC_OUT(z) (success),
		       [ptr] "+m" (*ptr),
		       "+A" (old)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32))
		     : "memory");

	if (unlikely(!success))
		*pold = old;
	return success;
}
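
/*
 * Illustrative sketch, not part of the original header: the usual
 * consumer of a try_cmpxchg-style primitive is a read/compute/retry
 * loop.  On failure __try_cmpxchg64() has already refreshed 'old'
 * from memory, so the loop need not re-read *ptr.  'example_add64'
 * is a hypothetical helper.
 */
static inline u64 example_add64(volatile u64 *ptr, u64 inc)
{
	u64 old = *ptr;

	while (!__try_cmpxchg64(ptr, &old, old + inc))
		;	/* 'old' now holds the fresh value; retry */

	return old;	/* value before the successful update */
}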

#ifndef CONFIG_X86_CMPXCHG64
/*
 * We are building a kernel capable of running on the 80386 and 80486,
 * which lack cmpxchg8b, so the instruction may have to be emulated.
 */

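/*
 * alternative_io() patches the call site at boot: on CPUs that have
 * X86_FEATURE_CX8 the "call cmpxchg8b_emu" below is rewritten into
 * the native (optionally locked) cmpxchg8b, so only pre-CX8 machines
 * take the emulation path.  cmpxchg8b_emu expects the pointer in
 * %esi, with old in %edx:%eax and new in %ecx:%ebx, just like the
 * real instruction.
 */
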
#define arch_cmpxchg64(ptr, o, n)				\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io(LOCK_PREFIX_HERE				\
			"call cmpxchg8b_emu",			\
			"lock; cmpxchg8b (%%esi)",		\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new>>32))		\
		       : "memory");				\
	__ret; })


#define arch_cmpxchg64_local(ptr, o, n)				\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io("call cmpxchg8b_emu",			\
		       "cmpxchg8b (%%esi)",			\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new>>32))		\
		       : "memory");				\
	__ret; })

#endif

#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX8)

#endif /* _ASM_X86_CMPXCHG_32_H */