xref: /linux/arch/x86/include/asm/cmpxchg_32.h (revision b24413180f5600bcb3bb70fbed5cf186b60864bd)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants, you
 *       need to test for the feature in boot_cpu_data.
 */
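
/*
 * Illustrative sketch (not part of this header): a caller that must also
 * work on CPUs without CMPXCHG8B is expected to check the feature bit in
 * boot_cpu_data first, e.g. via boot_cpu_has():
 *
 *	if (boot_cpu_has(X86_FEATURE_CX8))
 *		prev = cmpxchg64(&shared_val, old, new);
 *	else
 *		prev = locked_update_fallback(&shared_val, old, new);
 *
 * shared_val and locked_update_fallback() are hypothetical names used only
 * for illustration.
 */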

/*
 * CMPXCHG8B only writes to the target if we had the previous
 * value in registers, otherwise it acts as a read and gives us the
 * "new previous" value.  That is why there is a loop.  Preloading
 * EDX:EAX is a performance optimization: in the common case it means
 * we need only one locked operation.
 *
 * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
 * least an FPU save and/or %cr0.ts manipulation.
 *
 * cmpxchg8b must be used with the lock prefix here to allow the
 * instruction to execute atomically.  The reader side needs to see
 * a coherent 64-bit value.
 */
static inline void set_64bit(volatile u64 *ptr, u64 value)
{
	u32 low  = value;
	u32 high = value >> 32;
	u64 prev = *ptr;

	asm volatile("\n1:\t"
		     LOCK_PREFIX "cmpxchg8b %0\n\t"
		     "jnz 1b"
		     : "=m" (*ptr), "+A" (prev)
		     : "b" (low), "c" (high)
		     : "memory");
}
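
/*
 * Illustrative sketch (not part of this header): set_64bit() lets a writer
 * publish a 64-bit value that concurrent cmpxchg8b-based readers always see
 * as a whole, never as a torn pair of 32-bit halves.  Assuming a shared
 * timestamp (hypothetical names):
 *
 *	static volatile u64 last_event_ns;
 *
 *	static void publish_timestamp(u64 now_ns)
 *	{
 *		set_64bit(&last_event_ns, now_ns);
 *	}
 */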

#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
					 (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
					       (unsigned long long)(n)))
#endif
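
/*
 * Illustrative sketch (not part of this header): the usual lock-free
 * read-modify-write loop built on cmpxchg64().  It retries until no other
 * CPU modified the value between the read and the compare-and-exchange
 * (add_to_counter() is a hypothetical example):
 *
 *	static u64 add_to_counter(u64 *counter, u64 delta)
 *	{
 *		u64 old, new;
 *
 *		do {
 *			old = READ_ONCE(*counter);
 *			new = old + delta;
 *		} while (cmpxchg64(counter, old, new) != old);
 *
 *		return new;
 *	}
 */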

static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile(LOCK_PREFIX "cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}
59bb898558SAl Viro 
604532b305SH. Peter Anvin static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
61bb898558SAl Viro {
624532b305SH. Peter Anvin 	u64 prev;
63113fc5a6SH. Peter Anvin 	asm volatile("cmpxchg8b %1"
64113fc5a6SH. Peter Anvin 		     : "=A" (prev),
654532b305SH. Peter Anvin 		       "+m" (*ptr)
664532b305SH. Peter Anvin 		     : "b" ((u32)new),
674532b305SH. Peter Anvin 		       "c" ((u32)(new >> 32)),
68bb898558SAl Viro 		       "0" (old)
69bb898558SAl Viro 		     : "memory");
70bb898558SAl Viro 	return prev;
71bb898558SAl Viro }
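
/*
 * Illustrative note (not part of this header): __cmpxchg64_local() omits the
 * lock prefix, so it is only safe on data that no other CPU can modify
 * concurrently, e.g. per-CPU state updated with preemption disabled
 * (my_percpu_val is a hypothetical variable):
 *
 *	u64 *slot = this_cpu_ptr(&my_percpu_val);
 *
 *	prev = cmpxchg64_local(slot, old, new);
 */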

#ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable of running on the 80386 and 80486: those CPUs
 * lack cmpxchg8b, so it may be necessary to emulate it at runtime.
 */

#define cmpxchg64(ptr, o, n)					\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io(LOCK_PREFIX_HERE				\
			"call cmpxchg8b_emu",			\
			"lock; cmpxchg8b (%%esi)",		\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new>>32))		\
		       : "memory");				\
	__ret; })

#define cmpxchg64_local(ptr, o, n)				\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io("call cmpxchg8b_emu",			\
		       "cmpxchg8b (%%esi)",			\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new>>32))		\
		       : "memory");				\
	__ret; })

#endif
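
/*
 * Illustrative sketch (not part of this header): on CPUs without CMPXCHG8B
 * the alternatives above call cmpxchg8b_emu instead of patching in the real
 * instruction.  Such CPUs are uniprocessor-only, so the emulation can make
 * the compare-and-store appear atomic simply by disabling interrupts,
 * roughly like this C equivalent (conceptual only):
 *
 *	static u64 cmpxchg8b_emu_sketch(u64 *ptr, u64 old, u64 new)
 *	{
 *		unsigned long flags;
 *		u64 prev;
 *
 *		local_irq_save(flags);
 *		prev = *ptr;
 *		if (prev == old)
 *			*ptr = new;
 *		local_irq_restore(flags);
 *		return prev;
 *	}
 */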

#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX8)

#endif /* _ASM_X86_CMPXCHG_32_H */