#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants, you
 *       need to test for the feature in boot_cpu_data.
 */

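/*
 * Illustrative guard (sketch only, not part of this header): a caller
 * that cannot rely on CONFIG_X86_CMPXCHG64 at build time would check
 * the CMPXCHG8B feature bit before using these 64-bit helpers, e.g.:
 *
 *	if (boot_cpu_has(X86_FEATURE_CX8))
 *		set_64bit(&shared_val, new_val);
 *	else
 *		fallback_store_under_lock(&shared_val, new_val);
 *
 * boot_cpu_has() and X86_FEATURE_CX8 come from <asm/cpufeature.h>;
 * fallback_store_under_lock() is a hypothetical caller-side fallback.
 */
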
/*
 * CMPXCHG8B only writes to the target if we had the previous
 * value in registers, otherwise it acts as a read and gives us the
 * "new previous" value.  That is why there is a loop.  Preloading
 * EDX:EAX is a performance optimization: in the common case it means
 * we need only one locked operation.
 *
 * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
 * least an FPU save and/or %cr0.ts manipulation.
 *
 * cmpxchg8b must be used with the lock prefix here to allow the
 * instruction to be executed atomically.  We need the reader side to
 * see a coherent 64-bit value.
 */
static inline void set_64bit(volatile u64 *ptr, u64 value)
{
	u32 low  = value;
	u32 high = value >> 32;
	u64 prev = *ptr;

	asm volatile("\n1:\t"
		     LOCK_PREFIX "cmpxchg8b %0\n\t"
		     "jnz 1b"
		     : "=m" (*ptr), "+A" (prev)
		     : "b" (low), "c" (high)
		     : "memory");
}
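
/*
 * Illustrative sketch (not part of this header) of what the asm loop in
 * set_64bit() amounts to, expressed with the cmpxchg64() API defined
 * below (assumes the CPU has CMPXCHG8B):
 *
 *	static inline void set_64bit_sketch(volatile u64 *ptr, u64 value)
 *	{
 *		u64 old = *ptr;		// the EDX:EAX preload
 *		u64 seen;
 *
 *		// cmpxchg64() returns the value that was in memory; if it
 *		// differs from 'old', another CPU changed *ptr under us,
 *		// so retry with the freshly observed value.
 *		while ((seen = cmpxchg64(ptr, old, value)) != old)
 *			old = seen;
 *	}
 */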

#define __HAVE_ARCH_CMPXCHG 1

#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
					 (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
					       (unsigned long long)(n)))
#endif

static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile(LOCK_PREFIX "cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}

static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile("cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}

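/*
 * Usage sketch (hypothetical variables, not part of this header):
 * cmpxchg64() is the right primitive for a 64-bit value shared between
 * CPUs, while cmpxchg64_local() omits the LOCK prefix and is therefore
 * only atomic with respect to the local CPU, e.g. for per-CPU data
 * accessed with preemption disabled:
 *
 *	// cross-CPU shared sequence number: needs the LOCK'd form
 *	if (cmpxchg64(&shared_seq, old_seq, old_seq + 1) == old_seq)
 *		pr_debug("advanced the sequence number\n");
 *
 *	// strictly CPU-local state: the unlocked form is sufficient
 *	cmpxchg64_local(&pcpu_state, OLD_STATE, NEW_STATE);
 */
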
#ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable of running on the 80386 and 80486, which
 * lack the cmpxchg8b instruction, so it may have to be emulated at run
 * time.  alternative_io() patches the call to cmpxchg8b_emu into a real
 * (locked) cmpxchg8b at boot if the CPU advertises X86_FEATURE_CX8, and
 * leaves the emulation call in place otherwise.
 */

#define cmpxchg64(ptr, o, n)					\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io(LOCK_PREFIX_HERE				\
			"call cmpxchg8b_emu",			\
			"lock; cmpxchg8b (%%esi)" ,		\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new>>32))		\
		       : "memory");				\
	__ret; })
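
/*
 * Conceptually, cmpxchg8b_emu behaves like the C sketch below (the real
 * routine is assembly; disabling interrupts is sufficient on the
 * uniprocessor 386/486 class machines this path targets):
 *
 *	u64 cmpxchg8b_emu_sketch(volatile u64 *ptr, u64 old, u64 new)
 *	{
 *		unsigned long flags;
 *		u64 prev;
 *
 *		local_irq_save(flags);
 *		prev = *ptr;
 *		if (prev == old)
 *			*ptr = new;
 *		local_irq_restore(flags);
 *		return prev;	// old value, as cmpxchg8b leaves in EDX:EAX
 *	}
 */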

#define cmpxchg64_local(ptr, o, n)				\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io("call cmpxchg8b_emu",			\
		       "cmpxchg8b (%%esi)" ,			\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new>>32))		\
		       : "memory");				\
	__ret; })

#endif

#define system_has_cmpxchg_double() cpu_has_cx8

#endif /* _ASM_X86_CMPXCHG_32_H */