/* SPDX-License-Identifier: GPL-2.0 */
/* 64-bit atomic xchg() and cmpxchg() definitions.
 *
 * Copyright (C) 1996, 1997, 2000 David S. Miller (davem@redhat.com)
 */

#ifndef __ARCH_SPARC64_CMPXCHG__
#define __ARCH_SPARC64_CMPXCHG__

static inline unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
	__asm__ __volatile__("cas [%2], %3, %0"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	mov		%0, %1\n"
"1:	lduw		[%4], %2\n"
"	cas		[%4], %2, %0\n"
"	cmp		%2, %0\n"
"	bne,a,pn	%%icc, 1b\n"
"	 mov		%1, %0\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");
	return val;
}
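/*
 * Roughly equivalent C for the asm loop in xchg32() above (illustrative
 * sketch only): keep retrying the CAS until the value it observed is still
 * the one in memory, then hand back that old value.
 *
 *	unsigned int old;
 *
 *	do {
 *		old = *m;
 *	} while ((unsigned int)__cmpxchg_u32((volatile int *)m, old, val) != old);
 *	return old;
 */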

static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	mov		%0, %1\n"
"1:	ldx		[%4], %2\n"
"	casx		[%4], %2, %0\n"
"	cmp		%2, %0\n"
"	bne,a,pn	%%xcc, 1b\n"
"	 mov		%1, %0\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");
	return val;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
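/*
 * Example usage (illustrative only; 'pending' is a hypothetical variable):
 *
 *	static unsigned int pending;
 *	unsigned int old = xchg(&pending, 0U);
 *
 * 'old' receives the previous value of 'pending', and the store of the new
 * value is atomic with respect to other CPUs.
 */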

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg().  */
void __xchg_called_with_bad_pointer(void);

/*
 * Use the 4 byte cas instruction to achieve a 2 byte xchg. The main logic
 * here is to compute the bit shift of the halfword we are interested in.
 * The XOR flips the halfword's offset within the word, which yields the
 * correct shift for big-endian byte order.
 */
static inline unsigned long
xchg16(__volatile__ unsigned short *m, unsigned short val)
{
	unsigned long maddr = (unsigned long)m;
	int bit_shift = (((unsigned long)m & 2) ^ 2) << 3;
	unsigned int mask = 0xffff << bit_shift;
	unsigned int *ptr = (unsigned int *) (maddr & ~2);
	unsigned int old32, new32, load32;

	/* Read the old value */
	load32 = *ptr;

	do {
		old32 = load32;
		new32 = (load32 & (~mask)) | val << bit_shift;
		load32 = __cmpxchg_u32(ptr, old32, new32);
	} while (load32 != old32);

	return (load32 & mask) >> bit_shift;
}
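/*
 * Worked example: for a halfword whose address has (m & 2) == 0, bit_shift
 * is (0 ^ 2) << 3 == 16 and mask is 0xffff0000, i.e. the upper half of the
 * aligned 32-bit word, which is where that halfword lives on big-endian
 * SPARC.  For (m & 2) == 2 the shift is 0 and the mask is 0x0000ffff.
 */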

static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
				       int size)
{
	switch (size) {
	case 2:
		return xchg16(ptr, x);
	case 4:
		return xchg32(ptr, x);
	case 8:
		return xchg64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
	__asm__ __volatile__("casx [%2], %3, %0"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

/*
 * Use the 4 byte cas instruction to achieve a 1 byte cmpxchg. The main logic
 * here is to compute the bit shift of the byte we are interested in.
 * The XOR flips the byte's offset within the word, which yields the correct
 * shift for big-endian byte order.
 */
static inline unsigned long
__cmpxchg_u8(volatile unsigned char *m, unsigned char old, unsigned char new)
{
	unsigned long maddr = (unsigned long)m;
	int bit_shift = (((unsigned long)m & 3) ^ 3) << 3;
	unsigned int mask = 0xff << bit_shift;
	unsigned int *ptr = (unsigned int *) (maddr & ~3);
	unsigned int old32, new32, load;
	unsigned int load32 = *ptr;

	do {
		new32 = (load32 & ~mask) | (new << bit_shift);
		old32 = (load32 & ~mask) | (old << bit_shift);
		load32 = __cmpxchg_u32(ptr, old32, new32);
		if (load32 == old32)
			return old;	/* cas succeeded */
		load = (load32 & mask) >> bit_shift;
	} while (load == old);	/* retry only if the other bytes changed */

	return load;
}
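/*
 * Worked example: for a byte whose address has (m & 3) == 1, bit_shift is
 * (1 ^ 3) << 3 == 16 and mask is 0x00ff0000, i.e. the second-most-significant
 * byte of the aligned word, which is byte 1 in big-endian order.  The loop
 * above retries only when the CAS failed because one of the *other* bytes in
 * the word changed; if the target byte itself no longer matches 'old', its
 * current value is returned.
 */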

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
		case 1:
			return __cmpxchg_u8(ptr, old, new);
		case 4:
			return __cmpxchg_u32(ptr, old, new);
		case 8:
			return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
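/*
 * Example usage (illustrative only; 'owner' and 'current_id' are
 * hypothetical):
 *
 *	static unsigned long owner;
 *	unsigned long prev = cmpxchg(&owner, 0UL, current_id);
 *
 * If prev == 0UL the caller installed current_id atomically; otherwise
 * another CPU got there first and prev holds the value it stored.
 */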

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */

static inline unsigned long __cmpxchg_local(volatile void *ptr,
				      unsigned long old,
				      unsigned long new, int size)
{
	switch (size) {
	case 4:
	case 8:	return __cmpxchg(ptr, old, new, size);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}

	return old;
}

#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n)					\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
  })
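/*
 * Note: cmpxchg64() below simply reuses cmpxchg64_local(), since the
 * casx-based __cmpxchg_u64() used for 8-byte operands is already atomic
 * across CPUs, and the BUILD_BUG_ON() rejects operands that are not 8 bytes
 * wide at compile time.
 */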
#define cmpxchg64(ptr, o, n)	cmpxchg64_local((ptr), (o), (n))

#endif /* __ARCH_SPARC64_CMPXCHG__ */