xref: /linux/arch/arm/include/asm/cmpxchg.h (revision 005438a8eef063495ac059d128eea71b58de50e5)
#ifndef __ASM_ARM_CMPXCHG_H
#define __ASM_ARM_CMPXCHG_H

#include <linux/irqflags.h>
#include <linux/prefetch.h>
#include <asm/barrier.h>

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform the atomic swap, flush the cache
 *
 * We choose (1), as implemented in __xchg() below, since it is the
 * "easiest" to achieve here and is not dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	smp_mb();
	prefetchw((const void *)ptr);

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		/* Cause a link-time error: the xchg() size is not supported */
		__bad_xchg(ptr, size), ret = 0;
		break;
	}
	smp_mb();

	return ret;
}

#define xchg(ptr, x) ({							\
	(__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr),		\
				   sizeof(*(ptr)));			\
})
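
/*
 * Illustrative sketch only (not part of the original header): xchg()
 * atomically stores the new value into *ptr and returns the value that
 * was there before.  The names below (example_state, example_publish)
 * are hypothetical.
 *
 *	static unsigned long example_state;
 *
 *	static unsigned long example_publish(unsigned long new)
 *	{
 *		return xchg(&example_state, new);	// returns the old value
 *	}
 */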

#include <asm-generic/cmpxchg-local.h>

#if __LINUX_ARM_ARCH__ < 6
/* min ARCH < ARMv6 */

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif

/*
 * cmpxchg_local and cmpxchg64_local are atomic with respect to the
 * current CPU only.  Always make them available.
 */
#define cmpxchg_local(ptr, o, n) ({					\
	(__typeof(*ptr))__cmpxchg_local_generic((ptr),			\
					        (unsigned long)(o),	\
					        (unsigned long)(n),	\
					        sizeof(*(ptr)));	\
})

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
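
/*
 * Illustrative sketch only (not part of the original header): on these
 * pre-ARMv6 UP kernels cmpxchg_local() is atomic only with respect to
 * the current CPU, so it suits data that is never shared across CPUs.
 * The names below (example_count, example_bump) are hypothetical.
 *
 *	static unsigned long example_count;
 *
 *	static void example_bump(void)
 *	{
 *		unsigned long old, prev;
 *
 *		old = example_count;
 *		while ((prev = cmpxchg_local(&example_count, old, old + 1)) != old)
 *			old = prev;
 *	}
 */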

#include <asm-generic/cmpxchg.h>

#else	/* min ARCH >= ARMv6 */

extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
 * cmpxchg only supports 32-bit operands on ARMv6.
 */

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval, res;

	prefetchw((const void *)ptr);

	switch (size) {
#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
	case 1:
		do {
			asm volatile("@ __cmpxchg1\n"
			"	ldrexb	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexbeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	case 2:
		do {
			asm volatile("@ __cmpxchg2\n"
			"	ldrexh	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexheq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
#endif
	case 4:
		do {
			asm volatile("@ __cmpxchg4\n"
			"	ldrex	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	default:
		__bad_cmpxchg(ptr, size);
		oldval = 0;
	}

	return oldval;
}

static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	unsigned long ret;

	smp_mb();
	ret = __cmpxchg(ptr, old, new, size);
	smp_mb();

	return ret;
}

#define cmpxchg(ptr, o, n) ({						\
	(__typeof__(*(ptr)))__cmpxchg_mb((ptr),				\
					 (unsigned long)(o),		\
					 (unsigned long)(n),		\
					 sizeof(*(ptr)));		\
})
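
/*
 * Illustrative sketch only (not part of the original header): a typical
 * compare-and-swap retry loop built on cmpxchg().  The names below
 * (example_flags, example_set_bit_mask) are hypothetical.
 *
 *	static unsigned long example_flags;
 *
 *	static void example_set_bit_mask(unsigned long mask)
 *	{
 *		unsigned long old, prev;
 *
 *		old = example_flags;
 *		for (;;) {
 *			prev = cmpxchg(&example_flags, old, old | mask);
 *			if (prev == old)
 *				break;		// swap succeeded
 *			old = prev;		// lost a race; retry
 *		}
 *	}
 */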

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long ret;

	switch (size) {
#ifdef CONFIG_CPU_V6	/* min ARCH == ARMv6 */
	case 1:
	case 2:
		ret = __cmpxchg_local_generic(ptr, old, new, size);
		break;
#endif
	default:
		ret = __cmpxchg(ptr, old, new, size);
	}

	return ret;
}

#define cmpxchg_local(ptr, o, n) ({					\
	(__typeof(*ptr))__cmpxchg_local((ptr),				\
				        (unsigned long)(o),		\
				        (unsigned long)(n),		\
				        sizeof(*(ptr)));		\
})

static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	unsigned long long oldval;
	unsigned long res;

	prefetchw(ptr);

	__asm__ __volatile__(
"1:	ldrexd		%1, %H1, [%3]\n"
"	teq		%1, %4\n"
"	teqeq		%H1, %H4\n"
"	bne		2f\n"
"	strexd		%0, %5, %H5, [%3]\n"
"	teq		%0, #0\n"
"	bne		1b\n"
"2:"
	: "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
	: "r" (ptr), "r" (old), "r" (new)
	: "cc");

	return oldval;
}

#define cmpxchg64_relaxed(ptr, o, n) ({					\
	(__typeof__(*(ptr)))__cmpxchg64((ptr),				\
					(unsigned long long)(o),	\
					(unsigned long long)(n));	\
})

#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))

static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
						unsigned long long old,
						unsigned long long new)
{
	unsigned long long ret;

	smp_mb();
	ret = __cmpxchg64(ptr, old, new);
	smp_mb();

	return ret;
}

#define cmpxchg64(ptr, o, n) ({						\
	(__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
					   (unsigned long long)(o),	\
					   (unsigned long long)(n));	\
})
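
/*
 * Illustrative sketch only (not part of the original header): cmpxchg64()
 * performs the same compare-and-swap as cmpxchg(), but on a 64-bit
 * quantity via ldrexd/strexd.  The names below (example_stamp,
 * example_advance_stamp) are hypothetical.
 *
 *	static unsigned long long example_stamp;
 *
 *	static void example_advance_stamp(unsigned long long delta)
 *	{
 *		unsigned long long old, prev;
 *
 *		old = example_stamp;
 *		while ((prev = cmpxchg64(&example_stamp, old, old + delta)) != old)
 *			old = prev;
 *	}
 */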

#endif	/* __LINUX_ARM_ARCH__ >= 6 */

#endif /* __ASM_ARM_CMPXCHG_H */