/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2014 Regents of the University of California
 */

#ifndef _ASM_RISCV_CMPXCHG_H
#define _ASM_RISCV_CMPXCHG_H

#include <linux/bug.h>

#include <asm/fence.h>

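/*
 * __arch_xchg_masked() emulates a byte/halfword xchg on top of a 32-bit
 * LR.W/SC.W loop, since the base atomics extension only provides word and
 * doubleword LR/SC and AMOs.  The pointer is rounded down to the containing
 * aligned 32-bit word, the new value is shifted into place, and only the
 * bits selected by __mask are replaced; the SC.W retries until it succeeds.
 *
 * Worked example (illustrative): for a u16 at byte offset 2 within its
 * word, __s = (2 & (4 - 2)) * 8 = 16 and __mask = 0xffff << 16, so only
 * the upper halfword of the aligned word is exchanged.
 */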
#define __arch_xchg_masked(prepend, append, r, p, n)			\
({									\
	u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3);			\
	ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE;	\
	ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0)	\
			<< __s;						\
	ulong __newx = (ulong)(n) << __s;				\
	ulong __retx;							\
	ulong __rc;							\
									\
	__asm__ __volatile__ (						\
	       prepend							\
	       "0:	lr.w %0, %2\n"					\
	       "	and  %1, %0, %z4\n"				\
	       "	or   %1, %1, %z3\n"				\
	       "	sc.w %1, %1, %2\n"				\
	       "	bnez %1, 0b\n"					\
	       append							\
	       : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b))	\
	       : "rJ" (__newx), "rJ" (~__mask)				\
	       : "memory");						\
									\
	r = (__typeof__(*(p)))((__retx & __mask) >> __s);		\
})

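/*
 * __arch_xchg() handles the natively supported sizes with a single AMOSWAP
 * instruction; "sfx" selects the .w/.d width (plus any ordering annotation)
 * and "prepend"/"append" slot optional fences around it.
 */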
#define __arch_xchg(sfx, prepend, append, r, p, n)			\
({									\
	__asm__ __volatile__ (						\
		prepend							\
		"	amoswap" sfx " %0, %2, %1\n"			\
		append							\
		: "=r" (r), "+A" (*(p))					\
		: "r" (n)						\
		: "memory");						\
})

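/*
 * _arch_xchg() dispatches on the operand size: sub-word accesses go through
 * the masked LR/SC emulation, word and doubleword accesses use the AMO fast
 * path, and any other size is rejected at build time.
 */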
#define _arch_xchg(ptr, new, sfx, prepend, append)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	__typeof__(*(__ptr)) __new = (new);				\
	__typeof__(*(__ptr)) __ret;					\
									\
	switch (sizeof(*__ptr)) {					\
	case 1:								\
	case 2:								\
		__arch_xchg_masked(prepend, append,			\
				   __ret, __ptr, __new);		\
		break;							\
	case 4:								\
		__arch_xchg(".w" sfx, prepend, append,			\
			      __ret, __ptr, __new);			\
		break;							\
	case 8:								\
		__arch_xchg(".d" sfx, prepend, append,			\
			      __ret, __ptr, __new);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	(__typeof__(*(__ptr)))__ret;					\
})

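/*
 * The _relaxed/_acquire/_release/fully-ordered variants below differ only
 * in the fences (or the .aqrl AMO annotation) handed down to _arch_xchg().
 *
 * Minimal usage sketch (illustrative only; "flag" is a made-up variable):
 *
 *	static int flag;
 *	...
 *	int old = arch_xchg(&flag, 1);
 *
 * after which "old" holds the previous value of "flag" and "flag" is 1.
 */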
#define arch_xchg_relaxed(ptr, x)					\
	_arch_xchg(ptr, x, "", "", "")

#define arch_xchg_acquire(ptr, x)					\
	_arch_xchg(ptr, x, "", "", RISCV_ACQUIRE_BARRIER)

#define arch_xchg_release(ptr, x)					\
	_arch_xchg(ptr, x, "", RISCV_RELEASE_BARRIER, "")

#define arch_xchg(ptr, x)						\
	_arch_xchg(ptr, x, ".aqrl", "", "")

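/*
 * xchg32() and xchg64() are fully ordered xchg() wrappers that additionally
 * enforce the expected operand size at build time.
 */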
#define xchg32(ptr, x)							\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 4);				\
	arch_xchg((ptr), (x));						\
})

#define xchg64(ptr, x)							\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_xchg((ptr), (x));						\
})

/*
 * Atomic compare and exchange.  Compare OLD with MEM; if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

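/*
 * Minimal usage sketch (illustrative only; "lock" and do_something() are
 * made up):
 *
 *	static int lock;
 *	...
 *	if (arch_cmpxchg(&lock, 0, 1) == 0)
 *		do_something();
 *
 * do_something() runs only if this caller observed 0 and installed 1;
 * otherwise the return value is whatever was found in "lock".
 */

/*
 * As with xchg, byte/halfword cmpxchg is emulated with an LR.W/SC.W loop on
 * the containing aligned 32-bit word: the expected value is compared only
 * under __mask, and on a match the masked bits are replaced with the new
 * value.  "sc_sfx" lets the fully ordered variant add release semantics to
 * the SC.
 */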
#define __arch_cmpxchg_masked(sc_sfx, prepend, append, r, p, o, n)	\
({									\
	u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3);			\
	ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE;	\
	ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0)	\
			<< __s;						\
	ulong __newx = (ulong)(n) << __s;				\
	ulong __oldx = (ulong)(o) << __s;				\
	ulong __retx;							\
	ulong __rc;							\
									\
	__asm__ __volatile__ (						\
		prepend							\
		"0:	lr.w %0, %2\n"					\
		"	and  %1, %0, %z5\n"				\
		"	bne  %1, %z3, 1f\n"				\
		"	and  %1, %0, %z6\n"				\
		"	or   %1, %1, %z4\n"				\
		"	sc.w" sc_sfx " %1, %1, %2\n"			\
		"	bnez %1, 0b\n"					\
		append							\
		"1:\n"							\
		: "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b))	\
		: "rJ" ((long)__oldx), "rJ" (__newx),			\
		  "rJ" (__mask), "rJ" (~__mask)				\
		: "memory");						\
									\
	r = (__typeof__(*(p)))((__retx & __mask) >> __s);		\
})

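/*
 * Word and doubleword cmpxchg use a plain LR/SC loop.  "co" is a cast
 * applied to the expected value: the 32-bit case passes (long) so that the
 * comparison operand is sign-extended the same way LR.W sign-extends the
 * loaded value.
 */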
#define __arch_cmpxchg(lr_sfx, sc_sfx, prepend, append, r, p, co, o, n)	\
({									\
	register unsigned int __rc;					\
									\
	__asm__ __volatile__ (						\
		prepend							\
		"0:	lr" lr_sfx " %0, %2\n"				\
		"	bne  %0, %z3, 1f\n"				\
		"	sc" sc_sfx " %1, %z4, %2\n"			\
		"	bnez %1, 0b\n"					\
		append							\
		"1:\n"							\
		: "=&r" (r), "=&r" (__rc), "+A" (*(p))			\
		: "rJ" (co o), "rJ" (n)					\
		: "memory");						\
})

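/*
 * _arch_cmpxchg() mirrors _arch_xchg(): sub-word sizes take the masked
 * emulation, word and doubleword sizes take the LR/SC loop, and anything
 * else fails to build.
 */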
#define _arch_cmpxchg(ptr, old, new, sc_sfx, prepend, append)		\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	__typeof__(*(__ptr)) __old = (old);				\
	__typeof__(*(__ptr)) __new = (new);				\
	__typeof__(*(__ptr)) __ret;					\
									\
	switch (sizeof(*__ptr)) {					\
	case 1:								\
	case 2:								\
		__arch_cmpxchg_masked(sc_sfx, prepend, append,		\
					__ret, __ptr, __old, __new);	\
		break;							\
	case 4:								\
		__arch_cmpxchg(".w", ".w" sc_sfx, prepend, append,	\
				__ret, __ptr, (long), __old, __new);	\
		break;							\
	case 8:								\
		__arch_cmpxchg(".d", ".d" sc_sfx, prepend, append,	\
				__ret, __ptr, /**/, __old, __new);	\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	(__typeof__(*(__ptr)))__ret;					\
})

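/*
 * Ordering is provided the same way as for xchg: explicit fences for the
 * _acquire/_release forms, and a release-annotated SC plus a trailing
 * "fence rw, rw" for the fully ordered arch_cmpxchg().
 */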
#define arch_cmpxchg_relaxed(ptr, o, n)					\
	_arch_cmpxchg((ptr), (o), (n), "", "", "")

#define arch_cmpxchg_acquire(ptr, o, n)					\
	_arch_cmpxchg((ptr), (o), (n), "", "", RISCV_ACQUIRE_BARRIER)

#define arch_cmpxchg_release(ptr, o, n)					\
	_arch_cmpxchg((ptr), (o), (n), "", RISCV_RELEASE_BARRIER, "")

#define arch_cmpxchg(ptr, o, n)						\
	_arch_cmpxchg((ptr), (o), (n), ".rl", "", "	fence rw, rw\n")

#define arch_cmpxchg_local(ptr, o, n)					\
	arch_cmpxchg_relaxed((ptr), (o), (n))

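/*
 * The arch_cmpxchg64*() forms are thin wrappers that enforce a 64-bit
 * operand at build time and otherwise map onto the variants above.
 */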
#define arch_cmpxchg64(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg((ptr), (o), (n));					\
})

#define arch_cmpxchg64_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg_relaxed((ptr), (o), (n));				\
})

#define arch_cmpxchg64_relaxed(ptr, o, n)				\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg_relaxed((ptr), (o), (n));				\
})

#define arch_cmpxchg64_acquire(ptr, o, n)				\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg_acquire((ptr), (o), (n));				\
})

#define arch_cmpxchg64_release(ptr, o, n)				\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg_release((ptr), (o), (n));				\
})

#endif /* _ASM_RISCV_CMPXCHG_H */