/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2014 Regents of the University of California
 */

#ifndef _ASM_RISCV_CMPXCHG_H
#define _ASM_RISCV_CMPXCHG_H

#include <linux/bug.h>

#include <asm/fence.h>

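/*
 * Sub-word exchange: the base ISA has no 8-bit or 16-bit AMO/LR/SC forms,
 * so byte and halfword xchg are emulated on the aligned 32-bit word that
 * contains the target.  __s is the bit offset of the value within that
 * word and __mask covers its bits; the LR/SC loop rewrites only the masked
 * field and the old field is shifted back out as the return value.
 */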
#define __arch_xchg_masked(sc_sfx, prepend, append, r, p, n)		\
({									\
	u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3);			\
	ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE;	\
	ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0)	\
			<< __s;						\
	ulong __newx = (ulong)(n) << __s;				\
	ulong __retx;							\
	ulong __rc;							\
									\
	__asm__ __volatile__ (						\
	       prepend							\
	       "0:	lr.w %0, %2\n"					\
	       "	and  %1, %0, %z4\n"				\
	       "	or   %1, %1, %z3\n"				\
	       "	sc.w" sc_sfx " %1, %1, %2\n"			\
	       "	bnez %1, 0b\n"					\
	       append							\
	       : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b))	\
	       : "rJ" (__newx), "rJ" (~__mask)				\
	       : "memory");						\
									\
	r = (__typeof__(*(p)))((__retx & __mask) >> __s);		\
})

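/*
 * Full-word exchange: 32-bit and 64-bit xchg map onto a single
 * amoswap.w/amoswap.d instruction; ordering is controlled by the sfx
 * string (e.g. ".aqrl") and the prepend/append fence strings.
 */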
#define __arch_xchg(sfx, prepend, append, r, p, n)			\
({									\
	__asm__ __volatile__ (						\
		prepend							\
		"	amoswap" sfx " %0, %2, %1\n"			\
		append							\
		: "=r" (r), "+A" (*(p))					\
		: "r" (n)						\
		: "memory");						\
})

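/*
 * Size dispatch for xchg: bytes and halfwords go through the masked LR/SC
 * emulation above, words and doublewords through amoswap, and any other
 * size is rejected at build time via BUILD_BUG().
 */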
#define _arch_xchg(ptr, new, sc_sfx, swap_sfx, prepend,			\
		   sc_append, swap_append)				\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	__typeof__(*(__ptr)) __new = (new);				\
	__typeof__(*(__ptr)) __ret;					\
									\
	switch (sizeof(*__ptr)) {					\
	case 1:								\
	case 2:								\
		__arch_xchg_masked(sc_sfx, prepend, sc_append,		\
				   __ret, __ptr, __new);		\
		break;							\
	case 4:								\
		__arch_xchg(".w" swap_sfx, prepend, swap_append,	\
			      __ret, __ptr, __new);			\
		break;							\
	case 8:								\
		__arch_xchg(".d" swap_sfx, prepend, swap_append,	\
			      __ret, __ptr, __new);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	(__typeof__(*(__ptr)))__ret;					\
})

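/*
 * The xchg ordering variants differ only in the suffixes and fences they
 * pass down: _relaxed adds nothing, _acquire appends an acquire fence,
 * _release prepends a release fence, and the fully ordered form uses
 * sc.rl plus a trailing full fence (or amoswap.aqrl for word/doubleword).
 * Illustrative use (the variable name is an example only):
 *
 *	unsigned long prev = arch_xchg(&lock_word, 1UL);
 */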
#define arch_xchg_relaxed(ptr, x)					\
	_arch_xchg(ptr, x, "", "", "", "", "")

#define arch_xchg_acquire(ptr, x)					\
	_arch_xchg(ptr, x, "", "", "",					\
		   RISCV_ACQUIRE_BARRIER, RISCV_ACQUIRE_BARRIER)

#define arch_xchg_release(ptr, x)					\
	_arch_xchg(ptr, x, "", "", RISCV_RELEASE_BARRIER, "", "")

#define arch_xchg(ptr, x)						\
	_arch_xchg(ptr, x, ".rl", ".aqrl", "", RISCV_FULL_BARRIER, "")

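/*
 * xchg32()/xchg64() are convenience wrappers that additionally assert the
 * operand size at build time before falling through to the fully ordered
 * arch_xchg().
 */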
#define xchg32(ptr, x)							\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 4);				\
	arch_xchg((ptr), (x));						\
})

#define xchg64(ptr, x)							\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_xchg((ptr), (x));						\
})

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

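/*
 * Illustrative use of the convention above (the names v, expected and
 * next are examples only):
 *
 *	old = arch_cmpxchg(&v, expected, next);
 *	if (old == expected)
 *		...	(the store of 'next' took place)
 *
 * As with xchg, sub-word sizes are emulated with a masked LR/SC loop on
 * the containing aligned 32-bit word: the loop bails out to label 1 as
 * soon as the masked old bits differ from the expected value, so a failed
 * compare performs no store.
 */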
#define __arch_cmpxchg_masked(sc_sfx, prepend, append, r, p, o, n)	\
({									\
	u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3);			\
	ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE;	\
	ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0)	\
			<< __s;						\
	ulong __newx = (ulong)(n) << __s;				\
	ulong __oldx = (ulong)(o) << __s;				\
	ulong __retx;							\
	ulong __rc;							\
									\
	__asm__ __volatile__ (						\
		prepend							\
		"0:	lr.w %0, %2\n"					\
		"	and  %1, %0, %z5\n"				\
		"	bne  %1, %z3, 1f\n"				\
		"	and  %1, %0, %z6\n"				\
		"	or   %1, %1, %z4\n"				\
		"	sc.w" sc_sfx " %1, %1, %2\n"			\
		"	bnez %1, 0b\n"					\
		append							\
		"1:\n"							\
		: "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b))	\
		: "rJ" ((long)__oldx), "rJ" (__newx),			\
		  "rJ" (__mask), "rJ" (~__mask)				\
		: "memory");						\
									\
	r = (__typeof__(*(p)))((__retx & __mask) >> __s);		\
})

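/*
 * Full-word compare-and-exchange via an LR/SC loop.  The 'co' argument is
 * a cast applied to the expected value: for the 32-bit case it is (long),
 * so the comparison sees the same sign-extension that lr.w performs on
 * the loaded word; for the 64-bit case it is empty.
 */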
#define __arch_cmpxchg(lr_sfx, sc_sfx, prepend, append, r, p, co, o, n)	\
({									\
	register unsigned int __rc;					\
									\
	__asm__ __volatile__ (						\
		prepend							\
		"0:	lr" lr_sfx " %0, %2\n"				\
		"	bne  %0, %z3, 1f\n"				\
		"	sc" sc_sfx " %1, %z4, %2\n"			\
		"	bnez %1, 0b\n"					\
		append							\
		"1:\n"							\
		: "=&r" (r), "=&r" (__rc), "+A" (*(p))			\
		: "rJ" (co o), "rJ" (n)					\
		: "memory");						\
})

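/*
 * Size dispatch for cmpxchg, mirroring _arch_xchg(): bytes and halfwords
 * use the masked emulation, words and doublewords the plain LR/SC form,
 * and any other size trips BUILD_BUG().
 */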
#define _arch_cmpxchg(ptr, old, new, sc_sfx, prepend, append)		\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	__typeof__(*(__ptr)) __old = (old);				\
	__typeof__(*(__ptr)) __new = (new);				\
	__typeof__(*(__ptr)) __ret;					\
									\
	switch (sizeof(*__ptr)) {					\
	case 1:								\
	case 2:								\
		__arch_cmpxchg_masked(sc_sfx, prepend, append,		\
					__ret, __ptr, __old, __new);	\
		break;							\
	case 4:								\
		__arch_cmpxchg(".w", ".w" sc_sfx, prepend, append,	\
				__ret, __ptr, (long), __old, __new);	\
		break;							\
	case 8:								\
		__arch_cmpxchg(".d", ".d" sc_sfx, prepend, append,	\
				__ret, __ptr, /**/, __old, __new);	\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	(__typeof__(*(__ptr)))__ret;					\
})

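/*
 * Ordering variants for cmpxchg: _relaxed passes no fences, _acquire
 * appends an acquire fence, _release prepends a release fence, and the
 * fully ordered arch_cmpxchg() combines sc with ".rl" and a trailing
 * "fence rw, rw".  The appended fence sits before the failure label, so
 * an unsuccessful compare takes the early exit without it.
 */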
#define arch_cmpxchg_relaxed(ptr, o, n)					\
	_arch_cmpxchg((ptr), (o), (n), "", "", "")

#define arch_cmpxchg_acquire(ptr, o, n)					\
	_arch_cmpxchg((ptr), (o), (n), "", "", RISCV_ACQUIRE_BARRIER)

#define arch_cmpxchg_release(ptr, o, n)					\
	_arch_cmpxchg((ptr), (o), (n), "", RISCV_RELEASE_BARRIER, "")

#define arch_cmpxchg(ptr, o, n)						\
	_arch_cmpxchg((ptr), (o), (n), ".rl", "", "	fence rw, rw\n")

#define arch_cmpxchg_local(ptr, o, n)					\
	arch_cmpxchg_relaxed((ptr), (o), (n))

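/*
 * arch_cmpxchg_local() only has to be atomic with respect to the local
 * CPU, so it maps to the relaxed form.  The arch_cmpxchg64*() wrappers
 * assert an 8-byte operand at build time before forwarding to the
 * matching arch_cmpxchg*() flavour.
 */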
#define arch_cmpxchg64(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg((ptr), (o), (n));					\
})

#define arch_cmpxchg64_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg_relaxed((ptr), (o), (n));				\
})

#define arch_cmpxchg64_relaxed(ptr, o, n)				\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg_relaxed((ptr), (o), (n));				\
})

#define arch_cmpxchg64_acquire(ptr, o, n)				\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg_acquire((ptr), (o), (n));				\
})

#define arch_cmpxchg64_release(ptr, o, n)				\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg_release((ptr), (o), (n));				\
})

#endif /* _ASM_RISCV_CMPXCHG_H */