/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/bits.h>
#include <linux/build_bug.h>
#include <asm/barrier.h>

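/*
 * Atomic exchange built on the LoongArch AMO instructions: "amswap_db" is
 * expected to be "amswap_db.w" or "amswap_db.d", which swap *m with val in
 * a single instruction with barrier semantics (the _db suffix).
 */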
#define __xchg_amo_asm(amswap_db, m, val)	\
({						\
	__typeof(val) __ret;			\
						\
	__asm__ __volatile__ (			\
	" "amswap_db" %1, %z2, %0 \n"		\
	: "+ZB" (*m), "=&r" (__ret)		\
	: "Jr" (val)				\
	: "memory");				\
						\
	__ret;					\
})

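/*
 * Atomic exchange for cores without AMO instructions: an LL/SC retry loop
 * that reloads *m and retries until the store-conditional succeeds.  The
 * ld/st arguments select the access width ("ll.w"/"sc.w" or "ll.d"/"sc.d").
 */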
#define __xchg_llsc_asm(ld, st, m, val)			\
({							\
	__typeof(val) __ret, __tmp;			\
							\
	asm volatile (					\
	"1:	" ld "	%0, %3		\n"		\
	"	move	%1, %z4		\n"		\
	"	" st "	%1, %2		\n"		\
	"	beqz	%1, 1b		\n"		\
	: "=&r" (__ret), "=&r" (__tmp), "=ZC" (*m)	\
	: "ZC" (*m), "Jr" (val)				\
	: "memory");					\
							\
	__ret;						\
})

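/*
 * Sub-word (8/16-bit) exchange: LL/SC only operates on naturally aligned
 * 32/64-bit quantities, so the byte or halfword is read-modify-written
 * inside the aligned 32-bit word that contains it, using a shifted mask to
 * touch only the byte lanes of interest.  For a one-byte exchange at an
 * address with (ptr & 0x3) == 1, for instance, shift is 8 and mask is 0xff00.
 */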
static inline unsigned int __xchg_small(volatile void *ptr, unsigned int val,
					unsigned int size)
{
	unsigned int shift;
	u32 old32, mask, temp;
	volatile u32 *ptr32;

	/* Mask value to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	val &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * exchange within the naturally aligned 4 byte integer that includes
	 * it.
	 */
	shift = (unsigned long)ptr & 0x3;
	shift *= BITS_PER_BYTE;
	mask <<= shift;

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);

	asm volatile (
	"1:	ll.w		%0, %3		\n"
	"	andn		%1, %0, %z4	\n"
	"	or		%1, %1, %z5	\n"
	"	sc.w		%1, %2		\n"
	"	beqz		%1, 1b		\n"
	: "=&r" (old32), "=&r" (temp), "=ZC" (*ptr32)
	: "ZC" (*ptr32), "Jr" (mask), "Jr" (val << shift)
	: "memory");

	return (old32 & mask) >> shift;
}

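/*
 * Size dispatch for xchg: 1- and 2-byte operands take the sub-word path,
 * while 4- and 8-byte operands use either the AMO instructions or the
 * LL/SC fallback, depending on CONFIG_CPU_HAS_AMO.
 */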
static __always_inline unsigned long
__arch_xchg(volatile void *ptr, unsigned long x, int size)
{
	switch (size) {
	case 1:
	case 2:
		return __xchg_small((volatile void *)ptr, x, size);

	case 4:
#ifdef CONFIG_CPU_HAS_AMO
		return __xchg_amo_asm("amswap_db.w", (volatile u32 *)ptr, (u32)x);
#else
		return __xchg_llsc_asm("ll.w", "sc.w", (volatile u32 *)ptr, (u32)x);
#endif /* CONFIG_CPU_HAS_AMO */

#ifdef CONFIG_64BIT
	case 8:
#ifdef CONFIG_CPU_HAS_AMO
		return __xchg_amo_asm("amswap_db.d", (volatile u64 *)ptr, (u64)x);
#else
		return __xchg_llsc_asm("ll.d", "sc.d", (volatile u64 *)ptr, (u64)x);
#endif /* CONFIG_CPU_HAS_AMO */
#endif /* CONFIG_64BIT */

	default:
		BUILD_BUG();
	}

	return 0;
}

#define arch_xchg(ptr, x)						\
({									\
	__typeof__(*(ptr)) __res;					\
									\
	__res = (__typeof__(*(ptr)))					\
		__arch_xchg((ptr), (unsigned long)(x), sizeof(*(ptr)));	\
									\
	__res;								\
})
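
/*
 * Usage sketch (illustrative only): atomically clear a flag and act on its
 * previous value.  Most code reaches this path through the generic xchg()
 * wrapper rather than calling arch_xchg() directly, and the names below
 * are placeholders.
 *
 *	static int pending = 1;
 *
 *	if (arch_xchg(&pending, 0))
 *		handle_pending_work();
 */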

/*
 * Word/doubleword compare-and-exchange: an LL/SC loop that stores new only
 * if the current value equals old, using $t0 as scratch (hence the "t0"
 * clobber).  __WEAK_LLSC_MB after the exit label provides the required
 * memory ordering.
 */
#define __cmpxchg_asm(ld, st, m, old, new)				\
({									\
	__typeof(old) __ret;						\
									\
	__asm__ __volatile__(						\
	"1:	" ld "	%0, %2		# __cmpxchg_asm \n"		\
	"	bne	%0, %z3, 2f			\n"		\
	"	move	$t0, %z4			\n"		\
	"	" st "	$t0, %1				\n"		\
	"	beqz	$t0, 1b				\n"		\
	"2:						\n"		\
	__WEAK_LLSC_MB							\
	: "=&r" (__ret), "=ZB"(*m)					\
	: "ZB"(*m), "Jr" (old), "Jr" (new)				\
	: "t0", "memory");						\
									\
	__ret;								\
})

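/*
 * Sub-word (8/16-bit) compare-and-exchange: as in __xchg_small(), the byte
 * or halfword is operated on inside the aligned 32-bit word that contains
 * it, with the comparison and replacement confined to the masked byte lanes.
 */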
static inline unsigned int __cmpxchg_small(volatile void *ptr, unsigned int old,
					   unsigned int new, unsigned int size)
{
	unsigned int shift;
	u32 old32, mask, temp;
	volatile u32 *ptr32;

	/* Mask inputs to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	old &= mask;
	new &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * compare & exchange within the naturally aligned 4 byte integer
	 * that includes it.
	 */
	shift = (unsigned long)ptr & 0x3;
	shift *= BITS_PER_BYTE;
	old <<= shift;
	new <<= shift;
	mask <<= shift;

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);

	asm volatile (
	"1:	ll.w		%0, %3		\n"
	"	and		%1, %0, %z4	\n"
	"	bne		%1, %z5, 2f	\n"
	"	andn		%1, %0, %z4	\n"
	"	or		%1, %1, %z6	\n"
	"	sc.w		%1, %2		\n"
	"	beqz		%1, 1b		\n"
	"	b		3f		\n"
	"2:					\n"
	__WEAK_LLSC_MB
	"3:					\n"
	: "=&r" (old32), "=&r" (temp), "=ZC" (*ptr32)
	: "ZC" (*ptr32), "Jr" (mask), "Jr" (old), "Jr" (new)
	: "memory");

	return (old32 & mask) >> shift;
}

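/*
 * Size dispatch for cmpxchg: 1- and 2-byte operands take the sub-word path,
 * while 4- and 8-byte operands map directly onto ll.w/sc.w and ll.d/sc.d.
 */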
static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, unsigned int size)
{
	switch (size) {
	case 1:
	case 2:
		return __cmpxchg_small(ptr, old, new, size);

	case 4:
		return __cmpxchg_asm("ll.w", "sc.w", (volatile u32 *)ptr,
				     (u32)old, new);

	case 8:
		return __cmpxchg_asm("ll.d", "sc.d", (volatile u64 *)ptr,
				     (u64)old, new);

	default:
		BUILD_BUG();
	}

	return 0;
}

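/*
 * arch_cmpxchg_local() and arch_cmpxchg() share the same implementation:
 * both expand to __cmpxchg(), with old and new first converted through the
 * pointee type so they are truncated to the width of *ptr.
 */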
#define arch_cmpxchg_local(ptr, old, new)				\
	((__typeof__(*(ptr)))						\
		__cmpxchg((ptr),					\
			  (unsigned long)(__typeof__(*(ptr)))(old),	\
			  (unsigned long)(__typeof__(*(ptr)))(new),	\
			  sizeof(*(ptr))))

#define arch_cmpxchg(ptr, old, new)					\
({									\
	__typeof__(*(ptr)) __res;					\
									\
	__res = arch_cmpxchg_local((ptr), (old), (new));		\
									\
	__res;								\
})
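
/*
 * Usage sketch (illustrative only): a compare-and-swap retry loop that
 * atomically doubles a counter.  Real code normally uses the generic
 * cmpxchg()/try_cmpxchg() wrappers; the variable names are placeholders.
 *
 *	static unsigned int counter;
 *	unsigned int old, new;
 *
 *	do {
 *		old = READ_ONCE(counter);
 *		new = old * 2;
 *	} while (arch_cmpxchg(&counter, old, new) != old);
 */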

#ifdef CONFIG_64BIT
#define arch_cmpxchg64_local(ptr, o, n)					\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg_local((ptr), (o), (n));				\
  })

#define arch_cmpxchg64(ptr, o, n)					\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg((ptr), (o), (n));					\
  })
#else
#include <asm-generic/cmpxchg-local.h>
#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
#endif

#endif /* __ASM_CMPXCHG_H */