/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Atomic operations (LLSC).
 *
 * Copyright (C) 2024-2025 Loongson Technology Corporation Limited
 */

#ifndef _ASM_ATOMIC_LLSC_H
#define _ASM_ATOMIC_LLSC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

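/*
 * ATOMIC_OP() generates arch_atomic_<op>(): an LL/SC retry loop that
 * applies asm_op to v->counter and returns nothing. ll.w loads the
 * counter and sets the LL bit; sc.w performs the conditional store,
 * writing 1 to its destination register on success and 0 on failure,
 * in which case beq branches back and the sequence is retried.
 */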
#define ATOMIC_OP(op, I, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)			\
{									\
	int temp;							\
									\
	__asm__ __volatile__(						\
	"1:	ll.w		%0, %1      # atomic_" #op "	\n"	\
	"	" #asm_op "	%0, %0, %2			\n"	\
	"	sc.w		%0, %1				\n"	\
	"	beq		%0, $r0, 1b			\n"	\
	: "=&r" (temp), "+ZC" (v->counter)				\
	: "r" (I));							\
}

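/*
 * ATOMIC_OP_RETURN() generates arch_atomic_<op>_return_relaxed(), which
 * returns the new counter value. sc.w clobbers %0 (result) with its
 * success flag, so once the store succeeds the operation is recomputed
 * from the old value still held in %1 (temp) to form the return value.
 */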
#define ATOMIC_OP_RETURN(op, I, asm_op)					\
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	int result, temp;						\
									\
	__asm__ __volatile__(						\
	"1:	ll.w		%1, %2      # atomic_" #op "_return \n"	\
	"	" #asm_op "	%0, %1, %3                          \n"	\
	"	sc.w		%0, %2                              \n"	\
	"	beq		%0, $r0, 1b                         \n"	\
	"	" #asm_op "	%0, %1, %3                          \n"	\
	: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)		\
	: "r" (I));							\
									\
	return result;							\
}

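/*
 * ATOMIC_FETCH_OP() generates arch_atomic_fetch_<op>_relaxed(), which
 * returns the old counter value. The trailing "add.w %0, %1, $r0" is
 * effectively a move: adding the zero register copies the value loaded
 * by ll.w into the result register.
 */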
#define ATOMIC_FETCH_OP(op, I, asm_op)					\
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	int result, temp;						\
									\
	__asm__ __volatile__(						\
	"1:	ll.w		%1, %2      # atomic_fetch_" #op "  \n"	\
	"	" #asm_op "	%0, %1, %3                          \n"	\
	"	sc.w		%0, %2                              \n"	\
	"	beq		%0, $r0, 1b                         \n"	\
	"	add.w		%0, %1, $r0                         \n"	\
	: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)		\
	: "r" (I));							\
									\
	return result;							\
}

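/*
 * Instantiate all three variants for the arithmetic operations. The
 * c_op argument (a C operator) is not used by any of the sub-macros;
 * only the asm_op mnemonic reaches the inline assembly.
 */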
#define ATOMIC_OPS(op, I, asm_op, c_op)					\
	ATOMIC_OP(op, I, asm_op)					\
	ATOMIC_OP_RETURN(op, I, asm_op)					\
	ATOMIC_FETCH_OP(op, I, asm_op)

ATOMIC_OPS(add, i, add.w, +=)
ATOMIC_OPS(sub, -i, add.w, +=)

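/*
 * The two lines above expand to arch_atomic_add(), arch_atomic_sub(),
 * arch_atomic_{add,sub}_return_relaxed() and
 * arch_atomic_fetch_{add,sub}_relaxed(). Subtraction reuses add.w by
 * passing the negated operand (-i), so no separate sub sequence is
 * needed. The #defines below advertise the _relaxed implementations to
 * the generic atomic code, which builds the ordered variants on top.
 */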
#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed

#undef ATOMIC_OPS

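/*
 * Redefine ATOMIC_OPS for the bitwise operations, which provide only
 * the plain and fetch variants; the kernel's atomic API defines no
 * and/or/xor _return operations.
 */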
#define ATOMIC_OPS(op, I, asm_op)					\
	ATOMIC_OP(op, I, asm_op)					\
	ATOMIC_FETCH_OP(op, I, asm_op)

ATOMIC_OPS(and, i, and)
ATOMIC_OPS(or, i, or)
ATOMIC_OPS(xor, i, xor)

#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

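/*
 * This header implements 32-bit (ll.w/sc.w) atomics only; fail the
 * build if it is ever pulled into a 64-bit configuration.
 */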
#ifdef CONFIG_64BIT
#error "64-bit LLSC atomic operations are not supported"
#endif

#endif /* _ASM_ATOMIC_LLSC_H */