/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Atomic operations (LLSC).
 *
 * Copyright (C) 2024-2025 Loongson Technology Corporation Limited
 */

#ifndef _ASM_ATOMIC_LLSC_H
#define _ASM_ATOMIC_LLSC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_OP(op, I, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)			\
{									\
	int temp;							\
									\
	__asm__ __volatile__(						\
	"1:	ll.w		%0, %1      # atomic_" #op "	\n"	\
	"	" #asm_op "	%0, %0, %2			\n"	\
	"	sc.w		%0, %1				\n"	\
	"	beq		%0, $r0, 1b			\n"	\
	: "=&r" (temp), "+ZC" (v->counter)				\
	: "r" (I));							\
}
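
/*
 * The loop above is the classic LL/SC retry sequence: ll.w loads the
 * counter and opens a monitored access, the asm_op instruction computes
 * the new value, and sc.w attempts the conditional store, leaving 0 in
 * its destination register if another CPU intervened, in which case the
 * beq against $r0 restarts at label 1. As an illustrative sketch (not
 * compiler output), ATOMIC_OP(add, i, add.w) expands to roughly:
 *
 *	static inline void arch_atomic_add(int i, atomic_t *v)
 *	{
 *		int temp;
 *
 *		__asm__ __volatile__(
 *		"1:	ll.w	%0, %1		\n"
 *		"	add.w	%0, %0, %2	\n"
 *		"	sc.w	%0, %1		\n"
 *		"	beq	%0, $r0, 1b	\n"
 *		: "=&r" (temp), "+ZC" (v->counter)
 *		: "r" (i));
 *	}
 */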

#define ATOMIC_OP_RETURN(op, I, asm_op)					\
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	int result, temp;						\
									\
	__asm__ __volatile__(						\
	"1:	ll.w		%1, %2      # atomic_" #op "_return \n"	\
	"	" #asm_op "	%0, %1, %3                          \n"	\
	"	sc.w		%0, %2                              \n"	\
	"	beq		%0, $r0, 1b                         \n"	\
	"	" #asm_op "	%0, %1, %3                          \n"	\
	: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)		\
	: "r" (I));							\
									\
	return result;							\
}
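
/*
 * A successful sc.w overwrites %0 (the result register) with its 1/0
 * success flag, so the value computed inside the loop is lost. The
 * trailing asm_op therefore recomputes the result from %1, which still
 * holds the value returned by the final ll.w, before it is handed back
 * as the new counter value.
 */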

#define ATOMIC_FETCH_OP(op, I, asm_op)					\
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	int result, temp;						\
									\
	__asm__ __volatile__(						\
	"1:	ll.w		%1, %2      # atomic_fetch_" #op "  \n"	\
	"	" #asm_op "	%0, %1, %3                          \n"	\
	"	sc.w		%0, %2                              \n"	\
	"	beq		%0, $r0, 1b                         \n"	\
	"	add.w		%0, %1, $r0                         \n"	\
	: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)		\
	: "r" (I));							\
									\
	return result;							\
}
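
/*
 * The trailing "add.w %0, %1, $r0" is a register-to-register move
 * (adding the hard-wired zero register $r0): it copies the value that
 * ll.w observed before the operation into %0, so the fetch variants
 * return the old counter value rather than the new one.
 */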

#define ATOMIC_OPS(op, I, asm_op)					\
	ATOMIC_OP(op, I, asm_op)					\
	ATOMIC_OP_RETURN(op, I, asm_op)					\
	ATOMIC_FETCH_OP(op, I, asm_op)

ATOMIC_OPS(add, i, add.w)
ATOMIC_OPS(sub, -i, add.w)
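
/*
 * Subtraction reuses add.w with a negated operand: ATOMIC_OPS(sub, -i,
 * add.w) passes I = -i, so old - i is computed as old + (-i) and no
 * separate sub.w sequence is needed.
 */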

#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed
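
/*
 * Usage sketch (hypothetical caller, not part of this header): the
 * generic atomic layer builds the full ordered API on top of these
 * relaxed primitives, which can also be called directly, e.g.:
 *
 *	atomic_t refcnt = ATOMIC_INIT(1);
 *
 *	arch_atomic_add(2, &refcnt);
 *	if (arch_atomic_sub_return_relaxed(3, &refcnt) == 0)
 *		do_release();
 *
 * where do_release() is a made-up helper standing in for real cleanup.
 */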

#undef ATOMIC_OPS

#define ATOMIC_OPS(op, I, asm_op)					\
	ATOMIC_OP(op, I, asm_op)					\
	ATOMIC_FETCH_OP(op, I, asm_op)

ATOMIC_OPS(and, i, and)
ATOMIC_OPS(or, i, or)
ATOMIC_OPS(xor, i, xor)
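
/*
 * The bitwise operations only get the void and fetch variants; no
 * arch_atomic_{and,or,xor}_return_relaxed is generated, matching the
 * kernel's atomic API, which offers fetch forms but no *_return forms
 * for bitwise operations.
 */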

#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

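/*
 * 64-bit atomics would require ll.d/sc.d sequences and an atomic64_t
 * counterpart; this header only implements the 32-bit ll.w/sc.w
 * operations, so 64-bit configurations are rejected outright.
 */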
#ifdef CONFIG_64BIT
#error "64-bit LLSC atomic operations are not supported"
#endif

#endif /* _ASM_ATOMIC_LLSC_H */