xref: /linux/arch/mips/include/asm/atomic.h (revision cdd5b5a9761fd66d17586e4f4ba6588c70e640ea)
/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/asm.h>
#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
#include <asm/sync.h>

#define ATOMIC_OPS(pfx, type)						\
static __always_inline type arch_##pfx##_read(const pfx##_t *v)	\
{									\
	return READ_ONCE(v->counter);					\
}									\
									\
static __always_inline void arch_##pfx##_set(pfx##_t *v, type i)	\
{									\
	WRITE_ONCE(v->counter, i);					\
}									\

ATOMIC_OPS(atomic, int)

#ifdef CONFIG_64BIT
# define ATOMIC64_INIT(i)	{ (i) }
ATOMIC_OPS(atomic64, s64)
#endif
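
/*
 * For reference, ATOMIC_OPS(atomic, int) above expands (roughly) to the
 * following pair of accessors; this is an illustrative sketch only, not
 * additional definitions:
 *
 *	static __always_inline int arch_atomic_read(const atomic_t *v)
 *	{
 *		return READ_ONCE(v->counter);
 *	}
 *
 *	static __always_inline void arch_atomic_set(atomic_t *v, int i)
 *	{
 *		WRITE_ONCE(v->counter, i);
 *	}
 */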

#define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
static __inline__ void arch_##pfx##_##op(type i, pfx##_t * v)		\
{									\
	type temp;							\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		v->counter c_op i;					\
		raw_local_irq_restore(flags);				\
		return;							\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%0, %1		# " #pfx "_" #op "	\n"	\
	"	" #asm_op " %0, %2				\n"	\
	"	" #sc "	%0, %1					\n"	\
	"\t" __stringify(SC_BEQZ) "	%0, 1b			\n"	\
	"	.set	pop					\n"	\
	: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)		\
	: "Ir" (i) : __LLSC_CLOBBER);					\
}
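
/*
 * Illustrative sketch: instantiating ATOMIC_OP(atomic, add, int, +=, addu,
 * ll, sc) (as done via ATOMIC_OPS() further down) generates
 * arch_atomic_add(), whose LL/SC path is effectively a retry loop of the
 * form
 *
 *	1:	ll	%0, v->counter		# load-linked old value
 *		addu	%0, i			# compute new value
 *		sc	%0, v->counter		# store-conditional; %0 = success
 *		beqz	%0, 1b			# retry if the store failed
 *
 * (SC_BEQZ, from <asm/asm.h>, supplies the branch-if-store-failed
 * instruction appropriate for the CPU.)
 */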

#define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)		\
static __inline__ type							\
arch_##pfx##_##op##_return_relaxed(type i, pfx##_t * v)			\
{									\
	type temp, result;						\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		result c_op i;						\
		v->counter = result;					\
		raw_local_irq_restore(flags);				\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%1, %2		# " #pfx "_" #op "_return\n"	\
	"	" #asm_op " %0, %1, %3				\n"	\
	"	" #sc "	%0, %2					\n"	\
	"\t" __stringify(SC_BEQZ) "	%0, 1b			\n"	\
	"	" #asm_op " %0, %1, %3				\n"	\
	"	.set	pop					\n"	\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i) : __LLSC_CLOBBER);					\
									\
	return result;							\
}
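
/*
 * Note on the asm above: #asm_op appears twice on purpose.  The first
 * instance computes the new value that the store-conditional writes back;
 * the SC instruction then clobbers %0 (result) with its success flag, so
 * once the loop exits the operation is applied again to the value loaded
 * by the load-linked (%1, temp) in order to reconstruct the return value.
 */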

#define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)		\
static __inline__ type							\
arch_##pfx##_fetch_##op##_relaxed(type i, pfx##_t * v)			\
{									\
	type temp, result;						\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		v->counter c_op i;					\
		raw_local_irq_restore(flags);				\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%1, %2		# " #pfx "_fetch_" #op "\n"	\
	"	" #asm_op " %0, %1, %3				\n"	\
	"	" #sc "	%0, %2					\n"	\
	"\t" __stringify(SC_BEQZ) "	%0, 1b			\n"	\
	"	.set	pop					\n"	\
	"	move	%0, %1					\n"	\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i) : __LLSC_CLOBBER);					\
									\
	return result;							\
}
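
/*
 * Note on the asm above: the fetch_##op variants return the value the
 * variable held *before* the operation.  Once the loop exits, %1 (temp)
 * still holds the value loaded by the load-linked, so "move %0, %1"
 * copies it into result after SC has clobbered %0 with its success flag.
 */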

#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)		\
	ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)

ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)

#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed
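
/*
 * The two ATOMIC_OPS() lines above generate arch_atomic_add(),
 * arch_atomic_sub(), arch_atomic_{add,sub}_return_relaxed() and
 * arch_atomic_fetch_{add,sub}_relaxed().  The self-referential #defines
 * advertise the relaxed forms to the generic atomic code, which builds
 * the acquire/release/fully-ordered variants on top of them.
 */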

#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
# define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
# define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
# define arch_atomic64_fetch_add_relaxed	arch_atomic64_fetch_add_relaxed
# define arch_atomic64_fetch_sub_relaxed	arch_atomic64_fetch_sub_relaxed
#endif /* CONFIG_64BIT */

#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)

ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)

#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed

#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
# define arch_atomic64_fetch_and_relaxed	arch_atomic64_fetch_and_relaxed
# define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
# define arch_atomic64_fetch_xor_relaxed	arch_atomic64_fetch_xor_relaxed
#endif

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/*
 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
#define ATOMIC_SIP_OP(pfx, type, op, ll, sc)				\
static __inline__ type arch_##pfx##_sub_if_positive(type i, pfx##_t * v)	\
{									\
	type temp, result;						\
									\
	smp_mb__before_atomic();					\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		result -= i;						\
		if (result >= 0)					\
			v->counter = result;				\
		raw_local_irq_restore(flags);				\
		smp_mb__after_atomic();					\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%1, %2		# atomic_sub_if_positive\n"	\
	"	.set	pop					\n"	\
	"	" #op "	%0, %1, %3				\n"	\
	"	move	%1, %0					\n"	\
	"	bltz	%0, 2f					\n"	\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" #sc "	%1, %2					\n"	\
	"	" __stringify(SC_BEQZ) "	%1, 1b		\n"	\
	"2:	" __SYNC(full, loongson3_war) "			\n"	\
	"	.set	pop					\n"	\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i)							\
	: __LLSC_CLOBBER);						\
									\
	/*								\
	 * In the Loongson3 workaround case we already have a		\
	 * completion barrier at 2: above, which is needed due to the	\
	 * bltz that can branch to code outside of the LL/SC loop. As	\
	 * such, we don't need to emit another barrier here.		\
	 */								\
	if (__SYNC_loongson3_war == 0)					\
		smp_mb__after_atomic();					\
									\
	return result;							\
}

ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
#define arch_atomic_dec_if_positive(v)	arch_atomic_sub_if_positive(1, v)

#ifdef CONFIG_64BIT
ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd)
#define arch_atomic64_dec_if_positive(v)	arch_atomic64_sub_if_positive(1, v)
#endif

#undef ATOMIC_SIP_OP
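
/*
 * Usage sketch (hypothetical caller): arch_atomic_dec_if_positive() is the
 * typical consumer of ATOMIC_SIP_OP, e.g. a counting-semaphore style check
 * where the count may only be consumed while the result stays non-negative:
 *
 *	if (arch_atomic_dec_if_positive(&sem_count) < 0)
 *		return -EAGAIN;		// nothing left; count is unchanged
 *
 * (sem_count and the error handling are illustrative only; kernel code
 * normally reaches these helpers through the atomic_dec_if_positive()
 * wrappers in <linux/atomic.h>.)
 */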

#endif /* _ASM_ATOMIC_H */