xref: /linux/arch/m68k/include/asm/atomic.h (revision cdd5b5a9761fd66d17586e4f4ba6588c70e640ea)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic_set(v, i)	WRITE_ONCE(((v)->counter), (i))

/*
 * The ColdFire parts cannot do some immediate to memory operations,
 * so for them we do not specify the "i" asm constraint.
 */
#ifdef CONFIG_COLDFIRE
#define	ASM_DI	"d"
#else
#define	ASM_DI	"di"
#endif
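
/*
 * Constraint note: "d" restricts the operand to a data register, while
 * "di" also allows an immediate constant.  ColdFire cannot use an
 * immediate source for these read-modify-write memory instructions,
 * hence the narrower ColdFire definition above.
 */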

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	__asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
}

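/*
 * Illustrative expansion: ATOMIC_OPS(add, +=, add) further down turns
 * the template above into
 *
 *	static inline void arch_atomic_add(int i, atomic_t *v)
 *	{
 *		__asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
 *	}
 */
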
#ifdef CONFIG_RMW_INSNS

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	int t, tmp;							\
									\
	__asm__ __volatile__(						\
			"1:	movel %2,%1\n"				\
			"	" #asm_op "l %3,%1\n"			\
			"	casl %2,%1,%0\n"			\
			"	jne 1b"					\
			: "+m" (*v), "=&d" (t), "=&d" (tmp)		\
			: "di" (i), "2" (arch_atomic_read(v)));		\
	return t;							\
}

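/*
 * CAS-based variant: "tmp" holds the value *v is expected to contain,
 * "t" the freshly computed result.  "casl" stores t only if *v still
 * equals tmp; otherwise it reloads tmp and the loop retries.  For
 * op=add this emits, illustratively:
 *
 *	1:	movel %2,%1
 *		addl %3,%1
 *		casl %2,%1,%0
 *		jne 1b
 *
 * The *_return() forms return the new value (t).
 */
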
#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int t, tmp;							\
									\
	__asm__ __volatile__(						\
			"1:	movel %2,%1\n"				\
			"	" #asm_op "l %3,%1\n"			\
			"	casl %2,%1,%0\n"			\
			"	jne 1b"					\
			: "+m" (*v), "=&d" (t), "=&d" (tmp)		\
			: "di" (i), "2" (arch_atomic_read(v)));		\
	return tmp;							\
}

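/*
 * Same CAS loop as above, but the fetch_*() forms return the value the
 * counter held before the operation (tmp) instead of the new result.
 */
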
#else

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	int t;								\
									\
	local_irq_save(flags);						\
	t = (v->counter c_op i);					\
	local_irq_restore(flags);					\
									\
	return t;							\
}

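/*
 * Without the cas instruction (CONFIG_RMW_INSNS unset, e.g. ColdFire)
 * the read-modify-write is made atomic by briefly disabling interrupts,
 * which is sufficient on these non-SMP systems.  For op=add the body
 * above reduces to, illustratively:
 *
 *	t = (v->counter += i);
 *
 * executed under local_irq_save()/local_irq_restore(), so the new
 * value is returned.
 */
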
#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int t;								\
									\
	local_irq_save(flags);						\
	t = v->counter;							\
	v->counter c_op i;						\
	local_irq_restore(flags);					\
									\
	return t;							\
}

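/*
 * As above, but the old value is sampled before the operation is
 * applied; for op=add, illustratively:
 *
 *	t = v->counter;
 *	v->counter += i;
 *
 * so the value prior to the update is returned.
 */
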
#endif /* CONFIG_RMW_INSNS */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

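/*
 * The two instantiations above generate, via the templates:
 *
 *	arch_atomic_add(), arch_atomic_add_return(), arch_atomic_fetch_add()
 *	arch_atomic_sub(), arch_atomic_sub_return(), arch_atomic_fetch_sub()
 *
 * The #defines below advertise them to the generic atomic fallback
 * code as architecture-provided implementations.
 */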
#define arch_atomic_add_return			arch_atomic_add_return
#define arch_atomic_sub_return			arch_atomic_sub_return
#define arch_atomic_fetch_add			arch_atomic_fetch_add
#define arch_atomic_fetch_sub			arch_atomic_fetch_sub

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, eor)

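/*
 * The bitwise operations are generated in their plain and fetch_*()
 * forms only (no *_return() variant is defined here):
 *
 *	arch_atomic_and(), arch_atomic_fetch_and()
 *	arch_atomic_or(),  arch_atomic_fetch_or()
 *	arch_atomic_xor(), arch_atomic_fetch_xor()
 *
 * Note that the m68k mnemonic for exclusive-or is "eor".
 */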
#define arch_atomic_fetch_and			arch_atomic_fetch_and
#define arch_atomic_fetch_or			arch_atomic_fetch_or
#define arch_atomic_fetch_xor			arch_atomic_fetch_xor

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

static inline void arch_atomic_inc(atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}
#define arch_atomic_inc arch_atomic_inc

static inline void arch_atomic_dec(atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}
#define arch_atomic_dec arch_atomic_dec

static inline int arch_atomic_dec_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
#define arch_atomic_dec_and_test arch_atomic_dec_and_test
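
/*
 * Typical (illustrative) reference-counting use; "obj", "refcount" and
 * release_obj() are hypothetical names, not part of this header:
 *
 *	if (arch_atomic_dec_and_test(&obj->refcount))
 *		release_obj(obj);
 */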
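/*
 * Decrement and test "less than": returns true if the counter is
 * negative after the decrement ("slt" latches the signed less-than
 * condition left behind by subql).
 */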
static inline int arch_atomic_dec_and_test_lt(atomic_t *v)
{
	char c;
	__asm__ __volatile__(
		"subql #1,%1; slt %0"
		: "=d" (c), "=m" (*v)
		: "m" (*v));
	return c != 0;
}

static inline int arch_atomic_inc_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
#define arch_atomic_inc_and_test arch_atomic_inc_and_test

#ifndef CONFIG_RMW_INSNS

static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = arch_atomic_read(v);
	if (prev == old)
		arch_atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg
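
/*
 * Illustrative retry loop built on arch_atomic_cmpxchg(); the policy
 * ("only increment while non-zero") and the atomic_t variable "v" are
 * hypothetical, not part of this header:
 *
 *	int old = arch_atomic_read(&v);
 *
 *	while (old != 0) {
 *		int prev = arch_atomic_cmpxchg(&v, old, old + 1);
 *		if (prev == old)
 *			break;
 *		old = prev;
 *	}
 */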

static inline int arch_atomic_xchg(atomic_t *v, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = arch_atomic_read(v);
	arch_atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}
#define arch_atomic_xchg arch_atomic_xchg

#endif /* !CONFIG_RMW_INSNS */

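/*
 * Subtract i from the counter and return true if the result is zero.
 */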
static inline int arch_atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}
#define arch_atomic_sub_and_test arch_atomic_sub_and_test

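/*
 * Add i to the counter and return true if the result is negative
 * ("smi" latches the minus condition).
 */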
static inline int arch_atomic_add_negative(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("addl %2,%1; smi %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}
#define arch_atomic_add_negative arch_atomic_add_negative

#endif /* __ARCH_M68K_ATOMIC__ */