xref: /linux/arch/m68k/include/asm/atomic.h (revision 2db56e8606016e33903c64feaed989ffecd66a1b)
#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic_set(v, i)	(((v)->counter) = i)
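
/*
 * Example of the basic interface (hypothetical caller, not part of
 * this header): a simple event counter.
 *
 *	static atomic_t nr_events = ATOMIC_INIT(0);
 *
 *	atomic_inc(&nr_events);
 *	pr_info("events so far: %d\n", atomic_read(&nr_events));
 */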

/*
 * The ColdFire parts cannot do some immediate to memory operations,
 * so for them we do not specify the "i" asm constraint.
 */
#ifdef CONFIG_COLDFIRE
#define	ASM_DI	"d"
#else
#define	ASM_DI	"di"
#endif
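
/*
 * Here "d" restricts the operand to a data register, while "di" also
 * allows an immediate constant: the classic 680x0 parts can apply an
 * immediate directly to a memory operand, the ColdFire parts cannot.
 */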

static inline void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
}

static inline void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : ASM_DI (i));
}

static inline void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}

static inline void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}

static inline int atomic_dec_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
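
/*
 * Typical use of atomic_dec_and_test() (hypothetical caller, not part
 * of this header): drop a reference and clean up only when the last
 * reference goes away.
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		kfree(obj);
 */

/*
 * Decrement and test for "less than zero": "slt" latches the condition
 * after the subtract, so this returns true once the counter has gone
 * negative.
 */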
static inline int atomic_dec_and_test_lt(atomic_t *v)
{
	char c;
	__asm__ __volatile__(
		"subql #1,%1; slt %0"
		: "=d" (c), "+m" (*v));
	return c != 0;
}

static inline int atomic_inc_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

#ifdef CONFIG_RMW_INSNS

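/*
 * With CONFIG_RMW_INSNS the CPU can issue indivisible read-modify-write
 * bus cycles, so the *_return variants below are built as a
 * load / modify / "casl" compare-and-swap retry loop.
 */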
static inline int atomic_add_return(int i, atomic_t *v)
{
	int t, tmp;

	__asm__ __volatile__(
			"1:	movel %2,%1\n"
			"	addl %3,%1\n"
			"	casl %2,%1,%0\n"
			"	jne 1b"
			: "+m" (*v), "=&d" (t), "=&d" (tmp)
			: "g" (i), "2" (atomic_read(v)));
	return t;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	int t, tmp;

	__asm__ __volatile__(
			"1:	movel %2,%1\n"
			"	subl %3,%1\n"
			"	casl %2,%1,%0\n"
			"	jne 1b"
			: "+m" (*v), "=&d" (t), "=&d" (tmp)
			: "g" (i), "2" (atomic_read(v)));
	return t;
}

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#else /* !CONFIG_RMW_INSNS */

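/*
 * Without RMW instructions we fall back to masking local interrupts
 * around the read-modify-write.  As noted at the top of this file,
 * there are no SMP m68k systems, so this is sufficient.
 */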
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int t;

	local_irq_save(flags);
	t = atomic_read(v);
	t += i;
	atomic_set(v, t);
	local_irq_restore(flags);

	return t;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int t;

	local_irq_save(flags);
	t = atomic_read(v);
	t -= i;
	atomic_set(v, t);
	local_irq_restore(flags);

	return t;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	if (prev == old)
		atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

static inline int atomic_xchg(atomic_t *v, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

#endif /* !CONFIG_RMW_INSNS */

#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))

static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}

static inline int atomic_add_negative(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("addl %2,%1; smi %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}

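/*
 * atomic_clear_mask()/atomic_set_mask() operate on a plain bitmask
 * word rather than an atomic_t; clearing is done by and-ing with the
 * complemented mask, setting by or-ing the mask in.
 */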
static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask)));
}

static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
}

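/*
 * Add "a" to "v" unless "v" already equals "u"; return the value that
 * was seen before the (possible) addition.  The cmpxchg loop retries
 * until either "u" is observed or the compare-and-exchange succeeds.
 */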
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}

#endif /* __ARCH_M68K_ATOMIC__ */