/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_ATOMIC_LLSC_H
#define __ASM_SH_ATOMIC_LLSC_H

/*
 * SH-4A note:
 *
 * We basically get atomic_xxx_return() for free compared with
 * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
 * encoding, so the return value is produced automatically, without
 * any special work.
 */
/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

#define ATOMIC_OP(op)							\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__ (						\
"1:	movli.l @%2, %0		! atomic_" #op "\n"			\
"	" #op "	%1, %0				\n"			\
"	movco.l	%0, @%2				\n"			\
"	bf	1b				\n"			\
	: "=&z" (tmp)							\
	: "r" (i), "r" (&v->counter)					\
	: "t");								\
}
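/*
 * For reference, ATOMIC_OP(add) expands to (roughly):
 *
 *	static inline void arch_atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long tmp;
 *
 *		__asm__ __volatile__ (
 *	"1:	movli.l @%2, %0		! atomic_add\n"
 *	"	add	%1, %0				\n"
 *	"	movco.l	%0, @%2				\n"
 *	"	bf	1b				\n"
 *		: "=&z" (tmp)
 *		: "r" (i), "r" (&v->counter)
 *		: "t");
 *	}
 *
 * "=&z" pins tmp to r0 (the movli.l/movco.l encodings demand it) and
 * "t" marks the T flag, written by movco.l and tested by bf, as
 * clobbered.
 */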

#define ATOMIC_OP_RETURN(op)						\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	unsigned long temp;						\
									\
	__asm__ __volatile__ (						\
"1:	movli.l @%2, %0		! atomic_" #op "_return	\n"		\
"	" #op "	%1, %0					\n"		\
"	movco.l	%0, @%2					\n"		\
"	bf	1b					\n"		\
"	synco						\n"		\
	: "=&z" (temp)							\
	: "r" (i), "r" (&v->counter)					\
	: "t");								\
									\
	return temp;							\
}
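/*
 * Unlike ATOMIC_OP(), the _return variant ends in "synco", giving the
 * fully ordered semantics the kernel expects of *_return atomics.
 * Callers normally reach this through atomic_*_return(); a minimal
 * usage sketch (refs and free_the_object() are hypothetical, for
 * illustration only):
 *
 *	atomic_t refs = ATOMIC_INIT(1);
 *
 *	if (arch_atomic_sub_return(1, &refs) == 0)
 *		free_the_object();	// we dropped the last reference
 */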

#define ATOMIC_FETCH_OP(op)						\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long res, temp;					\
									\
	__asm__ __volatile__ (						\
"1:	movli.l @%3, %0		! atomic_fetch_" #op "	\n"		\
"	mov %0, %1					\n"		\
"	" #op "	%2, %0					\n"		\
"	movco.l	%0, @%3					\n"		\
"	bf	1b					\n"		\
"	synco						\n"		\
	: "=&z" (temp), "=&r" (res)					\
	: "r" (i), "r" (&v->counter)					\
	: "t");								\
									\
	return res;							\
}
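/*
 * Note the "mov %0, %1" before the operation: res snapshots the old
 * value, so arch_atomic_fetch_##op() returns what the counter held
 * *before* the update. A sketch of the semantics (v is hypothetical):
 *
 *	atomic_t v = ATOMIC_INIT(5);
 *	int old = arch_atomic_fetch_add(3, &v);
 *	// old == 5, arch_atomic_read(&v) == 8
 */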

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

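/*
 * These #defines (here and after the bitwise ops below) tell the
 * generic fallback machinery behind <linux/atomic.h> that the ops are
 * provided by the architecture, so no generic fallbacks are generated
 * for them.
 */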
#define arch_atomic_add_return	arch_atomic_add_return
#define arch_atomic_sub_return	arch_atomic_sub_return
#define arch_atomic_fetch_add	arch_atomic_fetch_add
#define arch_atomic_fetch_sub	arch_atomic_fetch_sub

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#define arch_atomic_fetch_and	arch_atomic_fetch_and
#define arch_atomic_fetch_or	arch_atomic_fetch_or
#define arch_atomic_fetch_xor	arch_atomic_fetch_xor
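/*
 * Usage sketch for the bitwise fetch ops (FLAG and state are
 * hypothetical, for illustration only); fetch_and makes a natural
 * test-and-clear:
 *
 *	#define FLAG	0x1
 *	atomic_t state = ATOMIC_INIT(FLAG);
 *
 *	if (arch_atomic_fetch_and(~FLAG, &state) & FLAG)
 *		...;	// FLAG was set and we cleared it
 */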

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#endif /* __ASM_SH_ATOMIC_LLSC_H */