#ifndef __ASM_SH_ATOMIC_LLSC_H
#define __ASM_SH_ATOMIC_LLSC_H

/*
 * SH-4A note:
 *
 * We basically get atomic_xxx_return() for free compared with
 * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
 * encoding, so the retval is automatically set without having to
 * do any special work.
 */
/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__ (						\
"1:	movli.l @%2, %0		! atomic_" #op "\n"			\
"	" #op "	%1, %0				\n"			\
"	movco.l	%0, @%2				\n"			\
"	bf	1b				\n"			\
	: "=&z" (tmp)							\
	: "r" (i), "r" (&v->counter)					\
	: "t");								\
}
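
/*
 * For reference, a sketch of what ATOMIC_OP(add) expands to. The "z"
 * constraint pins %0 to r0, which the movli.l/movco.l encodings require:
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long tmp;
 *
 *		__asm__ __volatile__ (
 *	"1:	movli.l @%2, %0		! atomic_add\n"
 *	"	add	%1, %0				\n"
 *	"	movco.l	%0, @%2				\n"
 *	"	bf	1b				\n"
 *		: "=&z" (tmp)
 *		: "r" (i), "r" (&v->counter)
 *		: "t");
 *	}
 */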

#define ATOMIC_OP_RETURN(op)						\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long temp;						\
									\
	__asm__ __volatile__ (						\
"1:	movli.l @%2, %0		! atomic_" #op "_return	\n"		\
"	" #op "	%1, %0					\n"		\
"	movco.l	%0, @%2					\n"		\
"	bf	1b					\n"		\
"	synco						\n"		\
	: "=&z" (temp)							\
	: "r" (i), "r" (&v->counter)					\
	: "t");								\
									\
	return temp;							\
}
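
/*
 * The trailing synco is an SH-4A synchronization instruction that acts
 * as a barrier, so the *_return variants double as ordering points; the
 * plain atomic_##op() versions above omit it. The value returned is the
 * post-operation one, since r0 still holds the result that movco.l
 * just stored.
 */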

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)
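
/*
 * Usage sketch (hypothetical caller, not part of this header):
 *
 *	atomic_t count = ATOMIC_INIT(0);
 *
 *	atomic_add(2, &count);			! counter is now 2
 *	atomic_sub(1, &count);			! counter is now 1
 *	if (atomic_sub_return(1, &count) == 0)	! returns the new value
 *		do_cleanup();			! hypothetical helper
 */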

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_clear_mask	\n"
"	and	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
	: "=&z" (tmp)
	: "r" (~mask), "r" (&v->counter)
	: "t");
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_set_mask	\n"
"	or	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
	: "=&z" (tmp)
	: "r" (mask), "r" (&v->counter)
	: "t");
}
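
/*
 * Usage sketch for the mask helpers (hypothetical flag bits, not part
 * of this header):
 *
 *	#define FLAG_BUSY	0x01
 *	#define FLAG_DIRTY	0x02
 *
 *	atomic_t flags = ATOMIC_INIT(0);
 *
 *	atomic_set_mask(FLAG_BUSY | FLAG_DIRTY, &flags);	! or in both bits
 *	atomic_clear_mask(FLAG_BUSY, &flags);			! and with ~mask
 */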

#endif /* __ASM_SH_ATOMIC_LLSC_H */