xref: /linux/arch/hexagon/include/asm/atomic.h (revision a083ecc9333c62237551ad93f42e86a42a3c7cc2)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Atomic operations for the Hexagon architecture
4  *
5  * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
6  */
7 
8 #ifndef _ASM_ATOMIC_H
9 #define _ASM_ATOMIC_H
10 
11 #include <linux/types.h>
12 #include <asm/cmpxchg.h>
13 #include <asm/barrier.h>
14 
/*  Normal writes in our arch don't clear lock reservations  */

/*
 * arch_atomic_set - unconditionally store a new value into an atomic_t
 * @v: pointer to atomic value
 * @new: value to store
 *
 * A plain store would leave another core's load-locked reservation
 * intact (see note above), so the store is performed as a dummy
 * load-locked followed by a store-conditional: the conditional store
 * both writes the value and knocks out competing reservations.
 * Retries until the store-conditional succeeds.
 */
static inline void arch_atomic_set(atomic_t *v, int new)
{
	asm volatile(
		"1:	r6 = memw_locked(%0);\n"	/* take reservation; loaded value is discarded */
		"	memw_locked(%0,p0) = %1;\n"	/* conditional store, success -> p0 */
		"	if (!P0) jump 1b;\n"		/* lost the reservation -- retry */
		:
		: "r" (&v->counter), "r" (new)
		: "memory", "p0", "r6"		/* r6 holds the dummy load, p0 the store result */
	);
}
28 
/* The release-flavored set maps to the plain arch_atomic_set(). */
#define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))
30 
/**
 * arch_atomic_read - reads a word, atomically
 * @v: pointer to atomic value
 *
 * Assumes all word reads on our architecture are atomic.
 * READ_ONCE() keeps the compiler from tearing, caching, or refetching
 * the load.
 */
#define arch_atomic_read(v)		READ_ONCE((v)->counter)
38 
/*
 * ATOMIC_OP - generate a void read-modify-write op, arch_atomic_<op>()
 * @op: Hexagon ALU instruction mnemonic (add, sub, and, or, xor)
 *
 * Classic load-locked/store-conditional loop: load the counter with a
 * reservation, apply @op, then attempt the conditional store; if the
 * reservation was lost (P3 clear) retry from the load.
 *
 * Note: the stray line-continuation backslash after the closing brace
 * has been dropped -- it made the macro swallow the following source
 * line into its expansion, unlike its ATOMIC_OP_RETURN/ATOMIC_FETCH_OP
 * siblings.
 */
#define ATOMIC_OP(op)							\
static inline void arch_atomic_##op(int i, atomic_t *v)			\
{									\
	int output;							\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%1);\n"			\
		"	%0 = "#op "(%0,%2);\n"				\
		"	memw_locked(%1,P3)=%0;\n"			\
		"	if (!P3) jump 1b;\n"				\
		: "=&r" (output)					\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
}
54 
/*
 * ATOMIC_OP_RETURN - generate arch_atomic_<op>_return()
 * @op: Hexagon ALU instruction mnemonic
 *
 * Same ll/sc retry loop as ATOMIC_OP, but the function returns the
 * *new* value of the counter: %0 holds the updated value that was
 * conditionally stored, and that is what is handed back.
 */
#define ATOMIC_OP_RETURN(op)						\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int output;	/* new value: loaded, op-applied, then stored */ \
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%1);\n"			\
		"	%0 = "#op "(%0,%2);\n"				\
		"	memw_locked(%1,P3)=%0;\n"			\
		"	if (!P3) jump 1b;\n"				\
		: "=&r" (output)					\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
	return output;							\
}
71 
/*
 * ATOMIC_FETCH_OP - generate arch_atomic_fetch_<op>()
 * @op: Hexagon ALU instruction mnemonic
 *
 * Like ATOMIC_OP_RETURN, but returns the *old* value.  Two outputs are
 * needed: %0 ("output") keeps the value observed at the load-locked,
 * %1 ("val") holds the new value that the conditional store writes.
 */
#define ATOMIC_FETCH_OP(op)						\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int output, val;	/* output = old value, val = new value */ \
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%2);\n"			\
		"	%1 = "#op "(%0,%3);\n"				\
		"	memw_locked(%2,P3)=%1;\n"			\
		"	if (!P3) jump 1b;\n"				\
		: "=&r" (output), "=&r" (val)				\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
	return output;							\
}
88 
/* add/sub get all three flavors: void, *_return, and fetch_*. */
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

/*
 * Self-referential defines: by kernel convention these tell the generic
 * atomic headers that the ops are implemented here, so the generic
 * fallbacks are not generated.
 */
#define arch_atomic_add_return			arch_atomic_add_return
#define arch_atomic_sub_return			arch_atomic_sub_return
#define arch_atomic_fetch_add			arch_atomic_fetch_add
#define arch_atomic_fetch_sub			arch_atomic_fetch_sub

/* and/or/xor only get the void and fetch_* flavors (no *_return). */
#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#define arch_atomic_fetch_and			arch_atomic_fetch_and
#define arch_atomic_fetch_or			arch_atomic_fetch_or
#define arch_atomic_fetch_xor			arch_atomic_fetch_xor

/* Generator macros are local to this header -- clean them up. */
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
114 
/**
 * arch_atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer to value
 * @a: amount to add
 * @u: unless value is equal to u
 *
 * Returns the old value of @v.  If the old value equals @u, @v is
 * left unmodified; otherwise @a is added atomically via the usual
 * ll/sc retry loop.
 */

static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int __oldval;		/* value observed at the load-locked; returned */
	register int tmp;	/* proposed new value (old + a) */

	asm volatile(
		"1:	%0 = memw_locked(%2);"
		/*
		 * Compare and add issue in one packet; the new-value
		 * predicate (p3.new) takes the early exit to 2f before
		 * the store when the old value equals @u.
		 */
		"	{"
		"		p3 = cmp.eq(%0, %4);"
		"		if (p3.new) jump:nt 2f;"
		"		%1 = add(%0, %3);"
		"	}"
		/* Conditional store; p3 is rewritten with the outcome. */
		"	memw_locked(%2, p3) = %1;"
		"	{"
		"		if (!p3) jump 1b;"	/* reservation lost -- retry */
		"	}"
		"2:"
		: "=&r" (__oldval), "=&r" (tmp)
		: "r" (v), "r" (a), "r" (u)
		: "memory", "p3"
	);
	return __oldval;
}
/* Signal the generic headers that this op is provided here. */
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
149 
150 #endif
151