xref: /linux/arch/hexagon/include/asm/atomic.h (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
/*
 * Atomic operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)		{ (i) }

/*
 * Normal writes in our arch don't clear lock reservations, so a plain
 * store to an atomic_t could be silently overwritten by a racing
 * store-conditional.  atomic_set() therefore uses a locked
 * load/store sequence of its own.
 */
static inline void atomic_set(atomic_t *v, int new)
{
	asm volatile(
		"1:	r6 = memw_locked(%0);\n"
		"	memw_locked(%0,p0) = %1;\n"
		"	if (!p0) jump 1b;\n"
		:
		: "r" (&v->counter), "r" (new)
		: "memory", "p0", "r6"
	);
}
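
/*
 * Illustrative sketch, not part of the original header: static
 * initialization uses ATOMIC_INIT(), while runtime (re)initialization
 * must go through atomic_set().  The names below are hypothetical.
 *
 *	static atomic_t example_count = ATOMIC_INIT(0);
 */
static inline void example_count_reset(atomic_t *v)
{
	/*
	 * A plain "v->counter = 0" could be overwritten by a concurrent
	 * store-conditional, since ordinary stores don't clear the
	 * reservation; atomic_set() cannot lose that race.
	 */
	atomic_set(v, 0);
}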

/**
 * atomic_read - reads a word, atomically
 * @v: pointer to atomic value
 *
 * Assumes all word reads on our architecture are atomic.
 */
#define atomic_read(v)		((v)->counter)

/**
 * atomic_xchg - atomically exchange in a new value, returning the old one
 * @v: pointer to memory to change
 * @new: new value (technically passed in a register -- see xchg)
 */
#define atomic_xchg(v, new)	(xchg(&((v)->counter), (new)))
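
/*
 * Illustrative sketch, not part of the original header: atomic_xchg()
 * used to atomically consume a pending flag, so that exactly one caller
 * observes the nonzero value.  The function name is hypothetical.
 */
static inline int example_test_and_clear_pending(atomic_t *pending)
{
	/* Swap in 0 and hand back whatever was there before. */
	return atomic_xchg(pending, 0);
}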

/**
 * atomic_cmpxchg - atomic compare-and-exchange values
 * @v: pointer to value to change
 * @old:  desired old value to match
 * @new:  new value to put in
 *
 * Parameters are then pointer, value-in-register, value-in-register,
 * and the output is the old value.
 *
 * Archs that don't have a load-locked/store-conditional primitive like
 * our memw_locked have to implement this in some more roundabout way.
 *
 * Kind of the lynchpin of the rest of the generically defined routines.
 * Remember V2 had that bug with dotnew predicate set by memw_locked.
 *
 * "old" is "expected" old val, __oldval is actual old value
 */
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int __oldval;

	asm volatile(
		"1:	%0 = memw_locked(%1);\n"
		"	{ p0 = cmp.eq(%0,%2);\n"
		"	  if (!p0.new) jump:nt 2f; }\n"
		"	memw_locked(%1,p0) = %3;\n"
		"	if (!p0) jump 1b;\n"
		"2:\n"
		: "=&r" (__oldval)
		: "r" (&v->counter), "r" (old), "r" (new)
		: "memory", "p0"
	);

	return __oldval;
}
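
/*
 * Illustrative sketch, not part of the original header: the canonical
 * compare-and-exchange retry loop, here incrementing only while below a
 * ceiling.  This is the shape of the generically defined routines that
 * atomic_cmpxchg() underpins.  The function name is hypothetical.
 */
static inline int example_inc_below(atomic_t *v, int ceiling)
{
	int old, seen;

	for (;;) {
		old = atomic_read(v);
		if (old >= ceiling)
			return old;
		/* Succeeds only if *v still holds the value we read. */
		seen = atomic_cmpxchg(v, old, old + 1);
		if (seen == old)
			return old;
		/* Lost a race; loop and retry against the fresh value. */
	}
}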

#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	int output;							\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%1);\n"			\
		"	%0 = "#op "(%0,%2);\n"				\
		"	memw_locked(%1,p3) = %0;\n"			\
		"	if (!p3) jump 1b;\n"				\
		: "=&r" (output)					\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
}

#define ATOMIC_OP_RETURN(op)						\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int output;							\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%1);\n"			\
		"	%0 = "#op "(%0,%2);\n"				\
		"	memw_locked(%1,p3) = %0;\n"			\
		"	if (!p3) jump 1b;\n"				\
		: "=&r" (output)					\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
	return output;							\
}

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
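
/*
 * For reference, a sketch of what the preprocessor output looks like
 * (lightly reformatted): ATOMIC_OP(add) above expands to roughly
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		int output;
 *
 *		__asm__ __volatile__ (
 *			"1:	%0 = memw_locked(%1);\n"
 *			"	%0 = add(%0,%2);\n"
 *			"	memw_locked(%1,p3) = %0;\n"
 *			"	if (!p3) jump 1b;\n"
 *			: "=&r" (output)
 *			: "r" (&v->counter), "r" (i)
 *			: "memory", "p3"
 *		);
 *	}
 *
 * so the block above provides atomic_{add,sub}() and
 * atomic_{add,sub}_return() plus the void atomic_{and,or,xor}() ops.
 */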

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer to value
 * @a: amount to add
 * @u: don't add if the value is already equal to u
 *
 * Returns the old value.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int __oldval;
	register int tmp;

	asm volatile(
		"1:	%0 = memw_locked(%2);"
		"	{"
		"		p3 = cmp.eq(%0, %4);"
		"		if (p3.new) jump:nt 2f;"
		"		%1 = add(%0, %3);"
		"	}"
		"	memw_locked(%2, p3) = %1;"
		"	{"
		"		if (!p3) jump 1b;"
		"	}"
		"2:"
		: "=&r" (__oldval), "=&r" (tmp)
		: "r" (v), "r" (a), "r" (u)
		: "memory", "p3"
	);
	return __oldval;
}
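
/*
 * Illustrative sketch, not part of the original header: the classic
 * "take a reference only if the object is still live" pattern that
 * __atomic_add_unless() supports.  Returns nonzero on success.  The
 * function name is hypothetical.
 */
static inline int example_get_ref_if_live(atomic_t *refs)
{
	/*
	 * An old value of 0 means the object is already dead; don't
	 * resurrect it by incrementing from zero.
	 */
	return __atomic_add_unless(refs, 1, 0) != 0;
}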

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#define atomic_inc(v) atomic_add(1, (v))
#define atomic_dec(v) atomic_sub(1, (v))

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, (v)) == 0)
#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)

#define atomic_inc_return(v) (atomic_add_return(1, (v)))
#define atomic_dec_return(v) (atomic_sub_return(1, (v)))
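
/*
 * Illustrative sketch, not part of the original header: a typical
 * release path built on atomic_dec_and_test().  Only the caller that
 * drops the last reference sees it return true and runs the destructor.
 * The names are hypothetical.
 */
static inline void example_put_ref(atomic_t *refs, void (*release)(void))
{
	if (atomic_dec_and_test(refs))
		release();
}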

#endif /* _ASM_ATOMIC_H */