/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_LOCAL_H
#define _ASM_X86_LOCAL_H

#include <linux/percpu.h>

#include <linux/atomic.h>
#include <asm/asm.h>

typedef struct {
	atomic_long_t a;
} local_t;

#define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }

#define local_read(l)	atomic_long_read(&(l)->a)
#define local_set(l, i)	atomic_long_set(&(l)->a, (i))

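/*
 * Usage sketch (illustrative only, not part of this header): local_t is
 * intended for per-CPU counters that are only modified by their owning
 * CPU. The names nr_events/count_event below are made up for the example.
 *
 *	static DEFINE_PER_CPU(local_t, nr_events) = LOCAL_INIT(0);
 *
 *	static void count_event(void)
 *	{
 *		local_inc(this_cpu_ptr(&nr_events));
 *	}
 */
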
static inline void local_inc(local_t *l)
{
	asm volatile(_ASM_INC "%0"
		     : "+m" (l->a.counter));
}

static inline void local_dec(local_t *l)
{
	asm volatile(_ASM_DEC "%0"
		     : "+m" (l->a.counter));
}

static inline void local_add(long i, local_t *l)
{
	asm volatile(_ASM_ADD "%1,%0"
		     : "+m" (l->a.counter)
		     : "ir" (i));
}

static inline void local_sub(long i, local_t *l)
{
	asm volatile(_ASM_SUB "%1,%0"
		     : "+m" (l->a.counter)
		     : "ir" (i));
}

/**
 * local_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @l: pointer to type local_t
 *
 * Atomically subtracts @i from @l and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline bool local_sub_and_test(long i, local_t *l)
{
	return GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, e, "er", i);
}

/**
 * local_dec_and_test - decrement and test
 * @l: pointer to type local_t
 *
 * Atomically decrements @l by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline bool local_dec_and_test(local_t *l)
{
	return GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, e);
}

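/*
 * Illustrative sketch (hypothetical names, not from this file): the
 * *_and_test() helpers let the caller act exactly once, when the counter
 * reaches zero, e.g. signalling completion of the last in-flight request:
 *
 *	if (local_dec_and_test(this_cpu_ptr(&inflight)))
 *		complete(&all_done);
 */
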
/**
 * local_inc_and_test - increment and test
 * @l: pointer to type local_t
 *
 * Atomically increments @l by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline bool local_inc_and_test(local_t *l)
{
	return GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, e);
}

/**
 * local_add_negative - add and test if negative
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static inline bool local_add_negative(long i, local_t *l)
{
	return GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, s, "er", i);
}

/**
 * local_add_return - add and return
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l and returns @i + @l
 */
static inline long local_add_return(long i, local_t *l)
{
	long __i = i;
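
	/*
	 * XADD leaves the counter's old value in the register operand, so
	 * after the exchange-and-add below, @i holds the old counter value
	 * and adding back the saved increment yields the new value.
	 */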
	asm volatile(_ASM_XADD "%0, %1;"
		     : "+r" (i), "+m" (l->a.counter)
		     : : "memory");
	return i + __i;
}

static inline long local_sub_return(long i, local_t *l)
{
	return local_add_return(-i, l);
}

#define local_inc_return(l)  (local_add_return(1, l))
#define local_dec_return(l)  (local_sub_return(1, l))

static inline long local_cmpxchg(local_t *l, long old, long new)
{
	return cmpxchg_local(&l->a.counter, old, new);
}

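/*
 * Returns true and stores @new if the counter equalled *@old; otherwise
 * returns false and updates *@old with the value actually seen, which is
 * what makes cmpxchg retry loops cheap (see local_xchg() below).
 */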
static inline bool local_try_cmpxchg(local_t *l, long *old, long new)
{
	return try_cmpxchg_local(&l->a.counter,
				 (typeof(l->a.counter) *) old, new);
}

/*
 * Implement local_xchg using CMPXCHG instruction without the LOCK prefix.
 * XCHG is expensive due to the implied LOCK prefix.  The processor
 * cannot prefetch cachelines if XCHG is used.
 */
static __always_inline long
local_xchg(local_t *l, long n)
{
	long c = local_read(l);

	do { } while (!local_try_cmpxchg(l, &c, n));

	return c;
}
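
/*
 * Usage sketch (nr_pending is a made-up example counter): local_xchg()
 * can atomically harvest and reset a per-CPU statistic:
 *
 *	long pending = local_xchg(this_cpu_ptr(&nr_pending), 0);
 */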

/**
 * local_add_unless - add unless the number is already a given value
 * @l: pointer of type local_t
 * @a: the amount to add to l...
 * @u: ...unless l is equal to u.
 *
 * Atomically adds @a to @l, if @l was not already @u.
 * Returns true if the addition was done.
 */
static __always_inline bool
local_add_unless(local_t *l, long a, long u)
{
	long c = local_read(l);

	do {
		if (unlikely(c == u))
			return false;
	} while (!local_try_cmpxchg(l, &c, c + a));

	return true;
}

#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
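
/*
 * Illustrative sketch (hypothetical object, not from this file):
 * local_inc_not_zero() is the usual "grab a reference unless it already
 * dropped to zero" idiom:
 *
 *	if (!local_inc_not_zero(&buf->nr_users))
 *		return -EBUSY;
 */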

/*
 * On x86_32, these are no better than the atomic variants.
 * On x86-64 these are better than the atomic variants on SMP kernels
 * because they don't use a lock prefix.
 */
#define __local_inc(l)		local_inc(l)
#define __local_dec(l)		local_dec(l)
#define __local_add(i, l)	local_add((i), (l))
#define __local_sub(i, l)	local_sub((i), (l))
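
/*
 * Illustrative note: all operations above are atomic only with respect to
 * the CPU that owns the counter (e.g. against interrupts on that CPU), not
 * across CPUs. A cross-CPU reader should stick to local_read(), e.g.
 * (nr_events as in the sketch near the top of this file):
 *
 *	long total = 0;
 *	int cpu;
 *
 *	for_each_online_cpu(cpu)
 *		total += local_read(per_cpu_ptr(&nr_events, cpu));
 */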

#endif /* _ASM_X86_LOCAL_H */