xref: /linux/arch/riscv/include/asm/bitops.h (revision 24bce201d79807b668bf9d9e0aca801c5c0d5f78)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_BITOPS_H
#define _ASM_RISCV_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error "Only <linux/bitops.h> can be included directly"
#endif /* _LINUX_BITOPS_H */

#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/bitsperlong.h>

#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffs.h>

#include <asm-generic/bitops/hweight.h>

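/*
 * __AMO() expands to the AMO mnemonic that matches the width of an
 * unsigned long: "amo<op>.d" on 64-bit and "amo<op>.w" on 32-bit, so every
 * bitop below operates on exactly one word of the bitmap.
 */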
#if (BITS_PER_LONG == 64)
#define __AMO(op)	"amo" #op ".d"
#elif (BITS_PER_LONG == 32)
#define __AMO(op)	"amo" #op ".w"
#else
#error "Unexpected BITS_PER_LONG"
#endif

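/*
 * __test_and_op_bit_ord() issues a single AMO on the word containing bit
 * @nr, using the ordering suffix @ord, and evaluates to whether that bit
 * was set in the old value.  @mod adjusts the mask, so the same template
 * covers clearing bits via amoand with an inverted mask.
 */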
#define __test_and_op_bit_ord(op, mod, nr, addr, ord)		\
({								\
	unsigned long __res, __mask;				\
	__mask = BIT_MASK(nr);					\
	__asm__ __volatile__ (					\
		__AMO(op) #ord " %0, %2, %1"			\
		: "=r" (__res), "+A" (addr[BIT_WORD(nr)])	\
		: "r" (mod(__mask))				\
		: "memory");					\
	((__res & __mask) != 0);				\
})

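/*
 * __op_bit_ord() is the value-discarding variant: the AMO's old value goes
 * to the zero register, leaving only the atomic read-modify-write of the
 * word in memory.
 */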
#define __op_bit_ord(op, mod, nr, addr, ord)			\
	__asm__ __volatile__ (					\
		__AMO(op) #ord " zero, %1, %0"			\
		: "+A" (addr[BIT_WORD(nr)])			\
		: "r" (mod(BIT_MASK(nr)))			\
		: "memory");

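/*
 * __test_and_op_bit() returns the old bit value and is fully ordered
 * (.aqrl); __op_bit() returns nothing and uses the relaxed AMO, leaving any
 * required ordering to the caller (e.g. smp_mb__before_atomic()).
 */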
#define __test_and_op_bit(op, mod, nr, addr) 			\
	__test_and_op_bit_ord(op, mod, nr, addr, .aqrl)
#define __op_bit(op, mod, nr, addr)				\
	__op_bit_ord(op, mod, nr, addr, )

/* Bitmask modifiers */
#define __NOP(x)	(x)
#define __NOT(x)	(~(x))

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation may be reordered on architectures other than x86.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	return __test_and_op_bit(or, __NOP, nr, addr);
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation can be reordered on architectures other than x86.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	return __test_and_op_bit(and, __NOT, nr, addr);
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	return __test_and_op_bit(xor, __NOP, nr, addr);
}
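/*
 * Illustrative only (not part of this header's API): a sketch of how a
 * hypothetical driver might claim a free slot from a small bitmap using the
 * atomic test-and-set above.  The slot_map and claim_slot() names are made
 * up for this example.
 *
 *	static DECLARE_BITMAP(slot_map, 64);
 *
 *	static int claim_slot(void)
 *	{
 *		int i;
 *
 *		for (i = 0; i < 64; i++)
 *			if (!test_and_set_bit(i, slot_map))
 *				return i;	// bit was clear, now ours
 *		return -EBUSY;			// all slots taken
 *	}
 */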

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * do not rely on any ordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	__op_bit(or, __NOP, nr, addr);
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * do not rely on any ordering guarantees.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	__op_bit(and, __NOT, nr, addr);
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() may be reordered on architectures other than x86.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	__op_bit(xor, __NOP, nr, addr);
}

/**
 * test_and_set_bit_lock - Set a bit and return its old value, for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and provides acquire barrier semantics.
 * It can be used to implement bit locks.
 */
static inline int test_and_set_bit_lock(
	unsigned long nr, volatile unsigned long *addr)
{
	return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq);
}

/**
 * clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * This operation is atomic and provides release barrier semantics.
 */
static inline void clear_bit_unlock(
	unsigned long nr, volatile unsigned long *addr)
{
	__op_bit_ord(and, __NOT, nr, addr, .rl);
}

/**
 * __clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * This operation is like clear_bit_unlock(), but it is not atomic.
 * It does provide release barrier semantics so it can be used to unlock
 * a bit lock, but it should only be used when no other CPU can modify
 * any bits in the memory until the lock is released (a good example is
 * if the bit lock itself protects access to the other bits in the word).
 *
 * On RISC-V systems there seems to be no benefit to taking advantage of the
 * non-atomic property here: it's a lot more instructions and we still have to
 * provide release semantics anyway.
 */
static inline void __clear_bit_unlock(
	unsigned long nr, volatile unsigned long *addr)
{
	clear_bit_unlock(nr, addr);
}
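/*
 * Illustrative only: a sketch of the bit-lock pattern these helpers are
 * meant for.  The foo_flags word and FOO_LOCK_BIT name are hypothetical;
 * real users typically go through <linux/bit_spinlock.h> instead.
 *
 *	#define FOO_LOCK_BIT	0
 *	static unsigned long foo_flags;
 *
 *	static void foo_lock(void)
 *	{
 *		while (test_and_set_bit_lock(FOO_LOCK_BIT, &foo_flags))
 *			cpu_relax();	// spin until the old value was 0
 *	}
 *
 *	static void foo_unlock(void)
 *	{
 *		clear_bit_unlock(FOO_LOCK_BIT, &foo_flags);
 *	}
 */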

#undef __test_and_op_bit
#undef __op_bit
#undef __NOP
#undef __NOT
#undef __AMO

#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* _ASM_RISCV_BITOPS_H */