/*
 * PowerPC atomic bit operations.
 *
 * Merged version by David Gibson <david@gibson.dropbear.id.au>.
 * Based on ppc64 versions by: Dave Engebretsen, Todd Inglett, Don
 * Reed, Pat McCarthy, Peter Bergner, Anton Blanchard.  They
 * originally took it from the ppc32 code.
 *
 * Within a word, bits are numbered LSB first.  Lots of places make
 * this assumption by directly testing bits with (val & (1<<nr)).
 * This can cause confusion for large (> 1 word) bitmaps on a
 * big-endian system because, unlike little endian, the number of each
 * bit depends on the word size.
 *
 * The bitop functions are defined to work on unsigned longs, so for a
 * ppc64 system the bits end up numbered:
 *   |63..............0|127............64|191...........128|255...........192|
 * and on ppc32:
 *   |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
 *
 * There are a few little-endian macros used mostly for filesystem
 * bitmaps; these work on similar bit array layouts, but
 * byte-oriented:
 *   |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
 *
 * The main difference is that bits 3-5 (64b) or 3-4 (32b) in the bit
 * number field need to be reversed compared to the big-endian bit
 * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b).
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
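
/*
 * Illustrative sketch (not from the original source): the XOR trick
 * above, written out as a hypothetical helper mapping a little-endian
 * bit number onto the big-endian unsigned-long layout:
 *
 *	static inline unsigned int le_bitno(unsigned int nr)
 *	{
 *		return nr ^ (BITS_PER_LONG == 64 ? 0x38 : 0x18);
 *	}
 *
 * e.g. on ppc64, LE bit 0 (bit 0 of byte 0) maps to bit 56 of the
 * big-endian word, since 0 ^ 0x38 == 56.
 */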

#ifndef _ASM_POWERPC_BITOPS_H
#define _ASM_POWERPC_BITOPS_H

#ifdef __KERNEL__

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/asm-compat.h>
#include <asm/synch.h>

/*
 * clear_bit doesn't imply a memory barrier
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

/*
 * Macro for generating the ***_bits() functions: an atomic
 * read-modify-write of the word at *_p, built from a
 * load-reserve/store-conditional (larx/stcx.) retry loop.
 */
#define DEFINE_BITOP(fn, op, prefix)		\
static __inline__ void fn(unsigned long mask,	\
		volatile unsigned long *_p)	\
{						\
	unsigned long old;			\
	unsigned long *p = (unsigned long *)_p;	\
	__asm__ __volatile__ (			\
	prefix					\
"1:"	PPC_LLARX(%0,0,%3,0) "\n"		\
	stringify_in_c(op) "%0,%0,%2\n"		\
	PPC405_ERR77(0,%3)			\
	PPC_STLCX "%0,0,%3\n"			\
	"bne- 1b\n"				\
	: "=&r" (old), "+m" (*p)		\
	: "r" (mask), "r" (p)			\
	: "cc", "memory");			\
}

DEFINE_BITOP(set_bits, or, "")
DEFINE_BITOP(clear_bits, andc, "")
DEFINE_BITOP(clear_bits_unlock, andc, PPC_RELEASE_BARRIER)
DEFINE_BITOP(change_bits, xor, "")

static __inline__ void set_bit(int nr, volatile unsigned long *addr)
{
	set_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
}

static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
{
	clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
}

static __inline__ void clear_bit_unlock(int nr, volatile unsigned long *addr)
{
	clear_bits_unlock(BIT_MASK(nr), addr + BIT_WORD(nr));
}

static __inline__ void change_bit(int nr, volatile unsigned long *addr)
{
	change_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
}

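/*
 * Usage sketch (illustrative, not from the original source): these
 * wrappers take a bit number and the base of the bitmap; BIT_WORD()
 * selects the word and BIT_MASK() the bit within it.  For a
 * hypothetical bitmap:
 *
 *	DECLARE_BITMAP(flags, 128);
 *	set_bit(70, flags);	(on ppc64: bit 6 of word 1)
 *	clear_bit(70, flags);
 */
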
/*
 * Like DEFINE_BITOP(), but these versions also return the old value
 * of the word, so 'op' takes different arguments and there is an
 * extra output operand for the temporary.
 */
#define DEFINE_TESTOP(fn, op, prefix, postfix, eh)	\
static __inline__ unsigned long fn(			\
		unsigned long mask,			\
		volatile unsigned long *_p)		\
{							\
	unsigned long old, t;				\
	unsigned long *p = (unsigned long *)_p;		\
	__asm__ __volatile__ (				\
	prefix						\
"1:"	PPC_LLARX(%0,0,%3,eh) "\n"			\
	stringify_in_c(op) "%1,%0,%2\n"			\
	PPC405_ERR77(0,%3)				\
	PPC_STLCX "%1,0,%3\n"				\
	"bne- 1b\n"					\
	postfix						\
	: "=&r" (old), "=&r" (t)			\
	: "r" (mask), "r" (p)				\
	: "cc", "memory");				\
	return (old & mask);				\
}

DEFINE_TESTOP(test_and_set_bits, or, PPC_ATOMIC_ENTRY_BARRIER,
	      PPC_ATOMIC_EXIT_BARRIER, 0)
DEFINE_TESTOP(test_and_set_bits_lock, or, "",
	      PPC_ACQUIRE_BARRIER, 1)
DEFINE_TESTOP(test_and_clear_bits, andc, PPC_ATOMIC_ENTRY_BARRIER,
	      PPC_ATOMIC_EXIT_BARRIER, 0)
DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER,
	      PPC_ATOMIC_EXIT_BARRIER, 0)

static __inline__ int test_and_set_bit(unsigned long nr,
				       volatile unsigned long *addr)
{
	return test_and_set_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
}

static __inline__ int test_and_set_bit_lock(unsigned long nr,
				       volatile unsigned long *addr)
{
	return test_and_set_bits_lock(BIT_MASK(nr),
				addr + BIT_WORD(nr)) != 0;
}

static __inline__ int test_and_clear_bit(unsigned long nr,
					 volatile unsigned long *addr)
{
	return test_and_clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
}

static __inline__ int test_and_change_bit(unsigned long nr,
					  volatile unsigned long *addr)
{
	return test_and_change_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
}

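/*
 * Usage sketch (illustrative, not from the original source): the
 * test_and_* forms atomically update the bit and report its previous
 * state, e.g. claiming a slot from a hypothetical allocation bitmap:
 *
 *	if (!test_and_set_bit(slot, bitmap))
 *		... the slot is ours ...
 */
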
#include <asm-generic/bitops/non-atomic.h>

static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" ::: "memory");
	__clear_bit(nr, addr);
}
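
/*
 * Illustrative sketch (not from the original source): the *_lock and
 * *_unlock variants can implement a simple bit spinlock, with acquire
 * semantics on the set and release semantics on the clear.  'flags'
 * here is a hypothetical bitmap.
 *
 *	while (test_and_set_bit_lock(0, flags))
 *		cpu_relax();
 *	... critical section ...
 *	clear_bit_unlock(0, flags);
 */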

/*
 * Return the zero-based bit position (LE, not IBM bit numbering) of
 * the most significant 1-bit in an unsigned long (a doubleword on
 * ppc64, a word on ppc32).
 */
static __inline__ __attribute__((const))
int __ilog2(unsigned long x)
{
	int lz;

	asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (x));
	return BITS_PER_LONG - 1 - lz;
}

static inline __attribute__((const))
int __ilog2_u32(u32 n)
{
	int bit;
	asm ("cntlzw %0,%1" : "=r" (bit) : "r" (n));
	return 31 - bit;
}

#ifdef __powerpc64__
static inline __attribute__((const))
int __ilog2_u64(u64 n)
{
	int bit;
	asm ("cntlzd %0,%1" : "=r" (bit) : "r" (n));
	return 63 - bit;
}
#endif
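
/*
 * Worked example (illustrative): for x = 0x50 on ppc64, cntlzd counts
 * 57 leading zeroes, so __ilog2(0x50) = 64 - 1 - 57 = 6, the position
 * of the most significant set bit.  Note __ilog2(0) yields -1, since
 * the count of leading zeroes equals BITS_PER_LONG.
 */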

/*
 * Determines the bit position of the least significant 0 bit in the
 * specified unsigned long.  The returned bit position will be
 * zero-based, starting from the right side (63/31 - 0).
 */
static __inline__ unsigned long ffz(unsigned long x)
{
	/* no zero exists anywhere in the word */
	if ((x = ~x) == 0)
		return BITS_PER_LONG;

	/*
	 * Calculate the bit position of the least significant '1' bit in x
	 * (since x has been inverted, this is the least significant '0' bit
	 * in the original x).  Note: (x & -x) gives us a mask that is the
	 * least significant (RIGHT-most) 1-bit of the value in x.
	 */
	return __ilog2(x & -x);
}
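
/*
 * Worked example (illustrative): ffz(0xff) inverts x to ...ffffff00;
 * (x & -x) isolates bit 8, so ffz(0xff) = 8, the lowest clear bit.
 */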

static __inline__ int __ffs(unsigned long x)
{
	return __ilog2(x & -x);
}

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	unsigned long i = (unsigned long)x;
	return __ilog2(i & -i) + 1;
}

/*
 * fls: find last (most-significant) bit set.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static __inline__ int fls(unsigned int x)
{
	int lz;

	asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
	return 32 - lz;
}
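
/*
 * Worked examples (illustrative): ffs(0x18) = 4 and fls(0x18) = 5,
 * since bits 3 and 4 are set.  Both return 0 for 0: cntlzw of zero
 * is 32, and the __ilog2-based ffs gives -1 + 1 = 0.
 */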

static __inline__ unsigned long __fls(unsigned long x)
{
	return __ilog2(x);
}

/*
 * 64-bit can do this using one cntlzd (count leading zeroes doubleword)
 * instruction; for 32-bit we use the generic version, which does two
 * 32-bit fls calls.
 */
#ifdef __powerpc64__
static __inline__ int fls64(__u64 x)
{
	int lz;

	asm ("cntlzd %0,%1" : "=r" (lz) : "r" (x));
	return 64 - lz;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif /* __powerpc64__ */

#ifdef CONFIG_PPC64
unsigned int __arch_hweight8(unsigned int w);
unsigned int __arch_hweight16(unsigned int w);
unsigned int __arch_hweight32(unsigned int w);
unsigned long __arch_hweight64(__u64 w);
#include <asm-generic/bitops/const_hweight.h>
#else
#include <asm-generic/bitops/hweight.h>
#endif
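
/*
 * hweight ("Hamming weight", population count) returns the number of
 * set bits, e.g. hweight32(0xf0f0) = 8 (illustrative).
 */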

#include <asm-generic/bitops/find.h>

/* Little-endian versions */
#include <asm-generic/bitops/le.h>

/* Bitmap functions for the ext2 filesystem */

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#include <asm-generic/bitops/sched.h>

#endif /* __KERNEL__ */

#endif /* _ASM_POWERPC_BITOPS_H */