/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 *
 * Note: inlines with more than a single statement should be marked
 * __always_inline to avoid problems with older gcc's inlining heuristics.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

#if BITS_PER_LONG == 32
# define _BITOPS_LONG_SHIFT 5
#elif BITS_PER_LONG == 64
# define _BITOPS_LONG_SHIFT 6
#else
# error "Unexpected BITS_PER_LONG"
#endif

#define BIT_64(n)			(U64_C(1) << (n))

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. The test_and_*() operations return 0 if
 * the bit was clear before the operation and != 0 if it was set.
 *
 * bit 0 is the LSB of addr; bit BITS_PER_LONG is the LSB of (addr+1).
 */

#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))

#define ADDR				BITOP_ADDR(addr)

/*
 * We do the locked ops that don't return the old value as
 * a mask operation on a byte.
 */
#define IS_IMMEDIATE(nr)		(__builtin_constant_p(nr))
#define CONST_MASK_ADDR(nr, addr)	BITOP_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr)			(1 << ((nr) & 7))

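/*
 * Illustrative sketch, not part of this header: how a constant bit number
 * decomposes under the macros above. For nr == 20, CONST_MASK_ADDR() points
 * at byte 20 >> 3 == 2 of the bitmap and CONST_MASK() yields
 * 1 << (20 & 7) == 0x10, so the locked op can touch a single byte.
 */
#if 0 /* example only, never compiled */
static void const_mask_sketch(volatile unsigned long *addr)
{
	/* roughly what set_bit(20, addr) expands to for a constant nr: */
	asm volatile(LOCK_PREFIX "orb %1,%0"
		     : CONST_MASK_ADDR(20, addr)
		     : "iq" ((u8)CONST_MASK(20))
		     : "memory");
}
#endif
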
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __always_inline void
set_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "orb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr))
			: "memory");
	} else {
		asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
	}
}

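/*
 * Usage sketch with a hypothetical flag word (not part of this header):
 * because set_bit() is a locked read-modify-write, concurrent callers on
 * different bits of the same word cannot lose each other's updates.
 */
#if 0 /* example only, never compiled */
static void mark_pending(unsigned long *flags)
{
	set_bit(0, flags);	/* safe against a concurrent set_bit(1, flags) */
}
#endif
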
/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
{
	asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory");
}

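/*
 * Usage sketch, assuming a caller-provided spinlock (hypothetical): when
 * access to the word is already serialized, the non-atomic __set_bit()
 * avoids the cost of the LOCK prefix.
 */
#if 0 /* example only, never compiled */
static void mark_pending_locked(spinlock_t *lock, unsigned long *flags)
{
	spin_lock(lock);
	__set_bit(0, flags);	/* plain "bts", no bus lock needed under the lock */
	spin_unlock(lock);
}
#endif
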
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static __always_inline void
clear_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "andb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)~CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}

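/*
 * Usage sketch (hypothetical flag protocol): clear_bit() itself is not a
 * barrier, so code that publishes data before clearing a "busy" bit must
 * order the two explicitly, e.g. with smp_mb__before_atomic().
 */
#if 0 /* example only, never compiled */
static void finish_work(unsigned long *flags, int *result, int value)
{
	*result = value;		/* publish the result ... */
	smp_mb__before_atomic();	/* ... before dropping the flag */
	clear_bit(0, flags);
}
#endif
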
/**
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	barrier();
	clear_bit(nr, addr);
}

static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
{
	asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr));
}

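/*
 * clear_bit_unlock_is_negative_byte - variant of clear_bit_unlock() that
 * also reports the sign of the containing byte. The "andb" below operates
 * on the low byte of *addr, so @nr is expected to be < 8; the sign flag of
 * the result says whether bit 7 of that byte is still set. The page cache
 * uses this to drop PG_locked and test PG_waiters in one locked op.
 */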
static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
{
	bool negative;
	asm volatile(LOCK_PREFIX "andb %2,%1"
		CC_SET(s)
		: CC_OUT(s) (negative), ADDR
		: "ir" ((char) ~(1 << nr)) : "memory");
	return negative;
}

// Let everybody know we have it
#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte

/**
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 */
static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	barrier();
	__clear_bit(nr, addr);
}

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
{
	asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __always_inline void change_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "xorb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, c, "Ir", nr);
}

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86.
 */
static __always_inline bool
test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
	return test_and_set_bit(nr, addr);
}

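/*
 * Usage sketch (hand-rolled bit lock, illustration only): pairing
 * test_and_set_bit_lock() with clear_bit_unlock() gives acquire/release
 * semantics on a single bit. Real code should normally use the helpers in
 * <linux/bit_spinlock.h> instead.
 */
#if 0 /* example only, never compiled */
static void bit_lock_sketch(unsigned long *word)
{
	while (test_and_set_bit_lock(0, word))
		cpu_relax();		/* spin until the old value was 0 */
	/* ... critical section ... */
	clear_bit_unlock(0, word);
}
#endif
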
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm(__ASM_SIZE(bts) " %2,%1"
	    CC_SET(c)
	    : CC_OUT(c) (oldbit), ADDR
	    : "Ir" (nr));
	return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), *addr, c, "Ir", nr);
}

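/*
 * Usage sketch (hypothetical event flag): test_and_clear_bit() lets exactly
 * one of several racing consumers claim a pending event, because only one
 * of them can observe the old value as set.
 */
#if 0 /* example only, never compiled */
static bool claim_event(unsigned long *pending)
{
	return test_and_clear_bit(0, pending);	/* true for one winner only */
}
#endif
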
/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 *
 * Note: the operation is performed atomically with respect to
 * the local CPU, but not other CPUs. Portable code should not
 * rely on this behaviour.
 * KVM relies on this behaviour on x86 for modifying memory that is also
 * accessed from a hypervisor on the same CPU if running in a VM: don't change
 * this without also updating arch/x86/kernel/kvm.c
 */
static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm volatile(__ASM_SIZE(btr) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit), ADDR
		     : "Ir" (nr));
	return oldbit;
}

/* WARNING: non-atomic and it can be reordered! */
static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm volatile(__ASM_SIZE(btc) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit), ADDR
		     : "Ir" (nr) : "memory");

	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
}

static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & (BITS_PER_LONG-1))) &
		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}

static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
{
	bool oldbit;

	asm volatile(__ASM_SIZE(bt) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit)
		     : "m" (*(unsigned long *)addr), "Ir" (nr));

	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static bool test_bit(int nr, const volatile unsigned long *addr);
#endif

#define test_bit(nr, addr)			\
	(__builtin_constant_p((nr))		\
	 ? constant_test_bit((nr), (addr))	\
	 : variable_test_bit((nr), (addr)))

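/*
 * Usage sketch (hypothetical flags): test_bit() compiles to a plain load
 * and mask when @nr is a compile-time constant and to a "bt" instruction
 * otherwise; either way it is a simple read with no ordering guarantees.
 */
#if 0 /* example only, never compiled */
static bool is_pending(const unsigned long *flags, int nr)
{
	return test_bit(nr, flags);	/* variable nr: variable_test_bit() */
}
#endif
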
/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __ffs(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "rm" (word));
	return word;
}

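/*
 * Usage sketch: the classic loop over every set bit of a word with __ffs(),
 * clearing the lowest set bit on each iteration. The "word != 0" condition
 * matters because __ffs(0) is undefined.
 */
#if 0 /* example only, never compiled */
static void for_each_set_bit_sketch(unsigned long word)
{
	while (word) {
		unsigned long bit = __ffs(word);
		/* ... handle bit ... */
		word &= word - 1;	/* clear the lowest set bit */
	}
}
#endif
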
/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __always_inline unsigned long ffz(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "r" (~word));
	return word;
}

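/*
 * Usage sketch (hypothetical single-word allocation map): ffz() finds a
 * free (zero) slot. The "== ~0UL" check is required because ffz() is
 * undefined when every bit is set.
 */
#if 0 /* example only, never compiled */
static long alloc_slot(unsigned long *map)
{
	long slot;

	if (*map == ~0UL)
		return -1;		/* bitmap full, no free slot */
	slot = ffz(*map);
	__set_bit(slot, map);		/* caller must serialize access */
	return slot;
}
#endif
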
/**
 * __fls - find last set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __fls(unsigned long word)
{
	asm("bsr %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}

#undef ADDR

#ifdef __KERNEL__
/**
 * ffs - find first set bit in word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, and therefore differs in spirit from the other bitops.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first
 * set bit if value is nonzero. The first (least significant) bit
 * is at position 1.
 */
static __always_inline int ffs(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before, except that the
	 * top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsfl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsfl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "r" (-1));
#else
	asm("bsfl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}

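/*
 * Semantics sketch: unlike the 0-based __ffs(), ffs() is 1-based and is
 * defined for zero input, matching the libc routine of the same name.
 */
#if 0 /* example only, never compiled */
static void ffs_examples(void)
{
	BUG_ON(ffs(0) != 0);	/* no bit set */
	BUG_ON(ffs(1) != 1);	/* bit 0 -> position 1 */
	BUG_ON(ffs(0x8) != 4);	/* bit 3 -> position 4 */
}
#endif
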
/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 32.
 */
static __always_inline int fls(unsigned int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before, except that the
	 * top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsrl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsrl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "rm" (-1));
#else
	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}

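/*
 * Usage sketch: since fls(x) - 1 is floor(log2(x)) for nonzero x, a common
 * idiom is rounding up to the next power of two (hypothetical helper; like
 * any such idiom it overflows for x > 1U << 31).
 */
#if 0 /* example only, never compiled */
static unsigned int roundup_pow_of_two_sketch(unsigned int x)
{
	/* fls(0) == 0, fls(1) == 1, fls(0x80000000) == 32 */
	if (x <= 1)
		return 1;
	return 1U << fls(x - 1);
}
#endif
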
/**
 * fls64 - find last set bit in a 64-bit word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
#ifdef CONFIG_X86_64
static __always_inline int fls64(__u64 x)
{
	int bitpos = -1;
	/*
	 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before.
	 */
	asm("bsrq %1,%q0"
	    : "+r" (bitpos)
	    : "rm" (x));
	return bitpos + 1;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif

#include <asm-generic/bitops/find.h>

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>

#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */