/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_

#include <asm/types.h>

/**
 * arch___set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void
arch___set_bit(unsigned int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p |= mask;
}
#define __set_bit arch___set_bit
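
/*
 * Illustrative sketch (not part of the original header): the non-atomic
 * variant is only safe when no other context can touch the bitmap, e.g.
 * while a freshly allocated structure is still being initialised:
 *
 *	DECLARE_BITMAP(map, 64);
 *
 *	bitmap_zero(map, 64);
 *	__set_bit(5, map);
 *
 * DECLARE_BITMAP() and bitmap_zero() are provided by <linux/bitmap.h>.
 */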

/**
 * arch___clear_bit - Clear a bit in memory
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void
arch___clear_bit(unsigned int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p &= ~mask;
}
#define __clear_bit arch___clear_bit

/**
 * arch___change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void
arch___change_bit(unsigned int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p ^= mask;
}
#define __change_bit arch___change_bit
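
/*
 * Illustrative sketch (assumed caller, not from this file): toggling a
 * flag in a word that only the current thread owns never races, so the
 * non-atomic variant is sufficient:
 *
 *	unsigned long state = 0;
 *
 *	__change_bit(0, &state);	bit 0: 0 -> 1
 *	__change_bit(0, &state);	bit 0: 1 -> 0
 */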

/**
 * arch___test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __always_inline int
arch___test_and_set_bit(unsigned int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old | mask;
	return (old & mask) != 0;
}
#define __test_and_set_bit arch___test_and_set_bit
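
/*
 * Illustrative sketch (pool_lock, MAX_IDS and id_map are hypothetical
 * names): the non-atomic test-and-set is fine when a spinlock already
 * serialises every writer, e.g. claiming the first free slot in an ID
 * bitmap:
 *
 *	spin_lock(&pool_lock);
 *	for (id = 0; id < MAX_IDS; id++)
 *		if (!__test_and_set_bit(id, id_map))
 *			break;
 *	spin_unlock(&pool_lock);
 */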

/**
 * arch___test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __always_inline int
arch___test_and_clear_bit(unsigned int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old & ~mask;
	return (old & mask) != 0;
}
#define __test_and_clear_bit arch___test_and_clear_bit
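
/*
 * Illustrative sketch (ev_lock, EV_RX, ev_map and handle_rx() are
 * hypothetical names): consuming a pending-event bit while holding the
 * lock that protects the word:
 *
 *	spin_lock(&ev_lock);
 *	pending = __test_and_clear_bit(EV_RX, ev_map);
 *	spin_unlock(&ev_lock);
 *	if (pending)
 *		handle_rx();
 */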

/**
 * arch___test_and_change_bit - Toggle a bit and return its old value
 * @nr: Bit to toggle
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __always_inline int
arch___test_and_change_bit(unsigned int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old ^ mask;
	return (old & mask) != 0;
}
#define __test_and_change_bit arch___test_and_change_bit

/**
 * arch_test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __always_inline int
arch_test_bit(unsigned int nr, const volatile unsigned long *addr)
{
	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}
#define test_bit arch_test_bit
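
/*
 * Illustrative sketch: test_bit() only reads memory, so it can be used
 * without a lock when a momentarily stale answer is acceptable
 * (WORKER_IDLE and the worker structure are hypothetical names):
 *
 *	if (test_bit(WORKER_IDLE, worker->flags))
 *		wake_idle_worker(worker);
 */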

#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */