// SPDX-License-Identifier: GPL-2.0-only

/*
 * The implementation of the wait_bit*() and related waiting APIs:
 */

#define WAIT_TABLE_BITS 8
#define WAIT_TABLE_SIZE (1 << WAIT_TABLE_BITS)

static wait_queue_head_t bit_wait_table[WAIT_TABLE_SIZE] __cacheline_aligned;

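/*
 * Waiters do not get a dedicated wait_queue_head. Each (word, bit)
 * pair instead hashes into a shared table of WAIT_TABLE_SIZE queues.
 * The word address is shifted up by log2(BITS_PER_LONG) (5 on 32-bit,
 * 6 on 64-bit) so that the bit number can be folded into the low bits,
 * giving distinct hash inputs for distinct bits of the same word.
 */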
wait_queue_head_t *bit_waitqueue(unsigned long *word, int bit)
{
        const int shift = BITS_PER_LONG == 32 ? 5 : 6;
        unsigned long val = (unsigned long)word << shift | bit;

        return bit_wait_table + hash_long(val, WAIT_TABLE_BITS);
}
EXPORT_SYMBOL(bit_waitqueue);

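/*
 * Default wake function for bit waiters. Because the queues are shared
 * by hash, a wakeup may reach entries waiting on an unrelated
 * (word, bit) pair; those are filtered out by comparing the key. A
 * waiter is also left asleep if the bit has meanwhile been set again.
 */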
int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *arg)
{
        struct wait_bit_key *key = arg;
        struct wait_bit_queue_entry *wait_bit = container_of(wq_entry, struct wait_bit_queue_entry, wq_entry);

        if (wait_bit->key.flags != key->flags ||
                        wait_bit->key.bit_nr != key->bit_nr ||
                        test_bit(key->bit_nr, key->flags))
                return 0;

        return autoremove_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(wake_bit_function);

/*
 * To allow interruptible waiting and asynchronous (i.e. non-blocking)
 * waiting, the action functions passed to __wait_on_bit() and
 * __wait_on_bit_lock() may return an error code. A nonzero return code
 * halts the wait and is propagated to the caller.
 */
int __sched
__wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry,
              wait_bit_action_f *action, unsigned mode)
{
        int ret = 0;

        do {
                prepare_to_wait(wq_head, &wbq_entry->wq_entry, mode);
                if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags))
                        ret = (*action)(&wbq_entry->key, mode);
        } while (test_bit_acquire(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret);

        finish_wait(wq_head, &wbq_entry->wq_entry);

        return ret;
}
EXPORT_SYMBOL(__wait_on_bit);

int __sched out_of_line_wait_on_bit(unsigned long *word, int bit,
                                    wait_bit_action_f *action, unsigned mode)
{
        struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
        DEFINE_WAIT_BIT(wq_entry, word, bit);

        return __wait_on_bit(wq_head, &wq_entry, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit);
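
/*
 * Illustrative example (a sketch, with a hypothetical "obj" carrying an
 * unsigned long "flags" field): the inline wait_on_bit*() helpers in
 * <linux/wait_bit.h> funnel into this function once they observe the
 * bit set. A sleeping wait until bit 0 clears could look like:
 *
 *      if (wait_on_bit(&obj->flags, 0, TASK_INTERRUPTIBLE))
 *              return -EINTR;  // woken by a signal, not by the bit clearing
 */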

int __sched out_of_line_wait_on_bit_timeout(
        unsigned long *word, int bit, wait_bit_action_f *action,
        unsigned mode, unsigned long timeout)
{
        struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
        DEFINE_WAIT_BIT(wq_entry, word, bit);

        wq_entry.key.timeout = jiffies + timeout;

        return __wait_on_bit(wq_head, &wq_entry, action, mode);
}
EXPORT_SYMBOL_GPL(out_of_line_wait_on_bit_timeout);
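
/*
 * Illustrative example (a sketch): waiting for bit 1 of a hypothetical
 * obj->flags to clear, giving up after roughly one second. The timeout
 * argument is relative and converted to an absolute jiffies value
 * above; bit_wait_timeout() turns it into -EAGAIN once it expires:
 *
 *      int err = out_of_line_wait_on_bit_timeout(&obj->flags, 1,
 *                      bit_wait_timeout, TASK_UNINTERRUPTIBLE, HZ);
 */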

int __sched
__wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry,
                        wait_bit_action_f *action, unsigned mode)
{
        int ret = 0;

        for (;;) {
                prepare_to_wait_exclusive(wq_head, &wbq_entry->wq_entry, mode);
                if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags)) {
                        ret = action(&wbq_entry->key, mode);
                        /*
                         * See the comment in prepare_to_wait_event().
                         * finish_wait() does not necessarily take wq_head->lock,
                         * but test_and_set_bit() implies mb() which pairs with
                         * smp_mb__after_atomic() before wake_up_page().
                         */
                        if (ret)
                                finish_wait(wq_head, &wbq_entry->wq_entry);
                }
                if (!test_and_set_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags)) {
                        if (!ret)
                                finish_wait(wq_head, &wbq_entry->wq_entry);
                        return 0;
                } else if (ret) {
                        return ret;
                }
        }
}
EXPORT_SYMBOL(__wait_on_bit_lock);

int __sched out_of_line_wait_on_bit_lock(unsigned long *word, int bit,
                                         wait_bit_action_f *action, unsigned mode)
{
        struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
        DEFINE_WAIT_BIT(wq_entry, word, bit);

        return __wait_on_bit_lock(wq_head, &wq_entry, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
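
/*
 * Illustrative example (a sketch): taking a bit "lock" on bit 2 of a
 * hypothetical obj->flags via the wait_on_bit_lock() wrapper from
 * <linux/wait_bit.h>, then releasing it with clear_bit_unlock() and
 * waking the next exclusive waiter:
 *
 *      if (!wait_on_bit_lock(&obj->flags, 2, TASK_UNINTERRUPTIBLE)) {
 *              ... critical section, bit 2 now owned ...
 *              clear_bit_unlock(2, &obj->flags);
 *              smp_mb__after_atomic();
 *              wake_up_bit(&obj->flags, 2);
 *      }
 */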

void __wake_up_bit(struct wait_queue_head *wq_head, unsigned long *word, int bit)
{
        struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);

        if (waitqueue_active(wq_head))
                __wake_up(wq_head, TASK_NORMAL, 1, &key);
}
EXPORT_SYMBOL(__wake_up_bit);

/**
 * wake_up_bit - wake up waiters on a bit
 * @word: the address containing the bit being waited on
 * @bit: the bit at that address being waited on
 *
 * Wake up any process waiting in wait_on_bit() or similar for the
 * given bit to be cleared.
 *
 * The wake-up is sent to tasks in a waitqueue selected by hash from a
 * shared pool.  Only those tasks on that queue which have requested
 * wake_up on this specific address and bit will be woken, and only if the
 * bit is clear.
 *
 * In order for this to function properly there must be a full memory
 * barrier after the bit is cleared and before this function is called.
 * If the bit was cleared atomically, such as by clear_bit(), then
 * smp_mb__after_atomic() can be used; otherwise smp_mb() is needed.
 * If the bit was cleared with a fully-ordered operation, no further
 * barrier is required.
 *
 * Normally the bit should be cleared by an operation with RELEASE
 * semantics so that any changes to memory made before the bit is
 * cleared are guaranteed to be visible after the matching wait_on_bit()
 * completes.
 */
void wake_up_bit(unsigned long *word, int bit)
{
        __wake_up_bit(bit_waitqueue(word, bit), word, bit);
}
EXPORT_SYMBOL(wake_up_bit);
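
/*
 * Illustrative example (a sketch): the wake side that pairs with the
 * wait_on_bit() example above, again for a hypothetical obj->flags.
 * The barrier requirement from the comment above is made explicit:
 *
 *      clear_bit(0, &obj->flags);
 *      smp_mb__after_atomic();   // order the clear before the wakeup check
 *      wake_up_bit(&obj->flags, 0);
 */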

wait_queue_head_t *__var_waitqueue(void *p)
{
        return bit_wait_table + hash_ptr(p, WAIT_TABLE_BITS);
}
EXPORT_SYMBOL(__var_waitqueue);

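/*
 * Wake function for wait_var_event() waiters: the analogue of
 * wake_bit_function(), except that bit_nr is the -1 sentinel and there
 * is no bit to test, so only the address carried in ->flags is
 * compared.
 */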
static int
var_wake_function(struct wait_queue_entry *wq_entry, unsigned int mode,
                  int sync, void *arg)
{
        struct wait_bit_key *key = arg;
        struct wait_bit_queue_entry *wbq_entry =
                container_of(wq_entry, struct wait_bit_queue_entry, wq_entry);

        if (wbq_entry->key.flags != key->flags ||
            wbq_entry->key.bit_nr != key->bit_nr)
                return 0;

        return autoremove_wake_function(wq_entry, mode, sync, key);
}

void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry, void *var, int flags)
{
        *wbq_entry = (struct wait_bit_queue_entry){
                .key = {
                        .flags  = (var),        /* reuse ->flags to carry the variable's address */
                        .bit_nr = -1,           /* sentinel: a var waiter, not a bit waiter */
                },
                .wq_entry = {
                        .flags   = flags,
                        .private = current,
                        .func    = var_wake_function,
                        .entry   = LIST_HEAD_INIT(wbq_entry->wq_entry.entry),
                },
        };
}
EXPORT_SYMBOL(init_wait_var_entry);

/**
 * wake_up_var - wake up waiters on a variable (kernel address)
 * @var: the address of the variable being waited on
 *
 * Wake up any process waiting in wait_var_event() or similar for the
 * given variable to change.  wait_var_event() can be waiting for an
 * arbitrary condition to be true and associates that condition with an
 * address.  Calling wake_up_var() suggests that the condition has been
 * made true, but does not strictly require the condition to use the
 * address given.
 *
 * The wake-up is sent to tasks in a waitqueue selected by hash from a
 * shared pool.  Only those tasks on that queue which have requested
 * wake_up on this specific address will be woken.
 *
 * In order for this to function properly there must be a full memory
 * barrier after the variable is updated (or more accurately, after the
 * condition waited on has been made to be true) and before this function
 * is called.  If the variable was updated atomically, such as by
 * atomic_dec(), then smp_mb__after_atomic() can be used.  If the
 * variable was updated by a fully ordered operation such as
 * atomic_dec_and_test() then no extra barrier is required.  Otherwise
 * smp_mb() is needed.
 *
 * Normally the variable should be updated (the condition should be made
 * to be true) by an operation with RELEASE semantics such as
 * smp_store_release() so that any changes to memory made before the
 * variable was updated are guaranteed to be visible after the matching
 * wait_var_event() completes.
 */
void wake_up_var(void *var)
{
        __wake_up_bit(__var_waitqueue(var), var, -1);
}
EXPORT_SYMBOL(wake_up_var);
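
/*
 * Illustrative example (a sketch): a hypothetical refcount-style wait,
 * pairing wait_var_event() with wake_up_var(). atomic_dec_and_test()
 * is fully ordered, so no extra barrier is needed before the wakeup:
 *
 *      // releasing side
 *      if (atomic_dec_and_test(&obj->users))
 *              wake_up_var(&obj->users);
 *
 *      // waiting side
 *      wait_var_event(&obj->users, atomic_read(&obj->users) == 0);
 */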

/*
 * Stock wait_bit_action_f implementations: each defines how a waiter
 * sleeps once the bit has been seen set, and which nonzero code, if
 * any, aborts the wait.
 */
__sched int bit_wait(struct wait_bit_key *word, int mode)
{
        schedule();
        if (signal_pending_state(mode, current))
                return -EINTR;

        return 0;
}
EXPORT_SYMBOL(bit_wait);

__sched int bit_wait_io(struct wait_bit_key *word, int mode)
{
        io_schedule();
        if (signal_pending_state(mode, current))
                return -EINTR;

        return 0;
}
EXPORT_SYMBOL(bit_wait_io);

__sched int bit_wait_timeout(struct wait_bit_key *word, int mode)
{
        unsigned long now = READ_ONCE(jiffies);

        if (time_after_eq(now, word->timeout))
                return -EAGAIN;
        schedule_timeout(word->timeout - now);
        if (signal_pending_state(mode, current))
                return -EINTR;

        return 0;
}
EXPORT_SYMBOL_GPL(bit_wait_timeout);
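
/*
 * Illustrative example (a sketch): these actions are normally plugged
 * in by the <linux/wait_bit.h> wrappers rather than passed directly,
 * e.g. wait_on_bit_io() supplies bit_wait_io so that the sleep is
 * accounted as I/O wait:
 *
 *      wait_on_bit_io(&obj->flags, 3, TASK_UNINTERRUPTIBLE);
 */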

void __init wait_bit_init(void)
{
        int i;

        for (i = 0; i < WAIT_TABLE_SIZE; i++)
                init_waitqueue_head(bit_wait_table + i);
}