Lines matching +full:wakeup +full:-mode in kernel/sched/wait.c

1 // SPDX-License-Identifier: GPL-2.0-only
11 spin_lock_init(&wq_head->lock); in __init_waitqueue_head()
12 lockdep_set_class_and_name(&wq_head->lock, key, name); in __init_waitqueue_head()
13 INIT_LIST_HEAD(&wq_head->head); in __init_waitqueue_head()
22 wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE; in add_wait_queue()
23 spin_lock_irqsave(&wq_head->lock, flags); in add_wait_queue()
25 spin_unlock_irqrestore(&wq_head->lock, flags); in add_wait_queue()
33 wq_entry->flags |= WQ_FLAG_EXCLUSIVE; in add_wait_queue_exclusive()
34 spin_lock_irqsave(&wq_head->lock, flags); in add_wait_queue_exclusive()
36 spin_unlock_irqrestore(&wq_head->lock, flags); in add_wait_queue_exclusive()
44 wq_entry->flags |= WQ_FLAG_PRIORITY; in add_wait_queue_priority()
45 spin_lock_irqsave(&wq_head->lock, flags); in add_wait_queue_priority()
47 spin_unlock_irqrestore(&wq_head->lock, flags); in add_wait_queue_priority()
54 struct list_head *head = &wq_head->head; in add_wait_queue_priority_exclusive()
56 wq_entry->flags |= WQ_FLAG_EXCLUSIVE | WQ_FLAG_PRIORITY; in add_wait_queue_priority_exclusive()
58 guard(spinlock_irqsave)(&wq_head->lock); in add_wait_queue_priority_exclusive()
61 (list_first_entry(head, typeof(*wq_entry), entry)->flags & WQ_FLAG_PRIORITY)) in add_wait_queue_priority_exclusive()
62 return -EBUSY; in add_wait_queue_priority_exclusive()
64 list_add(&wq_entry->entry, head); in add_wait_queue_priority_exclusive()
73 spin_lock_irqsave(&wq_head->lock, flags); in remove_wait_queue()
75 spin_unlock_irqrestore(&wq_head->lock, flags); in remove_wait_queue()
80 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
81 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
83 * the non-exclusive tasks. Normally, exclusive tasks will be at the end of
84 * the list and any non-exclusive tasks will be woken first. A priority task
92 static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode, in __wake_up_common() argument
97 lockdep_assert_held(&wq_head->lock); in __wake_up_common()
99 curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry); in __wake_up_common()
101 if (&curr->entry == &wq_head->head) in __wake_up_common()
104 list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) { in __wake_up_common()
105 unsigned flags = curr->flags; in __wake_up_common()
108 ret = curr->func(curr, mode, wake_flags, key); in __wake_up_common()
111 if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive) in __wake_up_common()
118 static int __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode, in __wake_up_common_lock() argument
124 spin_lock_irqsave(&wq_head->lock, flags); in __wake_up_common_lock()
125 remaining = __wake_up_common(wq_head, mode, nr_exclusive, wake_flags, in __wake_up_common_lock()
127 spin_unlock_irqrestore(&wq_head->lock, flags); in __wake_up_common_lock()
129 return nr_exclusive - remaining; in __wake_up_common_lock()
133 * __wake_up - wake up threads blocked on a waitqueue.
135 * @mode: which threads
136 * @nr_exclusive: how many wake-one or wake-many threads to wake up
137 * @key: is directly passed to the wakeup function
143 int __wake_up(struct wait_queue_head *wq_head, unsigned int mode, in __wake_up() argument
146 return __wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key); in __wake_up()
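
As a usage sketch (hypothetical driver code, not part of this file; all identifiers are invented, only the wait/wake API calls are real): waiters queued with the exclusive flag are woken one per wake_up() call, since the wrapper passes nr_exclusive == 1, while wake_up_all() passes nr_exclusive == 0 and wakes everybody.

	#include <linux/wait.h>
	#include <linux/kthread.h>
	#include <linux/spinlock.h>

	static DECLARE_WAIT_QUEUE_HEAD(req_wq);
	static DEFINE_SPINLOCK(req_lock);
	static int nr_pending;

	static int consumer_fn(void *unused)
	{
		while (!kthread_should_stop()) {
			/* exclusive wait: a single wake_up() wakes only one consumer */
			if (wait_event_interruptible_exclusive(req_wq,
					nr_pending > 0 || kthread_should_stop()))
				continue;	/* interrupted; re-check the stop condition */

			spin_lock(&req_lock);
			if (nr_pending > 0)
				nr_pending--;	/* consume one request */
			spin_unlock(&req_lock);
		}
		return 0;
	}

	static void producer_submit(void)
	{
		spin_lock(&req_lock);
		nr_pending++;
		spin_unlock(&req_lock);
		wake_up(&req_wq);	/* nr_exclusive == 1: wake a single consumer */
	}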
150 void __wake_up_on_current_cpu(struct wait_queue_head *wq_head, unsigned int mode, void *key) in __wake_up_on_current_cpu() argument
152 __wake_up_common_lock(wq_head, mode, 1, WF_CURRENT_CPU, key); in __wake_up_on_current_cpu()
158 void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr) in __wake_up_locked() argument
160 __wake_up_common(wq_head, mode, nr, 0, NULL); in __wake_up_locked()
164 void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key) in __wake_up_locked_key() argument
166 __wake_up_common(wq_head, mode, 1, 0, key); in __wake_up_locked_key()
171 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
173 * @mode: which threads
174 * @key: opaque value to be passed to wakeup targets
176 The sync wakeup differs in that the waker knows that it will schedule
178 * be migrated to another CPU - ie. the two threads are 'synchronized'
186 void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, in __wake_up_sync_key() argument
192 __wake_up_common_lock(wq_head, mode, 1, WF_SYNC, key); in __wake_up_sync_key()
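
A hedged sketch of when a caller reaches for the _sync variant (the wake_up_interruptible_sync() wrapper ends up here): the waker is about to block itself, so the wakee may as well stay on this CPU. The struct and function names below are invented.

	struct chan {
		wait_queue_head_t wq;	/* initialised with init_waitqueue_head() elsewhere */
		bool data_ready;
		bool reply_ready;
	};

	static void hand_off(struct chan *ch)
	{
		ch->data_ready = true;			/* set the condition first ... */
		wake_up_interruptible_sync(&ch->wq);	/* ... then a WF_SYNC wakeup */

		/* the waker blocks right away, so the CPU is free for the wakee */
		wait_event_interruptible(ch->wq, ch->reply_ready);
	}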
197 * __wake_up_locked_sync_key - wake up a thread blocked on a locked waitqueue.
199 * @mode: which threads
200 * @key: opaque value to be passed to wakeup targets
202 * The sync wakeup differs in that the waker knows that it will schedule
204 * be migrated to another CPU - ie. the two threads are 'synchronized'
213 unsigned int mode, void *key) in __wake_up_locked_sync_key() argument
215 __wake_up_common(wq_head, mode, 1, WF_SYNC, key); in __wake_up_locked_sync_key()
220 * __wake_up_sync - see __wake_up_sync_key()
222 void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode) in __wake_up_sync() argument
224 __wake_up_sync_key(wq_head, mode, NULL); in __wake_up_sync()
236 * Note: we use "set_current_state()" _after_ the wait-queue add,
238 * wake-function that tests for the wait-queue being active
240 * tests in this thread will see the wakeup having taken place.
242 * The spin_unlock() itself is semi-permeable and only protects
244 * stops them from bleeding out - it would still allow subsequent
252 wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE; in prepare_to_wait()
253 spin_lock_irqsave(&wq_head->lock, flags); in prepare_to_wait()
254 if (list_empty(&wq_entry->entry)) in prepare_to_wait()
257 spin_unlock_irqrestore(&wq_head->lock, flags); in prepare_to_wait()
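
The pattern these helpers exist for is the open-coded wait loop below (a minimal sketch, not from this file). DEFINE_WAIT() supplies an entry whose callback is autoremove_wake_function(), and the condition is re-checked after every prepare_to_wait(), which is exactly the ordering the note above relies on.

	#include <linux/wait.h>
	#include <linux/sched/signal.h>

	/* Minimal sketch of the canonical prepare_to_wait()/finish_wait() loop. */
	static int wait_for_flag(wait_queue_head_t *wq, bool *flag)
	{
		DEFINE_WAIT(wait);		/* wake callback: autoremove_wake_function */
		int ret = 0;

		for (;;) {
			prepare_to_wait(wq, &wait, TASK_INTERRUPTIBLE);
			if (*flag)		/* re-check the condition after queueing */
				break;
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}
		finish_wait(wq, &wait);
		return ret;
	}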
268 wq_entry->flags |= WQ_FLAG_EXCLUSIVE; in prepare_to_wait_exclusive()
269 spin_lock_irqsave(&wq_head->lock, flags); in prepare_to_wait_exclusive()
270 if (list_empty(&wq_entry->entry)) { in prepare_to_wait_exclusive()
271 was_empty = list_empty(&wq_head->head); in prepare_to_wait_exclusive()
275 spin_unlock_irqrestore(&wq_head->lock, flags); in prepare_to_wait_exclusive()
282 wq_entry->flags = flags; in init_wait_entry()
283 wq_entry->private = current; in init_wait_entry()
284 wq_entry->func = autoremove_wake_function; in init_wait_entry()
285 INIT_LIST_HEAD(&wq_entry->entry); in init_wait_entry()
294 spin_lock_irqsave(&wq_head->lock, flags); in prepare_to_wait_event()
297 * Exclusive waiter must not fail if it was selected by wakeup, in prepare_to_wait_event()
302 * wakeup locks/unlocks the same wq_head->lock. in prepare_to_wait_event()
304 * But we need to ensure that set-condition + wakeup after that in prepare_to_wait_event()
308 list_del_init(&wq_entry->entry); in prepare_to_wait_event()
309 ret = -ERESTARTSYS; in prepare_to_wait_event()
311 if (list_empty(&wq_entry->entry)) { in prepare_to_wait_event()
312 if (wq_entry->flags & WQ_FLAG_EXCLUSIVE) in prepare_to_wait_event()
319 spin_unlock_irqrestore(&wq_head->lock, flags); in prepare_to_wait_event()
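
prepare_to_wait_event() is the backend of the wait_event*() macros, so callers normally never see it. A hedged sketch of the usual caller-side pattern (identifiers invented), observing the set-condition-then-wake ordering the comment above insists on:

	#include <linux/wait.h>
	#include <linux/interrupt.h>

	static DECLARE_WAIT_QUEUE_HEAD(done_wq);
	static bool done;

	static irqreturn_t my_irq_handler(int irq, void *dev_id)
	{
		done = true;		/* set the condition first ... */
		wake_up(&done_wq);	/* ... then do the wakeup */
		return IRQ_HANDLED;
	}

	static int wait_for_done(void)
	{
		/* 0 once `done` is observed, -ERESTARTSYS if a signal arrives */
		return wait_event_interruptible(done_wq, done);
	}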
327 * wait-queue lock held (and interrupts off in the _irq
328 * case), so there is no race with testing the wakeup
334 if (likely(list_empty(&wait->entry))) in do_wait_intr()
339 return -ERESTARTSYS; in do_wait_intr()
341 spin_unlock(&wq->lock); in do_wait_intr()
343 spin_lock(&wq->lock); in do_wait_intr()
351 if (likely(list_empty(&wait->entry))) in do_wait_intr_irq()
356 return -ERESTARTSYS; in do_wait_intr_irq()
358 spin_unlock_irq(&wq->lock); in do_wait_intr_irq()
360 spin_lock_irq(&wq->lock); in do_wait_intr_irq()
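
do_wait_intr() and do_wait_intr_irq() back the wait_event_interruptible_locked*() macros, which are entered with wq_head->lock already held and only drop it around schedule(). A rough caller-side sketch, with invented types:

	struct fifo {
		wait_queue_head_t wq;	/* initialised with init_waitqueue_head() elsewhere */
		int count;
	};

	static int fifo_take(struct fifo *f)
	{
		int err;

		spin_lock(&f->wq.lock);
		/* the condition is evaluated under wq.lock; do_wait_intr()
		 * drops and retakes the lock around schedule() */
		err = wait_event_interruptible_locked(f->wq, f->count > 0);
		if (!err)
			f->count--;
		spin_unlock(&f->wq.lock);
		return err;
	}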
367 * finish_wait - clean up after waiting in a queue
383 * - we use the "careful" check that verifies both in finish_wait()
385 * be any half-pending updates in progress on other in finish_wait()
389 * - all other users take the lock (ie we can only in finish_wait()
393 if (!list_empty_careful(&wq_entry->entry)) { in finish_wait()
394 spin_lock_irqsave(&wq_head->lock, flags); in finish_wait()
395 list_del_init(&wq_entry->entry); in finish_wait()
396 spin_unlock_irqrestore(&wq_head->lock, flags); in finish_wait()
401 int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key) in autoremove_wake_function() argument
403 int ret = default_wake_function(wq_entry, mode, sync, key); in autoremove_wake_function()
406 list_del_init_careful(&wq_entry->entry); in autoremove_wake_function()
422 * p->state = mode; wq_entry->flags |= WQ_FLAG_WOKEN;
424 * if (!(wq_entry->flags & WQ_FLAG_WOKEN)) <full barrier>
425 * schedule() if (p->state & mode)
426 * p->state = TASK_RUNNING; p->state = TASK_RUNNING;
427 * wq_entry->flags &= ~WQ_FLAG_WOKEN;
430 * remove_wait_queue(&wq_head, &wait); wq_entry->flags |= WQ_FLAG_WOKEN;
432 long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout) in wait_woken() argument
437 * either we see the store to wq_entry->flags in woken_wake_function() in wait_woken()
438 * or woken_wake_function() sees our store to current->state. in wait_woken()
440 set_current_state(mode); /* A */ in wait_woken()
441 if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !kthread_should_stop_or_park()) in wait_woken()
448 * being true or the store to wq_entry->flags in woken_wake_function() in wait_woken()
451 smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */ in wait_woken()
457 int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key) in woken_wake_function() argument
461 wq_entry->flags |= WQ_FLAG_WOKEN; in woken_wake_function()
463 return default_wake_function(wq_entry, mode, sync, key); in woken_wake_function()
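
A minimal sketch of the caller side of this protocol (names invented): the waiter never calls set_current_state() itself, wait_woken() does, and the WQ_FLAG_WOKEN handshake above keeps a wakeup that races with the condition check from being lost.

	#include <linux/wait.h>
	#include <linux/sched/signal.h>

	static long wait_for_cond(wait_queue_head_t *wq, bool *cond, long timeout)
	{
		DEFINE_WAIT_FUNC(wait, woken_wake_function);

		add_wait_queue(wq, &wait);
		while (!*cond && timeout > 0) {
			if (signal_pending(current)) {
				timeout = -ERESTARTSYS;
				break;
			}
			/* sleeps in TASK_INTERRUPTIBLE, returns the remaining jiffies */
			timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
		}
		remove_wait_queue(wq, &wait);

		return timeout;	/* > 0: condition met, 0: timed out, < 0: signal */
	}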