Lines Matching refs:waiter

36 static inline int __ww_mutex_add_waiter(struct rt_mutex_waiter *waiter,
56 struct rt_mutex_waiter *waiter,
76 * NULL 1 lock is free and has waiters and the top waiter
186 * With the check for the waiter bit in place T3 on CPU2 will not
274 * If a new waiter comes in between the unlock and the cmpxchg
292 * wake waiter();
360 * Update the waiter->tree copy of the sort keys.
363 waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
365 lockdep_assert_held(&waiter->lock->wait_lock);
366 lockdep_assert(RB_EMPTY_NODE(&waiter->tree.entry));
368 waiter->tree.prio = __waiter_prio(task);
369 waiter->tree.deadline = task->dl.deadline;
373 * Update the waiter->pi_tree copy of the sort keys (from the tree copy).
376 waiter_clone_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
378 lockdep_assert_held(&waiter->lock->wait_lock);
380 lockdep_assert(RB_EMPTY_NODE(&waiter->pi_tree.entry));
382 waiter->pi_tree.prio = waiter->tree.prio;
383 waiter->pi_tree.deadline = waiter->tree.deadline;
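
The two helpers above keep two copies of the same sort key: waiter->tree is the key used in the lock's waiter tree, and waiter->pi_tree is a clone of it used in the owner's pi_waiters tree; both are written under lock->wait_lock so the copies cannot diverge while either tree is in use. A minimal standalone sketch of that bookkeeping follows (simplified fields and names, not the kernel's structures):

#include <stdio.h>

/* simplified stand-in for the two sort-key copies carried by a waiter */
struct waiter_node {
	int prio;                       /* lower value == higher priority     */
	unsigned long long deadline;    /* meaningful only for deadline tasks */
};

struct waiter {
	struct waiter_node tree;        /* key for the lock's waiter tree           */
	struct waiter_node pi_tree;     /* key copy for the owner's pi_waiters tree */
};

/* refresh the tree copy from the blocked task's current scheduling parameters */
static void update_prio(struct waiter *w, int task_prio, unsigned long long deadline)
{
	w->tree.prio = task_prio;
	w->tree.deadline = deadline;
}

/* clone the tree copy into the pi_tree copy (done under wait_lock in the kernel) */
static void clone_prio(struct waiter *w)
{
	w->pi_tree = w->tree;
}

int main(void)
{
	struct waiter w;

	update_prio(&w, 10, 0);
	clone_prio(&w);
	printf("tree.prio=%d pi_tree.prio=%d\n", w.tree.prio, w.pi_tree.prio);
	return 0;
}
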
403 * If left waiter has a dl_prio(), and we didn't return 1 above,
404 * then right waiter has a dl_prio() too.
421 * If left waiter has a dl_prio(), and we didn't return 0 above,
422 * then right waiter has a dl_prio() too.
430 static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
433 if (rt_waiter_node_less(&waiter->tree, &top_waiter->tree))
441 if (rt_or_dl_prio(waiter->tree.prio))
444 return rt_waiter_node_equal(&waiter->tree, &top_waiter->tree);
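
rt_mutex_steal() decides whether an incoming waiter may take the lock ahead of the current top waiter: a strictly higher-priority waiter always may, and on the spinlock build an equal-priority ("lateral") steal is allowed only for non-RT, non-deadline tasks so RT latencies stay bounded. A rough standalone model of that decision, with priorities as plain integers and MAX_RT_PRIO as an assumed threshold (the real key also compares deadlines for SCHED_DEADLINE tasks):

#include <stdbool.h>
#include <stdio.h>

#define MAX_RT_PRIO 100   /* assumed: prio below this means RT or deadline class */

/* true if 'a' sorts before 'b', i.e. 'a' is the higher-priority key */
static bool node_less(int a, int b)
{
	return a < b;
}

/* may a waiter with 'prio' take the lock ahead of the top waiter with 'top'? */
static bool can_steal(int prio, int top, bool lateral_steal_allowed)
{
	if (node_less(prio, top))
		return true;                 /* strictly higher priority   */
	if (!lateral_steal_allowed)
		return false;                /* mutex build: never lateral */
	if (prio < MAX_RT_PRIO)
		return false;                /* RT/DL: no lateral steal    */
	return prio == top;                  /* equal non-RT priority      */
}

int main(void)
{
	printf("%d\n", can_steal(10, 50, true));    /* 1: higher priority   */
	printf("%d\n", can_steal(50, 50, true));    /* 0: equal RT priority */
	printf("%d\n", can_steal(120, 120, true));  /* 1: equal non-RT prio */
	return 0;
}
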
467 /* NOTE: relies on waiter->ww_ctx being set before insertion */
480 rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
484 rb_add_cached(&waiter->tree.entry, &lock->waiters, __waiter_less);
488 rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
492 if (RB_EMPTY_NODE(&waiter->tree.entry))
495 rb_erase_cached(&waiter->tree.entry, &lock->waiters);
496 RB_CLEAR_NODE(&waiter->tree.entry);
508 rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
512 rb_add_cached(&waiter->pi_tree.entry, &task->pi_waiters, __pi_waiter_less);
516 rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
520 if (RB_EMPTY_NODE(&waiter->pi_tree.entry))
523 rb_erase_cached(&waiter->pi_tree.entry, &task->pi_waiters);
524 RB_CLEAR_NODE(&waiter->pi_tree.entry);
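
The four helpers above maintain the two rbtrees: rt_mutex_enqueue()/rt_mutex_dequeue() operate on lock->waiters keyed by waiter->tree, rt_mutex_enqueue_pi()/rt_mutex_dequeue_pi() operate on the owner's task->pi_waiters keyed by waiter->pi_tree, and dequeue is guarded so removing an already-removed node is a no-op. A compact standalone analogue of that guarded enqueue/dequeue pattern, with the rbtree replaced by a sorted list so it compiles on its own:

#include <stddef.h>
#include <stdio.h>

/* simplified stand-in: sorted singly-linked list instead of an rbtree */
struct waiter {
	int prio;                 /* lower value == higher priority            */
	struct waiter *next;
	struct waiter **pprev;    /* non-NULL while enqueued (cf. RB_EMPTY_NODE) */
};

/* keep the list sorted so the head is always the top (highest priority) waiter */
static void enqueue(struct waiter **head, struct waiter *w)
{
	struct waiter **pp = head;

	while (*pp && (*pp)->prio <= w->prio)
		pp = &(*pp)->next;
	w->next = *pp;
	w->pprev = pp;
	if (w->next)
		w->next->pprev = &w->next;
	*pp = w;
}

/* guarded removal: a second call on the same waiter is a no-op */
static void dequeue(struct waiter *w)
{
	if (!w->pprev)            /* cf. the RB_EMPTY_NODE() check */
		return;
	*w->pprev = w->next;
	if (w->next)
		w->next->pprev = w->pprev;
	w->pprev = NULL;          /* cf. RB_CLEAR_NODE() */
	w->next = NULL;
}

int main(void)
{
	struct waiter *head = NULL;
	struct waiter a = { .prio = 120 }, b = { .prio = 50 };

	enqueue(&head, &a);
	enqueue(&head, &b);
	printf("top waiter prio: %d\n", head->prio);  /* 50  */
	dequeue(&b);
	dequeue(&b);                                  /* harmless double dequeue */
	printf("top waiter prio: %d\n", head->prio);  /* 120 */
	return 0;
}
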
587 * If the waiter argument is NULL this indicates the deboost path and
592 rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
596 return waiter != NULL;
620 * depicted above or if the top waiter has gone away and we are
622 * @top_task: the current top waiter
652 * [2] waiter = task->pi_blocked_on; [P1]
654 * [4] lock = waiter->lock; [P1]
660 * [7] requeue_lock_waiter(lock, waiter); [P1] + [L]
685 struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
735 * [2] Get the waiter on which @task is blocked.
737 waiter = task->pi_blocked_on;
748 if (!waiter)
767 if (next_lock != waiter->lock)
792 if (IS_ENABLED(CONFIG_PREEMPT_RT) && waiter->ww_ctx && detect_deadlock)
805 * are not the top pi waiter of the task. If deadlock
818 * If the waiter priority is the same as the task priority
824 if (rt_waiter_node_equal(&waiter->tree, task_to_waiter_node(task))) {
835 lock = waiter->lock;
913 * Get the top waiter for the next iteration
928 * Store the current top waiter before doing the requeue
934 /* [7] Requeue the waiter in the lock waiter tree. */
935 rt_mutex_dequeue(lock, waiter);
938 * Update the waiter prio fields now that we're dequeued.
948 waiter_update_prio(waiter, task);
950 rt_mutex_enqueue(lock, waiter);
971 * If the requeue [7] above changed the top waiter,
972 * then we need to wake the new top waiter up to try
992 if (waiter == rt_mutex_top_waiter(lock)) {
994 * The waiter became the new top (highest priority)
995 * waiter on the lock. Replace the previous top waiter
996 * in the owner task's pi waiters tree with this waiter
1000 waiter_clone_prio(waiter, task);
1001 rt_mutex_enqueue_pi(task, waiter);
1004 } else if (prerequeue_top_waiter == waiter) {
1006 * The waiter was the top waiter on the lock, but is
1007 * no longer the top priority waiter. Replace waiter in
1009 * (highest priority) waiter and adjust the priority
1011 * The new top waiter is stored in @waiter so that
1012 * @waiter == @top_waiter evaluates to true below and
1015 rt_mutex_dequeue_pi(task, waiter);
1016 waiter = rt_mutex_top_waiter(lock);
1017 waiter_clone_prio(waiter, task);
1018 rt_mutex_enqueue_pi(task, waiter);
1039 * Store the top waiter of @lock for the end of chain walk
1059 * If the current waiter is not the top waiter on the lock,
1063 if (!detect_deadlock && waiter != top_waiter)
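
The fragments above all belong to rt_mutex_adjust_prio_chain(): starting from a blocked task it follows task->pi_blocked_on to the lock, requeues the waiter there with the task's updated key, fixes up the owner's pi_waiters tree, and then repeats with the owner as the new task, stopping early when a step changed nothing. The following toy model keeps only that propagation loop (arrays instead of rbtrees, no locking, boost-only, made-up names):

#include <stdio.h>

#define NTASKS 4

/* toy model: task i is blocked on a lock owned by task blocked_on, or -1 */
struct task {
	int normal_prio;    /* priority without boosting             */
	int eff_prio;       /* effective (possibly boosted) priority */
	int blocked_on;     /* index of the owner task, -1 at chain end */
};

static int min(int a, int b) { return a < b ? a : b; }

/*
 * Walk the chain from 'start', boosting each owner to the highest priority
 * seen so far, and stop as soon as a step makes no difference, like the
 * early-exit paths of the real chain walk. Deboosting (which needs the
 * maximum over all remaining waiters) is not modelled here.
 */
static void adjust_prio_chain(struct task *t, int start)
{
	int i = start;

	while (t[i].blocked_on >= 0) {
		int owner = t[i].blocked_on;
		int boosted = min(t[owner].normal_prio, t[i].eff_prio);

		if (boosted == t[owner].eff_prio)
			break;          /* nothing changed: stop walking */
		t[owner].eff_prio = boosted;
		i = owner;              /* continue with the owner       */
	}
}

int main(void)
{
	struct task t[NTASKS] = {
		{ .normal_prio = 10,  .eff_prio = 10,  .blocked_on = 1 },
		{ .normal_prio = 50,  .eff_prio = 50,  .blocked_on = 2 },
		{ .normal_prio = 90,  .eff_prio = 90,  .blocked_on = 3 },
		{ .normal_prio = 120, .eff_prio = 120, .blocked_on = -1 },
	};

	adjust_prio_chain(t, 0);
	for (int i = 0; i < NTASKS; i++)
		printf("task %d eff_prio %d\n", i, t[i].eff_prio);
	return 0;
}
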
1083 * @waiter: The waiter that is queued to the lock's wait tree if the
1088 struct rt_mutex_waiter *waiter)
1118 * If @waiter != NULL, @task has already enqueued the waiter
1119 * into @lock's waiter tree. If @waiter == NULL then this is a
1122 if (waiter) {
1126 * If waiter is the highest priority waiter of @lock,
1129 if (waiter == top_waiter || rt_mutex_steal(waiter, top_waiter)) {
1131 * We can acquire the lock. Remove the waiter from the
1134 rt_mutex_dequeue(lock, waiter);
1154 * The current top waiter stays enqueued. We
1171 * @task->pi_lock. Redundant operation for the @waiter == NULL
1180 * waiter into @task->pi_waiters tree.
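
try_to_take_rt_mutex() grants the lock when it is unowned and either has no waiters, the caller's own waiter is the top waiter, or the caller may steal from the top waiter; only then is the remaining top waiter moved into the new owner's pi_waiters tree. A condensed sketch of just that decision, with hypothetical names (the lateral-steal refinement follows the model given after the rt_mutex_steal() lines above):

#include <stdbool.h>
#include <stdio.h>

/* minimal lock state for the acquisition decision */
struct toy_lock {
	bool has_owner;
	bool has_waiters;
	int  top_waiter_prio;   /* valid only if has_waiters; lower == higher prio */
};

/*
 * 'am_top_waiter' models the @waiter != NULL case (caller already enqueued);
 * 'my_prio' is used for the trylock/steal comparison.
 */
static bool toy_try_to_take(struct toy_lock *l, bool am_top_waiter, int my_prio)
{
	if (l->has_owner)
		return false;                        /* owned: cannot take      */
	if (!l->has_waiters || am_top_waiter ||
	    my_prio < l->top_waiter_prio) {          /* free, top, or can steal */
		l->has_owner = true;
		return true;
	}
	return false;                                /* lower priority: block   */
}

int main(void)
{
	struct toy_lock l = { .has_waiters = true, .top_waiter_prio = 50 };

	printf("%d\n", toy_try_to_take(&l, false, 120));  /* 0: must block  */
	printf("%d\n", toy_try_to_take(&l, false, 10));   /* 1: steals lock */
	return 0;
}
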
1199 * Prepare waiter and propagate pi chain
1204 struct rt_mutex_waiter *waiter,
1211 struct rt_mutex_waiter *top_waiter = waiter;
1220 * only an optimization. We drop the locks, so another waiter
1223 * which is wrong, as the other waiter is not in a deadlock
1233 waiter->task = task;
1234 waiter->lock = lock;
1235 waiter_update_prio(waiter, task);
1236 waiter_clone_prio(waiter, task);
1238 /* Get the top priority waiter on the lock */
1241 rt_mutex_enqueue(lock, waiter);
1243 task->pi_blocked_on = waiter;
1250 /* Check whether the waiter should back out immediately */
1252 res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx, wake_q);
1255 rt_mutex_dequeue(lock, waiter);
1266 if (waiter == rt_mutex_top_waiter(lock)) {
1268 rt_mutex_enqueue_pi(owner, waiter);
1273 } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
1299 next_lock, waiter, task);
1307 * Remove the top waiter from the current task's pi waiter tree and
1315 struct rt_mutex_waiter *waiter;
1321 waiter = rt_mutex_top_waiter(lock);
1330 rt_mutex_dequeue_pi(current, waiter);
1334 * As we are waking up the top waiter, and the waiter stays
1339 * the top waiter can steal this lock.
1344 * We deboosted before waking the top waiter task such that we don't
1354 rt_mutex_wake_q_add(wqh, waiter);
1461 * The wakeup next waiter path does not suffer from the above
1464 * Queue the next waiter for wakeup once we release the wait_lock.
1482 struct rt_mutex_waiter *waiter,
1502 * - current is no longer the top waiter
1508 !rt_mutex_waiter_is_top_waiter(lock, waiter)) {
1519 struct rt_mutex_waiter *waiter,
1534 * Remove a waiter from a lock and give up
1540 struct rt_mutex_waiter *waiter)
1542 bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
1549 rt_mutex_dequeue(lock, waiter);
1554 * Only update priority if the waiter was the highest priority
1555 * waiter of the lock and there is an owner to update.
1562 rt_mutex_dequeue_pi(owner, waiter);
1599 * @waiter: the pre-initialized rt_mutex_waiter
1608 struct rt_mutex_waiter *waiter,
1619 if (try_to_take_rt_mutex(lock, current, waiter)) {
1634 ret = __ww_mutex_check_kill(rtm, waiter, ww_ctx);
1639 if (waiter == rt_mutex_top_waiter(lock))
1645 if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner)) {
1688 * @waiter: Initialized waiter for blocking
1695 struct rt_mutex_waiter *waiter,
1719 ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk, wake_q);
1721 ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter, wake_q);
1733 remove_waiter(lock, waiter);
1734 rt_mutex_handle_deadlock(ret, chwalk, lock, waiter);
1739 * try_to_take_rt_mutex() sets the waiter bit
1754 struct rt_mutex_waiter waiter;
1757 rt_mutex_init_waiter(&waiter);
1758 waiter.ww_ctx = ww_ctx;
1761 &waiter, wake_q);
1763 debug_rt_mutex_free_waiter(&waiter);
1783 * Do all pre-schedule work here, before we queue a waiter and invoke
1786 * rtlock_slowlock() and will then enqueue a second waiter for this
1833 struct rt_mutex_waiter waiter;
1844 rt_mutex_init_rtlock_waiter(&waiter);
1851 task_blocks_on_rt_mutex(lock, &waiter, current, NULL, RT_MUTEX_MIN_CHAINWALK, wake_q);
1855 if (try_to_take_rt_mutex(lock, current, &waiter)) {
1860 if (&waiter == rt_mutex_top_waiter(lock))
1866 if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner)) {
1879 * try_to_take_rt_mutex() sets the waiter bit unconditionally.
1883 debug_rt_mutex_free_waiter(&waiter);
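
Both slow paths above share one shape: under lock->wait_lock, try to take the lock; otherwise enqueue a waiter (task_blocks_on_rt_mutex()), drop wait_lock, schedule, and retry on wakeup, with remove_waiter() undoing the enqueue if the sleep is aborted. The rtlock variant differs mainly in never aborting on signals. A loose pthread-based analogue of that wait-wake-try-to-take loop (made-up toy_* names, not the kernel API; priority ordering, steal checks and error paths omitted):

#include <pthread.h>
#include <stdbool.h>

/* toy mutex illustrating only the wait-wake-try-to-take loop shape */
struct toy_rtmutex {
	pthread_mutex_t wait_lock;   /* protects the fields below        */
	pthread_cond_t  waiters;     /* stands in for the wake_q wakeups */
	bool            owned;
	int             nwaiters;
};

#define TOY_RTMUTEX_INIT \
	{ PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false, 0 }

/* "try_to_take": succeeds only while unowned, called under wait_lock */
static bool toy_try_to_take(struct toy_rtmutex *m)
{
	if (m->owned)
		return false;
	m->owned = true;
	return true;
}

static void toy_lock(struct toy_rtmutex *m)
{
	pthread_mutex_lock(&m->wait_lock);
	m->nwaiters++;                     /* enqueue a waiter                  */
	while (!toy_try_to_take(m))
		pthread_cond_wait(&m->waiters, &m->wait_lock);  /* "schedule()" */
	m->nwaiters--;                     /* waiter leaves the queue on success */
	pthread_mutex_unlock(&m->wait_lock);
}

static void toy_unlock(struct toy_rtmutex *m)
{
	pthread_mutex_lock(&m->wait_lock);
	m->owned = false;
	if (m->nwaiters)
		pthread_cond_signal(&m->waiters);   /* wake one waiter */
	pthread_mutex_unlock(&m->wait_lock);
}

Unlike the real slow path, this toy does not order waiters by priority or perform steal checks; it only shows how the retry loop and the waiter enqueue/dequeue bracket the sleep.
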