1 // SPDX-License-Identifier: GPL-2.0-only
3 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
7 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
8 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
17 * See Documentation/locking/rt-mutex-design.rst for details.
69 * lock->owner state tracking:
71 * lock->owner holds the task_struct pointer of the owner. Bit 0
82 * possible when bit 0 of lock->owner is 0.
85 * with ->wait_lock is held. To prevent any fast path cmpxchg to the lock,
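/*
 * Illustration only (not kernel code): a userspace sketch, with made-up
 * names, of the bit-0 encoding described above. Because task_struct
 * pointers are at least word-aligned, the owner pointer and the
 * "has waiters" flag can share a single word.
 */
#include <stdint.h>
#include <stdio.h>

#define HAS_WAITERS 1UL		/* plays the role of RT_MUTEX_HAS_WAITERS */

struct task { int pid; };

static uintptr_t owner_encode(struct task *owner, int has_waiters)
{
	return (uintptr_t)owner | (has_waiters ? HAS_WAITERS : 0);
}

static struct task *owner_decode(uintptr_t word)
{
	return (struct task *)(word & ~HAS_WAITERS);
}

int main(void)
{
	struct task t1 = { .pid = 42 };
	uintptr_t word = owner_encode(&t1, 1);

	printf("owner pid=%d has_waiters=%lu\n",
	       owner_decode(word)->pid, (unsigned long)(word & HAS_WAITERS));
	return 0;
}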
97 __must_hold(&lock->wait_lock) in rt_mutex_owner_encode()
109 __must_hold(&lock->wait_lock) in rt_mutex_set_owner()
112 * lock->wait_lock is held but explicit acquire semantics are needed in rt_mutex_set_owner()
115 xchg_acquire(&lock->owner, rt_mutex_owner_encode(lock, owner)); in rt_mutex_set_owner()
119 __must_hold(&lock->wait_lock) in rt_mutex_clear_owner()
121 /* lock->wait_lock is held so the unlock provides release semantics. */ in rt_mutex_clear_owner()
122 WRITE_ONCE(lock->owner, rt_mutex_owner_encode(lock, NULL)); in rt_mutex_clear_owner()
126 __must_hold(&lock->wait_lock) in clear_rt_mutex_waiters()
128 lock->owner = (struct task_struct *) in clear_rt_mutex_waiters()
129 ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS); in clear_rt_mutex_waiters()
134 __must_hold(&lock->wait_lock) in fixup_rt_mutex_waiters()
136 unsigned long owner, *p = (unsigned long *) &lock->owner; in fixup_rt_mutex_waiters()
143 * lock->owner still has the waiters bit set, otherwise the in fixup_rt_mutex_waiters()
147 * l->owner=T1 in fixup_rt_mutex_waiters()
149 * lock(l->lock) in fixup_rt_mutex_waiters()
150 * l->owner = T1 | HAS_WAITERS; in fixup_rt_mutex_waiters()
153 * unlock(l->lock) in fixup_rt_mutex_waiters()
157 * lock(l->lock) in fixup_rt_mutex_waiters()
158 * l->owner = T1 | HAS_WAITERS; in fixup_rt_mutex_waiters()
161 * unlock(l->lock) in fixup_rt_mutex_waiters()
163 * signal(->T2) signal(->T3) in fixup_rt_mutex_waiters()
164 * lock(l->lock) in fixup_rt_mutex_waiters()
167 * unlock(l->lock) in fixup_rt_mutex_waiters()
168 * lock(l->lock) in fixup_rt_mutex_waiters()
172 * unlock(l->lock) in fixup_rt_mutex_waiters()
173 * lock(l->lock) in fixup_rt_mutex_waiters()
176 * l->owner = owner in fixup_rt_mutex_waiters()
177 * owner = l->owner & ~HAS_WAITERS; in fixup_rt_mutex_waiters()
178 * ==> l->owner = T1 in fixup_rt_mutex_waiters()
180 * lock(l->lock) in fixup_rt_mutex_waiters()
183 * owner = l->owner & ~HAS_WAITERS; in fixup_rt_mutex_waiters()
184 * cmpxchg(l->owner, T1, NULL) in fixup_rt_mutex_waiters()
185 * ===> Success (l->owner = NULL) in fixup_rt_mutex_waiters()
187 * l->owner = owner in fixup_rt_mutex_waiters()
188 * ==> l->owner = T1 in fixup_rt_mutex_waiters()
193 * serialized by l->lock, so nothing else can modify the waiters in fixup_rt_mutex_waiters()
194 * bit. If the bit is set then nothing can change l->owner either in fixup_rt_mutex_waiters()
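/*
 * Illustration only: a simplified userspace sketch (C11 atomics, made-up
 * names) of the repair this scenario motivates. If the waiters bit is
 * still set although the wait list is empty, it is cleared with an atomic
 * RMW on the current value of the owner word, so a concurrent
 * cmpxchg-based fast-path transition cannot be overwritten by writing
 * back a stale value.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define HAS_WAITERS 1UL

static _Atomic uintptr_t owner_word;	/* owner pointer | waiters bit */

static bool wait_list_empty(void)	/* placeholder for the real check */
{
	return true;
}

static void fixup_stale_waiters_bit(void)
{
	uintptr_t owner = atomic_load_explicit(&owner_word, memory_order_relaxed);

	if (!(owner & HAS_WAITERS) || !wait_list_empty())
		return;

	/* Exchange, not a plain store: writing back a previously read value
	 * could undo a concurrent fast-path cmpxchg on the same word. */
	atomic_exchange_explicit(&owner_word, owner & ~HAS_WAITERS,
				 memory_order_acquire);
}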
226 return try_cmpxchg_acquire(&lock->owner, &old, new); in rt_mutex_cmpxchg_acquire()
238 return try_cmpxchg_release(&lock->owner, &old, new); in rt_mutex_cmpxchg_release()
242 * Callers must hold the ->wait_lock -- which is the whole purpose as we force
248 unsigned long *p = (unsigned long *) &lock->owner; in mark_rt_mutex_waiters()
257 * The cmpxchg loop above is relaxed to avoid back-to-back ACQUIRE in mark_rt_mutex_waiters()
267 * 2) Drop lock->wait_lock
272 __releases(lock->wait_lock) in unlock_rt_mutex_safe()
277 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in unlock_rt_mutex_safe()
323 * acquire a non-contended rtmutex. in rt_mutex_try_acquire()
336 __must_hold(&lock->wait_lock) in mark_rt_mutex_waiters()
338 lock->owner = (struct task_struct *) in mark_rt_mutex_waiters()
339 ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS); in mark_rt_mutex_waiters()
343 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
347 __releases(lock->wait_lock) in unlock_rt_mutex_safe()
349 lock->owner = NULL; in unlock_rt_mutex_safe()
350 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in unlock_rt_mutex_safe()
357 int prio = task->prio; in __waiter_prio()
366 * Update the waiter->tree copy of the sort keys.
371 lockdep_assert_held(&waiter->lock->wait_lock); in waiter_update_prio()
372 lockdep_assert(RB_EMPTY_NODE(&waiter->tree.entry)); in waiter_update_prio()
374 waiter->tree.prio = __waiter_prio(task); in waiter_update_prio()
375 waiter->tree.deadline = task->dl.deadline; in waiter_update_prio()
379 * Update the waiter->pi_tree copy of the sort keys (from the tree copy).
384 lockdep_assert_held(&waiter->lock->wait_lock); in waiter_clone_prio()
385 lockdep_assert_held(&task->pi_lock); in waiter_clone_prio()
386 lockdep_assert(RB_EMPTY_NODE(&waiter->pi_tree.entry)); in waiter_clone_prio()
388 waiter->pi_tree.prio = waiter->tree.prio; in waiter_clone_prio()
389 waiter->pi_tree.deadline = waiter->tree.deadline; in waiter_clone_prio()
396 &(struct rt_waiter_node){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline }
403 if (left->prio < right->prio) in rt_waiter_node_less()
412 if (dl_prio(left->prio)) in rt_waiter_node_less()
413 return dl_time_before(left->deadline, right->deadline); in rt_waiter_node_less()
421 if (left->prio != right->prio) in rt_waiter_node_equal()
430 if (dl_prio(left->prio)) in rt_waiter_node_equal()
431 return left->deadline == right->deadline; in rt_waiter_node_equal()
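/*
 * Illustration only: a standalone comparator (made-up struct, not the
 * kernel's rt_waiter_node) showing the ordering the helpers above
 * implement. A numerically lower prio wins; when both waiters are
 * deadline tasks, the earlier absolute deadline wins.
 */
#include <stdbool.h>
#include <stdint.h>

struct waiter_key {
	int prio;		/* lower value == higher priority */
	bool is_deadline;	/* stands in for dl_prio(prio) */
	uint64_t deadline;	/* absolute deadline, DL tasks only */
};

static bool waiter_less(const struct waiter_key *l, const struct waiter_key *r)
{
	if (l->prio < r->prio)
		return true;
	if (l->prio > r->prio)
		return false;

	/* Equal prio: if the left waiter is a deadline task the right one
	 * is too, so break the tie on the deadline. */
	if (l->is_deadline)
		return (int64_t)(l->deadline - r->deadline) < 0;

	return false;
}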
439 if (rt_waiter_node_less(&waiter->tree, &top_waiter->tree)) in rt_mutex_steal()
447 if (rt_or_dl_prio(waiter->tree.prio)) in rt_mutex_steal()
450 return rt_waiter_node_equal(&waiter->tree, &top_waiter->tree); in rt_mutex_steal()
464 if (rt_waiter_node_less(&aw->tree, &bw->tree)) in __waiter_less()
470 if (rt_waiter_node_less(&bw->tree, &aw->tree)) in __waiter_less()
473 /* NOTE: relies on waiter->ww_ctx being set before insertion */ in __waiter_less()
474 if (aw->ww_ctx) { in __waiter_less()
475 if (!bw->ww_ctx) in __waiter_less()
478 return (signed long)(aw->ww_ctx->stamp - in __waiter_less()
479 bw->ww_ctx->stamp) < 0; in __waiter_less()
488 lockdep_assert_held(&lock->wait_lock); in rt_mutex_enqueue()
490 rb_add_cached(&waiter->tree.entry, &lock->waiters, __waiter_less); in rt_mutex_enqueue()
496 lockdep_assert_held(&lock->wait_lock); in rt_mutex_dequeue()
498 if (RB_EMPTY_NODE(&waiter->tree.entry)) in rt_mutex_dequeue()
501 rb_erase_cached(&waiter->tree.entry, &lock->waiters); in rt_mutex_dequeue()
502 RB_CLEAR_NODE(&waiter->tree.entry); in rt_mutex_dequeue()
516 lockdep_assert_held(&task->pi_lock); in rt_mutex_enqueue_pi()
518 rb_add_cached(&waiter->pi_tree.entry, &task->pi_waiters, __pi_waiter_less); in rt_mutex_enqueue_pi()
524 lockdep_assert_held(&task->pi_lock); in rt_mutex_dequeue_pi()
526 if (RB_EMPTY_NODE(&waiter->pi_tree.entry)) in rt_mutex_dequeue_pi()
529 rb_erase_cached(&waiter->pi_tree.entry, &task->pi_waiters); in rt_mutex_dequeue_pi()
530 RB_CLEAR_NODE(&waiter->pi_tree.entry); in rt_mutex_dequeue_pi()
538 lockdep_assert_held(&lock->wait_lock); in rt_mutex_adjust_prio()
540 lockdep_assert_held(&p->pi_lock); in rt_mutex_adjust_prio()
543 pi_task = task_top_pi_waiter(p)->task; in rt_mutex_adjust_prio()
555 WARN_ON_ONCE(wqh->rtlock_task); in rt_mutex_wake_q_add_task()
557 wqh->rtlock_task = task; in rt_mutex_wake_q_add_task()
559 wake_q_add(&wqh->head, task); in rt_mutex_wake_q_add_task()
566 rt_mutex_wake_q_add_task(wqh, w->task, w->wake_state); in rt_mutex_wake_q_add()
571 if (IS_ENABLED(CONFIG_PREEMPT_RT) && wqh->rtlock_task) { in rt_mutex_wake_up_q()
572 wake_up_state(wqh->rtlock_task, TASK_RTLOCK_WAIT); in rt_mutex_wake_up_q()
573 put_task_struct(wqh->rtlock_task); in rt_mutex_wake_up_q()
574 wqh->rtlock_task = NULL; in rt_mutex_wake_up_q()
577 if (!wake_q_empty(&wqh->head)) in rt_mutex_wake_up_q()
578 wake_up_q(&wqh->head); in rt_mutex_wake_up_q()
608 return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL; in task_blocked_on_lock()
613 * Decreases task's usage by one - may thus free the task.
630 * Returns 0 or -EDEADLK.
635 * [Pn] task->pi_lock held
636 * [L] rtmutex->wait_lock held
640 * rtmutex->wait_lock
641 * task->pi_lock
657 * [1] lock(task->pi_lock); [R] acquire [P1]
658 * [2] waiter = task->pi_blocked_on; [P1]
660 * [4] lock = waiter->lock; [P1]
661 * [5] if (!try_lock(lock->wait_lock)) { [P1] try to acquire [L]
662 * unlock(task->pi_lock); release [P1]
667 * [8] unlock(task->pi_lock); release [P1]
672 * lock(task->pi_lock); [L] acquire [P2]
675 * [13] unlock(task->pi_lock); release [P2]
676 * unlock(lock->wait_lock); release [L]
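/*
 * Illustration only: the step list above relies on a trylock-and-retry
 * pattern, because the chain walk holds the "inner" lock (task->pi_lock)
 * while needing the "outer" one (lock->wait_lock). A rough userspace
 * sketch of that pattern with POSIX mutexes; the names are hypothetical.
 */
#include <pthread.h>
#include <sched.h>

static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;	/* outer */
static pthread_mutex_t pi_lock   = PTHREAD_MUTEX_INITIALIZER;	/* inner */

/* Take the outer lock while already holding the inner one. */
static void lock_outer_while_holding_inner(void)
{
	while (pthread_mutex_trylock(&wait_lock) != 0) {
		/* Blocking here would invert the documented lock order,
		 * so drop the inner lock, let others run, and retry. */
		pthread_mutex_unlock(&pi_lock);
		sched_yield();
		pthread_mutex_lock(&pi_lock);
	}
}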
721 top_task->comm, task_pid_nr(top_task)); in rt_mutex_adjust_prio_chain()
725 return -EDEADLK; in rt_mutex_adjust_prio_chain()
738 raw_spin_lock_irq(&task->pi_lock); in rt_mutex_adjust_prio_chain()
743 waiter = task->pi_blocked_on; in rt_mutex_adjust_prio_chain()
746 * [3] check_exit_conditions_1() protected by task->pi_lock. in rt_mutex_adjust_prio_chain()
773 if (next_lock != waiter->lock) in rt_mutex_adjust_prio_chain()
784 * P3 should not return -EDEADLK because it gets trapped in the cycle in rt_mutex_adjust_prio_chain()
785 * created by P1 and P2 (which will resolve -- and runs into in rt_mutex_adjust_prio_chain()
798 if (IS_ENABLED(CONFIG_PREEMPT_RT) && waiter->ww_ctx && detect_deadlock) in rt_mutex_adjust_prio_chain()
830 if (rt_waiter_node_equal(&waiter->tree, task_to_waiter_node(task))) { in rt_mutex_adjust_prio_chain()
838 * [4] Get the next lock; per holding task->pi_lock we can't unblock in rt_mutex_adjust_prio_chain()
841 lock = waiter->lock; in rt_mutex_adjust_prio_chain()
843 * [5] We need to trylock here as we are holding task->pi_lock, in rt_mutex_adjust_prio_chain()
847 * Per the above, holding task->pi_lock guarantees lock exists, so in rt_mutex_adjust_prio_chain()
848 * inverting this lock order is infeasible from a life-time in rt_mutex_adjust_prio_chain()
851 if (!raw_spin_trylock(&lock->wait_lock)) { in rt_mutex_adjust_prio_chain()
852 raw_spin_unlock_irq(&task->pi_lock); in rt_mutex_adjust_prio_chain()
858 * [6] check_exit_conditions_2() protected by task->pi_lock and in rt_mutex_adjust_prio_chain()
859 * lock->wait_lock. in rt_mutex_adjust_prio_chain()
867 ret = -EDEADLK; in rt_mutex_adjust_prio_chain()
872 * logic pick which of the contending threads gets -EDEADLK. in rt_mutex_adjust_prio_chain()
878 if (IS_ENABLED(CONFIG_PREEMPT_RT) && orig_waiter && orig_waiter->ww_ctx) in rt_mutex_adjust_prio_chain()
881 raw_spin_unlock(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
895 raw_spin_unlock(&task->pi_lock); in rt_mutex_adjust_prio_chain()
899 * [9] check_exit_conditions_3 protected by lock->wait_lock. in rt_mutex_adjust_prio_chain()
903 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
909 raw_spin_lock(&task->pi_lock); in rt_mutex_adjust_prio_chain()
924 raw_spin_unlock(&task->pi_lock); in rt_mutex_adjust_prio_chain()
925 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
962 * Since we hold lock->wait_lock, task cannot unblock, even if we in rt_mutex_adjust_prio_chain()
963 * release task->pi_lock. in rt_mutex_adjust_prio_chain()
965 raw_spin_unlock(&task->pi_lock); in rt_mutex_adjust_prio_chain()
969 * [9] check_exit_conditions_3 protected by lock->wait_lock. in rt_mutex_adjust_prio_chain()
983 wake_up_state(top_waiter->task, top_waiter->wake_state); in rt_mutex_adjust_prio_chain()
984 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
991 * Per holding lock->wait_lock and checking for !owner above, there in rt_mutex_adjust_prio_chain()
995 raw_spin_lock(&task->pi_lock); in rt_mutex_adjust_prio_chain()
1034 * [12] check_exit_conditions_4() protected by task->pi_lock in rt_mutex_adjust_prio_chain()
1035 * and lock->wait_lock. The actual decisions are made after we in rt_mutex_adjust_prio_chain()
1041 * task->pi_lock next_lock cannot be dereferenced anymore. in rt_mutex_adjust_prio_chain()
1051 raw_spin_unlock(&task->pi_lock); in rt_mutex_adjust_prio_chain()
1052 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
1075 raw_spin_unlock_irq(&task->pi_lock); in rt_mutex_adjust_prio_chain()
1083 * Try to take an rt-mutex
1085 * Must be called with lock->wait_lock held and interrupts disabled
1096 lockdep_assert_held(&lock->wait_lock); in try_to_take_rt_mutex()
1100 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all in try_to_take_rt_mutex()
1102 * and they serialize on @lock->wait_lock. in try_to_take_rt_mutex()
1107 * - There is a lock owner. The caller must fixup the in try_to_take_rt_mutex()
1111 * - @task acquires the lock and there are no other in try_to_take_rt_mutex()
1150 * the lock. @task->pi_blocked_on is NULL, so it does in try_to_take_rt_mutex()
1167 * pi_lock dance. @task->pi_blocked_on is NULL in try_to_take_rt_mutex()
1176 * Clear @task->pi_blocked_on. Requires protection by in try_to_take_rt_mutex()
1177 * @task->pi_lock. Redundant operation for the @waiter == NULL in try_to_take_rt_mutex()
1181 raw_spin_lock(&task->pi_lock); in try_to_take_rt_mutex()
1182 task->pi_blocked_on = NULL; in try_to_take_rt_mutex()
1186 * waiter into @task->pi_waiters tree. in try_to_take_rt_mutex()
1190 raw_spin_unlock(&task->pi_lock); in try_to_take_rt_mutex()
1207 * This must be called with lock->wait_lock held and interrupts disabled
1215 __must_hold(&lock->wait_lock) in task_blocks_on_rt_mutex()
1222 lockdep_assert_held(&lock->wait_lock); in task_blocks_on_rt_mutex()
1229 * the other will detect the deadlock and return -EDEADLOCK, in task_blocks_on_rt_mutex()
1237 return -EDEADLK; in task_blocks_on_rt_mutex()
1239 raw_spin_lock(&task->pi_lock); in task_blocks_on_rt_mutex()
1240 waiter->task = task; in task_blocks_on_rt_mutex()
1241 waiter->lock = lock; in task_blocks_on_rt_mutex()
1250 task->pi_blocked_on = waiter; in task_blocks_on_rt_mutex()
1252 raw_spin_unlock(&task->pi_lock); in task_blocks_on_rt_mutex()
1259 __assume_ctx_lock(&rtm->rtmutex.wait_lock); in task_blocks_on_rt_mutex()
1262 raw_spin_lock(&task->pi_lock); in task_blocks_on_rt_mutex()
1264 task->pi_blocked_on = NULL; in task_blocks_on_rt_mutex()
1265 raw_spin_unlock(&task->pi_lock); in task_blocks_on_rt_mutex()
1273 raw_spin_lock(&owner->pi_lock); in task_blocks_on_rt_mutex()
1279 if (owner->pi_blocked_on) in task_blocks_on_rt_mutex()
1288 raw_spin_unlock(&owner->pi_lock); in task_blocks_on_rt_mutex()
1304 raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q); in task_blocks_on_rt_mutex()
1309 raw_spin_lock_irq(&lock->wait_lock); in task_blocks_on_rt_mutex()
1316 * queue it up.
1318 * Called with lock->wait_lock held and interrupts disabled.
1325 lockdep_assert_held(&lock->wait_lock); in mark_wakeup_next_waiter()
1327 raw_spin_lock(&current->pi_lock); in mark_wakeup_next_waiter()
1332 * Remove it from current->pi_waiters and deboost. in mark_wakeup_next_waiter()
1335 * rt_mutex_setprio() to update p->pi_top_task before the in mark_wakeup_next_waiter()
1349 lock->owner = (void *) RT_MUTEX_HAS_WAITERS; in mark_wakeup_next_waiter()
1354 * p->pi_top_task pointer points to a blocked task). This however can in mark_wakeup_next_waiter()
1363 raw_spin_unlock(&current->pi_lock); in mark_wakeup_next_waiter()
1367 __must_hold(&lock->wait_lock) in __rt_mutex_slowtrylock()
1381 * Slow path try-lock function:
1390 * This can be done without taking the @lock->wait_lock as in rt_mutex_slowtrylock()
1400 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowtrylock()
1404 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rt_mutex_slowtrylock()
1418 * Slow path to release a rt-mutex.
1426 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowunlock()
1435 * foo->lock->owner = NULL; in rt_mutex_slowunlock()
1436 * rtmutex_lock(foo->lock); <- fast path in rt_mutex_slowunlock()
1437 * free = atomic_dec_and_test(foo->refcnt); in rt_mutex_slowunlock()
1438 * rtmutex_unlock(foo->lock); <- fast path in rt_mutex_slowunlock()
1440 * kfree(foo); in rt_mutex_slowunlock()
1441 * raw_spin_unlock(foo->lock->wait_lock); in rt_mutex_slowunlock()
1446 * lock->wait_lock. So we do the following sequence: in rt_mutex_slowunlock()
1450 * raw_spin_unlock(&lock->wait_lock); in rt_mutex_slowunlock()
1451 * if (cmpxchg(&lock->owner, owner, 0) == owner) in rt_mutex_slowunlock()
1456 * lock->owner is serialized by lock->wait_lock: in rt_mutex_slowunlock()
1458 * lock->owner = NULL; in rt_mutex_slowunlock()
1459 * raw_spin_unlock(&lock->wait_lock); in rt_mutex_slowunlock()
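/*
 * Illustration only: a userspace approximation (C11 atomics plus a
 * pthread mutex standing in for wait_lock) of the ordering rule spelled
 * out above. In the lockless-fastpath case the owner word is released
 * with a cmpxchg *after* wait_lock is dropped, and the slow path loops
 * if that cmpxchg fails because a new waiter showed up in between.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic uintptr_t owner_word;	/* 0 == unlocked */
static _Atomic int nr_waiters;		/* stand-in for the waiter tree */

static void wake_top_waiter(void)	/* placeholder for the real wakeup */
{
}

void slow_unlock(uintptr_t me)
{
	pthread_mutex_lock(&wait_lock);

	while (atomic_load(&nr_waiters) == 0) {
		/* Drop wait_lock first ... */
		pthread_mutex_unlock(&wait_lock);

		/* ... then release the owner word with a cmpxchg. */
		uintptr_t expected = me;
		if (atomic_compare_exchange_strong(&owner_word, &expected, 0))
			return;		/* no waiter slipped in, done */

		/* A waiter appeared between the unlock and the cmpxchg:
		 * retake wait_lock and re-evaluate. */
		pthread_mutex_lock(&wait_lock);
	}

	wake_top_waiter();
	pthread_mutex_unlock(&wait_lock);
}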
1462 /* Drops lock->wait_lock ! */ in rt_mutex_slowunlock()
1466 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowunlock()
1473 * Queue the next waiter for wakeup once we release the wait_lock. in rt_mutex_slowunlock()
1476 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); in rt_mutex_slowunlock()
1510 * - the lock owner has been scheduled out in rtmutex_spin_on_owner()
1511 * - current is no longer the top waiter in rtmutex_spin_on_owner()
1512 * - current is requested to reschedule (redundant in rtmutex_spin_on_owner()
1514 * - the VCPU on which owner runs is preempted in rtmutex_spin_on_owner()
1538 * - rtmutex, futex on all kernels
1539 * - mutex and rwsem substitutions on RT kernels
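/*
 * Illustration only: a schematic of the spin-wait loop bounded by the
 * conditions listed above. owner_running(), still_top_waiter() and
 * should_resched() are hypothetical placeholders for the kernel's checks.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic uintptr_t owner_word;	/* current lock owner, 0 == unlocked */

static bool owner_running(void)    { return true; }	/* owner still on a CPU? */
static bool still_top_waiter(void) { return true; }	/* still first in line? */
static bool should_resched(void)   { return false; }	/* need_resched() stand-in */

/*
 * Spin while @owner still holds the lock. Return true when the owner
 * changed (worth another trylock), false when one of the conditions
 * above says to stop spinning and block instead.
 */
static bool spin_on_owner(uintptr_t owner)
{
	while (atomic_load(&owner_word) == owner) {
		if (!owner_running() || !still_top_waiter() || should_resched())
			return false;
		/* cpu_relax() in the kernel */
	}
	return true;
}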
1545 * Must be called with lock->wait_lock held and interrupts disabled. It must
1552 __must_hold(&lock->wait_lock) in remove_waiter()
1556 struct task_struct *waiter_task = waiter->task; in remove_waiter()
1559 lockdep_assert_held(&lock->wait_lock); in remove_waiter()
1561 scoped_guard(raw_spinlock, &waiter_task->pi_lock) { in remove_waiter()
1563 waiter_task->pi_blocked_on = NULL; in remove_waiter()
1573 raw_spin_lock(&owner->pi_lock); in remove_waiter()
1585 raw_spin_unlock(&owner->pi_lock); in remove_waiter()
1597 raw_spin_unlock_irq(&lock->wait_lock); in remove_waiter()
1602 raw_spin_lock_irq(&lock->wait_lock); in remove_waiter()
1606 * rt_mutex_slowlock_block() - Perform the wait-wake-try-to-take loop
1611 * @timeout: the pre-initialized and started timer, or NULL for none
1612 * @waiter: the pre-initialized rt_mutex_waiter
1613 * @wake_q: wake_q of tasks to wake when we drop the lock->wait_lock
1615 * Must be called with lock->wait_lock held and interrupts disabled
1623 __releases(&lock->wait_lock) __acquires(&lock->wait_lock) in rt_mutex_slowlock_block()
1629 __assume_ctx_lock(&rtm->rtmutex.wait_lock); in rt_mutex_slowlock_block()
1639 if (timeout && !timeout->task) { in rt_mutex_slowlock_block()
1640 ret = -ETIMEDOUT; in rt_mutex_slowlock_block()
1644 ret = -EINTR; in rt_mutex_slowlock_block()
1658 raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q); in rt_mutex_slowlock_block()
1665 raw_spin_lock_irq(&lock->wait_lock); in rt_mutex_slowlock_block()
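/*
 * Illustration only: the shape of the wait-wake-try-to-take loop the
 * lines above belong to. All helpers are hypothetical stand-ins; the
 * point is the structure: try to take the lock, bail out on timeout or
 * signal, otherwise sleep without holding the wait_lock and retake it
 * after waking.
 */
#include <errno.h>
#include <stdbool.h>

static bool try_take_lock(void)      { return false; }
static bool timed_out(void)          { return false; }
static bool signal_is_pending(void)  { return false; }
static void drop_wait_lock(void)     { }
static void take_wait_lock(void)     { }
static void block_until_woken(void)  { }

/* Called with the wait_lock held; returns 0, -ETIMEDOUT or -EINTR. */
static int slowlock_wait_loop(void)
{
	for (;;) {
		if (try_take_lock())
			return 0;

		if (timed_out())
			return -ETIMEDOUT;
		if (signal_is_pending())
			return -EINTR;

		drop_wait_lock();
		block_until_woken();
		take_wait_lock();
	}
}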
1676 __must_hold(&lock->wait_lock) in rt_mutex_handle_deadlock()
1679 * If the result is not -EDEADLOCK or the caller requested in rt_mutex_handle_deadlock()
1682 if (res != -EDEADLOCK || detect_deadlock) in rt_mutex_handle_deadlock()
1685 if (build_ww_mutex() && w->ww_ctx) in rt_mutex_handle_deadlock()
1688 raw_spin_unlock_irq(&lock->wait_lock); in rt_mutex_handle_deadlock()
1699 * __rt_mutex_slowlock - Locking slowpath invoked with lock::wait_lock held
1713 __must_hold(&lock->wait_lock) in __rt_mutex_slowlock()
1719 __assume_ctx_lock(&rtm->rtmutex.wait_lock); in __rt_mutex_slowlock()
1720 lockdep_assert_held(&lock->wait_lock); in __rt_mutex_slowlock()
1744 if (!ww_ctx->is_wait_die) in __rt_mutex_slowlock()
1771 __must_hold(&lock->wait_lock) in __rt_mutex_slowlock_locked()
1788 * rt_mutex_slowlock - Locking slowpath invoked when fast path fails
1802 * Do all pre-schedule work here, before we queue a waiter and invoke in rt_mutex_slowlock()
1803 * PI -- any such work that trips on rtlock (PREEMPT_RT spinlock) would in rt_mutex_slowlock()
1814 * rtmutex with lock->wait_lock held. But we cannot unconditionally in rt_mutex_slowlock()
1818 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rt_mutex_slowlock()
1820 raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q); in rt_mutex_slowlock()
1829 lockdep_assert(!current->pi_blocked_on); in __rt_mutex_lock()
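/*
 * Illustration only: the fast-path/slow-path split the line above belongs
 * to, reduced to userspace C11 atomics with made-up names. Acquiring an
 * uncontended lock is a single cmpxchg of the owner word from 0 to the
 * caller; everything else drops into the blocking slow path.
 */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uintptr_t owner_word;	/* 0 == unlocked */

static int slow_lock(uintptr_t me)	/* placeholder for the blocking path */
{
	(void)me;
	return 0;
}

static int lock_rtmutex(uintptr_t me)
{
	uintptr_t expected = 0;

	if (atomic_compare_exchange_strong_explicit(&owner_word, &expected, me,
						    memory_order_acquire,
						    memory_order_relaxed))
		return 0;	/* fast path: lock was free */

	return slow_lock(me);	/* contended: block with PI handling */
}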
1844 * rtlock_slowlock_locked - Slow path lock acquisition for RT locks
1850 __releases(&lock->wait_lock) __acquires(&lock->wait_lock) in rtlock_slowlock_locked()
1855 lockdep_assert_held(&lock->wait_lock); in rtlock_slowlock_locked()
1883 raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q); in rtlock_slowlock_locked()
1890 raw_spin_lock_irq(&lock->wait_lock); in rtlock_slowlock_locked()
1913 raw_spin_lock_irqsave(&lock->wait_lock, flags); in rtlock_slowlock()
1915 raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q); in rtlock_slowlock()