xref: /linux/kernel/locking/rtmutex.c (revision 6d41c675a5394057f6fb1dc97cc0a0e360f2c2f8)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 *  Copyright (C) 2006 Esben Nielsen
 *
 *  See Documentation/locking/rt-mutex-design.rst for details.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/timer.h>

#include "rtmutex_common.h"

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0
 * is used to keep track of the "lock has waiters" state.
 *
 * owner	bit0
 * NULL		0	lock is free (fast acquire possible)
 * NULL		1	lock is free and has waiters and the top waiter
 *				is going to take the lock*
 * taskpointer	0	lock is held (fast release possible)
 * taskpointer	1	lock is held and has waiters**
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 of lock->owner is 0.
 *
 * (*) It also can be a transitional state when grabbing the lock
 * with ->wait_lock held. To prevent any fast path cmpxchg to the lock,
 * we need to set bit 0 before looking at the lock, and the owner may
 * be NULL during this small window, hence this can be a transitional
 * state.
 *
 * (**) There is a small time when bit 0 is set but there are no
 * waiters. This can happen when grabbing the lock in the slow path.
 * To prevent a cmpxchg of the owner releasing the lock, we need to
 * set this bit before looking at the lock.
 */

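/*
 * Illustrative sketch of the resulting fast path (a hedged example,
 * not a verbatim copy of the call sites in this file): because "free
 * and no waiters" is encoded as a plain NULL owner, an acquire boils
 * down to a single cmpxchg which fails as soon as bit 0 is set:
 *
 *	if (rt_mutex_cmpxchg_acquire(lock, NULL, current))
 *		return;		<- fast acquire, no waiters
 *	...			<- otherwise owned or bit 0 set: slow path
 */
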
static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner;

	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	WRITE_ONCE(lock->owner, (struct task_struct *)val);
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	if (rt_mutex_has_waiters(lock))
		return;

	/*
	 * The rbtree has no waiters enqueued, now make sure that the
	 * lock->owner still has the waiters bit set, otherwise the
	 * following can happen:
	 *
	 * CPU 0	CPU 1		CPU 2
	 * l->owner=T1
	 *		rt_mutex_lock(l)
	 *		lock(l->lock)
	 *		l->owner = T1 | HAS_WAITERS;
	 *		enqueue(T2)
	 *		boost()
	 *		  unlock(l->lock)
	 *		block()
	 *
	 *				rt_mutex_lock(l)
	 *				lock(l->lock)
	 *				l->owner = T1 | HAS_WAITERS;
	 *				enqueue(T3)
	 *				boost()
	 *				  unlock(l->lock)
	 *				block()
	 *		signal(->T2)	signal(->T3)
	 *		lock(l->lock)
	 *		dequeue(T2)
	 *		deboost()
	 *		  unlock(l->lock)
	 *				lock(l->lock)
	 *				dequeue(T3)
	 *				 ==> wait list is empty
	 *				deboost()
	 *				 unlock(l->lock)
	 *		lock(l->lock)
	 *		fixup_rt_mutex_waiters()
	 *		  if (wait_list_empty(l)) {
	 *		    owner = l->owner & ~HAS_WAITERS;
	 *		    l->owner = owner
	 *		      ==> l->owner = T1
	 *		  }
	 *				lock(l->lock)
	 * rt_mutex_unlock(l)		fixup_rt_mutex_waiters()
	 *				  if (wait_list_empty(l)) {
	 *				    owner = l->owner & ~HAS_WAITERS;
	 * cmpxchg(l->owner, T1, NULL)
	 *  ===> Success (l->owner = NULL)
	 *
	 *				    l->owner = owner
	 *				      ==> l->owner = T1
	 *				  }
	 *
	 * With the check for the waiter bit in place T3 on CPU 2 will not
	 * overwrite l->owner. All tasks fiddling with the waiters bit are
	 * serialized by l->lock, so nothing else can modify the waiters
	 * bit. If the bit is set then nothing can change l->owner either,
	 * so the simple RMW is safe. The cmpxchg() will simply fail if it
	 * happens in the middle of the RMW because the waiters bit is
	 * still set.
	 */
	owner = READ_ONCE(*p);
	if (owner & RT_MUTEX_HAS_WAITERS)
		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
}

/*
 * We can speed up the acquire/release if there's no debugging state to be
 * set up.
 */
#ifndef CONFIG_DEBUG_RT_MUTEXES
# define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c)
# define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c)

/*
 * Callers must hold the ->wait_lock -- which is the whole purpose as we force
 * all future threads that attempt to [Rmw] the lock to the slowpath. As such,
 * relaxed semantics suffice.
 */
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	do {
		owner = *p;
	} while (cmpxchg_relaxed(p, owner,
				 owner | RT_MUTEX_HAS_WAITERS) != owner);
}

/*
 * Safe fastpath aware unlock:
 * 1) Clear the waiters bit
 * 2) Drop lock->wait_lock
 * 3) Try to unlock the lock with cmpxchg
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
					unsigned long flags)
	__releases(lock->wait_lock)
{
	struct task_struct *owner = rt_mutex_owner(lock);

	clear_rt_mutex_waiters(lock);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	/*
	 * If a new waiter comes in between the unlock and the cmpxchg
	 * we have two situations:
	 *
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 * cmpxchg(p, owner, 0) == owner
	 *					mark_rt_mutex_waiters(lock);
	 *					acquire(lock);
	 * or:
	 *
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 *					mark_rt_mutex_waiters(lock);
	 *
	 * cmpxchg(p, owner, 0) != owner
	 *					enqueue_waiter();
	 *					unlock(wait_lock);
	 * lock(wait_lock);
	 * wake waiter();
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 *					acquire(lock);
	 */
	return rt_mutex_cmpxchg_release(lock, owner, NULL);
}

#else
# define rt_mutex_cmpxchg_acquire(l,c,n)	(0)
# define rt_mutex_cmpxchg_release(l,c,n)	(0)

static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}

/*
 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
					unsigned long flags)
	__releases(lock->wait_lock)
{
	lock->owner = NULL;
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	return true;
}
#endif

/*
 * Only use with rt_mutex_waiter_{less,equal}()
 */
#define task_to_waiter(p)	\
	&(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }

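/*
 * Note (added for clarity): task_to_waiter() builds an on-stack
 * compound literal, so the resulting pointer is only valid within the
 * enclosing block. It exists purely to feed a task's scheduling
 * parameters into the comparison helpers below, e.g.:
 *
 *	if (rt_mutex_waiter_equal(waiter, task_to_waiter(task)))
 *		...
 */
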
static inline int
rt_mutex_waiter_less(struct rt_mutex_waiter *left,
		     struct rt_mutex_waiter *right)
{
	if (left->prio < right->prio)
		return 1;

	/*
	 * If both waiters have dl_prio(), we check the deadlines of the
	 * associated tasks.
	 * If left waiter has a dl_prio(), and we didn't return 1 above,
	 * then right waiter has a dl_prio() too.
	 */
	if (dl_prio(left->prio))
		return dl_time_before(left->deadline, right->deadline);

	return 0;
}

250fb00aca4SPeter Zijlstra 
25119830e55SPeter Zijlstra static inline int
25219830e55SPeter Zijlstra rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
25319830e55SPeter Zijlstra 		      struct rt_mutex_waiter *right)
25419830e55SPeter Zijlstra {
25519830e55SPeter Zijlstra 	if (left->prio != right->prio)
25619830e55SPeter Zijlstra 		return 0;
25719830e55SPeter Zijlstra 
25819830e55SPeter Zijlstra 	/*
25919830e55SPeter Zijlstra 	 * If both waiters have dl_prio(), we check the deadlines of the
26019830e55SPeter Zijlstra 	 * associated tasks.
26119830e55SPeter Zijlstra 	 * If left waiter has a dl_prio(), and we didn't return 0 above,
26219830e55SPeter Zijlstra 	 * then right waiter has a dl_prio() too.
26319830e55SPeter Zijlstra 	 */
26419830e55SPeter Zijlstra 	if (dl_prio(left->prio))
26519830e55SPeter Zijlstra 		return left->deadline == right->deadline;
26619830e55SPeter Zijlstra 
26719830e55SPeter Zijlstra 	return 1;
26819830e55SPeter Zijlstra }
26919830e55SPeter Zijlstra 
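/*
 * Worked example (added for clarity): with the kernel's "lower value
 * means higher priority" convention, a waiter with ->prio 10 sorts
 * before one with ->prio 50. All deadline tasks share the same
 * dl_prio() priority value, so two DL waiters compare equal on ->prio
 * and fall through to the ->deadline checks: the earlier absolute
 * deadline is "less" and thus becomes the top waiter.
 */
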
#define __node_2_waiter(node) \
	rb_entry((node), struct rt_mutex_waiter, tree_entry)

static inline bool __waiter_less(struct rb_node *a, const struct rb_node *b)
{
	return rt_mutex_waiter_less(__node_2_waiter(a), __node_2_waiter(b));
}

static void
rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
	rb_add_cached(&waiter->tree_entry, &lock->waiters, __waiter_less);
}

static void
rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
	if (RB_EMPTY_NODE(&waiter->tree_entry))
		return;

	rb_erase_cached(&waiter->tree_entry, &lock->waiters);
	RB_CLEAR_NODE(&waiter->tree_entry);
}

#define __node_2_pi_waiter(node) \
	rb_entry((node), struct rt_mutex_waiter, pi_tree_entry)

static inline bool __pi_waiter_less(struct rb_node *a, const struct rb_node *b)
{
	return rt_mutex_waiter_less(__node_2_pi_waiter(a), __node_2_pi_waiter(b));
}

static void
rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
	rb_add_cached(&waiter->pi_tree_entry, &task->pi_waiters, __pi_waiter_less);
}

static void
rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
	if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
		return;

	rb_erase_cached(&waiter->pi_tree_entry, &task->pi_waiters);
	RB_CLEAR_NODE(&waiter->pi_tree_entry);
}

static void rt_mutex_adjust_prio(struct task_struct *p)
{
	struct task_struct *pi_task = NULL;

	lockdep_assert_held(&p->pi_lock);

	if (task_has_pi_waiters(p))
		pi_task = task_top_pi_waiter(p)->task;

	rt_mutex_setprio(p, pi_task);
}

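/*
 * Illustrative scenario (hypothetical numbers, not from this file): a
 * SCHED_OTHER owner with base prio 120 holding a lock whose top pi
 * waiter has RT prio 90 gets boosted to 90 by the rt_mutex_setprio()
 * call above. Once its last pi waiter is dequeued, pi_task is NULL and
 * rt_mutex_setprio() lets the owner fall back to its normal priority.
 */
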
/*
 * Deadlock detection is conditional:
 *
 * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
 * if the chwalk argument is RT_MUTEX_FULL_CHAINWALK.
 *
 * If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always
 * conducted independently of the chwalk argument.
 *
 * If the waiter argument is NULL this indicates the deboost path and
 * deadlock detection is disabled independently of the chwalk argument
 * and the config settings.
 */
static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
					  enum rtmutex_chainwalk chwalk)
{
	/*
	 * This is just a wrapper function for the following call,
	 * because debug_rt_mutex_detect_deadlock() smells like a magic
	 * debug feature and I wanted to keep the cond function in the
	 * main source file along with the comments instead of having
	 * two of the same in the headers.
	 */
	return debug_rt_mutex_detect_deadlock(waiter, chwalk);
}

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

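/*
 * Note: max_lock_depth is runtime tunable via the kernel sysctl
 * interface (/proc/sys/kernel/max_lock_depth), which is why the depth
 * check in the chain walk below re-reads it and prints a fresh warning
 * when the limit is changed and reached again.
 */
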
static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
	return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
}

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage count by one - may thus free the task.
 *
 * @task:	the task owning the mutex (owner) for which a chain walk is
 *		probably needed
 * @chwalk:	do we have to carry out deadlock detection?
 * @orig_lock:	the mutex (can be NULL if we are walking the chain to recheck
 *		things for a task that has just got its priority adjusted, and
 *		is waiting on a mutex)
 * @next_lock:	the mutex on which the owner of @orig_lock was blocked before
 *		we dropped its pi_lock. Is never dereferenced, only used for
 *		comparison to detect lock chain changes.
 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
 *		its priority to the mutex owner (can be NULL in the case
 *		depicted above or if the top waiter has gone away and we are
 *		actually deboosting the owner)
 * @top_task:	the current top waiter
 *
 * Returns 0 or -EDEADLK.
 *
 * Chain walk basics and protection scope
 *
 * [R] refcount on task
 * [P] task->pi_lock held
 * [L] rtmutex->wait_lock held
 *
 * Step	Description				Protected by
 *	function arguments:
 *	@task					[R]
 *	@orig_lock if != NULL			@top_task is blocked on it
 *	@next_lock				Unprotected. Cannot be
 *						dereferenced. Only used for
 *						comparison.
 *	@orig_waiter if != NULL			@top_task is blocked on it
 *	@top_task				current, or in case of proxy
 *						locking protected by calling
 *						code
 *	again:
 *	  loop_sanity_check();
 *	retry:
 * [1]	  lock(task->pi_lock);			[R] acquire [P]
 * [2]	  waiter = task->pi_blocked_on;		[P]
 * [3]	  check_exit_conditions_1();		[P]
 * [4]	  lock = waiter->lock;			[P]
 * [5]	  if (!try_lock(lock->wait_lock)) {	[P] try to acquire [L]
 *	    unlock(task->pi_lock);		release [P]
 *	    goto retry;
 *	  }
 * [6]	  check_exit_conditions_2();		[P] + [L]
 * [7]	  requeue_lock_waiter(lock, waiter);	[P] + [L]
 * [8]	  unlock(task->pi_lock);		release [P]
 *	  put_task_struct(task);		release [R]
 * [9]	  check_exit_conditions_3();		[L]
 * [10]	  task = owner(lock);			[L]
 *	  get_task_struct(task);		[L] acquire [R]
 *	  lock(task->pi_lock);			[L] acquire [P]
 * [11]	  requeue_pi_waiter(tsk, waiters(lock));[P] + [L]
 * [12]	  check_exit_conditions_4();		[P] + [L]
 * [13]	  unlock(task->pi_lock);		release [P]
 *	  unlock(lock->wait_lock);		release [L]
 *	  goto again;
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
				      enum rtmutex_chainwalk chwalk,
				      struct rt_mutex *orig_lock,
				      struct rt_mutex *next_lock,
				      struct rt_mutex_waiter *orig_waiter,
				      struct task_struct *top_task)
{
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	struct rt_mutex_waiter *prerequeue_top_waiter;
	int ret = 0, depth = 0;
	struct rt_mutex *lock;
	bool detect_deadlock;
	bool requeue = true;

	detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);

	/*
	 * The (de)boosting is a step by step approach with a lot of
	 * pitfalls. We want this to be preemptible and we want to hold a
	 * maximum of two locks per step. So we have to check
	 * carefully whether things change under us.
	 */
 again:
	/*
	 * We limit the lock chain length for each invocation.
	 */
	if (++depth > max_lock_depth) {
		static int prev_max;

		/*
		 * Print this only once. If the admin changes the limit,
		 * print a new message when reaching the limit again.
		 */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       top_task->comm, task_pid_nr(top_task));
		}
		put_task_struct(task);

		return -EDEADLK;
	}

	/*
	 * We are fully preemptible here and only hold the refcount on
	 * @task. So everything can have changed under us since the
	 * caller or our own code below (goto retry/again) dropped all
	 * locks.
	 */
 retry:
	/*
	 * [1] Task cannot go away as we did a get_task_struct() before!
	 */
	raw_spin_lock_irq(&task->pi_lock);

	/*
	 * [2] Get the waiter on which @task is blocked.
	 */
	waiter = task->pi_blocked_on;

	/*
	 * [3] check_exit_conditions_1() protected by task->pi_lock.
	 */

	/*
	 * Check whether the end of the boosting chain has been
	 * reached or the state of the chain has changed while we
	 * dropped the locks.
	 */
	if (!waiter)
		goto out_unlock_pi;

	/*
	 * Check the orig_waiter state. After we dropped the locks,
	 * the previous owner of the lock might have released the lock.
	 */
	if (orig_waiter && !rt_mutex_owner(orig_lock))
		goto out_unlock_pi;

	/*
	 * We dropped all locks after taking a refcount on @task, so
	 * the task might have moved on in the lock chain or even left
	 * the chain completely and blocks now on an unrelated lock or
	 * on @orig_lock.
	 *
	 * We stored the lock on which @task was blocked in @next_lock,
	 * so we can detect the chain change.
	 */
	if (next_lock != waiter->lock)
		goto out_unlock_pi;

	/*
	 * Drop out when the task has no waiters. Note that top_waiter
	 * can be NULL when we are in deboosting mode!
	 */
	if (top_waiter) {
		if (!task_has_pi_waiters(task))
			goto out_unlock_pi;
		/*
		 * If deadlock detection is off, we stop here if we
		 * are not the top pi waiter of the task. If deadlock
		 * detection is enabled we continue, but stop the
		 * requeueing in the chain walk.
		 */
		if (top_waiter != task_top_pi_waiter(task)) {
			if (!detect_deadlock)
				goto out_unlock_pi;
			else
				requeue = false;
		}
	}

	/*
	 * If the waiter priority is the same as the task priority
	 * then there is no further priority adjustment necessary. If
	 * deadlock detection is off, we stop the chain walk. If it's
	 * enabled we continue, but stop the requeueing in the chain
	 * walk.
	 */
	if (rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
		if (!detect_deadlock)
			goto out_unlock_pi;
		else
			requeue = false;
	}

	/*
	 * [4] Get the next lock
	 */
	lock = waiter->lock;
	/*
	 * [5] We need to trylock here as we are holding task->pi_lock,
	 * which is the reverse lock order versus the other rtmutex
	 * operations.
	 */
	if (!raw_spin_trylock(&lock->wait_lock)) {
		raw_spin_unlock_irq(&task->pi_lock);
		cpu_relax();
		goto retry;
	}

	/*
	 * [6] check_exit_conditions_2() protected by task->pi_lock and
	 * lock->wait_lock.
	 *
	 * Deadlock detection. If the lock is the same as the original
	 * lock which caused us to walk the lock chain or if the
	 * current lock is owned by the task which initiated the chain
	 * walk, we detected a deadlock.
	 */
	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
		raw_spin_unlock(&lock->wait_lock);
		ret = -EDEADLK;
		goto out_unlock_pi;
	}

	/*
	 * If we just follow the lock chain for deadlock detection, no
	 * need to do all the requeue operations. To avoid a truckload
	 * of conditionals around the various places below, just do the
	 * minimum chain walk checks.
	 */
	if (!requeue) {
		/*
		 * No requeue [7] here. Just release @task [8].
		 */
		raw_spin_unlock(&task->pi_lock);
		put_task_struct(task);

		/*
		 * [9] check_exit_conditions_3 protected by lock->wait_lock.
		 * If there is no owner of the lock, end of chain.
		 */
		if (!rt_mutex_owner(lock)) {
			raw_spin_unlock_irq(&lock->wait_lock);
			return 0;
		}

		/* [10] Grab the next task, i.e. owner of @lock */
		task = get_task_struct(rt_mutex_owner(lock));
		raw_spin_lock(&task->pi_lock);

		/*
		 * No requeue [11] here. We just do deadlock detection.
		 *
		 * [12] Store whether owner is blocked
		 * itself. Decision is made after dropping the locks.
		 */
		next_lock = task_blocked_on_lock(task);
		/*
		 * Get the top waiter for the next iteration
		 */
		top_waiter = rt_mutex_top_waiter(lock);

		/* [13] Drop locks */
		raw_spin_unlock(&task->pi_lock);
		raw_spin_unlock_irq(&lock->wait_lock);

		/* If owner is not blocked, end of chain. */
		if (!next_lock)
			goto out_put_task;
		goto again;
	}

	/*
	 * Store the current top waiter before doing the requeue
	 * operation on @lock. We need it for the boost/deboost
	 * decision below.
	 */
	prerequeue_top_waiter = rt_mutex_top_waiter(lock);

	/* [7] Requeue the waiter in the lock waiter tree. */
	rt_mutex_dequeue(lock, waiter);

	/*
	 * Update the waiter prio fields now that we're dequeued.
	 *
	 * These values can have changed through either:
	 *
	 *   sys_sched_setscheduler() / sys_sched_setattr()
	 *
	 * or
	 *
	 *   DL CBS enforcement advancing the effective deadline.
	 *
	 * Even though pi_waiters also uses these fields, and that tree is only
	 * updated in [11], we can do this here, since we hold [L], which
	 * serializes all pi_waiters access and rb_erase() does not care about
	 * the values of the node being removed.
	 */
	waiter->prio = task->prio;
	waiter->deadline = task->dl.deadline;

	rt_mutex_enqueue(lock, waiter);

	/* [8] Release the task */
	raw_spin_unlock(&task->pi_lock);
	put_task_struct(task);

	/*
	 * [9] check_exit_conditions_3 protected by lock->wait_lock.
	 *
	 * We must abort the chain walk if there is no lock owner even
	 * in the deadlock detection case, as we have nothing to
	 * follow here. This is the end of the chain we are walking.
	 */
	if (!rt_mutex_owner(lock)) {
		/*
		 * If the requeue [7] above changed the top waiter,
		 * then we need to wake the new top waiter up to try
		 * to get the lock.
		 */
		if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
			wake_up_process(rt_mutex_top_waiter(lock)->task);
		raw_spin_unlock_irq(&lock->wait_lock);
		return 0;
	}

	/* [10] Grab the next task, i.e. the owner of @lock */
	task = get_task_struct(rt_mutex_owner(lock));
	raw_spin_lock(&task->pi_lock);

	/* [11] requeue the pi waiters if necessary */
	if (waiter == rt_mutex_top_waiter(lock)) {
		/*
		 * The waiter became the new top (highest priority)
		 * waiter on the lock. Replace the previous top waiter
		 * in the owner task's pi_waiters tree with this waiter
		 * and adjust the priority of the owner.
		 */
		rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
		rt_mutex_enqueue_pi(task, waiter);
		rt_mutex_adjust_prio(task);

	} else if (prerequeue_top_waiter == waiter) {
		/*
		 * The waiter was the top waiter on the lock, but is
		 * no longer the top priority waiter. Replace waiter in
		 * the owner task's pi_waiters tree with the new top
		 * (highest priority) waiter and adjust the priority
		 * of the owner.
		 * The new top waiter is stored in @waiter so that
		 * @waiter == @top_waiter evaluates to true below and
		 * we continue to deboost the rest of the chain.
		 */
		rt_mutex_dequeue_pi(task, waiter);
		waiter = rt_mutex_top_waiter(lock);
		rt_mutex_enqueue_pi(task, waiter);
		rt_mutex_adjust_prio(task);
	} else {
		/*
		 * Nothing changed. No need to do any priority
		 * adjustment.
		 */
	}

	/*
	 * [12] check_exit_conditions_4() protected by task->pi_lock
	 * and lock->wait_lock. The actual decisions are made after we
	 * dropped the locks.
	 *
	 * Check whether the task which owns the current lock is pi
	 * blocked itself. If yes we store a pointer to the lock for
	 * the lock chain change detection above. After we dropped
	 * task->pi_lock next_lock cannot be dereferenced anymore.
	 */
	next_lock = task_blocked_on_lock(task);
	/*
	 * Store the top waiter of @lock for the end of chain walk
	 * decision below.
	 */
	top_waiter = rt_mutex_top_waiter(lock);

	/* [13] Drop the locks */
	raw_spin_unlock(&task->pi_lock);
	raw_spin_unlock_irq(&lock->wait_lock);

	/*
	 * Make the actual exit decisions [12], based on the stored
	 * values.
	 *
	 * We reached the end of the lock chain. Stop right here. No
	 * point to go back just to figure that out.
	 */
	if (!next_lock)
		goto out_put_task;

	/*
	 * If the current waiter is not the top waiter on the lock,
	 * then we can stop the chain walk here if we are not in full
	 * deadlock detection mode.
	 */
	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;

 out_unlock_pi:
	raw_spin_unlock_irq(&task->pi_lock);
 out_put_task:
	put_task_struct(task);

	return ret;
}

/*
 * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held and interrupts disabled
 *
 * @lock:   The lock to be acquired.
 * @task:   The task which wants to acquire the lock
 * @waiter: The waiter that is queued to the lock's wait tree if the
 *	    callsite called task_blocked_on_lock(), otherwise NULL
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
				struct rt_mutex_waiter *waiter)
{
	lockdep_assert_held(&lock->wait_lock);

	/*
	 * Before testing whether we can acquire @lock, we set the
	 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
	 * other tasks which try to modify @lock into the slow path
	 * and they serialize on @lock->wait_lock.
	 *
	 * The RT_MUTEX_HAS_WAITERS bit can have a transitional state
	 * as explained at the top of this file if and only if:
	 *
	 * - There is a lock owner. The caller must fixup the
	 *   transient state if it does a trylock or leaves the lock
	 *   function due to a signal or timeout.
	 *
	 * - @task acquires the lock and there are no other
	 *   waiters. This is undone in rt_mutex_set_owner(@task) at
	 *   the end of this function.
	 */
	mark_rt_mutex_waiters(lock);

	/*
	 * If @lock has an owner, give up.
	 */
	if (rt_mutex_owner(lock))
		return 0;

	/*
	 * If @waiter != NULL, @task has already enqueued the waiter
	 * into the @lock waiter tree. If @waiter == NULL then this is a
	 * trylock attempt.
	 */
	if (waiter) {
		/*
		 * If waiter is not the highest priority waiter of
		 * @lock, give up.
		 */
		if (waiter != rt_mutex_top_waiter(lock))
			return 0;

		/*
		 * We can acquire the lock. Remove the waiter from the
		 * lock waiters tree.
		 */
		rt_mutex_dequeue(lock, waiter);

	} else {
		/*
		 * If the lock has waiters already, we check whether @task is
		 * eligible to take over the lock.
		 *
		 * If there are no other waiters, @task can acquire
		 * the lock. @task->pi_blocked_on is NULL, so it does
		 * not need to be dequeued.
		 */
		if (rt_mutex_has_waiters(lock)) {
			/*
			 * If @task->prio is greater than or equal to
			 * the top waiter priority (kernel view),
			 * @task lost.
			 */
			if (!rt_mutex_waiter_less(task_to_waiter(task),
						  rt_mutex_top_waiter(lock)))
				return 0;

			/*
			 * The current top waiter stays enqueued. We
			 * don't have to change anything in the lock
			 * waiters order.
			 */
		} else {
			/*
			 * No waiters. Take the lock without the
			 * pi_lock dance. @task->pi_blocked_on is NULL
			 * and we have no waiters to enqueue in @task's
			 * pi_waiters tree.
			 */
			goto takeit;
		}
	}

	/*
	 * Clear @task->pi_blocked_on. Requires protection by
	 * @task->pi_lock. Redundant operation for the @waiter == NULL
	 * case, but conditionals are more expensive than a redundant
	 * store.
	 */
	raw_spin_lock(&task->pi_lock);
	task->pi_blocked_on = NULL;
	/*
	 * Finish the lock acquisition. @task is the new owner. If
	 * other waiters exist we have to insert the highest priority
	 * waiter into @task->pi_waiters tree.
	 */
	if (rt_mutex_has_waiters(lock))
		rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
	raw_spin_unlock(&task->pi_lock);

takeit:
	/* We got the lock. */
	debug_rt_mutex_lock(lock);

	/*
	 * This either preserves the RT_MUTEX_HAS_WAITERS bit if there
	 * are still waiters or clears it.
	 */
	rt_mutex_set_owner(lock, task);

	return 1;
}

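/*
 * Illustrative caller pattern (a sketch based on the fixup rule stated
 * above, not the verbatim slowpath code): a trylock-style caller
 * attempts the acquisition with @waiter == NULL and must repair a
 * possibly left-over waiters bit before dropping wait_lock:
 *
 *	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 *	ret = try_to_take_rt_mutex(lock, current, NULL);
 *	fixup_rt_mutex_waiters(lock);
 *	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 */
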
/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held and interrupts disabled
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
				   struct rt_mutex_waiter *waiter,
				   struct task_struct *task,
				   enum rtmutex_chainwalk chwalk)
{
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *top_waiter = waiter;
	struct rt_mutex *next_lock;
	int chain_walk = 0, res;

	lockdep_assert_held(&lock->wait_lock);

	/*
	 * Early deadlock detection. We really don't want the task to
	 * enqueue on itself just to untangle the mess later. It's not
	 * only an optimization. We drop the locks, so another waiter
	 * can come in before the chain walk detects the deadlock. So
	 * the other will detect the deadlock and return -EDEADLK,
	 * which is wrong, as the other waiter is not in a deadlock
	 * situation.
	 */
	if (owner == task)
		return -EDEADLK;

	raw_spin_lock(&task->pi_lock);
	waiter->task = task;
	waiter->lock = lock;
	waiter->prio = task->prio;
	waiter->deadline = task->dl.deadline;

	/* Get the top priority waiter on the lock */
	if (rt_mutex_has_waiters(lock))
		top_waiter = rt_mutex_top_waiter(lock);
	rt_mutex_enqueue(lock, waiter);

	task->pi_blocked_on = waiter;

	raw_spin_unlock(&task->pi_lock);

	if (!owner)
		return 0;

	raw_spin_lock(&owner->pi_lock);
	if (waiter == rt_mutex_top_waiter(lock)) {
		rt_mutex_dequeue_pi(owner, top_waiter);
		rt_mutex_enqueue_pi(owner, waiter);

		rt_mutex_adjust_prio(owner);
		if (owner->pi_blocked_on)
			chain_walk = 1;
	} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
		chain_walk = 1;
	}

	/* Store the lock on which owner is blocked or NULL */
	next_lock = task_blocked_on_lock(owner);

	raw_spin_unlock(&owner->pi_lock);
	/*
	 * Even if full deadlock detection is on, if the owner is not
	 * blocked itself, we can avoid finding this out in the chain
	 * walk.
	 */
	if (!chain_walk || !next_lock)
		return 0;

	/*
	 * The owner can't disappear while holding a lock,
	 * so the owner struct is protected by wait_lock.
	 * Gets dropped in rt_mutex_adjust_prio_chain()!
	 */
	get_task_struct(owner);

	raw_spin_unlock_irq(&lock->wait_lock);

	res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
					 next_lock, waiter, task);

	raw_spin_lock_irq(&lock->wait_lock);

	return res;
}

9901696a8beSPeter Zijlstra /*
9919f40a51aSDavidlohr Bueso  * Remove the top waiter from the current tasks pi waiter tree and
99245ab4effSDavidlohr Bueso  * queue it up.
9931696a8beSPeter Zijlstra  *
994b4abf910SThomas Gleixner  * Called with lock->wait_lock held and interrupts disabled.
9951696a8beSPeter Zijlstra  */
99645ab4effSDavidlohr Bueso static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
99745ab4effSDavidlohr Bueso 				    struct rt_mutex *lock)
9981696a8beSPeter Zijlstra {
9991696a8beSPeter Zijlstra 	struct rt_mutex_waiter *waiter;
10001696a8beSPeter Zijlstra 
1001b4abf910SThomas Gleixner 	raw_spin_lock(&current->pi_lock);
10021696a8beSPeter Zijlstra 
10031696a8beSPeter Zijlstra 	waiter = rt_mutex_top_waiter(lock);
10041696a8beSPeter Zijlstra 
10051696a8beSPeter Zijlstra 	/*
1006acd58620SPeter Zijlstra 	 * Remove it from current->pi_waiters and deboost.
1007acd58620SPeter Zijlstra 	 *
1008acd58620SPeter Zijlstra 	 * We must in fact deboost here in order to ensure we call
1009acd58620SPeter Zijlstra 	 * rt_mutex_setprio() to update p->pi_top_task before the
1010acd58620SPeter Zijlstra 	 * task unblocks.
10111696a8beSPeter Zijlstra 	 */
1012fb00aca4SPeter Zijlstra 	rt_mutex_dequeue_pi(current, waiter);
1013acd58620SPeter Zijlstra 	rt_mutex_adjust_prio(current);
10141696a8beSPeter Zijlstra 
101527e35715SThomas Gleixner 	/*
101627e35715SThomas Gleixner 	 * As we are waking up the top waiter, and the waiter stays
101727e35715SThomas Gleixner 	 * queued on the lock until it gets the lock, this lock
101827e35715SThomas Gleixner 	 * obviously has waiters. Just set the bit here; this has the
101927e35715SThomas Gleixner 	 * added benefit of forcing all new tasks into the slow path,
102027e35715SThomas Gleixner 	 * making sure no task of lower priority than
102127e35715SThomas Gleixner 	 * the top waiter can steal this lock.
102227e35715SThomas Gleixner 	 */
102327e35715SThomas Gleixner 	lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
10241696a8beSPeter Zijlstra 
1025acd58620SPeter Zijlstra 	/*
1026acd58620SPeter Zijlstra 	 * We deboosted before waking the top waiter task such that we don't
1027acd58620SPeter Zijlstra 	 * run two tasks with the 'same' priority (and ensure the
1028acd58620SPeter Zijlstra 	 * p->pi_top_task pointer points to a blocked task). This however can
1029acd58620SPeter Zijlstra 	 * lead to priority inversion if we would get preempted after the
1030acd58620SPeter Zijlstra 	 * deboost but before waking our donor task, hence the preempt_disable()
1031acd58620SPeter Zijlstra 	 * before unlock.
1032acd58620SPeter Zijlstra 	 *
1033acd58620SPeter Zijlstra 	 * Pairs with preempt_enable() in rt_mutex_postunlock().
1034acd58620SPeter Zijlstra 	 */
1035acd58620SPeter Zijlstra 	preempt_disable();
103645ab4effSDavidlohr Bueso 	wake_q_add(wake_q, waiter->task);
1037acd58620SPeter Zijlstra 	raw_spin_unlock(&current->pi_lock);
10381696a8beSPeter Zijlstra }
10391696a8beSPeter Zijlstra 
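/*
 * Illustrative sketch, not extra kernel code: the unlock-side ordering
 * described in the comment above, as implemented by mark_wakeup_next_waiter()
 * together with rt_mutex_slowunlock() and rt_mutex_postunlock() further
 * down, is roughly:
 *
 *	raw_spin_lock(&current->pi_lock);
 *	rt_mutex_dequeue_pi(current, waiter);
 *	rt_mutex_adjust_prio(current);		<- deboost
 *	preempt_disable();
 *	wake_q_add(wake_q, waiter->task);
 *	raw_spin_unlock(&current->pi_lock);
 *	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 *	wake_up_q(wake_q);			<- wake the donor task
 *	preempt_enable();			<- earliest preemption point
 */
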
10401696a8beSPeter Zijlstra /*
10411696a8beSPeter Zijlstra  * Remove a waiter from a lock and give up
10421696a8beSPeter Zijlstra  *
1043b4abf910SThomas Gleixner  * Must be called with lock->wait_lock held and interrupts disabled. The
10441696a8beSPeter Zijlstra  * current task must have just failed to try_to_take_rt_mutex().
10451696a8beSPeter Zijlstra  */
10461696a8beSPeter Zijlstra static void remove_waiter(struct rt_mutex *lock,
10471696a8beSPeter Zijlstra 			  struct rt_mutex_waiter *waiter)
10481696a8beSPeter Zijlstra {
10491ca7b860SThomas Gleixner 	bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
10501696a8beSPeter Zijlstra 	struct task_struct *owner = rt_mutex_owner(lock);
10511ca7b860SThomas Gleixner 	struct rt_mutex *next_lock;
10521696a8beSPeter Zijlstra 
1053e0aad5b4SPeter Zijlstra 	lockdep_assert_held(&lock->wait_lock);
1054e0aad5b4SPeter Zijlstra 
1055b4abf910SThomas Gleixner 	raw_spin_lock(&current->pi_lock);
1056fb00aca4SPeter Zijlstra 	rt_mutex_dequeue(lock, waiter);
10571696a8beSPeter Zijlstra 	current->pi_blocked_on = NULL;
1058b4abf910SThomas Gleixner 	raw_spin_unlock(&current->pi_lock);
10591696a8beSPeter Zijlstra 
10601ca7b860SThomas Gleixner 	/*
10611ca7b860SThomas Gleixner 	 * Only update priority if the waiter was the highest priority
10621ca7b860SThomas Gleixner 	 * waiter of the lock and there is an owner to update.
10631ca7b860SThomas Gleixner 	 */
10641ca7b860SThomas Gleixner 	if (!owner || !is_top_waiter)
10651696a8beSPeter Zijlstra 		return;
10661696a8beSPeter Zijlstra 
1067b4abf910SThomas Gleixner 	raw_spin_lock(&owner->pi_lock);
10681696a8beSPeter Zijlstra 
1069fb00aca4SPeter Zijlstra 	rt_mutex_dequeue_pi(owner, waiter);
10701696a8beSPeter Zijlstra 
10711ca7b860SThomas Gleixner 	if (rt_mutex_has_waiters(lock))
10721ca7b860SThomas Gleixner 		rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));
10731696a8beSPeter Zijlstra 
1074acd58620SPeter Zijlstra 	rt_mutex_adjust_prio(owner);
10751696a8beSPeter Zijlstra 
107682084984SThomas Gleixner 	/* Store the lock on which owner is blocked or NULL */
107782084984SThomas Gleixner 	next_lock = task_blocked_on_lock(owner);
10781696a8beSPeter Zijlstra 
1079b4abf910SThomas Gleixner 	raw_spin_unlock(&owner->pi_lock);
10801696a8beSPeter Zijlstra 
10811ca7b860SThomas Gleixner 	/*
10821ca7b860SThomas Gleixner 	 * Don't walk the chain if the owner task is not blocked
10831ca7b860SThomas Gleixner 	 * itself.
10841ca7b860SThomas Gleixner 	 */
108582084984SThomas Gleixner 	if (!next_lock)
10861696a8beSPeter Zijlstra 		return;
10871696a8beSPeter Zijlstra 
10881696a8beSPeter Zijlstra 	/* gets dropped in rt_mutex_adjust_prio_chain()! */
10891696a8beSPeter Zijlstra 	get_task_struct(owner);
10901696a8beSPeter Zijlstra 
1091b4abf910SThomas Gleixner 	raw_spin_unlock_irq(&lock->wait_lock);
10921696a8beSPeter Zijlstra 
10938930ed80SThomas Gleixner 	rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
10948930ed80SThomas Gleixner 				   next_lock, NULL, current);
10951696a8beSPeter Zijlstra 
1096b4abf910SThomas Gleixner 	raw_spin_lock_irq(&lock->wait_lock);
10971696a8beSPeter Zijlstra }
10981696a8beSPeter Zijlstra 
10991696a8beSPeter Zijlstra /*
11001696a8beSPeter Zijlstra  * Recheck the pi chain, in case the task was given a new priority setting.
11011696a8beSPeter Zijlstra  *
11021696a8beSPeter Zijlstra  * Called from sched_setscheduler().
11031696a8beSPeter Zijlstra  */
11041696a8beSPeter Zijlstra void rt_mutex_adjust_pi(struct task_struct *task)
11051696a8beSPeter Zijlstra {
11061696a8beSPeter Zijlstra 	struct rt_mutex_waiter *waiter;
110782084984SThomas Gleixner 	struct rt_mutex *next_lock;
11081696a8beSPeter Zijlstra 	unsigned long flags;
11091696a8beSPeter Zijlstra 
11101696a8beSPeter Zijlstra 	raw_spin_lock_irqsave(&task->pi_lock, flags);
11111696a8beSPeter Zijlstra 
11121696a8beSPeter Zijlstra 	waiter = task->pi_blocked_on;
111319830e55SPeter Zijlstra 	if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
11141696a8beSPeter Zijlstra 		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
11151696a8beSPeter Zijlstra 		return;
11161696a8beSPeter Zijlstra 	}
111782084984SThomas Gleixner 	next_lock = waiter->lock;
11181696a8beSPeter Zijlstra 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
11191696a8beSPeter Zijlstra 
11201696a8beSPeter Zijlstra 	/* gets dropped in rt_mutex_adjust_prio_chain()! */
11211696a8beSPeter Zijlstra 	get_task_struct(task);
112282084984SThomas Gleixner 
11238930ed80SThomas Gleixner 	rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
11248930ed80SThomas Gleixner 				   next_lock, NULL, task);
11251696a8beSPeter Zijlstra }
11261696a8beSPeter Zijlstra 
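/*
 * Illustrative only: the scheduler core is the caller of
 * rt_mutex_adjust_pi() above. A priority change via sched_setscheduler()
 * and friends ends, roughly, with
 *
 *	__sched_setscheduler(p, ...)
 *		...
 *		rt_mutex_adjust_pi(p);
 *
 * so that any pi chain the task is blocked on is re-walked with the new
 * priority.
 */
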
112750809358SPeter Zijlstra void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
112850809358SPeter Zijlstra {
112950809358SPeter Zijlstra 	debug_rt_mutex_init_waiter(waiter);
113050809358SPeter Zijlstra 	RB_CLEAR_NODE(&waiter->pi_tree_entry);
113150809358SPeter Zijlstra 	RB_CLEAR_NODE(&waiter->tree_entry);
113250809358SPeter Zijlstra 	waiter->task = NULL;
113350809358SPeter Zijlstra }
113450809358SPeter Zijlstra 
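/*
 * Illustrative only, mirroring rt_mutex_slowlock() below: callers keep
 * the waiter on their stack for the duration of the block:
 *
 *	struct rt_mutex_waiter waiter;
 *
 *	rt_mutex_init_waiter(&waiter);
 *	ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
 */
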
11351696a8beSPeter Zijlstra /**
11361696a8beSPeter Zijlstra  * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
11371696a8beSPeter Zijlstra  * @lock:		 the rt_mutex to take
11381696a8beSPeter Zijlstra  * @state:		 the state the task should block in (TASK_INTERRUPTIBLE
11391696a8beSPeter Zijlstra  *			 or TASK_UNINTERRUPTIBLE)
11401696a8beSPeter Zijlstra  * @timeout:		 the pre-initialized and started timer, or NULL for none
11411696a8beSPeter Zijlstra  * @waiter:		 the pre-initialized rt_mutex_waiter
11421696a8beSPeter Zijlstra  *
1143b4abf910SThomas Gleixner  * Must be called with lock->wait_lock held and interrupts disabled
11441696a8beSPeter Zijlstra  */
11451696a8beSPeter Zijlstra static int __sched
11461696a8beSPeter Zijlstra __rt_mutex_slowlock(struct rt_mutex *lock, int state,
11471696a8beSPeter Zijlstra 		    struct hrtimer_sleeper *timeout,
11481696a8beSPeter Zijlstra 		    struct rt_mutex_waiter *waiter)
11491696a8beSPeter Zijlstra {
11501696a8beSPeter Zijlstra 	int ret = 0;
11511696a8beSPeter Zijlstra 
11521696a8beSPeter Zijlstra 	for (;;) {
11531696a8beSPeter Zijlstra 		/* Try to acquire the lock: */
11541696a8beSPeter Zijlstra 		if (try_to_take_rt_mutex(lock, current, waiter))
11551696a8beSPeter Zijlstra 			break;
11561696a8beSPeter Zijlstra 
11571696a8beSPeter Zijlstra 		/*
11581696a8beSPeter Zijlstra 		 * TASK_INTERRUPTIBLE checks for signals and
11591696a8beSPeter Zijlstra 		 * timeout. Ignored otherwise.
11601696a8beSPeter Zijlstra 		 */
11614009f4b3SSteven Rostedt (VMware) 		if (likely(state == TASK_INTERRUPTIBLE)) {
11621696a8beSPeter Zijlstra 			/* Signal pending? */
11631696a8beSPeter Zijlstra 			if (signal_pending(current))
11641696a8beSPeter Zijlstra 				ret = -EINTR;
11651696a8beSPeter Zijlstra 			if (timeout && !timeout->task)
11661696a8beSPeter Zijlstra 				ret = -ETIMEDOUT;
11671696a8beSPeter Zijlstra 			if (ret)
11681696a8beSPeter Zijlstra 				break;
11691696a8beSPeter Zijlstra 		}
11701696a8beSPeter Zijlstra 
1171b4abf910SThomas Gleixner 		raw_spin_unlock_irq(&lock->wait_lock);
11721696a8beSPeter Zijlstra 
11731b0b7c17SDavidlohr Bueso 		schedule();
11741696a8beSPeter Zijlstra 
1175b4abf910SThomas Gleixner 		raw_spin_lock_irq(&lock->wait_lock);
11761696a8beSPeter Zijlstra 		set_current_state(state);
11771696a8beSPeter Zijlstra 	}
11781696a8beSPeter Zijlstra 
1179afffc6c1SDavidlohr Bueso 	__set_current_state(TASK_RUNNING);
11801696a8beSPeter Zijlstra 	return ret;
11811696a8beSPeter Zijlstra }
11821696a8beSPeter Zijlstra 
11833d5c9340SThomas Gleixner static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
11843d5c9340SThomas Gleixner 				     struct rt_mutex_waiter *w)
11853d5c9340SThomas Gleixner {
11863d5c9340SThomas Gleixner 	/*
11873d5c9340SThomas Gleixner 	 * If the result is not -EDEADLOCK or the caller requested
11883d5c9340SThomas Gleixner 	 * deadlock detection, nothing to do here.
11893d5c9340SThomas Gleixner 	 */
11903d5c9340SThomas Gleixner 	if (res != -EDEADLOCK || detect_deadlock)
11913d5c9340SThomas Gleixner 		return;
11923d5c9340SThomas Gleixner 
11933d5c9340SThomas Gleixner 	/*
1194e2db7592SIngo Molnar 	 * Yell loudly and stop the task right here.
11953d5c9340SThomas Gleixner 	 */
1196*6d41c675SSebastian Andrzej Siewior 	WARN(1, "rtmutex deadlock detected\n");
11973d5c9340SThomas Gleixner 	while (1) {
11983d5c9340SThomas Gleixner 		set_current_state(TASK_INTERRUPTIBLE);
11993d5c9340SThomas Gleixner 		schedule();
12003d5c9340SThomas Gleixner 	}
12013d5c9340SThomas Gleixner }
12023d5c9340SThomas Gleixner 
12031696a8beSPeter Zijlstra /*
12041696a8beSPeter Zijlstra  * Slow path lock function:
12051696a8beSPeter Zijlstra  */
12061696a8beSPeter Zijlstra static int __sched
12071696a8beSPeter Zijlstra rt_mutex_slowlock(struct rt_mutex *lock, int state,
12081696a8beSPeter Zijlstra 		  struct hrtimer_sleeper *timeout,
12098930ed80SThomas Gleixner 		  enum rtmutex_chainwalk chwalk)
12101696a8beSPeter Zijlstra {
12111696a8beSPeter Zijlstra 	struct rt_mutex_waiter waiter;
1212b4abf910SThomas Gleixner 	unsigned long flags;
12131696a8beSPeter Zijlstra 	int ret = 0;
12141696a8beSPeter Zijlstra 
121550809358SPeter Zijlstra 	rt_mutex_init_waiter(&waiter);
12161696a8beSPeter Zijlstra 
1217b4abf910SThomas Gleixner 	/*
1218b4abf910SThomas Gleixner 	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
1219b4abf910SThomas Gleixner 	 * be called in early boot if the cmpxchg() fast path is disabled
1220b4abf910SThomas Gleixner 	 * (debug, no architecture support). In this case we will acquire the
1221b4abf910SThomas Gleixner 	 * rtmutex with lock->wait_lock held. But we cannot unconditionally
1222b4abf910SThomas Gleixner 	 * enable interrupts in that early boot case. So we need to use the
1223b4abf910SThomas Gleixner 	 * irqsave/restore variants.
1224b4abf910SThomas Gleixner 	 */
1225b4abf910SThomas Gleixner 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
12261696a8beSPeter Zijlstra 
12271696a8beSPeter Zijlstra 	/* Try to acquire the lock again: */
12281696a8beSPeter Zijlstra 	if (try_to_take_rt_mutex(lock, current, NULL)) {
1229b4abf910SThomas Gleixner 		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
12301696a8beSPeter Zijlstra 		return 0;
12311696a8beSPeter Zijlstra 	}
12321696a8beSPeter Zijlstra 
12331696a8beSPeter Zijlstra 	set_current_state(state);
12341696a8beSPeter Zijlstra 
12351696a8beSPeter Zijlstra 	/* Set up the timer when timeout != NULL */
1236ccdd92c1SThomas Gleixner 	if (unlikely(timeout))
12371696a8beSPeter Zijlstra 		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
12381696a8beSPeter Zijlstra 
12398930ed80SThomas Gleixner 	ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
12401696a8beSPeter Zijlstra 
12411696a8beSPeter Zijlstra 	if (likely(!ret))
1242afffc6c1SDavidlohr Bueso 		/* sleep on the mutex */
12431696a8beSPeter Zijlstra 		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
12441696a8beSPeter Zijlstra 
12453d5c9340SThomas Gleixner 	if (unlikely(ret)) {
12469d3e2d02SSebastian Andrzej Siewior 		__set_current_state(TASK_RUNNING);
12471696a8beSPeter Zijlstra 		remove_waiter(lock, &waiter);
12488930ed80SThomas Gleixner 		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
12493d5c9340SThomas Gleixner 	}
12501696a8beSPeter Zijlstra 
12511696a8beSPeter Zijlstra 	/*
12521696a8beSPeter Zijlstra 	 * try_to_take_rt_mutex() sets the waiter bit
12531696a8beSPeter Zijlstra 	 * unconditionally. We might have to fix that up.
12541696a8beSPeter Zijlstra 	 */
12551696a8beSPeter Zijlstra 	fixup_rt_mutex_waiters(lock);
12561696a8beSPeter Zijlstra 
1257b4abf910SThomas Gleixner 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
12581696a8beSPeter Zijlstra 
12591696a8beSPeter Zijlstra 	/* Remove pending timer: */
12601696a8beSPeter Zijlstra 	if (unlikely(timeout))
12611696a8beSPeter Zijlstra 		hrtimer_cancel(&timeout->timer);
12621696a8beSPeter Zijlstra 
12631696a8beSPeter Zijlstra 	debug_rt_mutex_free_waiter(&waiter);
12641696a8beSPeter Zijlstra 
12651696a8beSPeter Zijlstra 	return ret;
12661696a8beSPeter Zijlstra }
12671696a8beSPeter Zijlstra 
1268c1e2f0eaSPeter Zijlstra static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock)
1269c1e2f0eaSPeter Zijlstra {
1270c1e2f0eaSPeter Zijlstra 	int ret = try_to_take_rt_mutex(lock, current, NULL);
1271c1e2f0eaSPeter Zijlstra 
1272c1e2f0eaSPeter Zijlstra 	/*
1273c1e2f0eaSPeter Zijlstra 	 * try_to_take_rt_mutex() sets the lock waiters bit
1274c1e2f0eaSPeter Zijlstra 	 * unconditionally. Clean this up.
1275c1e2f0eaSPeter Zijlstra 	 */
1276c1e2f0eaSPeter Zijlstra 	fixup_rt_mutex_waiters(lock);
1277c1e2f0eaSPeter Zijlstra 
1278c1e2f0eaSPeter Zijlstra 	return ret;
1279c1e2f0eaSPeter Zijlstra }
1280c1e2f0eaSPeter Zijlstra 
12811696a8beSPeter Zijlstra /*
12821696a8beSPeter Zijlstra  * Slow path try-lock function:
12831696a8beSPeter Zijlstra  */
128488f2b4c1SThomas Gleixner static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
12851696a8beSPeter Zijlstra {
1286b4abf910SThomas Gleixner 	unsigned long flags;
128788f2b4c1SThomas Gleixner 	int ret;
12881696a8beSPeter Zijlstra 
128988f2b4c1SThomas Gleixner 	/*
129088f2b4c1SThomas Gleixner 	 * If the lock already has an owner we fail to get the lock.
129188f2b4c1SThomas Gleixner 	 * This check can be done without taking @lock->wait_lock, as
129288f2b4c1SThomas Gleixner 	 * the owner is only being read, and this is a trylock anyway.
129388f2b4c1SThomas Gleixner 	 */
129488f2b4c1SThomas Gleixner 	if (rt_mutex_owner(lock))
129588f2b4c1SThomas Gleixner 		return 0;
129688f2b4c1SThomas Gleixner 
129788f2b4c1SThomas Gleixner 	/*
1298b4abf910SThomas Gleixner 	 * The mutex currently has no owner. Lock the wait lock and try to
1299b4abf910SThomas Gleixner 	 * acquire the lock. We use irqsave here to support early boot calls.
130088f2b4c1SThomas Gleixner 	 */
1301b4abf910SThomas Gleixner 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
13021696a8beSPeter Zijlstra 
1303c1e2f0eaSPeter Zijlstra 	ret = __rt_mutex_slowtrylock(lock);
13041696a8beSPeter Zijlstra 
1305b4abf910SThomas Gleixner 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
13061696a8beSPeter Zijlstra 
13071696a8beSPeter Zijlstra 	return ret;
13081696a8beSPeter Zijlstra }
13091696a8beSPeter Zijlstra 
13101696a8beSPeter Zijlstra /*
1311802ab58dSSebastian Andrzej Siewior  * Slow path to release a rt-mutex.
1312aa2bfe55SPeter Zijlstra  *
1313aa2bfe55SPeter Zijlstra  * Return whether the current task needs to call rt_mutex_postunlock().
13141696a8beSPeter Zijlstra  */
1315802ab58dSSebastian Andrzej Siewior static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
1316802ab58dSSebastian Andrzej Siewior 					struct wake_q_head *wake_q)
13171696a8beSPeter Zijlstra {
1318b4abf910SThomas Gleixner 	unsigned long flags;
1319b4abf910SThomas Gleixner 
1320b4abf910SThomas Gleixner 	/* irqsave required to support early boot calls */
1321b4abf910SThomas Gleixner 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
13221696a8beSPeter Zijlstra 
13231696a8beSPeter Zijlstra 	debug_rt_mutex_unlock(lock);
13241696a8beSPeter Zijlstra 
132527e35715SThomas Gleixner 	/*
132627e35715SThomas Gleixner 	 * We must be careful here if the fast path is enabled. If we
132727e35715SThomas Gleixner 	 * have no waiters queued we cannot set owner to NULL here
132827e35715SThomas Gleixner 	 * because of:
132927e35715SThomas Gleixner 	 *
133027e35715SThomas Gleixner 	 * foo->lock->owner = NULL;
133127e35715SThomas Gleixner 	 *			rtmutex_lock(foo->lock);   <- fast path
133227e35715SThomas Gleixner 	 *			free = atomic_dec_and_test(foo->refcnt);
133327e35715SThomas Gleixner 	 *			rtmutex_unlock(foo->lock); <- fast path
133427e35715SThomas Gleixner 	 *			if (free)
133527e35715SThomas Gleixner 	 *				kfree(foo);
133627e35715SThomas Gleixner 	 * raw_spin_unlock(foo->lock->wait_lock);
133727e35715SThomas Gleixner 	 *
133827e35715SThomas Gleixner 	 * So for the fastpath enabled kernel:
133927e35715SThomas Gleixner 	 *
134027e35715SThomas Gleixner 	 * Nothing can set the waiters bit as long as we hold
134127e35715SThomas Gleixner 	 * lock->wait_lock. So we do the following sequence:
134227e35715SThomas Gleixner 	 *
134327e35715SThomas Gleixner 	 *	owner = rt_mutex_owner(lock);
134427e35715SThomas Gleixner 	 *	clear_rt_mutex_waiters(lock);
134527e35715SThomas Gleixner 	 *	raw_spin_unlock(&lock->wait_lock);
134627e35715SThomas Gleixner 	 *	if (cmpxchg(&lock->owner, owner, 0) == owner)
134727e35715SThomas Gleixner 	 *		return;
134827e35715SThomas Gleixner 	 *	goto retry;
134927e35715SThomas Gleixner 	 *
135027e35715SThomas Gleixner 	 * The fastpath disabled variant is simple as all access to
135127e35715SThomas Gleixner 	 * lock->owner is serialized by lock->wait_lock:
135227e35715SThomas Gleixner 	 *
135327e35715SThomas Gleixner 	 *	lock->owner = NULL;
135427e35715SThomas Gleixner 	 *	raw_spin_unlock(&lock->wait_lock);
135527e35715SThomas Gleixner 	 */
135627e35715SThomas Gleixner 	while (!rt_mutex_has_waiters(lock)) {
135727e35715SThomas Gleixner 		/* Drops lock->wait_lock ! */
1358b4abf910SThomas Gleixner 		if (unlock_rt_mutex_safe(lock, flags))
1359802ab58dSSebastian Andrzej Siewior 			return false;
136027e35715SThomas Gleixner 		/* Relock the rtmutex and try again */
1361b4abf910SThomas Gleixner 		raw_spin_lock_irqsave(&lock->wait_lock, flags);
13621696a8beSPeter Zijlstra 	}
13631696a8beSPeter Zijlstra 
136427e35715SThomas Gleixner 	/*
136527e35715SThomas Gleixner 	 * The wakeup next waiter path does not suffer from the above
136627e35715SThomas Gleixner 	 * race. See the comments there.
136745ab4effSDavidlohr Bueso 	 *
136845ab4effSDavidlohr Bueso 	 * Queue the next waiter for wakeup once we release the wait_lock.
136927e35715SThomas Gleixner 	 */
1370802ab58dSSebastian Andrzej Siewior 	mark_wakeup_next_waiter(wake_q, lock);
1371b4abf910SThomas Gleixner 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
13721696a8beSPeter Zijlstra 
1373aa2bfe55SPeter Zijlstra 	return true; /* call rt_mutex_postunlock() */
13741696a8beSPeter Zijlstra }
13751696a8beSPeter Zijlstra 
13761696a8beSPeter Zijlstra /*
13771696a8beSPeter Zijlstra  * debug aware fast / slowpath lock, trylock and unlock functions
13781696a8beSPeter Zijlstra  *
13791696a8beSPeter Zijlstra  * The atomic acquire/release ops are compiled away when either the
13801696a8beSPeter Zijlstra  * architecture does not support cmpxchg or when debugging is enabled.
13811696a8beSPeter Zijlstra  */
13821696a8beSPeter Zijlstra static inline int
13831696a8beSPeter Zijlstra rt_mutex_fastlock(struct rt_mutex *lock, int state,
13841696a8beSPeter Zijlstra 		  int (*slowfn)(struct rt_mutex *lock, int state,
13851696a8beSPeter Zijlstra 				struct hrtimer_sleeper *timeout,
13868930ed80SThomas Gleixner 				enum rtmutex_chainwalk chwalk))
13871696a8beSPeter Zijlstra {
1388fffa954fSPeter Zijlstra 	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
13891696a8beSPeter Zijlstra 		return 0;
1390fffa954fSPeter Zijlstra 
13918930ed80SThomas Gleixner 	return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
13921696a8beSPeter Zijlstra }
13931696a8beSPeter Zijlstra 
13941696a8beSPeter Zijlstra static inline int
13951696a8beSPeter Zijlstra rt_mutex_fasttrylock(struct rt_mutex *lock,
13961696a8beSPeter Zijlstra 		     int (*slowfn)(struct rt_mutex *lock))
13971696a8beSPeter Zijlstra {
1398fffa954fSPeter Zijlstra 	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
13991696a8beSPeter Zijlstra 		return 1;
1400fffa954fSPeter Zijlstra 
14011696a8beSPeter Zijlstra 	return slowfn(lock);
14021696a8beSPeter Zijlstra }
14031696a8beSPeter Zijlstra 
14042a1c6029SXunlei Pang /*
1405c034f48eSRandy Dunlap  * Performs the wakeup of the top waiter and re-enables preemption.
14062a1c6029SXunlei Pang  */
1407aa2bfe55SPeter Zijlstra void rt_mutex_postunlock(struct wake_q_head *wake_q)
14082a1c6029SXunlei Pang {
14092a1c6029SXunlei Pang 	wake_up_q(wake_q);
14102a1c6029SXunlei Pang 
14112a1c6029SXunlei Pang 	/* Pairs with preempt_disable() in mark_wakeup_next_waiter() */
14122a1c6029SXunlei Pang 	preempt_enable();
14132a1c6029SXunlei Pang }
14142a1c6029SXunlei Pang 
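/*
 * Illustrative pairing, which is exactly what rt_mutex_fastunlock()
 * below and rt_mutex_futex_unlock() do:
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	if (rt_mutex_slowunlock(lock, &wake_q))
 *		rt_mutex_postunlock(&wake_q);
 */
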
14151696a8beSPeter Zijlstra static inline void
14161696a8beSPeter Zijlstra rt_mutex_fastunlock(struct rt_mutex *lock,
1417802ab58dSSebastian Andrzej Siewior 		    bool (*slowfn)(struct rt_mutex *lock,
1418802ab58dSSebastian Andrzej Siewior 				   struct wake_q_head *wqh))
14191696a8beSPeter Zijlstra {
1420194a6b5bSWaiman Long 	DEFINE_WAKE_Q(wake_q);
1421802ab58dSSebastian Andrzej Siewior 
1422fffa954fSPeter Zijlstra 	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
1423fffa954fSPeter Zijlstra 		return;
1424802ab58dSSebastian Andrzej Siewior 
1425aa2bfe55SPeter Zijlstra 	if (slowfn(lock, &wake_q))
1426aa2bfe55SPeter Zijlstra 		rt_mutex_postunlock(&wake_q);
1427802ab58dSSebastian Andrzej Siewior }
14281696a8beSPeter Zijlstra 
142962cedf3eSPeter Rosin static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
143062cedf3eSPeter Rosin {
143162cedf3eSPeter Rosin 	might_sleep();
143262cedf3eSPeter Rosin 
143362cedf3eSPeter Rosin 	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
143462cedf3eSPeter Rosin 	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
143562cedf3eSPeter Rosin }
143662cedf3eSPeter Rosin 
143762cedf3eSPeter Rosin #ifdef CONFIG_DEBUG_LOCK_ALLOC
143862cedf3eSPeter Rosin /**
143962cedf3eSPeter Rosin  * rt_mutex_lock_nested - lock a rt_mutex
144062cedf3eSPeter Rosin  *
144162cedf3eSPeter Rosin  * @lock: the rt_mutex to be locked
144262cedf3eSPeter Rosin  * @subclass: the lockdep subclass
144362cedf3eSPeter Rosin  */
144462cedf3eSPeter Rosin void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
144562cedf3eSPeter Rosin {
144662cedf3eSPeter Rosin 	__rt_mutex_lock(lock, subclass);
144762cedf3eSPeter Rosin }
144862cedf3eSPeter Rosin EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
144962cedf3eSPeter Rosin 
145084818af2SSteven Rostedt (VMware) #else /* !CONFIG_DEBUG_LOCK_ALLOC */
145184818af2SSteven Rostedt (VMware) 
14521696a8beSPeter Zijlstra /**
14531696a8beSPeter Zijlstra  * rt_mutex_lock - lock a rt_mutex
14541696a8beSPeter Zijlstra  *
14551696a8beSPeter Zijlstra  * @lock: the rt_mutex to be locked
14561696a8beSPeter Zijlstra  */
14571696a8beSPeter Zijlstra void __sched rt_mutex_lock(struct rt_mutex *lock)
14581696a8beSPeter Zijlstra {
145962cedf3eSPeter Rosin 	__rt_mutex_lock(lock, 0);
14601696a8beSPeter Zijlstra }
14611696a8beSPeter Zijlstra EXPORT_SYMBOL_GPL(rt_mutex_lock);
146262cedf3eSPeter Rosin #endif
14631696a8beSPeter Zijlstra 
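/*
 * Illustrative usage sketch, not part of this file; 'example_lock' and
 * 'example_count' are made-up names:
 *
 *	static DEFINE_RT_MUTEX(example_lock);
 *	static int example_count;
 *
 *	static void example_inc(void)
 *	{
 *		rt_mutex_lock(&example_lock);
 *		example_count++;
 *		rt_mutex_unlock(&example_lock);
 *	}
 */
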
14641696a8beSPeter Zijlstra /**
14651696a8beSPeter Zijlstra  * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
14661696a8beSPeter Zijlstra  *
14671696a8beSPeter Zijlstra  * @lock:		the rt_mutex to be locked
14681696a8beSPeter Zijlstra  *
14691696a8beSPeter Zijlstra  * Returns:
14701696a8beSPeter Zijlstra  *  0		on success
14711696a8beSPeter Zijlstra  * -EINTR	when interrupted by a signal
14721696a8beSPeter Zijlstra  */
1473c051b21fSThomas Gleixner int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
14741696a8beSPeter Zijlstra {
1475f5694788SPeter Zijlstra 	int ret;
1476f5694788SPeter Zijlstra 
14771696a8beSPeter Zijlstra 	might_sleep();
14781696a8beSPeter Zijlstra 
1479f5694788SPeter Zijlstra 	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
1480f5694788SPeter Zijlstra 	ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
1481f5694788SPeter Zijlstra 	if (ret)
14825facae4fSQian Cai 		mutex_release(&lock->dep_map, _RET_IP_);
1483f5694788SPeter Zijlstra 
1484f5694788SPeter Zijlstra 	return ret;
14851696a8beSPeter Zijlstra }
14861696a8beSPeter Zijlstra EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
14871696a8beSPeter Zijlstra 
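/*
 * Illustrative sketch with the hypothetical 'example_lock' from above;
 * on -EINTR the lock was not acquired and must not be unlocked:
 *
 *	ret = rt_mutex_lock_interruptible(&example_lock);
 *	if (ret)
 *		return ret;		<- interrupted by a signal
 *	...
 *	rt_mutex_unlock(&example_lock);
 */
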
1488c051b21fSThomas Gleixner /*
14895293c2efSPeter Zijlstra  * Futex variant, must not use fastpath.
14905293c2efSPeter Zijlstra  */
14915293c2efSPeter Zijlstra int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
14925293c2efSPeter Zijlstra {
14935293c2efSPeter Zijlstra 	return rt_mutex_slowtrylock(lock);
1494c051b21fSThomas Gleixner }
1495c051b21fSThomas Gleixner 
1496c1e2f0eaSPeter Zijlstra int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
1497c1e2f0eaSPeter Zijlstra {
1498c1e2f0eaSPeter Zijlstra 	return __rt_mutex_slowtrylock(lock);
1499c1e2f0eaSPeter Zijlstra }
1500c1e2f0eaSPeter Zijlstra 
15011696a8beSPeter Zijlstra /**
15021696a8beSPeter Zijlstra  * rt_mutex_trylock - try to lock a rt_mutex
15031696a8beSPeter Zijlstra  *
15041696a8beSPeter Zijlstra  * @lock:	the rt_mutex to be locked
15051696a8beSPeter Zijlstra  *
15066ce47fd9SThomas Gleixner  * This function can only be called in thread context. It's safe to
15076ce47fd9SThomas Gleixner  * call it from atomic regions, but not from hard interrupt or soft
15086ce47fd9SThomas Gleixner  * interrupt context.
15096ce47fd9SThomas Gleixner  *
15101696a8beSPeter Zijlstra  * Returns 1 on success and 0 on contention
15111696a8beSPeter Zijlstra  */
15121696a8beSPeter Zijlstra int __sched rt_mutex_trylock(struct rt_mutex *lock)
15131696a8beSPeter Zijlstra {
1514f5694788SPeter Zijlstra 	int ret;
1515f5694788SPeter Zijlstra 
1516a461d587SSebastian Andrzej Siewior 	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
15176ce47fd9SThomas Gleixner 		return 0;
15186ce47fd9SThomas Gleixner 
1519f5694788SPeter Zijlstra 	ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
1520f5694788SPeter Zijlstra 	if (ret)
1521f5694788SPeter Zijlstra 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1522f5694788SPeter Zijlstra 
1523f5694788SPeter Zijlstra 	return ret;
15241696a8beSPeter Zijlstra }
15251696a8beSPeter Zijlstra EXPORT_SYMBOL_GPL(rt_mutex_trylock);
15261696a8beSPeter Zijlstra 
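/*
 * Illustrative sketch with the hypothetical 'example_lock' from above:
 *
 *	if (rt_mutex_trylock(&example_lock)) {
 *		...			<- lock held, do the work
 *		rt_mutex_unlock(&example_lock);
 *	} else {
 *		...			<- contended, lock not taken
 *	}
 */
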
15271696a8beSPeter Zijlstra /**
15281696a8beSPeter Zijlstra  * rt_mutex_unlock - unlock a rt_mutex
15291696a8beSPeter Zijlstra  *
15301696a8beSPeter Zijlstra  * @lock: the rt_mutex to be unlocked
15311696a8beSPeter Zijlstra  */
15321696a8beSPeter Zijlstra void __sched rt_mutex_unlock(struct rt_mutex *lock)
15331696a8beSPeter Zijlstra {
15345facae4fSQian Cai 	mutex_release(&lock->dep_map, _RET_IP_);
15351696a8beSPeter Zijlstra 	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
15361696a8beSPeter Zijlstra }
15371696a8beSPeter Zijlstra EXPORT_SYMBOL_GPL(rt_mutex_unlock);
15381696a8beSPeter Zijlstra 
15391696a8beSPeter Zijlstra /**
1540bf594bf4SAlex Shi  * __rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock. Since the
1541bf594bf4SAlex Shi  * futex variants do not use the fast-path, it can be simple and will not
1542bf594bf4SAlex Shi  * need to retry.
1543bf594bf4SAlex Shi  * @lock:	The rt_mutex to be unlocked
1544bf594bf4SAlex Shi  * @wake_q:	The wake queue head on which the next lock waiter is queued
1545802ab58dSSebastian Andrzej Siewior  */
15465293c2efSPeter Zijlstra bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
15475293c2efSPeter Zijlstra 				    struct wake_q_head *wake_q)
1548802ab58dSSebastian Andrzej Siewior {
15495293c2efSPeter Zijlstra 	lockdep_assert_held(&lock->wait_lock);
1550fffa954fSPeter Zijlstra 
15515293c2efSPeter Zijlstra 	debug_rt_mutex_unlock(lock);
15525293c2efSPeter Zijlstra 
15535293c2efSPeter Zijlstra 	if (!rt_mutex_has_waiters(lock)) {
15545293c2efSPeter Zijlstra 		lock->owner = NULL;
15555293c2efSPeter Zijlstra 		return false; /* done */
15565293c2efSPeter Zijlstra 	}
15575293c2efSPeter Zijlstra 
15582a1c6029SXunlei Pang 	/*
1559def34eaaSMike Galbraith 	 * mark_wakeup_next_waiter() deboosts and keeps preemption
1560def34eaaSMike Galbraith 	 * disabled when we drop the wait_lock, to avoid inversion
1561def34eaaSMike Galbraith 	 * prior to the wakeup.  The preempt_disable()
1562def34eaaSMike Galbraith 	 * therein pairs with rt_mutex_postunlock().
15632a1c6029SXunlei Pang 	 */
1564def34eaaSMike Galbraith 	mark_wakeup_next_waiter(wake_q, lock);
15652a1c6029SXunlei Pang 
1566aa2bfe55SPeter Zijlstra 	return true; /* call postunlock() */
15675293c2efSPeter Zijlstra }
15685293c2efSPeter Zijlstra 
15695293c2efSPeter Zijlstra void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
15705293c2efSPeter Zijlstra {
15715293c2efSPeter Zijlstra 	DEFINE_WAKE_Q(wake_q);
15726b0ef92fSBoqun Feng 	unsigned long flags;
1573aa2bfe55SPeter Zijlstra 	bool postunlock;
15745293c2efSPeter Zijlstra 
15756b0ef92fSBoqun Feng 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
1576aa2bfe55SPeter Zijlstra 	postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
15776b0ef92fSBoqun Feng 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
15785293c2efSPeter Zijlstra 
1579aa2bfe55SPeter Zijlstra 	if (postunlock)
1580aa2bfe55SPeter Zijlstra 		rt_mutex_postunlock(&wake_q);
1581802ab58dSSebastian Andrzej Siewior }
1582802ab58dSSebastian Andrzej Siewior 
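/*
 * Illustrative only: the PI-futex code is the sole caller and unlocks
 * the rtmutex embedded in its pi_state, roughly:
 *
 *	rt_mutex_futex_unlock(&pi_state->pi_mutex);
 */
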
1583802ab58dSSebastian Andrzej Siewior /**
15841696a8beSPeter Zijlstra  * rt_mutex_destroy - mark a mutex unusable
15851696a8beSPeter Zijlstra  * @lock: the mutex to be destroyed
15861696a8beSPeter Zijlstra  *
15871696a8beSPeter Zijlstra  * This function marks the mutex uninitialized, and any subsequent
15881696a8beSPeter Zijlstra  * use of the mutex is forbidden. The mutex must not be locked when
15891696a8beSPeter Zijlstra  * this function is called.
15901696a8beSPeter Zijlstra  */
15911696a8beSPeter Zijlstra void rt_mutex_destroy(struct rt_mutex *lock)
15921696a8beSPeter Zijlstra {
15931696a8beSPeter Zijlstra 	WARN_ON(rt_mutex_is_locked(lock));
15941696a8beSPeter Zijlstra }
15951696a8beSPeter Zijlstra EXPORT_SYMBOL_GPL(rt_mutex_destroy);
15961696a8beSPeter Zijlstra 
15971696a8beSPeter Zijlstra /**
1598bf594bf4SAlex Shi  * __rt_mutex_init - initialize the rt_mutex
15991696a8beSPeter Zijlstra  *
1600bf594bf4SAlex Shi  * @lock:	The rt_mutex to be initialized
1601bf594bf4SAlex Shi  * @name:	The lock name used for debugging
1602bf594bf4SAlex Shi  * @key:	The lock class key used for debugging
16031696a8beSPeter Zijlstra  *
1604bf594bf4SAlex Shi  * Initialize the rt_mutex to unlocked state.
16051696a8beSPeter Zijlstra  *
1606bf594bf4SAlex Shi  * Initializing a locked rt_mutex is not allowed.
16071696a8beSPeter Zijlstra  */
1608f5694788SPeter Zijlstra void __rt_mutex_init(struct rt_mutex *lock, const char *name,
1609f5694788SPeter Zijlstra 		     struct lock_class_key *key)
16101696a8beSPeter Zijlstra {
16111696a8beSPeter Zijlstra 	lock->owner = NULL;
16121696a8beSPeter Zijlstra 	raw_spin_lock_init(&lock->wait_lock);
1613a23ba907SDavidlohr Bueso 	lock->waiters = RB_ROOT_CACHED;
16141696a8beSPeter Zijlstra 
1615cde50a67SLevin, Alexander (Sasha Levin) 	if (name && key)
1616f5694788SPeter Zijlstra 		debug_rt_mutex_init(lock, name, key);
16171696a8beSPeter Zijlstra }
16181696a8beSPeter Zijlstra EXPORT_SYMBOL_GPL(__rt_mutex_init);
16191696a8beSPeter Zijlstra 
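/*
 * Illustrative only: static vs. dynamic initialization, using the
 * helpers from <linux/rtmutex.h>:
 *
 *	static DEFINE_RT_MUTEX(example_lock);
 *
 * or, for a lock embedded in a dynamically allocated object:
 *
 *	rt_mutex_init(&obj->lock);	<- wraps __rt_mutex_init() with a
 *					   static lockdep class key
 */
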
16201696a8beSPeter Zijlstra /**
16211696a8beSPeter Zijlstra  * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
16221696a8beSPeter Zijlstra  *				proxy owner
16231696a8beSPeter Zijlstra  *
16241696a8beSPeter Zijlstra  * @lock:	the rt_mutex to be locked
16251696a8beSPeter Zijlstra  * @proxy_owner:	the task to set as owner
16261696a8beSPeter Zijlstra  *
16271696a8beSPeter Zijlstra  * No locking. The caller has to do the serializing itself.
162884d82ec5SThomas Gleixner  *
162984d82ec5SThomas Gleixner  * Special API call for PI-futex support. This initializes the rtmutex and
163084d82ec5SThomas Gleixner  * assigns it to @proxy_owner. Concurrent operations on the rtmutex are not
163184d82ec5SThomas Gleixner  * possible at this point because the pi_state which contains the rtmutex
163284d82ec5SThomas Gleixner  * is not yet visible to other tasks.
16331696a8beSPeter Zijlstra  */
16341696a8beSPeter Zijlstra void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
16351696a8beSPeter Zijlstra 				struct task_struct *proxy_owner)
16361696a8beSPeter Zijlstra {
1637f5694788SPeter Zijlstra 	__rt_mutex_init(lock, NULL, NULL);
16381696a8beSPeter Zijlstra 	debug_rt_mutex_proxy_lock(lock, proxy_owner);
16391696a8beSPeter Zijlstra 	rt_mutex_set_owner(lock, proxy_owner);
16401696a8beSPeter Zijlstra }
16411696a8beSPeter Zijlstra 
16421696a8beSPeter Zijlstra /**
16431696a8beSPeter Zijlstra  * rt_mutex_proxy_unlock - release a lock on behalf of owner
16441696a8beSPeter Zijlstra  *
16451696a8beSPeter Zijlstra  * @lock:	the rt_mutex to be locked
16461696a8beSPeter Zijlstra  *
16471696a8beSPeter Zijlstra  * No locking. The caller has to do the serializing itself.
164884d82ec5SThomas Gleixner  *
164984d82ec5SThomas Gleixner  * Special API call for PI-futex support. This merrily cleans up the rtmutex
165084d82ec5SThomas Gleixner  * (debugging) state. Concurrent operations on this rt_mutex are not
165184d82ec5SThomas Gleixner  * possible because it belongs to the pi_state which is about to be freed
165284d82ec5SThomas Gleixner  * and is no longer visible to other tasks.
16531696a8beSPeter Zijlstra  */
16542156ac19SThomas Gleixner void rt_mutex_proxy_unlock(struct rt_mutex *lock)
16551696a8beSPeter Zijlstra {
16561696a8beSPeter Zijlstra 	debug_rt_mutex_proxy_unlock(lock);
16571696a8beSPeter Zijlstra 	rt_mutex_set_owner(lock, NULL);
16581696a8beSPeter Zijlstra }
16591696a8beSPeter Zijlstra 
16601a1fb985SThomas Gleixner /**
16611a1fb985SThomas Gleixner  * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
16621a1fb985SThomas Gleixner  * @lock:		the rt_mutex to take
16631a1fb985SThomas Gleixner  * @waiter:		the pre-initialized rt_mutex_waiter
16641a1fb985SThomas Gleixner  * @task:		the task to prepare
16651a1fb985SThomas Gleixner  *
16661a1fb985SThomas Gleixner  * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
16671a1fb985SThomas Gleixner  * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
16681a1fb985SThomas Gleixner  *
16691a1fb985SThomas Gleixner  * NOTE: does _NOT_ remove the @waiter on failure; the caller must either call
16701a1fb985SThomas Gleixner  * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
16711a1fb985SThomas Gleixner  *
16721a1fb985SThomas Gleixner  * Returns:
16731a1fb985SThomas Gleixner  *  0 - task blocked on lock
16741a1fb985SThomas Gleixner  *  1 - acquired the lock for task, caller should wake it up
16751a1fb985SThomas Gleixner  * <0 - error
16761a1fb985SThomas Gleixner  *
16771a1fb985SThomas Gleixner  * Special API call for PI-futex support.
16781a1fb985SThomas Gleixner  */
167956222b21SPeter Zijlstra int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
168056222b21SPeter Zijlstra 			      struct rt_mutex_waiter *waiter,
168156222b21SPeter Zijlstra 			      struct task_struct *task)
168256222b21SPeter Zijlstra {
168356222b21SPeter Zijlstra 	int ret;
168456222b21SPeter Zijlstra 
16851a1fb985SThomas Gleixner 	lockdep_assert_held(&lock->wait_lock);
16861a1fb985SThomas Gleixner 
168756222b21SPeter Zijlstra 	if (try_to_take_rt_mutex(lock, task, NULL))
168856222b21SPeter Zijlstra 		return 1;
168956222b21SPeter Zijlstra 
169056222b21SPeter Zijlstra 	/* We enforce deadlock detection for futexes */
169156222b21SPeter Zijlstra 	ret = task_blocks_on_rt_mutex(lock, waiter, task,
169256222b21SPeter Zijlstra 				      RT_MUTEX_FULL_CHAINWALK);
169356222b21SPeter Zijlstra 
169456222b21SPeter Zijlstra 	if (ret && !rt_mutex_owner(lock)) {
169556222b21SPeter Zijlstra 		/*
169656222b21SPeter Zijlstra 		 * Reset the return value. We might have
169756222b21SPeter Zijlstra 		 * returned with -EDEADLK and the owner
169856222b21SPeter Zijlstra 		 * released the lock while we were walking the
169956222b21SPeter Zijlstra 		 * pi chain.  Let the waiter sort it out.
170056222b21SPeter Zijlstra 		 */
170156222b21SPeter Zijlstra 		ret = 0;
170256222b21SPeter Zijlstra 	}
170356222b21SPeter Zijlstra 
170456222b21SPeter Zijlstra 	return ret;
170556222b21SPeter Zijlstra }
170656222b21SPeter Zijlstra 
17071696a8beSPeter Zijlstra /**
17081696a8beSPeter Zijlstra  * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
17091696a8beSPeter Zijlstra  * @lock:		the rt_mutex to take
17101696a8beSPeter Zijlstra  * @waiter:		the pre-initialized rt_mutex_waiter
17111696a8beSPeter Zijlstra  * @task:		the task to prepare
17121696a8beSPeter Zijlstra  *
17131a1fb985SThomas Gleixner  * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
17141a1fb985SThomas Gleixner  * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
17151a1fb985SThomas Gleixner  *
17161a1fb985SThomas Gleixner  * NOTE: unlike __rt_mutex_start_proxy_lock(), this _DOES_ remove the @waiter
17171a1fb985SThomas Gleixner  * on failure.
17181a1fb985SThomas Gleixner  *
17191696a8beSPeter Zijlstra  * Returns:
17201696a8beSPeter Zijlstra  *  0 - task blocked on lock
17211696a8beSPeter Zijlstra  *  1 - acquired the lock for task, caller should wake it up
17221696a8beSPeter Zijlstra  * <0 - error
17231696a8beSPeter Zijlstra  *
17241a1fb985SThomas Gleixner  * Special API call for PI-futex support.
17251696a8beSPeter Zijlstra  */
17261696a8beSPeter Zijlstra int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
17271696a8beSPeter Zijlstra 			      struct rt_mutex_waiter *waiter,
1728c051b21fSThomas Gleixner 			      struct task_struct *task)
17291696a8beSPeter Zijlstra {
17301696a8beSPeter Zijlstra 	int ret;
17311696a8beSPeter Zijlstra 
1732b4abf910SThomas Gleixner 	raw_spin_lock_irq(&lock->wait_lock);
173356222b21SPeter Zijlstra 	ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
17341a1fb985SThomas Gleixner 	if (unlikely(ret))
17351a1fb985SThomas Gleixner 		remove_waiter(lock, waiter);
1736b4abf910SThomas Gleixner 	raw_spin_unlock_irq(&lock->wait_lock);
17371696a8beSPeter Zijlstra 
17381696a8beSPeter Zijlstra 	return ret;
17391696a8beSPeter Zijlstra }
17401696a8beSPeter Zijlstra 
17411696a8beSPeter Zijlstra /**
174238d589f2SPeter Zijlstra  * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
17431696a8beSPeter Zijlstra  * @lock:		the rt_mutex we were woken on
17441696a8beSPeter Zijlstra  * @to:			the timeout, NULL if none. The hrtimer should already have
17451696a8beSPeter Zijlstra  *			been started.
17461696a8beSPeter Zijlstra  * @waiter:		the pre-initialized rt_mutex_waiter
17471696a8beSPeter Zijlstra  *
1748c034f48eSRandy Dunlap  * Wait for the lock acquisition started on our behalf by
174938d589f2SPeter Zijlstra  * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
175038d589f2SPeter Zijlstra  * rt_mutex_cleanup_proxy_lock().
17511696a8beSPeter Zijlstra  *
17521696a8beSPeter Zijlstra  * Returns:
17531696a8beSPeter Zijlstra  *  0 - success
1754c051b21fSThomas Gleixner  * <0 - error, one of -EINTR, -ETIMEDOUT
17551696a8beSPeter Zijlstra  *
175638d589f2SPeter Zijlstra  * Special API call for PI-futex support
17571696a8beSPeter Zijlstra  */
175838d589f2SPeter Zijlstra int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
17591696a8beSPeter Zijlstra 			       struct hrtimer_sleeper *to,
1760c051b21fSThomas Gleixner 			       struct rt_mutex_waiter *waiter)
17611696a8beSPeter Zijlstra {
17621696a8beSPeter Zijlstra 	int ret;
17631696a8beSPeter Zijlstra 
1764b4abf910SThomas Gleixner 	raw_spin_lock_irq(&lock->wait_lock);
1765afffc6c1SDavidlohr Bueso 	/* sleep on the mutex */
176604dc1b2fSPeter Zijlstra 	set_current_state(TASK_INTERRUPTIBLE);
17671696a8beSPeter Zijlstra 	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
176804dc1b2fSPeter Zijlstra 	/*
176904dc1b2fSPeter Zijlstra 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
177004dc1b2fSPeter Zijlstra 	 * have to fix that up.
177104dc1b2fSPeter Zijlstra 	 */
177204dc1b2fSPeter Zijlstra 	fixup_rt_mutex_waiters(lock);
1773b4abf910SThomas Gleixner 	raw_spin_unlock_irq(&lock->wait_lock);
17741696a8beSPeter Zijlstra 
17751696a8beSPeter Zijlstra 	return ret;
17761696a8beSPeter Zijlstra }
177738d589f2SPeter Zijlstra 
177838d589f2SPeter Zijlstra /**
177938d589f2SPeter Zijlstra  * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
178038d589f2SPeter Zijlstra  * @lock:		the rt_mutex we were woken on
178138d589f2SPeter Zijlstra  * @waiter:		the pre-initialized rt_mutex_waiter
178238d589f2SPeter Zijlstra  *
17831a1fb985SThomas Gleixner  * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
17841a1fb985SThomas Gleixner  * rt_mutex_wait_proxy_lock().
178538d589f2SPeter Zijlstra  *
178638d589f2SPeter Zijlstra  * Unless we acquired the lock, we're still enqueued on the wait-list and can
178738d589f2SPeter Zijlstra  * in fact still be granted ownership until we're removed. Therefore we can
178838d589f2SPeter Zijlstra  * find we are in fact the owner and must disregard the
178938d589f2SPeter Zijlstra  * rt_mutex_wait_proxy_lock() failure.
179038d589f2SPeter Zijlstra  *
179138d589f2SPeter Zijlstra  * Returns:
179238d589f2SPeter Zijlstra  *  true  - did the cleanup, we're done.
179338d589f2SPeter Zijlstra  *  false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
179438d589f2SPeter Zijlstra  *          the caller should disregard its return value.
179538d589f2SPeter Zijlstra  *
179638d589f2SPeter Zijlstra  * Special API call for PI-futex support
179738d589f2SPeter Zijlstra  */
179838d589f2SPeter Zijlstra bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
179938d589f2SPeter Zijlstra 				 struct rt_mutex_waiter *waiter)
180038d589f2SPeter Zijlstra {
180138d589f2SPeter Zijlstra 	bool cleanup = false;
180238d589f2SPeter Zijlstra 
180338d589f2SPeter Zijlstra 	raw_spin_lock_irq(&lock->wait_lock);
180438d589f2SPeter Zijlstra 	/*
180504dc1b2fSPeter Zijlstra 	 * Do an unconditional try-lock; this deals with the lock stealing
180604dc1b2fSPeter Zijlstra 	 * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
180704dc1b2fSPeter Zijlstra 	 * sets a NULL owner.
180804dc1b2fSPeter Zijlstra 	 *
180904dc1b2fSPeter Zijlstra 	 * We're not interested in the return value, because the subsequent
181004dc1b2fSPeter Zijlstra 	 * test on rt_mutex_owner() will infer that. If the trylock succeeded,
181104dc1b2fSPeter Zijlstra 	 * we will own the lock and it will have removed the waiter. If we
181204dc1b2fSPeter Zijlstra 	 * failed the trylock, we're still not owner and we need to remove
181304dc1b2fSPeter Zijlstra 	 * ourselves.
181404dc1b2fSPeter Zijlstra 	 */
181504dc1b2fSPeter Zijlstra 	try_to_take_rt_mutex(lock, current, waiter);
181604dc1b2fSPeter Zijlstra 	/*
181738d589f2SPeter Zijlstra 	 * Unless we're the owner, we're still enqueued on the wait_list.
181838d589f2SPeter Zijlstra 	 * So check if we became the owner; if not, take us off the wait_list.
181938d589f2SPeter Zijlstra 	 */
182038d589f2SPeter Zijlstra 	if (rt_mutex_owner(lock) != current) {
182138d589f2SPeter Zijlstra 		remove_waiter(lock, waiter);
182238d589f2SPeter Zijlstra 		cleanup = true;
182338d589f2SPeter Zijlstra 	}
1824cfafcd11SPeter Zijlstra 	/*
1825cfafcd11SPeter Zijlstra 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
1826cfafcd11SPeter Zijlstra 	 * have to fix that up.
1827cfafcd11SPeter Zijlstra 	 */
1828cfafcd11SPeter Zijlstra 	fixup_rt_mutex_waiters(lock);
1829cfafcd11SPeter Zijlstra 
183038d589f2SPeter Zijlstra 	raw_spin_unlock_irq(&lock->wait_lock);
183138d589f2SPeter Zijlstra 
183238d589f2SPeter Zijlstra 	return cleanup;
183338d589f2SPeter Zijlstra }
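
/*
 * Illustrative proxy-lock lifecycle, modelled on the PI-futex requeue
 * code, which is the only user of these calls:
 *
 *	rt_mutex_init_waiter(&rt_waiter);
 *
 *	ret = rt_mutex_start_proxy_lock(lock, &rt_waiter, task);
 *	if (ret == 1)
 *		ret = 0;	<- acquired on @task's behalf right away
 *
 *	...			<- the task blocks and is woken later
 *
 *	ret = rt_mutex_wait_proxy_lock(lock, to, &rt_waiter);
 *	if (ret && !rt_mutex_cleanup_proxy_lock(lock, &rt_waiter))
 *		ret = 0;	<- we became owner after all
 */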
1834