1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 21696a8beSPeter Zijlstra /* 31696a8beSPeter Zijlstra * RT-Mutexes: simple blocking mutual exclusion locks with PI support 41696a8beSPeter Zijlstra * 51696a8beSPeter Zijlstra * started by Ingo Molnar and Thomas Gleixner. 61696a8beSPeter Zijlstra * 71696a8beSPeter Zijlstra * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 81696a8beSPeter Zijlstra * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com> 91696a8beSPeter Zijlstra * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt 101696a8beSPeter Zijlstra * Copyright (C) 2006 Esben Nielsen 11992caf7fSSteven Rostedt * Adaptive Spinlocks: 12992caf7fSSteven Rostedt * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich, 13992caf7fSSteven Rostedt * and Peter Morreale, 14992caf7fSSteven Rostedt * Adaptive Spinlocks simplification: 15992caf7fSSteven Rostedt * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com> 161696a8beSPeter Zijlstra * 17387b1468SMauro Carvalho Chehab * See Documentation/locking/rt-mutex-design.rst for details. 181696a8beSPeter Zijlstra */ 19531ae4b0SThomas Gleixner #include <linux/sched.h> 20531ae4b0SThomas Gleixner #include <linux/sched/debug.h> 21531ae4b0SThomas Gleixner #include <linux/sched/deadline.h> 22174cd4b1SIngo Molnar #include <linux/sched/signal.h> 231696a8beSPeter Zijlstra #include <linux/sched/rt.h> 2484f001e1SIngo Molnar #include <linux/sched/wake_q.h> 25add46132SPeter Zijlstra #include <linux/ww_mutex.h> 261696a8beSPeter Zijlstra 27ee042be1SNamhyung Kim #include <trace/events/lock.h> 28ee042be1SNamhyung Kim 291696a8beSPeter Zijlstra #include "rtmutex_common.h" 301696a8beSPeter Zijlstra 31add46132SPeter Zijlstra #ifndef WW_RT 32add46132SPeter Zijlstra # define build_ww_mutex() (false) 33add46132SPeter Zijlstra # define ww_container_of(rtm) NULL 34add46132SPeter Zijlstra 35add46132SPeter Zijlstra static inline int __ww_mutex_add_waiter(struct rt_mutex_waiter *waiter, 36add46132SPeter Zijlstra struct rt_mutex *lock, 37add46132SPeter Zijlstra struct ww_acquire_ctx *ww_ctx) 38add46132SPeter Zijlstra { 39add46132SPeter Zijlstra return 0; 40add46132SPeter Zijlstra } 41add46132SPeter Zijlstra 42add46132SPeter Zijlstra static inline void __ww_mutex_check_waiters(struct rt_mutex *lock, 43add46132SPeter Zijlstra struct ww_acquire_ctx *ww_ctx) 44add46132SPeter Zijlstra { 45add46132SPeter Zijlstra } 46add46132SPeter Zijlstra 47add46132SPeter Zijlstra static inline void ww_mutex_lock_acquired(struct ww_mutex *lock, 48add46132SPeter Zijlstra struct ww_acquire_ctx *ww_ctx) 49add46132SPeter Zijlstra { 50add46132SPeter Zijlstra } 51add46132SPeter Zijlstra 52add46132SPeter Zijlstra static inline int __ww_mutex_check_kill(struct rt_mutex *lock, 53add46132SPeter Zijlstra struct rt_mutex_waiter *waiter, 54add46132SPeter Zijlstra struct ww_acquire_ctx *ww_ctx) 55add46132SPeter Zijlstra { 56add46132SPeter Zijlstra return 0; 57add46132SPeter Zijlstra } 58add46132SPeter Zijlstra 59add46132SPeter Zijlstra #else 60add46132SPeter Zijlstra # define build_ww_mutex() (true) 61add46132SPeter Zijlstra # define ww_container_of(rtm) container_of(rtm, struct ww_mutex, base) 62add46132SPeter Zijlstra # include "ww_mutex.h" 63add46132SPeter Zijlstra #endif 64add46132SPeter Zijlstra 651696a8beSPeter Zijlstra /* 661696a8beSPeter Zijlstra * lock->owner state tracking: 671696a8beSPeter Zijlstra * 681696a8beSPeter Zijlstra * lock->owner holds the task_struct pointer of the owner. Bit 0 691696a8beSPeter Zijlstra * is used to keep track of the "lock has waiters" state. 701696a8beSPeter Zijlstra * 711696a8beSPeter Zijlstra * owner bit0 721696a8beSPeter Zijlstra * NULL 0 lock is free (fast acquire possible) 731696a8beSPeter Zijlstra * NULL 1 lock is free and has waiters and the top waiter 741696a8beSPeter Zijlstra * is going to take the lock* 751696a8beSPeter Zijlstra * taskpointer 0 lock is held (fast release possible) 761696a8beSPeter Zijlstra * taskpointer 1 lock is held and has waiters** 771696a8beSPeter Zijlstra * 781696a8beSPeter Zijlstra * The fast atomic compare exchange based acquire and release is only 791696a8beSPeter Zijlstra * possible when bit 0 of lock->owner is 0. 801696a8beSPeter Zijlstra * 811696a8beSPeter Zijlstra * (*) It also can be a transitional state when grabbing the lock 821696a8beSPeter Zijlstra * with ->wait_lock is held. To prevent any fast path cmpxchg to the lock, 831696a8beSPeter Zijlstra * we need to set the bit0 before looking at the lock, and the owner may be 841696a8beSPeter Zijlstra * NULL in this small time, hence this can be a transitional state. 851696a8beSPeter Zijlstra * 861696a8beSPeter Zijlstra * (**) There is a small time when bit 0 is set but there are no 871696a8beSPeter Zijlstra * waiters. This can happen when grabbing the lock in the slow path. 881696a8beSPeter Zijlstra * To prevent a cmpxchg of the owner releasing the lock, we need to 891696a8beSPeter Zijlstra * set this bit before looking at the lock. 901696a8beSPeter Zijlstra */ 911696a8beSPeter Zijlstra 921c0908d8SMel Gorman static __always_inline struct task_struct * 931c0908d8SMel Gorman rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner) 941696a8beSPeter Zijlstra { 951696a8beSPeter Zijlstra unsigned long val = (unsigned long)owner; 961696a8beSPeter Zijlstra 971696a8beSPeter Zijlstra if (rt_mutex_has_waiters(lock)) 981696a8beSPeter Zijlstra val |= RT_MUTEX_HAS_WAITERS; 991696a8beSPeter Zijlstra 1001c0908d8SMel Gorman return (struct task_struct *)val; 1011c0908d8SMel Gorman } 1021c0908d8SMel Gorman 1031c0908d8SMel Gorman static __always_inline void 1041c0908d8SMel Gorman rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner) 1051c0908d8SMel Gorman { 1061c0908d8SMel Gorman /* 1071c0908d8SMel Gorman * lock->wait_lock is held but explicit acquire semantics are needed 1081c0908d8SMel Gorman * for a new lock owner so WRITE_ONCE is insufficient. 1091c0908d8SMel Gorman */ 1101c0908d8SMel Gorman xchg_acquire(&lock->owner, rt_mutex_owner_encode(lock, owner)); 1111c0908d8SMel Gorman } 1121c0908d8SMel Gorman 1131c0908d8SMel Gorman static __always_inline void rt_mutex_clear_owner(struct rt_mutex_base *lock) 1141c0908d8SMel Gorman { 1151c0908d8SMel Gorman /* lock->wait_lock is held so the unlock provides release semantics. */ 1161c0908d8SMel Gorman WRITE_ONCE(lock->owner, rt_mutex_owner_encode(lock, NULL)); 1171696a8beSPeter Zijlstra } 1181696a8beSPeter Zijlstra 119830e6accSPeter Zijlstra static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock) 1201696a8beSPeter Zijlstra { 1211696a8beSPeter Zijlstra lock->owner = (struct task_struct *) 1221696a8beSPeter Zijlstra ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS); 1231696a8beSPeter Zijlstra } 1241696a8beSPeter Zijlstra 1251c0908d8SMel Gorman static __always_inline void 1261c0908d8SMel Gorman fixup_rt_mutex_waiters(struct rt_mutex_base *lock, bool acquire_lock) 1271696a8beSPeter Zijlstra { 128dbb26055SThomas Gleixner unsigned long owner, *p = (unsigned long *) &lock->owner; 129dbb26055SThomas Gleixner 130dbb26055SThomas Gleixner if (rt_mutex_has_waiters(lock)) 131dbb26055SThomas Gleixner return; 132dbb26055SThomas Gleixner 133dbb26055SThomas Gleixner /* 134dbb26055SThomas Gleixner * The rbtree has no waiters enqueued, now make sure that the 135dbb26055SThomas Gleixner * lock->owner still has the waiters bit set, otherwise the 136dbb26055SThomas Gleixner * following can happen: 137dbb26055SThomas Gleixner * 138dbb26055SThomas Gleixner * CPU 0 CPU 1 CPU2 139dbb26055SThomas Gleixner * l->owner=T1 140dbb26055SThomas Gleixner * rt_mutex_lock(l) 141dbb26055SThomas Gleixner * lock(l->lock) 142dbb26055SThomas Gleixner * l->owner = T1 | HAS_WAITERS; 143dbb26055SThomas Gleixner * enqueue(T2) 144dbb26055SThomas Gleixner * boost() 145dbb26055SThomas Gleixner * unlock(l->lock) 146dbb26055SThomas Gleixner * block() 147dbb26055SThomas Gleixner * 148dbb26055SThomas Gleixner * rt_mutex_lock(l) 149dbb26055SThomas Gleixner * lock(l->lock) 150dbb26055SThomas Gleixner * l->owner = T1 | HAS_WAITERS; 151dbb26055SThomas Gleixner * enqueue(T3) 152dbb26055SThomas Gleixner * boost() 153dbb26055SThomas Gleixner * unlock(l->lock) 154dbb26055SThomas Gleixner * block() 155dbb26055SThomas Gleixner * signal(->T2) signal(->T3) 156dbb26055SThomas Gleixner * lock(l->lock) 157dbb26055SThomas Gleixner * dequeue(T2) 158dbb26055SThomas Gleixner * deboost() 159dbb26055SThomas Gleixner * unlock(l->lock) 160dbb26055SThomas Gleixner * lock(l->lock) 161dbb26055SThomas Gleixner * dequeue(T3) 162dbb26055SThomas Gleixner * ==> wait list is empty 163dbb26055SThomas Gleixner * deboost() 164dbb26055SThomas Gleixner * unlock(l->lock) 165dbb26055SThomas Gleixner * lock(l->lock) 166dbb26055SThomas Gleixner * fixup_rt_mutex_waiters() 167dbb26055SThomas Gleixner * if (wait_list_empty(l) { 168dbb26055SThomas Gleixner * l->owner = owner 169dbb26055SThomas Gleixner * owner = l->owner & ~HAS_WAITERS; 170dbb26055SThomas Gleixner * ==> l->owner = T1 171dbb26055SThomas Gleixner * } 172dbb26055SThomas Gleixner * lock(l->lock) 173dbb26055SThomas Gleixner * rt_mutex_unlock(l) fixup_rt_mutex_waiters() 174dbb26055SThomas Gleixner * if (wait_list_empty(l) { 175dbb26055SThomas Gleixner * owner = l->owner & ~HAS_WAITERS; 176dbb26055SThomas Gleixner * cmpxchg(l->owner, T1, NULL) 177dbb26055SThomas Gleixner * ===> Success (l->owner = NULL) 178dbb26055SThomas Gleixner * 179dbb26055SThomas Gleixner * l->owner = owner 180dbb26055SThomas Gleixner * ==> l->owner = T1 181dbb26055SThomas Gleixner * } 182dbb26055SThomas Gleixner * 183dbb26055SThomas Gleixner * With the check for the waiter bit in place T3 on CPU2 will not 184dbb26055SThomas Gleixner * overwrite. All tasks fiddling with the waiters bit are 185dbb26055SThomas Gleixner * serialized by l->lock, so nothing else can modify the waiters 186dbb26055SThomas Gleixner * bit. If the bit is set then nothing can change l->owner either 187dbb26055SThomas Gleixner * so the simple RMW is safe. The cmpxchg() will simply fail if it 188dbb26055SThomas Gleixner * happens in the middle of the RMW because the waiters bit is 189dbb26055SThomas Gleixner * still set. 190dbb26055SThomas Gleixner */ 191dbb26055SThomas Gleixner owner = READ_ONCE(*p); 1921c0908d8SMel Gorman if (owner & RT_MUTEX_HAS_WAITERS) { 1931c0908d8SMel Gorman /* 1941c0908d8SMel Gorman * See rt_mutex_set_owner() and rt_mutex_clear_owner() on 1951c0908d8SMel Gorman * why xchg_acquire() is used for updating owner for 1961c0908d8SMel Gorman * locking and WRITE_ONCE() for unlocking. 1971c0908d8SMel Gorman * 1981c0908d8SMel Gorman * WRITE_ONCE() would work for the acquire case too, but 1991c0908d8SMel Gorman * in case that the lock acquisition failed it might 2001c0908d8SMel Gorman * force other lockers into the slow path unnecessarily. 2011c0908d8SMel Gorman */ 2021c0908d8SMel Gorman if (acquire_lock) 2031c0908d8SMel Gorman xchg_acquire(p, owner & ~RT_MUTEX_HAS_WAITERS); 2041c0908d8SMel Gorman else 205dbb26055SThomas Gleixner WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS); 2061696a8beSPeter Zijlstra } 2071c0908d8SMel Gorman } 2081696a8beSPeter Zijlstra 2091696a8beSPeter Zijlstra /* 210cede8841SSebastian Andrzej Siewior * We can speed up the acquire/release, if there's no debugging state to be 211cede8841SSebastian Andrzej Siewior * set up. 2121696a8beSPeter Zijlstra */ 213cede8841SSebastian Andrzej Siewior #ifndef CONFIG_DEBUG_RT_MUTEXES 214830e6accSPeter Zijlstra static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock, 21578515930SSebastian Andrzej Siewior struct task_struct *old, 21678515930SSebastian Andrzej Siewior struct task_struct *new) 21778515930SSebastian Andrzej Siewior { 218709e0b62SThomas Gleixner return try_cmpxchg_acquire(&lock->owner, &old, new); 21978515930SSebastian Andrzej Siewior } 22078515930SSebastian Andrzej Siewior 221af9f0063SSebastian Andrzej Siewior static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock) 222af9f0063SSebastian Andrzej Siewior { 223af9f0063SSebastian Andrzej Siewior return rt_mutex_cmpxchg_acquire(lock, NULL, current); 224af9f0063SSebastian Andrzej Siewior } 225af9f0063SSebastian Andrzej Siewior 226830e6accSPeter Zijlstra static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock, 22778515930SSebastian Andrzej Siewior struct task_struct *old, 22878515930SSebastian Andrzej Siewior struct task_struct *new) 22978515930SSebastian Andrzej Siewior { 230709e0b62SThomas Gleixner return try_cmpxchg_release(&lock->owner, &old, new); 23178515930SSebastian Andrzej Siewior } 232700318d1SDavidlohr Bueso 233700318d1SDavidlohr Bueso /* 234700318d1SDavidlohr Bueso * Callers must hold the ->wait_lock -- which is the whole purpose as we force 235700318d1SDavidlohr Bueso * all future threads that attempt to [Rmw] the lock to the slowpath. As such 236700318d1SDavidlohr Bueso * relaxed semantics suffice. 237700318d1SDavidlohr Bueso */ 238830e6accSPeter Zijlstra static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock) 2391696a8beSPeter Zijlstra { 240ce3576ebSUros Bizjak unsigned long *p = (unsigned long *) &lock->owner; 241ce3576ebSUros Bizjak unsigned long owner, new; 2421696a8beSPeter Zijlstra 243ce3576ebSUros Bizjak owner = READ_ONCE(*p); 2441696a8beSPeter Zijlstra do { 245ce3576ebSUros Bizjak new = owner | RT_MUTEX_HAS_WAITERS; 246ce3576ebSUros Bizjak } while (!try_cmpxchg_relaxed(p, &owner, new)); 2471c0908d8SMel Gorman 2481c0908d8SMel Gorman /* 2491c0908d8SMel Gorman * The cmpxchg loop above is relaxed to avoid back-to-back ACQUIRE 2501c0908d8SMel Gorman * operations in the event of contention. Ensure the successful 2511c0908d8SMel Gorman * cmpxchg is visible. 2521c0908d8SMel Gorman */ 2531c0908d8SMel Gorman smp_mb__after_atomic(); 2541696a8beSPeter Zijlstra } 25527e35715SThomas Gleixner 25627e35715SThomas Gleixner /* 25727e35715SThomas Gleixner * Safe fastpath aware unlock: 25827e35715SThomas Gleixner * 1) Clear the waiters bit 25927e35715SThomas Gleixner * 2) Drop lock->wait_lock 26027e35715SThomas Gleixner * 3) Try to unlock the lock with cmpxchg 26127e35715SThomas Gleixner */ 262830e6accSPeter Zijlstra static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock, 263b4abf910SThomas Gleixner unsigned long flags) 26427e35715SThomas Gleixner __releases(lock->wait_lock) 26527e35715SThomas Gleixner { 26627e35715SThomas Gleixner struct task_struct *owner = rt_mutex_owner(lock); 26727e35715SThomas Gleixner 26827e35715SThomas Gleixner clear_rt_mutex_waiters(lock); 269b4abf910SThomas Gleixner raw_spin_unlock_irqrestore(&lock->wait_lock, flags); 27027e35715SThomas Gleixner /* 27127e35715SThomas Gleixner * If a new waiter comes in between the unlock and the cmpxchg 27227e35715SThomas Gleixner * we have two situations: 27327e35715SThomas Gleixner * 27427e35715SThomas Gleixner * unlock(wait_lock); 27527e35715SThomas Gleixner * lock(wait_lock); 27627e35715SThomas Gleixner * cmpxchg(p, owner, 0) == owner 27727e35715SThomas Gleixner * mark_rt_mutex_waiters(lock); 27827e35715SThomas Gleixner * acquire(lock); 27927e35715SThomas Gleixner * or: 28027e35715SThomas Gleixner * 28127e35715SThomas Gleixner * unlock(wait_lock); 28227e35715SThomas Gleixner * lock(wait_lock); 28327e35715SThomas Gleixner * mark_rt_mutex_waiters(lock); 28427e35715SThomas Gleixner * 28527e35715SThomas Gleixner * cmpxchg(p, owner, 0) != owner 28627e35715SThomas Gleixner * enqueue_waiter(); 28727e35715SThomas Gleixner * unlock(wait_lock); 28827e35715SThomas Gleixner * lock(wait_lock); 28927e35715SThomas Gleixner * wake waiter(); 29027e35715SThomas Gleixner * unlock(wait_lock); 29127e35715SThomas Gleixner * lock(wait_lock); 29227e35715SThomas Gleixner * acquire(lock); 29327e35715SThomas Gleixner */ 294700318d1SDavidlohr Bueso return rt_mutex_cmpxchg_release(lock, owner, NULL); 29527e35715SThomas Gleixner } 29627e35715SThomas Gleixner 2971696a8beSPeter Zijlstra #else 298830e6accSPeter Zijlstra static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock, 29978515930SSebastian Andrzej Siewior struct task_struct *old, 30078515930SSebastian Andrzej Siewior struct task_struct *new) 30178515930SSebastian Andrzej Siewior { 30278515930SSebastian Andrzej Siewior return false; 30378515930SSebastian Andrzej Siewior 30478515930SSebastian Andrzej Siewior } 30578515930SSebastian Andrzej Siewior 306af9f0063SSebastian Andrzej Siewior static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock); 307af9f0063SSebastian Andrzej Siewior 308af9f0063SSebastian Andrzej Siewior static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock) 309af9f0063SSebastian Andrzej Siewior { 310af9f0063SSebastian Andrzej Siewior /* 311af9f0063SSebastian Andrzej Siewior * With debug enabled rt_mutex_cmpxchg trylock() will always fail. 312af9f0063SSebastian Andrzej Siewior * 313af9f0063SSebastian Andrzej Siewior * Avoid unconditionally taking the slow path by using 314af9f0063SSebastian Andrzej Siewior * rt_mutex_slow_trylock() which is covered by the debug code and can 315af9f0063SSebastian Andrzej Siewior * acquire a non-contended rtmutex. 316af9f0063SSebastian Andrzej Siewior */ 317af9f0063SSebastian Andrzej Siewior return rt_mutex_slowtrylock(lock); 318af9f0063SSebastian Andrzej Siewior } 319af9f0063SSebastian Andrzej Siewior 320830e6accSPeter Zijlstra static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock, 32178515930SSebastian Andrzej Siewior struct task_struct *old, 32278515930SSebastian Andrzej Siewior struct task_struct *new) 32378515930SSebastian Andrzej Siewior { 32478515930SSebastian Andrzej Siewior return false; 32578515930SSebastian Andrzej Siewior } 326700318d1SDavidlohr Bueso 327830e6accSPeter Zijlstra static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock) 3281696a8beSPeter Zijlstra { 3291696a8beSPeter Zijlstra lock->owner = (struct task_struct *) 3301696a8beSPeter Zijlstra ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS); 3311696a8beSPeter Zijlstra } 33227e35715SThomas Gleixner 33327e35715SThomas Gleixner /* 33427e35715SThomas Gleixner * Simple slow path only version: lock->owner is protected by lock->wait_lock. 33527e35715SThomas Gleixner */ 336830e6accSPeter Zijlstra static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock, 337b4abf910SThomas Gleixner unsigned long flags) 33827e35715SThomas Gleixner __releases(lock->wait_lock) 33927e35715SThomas Gleixner { 34027e35715SThomas Gleixner lock->owner = NULL; 341b4abf910SThomas Gleixner raw_spin_unlock_irqrestore(&lock->wait_lock, flags); 34227e35715SThomas Gleixner return true; 34327e35715SThomas Gleixner } 3441696a8beSPeter Zijlstra #endif 3451696a8beSPeter Zijlstra 346715f7f9eSPeter Zijlstra static __always_inline int __waiter_prio(struct task_struct *task) 347715f7f9eSPeter Zijlstra { 348715f7f9eSPeter Zijlstra int prio = task->prio; 349715f7f9eSPeter Zijlstra 350715f7f9eSPeter Zijlstra if (!rt_prio(prio)) 351715f7f9eSPeter Zijlstra return DEFAULT_PRIO; 352715f7f9eSPeter Zijlstra 353715f7f9eSPeter Zijlstra return prio; 354715f7f9eSPeter Zijlstra } 355715f7f9eSPeter Zijlstra 356f7853c34SPeter Zijlstra /* 357f7853c34SPeter Zijlstra * Update the waiter->tree copy of the sort keys. 358f7853c34SPeter Zijlstra */ 359715f7f9eSPeter Zijlstra static __always_inline void 360715f7f9eSPeter Zijlstra waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task) 361715f7f9eSPeter Zijlstra { 362f7853c34SPeter Zijlstra lockdep_assert_held(&waiter->lock->wait_lock); 363f7853c34SPeter Zijlstra lockdep_assert(RB_EMPTY_NODE(&waiter->tree.entry)); 364f7853c34SPeter Zijlstra 365f7853c34SPeter Zijlstra waiter->tree.prio = __waiter_prio(task); 366f7853c34SPeter Zijlstra waiter->tree.deadline = task->dl.deadline; 367715f7f9eSPeter Zijlstra } 368715f7f9eSPeter Zijlstra 36919830e55SPeter Zijlstra /* 370f7853c34SPeter Zijlstra * Update the waiter->pi_tree copy of the sort keys (from the tree copy). 37119830e55SPeter Zijlstra */ 372f7853c34SPeter Zijlstra static __always_inline void 373f7853c34SPeter Zijlstra waiter_clone_prio(struct rt_mutex_waiter *waiter, struct task_struct *task) 374f7853c34SPeter Zijlstra { 375f7853c34SPeter Zijlstra lockdep_assert_held(&waiter->lock->wait_lock); 376f7853c34SPeter Zijlstra lockdep_assert_held(&task->pi_lock); 377f7853c34SPeter Zijlstra lockdep_assert(RB_EMPTY_NODE(&waiter->pi_tree.entry)); 37819830e55SPeter Zijlstra 379f7853c34SPeter Zijlstra waiter->pi_tree.prio = waiter->tree.prio; 380f7853c34SPeter Zijlstra waiter->pi_tree.deadline = waiter->tree.deadline; 381f7853c34SPeter Zijlstra } 382f7853c34SPeter Zijlstra 383f7853c34SPeter Zijlstra /* 384f7853c34SPeter Zijlstra * Only use with rt_waiter_node_{less,equal}() 385f7853c34SPeter Zijlstra */ 386f7853c34SPeter Zijlstra #define task_to_waiter_node(p) \ 387f7853c34SPeter Zijlstra &(struct rt_waiter_node){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline } 388f7853c34SPeter Zijlstra #define task_to_waiter(p) \ 389f7853c34SPeter Zijlstra &(struct rt_mutex_waiter){ .tree = *task_to_waiter_node(p) } 390f7853c34SPeter Zijlstra 391f7853c34SPeter Zijlstra static __always_inline int rt_waiter_node_less(struct rt_waiter_node *left, 392f7853c34SPeter Zijlstra struct rt_waiter_node *right) 393fb00aca4SPeter Zijlstra { 3942d3d891dSDario Faggioli if (left->prio < right->prio) 395fb00aca4SPeter Zijlstra return 1; 396fb00aca4SPeter Zijlstra 3971696a8beSPeter Zijlstra /* 3982d3d891dSDario Faggioli * If both waiters have dl_prio(), we check the deadlines of the 3992d3d891dSDario Faggioli * associated tasks. 4002d3d891dSDario Faggioli * If left waiter has a dl_prio(), and we didn't return 1 above, 4012d3d891dSDario Faggioli * then right waiter has a dl_prio() too. 402fb00aca4SPeter Zijlstra */ 4032d3d891dSDario Faggioli if (dl_prio(left->prio)) 404e0aad5b4SPeter Zijlstra return dl_time_before(left->deadline, right->deadline); 405fb00aca4SPeter Zijlstra 406fb00aca4SPeter Zijlstra return 0; 407fb00aca4SPeter Zijlstra } 408fb00aca4SPeter Zijlstra 409f7853c34SPeter Zijlstra static __always_inline int rt_waiter_node_equal(struct rt_waiter_node *left, 410f7853c34SPeter Zijlstra struct rt_waiter_node *right) 41119830e55SPeter Zijlstra { 41219830e55SPeter Zijlstra if (left->prio != right->prio) 41319830e55SPeter Zijlstra return 0; 41419830e55SPeter Zijlstra 41519830e55SPeter Zijlstra /* 41619830e55SPeter Zijlstra * If both waiters have dl_prio(), we check the deadlines of the 41719830e55SPeter Zijlstra * associated tasks. 41819830e55SPeter Zijlstra * If left waiter has a dl_prio(), and we didn't return 0 above, 41919830e55SPeter Zijlstra * then right waiter has a dl_prio() too. 42019830e55SPeter Zijlstra */ 42119830e55SPeter Zijlstra if (dl_prio(left->prio)) 42219830e55SPeter Zijlstra return left->deadline == right->deadline; 42319830e55SPeter Zijlstra 42419830e55SPeter Zijlstra return 1; 42519830e55SPeter Zijlstra } 42619830e55SPeter Zijlstra 42748eb3f4fSGregory Haskins static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter, 42848eb3f4fSGregory Haskins struct rt_mutex_waiter *top_waiter) 42948eb3f4fSGregory Haskins { 430f7853c34SPeter Zijlstra if (rt_waiter_node_less(&waiter->tree, &top_waiter->tree)) 43148eb3f4fSGregory Haskins return true; 43248eb3f4fSGregory Haskins 43348eb3f4fSGregory Haskins #ifdef RT_MUTEX_BUILD_SPINLOCKS 43448eb3f4fSGregory Haskins /* 43548eb3f4fSGregory Haskins * Note that RT tasks are excluded from same priority (lateral) 43648eb3f4fSGregory Haskins * steals to prevent the introduction of an unbounded latency. 43748eb3f4fSGregory Haskins */ 438f7853c34SPeter Zijlstra if (rt_prio(waiter->tree.prio) || dl_prio(waiter->tree.prio)) 43948eb3f4fSGregory Haskins return false; 44048eb3f4fSGregory Haskins 441f7853c34SPeter Zijlstra return rt_waiter_node_equal(&waiter->tree, &top_waiter->tree); 44248eb3f4fSGregory Haskins #else 44348eb3f4fSGregory Haskins return false; 44448eb3f4fSGregory Haskins #endif 44548eb3f4fSGregory Haskins } 44648eb3f4fSGregory Haskins 4475a798725SPeter Zijlstra #define __node_2_waiter(node) \ 448f7853c34SPeter Zijlstra rb_entry((node), struct rt_mutex_waiter, tree.entry) 4495a798725SPeter Zijlstra 450d7a2edb8SThomas Gleixner static __always_inline bool __waiter_less(struct rb_node *a, const struct rb_node *b) 4515a798725SPeter Zijlstra { 452add46132SPeter Zijlstra struct rt_mutex_waiter *aw = __node_2_waiter(a); 453add46132SPeter Zijlstra struct rt_mutex_waiter *bw = __node_2_waiter(b); 454add46132SPeter Zijlstra 455f7853c34SPeter Zijlstra if (rt_waiter_node_less(&aw->tree, &bw->tree)) 456add46132SPeter Zijlstra return 1; 457add46132SPeter Zijlstra 458add46132SPeter Zijlstra if (!build_ww_mutex()) 459add46132SPeter Zijlstra return 0; 460add46132SPeter Zijlstra 461f7853c34SPeter Zijlstra if (rt_waiter_node_less(&bw->tree, &aw->tree)) 462add46132SPeter Zijlstra return 0; 463add46132SPeter Zijlstra 464add46132SPeter Zijlstra /* NOTE: relies on waiter->ww_ctx being set before insertion */ 465add46132SPeter Zijlstra if (aw->ww_ctx) { 466add46132SPeter Zijlstra if (!bw->ww_ctx) 467add46132SPeter Zijlstra return 1; 468add46132SPeter Zijlstra 469add46132SPeter Zijlstra return (signed long)(aw->ww_ctx->stamp - 470add46132SPeter Zijlstra bw->ww_ctx->stamp) < 0; 471add46132SPeter Zijlstra } 472add46132SPeter Zijlstra 473add46132SPeter Zijlstra return 0; 4745a798725SPeter Zijlstra } 4755a798725SPeter Zijlstra 476d7a2edb8SThomas Gleixner static __always_inline void 477830e6accSPeter Zijlstra rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) 478fb00aca4SPeter Zijlstra { 479f7853c34SPeter Zijlstra lockdep_assert_held(&lock->wait_lock); 480f7853c34SPeter Zijlstra 481f7853c34SPeter Zijlstra rb_add_cached(&waiter->tree.entry, &lock->waiters, __waiter_less); 482fb00aca4SPeter Zijlstra } 483fb00aca4SPeter Zijlstra 484d7a2edb8SThomas Gleixner static __always_inline void 485830e6accSPeter Zijlstra rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) 486fb00aca4SPeter Zijlstra { 487f7853c34SPeter Zijlstra lockdep_assert_held(&lock->wait_lock); 488f7853c34SPeter Zijlstra 489f7853c34SPeter Zijlstra if (RB_EMPTY_NODE(&waiter->tree.entry)) 490fb00aca4SPeter Zijlstra return; 491fb00aca4SPeter Zijlstra 492f7853c34SPeter Zijlstra rb_erase_cached(&waiter->tree.entry, &lock->waiters); 493f7853c34SPeter Zijlstra RB_CLEAR_NODE(&waiter->tree.entry); 494fb00aca4SPeter Zijlstra } 495fb00aca4SPeter Zijlstra 496f7853c34SPeter Zijlstra #define __node_2_rt_node(node) \ 497f7853c34SPeter Zijlstra rb_entry((node), struct rt_waiter_node, entry) 4985a798725SPeter Zijlstra 499f7853c34SPeter Zijlstra static __always_inline bool __pi_waiter_less(struct rb_node *a, const struct rb_node *b) 5005a798725SPeter Zijlstra { 501f7853c34SPeter Zijlstra return rt_waiter_node_less(__node_2_rt_node(a), __node_2_rt_node(b)); 5025a798725SPeter Zijlstra } 5035a798725SPeter Zijlstra 504d7a2edb8SThomas Gleixner static __always_inline void 505fb00aca4SPeter Zijlstra rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) 506fb00aca4SPeter Zijlstra { 507f7853c34SPeter Zijlstra lockdep_assert_held(&task->pi_lock); 508f7853c34SPeter Zijlstra 509f7853c34SPeter Zijlstra rb_add_cached(&waiter->pi_tree.entry, &task->pi_waiters, __pi_waiter_less); 510fb00aca4SPeter Zijlstra } 511fb00aca4SPeter Zijlstra 512d7a2edb8SThomas Gleixner static __always_inline void 513fb00aca4SPeter Zijlstra rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) 514fb00aca4SPeter Zijlstra { 515f7853c34SPeter Zijlstra lockdep_assert_held(&task->pi_lock); 516f7853c34SPeter Zijlstra 517f7853c34SPeter Zijlstra if (RB_EMPTY_NODE(&waiter->pi_tree.entry)) 518fb00aca4SPeter Zijlstra return; 519fb00aca4SPeter Zijlstra 520f7853c34SPeter Zijlstra rb_erase_cached(&waiter->pi_tree.entry, &task->pi_waiters); 521f7853c34SPeter Zijlstra RB_CLEAR_NODE(&waiter->pi_tree.entry); 522fb00aca4SPeter Zijlstra } 523fb00aca4SPeter Zijlstra 524f7853c34SPeter Zijlstra static __always_inline void rt_mutex_adjust_prio(struct rt_mutex_base *lock, 525f7853c34SPeter Zijlstra struct task_struct *p) 526e96a7705SXunlei Pang { 527acd58620SPeter Zijlstra struct task_struct *pi_task = NULL; 528e96a7705SXunlei Pang 529f7853c34SPeter Zijlstra lockdep_assert_held(&lock->wait_lock); 530f7853c34SPeter Zijlstra lockdep_assert(rt_mutex_owner(lock) == p); 531acd58620SPeter Zijlstra lockdep_assert_held(&p->pi_lock); 532e96a7705SXunlei Pang 533acd58620SPeter Zijlstra if (task_has_pi_waiters(p)) 534acd58620SPeter Zijlstra pi_task = task_top_pi_waiter(p)->task; 5351696a8beSPeter Zijlstra 536acd58620SPeter Zijlstra rt_mutex_setprio(p, pi_task); 5371696a8beSPeter Zijlstra } 5381696a8beSPeter Zijlstra 539b576e640SThomas Gleixner /* RT mutex specific wake_q wrappers */ 5409321f815SThomas Gleixner static __always_inline void rt_mutex_wake_q_add_task(struct rt_wake_q_head *wqh, 5419321f815SThomas Gleixner struct task_struct *task, 5429321f815SThomas Gleixner unsigned int wake_state) 5439321f815SThomas Gleixner { 5449321f815SThomas Gleixner if (IS_ENABLED(CONFIG_PREEMPT_RT) && wake_state == TASK_RTLOCK_WAIT) { 5459321f815SThomas Gleixner if (IS_ENABLED(CONFIG_PROVE_LOCKING)) 5469321f815SThomas Gleixner WARN_ON_ONCE(wqh->rtlock_task); 5479321f815SThomas Gleixner get_task_struct(task); 5489321f815SThomas Gleixner wqh->rtlock_task = task; 5499321f815SThomas Gleixner } else { 5509321f815SThomas Gleixner wake_q_add(&wqh->head, task); 5519321f815SThomas Gleixner } 5529321f815SThomas Gleixner } 5539321f815SThomas Gleixner 554b576e640SThomas Gleixner static __always_inline void rt_mutex_wake_q_add(struct rt_wake_q_head *wqh, 555b576e640SThomas Gleixner struct rt_mutex_waiter *w) 556b576e640SThomas Gleixner { 5579321f815SThomas Gleixner rt_mutex_wake_q_add_task(wqh, w->task, w->wake_state); 558456cfbc6SThomas Gleixner } 559b576e640SThomas Gleixner 560b576e640SThomas Gleixner static __always_inline void rt_mutex_wake_up_q(struct rt_wake_q_head *wqh) 561b576e640SThomas Gleixner { 562456cfbc6SThomas Gleixner if (IS_ENABLED(CONFIG_PREEMPT_RT) && wqh->rtlock_task) { 563456cfbc6SThomas Gleixner wake_up_state(wqh->rtlock_task, TASK_RTLOCK_WAIT); 564456cfbc6SThomas Gleixner put_task_struct(wqh->rtlock_task); 565456cfbc6SThomas Gleixner wqh->rtlock_task = NULL; 566456cfbc6SThomas Gleixner } 567456cfbc6SThomas Gleixner 568456cfbc6SThomas Gleixner if (!wake_q_empty(&wqh->head)) 569b576e640SThomas Gleixner wake_up_q(&wqh->head); 570b576e640SThomas Gleixner 571b576e640SThomas Gleixner /* Pairs with preempt_disable() in mark_wakeup_next_waiter() */ 572b576e640SThomas Gleixner preempt_enable(); 573b576e640SThomas Gleixner } 574b576e640SThomas Gleixner 5751696a8beSPeter Zijlstra /* 5768930ed80SThomas Gleixner * Deadlock detection is conditional: 5778930ed80SThomas Gleixner * 5788930ed80SThomas Gleixner * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted 5798930ed80SThomas Gleixner * if the detect argument is == RT_MUTEX_FULL_CHAINWALK. 5808930ed80SThomas Gleixner * 5818930ed80SThomas Gleixner * If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always 5828930ed80SThomas Gleixner * conducted independent of the detect argument. 5838930ed80SThomas Gleixner * 5848930ed80SThomas Gleixner * If the waiter argument is NULL this indicates the deboost path and 5858930ed80SThomas Gleixner * deadlock detection is disabled independent of the detect argument 5868930ed80SThomas Gleixner * and the config settings. 5878930ed80SThomas Gleixner */ 588d7a2edb8SThomas Gleixner static __always_inline bool 589d7a2edb8SThomas Gleixner rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter, 5908930ed80SThomas Gleixner enum rtmutex_chainwalk chwalk) 5918930ed80SThomas Gleixner { 59207d25971SZhen Lei if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES)) 593f7efc479SThomas Gleixner return waiter != NULL; 594f7efc479SThomas Gleixner return chwalk == RT_MUTEX_FULL_CHAINWALK; 5958930ed80SThomas Gleixner } 5968930ed80SThomas Gleixner 597830e6accSPeter Zijlstra static __always_inline struct rt_mutex_base *task_blocked_on_lock(struct task_struct *p) 59882084984SThomas Gleixner { 59982084984SThomas Gleixner return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL; 60082084984SThomas Gleixner } 60182084984SThomas Gleixner 6021696a8beSPeter Zijlstra /* 6031696a8beSPeter Zijlstra * Adjust the priority chain. Also used for deadlock detection. 6041696a8beSPeter Zijlstra * Decreases task's usage by one - may thus free the task. 6051696a8beSPeter Zijlstra * 60682084984SThomas Gleixner * @task: the task owning the mutex (owner) for which a chain walk is 60782084984SThomas Gleixner * probably needed 608e6beaa36STom(JeHyeon) Yeon * @chwalk: do we have to carry out deadlock detection? 6091696a8beSPeter Zijlstra * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck 6101696a8beSPeter Zijlstra * things for a task that has just got its priority adjusted, and 6111696a8beSPeter Zijlstra * is waiting on a mutex) 61282084984SThomas Gleixner * @next_lock: the mutex on which the owner of @orig_lock was blocked before 61382084984SThomas Gleixner * we dropped its pi_lock. Is never dereferenced, only used for 61482084984SThomas Gleixner * comparison to detect lock chain changes. 6151696a8beSPeter Zijlstra * @orig_waiter: rt_mutex_waiter struct for the task that has just donated 6161696a8beSPeter Zijlstra * its priority to the mutex owner (can be NULL in the case 6171696a8beSPeter Zijlstra * depicted above or if the top waiter is gone away and we are 6181696a8beSPeter Zijlstra * actually deboosting the owner) 6191696a8beSPeter Zijlstra * @top_task: the current top waiter 6201696a8beSPeter Zijlstra * 6211696a8beSPeter Zijlstra * Returns 0 or -EDEADLK. 6223eb65aeaSThomas Gleixner * 6233eb65aeaSThomas Gleixner * Chain walk basics and protection scope 6243eb65aeaSThomas Gleixner * 6253eb65aeaSThomas Gleixner * [R] refcount on task 626f7853c34SPeter Zijlstra * [Pn] task->pi_lock held 6273eb65aeaSThomas Gleixner * [L] rtmutex->wait_lock held 6283eb65aeaSThomas Gleixner * 629f7853c34SPeter Zijlstra * Normal locking order: 630f7853c34SPeter Zijlstra * 631f7853c34SPeter Zijlstra * rtmutex->wait_lock 632f7853c34SPeter Zijlstra * task->pi_lock 633f7853c34SPeter Zijlstra * 6343eb65aeaSThomas Gleixner * Step Description Protected by 6353eb65aeaSThomas Gleixner * function arguments: 6363eb65aeaSThomas Gleixner * @task [R] 6373eb65aeaSThomas Gleixner * @orig_lock if != NULL @top_task is blocked on it 6383eb65aeaSThomas Gleixner * @next_lock Unprotected. Cannot be 6393eb65aeaSThomas Gleixner * dereferenced. Only used for 6403eb65aeaSThomas Gleixner * comparison. 6413eb65aeaSThomas Gleixner * @orig_waiter if != NULL @top_task is blocked on it 6423eb65aeaSThomas Gleixner * @top_task current, or in case of proxy 6433eb65aeaSThomas Gleixner * locking protected by calling 6443eb65aeaSThomas Gleixner * code 6453eb65aeaSThomas Gleixner * again: 6463eb65aeaSThomas Gleixner * loop_sanity_check(); 6473eb65aeaSThomas Gleixner * retry: 648f7853c34SPeter Zijlstra * [1] lock(task->pi_lock); [R] acquire [P1] 649f7853c34SPeter Zijlstra * [2] waiter = task->pi_blocked_on; [P1] 650f7853c34SPeter Zijlstra * [3] check_exit_conditions_1(); [P1] 651f7853c34SPeter Zijlstra * [4] lock = waiter->lock; [P1] 652f7853c34SPeter Zijlstra * [5] if (!try_lock(lock->wait_lock)) { [P1] try to acquire [L] 653f7853c34SPeter Zijlstra * unlock(task->pi_lock); release [P1] 6543eb65aeaSThomas Gleixner * goto retry; 6553eb65aeaSThomas Gleixner * } 656f7853c34SPeter Zijlstra * [6] check_exit_conditions_2(); [P1] + [L] 657f7853c34SPeter Zijlstra * [7] requeue_lock_waiter(lock, waiter); [P1] + [L] 658f7853c34SPeter Zijlstra * [8] unlock(task->pi_lock); release [P1] 6593eb65aeaSThomas Gleixner * put_task_struct(task); release [R] 6603eb65aeaSThomas Gleixner * [9] check_exit_conditions_3(); [L] 6613eb65aeaSThomas Gleixner * [10] task = owner(lock); [L] 6623eb65aeaSThomas Gleixner * get_task_struct(task); [L] acquire [R] 663f7853c34SPeter Zijlstra * lock(task->pi_lock); [L] acquire [P2] 664f7853c34SPeter Zijlstra * [11] requeue_pi_waiter(tsk, waiters(lock));[P2] + [L] 665f7853c34SPeter Zijlstra * [12] check_exit_conditions_4(); [P2] + [L] 666f7853c34SPeter Zijlstra * [13] unlock(task->pi_lock); release [P2] 6673eb65aeaSThomas Gleixner * unlock(lock->wait_lock); release [L] 6683eb65aeaSThomas Gleixner * goto again; 669f7853c34SPeter Zijlstra * 670f7853c34SPeter Zijlstra * Where P1 is the blocking task and P2 is the lock owner; going up one step 671f7853c34SPeter Zijlstra * the owner becomes the next blocked task etc.. 672f7853c34SPeter Zijlstra * 673f7853c34SPeter Zijlstra * 6741696a8beSPeter Zijlstra */ 675d7a2edb8SThomas Gleixner static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task, 6768930ed80SThomas Gleixner enum rtmutex_chainwalk chwalk, 677830e6accSPeter Zijlstra struct rt_mutex_base *orig_lock, 678830e6accSPeter Zijlstra struct rt_mutex_base *next_lock, 6791696a8beSPeter Zijlstra struct rt_mutex_waiter *orig_waiter, 6801696a8beSPeter Zijlstra struct task_struct *top_task) 6811696a8beSPeter Zijlstra { 6821696a8beSPeter Zijlstra struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter; 683a57594a1SThomas Gleixner struct rt_mutex_waiter *prerequeue_top_waiter; 6848930ed80SThomas Gleixner int ret = 0, depth = 0; 685830e6accSPeter Zijlstra struct rt_mutex_base *lock; 6868930ed80SThomas Gleixner bool detect_deadlock; 68767792e2cSThomas Gleixner bool requeue = true; 6881696a8beSPeter Zijlstra 6898930ed80SThomas Gleixner detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk); 6901696a8beSPeter Zijlstra 6911696a8beSPeter Zijlstra /* 6921696a8beSPeter Zijlstra * The (de)boosting is a step by step approach with a lot of 6931696a8beSPeter Zijlstra * pitfalls. We want this to be preemptible and we want hold a 6941696a8beSPeter Zijlstra * maximum of two locks per step. So we have to check 6951696a8beSPeter Zijlstra * carefully whether things change under us. 6961696a8beSPeter Zijlstra */ 6971696a8beSPeter Zijlstra again: 6983eb65aeaSThomas Gleixner /* 6993eb65aeaSThomas Gleixner * We limit the lock chain length for each invocation. 7003eb65aeaSThomas Gleixner */ 7011696a8beSPeter Zijlstra if (++depth > max_lock_depth) { 7021696a8beSPeter Zijlstra static int prev_max; 7031696a8beSPeter Zijlstra 7041696a8beSPeter Zijlstra /* 7051696a8beSPeter Zijlstra * Print this only once. If the admin changes the limit, 7061696a8beSPeter Zijlstra * print a new message when reaching the limit again. 7071696a8beSPeter Zijlstra */ 7081696a8beSPeter Zijlstra if (prev_max != max_lock_depth) { 7091696a8beSPeter Zijlstra prev_max = max_lock_depth; 7101696a8beSPeter Zijlstra printk(KERN_WARNING "Maximum lock depth %d reached " 7111696a8beSPeter Zijlstra "task: %s (%d)\n", max_lock_depth, 7121696a8beSPeter Zijlstra top_task->comm, task_pid_nr(top_task)); 7131696a8beSPeter Zijlstra } 7141696a8beSPeter Zijlstra put_task_struct(task); 7151696a8beSPeter Zijlstra 7163d5c9340SThomas Gleixner return -EDEADLK; 7171696a8beSPeter Zijlstra } 7183eb65aeaSThomas Gleixner 7193eb65aeaSThomas Gleixner /* 7203eb65aeaSThomas Gleixner * We are fully preemptible here and only hold the refcount on 7213eb65aeaSThomas Gleixner * @task. So everything can have changed under us since the 7223eb65aeaSThomas Gleixner * caller or our own code below (goto retry/again) dropped all 7233eb65aeaSThomas Gleixner * locks. 7243eb65aeaSThomas Gleixner */ 7251696a8beSPeter Zijlstra retry: 7261696a8beSPeter Zijlstra /* 7273eb65aeaSThomas Gleixner * [1] Task cannot go away as we did a get_task() before ! 7281696a8beSPeter Zijlstra */ 729b4abf910SThomas Gleixner raw_spin_lock_irq(&task->pi_lock); 7301696a8beSPeter Zijlstra 7313eb65aeaSThomas Gleixner /* 7323eb65aeaSThomas Gleixner * [2] Get the waiter on which @task is blocked on. 7333eb65aeaSThomas Gleixner */ 7341696a8beSPeter Zijlstra waiter = task->pi_blocked_on; 7353eb65aeaSThomas Gleixner 7363eb65aeaSThomas Gleixner /* 7373eb65aeaSThomas Gleixner * [3] check_exit_conditions_1() protected by task->pi_lock. 7383eb65aeaSThomas Gleixner */ 7393eb65aeaSThomas Gleixner 7401696a8beSPeter Zijlstra /* 7411696a8beSPeter Zijlstra * Check whether the end of the boosting chain has been 7421696a8beSPeter Zijlstra * reached or the state of the chain has changed while we 7431696a8beSPeter Zijlstra * dropped the locks. 7441696a8beSPeter Zijlstra */ 7451696a8beSPeter Zijlstra if (!waiter) 7461696a8beSPeter Zijlstra goto out_unlock_pi; 7471696a8beSPeter Zijlstra 7481696a8beSPeter Zijlstra /* 7491696a8beSPeter Zijlstra * Check the orig_waiter state. After we dropped the locks, 7501696a8beSPeter Zijlstra * the previous owner of the lock might have released the lock. 7511696a8beSPeter Zijlstra */ 7521696a8beSPeter Zijlstra if (orig_waiter && !rt_mutex_owner(orig_lock)) 7531696a8beSPeter Zijlstra goto out_unlock_pi; 7541696a8beSPeter Zijlstra 7551696a8beSPeter Zijlstra /* 75682084984SThomas Gleixner * We dropped all locks after taking a refcount on @task, so 75782084984SThomas Gleixner * the task might have moved on in the lock chain or even left 75882084984SThomas Gleixner * the chain completely and blocks now on an unrelated lock or 75982084984SThomas Gleixner * on @orig_lock. 76082084984SThomas Gleixner * 76182084984SThomas Gleixner * We stored the lock on which @task was blocked in @next_lock, 76282084984SThomas Gleixner * so we can detect the chain change. 76382084984SThomas Gleixner */ 76482084984SThomas Gleixner if (next_lock != waiter->lock) 76582084984SThomas Gleixner goto out_unlock_pi; 76682084984SThomas Gleixner 76782084984SThomas Gleixner /* 7686467822bSPeter Zijlstra * There could be 'spurious' loops in the lock graph due to ww_mutex, 7696467822bSPeter Zijlstra * consider: 7706467822bSPeter Zijlstra * 7716467822bSPeter Zijlstra * P1: A, ww_A, ww_B 7726467822bSPeter Zijlstra * P2: ww_B, ww_A 7736467822bSPeter Zijlstra * P3: A 7746467822bSPeter Zijlstra * 7756467822bSPeter Zijlstra * P3 should not return -EDEADLK because it gets trapped in the cycle 7766467822bSPeter Zijlstra * created by P1 and P2 (which will resolve -- and runs into 7776467822bSPeter Zijlstra * max_lock_depth above). Therefore disable detect_deadlock such that 7786467822bSPeter Zijlstra * the below termination condition can trigger once all relevant tasks 7796467822bSPeter Zijlstra * are boosted. 7806467822bSPeter Zijlstra * 7816467822bSPeter Zijlstra * Even when we start with ww_mutex we can disable deadlock detection, 7826467822bSPeter Zijlstra * since we would supress a ww_mutex induced deadlock at [6] anyway. 7836467822bSPeter Zijlstra * Supressing it here however is not sufficient since we might still 7846467822bSPeter Zijlstra * hit [6] due to adjustment driven iteration. 7856467822bSPeter Zijlstra * 7866467822bSPeter Zijlstra * NOTE: if someone were to create a deadlock between 2 ww_classes we'd 7876467822bSPeter Zijlstra * utterly fail to report it; lockdep should. 7886467822bSPeter Zijlstra */ 7896467822bSPeter Zijlstra if (IS_ENABLED(CONFIG_PREEMPT_RT) && waiter->ww_ctx && detect_deadlock) 7906467822bSPeter Zijlstra detect_deadlock = false; 7916467822bSPeter Zijlstra 7926467822bSPeter Zijlstra /* 7931696a8beSPeter Zijlstra * Drop out, when the task has no waiters. Note, 7941696a8beSPeter Zijlstra * top_waiter can be NULL, when we are in the deboosting 7951696a8beSPeter Zijlstra * mode! 7961696a8beSPeter Zijlstra */ 797397335f0SThomas Gleixner if (top_waiter) { 798397335f0SThomas Gleixner if (!task_has_pi_waiters(task)) 7991696a8beSPeter Zijlstra goto out_unlock_pi; 800397335f0SThomas Gleixner /* 801397335f0SThomas Gleixner * If deadlock detection is off, we stop here if we 80267792e2cSThomas Gleixner * are not the top pi waiter of the task. If deadlock 80367792e2cSThomas Gleixner * detection is enabled we continue, but stop the 80467792e2cSThomas Gleixner * requeueing in the chain walk. 805397335f0SThomas Gleixner */ 80667792e2cSThomas Gleixner if (top_waiter != task_top_pi_waiter(task)) { 80767792e2cSThomas Gleixner if (!detect_deadlock) 808397335f0SThomas Gleixner goto out_unlock_pi; 80967792e2cSThomas Gleixner else 81067792e2cSThomas Gleixner requeue = false; 81167792e2cSThomas Gleixner } 812397335f0SThomas Gleixner } 8131696a8beSPeter Zijlstra 8141696a8beSPeter Zijlstra /* 81567792e2cSThomas Gleixner * If the waiter priority is the same as the task priority 81667792e2cSThomas Gleixner * then there is no further priority adjustment necessary. If 81767792e2cSThomas Gleixner * deadlock detection is off, we stop the chain walk. If its 81867792e2cSThomas Gleixner * enabled we continue, but stop the requeueing in the chain 81967792e2cSThomas Gleixner * walk. 8201696a8beSPeter Zijlstra */ 821f7853c34SPeter Zijlstra if (rt_waiter_node_equal(&waiter->tree, task_to_waiter_node(task))) { 82267792e2cSThomas Gleixner if (!detect_deadlock) 8231696a8beSPeter Zijlstra goto out_unlock_pi; 82467792e2cSThomas Gleixner else 82567792e2cSThomas Gleixner requeue = false; 82667792e2cSThomas Gleixner } 8271696a8beSPeter Zijlstra 8283eb65aeaSThomas Gleixner /* 829f7853c34SPeter Zijlstra * [4] Get the next lock; per holding task->pi_lock we can't unblock 830f7853c34SPeter Zijlstra * and guarantee @lock's existence. 8313eb65aeaSThomas Gleixner */ 8321696a8beSPeter Zijlstra lock = waiter->lock; 8333eb65aeaSThomas Gleixner /* 8343eb65aeaSThomas Gleixner * [5] We need to trylock here as we are holding task->pi_lock, 8353eb65aeaSThomas Gleixner * which is the reverse lock order versus the other rtmutex 8363eb65aeaSThomas Gleixner * operations. 837f7853c34SPeter Zijlstra * 838f7853c34SPeter Zijlstra * Per the above, holding task->pi_lock guarantees lock exists, so 839f7853c34SPeter Zijlstra * inverting this lock order is infeasible from a life-time 840f7853c34SPeter Zijlstra * perspective. 8413eb65aeaSThomas Gleixner */ 8421696a8beSPeter Zijlstra if (!raw_spin_trylock(&lock->wait_lock)) { 843b4abf910SThomas Gleixner raw_spin_unlock_irq(&task->pi_lock); 8441696a8beSPeter Zijlstra cpu_relax(); 8451696a8beSPeter Zijlstra goto retry; 8461696a8beSPeter Zijlstra } 8471696a8beSPeter Zijlstra 848397335f0SThomas Gleixner /* 8493eb65aeaSThomas Gleixner * [6] check_exit_conditions_2() protected by task->pi_lock and 8503eb65aeaSThomas Gleixner * lock->wait_lock. 8513eb65aeaSThomas Gleixner * 852397335f0SThomas Gleixner * Deadlock detection. If the lock is the same as the original 853397335f0SThomas Gleixner * lock which caused us to walk the lock chain or if the 854397335f0SThomas Gleixner * current lock is owned by the task which initiated the chain 855397335f0SThomas Gleixner * walk, we detected a deadlock. 856397335f0SThomas Gleixner */ 8571696a8beSPeter Zijlstra if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { 8583d5c9340SThomas Gleixner ret = -EDEADLK; 859a055fcc1SPeter Zijlstra 860a055fcc1SPeter Zijlstra /* 861a055fcc1SPeter Zijlstra * When the deadlock is due to ww_mutex; also see above. Don't 862a055fcc1SPeter Zijlstra * report the deadlock and instead let the ww_mutex wound/die 863a055fcc1SPeter Zijlstra * logic pick which of the contending threads gets -EDEADLK. 864a055fcc1SPeter Zijlstra * 865a055fcc1SPeter Zijlstra * NOTE: assumes the cycle only contains a single ww_class; any 866a055fcc1SPeter Zijlstra * other configuration and we fail to report; also, see 867a055fcc1SPeter Zijlstra * lockdep. 868a055fcc1SPeter Zijlstra */ 869e5480572SPeter Zijlstra if (IS_ENABLED(CONFIG_PREEMPT_RT) && orig_waiter && orig_waiter->ww_ctx) 870a055fcc1SPeter Zijlstra ret = 0; 871a055fcc1SPeter Zijlstra 872a055fcc1SPeter Zijlstra raw_spin_unlock(&lock->wait_lock); 8731696a8beSPeter Zijlstra goto out_unlock_pi; 8741696a8beSPeter Zijlstra } 8751696a8beSPeter Zijlstra 876a57594a1SThomas Gleixner /* 87767792e2cSThomas Gleixner * If we just follow the lock chain for deadlock detection, no 87867792e2cSThomas Gleixner * need to do all the requeue operations. To avoid a truckload 87967792e2cSThomas Gleixner * of conditionals around the various places below, just do the 88067792e2cSThomas Gleixner * minimum chain walk checks. 88167792e2cSThomas Gleixner */ 88267792e2cSThomas Gleixner if (!requeue) { 88367792e2cSThomas Gleixner /* 88467792e2cSThomas Gleixner * No requeue[7] here. Just release @task [8] 88567792e2cSThomas Gleixner */ 886b4abf910SThomas Gleixner raw_spin_unlock(&task->pi_lock); 88767792e2cSThomas Gleixner put_task_struct(task); 88867792e2cSThomas Gleixner 88967792e2cSThomas Gleixner /* 89067792e2cSThomas Gleixner * [9] check_exit_conditions_3 protected by lock->wait_lock. 89167792e2cSThomas Gleixner * If there is no owner of the lock, end of chain. 89267792e2cSThomas Gleixner */ 89367792e2cSThomas Gleixner if (!rt_mutex_owner(lock)) { 894b4abf910SThomas Gleixner raw_spin_unlock_irq(&lock->wait_lock); 89567792e2cSThomas Gleixner return 0; 89667792e2cSThomas Gleixner } 89767792e2cSThomas Gleixner 89867792e2cSThomas Gleixner /* [10] Grab the next task, i.e. owner of @lock */ 8997b3c92b8SMatthew Wilcox (Oracle) task = get_task_struct(rt_mutex_owner(lock)); 900b4abf910SThomas Gleixner raw_spin_lock(&task->pi_lock); 90167792e2cSThomas Gleixner 90267792e2cSThomas Gleixner /* 90367792e2cSThomas Gleixner * No requeue [11] here. We just do deadlock detection. 90467792e2cSThomas Gleixner * 90567792e2cSThomas Gleixner * [12] Store whether owner is blocked 90667792e2cSThomas Gleixner * itself. Decision is made after dropping the locks 90767792e2cSThomas Gleixner */ 90867792e2cSThomas Gleixner next_lock = task_blocked_on_lock(task); 90967792e2cSThomas Gleixner /* 91067792e2cSThomas Gleixner * Get the top waiter for the next iteration 91167792e2cSThomas Gleixner */ 91267792e2cSThomas Gleixner top_waiter = rt_mutex_top_waiter(lock); 91367792e2cSThomas Gleixner 91467792e2cSThomas Gleixner /* [13] Drop locks */ 915b4abf910SThomas Gleixner raw_spin_unlock(&task->pi_lock); 916b4abf910SThomas Gleixner raw_spin_unlock_irq(&lock->wait_lock); 91767792e2cSThomas Gleixner 91867792e2cSThomas Gleixner /* If owner is not blocked, end of chain. */ 91967792e2cSThomas Gleixner if (!next_lock) 92067792e2cSThomas Gleixner goto out_put_task; 92167792e2cSThomas Gleixner goto again; 92267792e2cSThomas Gleixner } 92367792e2cSThomas Gleixner 92467792e2cSThomas Gleixner /* 925a57594a1SThomas Gleixner * Store the current top waiter before doing the requeue 926a57594a1SThomas Gleixner * operation on @lock. We need it for the boost/deboost 927a57594a1SThomas Gleixner * decision below. 928a57594a1SThomas Gleixner */ 929a57594a1SThomas Gleixner prerequeue_top_waiter = rt_mutex_top_waiter(lock); 9301696a8beSPeter Zijlstra 9319f40a51aSDavidlohr Bueso /* [7] Requeue the waiter in the lock waiter tree. */ 932fb00aca4SPeter Zijlstra rt_mutex_dequeue(lock, waiter); 933e0aad5b4SPeter Zijlstra 934e0aad5b4SPeter Zijlstra /* 935e0aad5b4SPeter Zijlstra * Update the waiter prio fields now that we're dequeued. 936e0aad5b4SPeter Zijlstra * 937e0aad5b4SPeter Zijlstra * These values can have changed through either: 938e0aad5b4SPeter Zijlstra * 939e0aad5b4SPeter Zijlstra * sys_sched_set_scheduler() / sys_sched_setattr() 940e0aad5b4SPeter Zijlstra * 941e0aad5b4SPeter Zijlstra * or 942e0aad5b4SPeter Zijlstra * 943e0aad5b4SPeter Zijlstra * DL CBS enforcement advancing the effective deadline. 944e0aad5b4SPeter Zijlstra */ 945715f7f9eSPeter Zijlstra waiter_update_prio(waiter, task); 946e0aad5b4SPeter Zijlstra 947fb00aca4SPeter Zijlstra rt_mutex_enqueue(lock, waiter); 9481696a8beSPeter Zijlstra 949f7853c34SPeter Zijlstra /* 950f7853c34SPeter Zijlstra * [8] Release the (blocking) task in preparation for 951f7853c34SPeter Zijlstra * taking the owner task in [10]. 952f7853c34SPeter Zijlstra * 953f7853c34SPeter Zijlstra * Since we hold lock->waiter_lock, task cannot unblock, even if we 954f7853c34SPeter Zijlstra * release task->pi_lock. 955f7853c34SPeter Zijlstra */ 956b4abf910SThomas Gleixner raw_spin_unlock(&task->pi_lock); 9572ffa5a5cSThomas Gleixner put_task_struct(task); 9582ffa5a5cSThomas Gleixner 959a57594a1SThomas Gleixner /* 9603eb65aeaSThomas Gleixner * [9] check_exit_conditions_3 protected by lock->wait_lock. 9613eb65aeaSThomas Gleixner * 962a57594a1SThomas Gleixner * We must abort the chain walk if there is no lock owner even 963a57594a1SThomas Gleixner * in the dead lock detection case, as we have nothing to 964a57594a1SThomas Gleixner * follow here. This is the end of the chain we are walking. 965a57594a1SThomas Gleixner */ 9661696a8beSPeter Zijlstra if (!rt_mutex_owner(lock)) { 9671696a8beSPeter Zijlstra /* 9683eb65aeaSThomas Gleixner * If the requeue [7] above changed the top waiter, 9693eb65aeaSThomas Gleixner * then we need to wake the new top waiter up to try 9703eb65aeaSThomas Gleixner * to get the lock. 9711696a8beSPeter Zijlstra */ 972db370a8bSWander Lairson Costa top_waiter = rt_mutex_top_waiter(lock); 973db370a8bSWander Lairson Costa if (prerequeue_top_waiter != top_waiter) 974db370a8bSWander Lairson Costa wake_up_state(top_waiter->task, top_waiter->wake_state); 975b4abf910SThomas Gleixner raw_spin_unlock_irq(&lock->wait_lock); 9762ffa5a5cSThomas Gleixner return 0; 9771696a8beSPeter Zijlstra } 9781696a8beSPeter Zijlstra 979f7853c34SPeter Zijlstra /* 980f7853c34SPeter Zijlstra * [10] Grab the next task, i.e. the owner of @lock 981f7853c34SPeter Zijlstra * 982f7853c34SPeter Zijlstra * Per holding lock->wait_lock and checking for !owner above, there 983f7853c34SPeter Zijlstra * must be an owner and it cannot go away. 984f7853c34SPeter Zijlstra */ 9857b3c92b8SMatthew Wilcox (Oracle) task = get_task_struct(rt_mutex_owner(lock)); 986b4abf910SThomas Gleixner raw_spin_lock(&task->pi_lock); 9871696a8beSPeter Zijlstra 9883eb65aeaSThomas Gleixner /* [11] requeue the pi waiters if necessary */ 9891696a8beSPeter Zijlstra if (waiter == rt_mutex_top_waiter(lock)) { 990a57594a1SThomas Gleixner /* 991a57594a1SThomas Gleixner * The waiter became the new top (highest priority) 992a57594a1SThomas Gleixner * waiter on the lock. Replace the previous top waiter 9939f40a51aSDavidlohr Bueso * in the owner tasks pi waiters tree with this waiter 994a57594a1SThomas Gleixner * and adjust the priority of the owner. 995a57594a1SThomas Gleixner */ 996a57594a1SThomas Gleixner rt_mutex_dequeue_pi(task, prerequeue_top_waiter); 997f7853c34SPeter Zijlstra waiter_clone_prio(waiter, task); 998fb00aca4SPeter Zijlstra rt_mutex_enqueue_pi(task, waiter); 999f7853c34SPeter Zijlstra rt_mutex_adjust_prio(lock, task); 10001696a8beSPeter Zijlstra 1001a57594a1SThomas Gleixner } else if (prerequeue_top_waiter == waiter) { 1002a57594a1SThomas Gleixner /* 1003a57594a1SThomas Gleixner * The waiter was the top waiter on the lock, but is 1004e2db7592SIngo Molnar * no longer the top priority waiter. Replace waiter in 10059f40a51aSDavidlohr Bueso * the owner tasks pi waiters tree with the new top 1006a57594a1SThomas Gleixner * (highest priority) waiter and adjust the priority 1007a57594a1SThomas Gleixner * of the owner. 1008a57594a1SThomas Gleixner * The new top waiter is stored in @waiter so that 1009a57594a1SThomas Gleixner * @waiter == @top_waiter evaluates to true below and 1010a57594a1SThomas Gleixner * we continue to deboost the rest of the chain. 1011a57594a1SThomas Gleixner */ 1012fb00aca4SPeter Zijlstra rt_mutex_dequeue_pi(task, waiter); 10131696a8beSPeter Zijlstra waiter = rt_mutex_top_waiter(lock); 1014f7853c34SPeter Zijlstra waiter_clone_prio(waiter, task); 1015fb00aca4SPeter Zijlstra rt_mutex_enqueue_pi(task, waiter); 1016f7853c34SPeter Zijlstra rt_mutex_adjust_prio(lock, task); 1017a57594a1SThomas Gleixner } else { 1018a57594a1SThomas Gleixner /* 1019a57594a1SThomas Gleixner * Nothing changed. No need to do any priority 1020a57594a1SThomas Gleixner * adjustment. 1021a57594a1SThomas Gleixner */ 10221696a8beSPeter Zijlstra } 10231696a8beSPeter Zijlstra 102482084984SThomas Gleixner /* 10253eb65aeaSThomas Gleixner * [12] check_exit_conditions_4() protected by task->pi_lock 10263eb65aeaSThomas Gleixner * and lock->wait_lock. The actual decisions are made after we 10273eb65aeaSThomas Gleixner * dropped the locks. 10283eb65aeaSThomas Gleixner * 102982084984SThomas Gleixner * Check whether the task which owns the current lock is pi 103082084984SThomas Gleixner * blocked itself. If yes we store a pointer to the lock for 103182084984SThomas Gleixner * the lock chain change detection above. After we dropped 103282084984SThomas Gleixner * task->pi_lock next_lock cannot be dereferenced anymore. 103382084984SThomas Gleixner */ 103482084984SThomas Gleixner next_lock = task_blocked_on_lock(task); 1035a57594a1SThomas Gleixner /* 1036a57594a1SThomas Gleixner * Store the top waiter of @lock for the end of chain walk 1037a57594a1SThomas Gleixner * decision below. 1038a57594a1SThomas Gleixner */ 10391696a8beSPeter Zijlstra top_waiter = rt_mutex_top_waiter(lock); 10403eb65aeaSThomas Gleixner 10413eb65aeaSThomas Gleixner /* [13] Drop the locks */ 1042b4abf910SThomas Gleixner raw_spin_unlock(&task->pi_lock); 1043b4abf910SThomas Gleixner raw_spin_unlock_irq(&lock->wait_lock); 10441696a8beSPeter Zijlstra 104582084984SThomas Gleixner /* 10463eb65aeaSThomas Gleixner * Make the actual exit decisions [12], based on the stored 10473eb65aeaSThomas Gleixner * values. 10483eb65aeaSThomas Gleixner * 104982084984SThomas Gleixner * We reached the end of the lock chain. Stop right here. No 105082084984SThomas Gleixner * point to go back just to figure that out. 105182084984SThomas Gleixner */ 105282084984SThomas Gleixner if (!next_lock) 105382084984SThomas Gleixner goto out_put_task; 105482084984SThomas Gleixner 1055a57594a1SThomas Gleixner /* 1056a57594a1SThomas Gleixner * If the current waiter is not the top waiter on the lock, 1057a57594a1SThomas Gleixner * then we can stop the chain walk here if we are not in full 1058a57594a1SThomas Gleixner * deadlock detection mode. 1059a57594a1SThomas Gleixner */ 10601696a8beSPeter Zijlstra if (!detect_deadlock && waiter != top_waiter) 10611696a8beSPeter Zijlstra goto out_put_task; 10621696a8beSPeter Zijlstra 10631696a8beSPeter Zijlstra goto again; 10641696a8beSPeter Zijlstra 10651696a8beSPeter Zijlstra out_unlock_pi: 1066b4abf910SThomas Gleixner raw_spin_unlock_irq(&task->pi_lock); 10671696a8beSPeter Zijlstra out_put_task: 10681696a8beSPeter Zijlstra put_task_struct(task); 10691696a8beSPeter Zijlstra 10701696a8beSPeter Zijlstra return ret; 10711696a8beSPeter Zijlstra } 10721696a8beSPeter Zijlstra 10731696a8beSPeter Zijlstra /* 10741696a8beSPeter Zijlstra * Try to take an rt-mutex 10751696a8beSPeter Zijlstra * 1076b4abf910SThomas Gleixner * Must be called with lock->wait_lock held and interrupts disabled 10771696a8beSPeter Zijlstra * 1078358c331fSThomas Gleixner * @lock: The lock to be acquired. 1079358c331fSThomas Gleixner * @task: The task which wants to acquire the lock 10809f40a51aSDavidlohr Bueso * @waiter: The waiter that is queued to the lock's wait tree if the 1081358c331fSThomas Gleixner * callsite called task_blocked_on_lock(), otherwise NULL 10821696a8beSPeter Zijlstra */ 1083d7a2edb8SThomas Gleixner static int __sched 1084830e6accSPeter Zijlstra try_to_take_rt_mutex(struct rt_mutex_base *lock, struct task_struct *task, 10851696a8beSPeter Zijlstra struct rt_mutex_waiter *waiter) 10861696a8beSPeter Zijlstra { 1087e0aad5b4SPeter Zijlstra lockdep_assert_held(&lock->wait_lock); 1088e0aad5b4SPeter Zijlstra 10891696a8beSPeter Zijlstra /* 1090358c331fSThomas Gleixner * Before testing whether we can acquire @lock, we set the 1091358c331fSThomas Gleixner * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all 1092358c331fSThomas Gleixner * other tasks which try to modify @lock into the slow path 1093358c331fSThomas Gleixner * and they serialize on @lock->wait_lock. 10941696a8beSPeter Zijlstra * 1095358c331fSThomas Gleixner * The RT_MUTEX_HAS_WAITERS bit can have a transitional state 1096358c331fSThomas Gleixner * as explained at the top of this file if and only if: 10971696a8beSPeter Zijlstra * 1098358c331fSThomas Gleixner * - There is a lock owner. The caller must fixup the 1099358c331fSThomas Gleixner * transient state if it does a trylock or leaves the lock 1100358c331fSThomas Gleixner * function due to a signal or timeout. 1101358c331fSThomas Gleixner * 1102358c331fSThomas Gleixner * - @task acquires the lock and there are no other 1103358c331fSThomas Gleixner * waiters. This is undone in rt_mutex_set_owner(@task) at 1104358c331fSThomas Gleixner * the end of this function. 11051696a8beSPeter Zijlstra */ 11061696a8beSPeter Zijlstra mark_rt_mutex_waiters(lock); 11071696a8beSPeter Zijlstra 1108358c331fSThomas Gleixner /* 1109358c331fSThomas Gleixner * If @lock has an owner, give up. 1110358c331fSThomas Gleixner */ 11111696a8beSPeter Zijlstra if (rt_mutex_owner(lock)) 11121696a8beSPeter Zijlstra return 0; 11131696a8beSPeter Zijlstra 11141696a8beSPeter Zijlstra /* 1115358c331fSThomas Gleixner * If @waiter != NULL, @task has already enqueued the waiter 11169f40a51aSDavidlohr Bueso * into @lock waiter tree. If @waiter == NULL then this is a 1117358c331fSThomas Gleixner * trylock attempt. 1118358c331fSThomas Gleixner */ 1119358c331fSThomas Gleixner if (waiter) { 112048eb3f4fSGregory Haskins struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock); 1121358c331fSThomas Gleixner 1122358c331fSThomas Gleixner /* 112348eb3f4fSGregory Haskins * If waiter is the highest priority waiter of @lock, 112448eb3f4fSGregory Haskins * or allowed to steal it, take it over. 112548eb3f4fSGregory Haskins */ 112648eb3f4fSGregory Haskins if (waiter == top_waiter || rt_mutex_steal(waiter, top_waiter)) { 112748eb3f4fSGregory Haskins /* 1128358c331fSThomas Gleixner * We can acquire the lock. Remove the waiter from the 11299f40a51aSDavidlohr Bueso * lock waiters tree. 1130358c331fSThomas Gleixner */ 1131358c331fSThomas Gleixner rt_mutex_dequeue(lock, waiter); 113248eb3f4fSGregory Haskins } else { 113348eb3f4fSGregory Haskins return 0; 113448eb3f4fSGregory Haskins } 1135358c331fSThomas Gleixner } else { 1136358c331fSThomas Gleixner /* 1137358c331fSThomas Gleixner * If the lock has waiters already we check whether @task is 1138358c331fSThomas Gleixner * eligible to take over the lock. 1139358c331fSThomas Gleixner * 1140358c331fSThomas Gleixner * If there are no other waiters, @task can acquire 1141358c331fSThomas Gleixner * the lock. @task->pi_blocked_on is NULL, so it does 1142358c331fSThomas Gleixner * not need to be dequeued. 11431696a8beSPeter Zijlstra */ 11441696a8beSPeter Zijlstra if (rt_mutex_has_waiters(lock)) { 114548eb3f4fSGregory Haskins /* Check whether the trylock can steal it. */ 114648eb3f4fSGregory Haskins if (!rt_mutex_steal(task_to_waiter(task), 114719830e55SPeter Zijlstra rt_mutex_top_waiter(lock))) 11481696a8beSPeter Zijlstra return 0; 1149358c331fSThomas Gleixner 1150358c331fSThomas Gleixner /* 1151358c331fSThomas Gleixner * The current top waiter stays enqueued. We 1152358c331fSThomas Gleixner * don't have to change anything in the lock 1153358c331fSThomas Gleixner * waiters order. 1154358c331fSThomas Gleixner */ 1155358c331fSThomas Gleixner } else { 1156358c331fSThomas Gleixner /* 1157358c331fSThomas Gleixner * No waiters. Take the lock without the 1158358c331fSThomas Gleixner * pi_lock dance.@task->pi_blocked_on is NULL 1159358c331fSThomas Gleixner * and we have no waiters to enqueue in @task 11609f40a51aSDavidlohr Bueso * pi waiters tree. 1161358c331fSThomas Gleixner */ 1162358c331fSThomas Gleixner goto takeit; 11631696a8beSPeter Zijlstra } 11641696a8beSPeter Zijlstra } 11651696a8beSPeter Zijlstra 11661696a8beSPeter Zijlstra /* 1167358c331fSThomas Gleixner * Clear @task->pi_blocked_on. Requires protection by 1168358c331fSThomas Gleixner * @task->pi_lock. Redundant operation for the @waiter == NULL 1169358c331fSThomas Gleixner * case, but conditionals are more expensive than a redundant 1170358c331fSThomas Gleixner * store. 11711696a8beSPeter Zijlstra */ 1172b4abf910SThomas Gleixner raw_spin_lock(&task->pi_lock); 1173358c331fSThomas Gleixner task->pi_blocked_on = NULL; 1174358c331fSThomas Gleixner /* 1175358c331fSThomas Gleixner * Finish the lock acquisition. @task is the new owner. If 1176358c331fSThomas Gleixner * other waiters exist we have to insert the highest priority 11779f40a51aSDavidlohr Bueso * waiter into @task->pi_waiters tree. 1178358c331fSThomas Gleixner */ 1179358c331fSThomas Gleixner if (rt_mutex_has_waiters(lock)) 1180358c331fSThomas Gleixner rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock)); 1181b4abf910SThomas Gleixner raw_spin_unlock(&task->pi_lock); 11821696a8beSPeter Zijlstra 1183358c331fSThomas Gleixner takeit: 1184358c331fSThomas Gleixner /* 1185358c331fSThomas Gleixner * This either preserves the RT_MUTEX_HAS_WAITERS bit if there 1186358c331fSThomas Gleixner * are still waiters or clears it. 1187358c331fSThomas Gleixner */ 11881696a8beSPeter Zijlstra rt_mutex_set_owner(lock, task); 11891696a8beSPeter Zijlstra 11901696a8beSPeter Zijlstra return 1; 11911696a8beSPeter Zijlstra } 11921696a8beSPeter Zijlstra 11931696a8beSPeter Zijlstra /* 11941696a8beSPeter Zijlstra * Task blocks on lock. 11951696a8beSPeter Zijlstra * 11961696a8beSPeter Zijlstra * Prepare waiter and propagate pi chain 11971696a8beSPeter Zijlstra * 1198b4abf910SThomas Gleixner * This must be called with lock->wait_lock held and interrupts disabled 11991696a8beSPeter Zijlstra */ 1200830e6accSPeter Zijlstra static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock, 12011696a8beSPeter Zijlstra struct rt_mutex_waiter *waiter, 12021696a8beSPeter Zijlstra struct task_struct *task, 1203add46132SPeter Zijlstra struct ww_acquire_ctx *ww_ctx, 12048930ed80SThomas Gleixner enum rtmutex_chainwalk chwalk) 12051696a8beSPeter Zijlstra { 12061696a8beSPeter Zijlstra struct task_struct *owner = rt_mutex_owner(lock); 12071696a8beSPeter Zijlstra struct rt_mutex_waiter *top_waiter = waiter; 1208830e6accSPeter Zijlstra struct rt_mutex_base *next_lock; 12091696a8beSPeter Zijlstra int chain_walk = 0, res; 12101696a8beSPeter Zijlstra 1211e0aad5b4SPeter Zijlstra lockdep_assert_held(&lock->wait_lock); 1212e0aad5b4SPeter Zijlstra 1213397335f0SThomas Gleixner /* 1214397335f0SThomas Gleixner * Early deadlock detection. We really don't want the task to 1215397335f0SThomas Gleixner * enqueue on itself just to untangle the mess later. It's not 1216397335f0SThomas Gleixner * only an optimization. We drop the locks, so another waiter 1217397335f0SThomas Gleixner * can come in before the chain walk detects the deadlock. So 1218397335f0SThomas Gleixner * the other will detect the deadlock and return -EDEADLOCK, 1219397335f0SThomas Gleixner * which is wrong, as the other waiter is not in a deadlock 1220397335f0SThomas Gleixner * situation. 122102ea9fc9SPeter Zijlstra * 122202ea9fc9SPeter Zijlstra * Except for ww_mutex, in that case the chain walk must already deal 122302ea9fc9SPeter Zijlstra * with spurious cycles, see the comments at [3] and [6]. 1224397335f0SThomas Gleixner */ 122502ea9fc9SPeter Zijlstra if (owner == task && !(build_ww_mutex() && ww_ctx)) 1226397335f0SThomas Gleixner return -EDEADLK; 1227397335f0SThomas Gleixner 1228b4abf910SThomas Gleixner raw_spin_lock(&task->pi_lock); 12291696a8beSPeter Zijlstra waiter->task = task; 12301696a8beSPeter Zijlstra waiter->lock = lock; 1231715f7f9eSPeter Zijlstra waiter_update_prio(waiter, task); 1232f7853c34SPeter Zijlstra waiter_clone_prio(waiter, task); 12331696a8beSPeter Zijlstra 12341696a8beSPeter Zijlstra /* Get the top priority waiter on the lock */ 12351696a8beSPeter Zijlstra if (rt_mutex_has_waiters(lock)) 12361696a8beSPeter Zijlstra top_waiter = rt_mutex_top_waiter(lock); 1237fb00aca4SPeter Zijlstra rt_mutex_enqueue(lock, waiter); 12381696a8beSPeter Zijlstra 12391696a8beSPeter Zijlstra task->pi_blocked_on = waiter; 12401696a8beSPeter Zijlstra 1241b4abf910SThomas Gleixner raw_spin_unlock(&task->pi_lock); 12421696a8beSPeter Zijlstra 1243add46132SPeter Zijlstra if (build_ww_mutex() && ww_ctx) { 1244add46132SPeter Zijlstra struct rt_mutex *rtm; 1245add46132SPeter Zijlstra 1246add46132SPeter Zijlstra /* Check whether the waiter should back out immediately */ 1247add46132SPeter Zijlstra rtm = container_of(lock, struct rt_mutex, rtmutex); 1248add46132SPeter Zijlstra res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx); 124937e8abffSThomas Gleixner if (res) { 125037e8abffSThomas Gleixner raw_spin_lock(&task->pi_lock); 125137e8abffSThomas Gleixner rt_mutex_dequeue(lock, waiter); 125237e8abffSThomas Gleixner task->pi_blocked_on = NULL; 125337e8abffSThomas Gleixner raw_spin_unlock(&task->pi_lock); 1254add46132SPeter Zijlstra return res; 1255add46132SPeter Zijlstra } 125637e8abffSThomas Gleixner } 1257add46132SPeter Zijlstra 12581696a8beSPeter Zijlstra if (!owner) 12591696a8beSPeter Zijlstra return 0; 12601696a8beSPeter Zijlstra 1261b4abf910SThomas Gleixner raw_spin_lock(&owner->pi_lock); 126282084984SThomas Gleixner if (waiter == rt_mutex_top_waiter(lock)) { 1263fb00aca4SPeter Zijlstra rt_mutex_dequeue_pi(owner, top_waiter); 1264fb00aca4SPeter Zijlstra rt_mutex_enqueue_pi(owner, waiter); 12651696a8beSPeter Zijlstra 1266f7853c34SPeter Zijlstra rt_mutex_adjust_prio(lock, owner); 12671696a8beSPeter Zijlstra if (owner->pi_blocked_on) 12681696a8beSPeter Zijlstra chain_walk = 1; 12698930ed80SThomas Gleixner } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) { 12701696a8beSPeter Zijlstra chain_walk = 1; 127182084984SThomas Gleixner } 12721696a8beSPeter Zijlstra 127382084984SThomas Gleixner /* Store the lock on which owner is blocked or NULL */ 127482084984SThomas Gleixner next_lock = task_blocked_on_lock(owner); 127582084984SThomas Gleixner 1276b4abf910SThomas Gleixner raw_spin_unlock(&owner->pi_lock); 127782084984SThomas Gleixner /* 127882084984SThomas Gleixner * Even if full deadlock detection is on, if the owner is not 127982084984SThomas Gleixner * blocked itself, we can avoid finding this out in the chain 128082084984SThomas Gleixner * walk. 128182084984SThomas Gleixner */ 128282084984SThomas Gleixner if (!chain_walk || !next_lock) 12831696a8beSPeter Zijlstra return 0; 12841696a8beSPeter Zijlstra 12851696a8beSPeter Zijlstra /* 12861696a8beSPeter Zijlstra * The owner can't disappear while holding a lock, 12871696a8beSPeter Zijlstra * so the owner struct is protected by wait_lock. 12881696a8beSPeter Zijlstra * Gets dropped in rt_mutex_adjust_prio_chain()! 12891696a8beSPeter Zijlstra */ 12901696a8beSPeter Zijlstra get_task_struct(owner); 12911696a8beSPeter Zijlstra 1292b4abf910SThomas Gleixner raw_spin_unlock_irq(&lock->wait_lock); 12931696a8beSPeter Zijlstra 12948930ed80SThomas Gleixner res = rt_mutex_adjust_prio_chain(owner, chwalk, lock, 129582084984SThomas Gleixner next_lock, waiter, task); 12961696a8beSPeter Zijlstra 1297b4abf910SThomas Gleixner raw_spin_lock_irq(&lock->wait_lock); 12981696a8beSPeter Zijlstra 12991696a8beSPeter Zijlstra return res; 13001696a8beSPeter Zijlstra } 13011696a8beSPeter Zijlstra 13021696a8beSPeter Zijlstra /* 13039f40a51aSDavidlohr Bueso * Remove the top waiter from the current tasks pi waiter tree and 130445ab4effSDavidlohr Bueso * queue it up. 13051696a8beSPeter Zijlstra * 1306b4abf910SThomas Gleixner * Called with lock->wait_lock held and interrupts disabled. 13071696a8beSPeter Zijlstra */ 13087980aa39SThomas Gleixner static void __sched mark_wakeup_next_waiter(struct rt_wake_q_head *wqh, 1309830e6accSPeter Zijlstra struct rt_mutex_base *lock) 13101696a8beSPeter Zijlstra { 13111696a8beSPeter Zijlstra struct rt_mutex_waiter *waiter; 13121696a8beSPeter Zijlstra 1313f7853c34SPeter Zijlstra lockdep_assert_held(&lock->wait_lock); 1314f7853c34SPeter Zijlstra 1315b4abf910SThomas Gleixner raw_spin_lock(¤t->pi_lock); 13161696a8beSPeter Zijlstra 13171696a8beSPeter Zijlstra waiter = rt_mutex_top_waiter(lock); 13181696a8beSPeter Zijlstra 13191696a8beSPeter Zijlstra /* 1320acd58620SPeter Zijlstra * Remove it from current->pi_waiters and deboost. 1321acd58620SPeter Zijlstra * 1322acd58620SPeter Zijlstra * We must in fact deboost here in order to ensure we call 1323acd58620SPeter Zijlstra * rt_mutex_setprio() to update p->pi_top_task before the 1324acd58620SPeter Zijlstra * task unblocks. 13251696a8beSPeter Zijlstra */ 1326fb00aca4SPeter Zijlstra rt_mutex_dequeue_pi(current, waiter); 1327f7853c34SPeter Zijlstra rt_mutex_adjust_prio(lock, current); 13281696a8beSPeter Zijlstra 132927e35715SThomas Gleixner /* 133027e35715SThomas Gleixner * As we are waking up the top waiter, and the waiter stays 133127e35715SThomas Gleixner * queued on the lock until it gets the lock, this lock 133227e35715SThomas Gleixner * obviously has waiters. Just set the bit here and this has 133327e35715SThomas Gleixner * the added benefit of forcing all new tasks into the 133427e35715SThomas Gleixner * slow path making sure no task of lower priority than 133527e35715SThomas Gleixner * the top waiter can steal this lock. 133627e35715SThomas Gleixner */ 133727e35715SThomas Gleixner lock->owner = (void *) RT_MUTEX_HAS_WAITERS; 13381696a8beSPeter Zijlstra 1339acd58620SPeter Zijlstra /* 1340acd58620SPeter Zijlstra * We deboosted before waking the top waiter task such that we don't 1341acd58620SPeter Zijlstra * run two tasks with the 'same' priority (and ensure the 1342acd58620SPeter Zijlstra * p->pi_top_task pointer points to a blocked task). This however can 1343acd58620SPeter Zijlstra * lead to priority inversion if we would get preempted after the 1344acd58620SPeter Zijlstra * deboost but before waking our donor task, hence the preempt_disable() 1345acd58620SPeter Zijlstra * before unlock. 1346acd58620SPeter Zijlstra * 13477980aa39SThomas Gleixner * Pairs with preempt_enable() in rt_mutex_wake_up_q(); 1348acd58620SPeter Zijlstra */ 1349acd58620SPeter Zijlstra preempt_disable(); 13507980aa39SThomas Gleixner rt_mutex_wake_q_add(wqh, waiter); 1351acd58620SPeter Zijlstra raw_spin_unlock(¤t->pi_lock); 13521696a8beSPeter Zijlstra } 13531696a8beSPeter Zijlstra 1354e17ba59bSThomas Gleixner static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock) 1355e17ba59bSThomas Gleixner { 1356e17ba59bSThomas Gleixner int ret = try_to_take_rt_mutex(lock, current, NULL); 1357e17ba59bSThomas Gleixner 1358e17ba59bSThomas Gleixner /* 1359e17ba59bSThomas Gleixner * try_to_take_rt_mutex() sets the lock waiters bit 1360e17ba59bSThomas Gleixner * unconditionally. Clean this up. 1361e17ba59bSThomas Gleixner */ 13621c0908d8SMel Gorman fixup_rt_mutex_waiters(lock, true); 1363e17ba59bSThomas Gleixner 1364e17ba59bSThomas Gleixner return ret; 1365e17ba59bSThomas Gleixner } 1366e17ba59bSThomas Gleixner 1367e17ba59bSThomas Gleixner /* 1368e17ba59bSThomas Gleixner * Slow path try-lock function: 1369e17ba59bSThomas Gleixner */ 1370e17ba59bSThomas Gleixner static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock) 1371e17ba59bSThomas Gleixner { 1372e17ba59bSThomas Gleixner unsigned long flags; 1373e17ba59bSThomas Gleixner int ret; 1374e17ba59bSThomas Gleixner 1375e17ba59bSThomas Gleixner /* 1376e17ba59bSThomas Gleixner * If the lock already has an owner we fail to get the lock. 1377e17ba59bSThomas Gleixner * This can be done without taking the @lock->wait_lock as 1378e17ba59bSThomas Gleixner * it is only being read, and this is a trylock anyway. 1379e17ba59bSThomas Gleixner */ 1380e17ba59bSThomas Gleixner if (rt_mutex_owner(lock)) 1381e17ba59bSThomas Gleixner return 0; 1382e17ba59bSThomas Gleixner 1383e17ba59bSThomas Gleixner /* 1384e17ba59bSThomas Gleixner * The mutex has currently no owner. Lock the wait lock and try to 1385e17ba59bSThomas Gleixner * acquire the lock. We use irqsave here to support early boot calls. 1386e17ba59bSThomas Gleixner */ 1387e17ba59bSThomas Gleixner raw_spin_lock_irqsave(&lock->wait_lock, flags); 1388e17ba59bSThomas Gleixner 1389e17ba59bSThomas Gleixner ret = __rt_mutex_slowtrylock(lock); 1390e17ba59bSThomas Gleixner 1391e17ba59bSThomas Gleixner raw_spin_unlock_irqrestore(&lock->wait_lock, flags); 1392e17ba59bSThomas Gleixner 1393e17ba59bSThomas Gleixner return ret; 1394e17ba59bSThomas Gleixner } 1395e17ba59bSThomas Gleixner 1396e17ba59bSThomas Gleixner static __always_inline int __rt_mutex_trylock(struct rt_mutex_base *lock) 1397e17ba59bSThomas Gleixner { 1398e17ba59bSThomas Gleixner if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) 1399e17ba59bSThomas Gleixner return 1; 1400e17ba59bSThomas Gleixner 1401e17ba59bSThomas Gleixner return rt_mutex_slowtrylock(lock); 1402e17ba59bSThomas Gleixner } 1403e17ba59bSThomas Gleixner 1404e17ba59bSThomas Gleixner /* 1405e17ba59bSThomas Gleixner * Slow path to release a rt-mutex. 1406e17ba59bSThomas Gleixner */ 1407e17ba59bSThomas Gleixner static void __sched rt_mutex_slowunlock(struct rt_mutex_base *lock) 1408e17ba59bSThomas Gleixner { 1409e17ba59bSThomas Gleixner DEFINE_RT_WAKE_Q(wqh); 1410e17ba59bSThomas Gleixner unsigned long flags; 1411e17ba59bSThomas Gleixner 1412e17ba59bSThomas Gleixner /* irqsave required to support early boot calls */ 1413e17ba59bSThomas Gleixner raw_spin_lock_irqsave(&lock->wait_lock, flags); 1414e17ba59bSThomas Gleixner 1415e17ba59bSThomas Gleixner debug_rt_mutex_unlock(lock); 1416e17ba59bSThomas Gleixner 1417e17ba59bSThomas Gleixner /* 1418e17ba59bSThomas Gleixner * We must be careful here if the fast path is enabled. If we 1419e17ba59bSThomas Gleixner * have no waiters queued we cannot set owner to NULL here 1420e17ba59bSThomas Gleixner * because of: 1421e17ba59bSThomas Gleixner * 1422e17ba59bSThomas Gleixner * foo->lock->owner = NULL; 1423e17ba59bSThomas Gleixner * rtmutex_lock(foo->lock); <- fast path 1424e17ba59bSThomas Gleixner * free = atomic_dec_and_test(foo->refcnt); 1425e17ba59bSThomas Gleixner * rtmutex_unlock(foo->lock); <- fast path 1426e17ba59bSThomas Gleixner * if (free) 1427e17ba59bSThomas Gleixner * kfree(foo); 1428e17ba59bSThomas Gleixner * raw_spin_unlock(foo->lock->wait_lock); 1429e17ba59bSThomas Gleixner * 1430e17ba59bSThomas Gleixner * So for the fastpath enabled kernel: 1431e17ba59bSThomas Gleixner * 1432e17ba59bSThomas Gleixner * Nothing can set the waiters bit as long as we hold 1433e17ba59bSThomas Gleixner * lock->wait_lock. So we do the following sequence: 1434e17ba59bSThomas Gleixner * 1435e17ba59bSThomas Gleixner * owner = rt_mutex_owner(lock); 1436e17ba59bSThomas Gleixner * clear_rt_mutex_waiters(lock); 1437e17ba59bSThomas Gleixner * raw_spin_unlock(&lock->wait_lock); 1438e17ba59bSThomas Gleixner * if (cmpxchg(&lock->owner, owner, 0) == owner) 1439e17ba59bSThomas Gleixner * return; 1440e17ba59bSThomas Gleixner * goto retry; 1441e17ba59bSThomas Gleixner * 1442e17ba59bSThomas Gleixner * The fastpath disabled variant is simple as all access to 1443e17ba59bSThomas Gleixner * lock->owner is serialized by lock->wait_lock: 1444e17ba59bSThomas Gleixner * 1445e17ba59bSThomas Gleixner * lock->owner = NULL; 1446e17ba59bSThomas Gleixner * raw_spin_unlock(&lock->wait_lock); 1447e17ba59bSThomas Gleixner */ 1448e17ba59bSThomas Gleixner while (!rt_mutex_has_waiters(lock)) { 1449e17ba59bSThomas Gleixner /* Drops lock->wait_lock ! */ 1450e17ba59bSThomas Gleixner if (unlock_rt_mutex_safe(lock, flags) == true) 1451e17ba59bSThomas Gleixner return; 1452e17ba59bSThomas Gleixner /* Relock the rtmutex and try again */ 1453e17ba59bSThomas Gleixner raw_spin_lock_irqsave(&lock->wait_lock, flags); 1454e17ba59bSThomas Gleixner } 1455e17ba59bSThomas Gleixner 1456e17ba59bSThomas Gleixner /* 1457e17ba59bSThomas Gleixner * The wakeup next waiter path does not suffer from the above 1458e17ba59bSThomas Gleixner * race. See the comments there. 1459e17ba59bSThomas Gleixner * 1460e17ba59bSThomas Gleixner * Queue the next waiter for wakeup once we release the wait_lock. 1461e17ba59bSThomas Gleixner */ 1462e17ba59bSThomas Gleixner mark_wakeup_next_waiter(&wqh, lock); 1463e17ba59bSThomas Gleixner raw_spin_unlock_irqrestore(&lock->wait_lock, flags); 1464e17ba59bSThomas Gleixner 1465e17ba59bSThomas Gleixner rt_mutex_wake_up_q(&wqh); 1466e17ba59bSThomas Gleixner } 1467e17ba59bSThomas Gleixner 1468e17ba59bSThomas Gleixner static __always_inline void __rt_mutex_unlock(struct rt_mutex_base *lock) 1469e17ba59bSThomas Gleixner { 1470e17ba59bSThomas Gleixner if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) 1471e17ba59bSThomas Gleixner return; 1472e17ba59bSThomas Gleixner 1473e17ba59bSThomas Gleixner rt_mutex_slowunlock(lock); 1474e17ba59bSThomas Gleixner } 1475e17ba59bSThomas Gleixner 1476992caf7fSSteven Rostedt #ifdef CONFIG_SMP 1477992caf7fSSteven Rostedt static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock, 1478992caf7fSSteven Rostedt struct rt_mutex_waiter *waiter, 1479992caf7fSSteven Rostedt struct task_struct *owner) 1480992caf7fSSteven Rostedt { 1481992caf7fSSteven Rostedt bool res = true; 1482992caf7fSSteven Rostedt 1483992caf7fSSteven Rostedt rcu_read_lock(); 1484992caf7fSSteven Rostedt for (;;) { 1485992caf7fSSteven Rostedt /* If owner changed, trylock again. */ 1486992caf7fSSteven Rostedt if (owner != rt_mutex_owner(lock)) 1487992caf7fSSteven Rostedt break; 1488992caf7fSSteven Rostedt /* 1489992caf7fSSteven Rostedt * Ensure that @owner is dereferenced after checking that 1490992caf7fSSteven Rostedt * the lock owner still matches @owner. If that fails, 1491992caf7fSSteven Rostedt * @owner might point to freed memory. If it still matches, 1492992caf7fSSteven Rostedt * the rcu_read_lock() ensures the memory stays valid. 1493992caf7fSSteven Rostedt */ 1494992caf7fSSteven Rostedt barrier(); 1495992caf7fSSteven Rostedt /* 1496992caf7fSSteven Rostedt * Stop spinning when: 1497992caf7fSSteven Rostedt * - the lock owner has been scheduled out 1498992caf7fSSteven Rostedt * - current is not longer the top waiter 1499992caf7fSSteven Rostedt * - current is requested to reschedule (redundant 1500992caf7fSSteven Rostedt * for CONFIG_PREEMPT_RCU=y) 1501992caf7fSSteven Rostedt * - the VCPU on which owner runs is preempted 1502992caf7fSSteven Rostedt */ 1503c0bed69dSKefeng Wang if (!owner_on_cpu(owner) || need_resched() || 1504f16cc980SThomas Gleixner !rt_mutex_waiter_is_top_waiter(lock, waiter)) { 1505992caf7fSSteven Rostedt res = false; 1506992caf7fSSteven Rostedt break; 1507992caf7fSSteven Rostedt } 1508992caf7fSSteven Rostedt cpu_relax(); 1509992caf7fSSteven Rostedt } 1510992caf7fSSteven Rostedt rcu_read_unlock(); 1511992caf7fSSteven Rostedt return res; 1512992caf7fSSteven Rostedt } 1513992caf7fSSteven Rostedt #else 1514992caf7fSSteven Rostedt static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock, 1515992caf7fSSteven Rostedt struct rt_mutex_waiter *waiter, 1516992caf7fSSteven Rostedt struct task_struct *owner) 1517992caf7fSSteven Rostedt { 1518992caf7fSSteven Rostedt return false; 1519992caf7fSSteven Rostedt } 1520992caf7fSSteven Rostedt #endif 1521992caf7fSSteven Rostedt 1522e17ba59bSThomas Gleixner #ifdef RT_MUTEX_BUILD_MUTEX 1523e17ba59bSThomas Gleixner /* 1524e17ba59bSThomas Gleixner * Functions required for: 1525e17ba59bSThomas Gleixner * - rtmutex, futex on all kernels 1526e17ba59bSThomas Gleixner * - mutex and rwsem substitutions on RT kernels 1527e17ba59bSThomas Gleixner */ 1528e17ba59bSThomas Gleixner 15291696a8beSPeter Zijlstra /* 15301696a8beSPeter Zijlstra * Remove a waiter from a lock and give up 15311696a8beSPeter Zijlstra * 1532e17ba59bSThomas Gleixner * Must be called with lock->wait_lock held and interrupts disabled. It must 15331696a8beSPeter Zijlstra * have just failed to try_to_take_rt_mutex(). 15341696a8beSPeter Zijlstra */ 1535830e6accSPeter Zijlstra static void __sched remove_waiter(struct rt_mutex_base *lock, 15361696a8beSPeter Zijlstra struct rt_mutex_waiter *waiter) 15371696a8beSPeter Zijlstra { 15381ca7b860SThomas Gleixner bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); 15391696a8beSPeter Zijlstra struct task_struct *owner = rt_mutex_owner(lock); 1540830e6accSPeter Zijlstra struct rt_mutex_base *next_lock; 15411696a8beSPeter Zijlstra 1542e0aad5b4SPeter Zijlstra lockdep_assert_held(&lock->wait_lock); 1543e0aad5b4SPeter Zijlstra 1544b4abf910SThomas Gleixner raw_spin_lock(¤t->pi_lock); 1545fb00aca4SPeter Zijlstra rt_mutex_dequeue(lock, waiter); 15461696a8beSPeter Zijlstra current->pi_blocked_on = NULL; 1547b4abf910SThomas Gleixner raw_spin_unlock(¤t->pi_lock); 15481696a8beSPeter Zijlstra 15491ca7b860SThomas Gleixner /* 15501ca7b860SThomas Gleixner * Only update priority if the waiter was the highest priority 15511ca7b860SThomas Gleixner * waiter of the lock and there is an owner to update. 15521ca7b860SThomas Gleixner */ 15531ca7b860SThomas Gleixner if (!owner || !is_top_waiter) 15541696a8beSPeter Zijlstra return; 15551696a8beSPeter Zijlstra 1556b4abf910SThomas Gleixner raw_spin_lock(&owner->pi_lock); 15571696a8beSPeter Zijlstra 1558fb00aca4SPeter Zijlstra rt_mutex_dequeue_pi(owner, waiter); 15591696a8beSPeter Zijlstra 15601ca7b860SThomas Gleixner if (rt_mutex_has_waiters(lock)) 15611ca7b860SThomas Gleixner rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock)); 15621696a8beSPeter Zijlstra 1563f7853c34SPeter Zijlstra rt_mutex_adjust_prio(lock, owner); 15641696a8beSPeter Zijlstra 156582084984SThomas Gleixner /* Store the lock on which owner is blocked or NULL */ 156682084984SThomas Gleixner next_lock = task_blocked_on_lock(owner); 15671696a8beSPeter Zijlstra 1568b4abf910SThomas Gleixner raw_spin_unlock(&owner->pi_lock); 15691696a8beSPeter Zijlstra 15701ca7b860SThomas Gleixner /* 15711ca7b860SThomas Gleixner * Don't walk the chain, if the owner task is not blocked 15721ca7b860SThomas Gleixner * itself. 15731ca7b860SThomas Gleixner */ 157482084984SThomas Gleixner if (!next_lock) 15751696a8beSPeter Zijlstra return; 15761696a8beSPeter Zijlstra 15771696a8beSPeter Zijlstra /* gets dropped in rt_mutex_adjust_prio_chain()! */ 15781696a8beSPeter Zijlstra get_task_struct(owner); 15791696a8beSPeter Zijlstra 1580b4abf910SThomas Gleixner raw_spin_unlock_irq(&lock->wait_lock); 15811696a8beSPeter Zijlstra 15828930ed80SThomas Gleixner rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock, 15838930ed80SThomas Gleixner next_lock, NULL, current); 15841696a8beSPeter Zijlstra 1585b4abf910SThomas Gleixner raw_spin_lock_irq(&lock->wait_lock); 15861696a8beSPeter Zijlstra } 15871696a8beSPeter Zijlstra 15881696a8beSPeter Zijlstra /** 1589ebbdc41eSThomas Gleixner * rt_mutex_slowlock_block() - Perform the wait-wake-try-to-take loop 15901696a8beSPeter Zijlstra * @lock: the rt_mutex to take 1591add46132SPeter Zijlstra * @ww_ctx: WW mutex context pointer 15921696a8beSPeter Zijlstra * @state: the state the task should block in (TASK_INTERRUPTIBLE 15931696a8beSPeter Zijlstra * or TASK_UNINTERRUPTIBLE) 15941696a8beSPeter Zijlstra * @timeout: the pre-initialized and started timer, or NULL for none 15951696a8beSPeter Zijlstra * @waiter: the pre-initialized rt_mutex_waiter 15961696a8beSPeter Zijlstra * 1597b4abf910SThomas Gleixner * Must be called with lock->wait_lock held and interrupts disabled 15981696a8beSPeter Zijlstra */ 1599ebbdc41eSThomas Gleixner static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock, 1600add46132SPeter Zijlstra struct ww_acquire_ctx *ww_ctx, 1601830e6accSPeter Zijlstra unsigned int state, 16021696a8beSPeter Zijlstra struct hrtimer_sleeper *timeout, 16031696a8beSPeter Zijlstra struct rt_mutex_waiter *waiter) 16041696a8beSPeter Zijlstra { 1605add46132SPeter Zijlstra struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex); 1606992caf7fSSteven Rostedt struct task_struct *owner; 16071696a8beSPeter Zijlstra int ret = 0; 16081696a8beSPeter Zijlstra 16091696a8beSPeter Zijlstra for (;;) { 16101696a8beSPeter Zijlstra /* Try to acquire the lock: */ 16111696a8beSPeter Zijlstra if (try_to_take_rt_mutex(lock, current, waiter)) 16121696a8beSPeter Zijlstra break; 16131696a8beSPeter Zijlstra 1614a51a327fSThomas Gleixner if (timeout && !timeout->task) { 16151696a8beSPeter Zijlstra ret = -ETIMEDOUT; 1616a51a327fSThomas Gleixner break; 1617a51a327fSThomas Gleixner } 1618a51a327fSThomas Gleixner if (signal_pending_state(state, current)) { 1619a51a327fSThomas Gleixner ret = -EINTR; 16201696a8beSPeter Zijlstra break; 16211696a8beSPeter Zijlstra } 16221696a8beSPeter Zijlstra 1623add46132SPeter Zijlstra if (build_ww_mutex() && ww_ctx) { 1624add46132SPeter Zijlstra ret = __ww_mutex_check_kill(rtm, waiter, ww_ctx); 1625add46132SPeter Zijlstra if (ret) 1626add46132SPeter Zijlstra break; 1627add46132SPeter Zijlstra } 1628add46132SPeter Zijlstra 1629992caf7fSSteven Rostedt if (waiter == rt_mutex_top_waiter(lock)) 1630992caf7fSSteven Rostedt owner = rt_mutex_owner(lock); 1631992caf7fSSteven Rostedt else 1632992caf7fSSteven Rostedt owner = NULL; 1633b4abf910SThomas Gleixner raw_spin_unlock_irq(&lock->wait_lock); 16341696a8beSPeter Zijlstra 1635992caf7fSSteven Rostedt if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner)) 1636d14f9e93SSebastian Andrzej Siewior rt_mutex_schedule(); 16371696a8beSPeter Zijlstra 1638b4abf910SThomas Gleixner raw_spin_lock_irq(&lock->wait_lock); 16391696a8beSPeter Zijlstra set_current_state(state); 16401696a8beSPeter Zijlstra } 16411696a8beSPeter Zijlstra 1642afffc6c1SDavidlohr Bueso __set_current_state(TASK_RUNNING); 16431696a8beSPeter Zijlstra return ret; 16441696a8beSPeter Zijlstra } 16451696a8beSPeter Zijlstra 1646d7a2edb8SThomas Gleixner static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock, 1647*d33d2603SRoland Xu struct rt_mutex_base *lock, 16483d5c9340SThomas Gleixner struct rt_mutex_waiter *w) 16493d5c9340SThomas Gleixner { 16503d5c9340SThomas Gleixner /* 16513d5c9340SThomas Gleixner * If the result is not -EDEADLOCK or the caller requested 16523d5c9340SThomas Gleixner * deadlock detection, nothing to do here. 16533d5c9340SThomas Gleixner */ 16543d5c9340SThomas Gleixner if (res != -EDEADLOCK || detect_deadlock) 16553d5c9340SThomas Gleixner return; 16563d5c9340SThomas Gleixner 1657add46132SPeter Zijlstra if (build_ww_mutex() && w->ww_ctx) 1658add46132SPeter Zijlstra return; 1659add46132SPeter Zijlstra 1660*d33d2603SRoland Xu raw_spin_unlock_irq(&lock->wait_lock); 1661*d33d2603SRoland Xu 16626d41c675SSebastian Andrzej Siewior WARN(1, "rtmutex deadlock detected\n"); 1663*d33d2603SRoland Xu 16643d5c9340SThomas Gleixner while (1) { 16653d5c9340SThomas Gleixner set_current_state(TASK_INTERRUPTIBLE); 1666d14f9e93SSebastian Andrzej Siewior rt_mutex_schedule(); 16673d5c9340SThomas Gleixner } 16683d5c9340SThomas Gleixner } 16693d5c9340SThomas Gleixner 1670ebbdc41eSThomas Gleixner /** 1671ebbdc41eSThomas Gleixner * __rt_mutex_slowlock - Locking slowpath invoked with lock::wait_lock held 1672ebbdc41eSThomas Gleixner * @lock: The rtmutex to block lock 1673add46132SPeter Zijlstra * @ww_ctx: WW mutex context pointer 1674ebbdc41eSThomas Gleixner * @state: The task state for sleeping 1675ebbdc41eSThomas Gleixner * @chwalk: Indicator whether full or partial chainwalk is requested 1676ebbdc41eSThomas Gleixner * @waiter: Initializer waiter for blocking 16771696a8beSPeter Zijlstra */ 1678ebbdc41eSThomas Gleixner static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock, 1679add46132SPeter Zijlstra struct ww_acquire_ctx *ww_ctx, 1680830e6accSPeter Zijlstra unsigned int state, 1681ebbdc41eSThomas Gleixner enum rtmutex_chainwalk chwalk, 1682ebbdc41eSThomas Gleixner struct rt_mutex_waiter *waiter) 1683ebbdc41eSThomas Gleixner { 1684add46132SPeter Zijlstra struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex); 1685add46132SPeter Zijlstra struct ww_mutex *ww = ww_container_of(rtm); 1686ebbdc41eSThomas Gleixner int ret; 1687ebbdc41eSThomas Gleixner 1688ebbdc41eSThomas Gleixner lockdep_assert_held(&lock->wait_lock); 1689ebbdc41eSThomas Gleixner 1690ebbdc41eSThomas Gleixner /* Try to acquire the lock again: */ 1691add46132SPeter Zijlstra if (try_to_take_rt_mutex(lock, current, NULL)) { 1692add46132SPeter Zijlstra if (build_ww_mutex() && ww_ctx) { 1693add46132SPeter Zijlstra __ww_mutex_check_waiters(rtm, ww_ctx); 1694add46132SPeter Zijlstra ww_mutex_lock_acquired(ww, ww_ctx); 1695add46132SPeter Zijlstra } 1696ebbdc41eSThomas Gleixner return 0; 1697add46132SPeter Zijlstra } 1698ebbdc41eSThomas Gleixner 1699ebbdc41eSThomas Gleixner set_current_state(state); 1700ebbdc41eSThomas Gleixner 1701ee042be1SNamhyung Kim trace_contention_begin(lock, LCB_F_RT); 1702ee042be1SNamhyung Kim 1703add46132SPeter Zijlstra ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk); 1704ebbdc41eSThomas Gleixner if (likely(!ret)) 1705add46132SPeter Zijlstra ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter); 1706ebbdc41eSThomas Gleixner 1707add46132SPeter Zijlstra if (likely(!ret)) { 1708add46132SPeter Zijlstra /* acquired the lock */ 1709add46132SPeter Zijlstra if (build_ww_mutex() && ww_ctx) { 1710add46132SPeter Zijlstra if (!ww_ctx->is_wait_die) 1711add46132SPeter Zijlstra __ww_mutex_check_waiters(rtm, ww_ctx); 1712add46132SPeter Zijlstra ww_mutex_lock_acquired(ww, ww_ctx); 1713add46132SPeter Zijlstra } 1714add46132SPeter Zijlstra } else { 1715ebbdc41eSThomas Gleixner __set_current_state(TASK_RUNNING); 1716ebbdc41eSThomas Gleixner remove_waiter(lock, waiter); 1717*d33d2603SRoland Xu rt_mutex_handle_deadlock(ret, chwalk, lock, waiter); 1718ebbdc41eSThomas Gleixner } 1719ebbdc41eSThomas Gleixner 1720ebbdc41eSThomas Gleixner /* 1721ebbdc41eSThomas Gleixner * try_to_take_rt_mutex() sets the waiter bit 1722ebbdc41eSThomas Gleixner * unconditionally. We might have to fix that up. 1723ebbdc41eSThomas Gleixner */ 17241c0908d8SMel Gorman fixup_rt_mutex_waiters(lock, true); 1725ee042be1SNamhyung Kim 1726ee042be1SNamhyung Kim trace_contention_end(lock, ret); 1727ee042be1SNamhyung Kim 1728ebbdc41eSThomas Gleixner return ret; 1729ebbdc41eSThomas Gleixner } 1730ebbdc41eSThomas Gleixner 1731ebbdc41eSThomas Gleixner static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock, 1732add46132SPeter Zijlstra struct ww_acquire_ctx *ww_ctx, 1733ebbdc41eSThomas Gleixner unsigned int state) 17341696a8beSPeter Zijlstra { 17351696a8beSPeter Zijlstra struct rt_mutex_waiter waiter; 1736ebbdc41eSThomas Gleixner int ret; 17371696a8beSPeter Zijlstra 173850809358SPeter Zijlstra rt_mutex_init_waiter(&waiter); 1739add46132SPeter Zijlstra waiter.ww_ctx = ww_ctx; 17401696a8beSPeter Zijlstra 1741add46132SPeter Zijlstra ret = __rt_mutex_slowlock(lock, ww_ctx, state, RT_MUTEX_MIN_CHAINWALK, 1742add46132SPeter Zijlstra &waiter); 1743ebbdc41eSThomas Gleixner 1744ebbdc41eSThomas Gleixner debug_rt_mutex_free_waiter(&waiter); 1745ebbdc41eSThomas Gleixner return ret; 1746ebbdc41eSThomas Gleixner } 1747ebbdc41eSThomas Gleixner 1748ebbdc41eSThomas Gleixner /* 1749ebbdc41eSThomas Gleixner * rt_mutex_slowlock - Locking slowpath invoked when fast path fails 1750ebbdc41eSThomas Gleixner * @lock: The rtmutex to block lock 1751add46132SPeter Zijlstra * @ww_ctx: WW mutex context pointer 1752ebbdc41eSThomas Gleixner * @state: The task state for sleeping 1753ebbdc41eSThomas Gleixner */ 1754ebbdc41eSThomas Gleixner static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock, 1755add46132SPeter Zijlstra struct ww_acquire_ctx *ww_ctx, 1756ebbdc41eSThomas Gleixner unsigned int state) 1757ebbdc41eSThomas Gleixner { 1758ebbdc41eSThomas Gleixner unsigned long flags; 1759ebbdc41eSThomas Gleixner int ret; 1760ebbdc41eSThomas Gleixner 1761b4abf910SThomas Gleixner /* 1762d14f9e93SSebastian Andrzej Siewior * Do all pre-schedule work here, before we queue a waiter and invoke 1763d14f9e93SSebastian Andrzej Siewior * PI -- any such work that trips on rtlock (PREEMPT_RT spinlock) would 1764d14f9e93SSebastian Andrzej Siewior * otherwise recurse back into task_blocks_on_rt_mutex() through 1765d14f9e93SSebastian Andrzej Siewior * rtlock_slowlock() and will then enqueue a second waiter for this 1766d14f9e93SSebastian Andrzej Siewior * same task and things get really confusing real fast. 1767d14f9e93SSebastian Andrzej Siewior */ 1768d14f9e93SSebastian Andrzej Siewior rt_mutex_pre_schedule(); 1769d14f9e93SSebastian Andrzej Siewior 1770d14f9e93SSebastian Andrzej Siewior /* 1771b4abf910SThomas Gleixner * Technically we could use raw_spin_[un]lock_irq() here, but this can 1772b4abf910SThomas Gleixner * be called in early boot if the cmpxchg() fast path is disabled 1773b4abf910SThomas Gleixner * (debug, no architecture support). In this case we will acquire the 1774b4abf910SThomas Gleixner * rtmutex with lock->wait_lock held. But we cannot unconditionally 1775b4abf910SThomas Gleixner * enable interrupts in that early boot case. So we need to use the 1776b4abf910SThomas Gleixner * irqsave/restore variants. 1777b4abf910SThomas Gleixner */ 1778b4abf910SThomas Gleixner raw_spin_lock_irqsave(&lock->wait_lock, flags); 1779add46132SPeter Zijlstra ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state); 1780b4abf910SThomas Gleixner raw_spin_unlock_irqrestore(&lock->wait_lock, flags); 1781d14f9e93SSebastian Andrzej Siewior rt_mutex_post_schedule(); 17821696a8beSPeter Zijlstra 17831696a8beSPeter Zijlstra return ret; 17841696a8beSPeter Zijlstra } 17851696a8beSPeter Zijlstra 1786830e6accSPeter Zijlstra static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock, 1787531ae4b0SThomas Gleixner unsigned int state) 1788531ae4b0SThomas Gleixner { 178945f67f30SThomas Gleixner lockdep_assert(!current->pi_blocked_on); 179045f67f30SThomas Gleixner 1791af9f0063SSebastian Andrzej Siewior if (likely(rt_mutex_try_acquire(lock))) 1792531ae4b0SThomas Gleixner return 0; 1793531ae4b0SThomas Gleixner 1794add46132SPeter Zijlstra return rt_mutex_slowlock(lock, NULL, state); 1795531ae4b0SThomas Gleixner } 1796e17ba59bSThomas Gleixner #endif /* RT_MUTEX_BUILD_MUTEX */ 17971c143c4bSThomas Gleixner 17981c143c4bSThomas Gleixner #ifdef RT_MUTEX_BUILD_SPINLOCKS 17991c143c4bSThomas Gleixner /* 18001c143c4bSThomas Gleixner * Functions required for spin/rw_lock substitution on RT kernels 18011c143c4bSThomas Gleixner */ 18021c143c4bSThomas Gleixner 18031c143c4bSThomas Gleixner /** 18041c143c4bSThomas Gleixner * rtlock_slowlock_locked - Slow path lock acquisition for RT locks 18051c143c4bSThomas Gleixner * @lock: The underlying RT mutex 18061c143c4bSThomas Gleixner */ 18071c143c4bSThomas Gleixner static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock) 18081c143c4bSThomas Gleixner { 18091c143c4bSThomas Gleixner struct rt_mutex_waiter waiter; 1810992caf7fSSteven Rostedt struct task_struct *owner; 18111c143c4bSThomas Gleixner 18121c143c4bSThomas Gleixner lockdep_assert_held(&lock->wait_lock); 18131c143c4bSThomas Gleixner 18141c143c4bSThomas Gleixner if (try_to_take_rt_mutex(lock, current, NULL)) 18151c143c4bSThomas Gleixner return; 18161c143c4bSThomas Gleixner 18171c143c4bSThomas Gleixner rt_mutex_init_rtlock_waiter(&waiter); 18181c143c4bSThomas Gleixner 18191c143c4bSThomas Gleixner /* Save current state and set state to TASK_RTLOCK_WAIT */ 18201c143c4bSThomas Gleixner current_save_and_set_rtlock_wait_state(); 18211c143c4bSThomas Gleixner 1822ee042be1SNamhyung Kim trace_contention_begin(lock, LCB_F_RT); 1823ee042be1SNamhyung Kim 1824add46132SPeter Zijlstra task_blocks_on_rt_mutex(lock, &waiter, current, NULL, RT_MUTEX_MIN_CHAINWALK); 18251c143c4bSThomas Gleixner 18261c143c4bSThomas Gleixner for (;;) { 18271c143c4bSThomas Gleixner /* Try to acquire the lock again */ 18281c143c4bSThomas Gleixner if (try_to_take_rt_mutex(lock, current, &waiter)) 18291c143c4bSThomas Gleixner break; 18301c143c4bSThomas Gleixner 1831992caf7fSSteven Rostedt if (&waiter == rt_mutex_top_waiter(lock)) 1832992caf7fSSteven Rostedt owner = rt_mutex_owner(lock); 1833992caf7fSSteven Rostedt else 1834992caf7fSSteven Rostedt owner = NULL; 18351c143c4bSThomas Gleixner raw_spin_unlock_irq(&lock->wait_lock); 18361c143c4bSThomas Gleixner 1837992caf7fSSteven Rostedt if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner)) 18381c143c4bSThomas Gleixner schedule_rtlock(); 18391c143c4bSThomas Gleixner 18401c143c4bSThomas Gleixner raw_spin_lock_irq(&lock->wait_lock); 18411c143c4bSThomas Gleixner set_current_state(TASK_RTLOCK_WAIT); 18421c143c4bSThomas Gleixner } 18431c143c4bSThomas Gleixner 18441c143c4bSThomas Gleixner /* Restore the task state */ 18451c143c4bSThomas Gleixner current_restore_rtlock_saved_state(); 18461c143c4bSThomas Gleixner 18471c143c4bSThomas Gleixner /* 18481c143c4bSThomas Gleixner * try_to_take_rt_mutex() sets the waiter bit unconditionally. 18491c143c4bSThomas Gleixner * We might have to fix that up: 18501c143c4bSThomas Gleixner */ 18511c0908d8SMel Gorman fixup_rt_mutex_waiters(lock, true); 18521c143c4bSThomas Gleixner debug_rt_mutex_free_waiter(&waiter); 1853ee042be1SNamhyung Kim 1854ee042be1SNamhyung Kim trace_contention_end(lock, 0); 18551c143c4bSThomas Gleixner } 18561c143c4bSThomas Gleixner 18571c143c4bSThomas Gleixner static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock) 18581c143c4bSThomas Gleixner { 18591c143c4bSThomas Gleixner unsigned long flags; 18601c143c4bSThomas Gleixner 18611c143c4bSThomas Gleixner raw_spin_lock_irqsave(&lock->wait_lock, flags); 18621c143c4bSThomas Gleixner rtlock_slowlock_locked(lock); 18631c143c4bSThomas Gleixner raw_spin_unlock_irqrestore(&lock->wait_lock, flags); 18641c143c4bSThomas Gleixner } 18651c143c4bSThomas Gleixner 18661c143c4bSThomas Gleixner #endif /* RT_MUTEX_BUILD_SPINLOCKS */ 1867