1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 21696a8beSPeter Zijlstra /* 31696a8beSPeter Zijlstra * RT-Mutexes: simple blocking mutual exclusion locks with PI support 41696a8beSPeter Zijlstra * 51696a8beSPeter Zijlstra * started by Ingo Molnar and Thomas Gleixner. 61696a8beSPeter Zijlstra * 71696a8beSPeter Zijlstra * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 81696a8beSPeter Zijlstra * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com> 91696a8beSPeter Zijlstra * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt 101696a8beSPeter Zijlstra * Copyright (C) 2006 Esben Nielsen 11992caf7fSSteven Rostedt * Adaptive Spinlocks: 12992caf7fSSteven Rostedt * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich, 13992caf7fSSteven Rostedt * and Peter Morreale, 14992caf7fSSteven Rostedt * Adaptive Spinlocks simplification: 15992caf7fSSteven Rostedt * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com> 161696a8beSPeter Zijlstra * 17387b1468SMauro Carvalho Chehab * See Documentation/locking/rt-mutex-design.rst for details. 181696a8beSPeter Zijlstra */ 19531ae4b0SThomas Gleixner #include <linux/sched.h> 20531ae4b0SThomas Gleixner #include <linux/sched/debug.h> 21531ae4b0SThomas Gleixner #include <linux/sched/deadline.h> 22174cd4b1SIngo Molnar #include <linux/sched/signal.h> 231696a8beSPeter Zijlstra #include <linux/sched/rt.h> 2484f001e1SIngo Molnar #include <linux/sched/wake_q.h> 25add46132SPeter Zijlstra #include <linux/ww_mutex.h> 261696a8beSPeter Zijlstra 27ee042be1SNamhyung Kim #include <trace/events/lock.h> 28ee042be1SNamhyung Kim 291696a8beSPeter Zijlstra #include "rtmutex_common.h" 301696a8beSPeter Zijlstra 31add46132SPeter Zijlstra #ifndef WW_RT 32add46132SPeter Zijlstra # define build_ww_mutex() (false) 33add46132SPeter Zijlstra # define ww_container_of(rtm) NULL 34add46132SPeter Zijlstra 35add46132SPeter Zijlstra static inline int __ww_mutex_add_waiter(struct rt_mutex_waiter *waiter, 36add46132SPeter Zijlstra struct rt_mutex *lock, 37add46132SPeter Zijlstra struct ww_acquire_ctx *ww_ctx) 38add46132SPeter Zijlstra { 39add46132SPeter Zijlstra return 0; 40add46132SPeter Zijlstra } 41add46132SPeter Zijlstra 42add46132SPeter Zijlstra static inline void __ww_mutex_check_waiters(struct rt_mutex *lock, 43add46132SPeter Zijlstra struct ww_acquire_ctx *ww_ctx) 44add46132SPeter Zijlstra { 45add46132SPeter Zijlstra } 46add46132SPeter Zijlstra 47add46132SPeter Zijlstra static inline void ww_mutex_lock_acquired(struct ww_mutex *lock, 48add46132SPeter Zijlstra struct ww_acquire_ctx *ww_ctx) 49add46132SPeter Zijlstra { 50add46132SPeter Zijlstra } 51add46132SPeter Zijlstra 52add46132SPeter Zijlstra static inline int __ww_mutex_check_kill(struct rt_mutex *lock, 53add46132SPeter Zijlstra struct rt_mutex_waiter *waiter, 54add46132SPeter Zijlstra struct ww_acquire_ctx *ww_ctx) 55add46132SPeter Zijlstra { 56add46132SPeter Zijlstra return 0; 57add46132SPeter Zijlstra } 58add46132SPeter Zijlstra 59add46132SPeter Zijlstra #else 60add46132SPeter Zijlstra # define build_ww_mutex() (true) 61add46132SPeter Zijlstra # define ww_container_of(rtm) container_of(rtm, struct ww_mutex, base) 62add46132SPeter Zijlstra # include "ww_mutex.h" 63add46132SPeter Zijlstra #endif 64add46132SPeter Zijlstra 651696a8beSPeter Zijlstra /* 661696a8beSPeter Zijlstra * lock->owner state tracking: 671696a8beSPeter Zijlstra * 681696a8beSPeter Zijlstra * lock->owner holds the task_struct pointer of the owner. 
Bit 0 691696a8beSPeter Zijlstra * is used to keep track of the "lock has waiters" state. 701696a8beSPeter Zijlstra * 711696a8beSPeter Zijlstra * owner bit0 721696a8beSPeter Zijlstra * NULL 0 lock is free (fast acquire possible) 731696a8beSPeter Zijlstra * NULL 1 lock is free and has waiters and the top waiter 741696a8beSPeter Zijlstra * is going to take the lock* 751696a8beSPeter Zijlstra * taskpointer 0 lock is held (fast release possible) 761696a8beSPeter Zijlstra * taskpointer 1 lock is held and has waiters** 771696a8beSPeter Zijlstra * 781696a8beSPeter Zijlstra * The fast atomic compare exchange based acquire and release is only 791696a8beSPeter Zijlstra * possible when bit 0 of lock->owner is 0. 801696a8beSPeter Zijlstra * 811696a8beSPeter Zijlstra * (*) It also can be a transitional state when grabbing the lock 821696a8beSPeter Zijlstra * with ->wait_lock is held. To prevent any fast path cmpxchg to the lock, 831696a8beSPeter Zijlstra * we need to set the bit0 before looking at the lock, and the owner may be 841696a8beSPeter Zijlstra * NULL in this small time, hence this can be a transitional state. 851696a8beSPeter Zijlstra * 861696a8beSPeter Zijlstra * (**) There is a small time when bit 0 is set but there are no 871696a8beSPeter Zijlstra * waiters. This can happen when grabbing the lock in the slow path. 881696a8beSPeter Zijlstra * To prevent a cmpxchg of the owner releasing the lock, we need to 891696a8beSPeter Zijlstra * set this bit before looking at the lock. 901696a8beSPeter Zijlstra */ 911696a8beSPeter Zijlstra 921c0908d8SMel Gorman static __always_inline struct task_struct * 931c0908d8SMel Gorman rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner) 941696a8beSPeter Zijlstra { 951696a8beSPeter Zijlstra unsigned long val = (unsigned long)owner; 961696a8beSPeter Zijlstra 971696a8beSPeter Zijlstra if (rt_mutex_has_waiters(lock)) 981696a8beSPeter Zijlstra val |= RT_MUTEX_HAS_WAITERS; 991696a8beSPeter Zijlstra 1001c0908d8SMel Gorman return (struct task_struct *)val; 1011c0908d8SMel Gorman } 1021c0908d8SMel Gorman 1031c0908d8SMel Gorman static __always_inline void 1041c0908d8SMel Gorman rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner) 1051c0908d8SMel Gorman { 1061c0908d8SMel Gorman /* 1071c0908d8SMel Gorman * lock->wait_lock is held but explicit acquire semantics are needed 1081c0908d8SMel Gorman * for a new lock owner so WRITE_ONCE is insufficient. 1091c0908d8SMel Gorman */ 1101c0908d8SMel Gorman xchg_acquire(&lock->owner, rt_mutex_owner_encode(lock, owner)); 1111c0908d8SMel Gorman } 1121c0908d8SMel Gorman 1131c0908d8SMel Gorman static __always_inline void rt_mutex_clear_owner(struct rt_mutex_base *lock) 1141c0908d8SMel Gorman { 1151c0908d8SMel Gorman /* lock->wait_lock is held so the unlock provides release semantics. 
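 * Setting a new owner, by contrast, is an acquire operation, hence the
 * xchg_acquire() in rt_mutex_set_owner() above: it pairs with the
 * release half of the previous owner's unlock, e.g. the fast path
 * rt_mutex_cmpxchg_release() further down.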
*/ 1161c0908d8SMel Gorman WRITE_ONCE(lock->owner, rt_mutex_owner_encode(lock, NULL)); 1171696a8beSPeter Zijlstra } 1181696a8beSPeter Zijlstra 119830e6accSPeter Zijlstra static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock) 1201696a8beSPeter Zijlstra { 1211696a8beSPeter Zijlstra lock->owner = (struct task_struct *) 1221696a8beSPeter Zijlstra ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS); 1231696a8beSPeter Zijlstra } 1241696a8beSPeter Zijlstra 1251c0908d8SMel Gorman static __always_inline void 1261c0908d8SMel Gorman fixup_rt_mutex_waiters(struct rt_mutex_base *lock, bool acquire_lock) 1271696a8beSPeter Zijlstra { 128dbb26055SThomas Gleixner unsigned long owner, *p = (unsigned long *) &lock->owner; 129dbb26055SThomas Gleixner 130dbb26055SThomas Gleixner if (rt_mutex_has_waiters(lock)) 131dbb26055SThomas Gleixner return; 132dbb26055SThomas Gleixner 133dbb26055SThomas Gleixner /* 134dbb26055SThomas Gleixner * The rbtree has no waiters enqueued, now make sure that the 135dbb26055SThomas Gleixner * lock->owner still has the waiters bit set, otherwise the 136dbb26055SThomas Gleixner * following can happen: 137dbb26055SThomas Gleixner * 138dbb26055SThomas Gleixner * CPU 0 CPU 1 CPU2 139dbb26055SThomas Gleixner * l->owner=T1 140dbb26055SThomas Gleixner * rt_mutex_lock(l) 141dbb26055SThomas Gleixner * lock(l->lock) 142dbb26055SThomas Gleixner * l->owner = T1 | HAS_WAITERS; 143dbb26055SThomas Gleixner * enqueue(T2) 144dbb26055SThomas Gleixner * boost() 145dbb26055SThomas Gleixner * unlock(l->lock) 146dbb26055SThomas Gleixner * block() 147dbb26055SThomas Gleixner * 148dbb26055SThomas Gleixner * rt_mutex_lock(l) 149dbb26055SThomas Gleixner * lock(l->lock) 150dbb26055SThomas Gleixner * l->owner = T1 | HAS_WAITERS; 151dbb26055SThomas Gleixner * enqueue(T3) 152dbb26055SThomas Gleixner * boost() 153dbb26055SThomas Gleixner * unlock(l->lock) 154dbb26055SThomas Gleixner * block() 155dbb26055SThomas Gleixner * signal(->T2) signal(->T3) 156dbb26055SThomas Gleixner * lock(l->lock) 157dbb26055SThomas Gleixner * dequeue(T2) 158dbb26055SThomas Gleixner * deboost() 159dbb26055SThomas Gleixner * unlock(l->lock) 160dbb26055SThomas Gleixner * lock(l->lock) 161dbb26055SThomas Gleixner * dequeue(T3) 162dbb26055SThomas Gleixner * ==> wait list is empty 163dbb26055SThomas Gleixner * deboost() 164dbb26055SThomas Gleixner * unlock(l->lock) 165dbb26055SThomas Gleixner * lock(l->lock) 166dbb26055SThomas Gleixner * fixup_rt_mutex_waiters() 167dbb26055SThomas Gleixner * if (wait_list_empty(l) { 168dbb26055SThomas Gleixner * l->owner = owner 169dbb26055SThomas Gleixner * owner = l->owner & ~HAS_WAITERS; 170dbb26055SThomas Gleixner * ==> l->owner = T1 171dbb26055SThomas Gleixner * } 172dbb26055SThomas Gleixner * lock(l->lock) 173dbb26055SThomas Gleixner * rt_mutex_unlock(l) fixup_rt_mutex_waiters() 174dbb26055SThomas Gleixner * if (wait_list_empty(l) { 175dbb26055SThomas Gleixner * owner = l->owner & ~HAS_WAITERS; 176dbb26055SThomas Gleixner * cmpxchg(l->owner, T1, NULL) 177dbb26055SThomas Gleixner * ===> Success (l->owner = NULL) 178dbb26055SThomas Gleixner * 179dbb26055SThomas Gleixner * l->owner = owner 180dbb26055SThomas Gleixner * ==> l->owner = T1 181dbb26055SThomas Gleixner * } 182dbb26055SThomas Gleixner * 183dbb26055SThomas Gleixner * With the check for the waiter bit in place T3 on CPU2 will not 184dbb26055SThomas Gleixner * overwrite. 
All tasks fiddling with the waiters bit are 185dbb26055SThomas Gleixner * serialized by l->lock, so nothing else can modify the waiters 186dbb26055SThomas Gleixner * bit. If the bit is set then nothing can change l->owner either 187dbb26055SThomas Gleixner * so the simple RMW is safe. The cmpxchg() will simply fail if it 188dbb26055SThomas Gleixner * happens in the middle of the RMW because the waiters bit is 189dbb26055SThomas Gleixner * still set. 190dbb26055SThomas Gleixner */ 191dbb26055SThomas Gleixner owner = READ_ONCE(*p); 1921c0908d8SMel Gorman if (owner & RT_MUTEX_HAS_WAITERS) { 1931c0908d8SMel Gorman /* 1941c0908d8SMel Gorman * See rt_mutex_set_owner() and rt_mutex_clear_owner() on 1951c0908d8SMel Gorman * why xchg_acquire() is used for updating owner for 1961c0908d8SMel Gorman * locking and WRITE_ONCE() for unlocking. 1971c0908d8SMel Gorman * 1981c0908d8SMel Gorman * WRITE_ONCE() would work for the acquire case too, but 1991c0908d8SMel Gorman * in case that the lock acquisition failed it might 2001c0908d8SMel Gorman * force other lockers into the slow path unnecessarily. 2011c0908d8SMel Gorman */ 2021c0908d8SMel Gorman if (acquire_lock) 2031c0908d8SMel Gorman xchg_acquire(p, owner & ~RT_MUTEX_HAS_WAITERS); 2041c0908d8SMel Gorman else 205dbb26055SThomas Gleixner WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS); 2061696a8beSPeter Zijlstra } 2071c0908d8SMel Gorman } 2081696a8beSPeter Zijlstra 2091696a8beSPeter Zijlstra /* 210cede8841SSebastian Andrzej Siewior * We can speed up the acquire/release, if there's no debugging state to be 211cede8841SSebastian Andrzej Siewior * set up. 2121696a8beSPeter Zijlstra */ 213cede8841SSebastian Andrzej Siewior #ifndef CONFIG_DEBUG_RT_MUTEXES 214830e6accSPeter Zijlstra static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock, 21578515930SSebastian Andrzej Siewior struct task_struct *old, 21678515930SSebastian Andrzej Siewior struct task_struct *new) 21778515930SSebastian Andrzej Siewior { 218709e0b62SThomas Gleixner return try_cmpxchg_acquire(&lock->owner, &old, new); 21978515930SSebastian Andrzej Siewior } 22078515930SSebastian Andrzej Siewior 221af9f0063SSebastian Andrzej Siewior static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock) 222af9f0063SSebastian Andrzej Siewior { 223af9f0063SSebastian Andrzej Siewior return rt_mutex_cmpxchg_acquire(lock, NULL, current); 224af9f0063SSebastian Andrzej Siewior } 225af9f0063SSebastian Andrzej Siewior 226830e6accSPeter Zijlstra static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock, 22778515930SSebastian Andrzej Siewior struct task_struct *old, 22878515930SSebastian Andrzej Siewior struct task_struct *new) 22978515930SSebastian Andrzej Siewior { 230709e0b62SThomas Gleixner return try_cmpxchg_release(&lock->owner, &old, new); 23178515930SSebastian Andrzej Siewior } 232700318d1SDavidlohr Bueso 233700318d1SDavidlohr Bueso /* 234700318d1SDavidlohr Bueso * Callers must hold the ->wait_lock -- which is the whole purpose as we force 235700318d1SDavidlohr Bueso * all future threads that attempt to [Rmw] the lock to the slowpath. As such 236700318d1SDavidlohr Bueso * relaxed semantics suffice. 
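 *
 * An illustrative, simplified caller (see try_to_take_rt_mutex() below):
 *
 *	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 *	mark_rt_mutex_waiters(lock);	// force everybody into the slow path
 *	if (rt_mutex_owner(lock))
 *		return 0;		// still owned, the caller has to block or bail out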
237700318d1SDavidlohr Bueso */ 238830e6accSPeter Zijlstra static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock) 2391696a8beSPeter Zijlstra { 2401696a8beSPeter Zijlstra unsigned long owner, *p = (unsigned long *) &lock->owner; 2411696a8beSPeter Zijlstra 2421696a8beSPeter Zijlstra do { 2431696a8beSPeter Zijlstra owner = *p; 244700318d1SDavidlohr Bueso } while (cmpxchg_relaxed(p, owner, 245700318d1SDavidlohr Bueso owner | RT_MUTEX_HAS_WAITERS) != owner); 2461c0908d8SMel Gorman 2471c0908d8SMel Gorman /* 2481c0908d8SMel Gorman * The cmpxchg loop above is relaxed to avoid back-to-back ACQUIRE 2491c0908d8SMel Gorman * operations in the event of contention. Ensure the successful 2501c0908d8SMel Gorman * cmpxchg is visible. 2511c0908d8SMel Gorman */ 2521c0908d8SMel Gorman smp_mb__after_atomic(); 2531696a8beSPeter Zijlstra } 25427e35715SThomas Gleixner 25527e35715SThomas Gleixner /* 25627e35715SThomas Gleixner * Safe fastpath aware unlock: 25727e35715SThomas Gleixner * 1) Clear the waiters bit 25827e35715SThomas Gleixner * 2) Drop lock->wait_lock 25927e35715SThomas Gleixner * 3) Try to unlock the lock with cmpxchg 26027e35715SThomas Gleixner */ 261830e6accSPeter Zijlstra static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock, 262b4abf910SThomas Gleixner unsigned long flags) 26327e35715SThomas Gleixner __releases(lock->wait_lock) 26427e35715SThomas Gleixner { 26527e35715SThomas Gleixner struct task_struct *owner = rt_mutex_owner(lock); 26627e35715SThomas Gleixner 26727e35715SThomas Gleixner clear_rt_mutex_waiters(lock); 268b4abf910SThomas Gleixner raw_spin_unlock_irqrestore(&lock->wait_lock, flags); 26927e35715SThomas Gleixner /* 27027e35715SThomas Gleixner * If a new waiter comes in between the unlock and the cmpxchg 27127e35715SThomas Gleixner * we have two situations: 27227e35715SThomas Gleixner * 27327e35715SThomas Gleixner * unlock(wait_lock); 27427e35715SThomas Gleixner * lock(wait_lock); 27527e35715SThomas Gleixner * cmpxchg(p, owner, 0) == owner 27627e35715SThomas Gleixner * mark_rt_mutex_waiters(lock); 27727e35715SThomas Gleixner * acquire(lock); 27827e35715SThomas Gleixner * or: 27927e35715SThomas Gleixner * 28027e35715SThomas Gleixner * unlock(wait_lock); 28127e35715SThomas Gleixner * lock(wait_lock); 28227e35715SThomas Gleixner * mark_rt_mutex_waiters(lock); 28327e35715SThomas Gleixner * 28427e35715SThomas Gleixner * cmpxchg(p, owner, 0) != owner 28527e35715SThomas Gleixner * enqueue_waiter(); 28627e35715SThomas Gleixner * unlock(wait_lock); 28727e35715SThomas Gleixner * lock(wait_lock); 28827e35715SThomas Gleixner * wake waiter(); 28927e35715SThomas Gleixner * unlock(wait_lock); 29027e35715SThomas Gleixner * lock(wait_lock); 29127e35715SThomas Gleixner * acquire(lock); 29227e35715SThomas Gleixner */ 293700318d1SDavidlohr Bueso return rt_mutex_cmpxchg_release(lock, owner, NULL); 29427e35715SThomas Gleixner } 29527e35715SThomas Gleixner 2961696a8beSPeter Zijlstra #else 297830e6accSPeter Zijlstra static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock, 29878515930SSebastian Andrzej Siewior struct task_struct *old, 29978515930SSebastian Andrzej Siewior struct task_struct *new) 30078515930SSebastian Andrzej Siewior { 30178515930SSebastian Andrzej Siewior return false; 30278515930SSebastian Andrzej Siewior 30378515930SSebastian Andrzej Siewior } 30478515930SSebastian Andrzej Siewior 305af9f0063SSebastian Andrzej Siewior static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock); 306af9f0063SSebastian Andrzej Siewior 
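/*
 * rt_mutex_slowtrylock() is defined further down; the forward declaration
 * above lets the debug variant of rt_mutex_try_acquire() below fall back
 * to it, because the cmpxchg() based fast path is unconditionally disabled
 * in this configuration.
 */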
307af9f0063SSebastian Andrzej Siewior static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock) 308af9f0063SSebastian Andrzej Siewior { 309af9f0063SSebastian Andrzej Siewior /* 310af9f0063SSebastian Andrzej Siewior * With debug enabled rt_mutex_cmpxchg trylock() will always fail. 311af9f0063SSebastian Andrzej Siewior * 312af9f0063SSebastian Andrzej Siewior * Avoid unconditionally taking the slow path by using 313af9f0063SSebastian Andrzej Siewior * rt_mutex_slow_trylock() which is covered by the debug code and can 314af9f0063SSebastian Andrzej Siewior * acquire a non-contended rtmutex. 315af9f0063SSebastian Andrzej Siewior */ 316af9f0063SSebastian Andrzej Siewior return rt_mutex_slowtrylock(lock); 317af9f0063SSebastian Andrzej Siewior } 318af9f0063SSebastian Andrzej Siewior 319830e6accSPeter Zijlstra static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock, 32078515930SSebastian Andrzej Siewior struct task_struct *old, 32178515930SSebastian Andrzej Siewior struct task_struct *new) 32278515930SSebastian Andrzej Siewior { 32378515930SSebastian Andrzej Siewior return false; 32478515930SSebastian Andrzej Siewior } 325700318d1SDavidlohr Bueso 326830e6accSPeter Zijlstra static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock) 3271696a8beSPeter Zijlstra { 3281696a8beSPeter Zijlstra lock->owner = (struct task_struct *) 3291696a8beSPeter Zijlstra ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS); 3301696a8beSPeter Zijlstra } 33127e35715SThomas Gleixner 33227e35715SThomas Gleixner /* 33327e35715SThomas Gleixner * Simple slow path only version: lock->owner is protected by lock->wait_lock. 33427e35715SThomas Gleixner */ 335830e6accSPeter Zijlstra static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock, 336b4abf910SThomas Gleixner unsigned long flags) 33727e35715SThomas Gleixner __releases(lock->wait_lock) 33827e35715SThomas Gleixner { 33927e35715SThomas Gleixner lock->owner = NULL; 340b4abf910SThomas Gleixner raw_spin_unlock_irqrestore(&lock->wait_lock, flags); 34127e35715SThomas Gleixner return true; 34227e35715SThomas Gleixner } 3431696a8beSPeter Zijlstra #endif 3441696a8beSPeter Zijlstra 345715f7f9eSPeter Zijlstra static __always_inline int __waiter_prio(struct task_struct *task) 346715f7f9eSPeter Zijlstra { 347715f7f9eSPeter Zijlstra int prio = task->prio; 348715f7f9eSPeter Zijlstra 349715f7f9eSPeter Zijlstra if (!rt_prio(prio)) 350715f7f9eSPeter Zijlstra return DEFAULT_PRIO; 351715f7f9eSPeter Zijlstra 352715f7f9eSPeter Zijlstra return prio; 353715f7f9eSPeter Zijlstra } 354715f7f9eSPeter Zijlstra 355f7853c34SPeter Zijlstra /* 356f7853c34SPeter Zijlstra * Update the waiter->tree copy of the sort keys. 357f7853c34SPeter Zijlstra */ 358715f7f9eSPeter Zijlstra static __always_inline void 359715f7f9eSPeter Zijlstra waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task) 360715f7f9eSPeter Zijlstra { 361f7853c34SPeter Zijlstra lockdep_assert_held(&waiter->lock->wait_lock); 362f7853c34SPeter Zijlstra lockdep_assert(RB_EMPTY_NODE(&waiter->tree.entry)); 363f7853c34SPeter Zijlstra 364f7853c34SPeter Zijlstra waiter->tree.prio = __waiter_prio(task); 365f7853c34SPeter Zijlstra waiter->tree.deadline = task->dl.deadline; 366715f7f9eSPeter Zijlstra } 367715f7f9eSPeter Zijlstra 36819830e55SPeter Zijlstra /* 369f7853c34SPeter Zijlstra * Update the waiter->pi_tree copy of the sort keys (from the tree copy). 
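 *
 * waiter->tree is the node queued in @lock's waiter tree and is protected
 * by lock->wait_lock; waiter->pi_tree is the copy queued in the owner's
 * pi_waiters tree and is additionally protected by the owner's pi_lock
 * (see the lockdep assertions below).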
37019830e55SPeter Zijlstra */ 371f7853c34SPeter Zijlstra static __always_inline void 372f7853c34SPeter Zijlstra waiter_clone_prio(struct rt_mutex_waiter *waiter, struct task_struct *task) 373f7853c34SPeter Zijlstra { 374f7853c34SPeter Zijlstra lockdep_assert_held(&waiter->lock->wait_lock); 375f7853c34SPeter Zijlstra lockdep_assert_held(&task->pi_lock); 376f7853c34SPeter Zijlstra lockdep_assert(RB_EMPTY_NODE(&waiter->pi_tree.entry)); 37719830e55SPeter Zijlstra 378f7853c34SPeter Zijlstra waiter->pi_tree.prio = waiter->tree.prio; 379f7853c34SPeter Zijlstra waiter->pi_tree.deadline = waiter->tree.deadline; 380f7853c34SPeter Zijlstra } 381f7853c34SPeter Zijlstra 382f7853c34SPeter Zijlstra /* 383f7853c34SPeter Zijlstra * Only use with rt_waiter_node_{less,equal}() 384f7853c34SPeter Zijlstra */ 385f7853c34SPeter Zijlstra #define task_to_waiter_node(p) \ 386f7853c34SPeter Zijlstra &(struct rt_waiter_node){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline } 387f7853c34SPeter Zijlstra #define task_to_waiter(p) \ 388f7853c34SPeter Zijlstra &(struct rt_mutex_waiter){ .tree = *task_to_waiter_node(p) } 389f7853c34SPeter Zijlstra 390f7853c34SPeter Zijlstra static __always_inline int rt_waiter_node_less(struct rt_waiter_node *left, 391f7853c34SPeter Zijlstra struct rt_waiter_node *right) 392fb00aca4SPeter Zijlstra { 3932d3d891dSDario Faggioli if (left->prio < right->prio) 394fb00aca4SPeter Zijlstra return 1; 395fb00aca4SPeter Zijlstra 3961696a8beSPeter Zijlstra /* 3972d3d891dSDario Faggioli * If both waiters have dl_prio(), we check the deadlines of the 3982d3d891dSDario Faggioli * associated tasks. 3992d3d891dSDario Faggioli * If left waiter has a dl_prio(), and we didn't return 1 above, 4002d3d891dSDario Faggioli * then right waiter has a dl_prio() too. 401fb00aca4SPeter Zijlstra */ 4022d3d891dSDario Faggioli if (dl_prio(left->prio)) 403e0aad5b4SPeter Zijlstra return dl_time_before(left->deadline, right->deadline); 404fb00aca4SPeter Zijlstra 405fb00aca4SPeter Zijlstra return 0; 406fb00aca4SPeter Zijlstra } 407fb00aca4SPeter Zijlstra 408f7853c34SPeter Zijlstra static __always_inline int rt_waiter_node_equal(struct rt_waiter_node *left, 409f7853c34SPeter Zijlstra struct rt_waiter_node *right) 41019830e55SPeter Zijlstra { 41119830e55SPeter Zijlstra if (left->prio != right->prio) 41219830e55SPeter Zijlstra return 0; 41319830e55SPeter Zijlstra 41419830e55SPeter Zijlstra /* 41519830e55SPeter Zijlstra * If both waiters have dl_prio(), we check the deadlines of the 41619830e55SPeter Zijlstra * associated tasks. 41719830e55SPeter Zijlstra * If left waiter has a dl_prio(), and we didn't return 0 above, 41819830e55SPeter Zijlstra * then right waiter has a dl_prio() too. 
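 *
 * E.g. two SCHED_FIFO waiters with the same priority compare as equal
 * here, whereas two SCHED_DEADLINE waiters additionally need the same
 * absolute deadline.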
41919830e55SPeter Zijlstra */ 42019830e55SPeter Zijlstra if (dl_prio(left->prio)) 42119830e55SPeter Zijlstra return left->deadline == right->deadline; 42219830e55SPeter Zijlstra 42319830e55SPeter Zijlstra return 1; 42419830e55SPeter Zijlstra } 42519830e55SPeter Zijlstra 42648eb3f4fSGregory Haskins static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter, 42748eb3f4fSGregory Haskins struct rt_mutex_waiter *top_waiter) 42848eb3f4fSGregory Haskins { 429f7853c34SPeter Zijlstra if (rt_waiter_node_less(&waiter->tree, &top_waiter->tree)) 43048eb3f4fSGregory Haskins return true; 43148eb3f4fSGregory Haskins 43248eb3f4fSGregory Haskins #ifdef RT_MUTEX_BUILD_SPINLOCKS 43348eb3f4fSGregory Haskins /* 43448eb3f4fSGregory Haskins * Note that RT tasks are excluded from same priority (lateral) 43548eb3f4fSGregory Haskins * steals to prevent the introduction of an unbounded latency. 43648eb3f4fSGregory Haskins */ 437f7853c34SPeter Zijlstra if (rt_prio(waiter->tree.prio) || dl_prio(waiter->tree.prio)) 43848eb3f4fSGregory Haskins return false; 43948eb3f4fSGregory Haskins 440f7853c34SPeter Zijlstra return rt_waiter_node_equal(&waiter->tree, &top_waiter->tree); 44148eb3f4fSGregory Haskins #else 44248eb3f4fSGregory Haskins return false; 44348eb3f4fSGregory Haskins #endif 44448eb3f4fSGregory Haskins } 44548eb3f4fSGregory Haskins 4465a798725SPeter Zijlstra #define __node_2_waiter(node) \ 447f7853c34SPeter Zijlstra rb_entry((node), struct rt_mutex_waiter, tree.entry) 4485a798725SPeter Zijlstra 449d7a2edb8SThomas Gleixner static __always_inline bool __waiter_less(struct rb_node *a, const struct rb_node *b) 4505a798725SPeter Zijlstra { 451add46132SPeter Zijlstra struct rt_mutex_waiter *aw = __node_2_waiter(a); 452add46132SPeter Zijlstra struct rt_mutex_waiter *bw = __node_2_waiter(b); 453add46132SPeter Zijlstra 454f7853c34SPeter Zijlstra if (rt_waiter_node_less(&aw->tree, &bw->tree)) 455add46132SPeter Zijlstra return 1; 456add46132SPeter Zijlstra 457add46132SPeter Zijlstra if (!build_ww_mutex()) 458add46132SPeter Zijlstra return 0; 459add46132SPeter Zijlstra 460f7853c34SPeter Zijlstra if (rt_waiter_node_less(&bw->tree, &aw->tree)) 461add46132SPeter Zijlstra return 0; 462add46132SPeter Zijlstra 463add46132SPeter Zijlstra /* NOTE: relies on waiter->ww_ctx being set before insertion */ 464add46132SPeter Zijlstra if (aw->ww_ctx) { 465add46132SPeter Zijlstra if (!bw->ww_ctx) 466add46132SPeter Zijlstra return 1; 467add46132SPeter Zijlstra 468add46132SPeter Zijlstra return (signed long)(aw->ww_ctx->stamp - 469add46132SPeter Zijlstra bw->ww_ctx->stamp) < 0; 470add46132SPeter Zijlstra } 471add46132SPeter Zijlstra 472add46132SPeter Zijlstra return 0; 4735a798725SPeter Zijlstra } 4745a798725SPeter Zijlstra 475d7a2edb8SThomas Gleixner static __always_inline void 476830e6accSPeter Zijlstra rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) 477fb00aca4SPeter Zijlstra { 478f7853c34SPeter Zijlstra lockdep_assert_held(&lock->wait_lock); 479f7853c34SPeter Zijlstra 480f7853c34SPeter Zijlstra rb_add_cached(&waiter->tree.entry, &lock->waiters, __waiter_less); 481fb00aca4SPeter Zijlstra } 482fb00aca4SPeter Zijlstra 483d7a2edb8SThomas Gleixner static __always_inline void 484830e6accSPeter Zijlstra rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) 485fb00aca4SPeter Zijlstra { 486f7853c34SPeter Zijlstra lockdep_assert_held(&lock->wait_lock); 487f7853c34SPeter Zijlstra 488f7853c34SPeter Zijlstra if (RB_EMPTY_NODE(&waiter->tree.entry)) 489fb00aca4SPeter Zijlstra return; 
490fb00aca4SPeter Zijlstra 491f7853c34SPeter Zijlstra rb_erase_cached(&waiter->tree.entry, &lock->waiters); 492f7853c34SPeter Zijlstra RB_CLEAR_NODE(&waiter->tree.entry); 493fb00aca4SPeter Zijlstra } 494fb00aca4SPeter Zijlstra 495f7853c34SPeter Zijlstra #define __node_2_rt_node(node) \ 496f7853c34SPeter Zijlstra rb_entry((node), struct rt_waiter_node, entry) 4975a798725SPeter Zijlstra 498f7853c34SPeter Zijlstra static __always_inline bool __pi_waiter_less(struct rb_node *a, const struct rb_node *b) 4995a798725SPeter Zijlstra { 500f7853c34SPeter Zijlstra return rt_waiter_node_less(__node_2_rt_node(a), __node_2_rt_node(b)); 5015a798725SPeter Zijlstra } 5025a798725SPeter Zijlstra 503d7a2edb8SThomas Gleixner static __always_inline void 504fb00aca4SPeter Zijlstra rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) 505fb00aca4SPeter Zijlstra { 506f7853c34SPeter Zijlstra lockdep_assert_held(&task->pi_lock); 507f7853c34SPeter Zijlstra 508f7853c34SPeter Zijlstra rb_add_cached(&waiter->pi_tree.entry, &task->pi_waiters, __pi_waiter_less); 509fb00aca4SPeter Zijlstra } 510fb00aca4SPeter Zijlstra 511d7a2edb8SThomas Gleixner static __always_inline void 512fb00aca4SPeter Zijlstra rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) 513fb00aca4SPeter Zijlstra { 514f7853c34SPeter Zijlstra lockdep_assert_held(&task->pi_lock); 515f7853c34SPeter Zijlstra 516f7853c34SPeter Zijlstra if (RB_EMPTY_NODE(&waiter->pi_tree.entry)) 517fb00aca4SPeter Zijlstra return; 518fb00aca4SPeter Zijlstra 519f7853c34SPeter Zijlstra rb_erase_cached(&waiter->pi_tree.entry, &task->pi_waiters); 520f7853c34SPeter Zijlstra RB_CLEAR_NODE(&waiter->pi_tree.entry); 521fb00aca4SPeter Zijlstra } 522fb00aca4SPeter Zijlstra 523f7853c34SPeter Zijlstra static __always_inline void rt_mutex_adjust_prio(struct rt_mutex_base *lock, 524f7853c34SPeter Zijlstra struct task_struct *p) 525e96a7705SXunlei Pang { 526acd58620SPeter Zijlstra struct task_struct *pi_task = NULL; 527e96a7705SXunlei Pang 528f7853c34SPeter Zijlstra lockdep_assert_held(&lock->wait_lock); 529f7853c34SPeter Zijlstra lockdep_assert(rt_mutex_owner(lock) == p); 530acd58620SPeter Zijlstra lockdep_assert_held(&p->pi_lock); 531e96a7705SXunlei Pang 532acd58620SPeter Zijlstra if (task_has_pi_waiters(p)) 533acd58620SPeter Zijlstra pi_task = task_top_pi_waiter(p)->task; 5341696a8beSPeter Zijlstra 535acd58620SPeter Zijlstra rt_mutex_setprio(p, pi_task); 5361696a8beSPeter Zijlstra } 5371696a8beSPeter Zijlstra 538b576e640SThomas Gleixner /* RT mutex specific wake_q wrappers */ 5399321f815SThomas Gleixner static __always_inline void rt_mutex_wake_q_add_task(struct rt_wake_q_head *wqh, 5409321f815SThomas Gleixner struct task_struct *task, 5419321f815SThomas Gleixner unsigned int wake_state) 5429321f815SThomas Gleixner { 5439321f815SThomas Gleixner if (IS_ENABLED(CONFIG_PREEMPT_RT) && wake_state == TASK_RTLOCK_WAIT) { 5449321f815SThomas Gleixner if (IS_ENABLED(CONFIG_PROVE_LOCKING)) 5459321f815SThomas Gleixner WARN_ON_ONCE(wqh->rtlock_task); 5469321f815SThomas Gleixner get_task_struct(task); 5479321f815SThomas Gleixner wqh->rtlock_task = task; 5489321f815SThomas Gleixner } else { 5499321f815SThomas Gleixner wake_q_add(&wqh->head, task); 5509321f815SThomas Gleixner } 5519321f815SThomas Gleixner } 5529321f815SThomas Gleixner 553b576e640SThomas Gleixner static __always_inline void rt_mutex_wake_q_add(struct rt_wake_q_head *wqh, 554b576e640SThomas Gleixner struct rt_mutex_waiter *w) 555b576e640SThomas Gleixner { 5569321f815SThomas Gleixner 
rt_mutex_wake_q_add_task(wqh, w->task, w->wake_state); 557456cfbc6SThomas Gleixner } 558b576e640SThomas Gleixner 559b576e640SThomas Gleixner static __always_inline void rt_mutex_wake_up_q(struct rt_wake_q_head *wqh) 560b576e640SThomas Gleixner { 561456cfbc6SThomas Gleixner if (IS_ENABLED(CONFIG_PREEMPT_RT) && wqh->rtlock_task) { 562456cfbc6SThomas Gleixner wake_up_state(wqh->rtlock_task, TASK_RTLOCK_WAIT); 563456cfbc6SThomas Gleixner put_task_struct(wqh->rtlock_task); 564456cfbc6SThomas Gleixner wqh->rtlock_task = NULL; 565456cfbc6SThomas Gleixner } 566456cfbc6SThomas Gleixner 567456cfbc6SThomas Gleixner if (!wake_q_empty(&wqh->head)) 568b576e640SThomas Gleixner wake_up_q(&wqh->head); 569b576e640SThomas Gleixner 570b576e640SThomas Gleixner /* Pairs with preempt_disable() in mark_wakeup_next_waiter() */ 571b576e640SThomas Gleixner preempt_enable(); 572b576e640SThomas Gleixner } 573b576e640SThomas Gleixner 5741696a8beSPeter Zijlstra /* 5758930ed80SThomas Gleixner * Deadlock detection is conditional: 5768930ed80SThomas Gleixner * 5778930ed80SThomas Gleixner * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted 5788930ed80SThomas Gleixner * if the detect argument is == RT_MUTEX_FULL_CHAINWALK. 5798930ed80SThomas Gleixner * 5808930ed80SThomas Gleixner * If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always 5818930ed80SThomas Gleixner * conducted independent of the detect argument. 5828930ed80SThomas Gleixner * 5838930ed80SThomas Gleixner * If the waiter argument is NULL this indicates the deboost path and 5848930ed80SThomas Gleixner * deadlock detection is disabled independent of the detect argument 5858930ed80SThomas Gleixner * and the config settings. 5868930ed80SThomas Gleixner */ 587d7a2edb8SThomas Gleixner static __always_inline bool 588d7a2edb8SThomas Gleixner rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter, 5898930ed80SThomas Gleixner enum rtmutex_chainwalk chwalk) 5908930ed80SThomas Gleixner { 59107d25971SZhen Lei if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES)) 592f7efc479SThomas Gleixner return waiter != NULL; 593f7efc479SThomas Gleixner return chwalk == RT_MUTEX_FULL_CHAINWALK; 5948930ed80SThomas Gleixner } 5958930ed80SThomas Gleixner 596830e6accSPeter Zijlstra static __always_inline struct rt_mutex_base *task_blocked_on_lock(struct task_struct *p) 59782084984SThomas Gleixner { 59882084984SThomas Gleixner return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL; 59982084984SThomas Gleixner } 60082084984SThomas Gleixner 6011696a8beSPeter Zijlstra /* 6021696a8beSPeter Zijlstra * Adjust the priority chain. Also used for deadlock detection. 6031696a8beSPeter Zijlstra * Decreases task's usage by one - may thus free the task. 6041696a8beSPeter Zijlstra * 60582084984SThomas Gleixner * @task: the task owning the mutex (owner) for which a chain walk is 60682084984SThomas Gleixner * probably needed 607e6beaa36STom(JeHyeon) Yeon * @chwalk: do we have to carry out deadlock detection? 6081696a8beSPeter Zijlstra * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck 6091696a8beSPeter Zijlstra * things for a task that has just got its priority adjusted, and 6101696a8beSPeter Zijlstra * is waiting on a mutex) 61182084984SThomas Gleixner * @next_lock: the mutex on which the owner of @orig_lock was blocked before 61282084984SThomas Gleixner * we dropped its pi_lock. Is never dereferenced, only used for 61382084984SThomas Gleixner * comparison to detect lock chain changes. 
6141696a8beSPeter Zijlstra * @orig_waiter: rt_mutex_waiter struct for the task that has just donated 6151696a8beSPeter Zijlstra * its priority to the mutex owner (can be NULL in the case 6161696a8beSPeter Zijlstra * depicted above or if the top waiter is gone away and we are 6171696a8beSPeter Zijlstra * actually deboosting the owner) 6181696a8beSPeter Zijlstra * @top_task: the current top waiter 6191696a8beSPeter Zijlstra * 6201696a8beSPeter Zijlstra * Returns 0 or -EDEADLK. 6213eb65aeaSThomas Gleixner * 6223eb65aeaSThomas Gleixner * Chain walk basics and protection scope 6233eb65aeaSThomas Gleixner * 6243eb65aeaSThomas Gleixner * [R] refcount on task 625f7853c34SPeter Zijlstra * [Pn] task->pi_lock held 6263eb65aeaSThomas Gleixner * [L] rtmutex->wait_lock held 6273eb65aeaSThomas Gleixner * 628f7853c34SPeter Zijlstra * Normal locking order: 629f7853c34SPeter Zijlstra * 630f7853c34SPeter Zijlstra * rtmutex->wait_lock 631f7853c34SPeter Zijlstra * task->pi_lock 632f7853c34SPeter Zijlstra * 6333eb65aeaSThomas Gleixner * Step Description Protected by 6343eb65aeaSThomas Gleixner * function arguments: 6353eb65aeaSThomas Gleixner * @task [R] 6363eb65aeaSThomas Gleixner * @orig_lock if != NULL @top_task is blocked on it 6373eb65aeaSThomas Gleixner * @next_lock Unprotected. Cannot be 6383eb65aeaSThomas Gleixner * dereferenced. Only used for 6393eb65aeaSThomas Gleixner * comparison. 6403eb65aeaSThomas Gleixner * @orig_waiter if != NULL @top_task is blocked on it 6413eb65aeaSThomas Gleixner * @top_task current, or in case of proxy 6423eb65aeaSThomas Gleixner * locking protected by calling 6433eb65aeaSThomas Gleixner * code 6443eb65aeaSThomas Gleixner * again: 6453eb65aeaSThomas Gleixner * loop_sanity_check(); 6463eb65aeaSThomas Gleixner * retry: 647f7853c34SPeter Zijlstra * [1] lock(task->pi_lock); [R] acquire [P1] 648f7853c34SPeter Zijlstra * [2] waiter = task->pi_blocked_on; [P1] 649f7853c34SPeter Zijlstra * [3] check_exit_conditions_1(); [P1] 650f7853c34SPeter Zijlstra * [4] lock = waiter->lock; [P1] 651f7853c34SPeter Zijlstra * [5] if (!try_lock(lock->wait_lock)) { [P1] try to acquire [L] 652f7853c34SPeter Zijlstra * unlock(task->pi_lock); release [P1] 6533eb65aeaSThomas Gleixner * goto retry; 6543eb65aeaSThomas Gleixner * } 655f7853c34SPeter Zijlstra * [6] check_exit_conditions_2(); [P1] + [L] 656f7853c34SPeter Zijlstra * [7] requeue_lock_waiter(lock, waiter); [P1] + [L] 657f7853c34SPeter Zijlstra * [8] unlock(task->pi_lock); release [P1] 6583eb65aeaSThomas Gleixner * put_task_struct(task); release [R] 6593eb65aeaSThomas Gleixner * [9] check_exit_conditions_3(); [L] 6603eb65aeaSThomas Gleixner * [10] task = owner(lock); [L] 6613eb65aeaSThomas Gleixner * get_task_struct(task); [L] acquire [R] 662f7853c34SPeter Zijlstra * lock(task->pi_lock); [L] acquire [P2] 663f7853c34SPeter Zijlstra * [11] requeue_pi_waiter(tsk, waiters(lock));[P2] + [L] 664f7853c34SPeter Zijlstra * [12] check_exit_conditions_4(); [P2] + [L] 665f7853c34SPeter Zijlstra * [13] unlock(task->pi_lock); release [P2] 6663eb65aeaSThomas Gleixner * unlock(lock->wait_lock); release [L] 6673eb65aeaSThomas Gleixner * goto again; 668f7853c34SPeter Zijlstra * 669f7853c34SPeter Zijlstra * Where P1 is the blocking task and P2 is the lock owner; going up one step 670f7853c34SPeter Zijlstra * the owner becomes the next blocked task etc.. 
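 *
 * For illustration (task and lock names are made up): if T1 blocks on
 * lock L1 owned by T2, and T2 is itself blocked on L2 owned by T3, the
 * walk starts with task = T2: it requeues T2's waiter on L2 in [7],
 * updates T3's pi_waiters tree and priority in [11], and then continues
 * with task = T3, until it reaches an owner that is not blocked on a
 * lock or no further priority adjustment is needed.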
671f7853c34SPeter Zijlstra * 672f7853c34SPeter Zijlstra * 6731696a8beSPeter Zijlstra */ 674d7a2edb8SThomas Gleixner static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task, 6758930ed80SThomas Gleixner enum rtmutex_chainwalk chwalk, 676830e6accSPeter Zijlstra struct rt_mutex_base *orig_lock, 677830e6accSPeter Zijlstra struct rt_mutex_base *next_lock, 6781696a8beSPeter Zijlstra struct rt_mutex_waiter *orig_waiter, 6791696a8beSPeter Zijlstra struct task_struct *top_task) 6801696a8beSPeter Zijlstra { 6811696a8beSPeter Zijlstra struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter; 682a57594a1SThomas Gleixner struct rt_mutex_waiter *prerequeue_top_waiter; 6838930ed80SThomas Gleixner int ret = 0, depth = 0; 684830e6accSPeter Zijlstra struct rt_mutex_base *lock; 6858930ed80SThomas Gleixner bool detect_deadlock; 68667792e2cSThomas Gleixner bool requeue = true; 6871696a8beSPeter Zijlstra 6888930ed80SThomas Gleixner detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk); 6891696a8beSPeter Zijlstra 6901696a8beSPeter Zijlstra /* 6911696a8beSPeter Zijlstra * The (de)boosting is a step by step approach with a lot of 6921696a8beSPeter Zijlstra * pitfalls. We want this to be preemptible and we want hold a 6931696a8beSPeter Zijlstra * maximum of two locks per step. So we have to check 6941696a8beSPeter Zijlstra * carefully whether things change under us. 6951696a8beSPeter Zijlstra */ 6961696a8beSPeter Zijlstra again: 6973eb65aeaSThomas Gleixner /* 6983eb65aeaSThomas Gleixner * We limit the lock chain length for each invocation. 6993eb65aeaSThomas Gleixner */ 7001696a8beSPeter Zijlstra if (++depth > max_lock_depth) { 7011696a8beSPeter Zijlstra static int prev_max; 7021696a8beSPeter Zijlstra 7031696a8beSPeter Zijlstra /* 7041696a8beSPeter Zijlstra * Print this only once. If the admin changes the limit, 7051696a8beSPeter Zijlstra * print a new message when reaching the limit again. 7061696a8beSPeter Zijlstra */ 7071696a8beSPeter Zijlstra if (prev_max != max_lock_depth) { 7081696a8beSPeter Zijlstra prev_max = max_lock_depth; 7091696a8beSPeter Zijlstra printk(KERN_WARNING "Maximum lock depth %d reached " 7101696a8beSPeter Zijlstra "task: %s (%d)\n", max_lock_depth, 7111696a8beSPeter Zijlstra top_task->comm, task_pid_nr(top_task)); 7121696a8beSPeter Zijlstra } 7131696a8beSPeter Zijlstra put_task_struct(task); 7141696a8beSPeter Zijlstra 7153d5c9340SThomas Gleixner return -EDEADLK; 7161696a8beSPeter Zijlstra } 7173eb65aeaSThomas Gleixner 7183eb65aeaSThomas Gleixner /* 7193eb65aeaSThomas Gleixner * We are fully preemptible here and only hold the refcount on 7203eb65aeaSThomas Gleixner * @task. So everything can have changed under us since the 7213eb65aeaSThomas Gleixner * caller or our own code below (goto retry/again) dropped all 7223eb65aeaSThomas Gleixner * locks. 7233eb65aeaSThomas Gleixner */ 7241696a8beSPeter Zijlstra retry: 7251696a8beSPeter Zijlstra /* 7263eb65aeaSThomas Gleixner * [1] Task cannot go away as we did a get_task() before ! 7271696a8beSPeter Zijlstra */ 728b4abf910SThomas Gleixner raw_spin_lock_irq(&task->pi_lock); 7291696a8beSPeter Zijlstra 7303eb65aeaSThomas Gleixner /* 7313eb65aeaSThomas Gleixner * [2] Get the waiter on which @task is blocked on. 7323eb65aeaSThomas Gleixner */ 7331696a8beSPeter Zijlstra waiter = task->pi_blocked_on; 7343eb65aeaSThomas Gleixner 7353eb65aeaSThomas Gleixner /* 7363eb65aeaSThomas Gleixner * [3] check_exit_conditions_1() protected by task->pi_lock. 
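 * These are the checks directly below: @task is not blocked at all,
 * @orig_lock has been released by its owner, or @task is now blocked on
 * a different lock than the @next_lock we recorded.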
7373eb65aeaSThomas Gleixner */ 7383eb65aeaSThomas Gleixner 7391696a8beSPeter Zijlstra /* 7401696a8beSPeter Zijlstra * Check whether the end of the boosting chain has been 7411696a8beSPeter Zijlstra * reached or the state of the chain has changed while we 7421696a8beSPeter Zijlstra * dropped the locks. 7431696a8beSPeter Zijlstra */ 7441696a8beSPeter Zijlstra if (!waiter) 7451696a8beSPeter Zijlstra goto out_unlock_pi; 7461696a8beSPeter Zijlstra 7471696a8beSPeter Zijlstra /* 7481696a8beSPeter Zijlstra * Check the orig_waiter state. After we dropped the locks, 7491696a8beSPeter Zijlstra * the previous owner of the lock might have released the lock. 7501696a8beSPeter Zijlstra */ 7511696a8beSPeter Zijlstra if (orig_waiter && !rt_mutex_owner(orig_lock)) 7521696a8beSPeter Zijlstra goto out_unlock_pi; 7531696a8beSPeter Zijlstra 7541696a8beSPeter Zijlstra /* 75582084984SThomas Gleixner * We dropped all locks after taking a refcount on @task, so 75682084984SThomas Gleixner * the task might have moved on in the lock chain or even left 75782084984SThomas Gleixner * the chain completely and blocks now on an unrelated lock or 75882084984SThomas Gleixner * on @orig_lock. 75982084984SThomas Gleixner * 76082084984SThomas Gleixner * We stored the lock on which @task was blocked in @next_lock, 76182084984SThomas Gleixner * so we can detect the chain change. 76282084984SThomas Gleixner */ 76382084984SThomas Gleixner if (next_lock != waiter->lock) 76482084984SThomas Gleixner goto out_unlock_pi; 76582084984SThomas Gleixner 76682084984SThomas Gleixner /* 7676467822bSPeter Zijlstra * There could be 'spurious' loops in the lock graph due to ww_mutex, 7686467822bSPeter Zijlstra * consider: 7696467822bSPeter Zijlstra * 7706467822bSPeter Zijlstra * P1: A, ww_A, ww_B 7716467822bSPeter Zijlstra * P2: ww_B, ww_A 7726467822bSPeter Zijlstra * P3: A 7736467822bSPeter Zijlstra * 7746467822bSPeter Zijlstra * P3 should not return -EDEADLK because it gets trapped in the cycle 7756467822bSPeter Zijlstra * created by P1 and P2 (which will resolve -- and runs into 7766467822bSPeter Zijlstra * max_lock_depth above). Therefore disable detect_deadlock such that 7776467822bSPeter Zijlstra * the below termination condition can trigger once all relevant tasks 7786467822bSPeter Zijlstra * are boosted. 7796467822bSPeter Zijlstra * 7806467822bSPeter Zijlstra * Even when we start with ww_mutex we can disable deadlock detection, 7816467822bSPeter Zijlstra * since we would suppress a ww_mutex induced deadlock at [6] anyway. 7826467822bSPeter Zijlstra * Suppressing it here however is not sufficient since we might still 7836467822bSPeter Zijlstra * hit [6] due to adjustment driven iteration. 7846467822bSPeter Zijlstra * 7856467822bSPeter Zijlstra * NOTE: if someone were to create a deadlock between 2 ww_classes we'd 7866467822bSPeter Zijlstra * utterly fail to report it; lockdep should. 7876467822bSPeter Zijlstra */ 7886467822bSPeter Zijlstra if (IS_ENABLED(CONFIG_PREEMPT_RT) && waiter->ww_ctx && detect_deadlock) 7896467822bSPeter Zijlstra detect_deadlock = false; 7906467822bSPeter Zijlstra 7916467822bSPeter Zijlstra /* 7921696a8beSPeter Zijlstra * Drop out when the task has no waiters. Note, 7931696a8beSPeter Zijlstra * top_waiter can be NULL when we are in the deboosting 7941696a8beSPeter Zijlstra * mode!
7951696a8beSPeter Zijlstra */ 796397335f0SThomas Gleixner if (top_waiter) { 797397335f0SThomas Gleixner if (!task_has_pi_waiters(task)) 7981696a8beSPeter Zijlstra goto out_unlock_pi; 799397335f0SThomas Gleixner /* 800397335f0SThomas Gleixner * If deadlock detection is off, we stop here if we 80167792e2cSThomas Gleixner * are not the top pi waiter of the task. If deadlock 80267792e2cSThomas Gleixner * detection is enabled we continue, but stop the 80367792e2cSThomas Gleixner * requeueing in the chain walk. 804397335f0SThomas Gleixner */ 80567792e2cSThomas Gleixner if (top_waiter != task_top_pi_waiter(task)) { 80667792e2cSThomas Gleixner if (!detect_deadlock) 807397335f0SThomas Gleixner goto out_unlock_pi; 80867792e2cSThomas Gleixner else 80967792e2cSThomas Gleixner requeue = false; 81067792e2cSThomas Gleixner } 811397335f0SThomas Gleixner } 8121696a8beSPeter Zijlstra 8131696a8beSPeter Zijlstra /* 81467792e2cSThomas Gleixner * If the waiter priority is the same as the task priority 81567792e2cSThomas Gleixner * then there is no further priority adjustment necessary. If 81667792e2cSThomas Gleixner * deadlock detection is off, we stop the chain walk. If its 81767792e2cSThomas Gleixner * enabled we continue, but stop the requeueing in the chain 81867792e2cSThomas Gleixner * walk. 8191696a8beSPeter Zijlstra */ 820f7853c34SPeter Zijlstra if (rt_waiter_node_equal(&waiter->tree, task_to_waiter_node(task))) { 82167792e2cSThomas Gleixner if (!detect_deadlock) 8221696a8beSPeter Zijlstra goto out_unlock_pi; 82367792e2cSThomas Gleixner else 82467792e2cSThomas Gleixner requeue = false; 82567792e2cSThomas Gleixner } 8261696a8beSPeter Zijlstra 8273eb65aeaSThomas Gleixner /* 828f7853c34SPeter Zijlstra * [4] Get the next lock; per holding task->pi_lock we can't unblock 829f7853c34SPeter Zijlstra * and guarantee @lock's existence. 8303eb65aeaSThomas Gleixner */ 8311696a8beSPeter Zijlstra lock = waiter->lock; 8323eb65aeaSThomas Gleixner /* 8333eb65aeaSThomas Gleixner * [5] We need to trylock here as we are holding task->pi_lock, 8343eb65aeaSThomas Gleixner * which is the reverse lock order versus the other rtmutex 8353eb65aeaSThomas Gleixner * operations. 836f7853c34SPeter Zijlstra * 837f7853c34SPeter Zijlstra * Per the above, holding task->pi_lock guarantees lock exists, so 838f7853c34SPeter Zijlstra * inverting this lock order is infeasible from a life-time 839f7853c34SPeter Zijlstra * perspective. 8403eb65aeaSThomas Gleixner */ 8411696a8beSPeter Zijlstra if (!raw_spin_trylock(&lock->wait_lock)) { 842b4abf910SThomas Gleixner raw_spin_unlock_irq(&task->pi_lock); 8431696a8beSPeter Zijlstra cpu_relax(); 8441696a8beSPeter Zijlstra goto retry; 8451696a8beSPeter Zijlstra } 8461696a8beSPeter Zijlstra 847397335f0SThomas Gleixner /* 8483eb65aeaSThomas Gleixner * [6] check_exit_conditions_2() protected by task->pi_lock and 8493eb65aeaSThomas Gleixner * lock->wait_lock. 8503eb65aeaSThomas Gleixner * 851397335f0SThomas Gleixner * Deadlock detection. If the lock is the same as the original 852397335f0SThomas Gleixner * lock which caused us to walk the lock chain or if the 853397335f0SThomas Gleixner * current lock is owned by the task which initiated the chain 854397335f0SThomas Gleixner * walk, we detected a deadlock. 855397335f0SThomas Gleixner */ 8561696a8beSPeter Zijlstra if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { 8573d5c9340SThomas Gleixner ret = -EDEADLK; 858a055fcc1SPeter Zijlstra 859a055fcc1SPeter Zijlstra /* 860a055fcc1SPeter Zijlstra * When the deadlock is due to ww_mutex; also see above. 
Don't 861a055fcc1SPeter Zijlstra * report the deadlock and instead let the ww_mutex wound/die 862a055fcc1SPeter Zijlstra * logic pick which of the contending threads gets -EDEADLK. 863a055fcc1SPeter Zijlstra * 864a055fcc1SPeter Zijlstra * NOTE: assumes the cycle only contains a single ww_class; any 865a055fcc1SPeter Zijlstra * other configuration and we fail to report; also, see 866a055fcc1SPeter Zijlstra * lockdep. 867a055fcc1SPeter Zijlstra */ 868e5480572SPeter Zijlstra if (IS_ENABLED(CONFIG_PREEMPT_RT) && orig_waiter && orig_waiter->ww_ctx) 869a055fcc1SPeter Zijlstra ret = 0; 870a055fcc1SPeter Zijlstra 871a055fcc1SPeter Zijlstra raw_spin_unlock(&lock->wait_lock); 8721696a8beSPeter Zijlstra goto out_unlock_pi; 8731696a8beSPeter Zijlstra } 8741696a8beSPeter Zijlstra 875a57594a1SThomas Gleixner /* 87667792e2cSThomas Gleixner * If we just follow the lock chain for deadlock detection, no 87767792e2cSThomas Gleixner * need to do all the requeue operations. To avoid a truckload 87867792e2cSThomas Gleixner * of conditionals around the various places below, just do the 87967792e2cSThomas Gleixner * minimum chain walk checks. 88067792e2cSThomas Gleixner */ 88167792e2cSThomas Gleixner if (!requeue) { 88267792e2cSThomas Gleixner /* 88367792e2cSThomas Gleixner * No requeue[7] here. Just release @task [8] 88467792e2cSThomas Gleixner */ 885b4abf910SThomas Gleixner raw_spin_unlock(&task->pi_lock); 88667792e2cSThomas Gleixner put_task_struct(task); 88767792e2cSThomas Gleixner 88867792e2cSThomas Gleixner /* 88967792e2cSThomas Gleixner * [9] check_exit_conditions_3 protected by lock->wait_lock. 89067792e2cSThomas Gleixner * If there is no owner of the lock, end of chain. 89167792e2cSThomas Gleixner */ 89267792e2cSThomas Gleixner if (!rt_mutex_owner(lock)) { 893b4abf910SThomas Gleixner raw_spin_unlock_irq(&lock->wait_lock); 89467792e2cSThomas Gleixner return 0; 89567792e2cSThomas Gleixner } 89667792e2cSThomas Gleixner 89767792e2cSThomas Gleixner /* [10] Grab the next task, i.e. owner of @lock */ 8987b3c92b8SMatthew Wilcox (Oracle) task = get_task_struct(rt_mutex_owner(lock)); 899b4abf910SThomas Gleixner raw_spin_lock(&task->pi_lock); 90067792e2cSThomas Gleixner 90167792e2cSThomas Gleixner /* 90267792e2cSThomas Gleixner * No requeue [11] here. We just do deadlock detection. 90367792e2cSThomas Gleixner * 90467792e2cSThomas Gleixner * [12] Store whether owner is blocked 90567792e2cSThomas Gleixner * itself. Decision is made after dropping the locks 90667792e2cSThomas Gleixner */ 90767792e2cSThomas Gleixner next_lock = task_blocked_on_lock(task); 90867792e2cSThomas Gleixner /* 90967792e2cSThomas Gleixner * Get the top waiter for the next iteration 91067792e2cSThomas Gleixner */ 91167792e2cSThomas Gleixner top_waiter = rt_mutex_top_waiter(lock); 91267792e2cSThomas Gleixner 91367792e2cSThomas Gleixner /* [13] Drop locks */ 914b4abf910SThomas Gleixner raw_spin_unlock(&task->pi_lock); 915b4abf910SThomas Gleixner raw_spin_unlock_irq(&lock->wait_lock); 91667792e2cSThomas Gleixner 91767792e2cSThomas Gleixner /* If owner is not blocked, end of chain. */ 91867792e2cSThomas Gleixner if (!next_lock) 91967792e2cSThomas Gleixner goto out_put_task; 92067792e2cSThomas Gleixner goto again; 92167792e2cSThomas Gleixner } 92267792e2cSThomas Gleixner 92367792e2cSThomas Gleixner /* 924a57594a1SThomas Gleixner * Store the current top waiter before doing the requeue 925a57594a1SThomas Gleixner * operation on @lock. We need it for the boost/deboost 926a57594a1SThomas Gleixner * decision below. 
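 * If the requeue in [7] changes which waiter is the top waiter of @lock,
 * the owner's pi_waiters tree has to be fixed up in [11]; comparing
 * against this snapshot is how that change is detected.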
927a57594a1SThomas Gleixner */ 928a57594a1SThomas Gleixner prerequeue_top_waiter = rt_mutex_top_waiter(lock); 9291696a8beSPeter Zijlstra 9309f40a51aSDavidlohr Bueso /* [7] Requeue the waiter in the lock waiter tree. */ 931fb00aca4SPeter Zijlstra rt_mutex_dequeue(lock, waiter); 932e0aad5b4SPeter Zijlstra 933e0aad5b4SPeter Zijlstra /* 934e0aad5b4SPeter Zijlstra * Update the waiter prio fields now that we're dequeued. 935e0aad5b4SPeter Zijlstra * 936e0aad5b4SPeter Zijlstra * These values can have changed through either: 937e0aad5b4SPeter Zijlstra * 938e0aad5b4SPeter Zijlstra * sys_sched_set_scheduler() / sys_sched_setattr() 939e0aad5b4SPeter Zijlstra * 940e0aad5b4SPeter Zijlstra * or 941e0aad5b4SPeter Zijlstra * 942e0aad5b4SPeter Zijlstra * DL CBS enforcement advancing the effective deadline. 943e0aad5b4SPeter Zijlstra */ 944715f7f9eSPeter Zijlstra waiter_update_prio(waiter, task); 945e0aad5b4SPeter Zijlstra 946fb00aca4SPeter Zijlstra rt_mutex_enqueue(lock, waiter); 9471696a8beSPeter Zijlstra 948f7853c34SPeter Zijlstra /* 949f7853c34SPeter Zijlstra * [8] Release the (blocking) task in preparation for 950f7853c34SPeter Zijlstra * taking the owner task in [10]. 951f7853c34SPeter Zijlstra * 952f7853c34SPeter Zijlstra * Since we hold lock->waiter_lock, task cannot unblock, even if we 953f7853c34SPeter Zijlstra * release task->pi_lock. 954f7853c34SPeter Zijlstra */ 955b4abf910SThomas Gleixner raw_spin_unlock(&task->pi_lock); 9562ffa5a5cSThomas Gleixner put_task_struct(task); 9572ffa5a5cSThomas Gleixner 958a57594a1SThomas Gleixner /* 9593eb65aeaSThomas Gleixner * [9] check_exit_conditions_3 protected by lock->wait_lock. 9603eb65aeaSThomas Gleixner * 961a57594a1SThomas Gleixner * We must abort the chain walk if there is no lock owner even 962a57594a1SThomas Gleixner * in the dead lock detection case, as we have nothing to 963a57594a1SThomas Gleixner * follow here. This is the end of the chain we are walking. 964a57594a1SThomas Gleixner */ 9651696a8beSPeter Zijlstra if (!rt_mutex_owner(lock)) { 9661696a8beSPeter Zijlstra /* 9673eb65aeaSThomas Gleixner * If the requeue [7] above changed the top waiter, 9683eb65aeaSThomas Gleixner * then we need to wake the new top waiter up to try 9693eb65aeaSThomas Gleixner * to get the lock. 9701696a8beSPeter Zijlstra */ 971db370a8bSWander Lairson Costa top_waiter = rt_mutex_top_waiter(lock); 972db370a8bSWander Lairson Costa if (prerequeue_top_waiter != top_waiter) 973db370a8bSWander Lairson Costa wake_up_state(top_waiter->task, top_waiter->wake_state); 974b4abf910SThomas Gleixner raw_spin_unlock_irq(&lock->wait_lock); 9752ffa5a5cSThomas Gleixner return 0; 9761696a8beSPeter Zijlstra } 9771696a8beSPeter Zijlstra 978f7853c34SPeter Zijlstra /* 979f7853c34SPeter Zijlstra * [10] Grab the next task, i.e. the owner of @lock 980f7853c34SPeter Zijlstra * 981f7853c34SPeter Zijlstra * Per holding lock->wait_lock and checking for !owner above, there 982f7853c34SPeter Zijlstra * must be an owner and it cannot go away. 983f7853c34SPeter Zijlstra */ 9847b3c92b8SMatthew Wilcox (Oracle) task = get_task_struct(rt_mutex_owner(lock)); 985b4abf910SThomas Gleixner raw_spin_lock(&task->pi_lock); 9861696a8beSPeter Zijlstra 9873eb65aeaSThomas Gleixner /* [11] requeue the pi waiters if necessary */ 9881696a8beSPeter Zijlstra if (waiter == rt_mutex_top_waiter(lock)) { 989a57594a1SThomas Gleixner /* 990a57594a1SThomas Gleixner * The waiter became the new top (highest priority) 991a57594a1SThomas Gleixner * waiter on the lock. 
Replace the previous top waiter 9929f40a51aSDavidlohr Bueso * in the owner tasks pi waiters tree with this waiter 993a57594a1SThomas Gleixner * and adjust the priority of the owner. 994a57594a1SThomas Gleixner */ 995a57594a1SThomas Gleixner rt_mutex_dequeue_pi(task, prerequeue_top_waiter); 996f7853c34SPeter Zijlstra waiter_clone_prio(waiter, task); 997fb00aca4SPeter Zijlstra rt_mutex_enqueue_pi(task, waiter); 998f7853c34SPeter Zijlstra rt_mutex_adjust_prio(lock, task); 9991696a8beSPeter Zijlstra 1000a57594a1SThomas Gleixner } else if (prerequeue_top_waiter == waiter) { 1001a57594a1SThomas Gleixner /* 1002a57594a1SThomas Gleixner * The waiter was the top waiter on the lock, but is 1003e2db7592SIngo Molnar * no longer the top priority waiter. Replace waiter in 10049f40a51aSDavidlohr Bueso * the owner tasks pi waiters tree with the new top 1005a57594a1SThomas Gleixner * (highest priority) waiter and adjust the priority 1006a57594a1SThomas Gleixner * of the owner. 1007a57594a1SThomas Gleixner * The new top waiter is stored in @waiter so that 1008a57594a1SThomas Gleixner * @waiter == @top_waiter evaluates to true below and 1009a57594a1SThomas Gleixner * we continue to deboost the rest of the chain. 1010a57594a1SThomas Gleixner */ 1011fb00aca4SPeter Zijlstra rt_mutex_dequeue_pi(task, waiter); 10121696a8beSPeter Zijlstra waiter = rt_mutex_top_waiter(lock); 1013f7853c34SPeter Zijlstra waiter_clone_prio(waiter, task); 1014fb00aca4SPeter Zijlstra rt_mutex_enqueue_pi(task, waiter); 1015f7853c34SPeter Zijlstra rt_mutex_adjust_prio(lock, task); 1016a57594a1SThomas Gleixner } else { 1017a57594a1SThomas Gleixner /* 1018a57594a1SThomas Gleixner * Nothing changed. No need to do any priority 1019a57594a1SThomas Gleixner * adjustment. 1020a57594a1SThomas Gleixner */ 10211696a8beSPeter Zijlstra } 10221696a8beSPeter Zijlstra 102382084984SThomas Gleixner /* 10243eb65aeaSThomas Gleixner * [12] check_exit_conditions_4() protected by task->pi_lock 10253eb65aeaSThomas Gleixner * and lock->wait_lock. The actual decisions are made after we 10263eb65aeaSThomas Gleixner * dropped the locks. 10273eb65aeaSThomas Gleixner * 102882084984SThomas Gleixner * Check whether the task which owns the current lock is pi 102982084984SThomas Gleixner * blocked itself. If yes we store a pointer to the lock for 103082084984SThomas Gleixner * the lock chain change detection above. After we dropped 103182084984SThomas Gleixner * task->pi_lock next_lock cannot be dereferenced anymore. 103282084984SThomas Gleixner */ 103382084984SThomas Gleixner next_lock = task_blocked_on_lock(task); 1034a57594a1SThomas Gleixner /* 1035a57594a1SThomas Gleixner * Store the top waiter of @lock for the end of chain walk 1036a57594a1SThomas Gleixner * decision below. 1037a57594a1SThomas Gleixner */ 10381696a8beSPeter Zijlstra top_waiter = rt_mutex_top_waiter(lock); 10393eb65aeaSThomas Gleixner 10403eb65aeaSThomas Gleixner /* [13] Drop the locks */ 1041b4abf910SThomas Gleixner raw_spin_unlock(&task->pi_lock); 1042b4abf910SThomas Gleixner raw_spin_unlock_irq(&lock->wait_lock); 10431696a8beSPeter Zijlstra 104482084984SThomas Gleixner /* 10453eb65aeaSThomas Gleixner * Make the actual exit decisions [12], based on the stored 10463eb65aeaSThomas Gleixner * values. 10473eb65aeaSThomas Gleixner * 104882084984SThomas Gleixner * We reached the end of the lock chain. Stop right here. No 104982084984SThomas Gleixner * point to go back just to figure that out. 
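 * (I.e. the owner of @lock is not itself blocked on another lock, so
 * @next_lock is NULL and there is nobody left to boost or deboost.)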
105082084984SThomas Gleixner */ 105182084984SThomas Gleixner if (!next_lock) 105282084984SThomas Gleixner goto out_put_task; 105382084984SThomas Gleixner 1054a57594a1SThomas Gleixner /* 1055a57594a1SThomas Gleixner * If the current waiter is not the top waiter on the lock, 1056a57594a1SThomas Gleixner * then we can stop the chain walk here if we are not in full 1057a57594a1SThomas Gleixner * deadlock detection mode. 1058a57594a1SThomas Gleixner */ 10591696a8beSPeter Zijlstra if (!detect_deadlock && waiter != top_waiter) 10601696a8beSPeter Zijlstra goto out_put_task; 10611696a8beSPeter Zijlstra 10621696a8beSPeter Zijlstra goto again; 10631696a8beSPeter Zijlstra 10641696a8beSPeter Zijlstra out_unlock_pi: 1065b4abf910SThomas Gleixner raw_spin_unlock_irq(&task->pi_lock); 10661696a8beSPeter Zijlstra out_put_task: 10671696a8beSPeter Zijlstra put_task_struct(task); 10681696a8beSPeter Zijlstra 10691696a8beSPeter Zijlstra return ret; 10701696a8beSPeter Zijlstra } 10711696a8beSPeter Zijlstra 10721696a8beSPeter Zijlstra /* 10731696a8beSPeter Zijlstra * Try to take an rt-mutex 10741696a8beSPeter Zijlstra * 1075b4abf910SThomas Gleixner * Must be called with lock->wait_lock held and interrupts disabled 10761696a8beSPeter Zijlstra * 1077358c331fSThomas Gleixner * @lock: The lock to be acquired. 1078358c331fSThomas Gleixner * @task: The task which wants to acquire the lock 10799f40a51aSDavidlohr Bueso * @waiter: The waiter that is queued to the lock's wait tree if the 1080358c331fSThomas Gleixner * callsite called task_blocked_on_lock(), otherwise NULL 10811696a8beSPeter Zijlstra */ 1082d7a2edb8SThomas Gleixner static int __sched 1083830e6accSPeter Zijlstra try_to_take_rt_mutex(struct rt_mutex_base *lock, struct task_struct *task, 10841696a8beSPeter Zijlstra struct rt_mutex_waiter *waiter) 10851696a8beSPeter Zijlstra { 1086e0aad5b4SPeter Zijlstra lockdep_assert_held(&lock->wait_lock); 1087e0aad5b4SPeter Zijlstra 10881696a8beSPeter Zijlstra /* 1089358c331fSThomas Gleixner * Before testing whether we can acquire @lock, we set the 1090358c331fSThomas Gleixner * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all 1091358c331fSThomas Gleixner * other tasks which try to modify @lock into the slow path 1092358c331fSThomas Gleixner * and they serialize on @lock->wait_lock. 10931696a8beSPeter Zijlstra * 1094358c331fSThomas Gleixner * The RT_MUTEX_HAS_WAITERS bit can have a transitional state 1095358c331fSThomas Gleixner * as explained at the top of this file if and only if: 10961696a8beSPeter Zijlstra * 1097358c331fSThomas Gleixner * - There is a lock owner. The caller must fixup the 1098358c331fSThomas Gleixner * transient state if it does a trylock or leaves the lock 1099358c331fSThomas Gleixner * function due to a signal or timeout. 1100358c331fSThomas Gleixner * 1101358c331fSThomas Gleixner * - @task acquires the lock and there are no other 1102358c331fSThomas Gleixner * waiters. This is undone in rt_mutex_set_owner(@task) at 1103358c331fSThomas Gleixner * the end of this function. 11041696a8beSPeter Zijlstra */ 11051696a8beSPeter Zijlstra mark_rt_mutex_waiters(lock); 11061696a8beSPeter Zijlstra 1107358c331fSThomas Gleixner /* 1108358c331fSThomas Gleixner * If @lock has an owner, give up. 
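 *
 * Illustrative sketch (editorial addition, not upstream text): with
 * RT_MUTEX_HAS_WAITERS set by mark_rt_mutex_waiters() above, the owner
 * check below boils down to (see rt_mutex_owner() in rtmutex_common.h):
 *
 *	owner = READ_ONCE(lock->owner);
 *	if ((unsigned long)owner & ~RT_MUTEX_HAS_WAITERS)
 *		return 0;		// somebody else already owns @lock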
1109358c331fSThomas Gleixner */ 11101696a8beSPeter Zijlstra if (rt_mutex_owner(lock)) 11111696a8beSPeter Zijlstra return 0; 11121696a8beSPeter Zijlstra 11131696a8beSPeter Zijlstra /* 1114358c331fSThomas Gleixner * If @waiter != NULL, @task has already enqueued the waiter 11159f40a51aSDavidlohr Bueso * into @lock waiter tree. If @waiter == NULL then this is a 1116358c331fSThomas Gleixner * trylock attempt. 1117358c331fSThomas Gleixner */ 1118358c331fSThomas Gleixner if (waiter) { 111948eb3f4fSGregory Haskins struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock); 1120358c331fSThomas Gleixner 1121358c331fSThomas Gleixner /* 112248eb3f4fSGregory Haskins * If waiter is the highest priority waiter of @lock, 112348eb3f4fSGregory Haskins * or allowed to steal it, take it over. 112448eb3f4fSGregory Haskins */ 112548eb3f4fSGregory Haskins if (waiter == top_waiter || rt_mutex_steal(waiter, top_waiter)) { 112648eb3f4fSGregory Haskins /* 1127358c331fSThomas Gleixner * We can acquire the lock. Remove the waiter from the 11289f40a51aSDavidlohr Bueso * lock waiters tree. 1129358c331fSThomas Gleixner */ 1130358c331fSThomas Gleixner rt_mutex_dequeue(lock, waiter); 113148eb3f4fSGregory Haskins } else { 113248eb3f4fSGregory Haskins return 0; 113348eb3f4fSGregory Haskins } 1134358c331fSThomas Gleixner } else { 1135358c331fSThomas Gleixner /* 1136358c331fSThomas Gleixner * If the lock has waiters already we check whether @task is 1137358c331fSThomas Gleixner * eligible to take over the lock. 1138358c331fSThomas Gleixner * 1139358c331fSThomas Gleixner * If there are no other waiters, @task can acquire 1140358c331fSThomas Gleixner * the lock. @task->pi_blocked_on is NULL, so it does 1141358c331fSThomas Gleixner * not need to be dequeued. 11421696a8beSPeter Zijlstra */ 11431696a8beSPeter Zijlstra if (rt_mutex_has_waiters(lock)) { 114448eb3f4fSGregory Haskins /* Check whether the trylock can steal it. */ 114548eb3f4fSGregory Haskins if (!rt_mutex_steal(task_to_waiter(task), 114619830e55SPeter Zijlstra rt_mutex_top_waiter(lock))) 11471696a8beSPeter Zijlstra return 0; 1148358c331fSThomas Gleixner 1149358c331fSThomas Gleixner /* 1150358c331fSThomas Gleixner * The current top waiter stays enqueued. We 1151358c331fSThomas Gleixner * don't have to change anything in the lock 1152358c331fSThomas Gleixner * waiters order. 1153358c331fSThomas Gleixner */ 1154358c331fSThomas Gleixner } else { 1155358c331fSThomas Gleixner /* 1156358c331fSThomas Gleixner * No waiters. Take the lock without the 1157358c331fSThomas Gleixner * pi_lock dance.@task->pi_blocked_on is NULL 1158358c331fSThomas Gleixner * and we have no waiters to enqueue in @task 11599f40a51aSDavidlohr Bueso * pi waiters tree. 1160358c331fSThomas Gleixner */ 1161358c331fSThomas Gleixner goto takeit; 11621696a8beSPeter Zijlstra } 11631696a8beSPeter Zijlstra } 11641696a8beSPeter Zijlstra 11651696a8beSPeter Zijlstra /* 1166358c331fSThomas Gleixner * Clear @task->pi_blocked_on. Requires protection by 1167358c331fSThomas Gleixner * @task->pi_lock. Redundant operation for the @waiter == NULL 1168358c331fSThomas Gleixner * case, but conditionals are more expensive than a redundant 1169358c331fSThomas Gleixner * store. 11701696a8beSPeter Zijlstra */ 1171b4abf910SThomas Gleixner raw_spin_lock(&task->pi_lock); 1172358c331fSThomas Gleixner task->pi_blocked_on = NULL; 1173358c331fSThomas Gleixner /* 1174358c331fSThomas Gleixner * Finish the lock acquisition. @task is the new owner. 
If 1175358c331fSThomas Gleixner * other waiters exist we have to insert the highest priority 11769f40a51aSDavidlohr Bueso * waiter into @task->pi_waiters tree. 1177358c331fSThomas Gleixner */ 1178358c331fSThomas Gleixner if (rt_mutex_has_waiters(lock)) 1179358c331fSThomas Gleixner rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock)); 1180b4abf910SThomas Gleixner raw_spin_unlock(&task->pi_lock); 11811696a8beSPeter Zijlstra 1182358c331fSThomas Gleixner takeit: 1183358c331fSThomas Gleixner /* 1184358c331fSThomas Gleixner * This either preserves the RT_MUTEX_HAS_WAITERS bit if there 1185358c331fSThomas Gleixner * are still waiters or clears it. 1186358c331fSThomas Gleixner */ 11871696a8beSPeter Zijlstra rt_mutex_set_owner(lock, task); 11881696a8beSPeter Zijlstra 11891696a8beSPeter Zijlstra return 1; 11901696a8beSPeter Zijlstra } 11911696a8beSPeter Zijlstra 11921696a8beSPeter Zijlstra /* 11931696a8beSPeter Zijlstra * Task blocks on lock. 11941696a8beSPeter Zijlstra * 11951696a8beSPeter Zijlstra * Prepare waiter and propagate pi chain 11961696a8beSPeter Zijlstra * 1197b4abf910SThomas Gleixner * This must be called with lock->wait_lock held and interrupts disabled 11981696a8beSPeter Zijlstra */ 1199830e6accSPeter Zijlstra static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock, 12001696a8beSPeter Zijlstra struct rt_mutex_waiter *waiter, 12011696a8beSPeter Zijlstra struct task_struct *task, 1202add46132SPeter Zijlstra struct ww_acquire_ctx *ww_ctx, 12038930ed80SThomas Gleixner enum rtmutex_chainwalk chwalk) 12041696a8beSPeter Zijlstra { 12051696a8beSPeter Zijlstra struct task_struct *owner = rt_mutex_owner(lock); 12061696a8beSPeter Zijlstra struct rt_mutex_waiter *top_waiter = waiter; 1207830e6accSPeter Zijlstra struct rt_mutex_base *next_lock; 12081696a8beSPeter Zijlstra int chain_walk = 0, res; 12091696a8beSPeter Zijlstra 1210e0aad5b4SPeter Zijlstra lockdep_assert_held(&lock->wait_lock); 1211e0aad5b4SPeter Zijlstra 1212397335f0SThomas Gleixner /* 1213397335f0SThomas Gleixner * Early deadlock detection. We really don't want the task to 1214397335f0SThomas Gleixner * enqueue on itself just to untangle the mess later. It's not 1215397335f0SThomas Gleixner * only an optimization. We drop the locks, so another waiter 1216397335f0SThomas Gleixner * can come in before the chain walk detects the deadlock. So 1217397335f0SThomas Gleixner * the other will detect the deadlock and return -EDEADLOCK, 1218397335f0SThomas Gleixner * which is wrong, as the other waiter is not in a deadlock 1219397335f0SThomas Gleixner * situation. 122002ea9fc9SPeter Zijlstra * 122102ea9fc9SPeter Zijlstra * Except for ww_mutex, in that case the chain walk must already deal 122202ea9fc9SPeter Zijlstra * with spurious cycles, see the comments at [3] and [6]. 
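 *
 * Illustrative example (editorial addition, not upstream text): the
 * trivial self-deadlock
 *
 *	rt_mutex_lock(&m);
 *	rt_mutex_lock(&m);	// owner == task -> -EDEADLK right here
 *
 * is caught by the check below without walking any chain; the -EDEADLK
 * result is then either propagated to the caller (e.g. for the futex PI
 * code) or handled by rt_mutex_handle_deadlock() further down.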
1223397335f0SThomas Gleixner */ 122402ea9fc9SPeter Zijlstra if (owner == task && !(build_ww_mutex() && ww_ctx)) 1225397335f0SThomas Gleixner return -EDEADLK; 1226397335f0SThomas Gleixner 1227b4abf910SThomas Gleixner raw_spin_lock(&task->pi_lock); 12281696a8beSPeter Zijlstra waiter->task = task; 12291696a8beSPeter Zijlstra waiter->lock = lock; 1230715f7f9eSPeter Zijlstra waiter_update_prio(waiter, task); 1231f7853c34SPeter Zijlstra waiter_clone_prio(waiter, task); 12321696a8beSPeter Zijlstra 12331696a8beSPeter Zijlstra /* Get the top priority waiter on the lock */ 12341696a8beSPeter Zijlstra if (rt_mutex_has_waiters(lock)) 12351696a8beSPeter Zijlstra top_waiter = rt_mutex_top_waiter(lock); 1236fb00aca4SPeter Zijlstra rt_mutex_enqueue(lock, waiter); 12371696a8beSPeter Zijlstra 12381696a8beSPeter Zijlstra task->pi_blocked_on = waiter; 12391696a8beSPeter Zijlstra 1240b4abf910SThomas Gleixner raw_spin_unlock(&task->pi_lock); 12411696a8beSPeter Zijlstra 1242add46132SPeter Zijlstra if (build_ww_mutex() && ww_ctx) { 1243add46132SPeter Zijlstra struct rt_mutex *rtm; 1244add46132SPeter Zijlstra 1245add46132SPeter Zijlstra /* Check whether the waiter should back out immediately */ 1246add46132SPeter Zijlstra rtm = container_of(lock, struct rt_mutex, rtmutex); 1247add46132SPeter Zijlstra res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx); 124837e8abffSThomas Gleixner if (res) { 124937e8abffSThomas Gleixner raw_spin_lock(&task->pi_lock); 125037e8abffSThomas Gleixner rt_mutex_dequeue(lock, waiter); 125137e8abffSThomas Gleixner task->pi_blocked_on = NULL; 125237e8abffSThomas Gleixner raw_spin_unlock(&task->pi_lock); 1253add46132SPeter Zijlstra return res; 1254add46132SPeter Zijlstra } 125537e8abffSThomas Gleixner } 1256add46132SPeter Zijlstra 12571696a8beSPeter Zijlstra if (!owner) 12581696a8beSPeter Zijlstra return 0; 12591696a8beSPeter Zijlstra 1260b4abf910SThomas Gleixner raw_spin_lock(&owner->pi_lock); 126182084984SThomas Gleixner if (waiter == rt_mutex_top_waiter(lock)) { 1262fb00aca4SPeter Zijlstra rt_mutex_dequeue_pi(owner, top_waiter); 1263fb00aca4SPeter Zijlstra rt_mutex_enqueue_pi(owner, waiter); 12641696a8beSPeter Zijlstra 1265f7853c34SPeter Zijlstra rt_mutex_adjust_prio(lock, owner); 12661696a8beSPeter Zijlstra if (owner->pi_blocked_on) 12671696a8beSPeter Zijlstra chain_walk = 1; 12688930ed80SThomas Gleixner } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) { 12691696a8beSPeter Zijlstra chain_walk = 1; 127082084984SThomas Gleixner } 12711696a8beSPeter Zijlstra 127282084984SThomas Gleixner /* Store the lock on which owner is blocked or NULL */ 127382084984SThomas Gleixner next_lock = task_blocked_on_lock(owner); 127482084984SThomas Gleixner 1275b4abf910SThomas Gleixner raw_spin_unlock(&owner->pi_lock); 127682084984SThomas Gleixner /* 127782084984SThomas Gleixner * Even if full deadlock detection is on, if the owner is not 127882084984SThomas Gleixner * blocked itself, we can avoid finding this out in the chain 127982084984SThomas Gleixner * walk. 128082084984SThomas Gleixner */ 128182084984SThomas Gleixner if (!chain_walk || !next_lock) 12821696a8beSPeter Zijlstra return 0; 12831696a8beSPeter Zijlstra 12841696a8beSPeter Zijlstra /* 12851696a8beSPeter Zijlstra * The owner can't disappear while holding a lock, 12861696a8beSPeter Zijlstra * so the owner struct is protected by wait_lock. 12871696a8beSPeter Zijlstra * Gets dropped in rt_mutex_adjust_prio_chain()! 
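 *
 * Illustrative sketch (editorial addition, not upstream text): the
 * reference taken below is consumed inside rt_mutex_adjust_prio_chain(),
 * either at its step [8] when the walk moves past @owner or via its
 * out_put_task exit:
 *
 *	get_task_struct(owner);
 *	raw_spin_unlock_irq(&lock->wait_lock);
 *	rt_mutex_adjust_prio_chain(owner, ...);	// drops the reference
 *	raw_spin_lock_irq(&lock->wait_lock);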
12881696a8beSPeter Zijlstra */ 12891696a8beSPeter Zijlstra get_task_struct(owner); 12901696a8beSPeter Zijlstra 1291b4abf910SThomas Gleixner raw_spin_unlock_irq(&lock->wait_lock); 12921696a8beSPeter Zijlstra 12938930ed80SThomas Gleixner res = rt_mutex_adjust_prio_chain(owner, chwalk, lock, 129482084984SThomas Gleixner next_lock, waiter, task); 12951696a8beSPeter Zijlstra 1296b4abf910SThomas Gleixner raw_spin_lock_irq(&lock->wait_lock); 12971696a8beSPeter Zijlstra 12981696a8beSPeter Zijlstra return res; 12991696a8beSPeter Zijlstra } 13001696a8beSPeter Zijlstra 13011696a8beSPeter Zijlstra /* 13029f40a51aSDavidlohr Bueso * Remove the top waiter from the current tasks pi waiter tree and 130345ab4effSDavidlohr Bueso * queue it up. 13041696a8beSPeter Zijlstra * 1305b4abf910SThomas Gleixner * Called with lock->wait_lock held and interrupts disabled. 13061696a8beSPeter Zijlstra */ 13077980aa39SThomas Gleixner static void __sched mark_wakeup_next_waiter(struct rt_wake_q_head *wqh, 1308830e6accSPeter Zijlstra struct rt_mutex_base *lock) 13091696a8beSPeter Zijlstra { 13101696a8beSPeter Zijlstra struct rt_mutex_waiter *waiter; 13111696a8beSPeter Zijlstra 1312f7853c34SPeter Zijlstra lockdep_assert_held(&lock->wait_lock); 1313f7853c34SPeter Zijlstra 1314b4abf910SThomas Gleixner raw_spin_lock(¤t->pi_lock); 13151696a8beSPeter Zijlstra 13161696a8beSPeter Zijlstra waiter = rt_mutex_top_waiter(lock); 13171696a8beSPeter Zijlstra 13181696a8beSPeter Zijlstra /* 1319acd58620SPeter Zijlstra * Remove it from current->pi_waiters and deboost. 1320acd58620SPeter Zijlstra * 1321acd58620SPeter Zijlstra * We must in fact deboost here in order to ensure we call 1322acd58620SPeter Zijlstra * rt_mutex_setprio() to update p->pi_top_task before the 1323acd58620SPeter Zijlstra * task unblocks. 13241696a8beSPeter Zijlstra */ 1325fb00aca4SPeter Zijlstra rt_mutex_dequeue_pi(current, waiter); 1326f7853c34SPeter Zijlstra rt_mutex_adjust_prio(lock, current); 13271696a8beSPeter Zijlstra 132827e35715SThomas Gleixner /* 132927e35715SThomas Gleixner * As we are waking up the top waiter, and the waiter stays 133027e35715SThomas Gleixner * queued on the lock until it gets the lock, this lock 133127e35715SThomas Gleixner * obviously has waiters. Just set the bit here and this has 133227e35715SThomas Gleixner * the added benefit of forcing all new tasks into the 133327e35715SThomas Gleixner * slow path making sure no task of lower priority than 133427e35715SThomas Gleixner * the top waiter can steal this lock. 133527e35715SThomas Gleixner */ 133627e35715SThomas Gleixner lock->owner = (void *) RT_MUTEX_HAS_WAITERS; 13371696a8beSPeter Zijlstra 1338acd58620SPeter Zijlstra /* 1339acd58620SPeter Zijlstra * We deboosted before waking the top waiter task such that we don't 1340acd58620SPeter Zijlstra * run two tasks with the 'same' priority (and ensure the 1341acd58620SPeter Zijlstra * p->pi_top_task pointer points to a blocked task). This however can 1342acd58620SPeter Zijlstra * lead to priority inversion if we would get preempted after the 1343acd58620SPeter Zijlstra * deboost but before waking our donor task, hence the preempt_disable() 1344acd58620SPeter Zijlstra * before unlock. 
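 *
 * Illustrative sketch (editorial addition, not upstream text): the
 * resulting release sequence is roughly
 *
 *	preempt_disable();
 *	rt_mutex_wake_q_add(wqh, waiter);
 *	raw_spin_unlock(&current->pi_lock);
 *	...
 *	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 *	rt_mutex_wake_up_q(&wqh);	// wake the waiter, re-enable preemption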
1345acd58620SPeter Zijlstra * 13467980aa39SThomas Gleixner * Pairs with preempt_enable() in rt_mutex_wake_up_q(); 1347acd58620SPeter Zijlstra */ 1348acd58620SPeter Zijlstra preempt_disable(); 13497980aa39SThomas Gleixner rt_mutex_wake_q_add(wqh, waiter); 1350acd58620SPeter Zijlstra raw_spin_unlock(¤t->pi_lock); 13511696a8beSPeter Zijlstra } 13521696a8beSPeter Zijlstra 1353e17ba59bSThomas Gleixner static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock) 1354e17ba59bSThomas Gleixner { 1355e17ba59bSThomas Gleixner int ret = try_to_take_rt_mutex(lock, current, NULL); 1356e17ba59bSThomas Gleixner 1357e17ba59bSThomas Gleixner /* 1358e17ba59bSThomas Gleixner * try_to_take_rt_mutex() sets the lock waiters bit 1359e17ba59bSThomas Gleixner * unconditionally. Clean this up. 1360e17ba59bSThomas Gleixner */ 13611c0908d8SMel Gorman fixup_rt_mutex_waiters(lock, true); 1362e17ba59bSThomas Gleixner 1363e17ba59bSThomas Gleixner return ret; 1364e17ba59bSThomas Gleixner } 1365e17ba59bSThomas Gleixner 1366e17ba59bSThomas Gleixner /* 1367e17ba59bSThomas Gleixner * Slow path try-lock function: 1368e17ba59bSThomas Gleixner */ 1369e17ba59bSThomas Gleixner static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock) 1370e17ba59bSThomas Gleixner { 1371e17ba59bSThomas Gleixner unsigned long flags; 1372e17ba59bSThomas Gleixner int ret; 1373e17ba59bSThomas Gleixner 1374e17ba59bSThomas Gleixner /* 1375e17ba59bSThomas Gleixner * If the lock already has an owner we fail to get the lock. 1376e17ba59bSThomas Gleixner * This can be done without taking the @lock->wait_lock as 1377e17ba59bSThomas Gleixner * it is only being read, and this is a trylock anyway. 1378e17ba59bSThomas Gleixner */ 1379e17ba59bSThomas Gleixner if (rt_mutex_owner(lock)) 1380e17ba59bSThomas Gleixner return 0; 1381e17ba59bSThomas Gleixner 1382e17ba59bSThomas Gleixner /* 1383e17ba59bSThomas Gleixner * The mutex has currently no owner. Lock the wait lock and try to 1384e17ba59bSThomas Gleixner * acquire the lock. We use irqsave here to support early boot calls. 1385e17ba59bSThomas Gleixner */ 1386e17ba59bSThomas Gleixner raw_spin_lock_irqsave(&lock->wait_lock, flags); 1387e17ba59bSThomas Gleixner 1388e17ba59bSThomas Gleixner ret = __rt_mutex_slowtrylock(lock); 1389e17ba59bSThomas Gleixner 1390e17ba59bSThomas Gleixner raw_spin_unlock_irqrestore(&lock->wait_lock, flags); 1391e17ba59bSThomas Gleixner 1392e17ba59bSThomas Gleixner return ret; 1393e17ba59bSThomas Gleixner } 1394e17ba59bSThomas Gleixner 1395e17ba59bSThomas Gleixner static __always_inline int __rt_mutex_trylock(struct rt_mutex_base *lock) 1396e17ba59bSThomas Gleixner { 1397e17ba59bSThomas Gleixner if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) 1398e17ba59bSThomas Gleixner return 1; 1399e17ba59bSThomas Gleixner 1400e17ba59bSThomas Gleixner return rt_mutex_slowtrylock(lock); 1401e17ba59bSThomas Gleixner } 1402e17ba59bSThomas Gleixner 1403e17ba59bSThomas Gleixner /* 1404e17ba59bSThomas Gleixner * Slow path to release a rt-mutex. 
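 *
 * Illustrative note (editorial addition, not upstream text): this slow
 * path is only reached when the fast path in __rt_mutex_unlock() fails,
 * i.e. when
 *
 *	rt_mutex_cmpxchg_release(lock, current, NULL)
 *
 * does not succeed because the RT_MUTEX_HAS_WAITERS bit is set (or the
 * cmpxchg based fast path is disabled entirely).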
1405e17ba59bSThomas Gleixner */ 1406e17ba59bSThomas Gleixner static void __sched rt_mutex_slowunlock(struct rt_mutex_base *lock) 1407e17ba59bSThomas Gleixner { 1408e17ba59bSThomas Gleixner DEFINE_RT_WAKE_Q(wqh); 1409e17ba59bSThomas Gleixner unsigned long flags; 1410e17ba59bSThomas Gleixner 1411e17ba59bSThomas Gleixner /* irqsave required to support early boot calls */ 1412e17ba59bSThomas Gleixner raw_spin_lock_irqsave(&lock->wait_lock, flags); 1413e17ba59bSThomas Gleixner 1414e17ba59bSThomas Gleixner debug_rt_mutex_unlock(lock); 1415e17ba59bSThomas Gleixner 1416e17ba59bSThomas Gleixner /* 1417e17ba59bSThomas Gleixner * We must be careful here if the fast path is enabled. If we 1418e17ba59bSThomas Gleixner * have no waiters queued we cannot set owner to NULL here 1419e17ba59bSThomas Gleixner * because of: 1420e17ba59bSThomas Gleixner * 1421e17ba59bSThomas Gleixner * foo->lock->owner = NULL; 1422e17ba59bSThomas Gleixner * rtmutex_lock(foo->lock); <- fast path 1423e17ba59bSThomas Gleixner * free = atomic_dec_and_test(foo->refcnt); 1424e17ba59bSThomas Gleixner * rtmutex_unlock(foo->lock); <- fast path 1425e17ba59bSThomas Gleixner * if (free) 1426e17ba59bSThomas Gleixner * kfree(foo); 1427e17ba59bSThomas Gleixner * raw_spin_unlock(foo->lock->wait_lock); 1428e17ba59bSThomas Gleixner * 1429e17ba59bSThomas Gleixner * So for the fastpath enabled kernel: 1430e17ba59bSThomas Gleixner * 1431e17ba59bSThomas Gleixner * Nothing can set the waiters bit as long as we hold 1432e17ba59bSThomas Gleixner * lock->wait_lock. So we do the following sequence: 1433e17ba59bSThomas Gleixner * 1434e17ba59bSThomas Gleixner * owner = rt_mutex_owner(lock); 1435e17ba59bSThomas Gleixner * clear_rt_mutex_waiters(lock); 1436e17ba59bSThomas Gleixner * raw_spin_unlock(&lock->wait_lock); 1437e17ba59bSThomas Gleixner * if (cmpxchg(&lock->owner, owner, 0) == owner) 1438e17ba59bSThomas Gleixner * return; 1439e17ba59bSThomas Gleixner * goto retry; 1440e17ba59bSThomas Gleixner * 1441e17ba59bSThomas Gleixner * The fastpath disabled variant is simple as all access to 1442e17ba59bSThomas Gleixner * lock->owner is serialized by lock->wait_lock: 1443e17ba59bSThomas Gleixner * 1444e17ba59bSThomas Gleixner * lock->owner = NULL; 1445e17ba59bSThomas Gleixner * raw_spin_unlock(&lock->wait_lock); 1446e17ba59bSThomas Gleixner */ 1447e17ba59bSThomas Gleixner while (!rt_mutex_has_waiters(lock)) { 1448e17ba59bSThomas Gleixner /* Drops lock->wait_lock ! */ 1449e17ba59bSThomas Gleixner if (unlock_rt_mutex_safe(lock, flags) == true) 1450e17ba59bSThomas Gleixner return; 1451e17ba59bSThomas Gleixner /* Relock the rtmutex and try again */ 1452e17ba59bSThomas Gleixner raw_spin_lock_irqsave(&lock->wait_lock, flags); 1453e17ba59bSThomas Gleixner } 1454e17ba59bSThomas Gleixner 1455e17ba59bSThomas Gleixner /* 1456e17ba59bSThomas Gleixner * The wakeup next waiter path does not suffer from the above 1457e17ba59bSThomas Gleixner * race. See the comments there. 1458e17ba59bSThomas Gleixner * 1459e17ba59bSThomas Gleixner * Queue the next waiter for wakeup once we release the wait_lock. 
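 *
 * Illustrative sketch (editorial addition, not upstream text):
 * mark_wakeup_next_waiter() leaves lock->owner set to
 * RT_MUTEX_HAS_WAITERS, so a racing fast path acquisition cannot
 * succeed and the woken top waiter takes the lock in its slow path:
 *
 *	// woken waiter, in rt_mutex_slowlock_block()
 *	raw_spin_lock_irq(&lock->wait_lock);
 *	if (try_to_take_rt_mutex(lock, current, waiter))
 *		break;			// lock acquired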
1460e17ba59bSThomas Gleixner */ 1461e17ba59bSThomas Gleixner mark_wakeup_next_waiter(&wqh, lock); 1462e17ba59bSThomas Gleixner raw_spin_unlock_irqrestore(&lock->wait_lock, flags); 1463e17ba59bSThomas Gleixner 1464e17ba59bSThomas Gleixner rt_mutex_wake_up_q(&wqh); 1465e17ba59bSThomas Gleixner } 1466e17ba59bSThomas Gleixner 1467e17ba59bSThomas Gleixner static __always_inline void __rt_mutex_unlock(struct rt_mutex_base *lock) 1468e17ba59bSThomas Gleixner { 1469e17ba59bSThomas Gleixner if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) 1470e17ba59bSThomas Gleixner return; 1471e17ba59bSThomas Gleixner 1472e17ba59bSThomas Gleixner rt_mutex_slowunlock(lock); 1473e17ba59bSThomas Gleixner } 1474e17ba59bSThomas Gleixner 1475992caf7fSSteven Rostedt #ifdef CONFIG_SMP 1476992caf7fSSteven Rostedt static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock, 1477992caf7fSSteven Rostedt struct rt_mutex_waiter *waiter, 1478992caf7fSSteven Rostedt struct task_struct *owner) 1479992caf7fSSteven Rostedt { 1480992caf7fSSteven Rostedt bool res = true; 1481992caf7fSSteven Rostedt 1482992caf7fSSteven Rostedt rcu_read_lock(); 1483992caf7fSSteven Rostedt for (;;) { 1484992caf7fSSteven Rostedt /* If owner changed, trylock again. */ 1485992caf7fSSteven Rostedt if (owner != rt_mutex_owner(lock)) 1486992caf7fSSteven Rostedt break; 1487992caf7fSSteven Rostedt /* 1488992caf7fSSteven Rostedt * Ensure that @owner is dereferenced after checking that 1489992caf7fSSteven Rostedt * the lock owner still matches @owner. If that fails, 1490992caf7fSSteven Rostedt * @owner might point to freed memory. If it still matches, 1491992caf7fSSteven Rostedt * the rcu_read_lock() ensures the memory stays valid. 1492992caf7fSSteven Rostedt */ 1493992caf7fSSteven Rostedt barrier(); 1494992caf7fSSteven Rostedt /* 1495992caf7fSSteven Rostedt * Stop spinning when: 1496992caf7fSSteven Rostedt * - the lock owner has been scheduled out 1497992caf7fSSteven Rostedt * - current is not longer the top waiter 1498992caf7fSSteven Rostedt * - current is requested to reschedule (redundant 1499992caf7fSSteven Rostedt * for CONFIG_PREEMPT_RCU=y) 1500992caf7fSSteven Rostedt * - the VCPU on which owner runs is preempted 1501992caf7fSSteven Rostedt */ 1502c0bed69dSKefeng Wang if (!owner_on_cpu(owner) || need_resched() || 1503f16cc980SThomas Gleixner !rt_mutex_waiter_is_top_waiter(lock, waiter)) { 1504992caf7fSSteven Rostedt res = false; 1505992caf7fSSteven Rostedt break; 1506992caf7fSSteven Rostedt } 1507992caf7fSSteven Rostedt cpu_relax(); 1508992caf7fSSteven Rostedt } 1509992caf7fSSteven Rostedt rcu_read_unlock(); 1510992caf7fSSteven Rostedt return res; 1511992caf7fSSteven Rostedt } 1512992caf7fSSteven Rostedt #else 1513992caf7fSSteven Rostedt static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock, 1514992caf7fSSteven Rostedt struct rt_mutex_waiter *waiter, 1515992caf7fSSteven Rostedt struct task_struct *owner) 1516992caf7fSSteven Rostedt { 1517992caf7fSSteven Rostedt return false; 1518992caf7fSSteven Rostedt } 1519992caf7fSSteven Rostedt #endif 1520992caf7fSSteven Rostedt 1521e17ba59bSThomas Gleixner #ifdef RT_MUTEX_BUILD_MUTEX 1522e17ba59bSThomas Gleixner /* 1523e17ba59bSThomas Gleixner * Functions required for: 1524e17ba59bSThomas Gleixner * - rtmutex, futex on all kernels 1525e17ba59bSThomas Gleixner * - mutex and rwsem substitutions on RT kernels 1526e17ba59bSThomas Gleixner */ 1527e17ba59bSThomas Gleixner 15281696a8beSPeter Zijlstra /* 15291696a8beSPeter Zijlstra * Remove a waiter from a lock and give up 15301696a8beSPeter Zijlstra * 
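 * Illustrative sketch (editorial addition, not upstream text): this is
 * the cleanup path of a failed slow lock attempt, e.g. roughly in
 * __rt_mutex_slowlock() further down:
 *
 *	ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter);
 *	if (ret) {			// signal, timeout, -EDEADLK, ww kill
 *		__set_current_state(TASK_RUNNING);
 *		remove_waiter(lock, waiter);
 *	}
 *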
1531e17ba59bSThomas Gleixner * Must be called with lock->wait_lock held and interrupts disabled. It must 15321696a8beSPeter Zijlstra * have just failed to try_to_take_rt_mutex(). 15331696a8beSPeter Zijlstra */ 1534830e6accSPeter Zijlstra static void __sched remove_waiter(struct rt_mutex_base *lock, 15351696a8beSPeter Zijlstra struct rt_mutex_waiter *waiter) 15361696a8beSPeter Zijlstra { 15371ca7b860SThomas Gleixner bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); 15381696a8beSPeter Zijlstra struct task_struct *owner = rt_mutex_owner(lock); 1539830e6accSPeter Zijlstra struct rt_mutex_base *next_lock; 15401696a8beSPeter Zijlstra 1541e0aad5b4SPeter Zijlstra lockdep_assert_held(&lock->wait_lock); 1542e0aad5b4SPeter Zijlstra 1543b4abf910SThomas Gleixner raw_spin_lock(¤t->pi_lock); 1544fb00aca4SPeter Zijlstra rt_mutex_dequeue(lock, waiter); 15451696a8beSPeter Zijlstra current->pi_blocked_on = NULL; 1546b4abf910SThomas Gleixner raw_spin_unlock(¤t->pi_lock); 15471696a8beSPeter Zijlstra 15481ca7b860SThomas Gleixner /* 15491ca7b860SThomas Gleixner * Only update priority if the waiter was the highest priority 15501ca7b860SThomas Gleixner * waiter of the lock and there is an owner to update. 15511ca7b860SThomas Gleixner */ 15521ca7b860SThomas Gleixner if (!owner || !is_top_waiter) 15531696a8beSPeter Zijlstra return; 15541696a8beSPeter Zijlstra 1555b4abf910SThomas Gleixner raw_spin_lock(&owner->pi_lock); 15561696a8beSPeter Zijlstra 1557fb00aca4SPeter Zijlstra rt_mutex_dequeue_pi(owner, waiter); 15581696a8beSPeter Zijlstra 15591ca7b860SThomas Gleixner if (rt_mutex_has_waiters(lock)) 15601ca7b860SThomas Gleixner rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock)); 15611696a8beSPeter Zijlstra 1562f7853c34SPeter Zijlstra rt_mutex_adjust_prio(lock, owner); 15631696a8beSPeter Zijlstra 156482084984SThomas Gleixner /* Store the lock on which owner is blocked or NULL */ 156582084984SThomas Gleixner next_lock = task_blocked_on_lock(owner); 15661696a8beSPeter Zijlstra 1567b4abf910SThomas Gleixner raw_spin_unlock(&owner->pi_lock); 15681696a8beSPeter Zijlstra 15691ca7b860SThomas Gleixner /* 15701ca7b860SThomas Gleixner * Don't walk the chain, if the owner task is not blocked 15711ca7b860SThomas Gleixner * itself. 15721ca7b860SThomas Gleixner */ 157382084984SThomas Gleixner if (!next_lock) 15741696a8beSPeter Zijlstra return; 15751696a8beSPeter Zijlstra 15761696a8beSPeter Zijlstra /* gets dropped in rt_mutex_adjust_prio_chain()! 
*/ 15771696a8beSPeter Zijlstra get_task_struct(owner); 15781696a8beSPeter Zijlstra 1579b4abf910SThomas Gleixner raw_spin_unlock_irq(&lock->wait_lock); 15801696a8beSPeter Zijlstra 15818930ed80SThomas Gleixner rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock, 15828930ed80SThomas Gleixner next_lock, NULL, current); 15831696a8beSPeter Zijlstra 1584b4abf910SThomas Gleixner raw_spin_lock_irq(&lock->wait_lock); 15851696a8beSPeter Zijlstra } 15861696a8beSPeter Zijlstra 15871696a8beSPeter Zijlstra /** 1588ebbdc41eSThomas Gleixner * rt_mutex_slowlock_block() - Perform the wait-wake-try-to-take loop 15891696a8beSPeter Zijlstra * @lock: the rt_mutex to take 1590add46132SPeter Zijlstra * @ww_ctx: WW mutex context pointer 15911696a8beSPeter Zijlstra * @state: the state the task should block in (TASK_INTERRUPTIBLE 15921696a8beSPeter Zijlstra * or TASK_UNINTERRUPTIBLE) 15931696a8beSPeter Zijlstra * @timeout: the pre-initialized and started timer, or NULL for none 15941696a8beSPeter Zijlstra * @waiter: the pre-initialized rt_mutex_waiter 15951696a8beSPeter Zijlstra * 1596b4abf910SThomas Gleixner * Must be called with lock->wait_lock held and interrupts disabled 15971696a8beSPeter Zijlstra */ 1598ebbdc41eSThomas Gleixner static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock, 1599add46132SPeter Zijlstra struct ww_acquire_ctx *ww_ctx, 1600830e6accSPeter Zijlstra unsigned int state, 16011696a8beSPeter Zijlstra struct hrtimer_sleeper *timeout, 16021696a8beSPeter Zijlstra struct rt_mutex_waiter *waiter) 16031696a8beSPeter Zijlstra { 1604add46132SPeter Zijlstra struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex); 1605992caf7fSSteven Rostedt struct task_struct *owner; 16061696a8beSPeter Zijlstra int ret = 0; 16071696a8beSPeter Zijlstra 16081696a8beSPeter Zijlstra for (;;) { 16091696a8beSPeter Zijlstra /* Try to acquire the lock: */ 16101696a8beSPeter Zijlstra if (try_to_take_rt_mutex(lock, current, waiter)) 16111696a8beSPeter Zijlstra break; 16121696a8beSPeter Zijlstra 1613a51a327fSThomas Gleixner if (timeout && !timeout->task) { 16141696a8beSPeter Zijlstra ret = -ETIMEDOUT; 1615a51a327fSThomas Gleixner break; 1616a51a327fSThomas Gleixner } 1617a51a327fSThomas Gleixner if (signal_pending_state(state, current)) { 1618a51a327fSThomas Gleixner ret = -EINTR; 16191696a8beSPeter Zijlstra break; 16201696a8beSPeter Zijlstra } 16211696a8beSPeter Zijlstra 1622add46132SPeter Zijlstra if (build_ww_mutex() && ww_ctx) { 1623add46132SPeter Zijlstra ret = __ww_mutex_check_kill(rtm, waiter, ww_ctx); 1624add46132SPeter Zijlstra if (ret) 1625add46132SPeter Zijlstra break; 1626add46132SPeter Zijlstra } 1627add46132SPeter Zijlstra 1628992caf7fSSteven Rostedt if (waiter == rt_mutex_top_waiter(lock)) 1629992caf7fSSteven Rostedt owner = rt_mutex_owner(lock); 1630992caf7fSSteven Rostedt else 1631992caf7fSSteven Rostedt owner = NULL; 1632b4abf910SThomas Gleixner raw_spin_unlock_irq(&lock->wait_lock); 16331696a8beSPeter Zijlstra 1634992caf7fSSteven Rostedt if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner)) 1635*d14f9e93SSebastian Andrzej Siewior rt_mutex_schedule(); 16361696a8beSPeter Zijlstra 1637b4abf910SThomas Gleixner raw_spin_lock_irq(&lock->wait_lock); 16381696a8beSPeter Zijlstra set_current_state(state); 16391696a8beSPeter Zijlstra } 16401696a8beSPeter Zijlstra 1641afffc6c1SDavidlohr Bueso __set_current_state(TASK_RUNNING); 16421696a8beSPeter Zijlstra return ret; 16431696a8beSPeter Zijlstra } 16441696a8beSPeter Zijlstra 1645d7a2edb8SThomas Gleixner static void __sched 
rt_mutex_handle_deadlock(int res, int detect_deadlock, 16463d5c9340SThomas Gleixner struct rt_mutex_waiter *w) 16473d5c9340SThomas Gleixner { 16483d5c9340SThomas Gleixner /* 16493d5c9340SThomas Gleixner * If the result is not -EDEADLOCK or the caller requested 16503d5c9340SThomas Gleixner * deadlock detection, nothing to do here. 16513d5c9340SThomas Gleixner */ 16523d5c9340SThomas Gleixner if (res != -EDEADLOCK || detect_deadlock) 16533d5c9340SThomas Gleixner return; 16543d5c9340SThomas Gleixner 1655add46132SPeter Zijlstra if (build_ww_mutex() && w->ww_ctx) 1656add46132SPeter Zijlstra return; 1657add46132SPeter Zijlstra 16583d5c9340SThomas Gleixner /* 1659e2db7592SIngo Molnar * Yell loudly and stop the task right here. 16603d5c9340SThomas Gleixner */ 16616d41c675SSebastian Andrzej Siewior WARN(1, "rtmutex deadlock detected\n"); 16623d5c9340SThomas Gleixner while (1) { 16633d5c9340SThomas Gleixner set_current_state(TASK_INTERRUPTIBLE); 1664*d14f9e93SSebastian Andrzej Siewior rt_mutex_schedule(); 16653d5c9340SThomas Gleixner } 16663d5c9340SThomas Gleixner } 16673d5c9340SThomas Gleixner 1668ebbdc41eSThomas Gleixner /** 1669ebbdc41eSThomas Gleixner * __rt_mutex_slowlock - Locking slowpath invoked with lock::wait_lock held 1670ebbdc41eSThomas Gleixner * @lock: The rtmutex to block lock 1671add46132SPeter Zijlstra * @ww_ctx: WW mutex context pointer 1672ebbdc41eSThomas Gleixner * @state: The task state for sleeping 1673ebbdc41eSThomas Gleixner * @chwalk: Indicator whether full or partial chainwalk is requested 1674ebbdc41eSThomas Gleixner * @waiter: Initializer waiter for blocking 16751696a8beSPeter Zijlstra */ 1676ebbdc41eSThomas Gleixner static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock, 1677add46132SPeter Zijlstra struct ww_acquire_ctx *ww_ctx, 1678830e6accSPeter Zijlstra unsigned int state, 1679ebbdc41eSThomas Gleixner enum rtmutex_chainwalk chwalk, 1680ebbdc41eSThomas Gleixner struct rt_mutex_waiter *waiter) 1681ebbdc41eSThomas Gleixner { 1682add46132SPeter Zijlstra struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex); 1683add46132SPeter Zijlstra struct ww_mutex *ww = ww_container_of(rtm); 1684ebbdc41eSThomas Gleixner int ret; 1685ebbdc41eSThomas Gleixner 1686ebbdc41eSThomas Gleixner lockdep_assert_held(&lock->wait_lock); 1687ebbdc41eSThomas Gleixner 1688ebbdc41eSThomas Gleixner /* Try to acquire the lock again: */ 1689add46132SPeter Zijlstra if (try_to_take_rt_mutex(lock, current, NULL)) { 1690add46132SPeter Zijlstra if (build_ww_mutex() && ww_ctx) { 1691add46132SPeter Zijlstra __ww_mutex_check_waiters(rtm, ww_ctx); 1692add46132SPeter Zijlstra ww_mutex_lock_acquired(ww, ww_ctx); 1693add46132SPeter Zijlstra } 1694ebbdc41eSThomas Gleixner return 0; 1695add46132SPeter Zijlstra } 1696ebbdc41eSThomas Gleixner 1697ebbdc41eSThomas Gleixner set_current_state(state); 1698ebbdc41eSThomas Gleixner 1699ee042be1SNamhyung Kim trace_contention_begin(lock, LCB_F_RT); 1700ee042be1SNamhyung Kim 1701add46132SPeter Zijlstra ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk); 1702ebbdc41eSThomas Gleixner if (likely(!ret)) 1703add46132SPeter Zijlstra ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter); 1704ebbdc41eSThomas Gleixner 1705add46132SPeter Zijlstra if (likely(!ret)) { 1706add46132SPeter Zijlstra /* acquired the lock */ 1707add46132SPeter Zijlstra if (build_ww_mutex() && ww_ctx) { 1708add46132SPeter Zijlstra if (!ww_ctx->is_wait_die) 1709add46132SPeter Zijlstra __ww_mutex_check_waiters(rtm, ww_ctx); 1710add46132SPeter Zijlstra 
ww_mutex_lock_acquired(ww, ww_ctx); 1711add46132SPeter Zijlstra } 1712add46132SPeter Zijlstra } else { 1713ebbdc41eSThomas Gleixner __set_current_state(TASK_RUNNING); 1714ebbdc41eSThomas Gleixner remove_waiter(lock, waiter); 1715ebbdc41eSThomas Gleixner rt_mutex_handle_deadlock(ret, chwalk, waiter); 1716ebbdc41eSThomas Gleixner } 1717ebbdc41eSThomas Gleixner 1718ebbdc41eSThomas Gleixner /* 1719ebbdc41eSThomas Gleixner * try_to_take_rt_mutex() sets the waiter bit 1720ebbdc41eSThomas Gleixner * unconditionally. We might have to fix that up. 1721ebbdc41eSThomas Gleixner */ 17221c0908d8SMel Gorman fixup_rt_mutex_waiters(lock, true); 1723ee042be1SNamhyung Kim 1724ee042be1SNamhyung Kim trace_contention_end(lock, ret); 1725ee042be1SNamhyung Kim 1726ebbdc41eSThomas Gleixner return ret; 1727ebbdc41eSThomas Gleixner } 1728ebbdc41eSThomas Gleixner 1729ebbdc41eSThomas Gleixner static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock, 1730add46132SPeter Zijlstra struct ww_acquire_ctx *ww_ctx, 1731ebbdc41eSThomas Gleixner unsigned int state) 17321696a8beSPeter Zijlstra { 17331696a8beSPeter Zijlstra struct rt_mutex_waiter waiter; 1734ebbdc41eSThomas Gleixner int ret; 17351696a8beSPeter Zijlstra 173650809358SPeter Zijlstra rt_mutex_init_waiter(&waiter); 1737add46132SPeter Zijlstra waiter.ww_ctx = ww_ctx; 17381696a8beSPeter Zijlstra 1739add46132SPeter Zijlstra ret = __rt_mutex_slowlock(lock, ww_ctx, state, RT_MUTEX_MIN_CHAINWALK, 1740add46132SPeter Zijlstra &waiter); 1741ebbdc41eSThomas Gleixner 1742ebbdc41eSThomas Gleixner debug_rt_mutex_free_waiter(&waiter); 1743ebbdc41eSThomas Gleixner return ret; 1744ebbdc41eSThomas Gleixner } 1745ebbdc41eSThomas Gleixner 1746ebbdc41eSThomas Gleixner /* 1747ebbdc41eSThomas Gleixner * rt_mutex_slowlock - Locking slowpath invoked when fast path fails 1748ebbdc41eSThomas Gleixner * @lock: The rtmutex to block lock 1749add46132SPeter Zijlstra * @ww_ctx: WW mutex context pointer 1750ebbdc41eSThomas Gleixner * @state: The task state for sleeping 1751ebbdc41eSThomas Gleixner */ 1752ebbdc41eSThomas Gleixner static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock, 1753add46132SPeter Zijlstra struct ww_acquire_ctx *ww_ctx, 1754ebbdc41eSThomas Gleixner unsigned int state) 1755ebbdc41eSThomas Gleixner { 1756ebbdc41eSThomas Gleixner unsigned long flags; 1757ebbdc41eSThomas Gleixner int ret; 1758ebbdc41eSThomas Gleixner 1759b4abf910SThomas Gleixner /* 1760*d14f9e93SSebastian Andrzej Siewior * Do all pre-schedule work here, before we queue a waiter and invoke 1761*d14f9e93SSebastian Andrzej Siewior * PI -- any such work that trips on rtlock (PREEMPT_RT spinlock) would 1762*d14f9e93SSebastian Andrzej Siewior * otherwise recurse back into task_blocks_on_rt_mutex() through 1763*d14f9e93SSebastian Andrzej Siewior * rtlock_slowlock() and will then enqueue a second waiter for this 1764*d14f9e93SSebastian Andrzej Siewior * same task and things get really confusing real fast. 1765*d14f9e93SSebastian Andrzej Siewior */ 1766*d14f9e93SSebastian Andrzej Siewior rt_mutex_pre_schedule(); 1767*d14f9e93SSebastian Andrzej Siewior 1768*d14f9e93SSebastian Andrzej Siewior /* 1769b4abf910SThomas Gleixner * Technically we could use raw_spin_[un]lock_irq() here, but this can 1770b4abf910SThomas Gleixner * be called in early boot if the cmpxchg() fast path is disabled 1771b4abf910SThomas Gleixner * (debug, no architecture support). In this case we will acquire the 1772b4abf910SThomas Gleixner * rtmutex with lock->wait_lock held. 
But we cannot unconditionally 1773b4abf910SThomas Gleixner * enable interrupts in that early boot case. So we need to use the 1774b4abf910SThomas Gleixner * irqsave/restore variants. 1775b4abf910SThomas Gleixner */ 1776b4abf910SThomas Gleixner raw_spin_lock_irqsave(&lock->wait_lock, flags); 1777add46132SPeter Zijlstra ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state); 1778b4abf910SThomas Gleixner raw_spin_unlock_irqrestore(&lock->wait_lock, flags); 1779*d14f9e93SSebastian Andrzej Siewior rt_mutex_post_schedule(); 17801696a8beSPeter Zijlstra 17811696a8beSPeter Zijlstra return ret; 17821696a8beSPeter Zijlstra } 17831696a8beSPeter Zijlstra 1784830e6accSPeter Zijlstra static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock, 1785531ae4b0SThomas Gleixner unsigned int state) 1786531ae4b0SThomas Gleixner { 1787af9f0063SSebastian Andrzej Siewior if (likely(rt_mutex_try_acquire(lock))) 1788531ae4b0SThomas Gleixner return 0; 1789531ae4b0SThomas Gleixner 1790add46132SPeter Zijlstra return rt_mutex_slowlock(lock, NULL, state); 1791531ae4b0SThomas Gleixner } 1792e17ba59bSThomas Gleixner #endif /* RT_MUTEX_BUILD_MUTEX */ 17931c143c4bSThomas Gleixner 17941c143c4bSThomas Gleixner #ifdef RT_MUTEX_BUILD_SPINLOCKS 17951c143c4bSThomas Gleixner /* 17961c143c4bSThomas Gleixner * Functions required for spin/rw_lock substitution on RT kernels 17971c143c4bSThomas Gleixner */ 17981c143c4bSThomas Gleixner 17991c143c4bSThomas Gleixner /** 18001c143c4bSThomas Gleixner * rtlock_slowlock_locked - Slow path lock acquisition for RT locks 18011c143c4bSThomas Gleixner * @lock: The underlying RT mutex 18021c143c4bSThomas Gleixner */ 18031c143c4bSThomas Gleixner static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock) 18041c143c4bSThomas Gleixner { 18051c143c4bSThomas Gleixner struct rt_mutex_waiter waiter; 1806992caf7fSSteven Rostedt struct task_struct *owner; 18071c143c4bSThomas Gleixner 18081c143c4bSThomas Gleixner lockdep_assert_held(&lock->wait_lock); 18091c143c4bSThomas Gleixner 18101c143c4bSThomas Gleixner if (try_to_take_rt_mutex(lock, current, NULL)) 18111c143c4bSThomas Gleixner return; 18121c143c4bSThomas Gleixner 18131c143c4bSThomas Gleixner rt_mutex_init_rtlock_waiter(&waiter); 18141c143c4bSThomas Gleixner 18151c143c4bSThomas Gleixner /* Save current state and set state to TASK_RTLOCK_WAIT */ 18161c143c4bSThomas Gleixner current_save_and_set_rtlock_wait_state(); 18171c143c4bSThomas Gleixner 1818ee042be1SNamhyung Kim trace_contention_begin(lock, LCB_F_RT); 1819ee042be1SNamhyung Kim 1820add46132SPeter Zijlstra task_blocks_on_rt_mutex(lock, &waiter, current, NULL, RT_MUTEX_MIN_CHAINWALK); 18211c143c4bSThomas Gleixner 18221c143c4bSThomas Gleixner for (;;) { 18231c143c4bSThomas Gleixner /* Try to acquire the lock again */ 18241c143c4bSThomas Gleixner if (try_to_take_rt_mutex(lock, current, &waiter)) 18251c143c4bSThomas Gleixner break; 18261c143c4bSThomas Gleixner 1827992caf7fSSteven Rostedt if (&waiter == rt_mutex_top_waiter(lock)) 1828992caf7fSSteven Rostedt owner = rt_mutex_owner(lock); 1829992caf7fSSteven Rostedt else 1830992caf7fSSteven Rostedt owner = NULL; 18311c143c4bSThomas Gleixner raw_spin_unlock_irq(&lock->wait_lock); 18321c143c4bSThomas Gleixner 1833992caf7fSSteven Rostedt if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner)) 18341c143c4bSThomas Gleixner schedule_rtlock(); 18351c143c4bSThomas Gleixner 18361c143c4bSThomas Gleixner raw_spin_lock_irq(&lock->wait_lock); 18371c143c4bSThomas Gleixner set_current_state(TASK_RTLOCK_WAIT); 18381c143c4bSThomas Gleixner } 
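	/*
	 * Illustrative note (editorial addition, not upstream text): the loop
	 * above is the rtlock counterpart of rt_mutex_slowlock_block(). It
	 * cannot fail: there is no timeout and no signal handling, so it
	 * blocks in TASK_RTLOCK_WAIT until try_to_take_rt_mutex() succeeds,
	 * bracketed by
	 *
	 *	current_save_and_set_rtlock_wait_state();
	 *	...
	 *	current_restore_rtlock_saved_state();
	 *
	 * which preserves whatever task state the caller had before contending.
	 */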
18391c143c4bSThomas Gleixner 18401c143c4bSThomas Gleixner /* Restore the task state */ 18411c143c4bSThomas Gleixner current_restore_rtlock_saved_state(); 18421c143c4bSThomas Gleixner 18431c143c4bSThomas Gleixner /* 18441c143c4bSThomas Gleixner * try_to_take_rt_mutex() sets the waiter bit unconditionally. 18451c143c4bSThomas Gleixner * We might have to fix that up: 18461c143c4bSThomas Gleixner */ 18471c0908d8SMel Gorman fixup_rt_mutex_waiters(lock, true); 18481c143c4bSThomas Gleixner debug_rt_mutex_free_waiter(&waiter); 1849ee042be1SNamhyung Kim 1850ee042be1SNamhyung Kim trace_contention_end(lock, 0); 18511c143c4bSThomas Gleixner } 18521c143c4bSThomas Gleixner 18531c143c4bSThomas Gleixner static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock) 18541c143c4bSThomas Gleixner { 18551c143c4bSThomas Gleixner unsigned long flags; 18561c143c4bSThomas Gleixner 18571c143c4bSThomas Gleixner raw_spin_lock_irqsave(&lock->wait_lock, flags); 18581c143c4bSThomas Gleixner rtlock_slowlock_locked(lock); 18591c143c4bSThomas Gleixner raw_spin_unlock_irqrestore(&lock->wait_lock, flags); 18601c143c4bSThomas Gleixner } 18611c143c4bSThomas Gleixner 18621c143c4bSThomas Gleixner #endif /* RT_MUTEX_BUILD_SPINLOCKS */ 1863
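/*
 * Illustrative sketch (editorial addition, not part of this file): on
 * PREEMPT_RT the spinlock_t substitution built on top of this code
 * (kernel/locking/spinlock_rt.c) reaches the slow path above roughly as
 *
 *	static __always_inline void rtlock_lock(struct rt_mutex_base *rtm)
 *	{
 *		if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
 *			rtlock_slowlock(rtm);
 *	}
 *
 * i.e. the uncontended case is a single cmpxchg and only contention ever
 * enters rtlock_slowlock_locked().
 */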