/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 * Copyright (C) 2006 Esben Nielsen
 *
 * See Documentation/rt-mutex-design.txt for details.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/timer.h>

#include "rtmutex_common.h"

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0
 * is used to keep track of the "lock has waiters" state.
 *
 * owner	bit0
 * NULL		0	lock is free (fast acquire possible)
 * NULL		1	lock is free and has waiters and the top waiter
 *			is going to take the lock*
 * taskpointer	0	lock is held (fast release possible)
 * taskpointer	1	lock is held and has waiters**
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 of lock->owner is 0.
 *
 * (*) It can also be a transitional state when grabbing the lock
 * while ->wait_lock is held. To prevent any fast path cmpxchg of the
 * lock, we need to set bit 0 before looking at the lock, and the owner
 * may be NULL during this short window, hence this can be a
 * transitional state.
 *
 * (**) There is a small window in which bit 0 is set but there are no
 * waiters. This can happen when grabbing the lock in the slow path.
 * To prevent a cmpxchg of the owner releasing the lock, we need to
 * set this bit before looking at the lock.
 */
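
/*
 * Illustrative sketch, not part of the original file: decoding the
 * owner word documented above. The real accessor is rt_mutex_owner()
 * in rtmutex_common.h; the helper name below is made up and only
 * demonstrates the bit layout.
 */
static inline struct task_struct *
rt_mutex_owner_sketch(struct rt_mutex *lock, bool *has_waiters)
{
	unsigned long val = (unsigned long)lock->owner;

	/* bit 0 carries the "lock has waiters" state */
	*has_waiters = val & RT_MUTEX_HAS_WAITERS;

	/* the remaining bits are the owner task pointer (or NULL) */
	return (struct task_struct *)(val & ~RT_MUTEX_HAS_WAITERS);
}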

static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner;

	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		clear_rt_mutex_waiters(lock);
}

/*
 * We can speed up the acquire/release, if the architecture
 * supports cmpxchg and if there's no debugging state to be set up
 */
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	do {
		owner = *p;
	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}
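
/*
 * Illustrative sketch, not part of the original file: how the cmpxchg
 * based fast path is expected to use the macro above. Acquire can only
 * succeed while lock->owner is NULL with bit 0 clear; the actual fast
 * path wrappers live further down in this file. The helper name below
 * is made up.
 */
static inline bool rt_mutex_fasttrylock_sketch(struct rt_mutex *lock)
{
	/* NULL -> current; fails as soon as RT_MUTEX_HAS_WAITERS is set */
	return rt_mutex_cmpxchg(lock, NULL, current);
}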

/*
 * Safe fastpath aware unlock:
 * 1) Clear the waiters bit
 * 2) Drop lock->wait_lock
 * 3) Try to unlock the lock with cmpxchg
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
	__releases(lock->wait_lock)
{
	struct task_struct *owner = rt_mutex_owner(lock);

	clear_rt_mutex_waiters(lock);
	raw_spin_unlock(&lock->wait_lock);
	/*
	 * If a new waiter comes in between the unlock and the cmpxchg
	 * we have two situations:
	 *
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 * cmpxchg(p, owner, 0) == owner
	 *					mark_rt_mutex_waiters(lock);
	 *					acquire(lock);
	 * or:
	 *
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 *					mark_rt_mutex_waiters(lock);
	 *
	 * cmpxchg(p, owner, 0) != owner
	 *					enqueue_waiter();
	 *					unlock(wait_lock);
	 * lock(wait_lock);
	 * wake waiter();
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 *					acquire(lock);
	 */
	return rt_mutex_cmpxchg(lock, owner, NULL);
}

#else
# define rt_mutex_cmpxchg(l,c,n)	(0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}

/*
 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
	__releases(lock->wait_lock)
{
	lock->owner = NULL;
	raw_spin_unlock(&lock->wait_lock);
	return true;
}
#endif

static inline int
rt_mutex_waiter_less(struct rt_mutex_waiter *left,
		     struct rt_mutex_waiter *right)
{
	if (left->prio < right->prio)
		return 1;

	/*
	 * If both waiters have dl_prio(), we check the deadlines of the
	 * associated tasks.
	 * If left waiter has a dl_prio(), and we didn't return 1 above,
	 * then right waiter has a dl_prio() too.
	 */
	if (dl_prio(left->prio))
		return (left->task->dl.deadline < right->task->dl.deadline);

	return 0;
}
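
/*
 * Worked example (illustrative addition): kernel internal ->prio values
 * are inverted, i.e. a lower number means a higher priority. A
 * SCHED_FIFO prio 90 task has ->prio 9 and orders before a SCHED_FIFO
 * prio 50 task with ->prio 49. SCHED_DEADLINE tasks all have ->prio -1,
 * so two deadline waiters tie on ->prio and the one with the earlier
 * dl.deadline is considered "less", i.e. queued first.
 */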

static void
rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
	struct rb_node **link = &lock->waiters.rb_node;
	struct rb_node *parent = NULL;
	struct rt_mutex_waiter *entry;
	int leftmost = 1;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
		if (rt_mutex_waiter_less(waiter, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		lock->waiters_leftmost = &waiter->tree_entry;

	rb_link_node(&waiter->tree_entry, parent, link);
	rb_insert_color(&waiter->tree_entry, &lock->waiters);
}

static void
rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
	if (RB_EMPTY_NODE(&waiter->tree_entry))
		return;

	if (lock->waiters_leftmost == &waiter->tree_entry)
		lock->waiters_leftmost = rb_next(&waiter->tree_entry);

	rb_erase(&waiter->tree_entry, &lock->waiters);
	RB_CLEAR_NODE(&waiter->tree_entry);
}

static void
rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
	struct rb_node **link = &task->pi_waiters.rb_node;
	struct rb_node *parent = NULL;
	struct rt_mutex_waiter *entry;
	int leftmost = 1;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
		if (rt_mutex_waiter_less(waiter, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		task->pi_waiters_leftmost = &waiter->pi_tree_entry;

	rb_link_node(&waiter->pi_tree_entry, parent, link);
	rb_insert_color(&waiter->pi_tree_entry, &task->pi_waiters);
}

static void
rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
	if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
		return;

	if (task->pi_waiters_leftmost == &waiter->pi_tree_entry)
		task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry);

	rb_erase(&waiter->pi_tree_entry, &task->pi_waiters);
	RB_CLEAR_NODE(&waiter->pi_tree_entry);
}
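
/*
 * Illustrative sketch, not part of the original file: the four helpers
 * above cache a pointer to the leftmost rb_node, so peeking at the top
 * (highest priority) waiter is O(1). The real accessor is
 * rt_mutex_top_waiter() in rtmutex_common.h; it boils down to the
 * made-up helper below, assuming the caller already checked
 * rt_mutex_has_waiters().
 */
static inline struct rt_mutex_waiter *
rt_mutex_peek_top_waiter_sketch(struct rt_mutex *lock)
{
	return rb_entry(lock->waiters_leftmost, struct rt_mutex_waiter,
			tree_entry);
}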

/*
 * Calculate task priority from the waiter tree priority
 *
 * Return task->normal_prio when the waiter tree is empty or when
 * the waiter is not allowed to do priority boosting
 */
int rt_mutex_getprio(struct task_struct *task)
{
	if (likely(!task_has_pi_waiters(task)))
		return task->normal_prio;

	return min(task_top_pi_waiter(task)->prio,
		   task->normal_prio);
}

struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
{
	if (likely(!task_has_pi_waiters(task)))
		return NULL;

	return task_top_pi_waiter(task)->task;
}

/*
 * Called by sched_setscheduler() to check whether the priority change
 * is overruled by a possible priority boosting.
 */
int rt_mutex_check_prio(struct task_struct *task, int newprio)
{
	if (!task_has_pi_waiters(task))
		return 0;

	return task_top_pi_waiter(task)->task->prio <= newprio;
}

/*
 * Adjust the priority of a task, after its pi_waiters got modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
	int prio = rt_mutex_getprio(task);

	if (task->prio != prio || dl_prio(prio))
		rt_mutex_setprio(task, prio);
}
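
/*
 * Worked example (illustrative addition): a SCHED_OTHER owner with
 * ->normal_prio 120 whose pi waiter tree is headed by a SCHED_FIFO
 * waiter with ->prio 10 gets rt_mutex_getprio() == min(10, 120) == 10,
 * so __rt_mutex_adjust_prio() boosts it via rt_mutex_setprio(task, 10).
 * Once the waiter is gone, the same path deboosts the owner back to
 * ->normal_prio 120.
 */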

/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task. We do not use the spin_xx_mutex() variants here as we are
 * outside of the debug path.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}

/*
 * Deadlock detection is conditional:
 *
 * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
 * if the detect argument is == RT_MUTEX_FULL_CHAINWALK.
 *
 * If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always
 * conducted independent of the detect argument.
 *
 * If the waiter argument is NULL this indicates the deboost path and
 * deadlock detection is disabled independent of the detect argument
 * and the config settings.
 */
static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
					  enum rtmutex_chainwalk chwalk)
{
	/*
	 * This is just a wrapper function for the following call,
	 * because debug_rt_mutex_detect_deadlock() smells like a magic
	 * debug feature and I wanted to keep the cond function in the
	 * main source file along with the comments instead of having
	 * two of the same in the headers.
	 */
	return debug_rt_mutex_detect_deadlock(waiter, chwalk);
}

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
	return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
}
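
/*
 * Worked example (illustrative addition) of a chain walk: T1 (->prio 10)
 * blocks on L1, owned by T2 (->prio 30), which is itself blocked on L2,
 * owned by T3 (->prio 40). Boosting T2 to 10 requeues T2's waiter on
 * L2; it becomes L2's top waiter, so the walk continues and boosts T3
 * to 10 as well. The walk stops at T3 because T3 is not blocked on
 * another lock, i.e. task_blocked_on_lock(T3) == NULL.
 */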

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 *
 * @task:	the task owning the mutex (owner) for which a chain walk is
 *		probably needed
 * @chwalk:	do we have to carry out deadlock detection?
 * @orig_lock:	the mutex (can be NULL if we are walking the chain to recheck
 *		things for a task that has just got its priority adjusted, and
 *		is waiting on a mutex)
 * @next_lock:	the mutex on which the owner of @orig_lock was blocked before
 *		we dropped its pi_lock. Is never dereferenced, only used for
 *		comparison to detect lock chain changes.
 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
 *		its priority to the mutex owner (can be NULL in the case
 *		depicted above or if the top waiter is gone away and we are
 *		actually deboosting the owner)
 * @top_task:	the current top waiter
 *
 * Returns 0 or -EDEADLK.
 *
 * Chain walk basics and protection scope
 *
 * [R] refcount on task
 * [P] task->pi_lock held
 * [L] rtmutex->wait_lock held
 *
 * Step	Description				Protected by
 *	function arguments:
 *	@task					[R]
 *	@orig_lock if != NULL			@top_task is blocked on it
 *	@next_lock				Unprotected. Cannot be
 *						dereferenced. Only used for
 *						comparison.
 *	@orig_waiter if != NULL			@top_task is blocked on it
 *	@top_task				current, or in case of proxy
 *						locking protected by calling
 *						code
 * again:
 *	loop_sanity_check();
 * retry:
 * [1]	  lock(task->pi_lock);			[R] acquire [P]
 * [2]	  waiter = task->pi_blocked_on;		[P]
 * [3]	  check_exit_conditions_1();		[P]
 * [4]	  lock = waiter->lock;			[P]
 * [5]	  if (!try_lock(lock->wait_lock)) {	[P] try to acquire [L]
 *	    unlock(task->pi_lock);		release [P]
 *	    goto retry;
 *	  }
 * [6]	  check_exit_conditions_2();		[P] + [L]
 * [7]	  requeue_lock_waiter(lock, waiter);	[P] + [L]
 * [8]	  unlock(task->pi_lock);		release [P]
 *	  put_task_struct(task);		release [R]
 * [9]	  check_exit_conditions_3();		[L]
 * [10]	  task = owner(lock);			[L]
 *	  get_task_struct(task);		[L] acquire [R]
 *	  lock(task->pi_lock);			[L] acquire [P]
 * [11]	  requeue_pi_waiter(tsk, waiters(lock));[P] + [L]
 * [12]	  check_exit_conditions_4();		[P] + [L]
 * [13]	  unlock(task->pi_lock);		release [P]
 *	  unlock(lock->wait_lock);		release [L]
 *	  goto again;
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
				      enum rtmutex_chainwalk chwalk,
				      struct rt_mutex *orig_lock,
				      struct rt_mutex *next_lock,
				      struct rt_mutex_waiter *orig_waiter,
				      struct task_struct *top_task)
{
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	struct rt_mutex_waiter *prerequeue_top_waiter;
	int ret = 0, depth = 0;
	struct rt_mutex *lock;
	bool detect_deadlock;
	unsigned long flags;
	bool requeue = true;

	detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);

	/*
	 * The (de)boosting is a step by step approach with a lot of
	 * pitfalls. We want this to be preemptible and we want to hold
	 * a maximum of two locks per step. So we have to check
	 * carefully whether things change under us.
	 */
 again:
	/*
	 * We limit the lock chain length for each invocation.
	 */
	if (++depth > max_lock_depth) {
		static int prev_max;

		/*
		 * Print this only once. If the admin changes the limit,
		 * print a new message when reaching the limit again.
		 */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       top_task->comm, task_pid_nr(top_task));
		}
		put_task_struct(task);

		return -EDEADLK;
	}

	/*
	 * We are fully preemptible here and only hold the refcount on
	 * @task. So everything can have changed under us since the
	 * caller or our own code below (goto retry/again) dropped all
	 * locks.
	 */
 retry:
	/*
	 * [1] Task cannot go away as we did a get_task() before !
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);

	/*
	 * [2] Get the waiter on which @task is blocked on.
	 */
	waiter = task->pi_blocked_on;

	/*
	 * [3] check_exit_conditions_1() protected by task->pi_lock.
	 */

	/*
	 * Check whether the end of the boosting chain has been
	 * reached or the state of the chain has changed while we
	 * dropped the locks.
	 */
	if (!waiter)
		goto out_unlock_pi;

	/*
	 * Check the orig_waiter state. After we dropped the locks,
	 * the previous owner of the lock might have released the lock.
	 */
	if (orig_waiter && !rt_mutex_owner(orig_lock))
		goto out_unlock_pi;

	/*
	 * We dropped all locks after taking a refcount on @task, so
	 * the task might have moved on in the lock chain or even left
	 * the chain completely and blocks now on an unrelated lock or
	 * on @orig_lock.
	 *
	 * We stored the lock on which @task was blocked in @next_lock,
	 * so we can detect the chain change.
	 */
	if (next_lock != waiter->lock)
		goto out_unlock_pi;

	/*
	 * Drop out, when the task has no waiters. Note,
	 * top_waiter can be NULL, when we are in the deboosting
	 * mode!
	 */
	if (top_waiter) {
		if (!task_has_pi_waiters(task))
			goto out_unlock_pi;
		/*
		 * If deadlock detection is off, we stop here if we
		 * are not the top pi waiter of the task. If deadlock
		 * detection is enabled we continue, but stop the
		 * requeueing in the chain walk.
		 */
		if (top_waiter != task_top_pi_waiter(task)) {
			if (!detect_deadlock)
				goto out_unlock_pi;
			else
				requeue = false;
		}
	}

	/*
	 * If the waiter priority is the same as the task priority
	 * then there is no further priority adjustment necessary. If
	 * deadlock detection is off, we stop the chain walk. If it's
	 * enabled we continue, but stop the requeueing in the chain
	 * walk.
	 */
	if (waiter->prio == task->prio) {
		if (!detect_deadlock)
			goto out_unlock_pi;
		else
			requeue = false;
	}

	/*
	 * [4] Get the next lock
	 */
	lock = waiter->lock;
	/*
	 * [5] We need to trylock here as we are holding task->pi_lock,
	 * which is the reverse lock order versus the other rtmutex
	 * operations.
	 */
	if (!raw_spin_trylock(&lock->wait_lock)) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		cpu_relax();
		goto retry;
	}

	/*
	 * [6] check_exit_conditions_2() protected by task->pi_lock and
	 * lock->wait_lock.
	 *
	 * Deadlock detection. If the lock is the same as the original
	 * lock which caused us to walk the lock chain or if the
	 * current lock is owned by the task which initiated the chain
	 * walk, we detected a deadlock.
	 */
	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
		debug_rt_mutex_deadlock(chwalk, orig_waiter, lock);
		raw_spin_unlock(&lock->wait_lock);
		ret = -EDEADLK;
		goto out_unlock_pi;
	}

	/*
	 * If we just follow the lock chain for deadlock detection, no
	 * need to do all the requeue operations. To avoid a truckload
	 * of conditionals around the various places below, just do the
	 * minimum chain walk checks.
	 */
	if (!requeue) {
		/*
		 * No requeue[7] here. Just release @task [8]
		 */
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		put_task_struct(task);

		/*
		 * [9] check_exit_conditions_3 protected by lock->wait_lock.
		 * If there is no owner of the lock, end of chain.
		 */
		if (!rt_mutex_owner(lock)) {
			raw_spin_unlock(&lock->wait_lock);
			return 0;
		}

		/* [10] Grab the next task, i.e. owner of @lock */
		task = rt_mutex_owner(lock);
		get_task_struct(task);
		raw_spin_lock_irqsave(&task->pi_lock, flags);

		/*
		 * No requeue [11] here. We just do deadlock detection.
		 *
		 * [12] Store whether owner is blocked
		 * itself. Decision is made after dropping the locks
		 */
		next_lock = task_blocked_on_lock(task);
		/*
		 * Get the top waiter for the next iteration
		 */
		top_waiter = rt_mutex_top_waiter(lock);

		/* [13] Drop locks */
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		raw_spin_unlock(&lock->wait_lock);

		/* If owner is not blocked, end of chain. */
		if (!next_lock)
			goto out_put_task;
		goto again;
	}

	/*
	 * Store the current top waiter before doing the requeue
	 * operation on @lock. We need it for the boost/deboost
	 * decision below.
	 */
	prerequeue_top_waiter = rt_mutex_top_waiter(lock);

	/* [7] Requeue the waiter in the lock waiter list. */
	rt_mutex_dequeue(lock, waiter);
	waiter->prio = task->prio;
	rt_mutex_enqueue(lock, waiter);

	/* [8] Release the task */
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	put_task_struct(task);

	/*
	 * [9] check_exit_conditions_3 protected by lock->wait_lock.
	 *
	 * We must abort the chain walk if there is no lock owner even
	 * in the dead lock detection case, as we have nothing to
	 * follow here. This is the end of the chain we are walking.
	 */
	if (!rt_mutex_owner(lock)) {
		/*
		 * If the requeue [7] above changed the top waiter,
		 * then we need to wake the new top waiter up to try
		 * to get the lock.
		 */
		if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
			wake_up_process(rt_mutex_top_waiter(lock)->task);
		raw_spin_unlock(&lock->wait_lock);
		return 0;
	}

	/* [10] Grab the next task, i.e. the owner of @lock */
	task = rt_mutex_owner(lock);
	get_task_struct(task);
	raw_spin_lock_irqsave(&task->pi_lock, flags);

	/* [11] requeue the pi waiters if necessary */
	if (waiter == rt_mutex_top_waiter(lock)) {
		/*
		 * The waiter became the new top (highest priority)
		 * waiter on the lock. Replace the previous top waiter
		 * in the owner tasks pi waiters list with this waiter
		 * and adjust the priority of the owner.
		 */
		rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
		rt_mutex_enqueue_pi(task, waiter);
		__rt_mutex_adjust_prio(task);

	} else if (prerequeue_top_waiter == waiter) {
		/*
		 * The waiter was the top waiter on the lock, but is
		 * no longer the top priority waiter. Replace waiter in
		 * the owner tasks pi waiters list with the new top
		 * (highest priority) waiter and adjust the priority
		 * of the owner.
		 * The new top waiter is stored in @waiter so that
		 * @waiter == @top_waiter evaluates to true below and
		 * we continue to deboost the rest of the chain.
		 */
		rt_mutex_dequeue_pi(task, waiter);
		waiter = rt_mutex_top_waiter(lock);
		rt_mutex_enqueue_pi(task, waiter);
		__rt_mutex_adjust_prio(task);
	} else {
		/*
		 * Nothing changed. No need to do any priority
		 * adjustment.
		 */
	}

	/*
	 * [12] check_exit_conditions_4() protected by task->pi_lock
	 * and lock->wait_lock. The actual decisions are made after we
	 * dropped the locks.
	 *
	 * Check whether the task which owns the current lock is pi
	 * blocked itself. If yes we store a pointer to the lock for
	 * the lock chain change detection above. After we dropped
	 * task->pi_lock next_lock cannot be dereferenced anymore.
	 */
	next_lock = task_blocked_on_lock(task);
	/*
	 * Store the top waiter of @lock for the end of chain walk
	 * decision below.
	 */
	top_waiter = rt_mutex_top_waiter(lock);

	/* [13] Drop the locks */
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	raw_spin_unlock(&lock->wait_lock);

	/*
	 * Make the actual exit decisions [12], based on the stored
	 * values.
	 *
	 * We reached the end of the lock chain. Stop right here. No
	 * point to go back just to figure that out.
	 */
	if (!next_lock)
		goto out_put_task;

	/*
	 * If the current waiter is not the top waiter on the lock,
	 * then we can stop the chain walk here if we are not in full
	 * deadlock detection mode.
	 */
	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;

 out_unlock_pi:
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
	put_task_struct(task);

	return ret;
}
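
/*
 * Worked example (illustrative addition) of the deadlock case: T1 holds
 * L1 and blocks on L2; T2 holds L2 and then tries to take L1. The chain
 * walk started on behalf of T2 (top_task) begins at T1, finds T1's
 * waiter on L2 and in step [6] observes rt_mutex_owner(L2) == T2 ==
 * top_task, so it returns -EDEADLK.
 */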

/*
 * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held.
 *
 * @lock:   The lock to be acquired.
 * @task:   The task which wants to acquire the lock
 * @waiter: The waiter that is queued to the lock's wait list if the
 *	    callsite called task_blocked_on_lock(), otherwise NULL
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
				struct rt_mutex_waiter *waiter)
{
	unsigned long flags;

	/*
	 * Before testing whether we can acquire @lock, we set the
	 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
	 * other tasks which try to modify @lock into the slow path
	 * and they serialize on @lock->wait_lock.
	 *
	 * The RT_MUTEX_HAS_WAITERS bit can have a transitional state
	 * as explained at the top of this file if and only if:
	 *
	 * - There is a lock owner. The caller must fixup the
	 *   transient state if it does a trylock or leaves the lock
	 *   function due to a signal or timeout.
	 *
	 * - @task acquires the lock and there are no other
	 *   waiters. This is undone in rt_mutex_set_owner(@task) at
	 *   the end of this function.
	 */
	mark_rt_mutex_waiters(lock);

	/*
	 * If @lock has an owner, give up.
	 */
	if (rt_mutex_owner(lock))
		return 0;

	/*
	 * If @waiter != NULL, @task has already enqueued the waiter
	 * into @lock waiter list. If @waiter == NULL then this is a
	 * trylock attempt.
	 */
	if (waiter) {
		/*
		 * If waiter is not the highest priority waiter of
		 * @lock, give up.
		 */
		if (waiter != rt_mutex_top_waiter(lock))
			return 0;

		/*
		 * We can acquire the lock. Remove the waiter from the
		 * lock waiters list.
		 */
		rt_mutex_dequeue(lock, waiter);

	} else {
		/*
		 * If the lock has waiters already we check whether @task is
		 * eligible to take over the lock.
		 *
		 * If there are no other waiters, @task can acquire
		 * the lock. @task->pi_blocked_on is NULL, so it does
		 * not need to be dequeued.
		 */
		if (rt_mutex_has_waiters(lock)) {
			/*
			 * If @task->prio is greater than or equal to
			 * the top waiter priority (kernel view),
			 * @task lost.
			 */
			if (task->prio >= rt_mutex_top_waiter(lock)->prio)
				return 0;

			/*
			 * The current top waiter stays enqueued. We
			 * don't have to change anything in the lock
			 * waiters order.
			 */
		} else {
			/*
			 * No waiters. Take the lock without the
			 * pi_lock dance. @task->pi_blocked_on is NULL
			 * and we have no waiters to enqueue in @task
			 * pi waiters list.
			 */
			goto takeit;
		}
	}

	/*
	 * Clear @task->pi_blocked_on. Requires protection by
	 * @task->pi_lock. Redundant operation for the @waiter == NULL
	 * case, but conditionals are more expensive than a redundant
	 * store.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	task->pi_blocked_on = NULL;
	/*
	 * Finish the lock acquisition. @task is the new owner. If
	 * other waiters exist we have to insert the highest priority
	 * waiter into @task->pi_waiters list.
	 */
	if (rt_mutex_has_waiters(lock))
		rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

takeit:
	/* We got the lock. */
	debug_rt_mutex_lock(lock);

	/*
	 * This either preserves the RT_MUTEX_HAS_WAITERS bit if there
	 * are still waiters or clears it.
	 */
	rt_mutex_set_owner(lock, task);

	rt_mutex_deadlock_account_lock(lock, task);

	return 1;
}
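
/*
 * Worked example (illustrative addition): if the top waiter on @lock
 * has ->prio 20, a trylock attempt (@waiter == NULL) by a task that
 * also has ->prio 20 fails the task->prio >= top waiter prio check
 * above. Equal priority is not allowed to steal the lock from the top
 * waiter; only a strictly higher priority (numerically smaller ->prio)
 * task can take it over.
 */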

/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
				   struct rt_mutex_waiter *waiter,
				   struct task_struct *task,
				   enum rtmutex_chainwalk chwalk)
{
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *top_waiter = waiter;
	struct rt_mutex *next_lock;
	int chain_walk = 0, res;
	unsigned long flags;

	/*
	 * Early deadlock detection. We really don't want the task to
	 * enqueue on itself just to untangle the mess later. It's not
	 * only an optimization. We drop the locks, so another waiter
	 * can come in before the chain walk detects the deadlock. So
	 * the other will detect the deadlock and return -EDEADLOCK,
	 * which is wrong, as the other waiter is not in a deadlock
	 * situation.
	 */
	if (owner == task)
		return -EDEADLK;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	waiter->task = task;
	waiter->lock = lock;
	waiter->prio = task->prio;

	/* Get the top priority waiter on the lock */
	if (rt_mutex_has_waiters(lock))
		top_waiter = rt_mutex_top_waiter(lock);
	rt_mutex_enqueue(lock, waiter);

	task->pi_blocked_on = waiter;

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	if (!owner)
		return 0;

	raw_spin_lock_irqsave(&owner->pi_lock, flags);
	if (waiter == rt_mutex_top_waiter(lock)) {
		rt_mutex_dequeue_pi(owner, top_waiter);
		rt_mutex_enqueue_pi(owner, waiter);

		__rt_mutex_adjust_prio(owner);
		if (owner->pi_blocked_on)
			chain_walk = 1;
	} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
		chain_walk = 1;
	}

	/* Store the lock on which owner is blocked or NULL */
	next_lock = task_blocked_on_lock(owner);

	raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
	/*
	 * Even if full deadlock detection is on, if the owner is not
	 * blocked itself, we can avoid finding this out in the chain
	 * walk.
	 */
	if (!chain_walk || !next_lock)
		return 0;

	/*
	 * The owner can't disappear while holding a lock,
	 * so the owner struct is protected by wait_lock.
	 * Gets dropped in rt_mutex_adjust_prio_chain()!
	 */
	get_task_struct(owner);

	raw_spin_unlock(&lock->wait_lock);

	res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
					 next_lock, waiter, task);

	raw_spin_lock(&lock->wait_lock);

	return res;
}
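
/*
 * Worked example (illustrative addition): for plain contention on a
 * single lock whose owner is running and not blocked on anything,
 * task_blocked_on_lock(owner) yields NULL above, so no chain walk is
 * started even when a full deadlock detecting chain walk was requested.
 */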

/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current tasks pi waiter list and
 * wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
	struct rt_mutex_waiter *waiter;
	unsigned long flags;

	raw_spin_lock_irqsave(&current->pi_lock, flags);

	waiter = rt_mutex_top_waiter(lock);

	/*
	 * Remove it from current->pi_waiters. We do not adjust a
	 * possible priority boost right now. We execute wakeup in the
	 * boosted mode and go back to normal after releasing
	 * lock->wait_lock.
	 */
	rt_mutex_dequeue_pi(current, waiter);

	/*
	 * As we are waking up the top waiter, and the waiter stays
	 * queued on the lock until it gets the lock, this lock
	 * obviously has waiters. Just set the bit here and this has
	 * the added benefit of forcing all new tasks into the
	 * slow path making sure no task of lower priority than
	 * the top waiter can steal this lock.
	 */
	lock->owner = (void *) RT_MUTEX_HAS_WAITERS;

	raw_spin_unlock_irqrestore(&current->pi_lock, flags);

	/*
	 * It's safe to dereference waiter as it cannot go away as
	 * long as we hold lock->wait_lock. The waiter task needs to
	 * acquire it in order to dequeue the waiter.
	 */
	wake_up_process(waiter->task);
}
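
/*
 * Illustrative note (not part of the original file): the woken top
 * waiter does not own the lock yet. It returns from schedule() in
 * __rt_mutex_slowlock(), retakes lock->wait_lock and acquires the lock
 * via try_to_take_rt_mutex(). The RT_MUTEX_HAS_WAITERS owner word set
 * above guarantees that no fast path cmpxchg can snatch the lock in
 * between.
 */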

/*
 * Remove a waiter from a lock and give up
 *
 * Must be called with lock->wait_lock held and
 * have just failed to try_to_take_rt_mutex().
 */
static void remove_waiter(struct rt_mutex *lock,
			  struct rt_mutex_waiter *waiter)
{
	bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex *next_lock;
	unsigned long flags;

	raw_spin_lock_irqsave(&current->pi_lock, flags);
	rt_mutex_dequeue(lock, waiter);
	current->pi_blocked_on = NULL;
	raw_spin_unlock_irqrestore(&current->pi_lock, flags);

	/*
	 * Only update priority if the waiter was the highest priority
	 * waiter of the lock and there is an owner to update.
	 */
	if (!owner || !is_top_waiter)
		return;

	raw_spin_lock_irqsave(&owner->pi_lock, flags);

	rt_mutex_dequeue_pi(owner, waiter);

	if (rt_mutex_has_waiters(lock))
		rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));

	__rt_mutex_adjust_prio(owner);

	/* Store the lock on which owner is blocked or NULL */
	next_lock = task_blocked_on_lock(owner);

	raw_spin_unlock_irqrestore(&owner->pi_lock, flags);

	/*
	 * Don't walk the chain, if the owner task is not blocked
	 * itself.
	 */
	if (!next_lock)
		return;

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(owner);

	raw_spin_unlock(&lock->wait_lock);

	rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
				   next_lock, NULL, current);

	raw_spin_lock(&lock->wait_lock);
}

/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	struct rt_mutex *next_lock;
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || (waiter->prio == task->prio &&
			!dl_prio(task->prio))) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}
	next_lock = waiter->lock;
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);

	rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
				   next_lock, NULL, task);
}
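
/*
 * Worked example (illustrative addition): a task blocked with
 * waiter->prio 40 is changed to SCHED_FIFO prio 90 (->prio 9) by
 * sched_setscheduler(). waiter->prio no longer matches task->prio, so
 * a minimal chain walk is started from waiter->lock to requeue the
 * waiter and, if it became the top waiter, boost the lock owner.
 */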
*/ 10811696a8beSPeter Zijlstra get_task_struct(task); 108282084984SThomas Gleixner 10838930ed80SThomas Gleixner rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL, 10848930ed80SThomas Gleixner next_lock, NULL, task); 10851696a8beSPeter Zijlstra } 10861696a8beSPeter Zijlstra 10871696a8beSPeter Zijlstra /** 10881696a8beSPeter Zijlstra * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop 10891696a8beSPeter Zijlstra * @lock: the rt_mutex to take 10901696a8beSPeter Zijlstra * @state: the state the task should block in (TASK_INTERRUPTIBLE 10911696a8beSPeter Zijlstra * or TASK_UNINTERRUPTIBLE) 10921696a8beSPeter Zijlstra * @timeout: the pre-initialized and started timer, or NULL for none 10931696a8beSPeter Zijlstra * @waiter: the pre-initialized rt_mutex_waiter 10941696a8beSPeter Zijlstra * 10951696a8beSPeter Zijlstra * lock->wait_lock must be held by the caller. 10961696a8beSPeter Zijlstra */ 10971696a8beSPeter Zijlstra static int __sched 10981696a8beSPeter Zijlstra __rt_mutex_slowlock(struct rt_mutex *lock, int state, 10991696a8beSPeter Zijlstra struct hrtimer_sleeper *timeout, 11001696a8beSPeter Zijlstra struct rt_mutex_waiter *waiter) 11011696a8beSPeter Zijlstra { 11021696a8beSPeter Zijlstra int ret = 0; 11031696a8beSPeter Zijlstra 11041696a8beSPeter Zijlstra for (;;) { 11051696a8beSPeter Zijlstra /* Try to acquire the lock: */ 11061696a8beSPeter Zijlstra if (try_to_take_rt_mutex(lock, current, waiter)) 11071696a8beSPeter Zijlstra break; 11081696a8beSPeter Zijlstra 11091696a8beSPeter Zijlstra /* 11101696a8beSPeter Zijlstra * TASK_INTERRUPTIBLE checks for signals and 11111696a8beSPeter Zijlstra * timeout. Ignored otherwise. 11121696a8beSPeter Zijlstra */ 11131696a8beSPeter Zijlstra if (unlikely(state == TASK_INTERRUPTIBLE)) { 11141696a8beSPeter Zijlstra /* Signal pending? */ 11151696a8beSPeter Zijlstra if (signal_pending(current)) 11161696a8beSPeter Zijlstra ret = -EINTR; 11171696a8beSPeter Zijlstra if (timeout && !timeout->task) 11181696a8beSPeter Zijlstra ret = -ETIMEDOUT; 11191696a8beSPeter Zijlstra if (ret) 11201696a8beSPeter Zijlstra break; 11211696a8beSPeter Zijlstra } 11221696a8beSPeter Zijlstra 11231696a8beSPeter Zijlstra raw_spin_unlock(&lock->wait_lock); 11241696a8beSPeter Zijlstra 11251696a8beSPeter Zijlstra debug_rt_mutex_print_deadlock(waiter); 11261696a8beSPeter Zijlstra 11271696a8beSPeter Zijlstra schedule_rt_mutex(lock); 11281696a8beSPeter Zijlstra 11291696a8beSPeter Zijlstra raw_spin_lock(&lock->wait_lock); 11301696a8beSPeter Zijlstra set_current_state(state); 11311696a8beSPeter Zijlstra } 11321696a8beSPeter Zijlstra 11331696a8beSPeter Zijlstra return ret; 11341696a8beSPeter Zijlstra } 11351696a8beSPeter Zijlstra 11363d5c9340SThomas Gleixner static void rt_mutex_handle_deadlock(int res, int detect_deadlock, 11373d5c9340SThomas Gleixner struct rt_mutex_waiter *w) 11383d5c9340SThomas Gleixner { 11393d5c9340SThomas Gleixner /* 11403d5c9340SThomas Gleixner * If the result is not -EDEADLOCK or the caller requested 11413d5c9340SThomas Gleixner * deadlock detection, nothing to do here. 11423d5c9340SThomas Gleixner */ 11433d5c9340SThomas Gleixner if (res != -EDEADLOCK || detect_deadlock) 11443d5c9340SThomas Gleixner return; 11453d5c9340SThomas Gleixner 11463d5c9340SThomas Gleixner /* 11473d5c9340SThomas Gleixner * Yell loudly and stop the task right here.
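 * The deadlocked task is parked in an endless sleep/schedule loop
 * below and never returns to its caller.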
11483d5c9340SThomas Gleixner */ 11493d5c9340SThomas Gleixner rt_mutex_print_deadlock(w); 11503d5c9340SThomas Gleixner while (1) { 11513d5c9340SThomas Gleixner set_current_state(TASK_INTERRUPTIBLE); 11523d5c9340SThomas Gleixner schedule(); 11533d5c9340SThomas Gleixner } 11543d5c9340SThomas Gleixner } 11553d5c9340SThomas Gleixner 11561696a8beSPeter Zijlstra /* 11571696a8beSPeter Zijlstra * Slow path lock function: 11581696a8beSPeter Zijlstra */ 11591696a8beSPeter Zijlstra static int __sched 11601696a8beSPeter Zijlstra rt_mutex_slowlock(struct rt_mutex *lock, int state, 11611696a8beSPeter Zijlstra struct hrtimer_sleeper *timeout, 11628930ed80SThomas Gleixner enum rtmutex_chainwalk chwalk) 11631696a8beSPeter Zijlstra { 11641696a8beSPeter Zijlstra struct rt_mutex_waiter waiter; 11651696a8beSPeter Zijlstra int ret = 0; 11661696a8beSPeter Zijlstra 11671696a8beSPeter Zijlstra debug_rt_mutex_init_waiter(&waiter); 1168fb00aca4SPeter Zijlstra RB_CLEAR_NODE(&waiter.pi_tree_entry); 1169fb00aca4SPeter Zijlstra RB_CLEAR_NODE(&waiter.tree_entry); 11701696a8beSPeter Zijlstra 11711696a8beSPeter Zijlstra raw_spin_lock(&lock->wait_lock); 11721696a8beSPeter Zijlstra 11731696a8beSPeter Zijlstra /* Try to acquire the lock again: */ 11741696a8beSPeter Zijlstra if (try_to_take_rt_mutex(lock, current, NULL)) { 11751696a8beSPeter Zijlstra raw_spin_unlock(&lock->wait_lock); 11761696a8beSPeter Zijlstra return 0; 11771696a8beSPeter Zijlstra } 11781696a8beSPeter Zijlstra 11791696a8beSPeter Zijlstra set_current_state(state); 11801696a8beSPeter Zijlstra 11811696a8beSPeter Zijlstra /* Set up the timer when timeout != NULL */ 11821696a8beSPeter Zijlstra if (unlikely(timeout)) { 11831696a8beSPeter Zijlstra hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS); 11841696a8beSPeter Zijlstra if (!hrtimer_active(&timeout->timer)) 11851696a8beSPeter Zijlstra timeout->task = NULL; 11861696a8beSPeter Zijlstra } 11871696a8beSPeter Zijlstra 11888930ed80SThomas Gleixner ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk); 11891696a8beSPeter Zijlstra 11901696a8beSPeter Zijlstra if (likely(!ret)) 11911696a8beSPeter Zijlstra ret = __rt_mutex_slowlock(lock, state, timeout, &waiter); 11921696a8beSPeter Zijlstra 11931696a8beSPeter Zijlstra set_current_state(TASK_RUNNING); 11941696a8beSPeter Zijlstra 11953d5c9340SThomas Gleixner if (unlikely(ret)) { 11961696a8beSPeter Zijlstra remove_waiter(lock, &waiter); 11978930ed80SThomas Gleixner rt_mutex_handle_deadlock(ret, chwalk, &waiter); 11983d5c9340SThomas Gleixner } 11991696a8beSPeter Zijlstra 12001696a8beSPeter Zijlstra /* 12011696a8beSPeter Zijlstra * try_to_take_rt_mutex() sets the waiter bit 12021696a8beSPeter Zijlstra * unconditionally. We might have to fix that up.
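 * fixup_rt_mutex_waiters() below clears the waiters bit again if the
 * wait list turned out to be empty.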
12031696a8beSPeter Zijlstra */ 12041696a8beSPeter Zijlstra fixup_rt_mutex_waiters(lock); 12051696a8beSPeter Zijlstra 12061696a8beSPeter Zijlstra raw_spin_unlock(&lock->wait_lock); 12071696a8beSPeter Zijlstra 12081696a8beSPeter Zijlstra /* Remove pending timer: */ 12091696a8beSPeter Zijlstra if (unlikely(timeout)) 12101696a8beSPeter Zijlstra hrtimer_cancel(&timeout->timer); 12111696a8beSPeter Zijlstra 12121696a8beSPeter Zijlstra debug_rt_mutex_free_waiter(&waiter); 12131696a8beSPeter Zijlstra 12141696a8beSPeter Zijlstra return ret; 12151696a8beSPeter Zijlstra } 12161696a8beSPeter Zijlstra 12171696a8beSPeter Zijlstra /* 12181696a8beSPeter Zijlstra * Slow path try-lock function: 12191696a8beSPeter Zijlstra */ 122088f2b4c1SThomas Gleixner static inline int rt_mutex_slowtrylock(struct rt_mutex *lock) 12211696a8beSPeter Zijlstra { 122288f2b4c1SThomas Gleixner int ret; 12231696a8beSPeter Zijlstra 122488f2b4c1SThomas Gleixner /* 122588f2b4c1SThomas Gleixner * If the lock already has an owner we fail to get the lock. 122688f2b4c1SThomas Gleixner * This can be done without taking the @lock->wait_lock as 122788f2b4c1SThomas Gleixner * it is only being read, and this is a trylock anyway. 122888f2b4c1SThomas Gleixner */ 122988f2b4c1SThomas Gleixner if (rt_mutex_owner(lock)) 123088f2b4c1SThomas Gleixner return 0; 123188f2b4c1SThomas Gleixner 123288f2b4c1SThomas Gleixner /* 123388f2b4c1SThomas Gleixner * The mutex has currently no owner. Lock the wait lock and 123488f2b4c1SThomas Gleixner * try to acquire the lock. 123588f2b4c1SThomas Gleixner */ 12361696a8beSPeter Zijlstra raw_spin_lock(&lock->wait_lock); 12371696a8beSPeter Zijlstra 12381696a8beSPeter Zijlstra ret = try_to_take_rt_mutex(lock, current, NULL); 123988f2b4c1SThomas Gleixner 12401696a8beSPeter Zijlstra /* 124188f2b4c1SThomas Gleixner * try_to_take_rt_mutex() sets the lock waiters bit 124288f2b4c1SThomas Gleixner * unconditionally. Clean this up. 12431696a8beSPeter Zijlstra */ 12441696a8beSPeter Zijlstra fixup_rt_mutex_waiters(lock); 12451696a8beSPeter Zijlstra 12461696a8beSPeter Zijlstra raw_spin_unlock(&lock->wait_lock); 12471696a8beSPeter Zijlstra 12481696a8beSPeter Zijlstra return ret; 12491696a8beSPeter Zijlstra } 12501696a8beSPeter Zijlstra 12511696a8beSPeter Zijlstra /* 12521696a8beSPeter Zijlstra * Slow path to release a rt-mutex: 12531696a8beSPeter Zijlstra */ 12541696a8beSPeter Zijlstra static void __sched 12551696a8beSPeter Zijlstra rt_mutex_slowunlock(struct rt_mutex *lock) 12561696a8beSPeter Zijlstra { 12571696a8beSPeter Zijlstra raw_spin_lock(&lock->wait_lock); 12581696a8beSPeter Zijlstra 12591696a8beSPeter Zijlstra debug_rt_mutex_unlock(lock); 12601696a8beSPeter Zijlstra 12611696a8beSPeter Zijlstra rt_mutex_deadlock_account_unlock(current); 12621696a8beSPeter Zijlstra 126327e35715SThomas Gleixner /* 126427e35715SThomas Gleixner * We must be careful here if the fast path is enabled. 
If we 126527e35715SThomas Gleixner * have no waiters queued we cannot set owner to NULL here 126627e35715SThomas Gleixner * because of: 126727e35715SThomas Gleixner * 126827e35715SThomas Gleixner * foo->lock->owner = NULL; 126927e35715SThomas Gleixner * rtmutex_lock(foo->lock); <- fast path 127027e35715SThomas Gleixner * free = atomic_dec_and_test(foo->refcnt); 127127e35715SThomas Gleixner * rtmutex_unlock(foo->lock); <- fast path 127227e35715SThomas Gleixner * if (free) 127327e35715SThomas Gleixner * kfree(foo); 127427e35715SThomas Gleixner * raw_spin_unlock(foo->lock->wait_lock); 127527e35715SThomas Gleixner * 127627e35715SThomas Gleixner * So for the fastpath enabled kernel: 127727e35715SThomas Gleixner * 127827e35715SThomas Gleixner * Nothing can set the waiters bit as long as we hold 127927e35715SThomas Gleixner * lock->wait_lock. So we do the following sequence: 128027e35715SThomas Gleixner * 128127e35715SThomas Gleixner * owner = rt_mutex_owner(lock); 128227e35715SThomas Gleixner * clear_rt_mutex_waiters(lock); 128327e35715SThomas Gleixner * raw_spin_unlock(&lock->wait_lock); 128427e35715SThomas Gleixner * if (cmpxchg(&lock->owner, owner, 0) == owner) 128527e35715SThomas Gleixner * return; 128627e35715SThomas Gleixner * goto retry; 128727e35715SThomas Gleixner * 128827e35715SThomas Gleixner * The fastpath disabled variant is simple as all access to 128927e35715SThomas Gleixner * lock->owner is serialized by lock->wait_lock: 129027e35715SThomas Gleixner * 129127e35715SThomas Gleixner * lock->owner = NULL; 129227e35715SThomas Gleixner * raw_spin_unlock(&lock->wait_lock); 129327e35715SThomas Gleixner */ 129427e35715SThomas Gleixner while (!rt_mutex_has_waiters(lock)) { 129527e35715SThomas Gleixner /* Drops lock->wait_lock ! */ 129627e35715SThomas Gleixner if (unlock_rt_mutex_safe(lock) == true) 12971696a8beSPeter Zijlstra return; 129827e35715SThomas Gleixner /* Relock the rtmutex and try again */ 129927e35715SThomas Gleixner raw_spin_lock(&lock->wait_lock); 13001696a8beSPeter Zijlstra } 13011696a8beSPeter Zijlstra 130227e35715SThomas Gleixner /* 130327e35715SThomas Gleixner * The wakeup next waiter path does not suffer from the above 130427e35715SThomas Gleixner * race. See the comments there. 130527e35715SThomas Gleixner */ 13061696a8beSPeter Zijlstra wakeup_next_waiter(lock); 13071696a8beSPeter Zijlstra 13081696a8beSPeter Zijlstra raw_spin_unlock(&lock->wait_lock); 13091696a8beSPeter Zijlstra 13101696a8beSPeter Zijlstra /* Undo pi boosting if necessary: */ 13111696a8beSPeter Zijlstra rt_mutex_adjust_prio(current); 13121696a8beSPeter Zijlstra } 13131696a8beSPeter Zijlstra 13141696a8beSPeter Zijlstra /* 13151696a8beSPeter Zijlstra * debug aware fast / slowpath lock,trylock,unlock 13161696a8beSPeter Zijlstra * 13171696a8beSPeter Zijlstra * The atomic acquire/release ops are compiled away, when either the 13181696a8beSPeter Zijlstra * architecture does not support cmpxchg or when debugging is enabled. 
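 *
 * With the fastpath enabled, the uncontended case is a single cmpxchg on
 * lock->owner (see rt_mutex_cmpxchg()):
 *
 *	lock:	cmpxchg(&lock->owner, NULL, current)
 *	unlock:	cmpxchg(&lock->owner, current, NULL)
 *
 * Everything else is routed to the supplied slow path function.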
13191696a8beSPeter Zijlstra */ 13201696a8beSPeter Zijlstra static inline int 13211696a8beSPeter Zijlstra rt_mutex_fastlock(struct rt_mutex *lock, int state, 13221696a8beSPeter Zijlstra int (*slowfn)(struct rt_mutex *lock, int state, 13231696a8beSPeter Zijlstra struct hrtimer_sleeper *timeout, 13248930ed80SThomas Gleixner enum rtmutex_chainwalk chwalk)) 13251696a8beSPeter Zijlstra { 1326c051b21fSThomas Gleixner if (likely(rt_mutex_cmpxchg(lock, NULL, current))) { 13271696a8beSPeter Zijlstra rt_mutex_deadlock_account_lock(lock, current); 13281696a8beSPeter Zijlstra return 0; 13291696a8beSPeter Zijlstra } else 13308930ed80SThomas Gleixner return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); 13311696a8beSPeter Zijlstra } 13321696a8beSPeter Zijlstra 13331696a8beSPeter Zijlstra static inline int 13341696a8beSPeter Zijlstra rt_mutex_timed_fastlock(struct rt_mutex *lock, int state, 13358930ed80SThomas Gleixner struct hrtimer_sleeper *timeout, 13368930ed80SThomas Gleixner enum rtmutex_chainwalk chwalk, 13371696a8beSPeter Zijlstra int (*slowfn)(struct rt_mutex *lock, int state, 13381696a8beSPeter Zijlstra struct hrtimer_sleeper *timeout, 13398930ed80SThomas Gleixner enum rtmutex_chainwalk chwalk)) 13401696a8beSPeter Zijlstra { 13418930ed80SThomas Gleixner if (chwalk == RT_MUTEX_MIN_CHAINWALK && 13428930ed80SThomas Gleixner likely(rt_mutex_cmpxchg(lock, NULL, current))) { 13431696a8beSPeter Zijlstra rt_mutex_deadlock_account_lock(lock, current); 13441696a8beSPeter Zijlstra return 0; 13451696a8beSPeter Zijlstra } else 13468930ed80SThomas Gleixner return slowfn(lock, state, timeout, chwalk); 13471696a8beSPeter Zijlstra } 13481696a8beSPeter Zijlstra 13491696a8beSPeter Zijlstra static inline int 13501696a8beSPeter Zijlstra rt_mutex_fasttrylock(struct rt_mutex *lock, 13511696a8beSPeter Zijlstra int (*slowfn)(struct rt_mutex *lock)) 13521696a8beSPeter Zijlstra { 13531696a8beSPeter Zijlstra if (likely(rt_mutex_cmpxchg(lock, NULL, current))) { 13541696a8beSPeter Zijlstra rt_mutex_deadlock_account_lock(lock, current); 13551696a8beSPeter Zijlstra return 1; 13561696a8beSPeter Zijlstra } 13571696a8beSPeter Zijlstra return slowfn(lock); 13581696a8beSPeter Zijlstra } 13591696a8beSPeter Zijlstra 13601696a8beSPeter Zijlstra static inline void 13611696a8beSPeter Zijlstra rt_mutex_fastunlock(struct rt_mutex *lock, 13621696a8beSPeter Zijlstra void (*slowfn)(struct rt_mutex *lock)) 13631696a8beSPeter Zijlstra { 13641696a8beSPeter Zijlstra if (likely(rt_mutex_cmpxchg(lock, current, NULL))) 13651696a8beSPeter Zijlstra rt_mutex_deadlock_account_unlock(current); 13661696a8beSPeter Zijlstra else 13671696a8beSPeter Zijlstra slowfn(lock); 13681696a8beSPeter Zijlstra } 13691696a8beSPeter Zijlstra 13701696a8beSPeter Zijlstra /** 13711696a8beSPeter Zijlstra * rt_mutex_lock - lock a rt_mutex 13721696a8beSPeter Zijlstra * 13731696a8beSPeter Zijlstra * @lock: the rt_mutex to be locked 13741696a8beSPeter Zijlstra */ 13751696a8beSPeter Zijlstra void __sched rt_mutex_lock(struct rt_mutex *lock) 13761696a8beSPeter Zijlstra { 13771696a8beSPeter Zijlstra might_sleep(); 13781696a8beSPeter Zijlstra 1379c051b21fSThomas Gleixner rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock); 13801696a8beSPeter Zijlstra } 13811696a8beSPeter Zijlstra EXPORT_SYMBOL_GPL(rt_mutex_lock); 13821696a8beSPeter Zijlstra 13831696a8beSPeter Zijlstra /** 13841696a8beSPeter Zijlstra * rt_mutex_lock_interruptible - lock a rt_mutex interruptibly 13851696a8beSPeter Zijlstra * 13861696a8beSPeter Zijlstra * @lock: the rt_mutex to be locked
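 *
 * A minimal usage sketch; my_lock is a hypothetical, already initialized
 * rt_mutex:
 *
 *	ret = rt_mutex_lock_interruptible(&my_lock);
 *	if (ret)
 *		return ret;	(interrupted by a signal, ret == -EINTR)
 *	... critical section ...
 *	rt_mutex_unlock(&my_lock);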
13871696a8beSPeter Zijlstra * 13881696a8beSPeter Zijlstra * Returns: 13891696a8beSPeter Zijlstra * 0 on success 13901696a8beSPeter Zijlstra * -EINTR when interrupted by a signal 13911696a8beSPeter Zijlstra */ 1392c051b21fSThomas Gleixner int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock) 13931696a8beSPeter Zijlstra { 13941696a8beSPeter Zijlstra might_sleep(); 13951696a8beSPeter Zijlstra 1396c051b21fSThomas Gleixner return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock); 13971696a8beSPeter Zijlstra } 13981696a8beSPeter Zijlstra EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); 13991696a8beSPeter Zijlstra 1400c051b21fSThomas Gleixner /* 1401c051b21fSThomas Gleixner * Futex variant with full deadlock detection. 1402c051b21fSThomas Gleixner */ 1403c051b21fSThomas Gleixner int rt_mutex_timed_futex_lock(struct rt_mutex *lock, 1404c051b21fSThomas Gleixner struct hrtimer_sleeper *timeout) 1405c051b21fSThomas Gleixner { 1406c051b21fSThomas Gleixner might_sleep(); 1407c051b21fSThomas Gleixner 14088930ed80SThomas Gleixner return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, 14098930ed80SThomas Gleixner RT_MUTEX_FULL_CHAINWALK, 1410c051b21fSThomas Gleixner rt_mutex_slowlock); 1411c051b21fSThomas Gleixner } 1412c051b21fSThomas Gleixner 14131696a8beSPeter Zijlstra /** 14141696a8beSPeter Zijlstra * rt_mutex_timed_lock - lock a rt_mutex interruptibly; 14151696a8beSPeter Zijlstra * the timeout structure is provided 14161696a8beSPeter Zijlstra * by the caller 14171696a8beSPeter Zijlstra * 14181696a8beSPeter Zijlstra * @lock: the rt_mutex to be locked 14191696a8beSPeter Zijlstra * @timeout: timeout structure or NULL (no timeout) 14201696a8beSPeter Zijlstra * 14211696a8beSPeter Zijlstra * Returns: 14221696a8beSPeter Zijlstra * 0 on success 14231696a8beSPeter Zijlstra * -EINTR when interrupted by a signal 14241696a8beSPeter Zijlstra * -ETIMEDOUT when the timeout expired 14251696a8beSPeter Zijlstra */ 14261696a8beSPeter Zijlstra int 1427c051b21fSThomas Gleixner rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout) 14281696a8beSPeter Zijlstra { 14291696a8beSPeter Zijlstra might_sleep(); 14301696a8beSPeter Zijlstra 14318930ed80SThomas Gleixner return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, 14328930ed80SThomas Gleixner RT_MUTEX_MIN_CHAINWALK, 1433c051b21fSThomas Gleixner rt_mutex_slowlock); 14341696a8beSPeter Zijlstra } 14351696a8beSPeter Zijlstra EXPORT_SYMBOL_GPL(rt_mutex_timed_lock); 14361696a8beSPeter Zijlstra 14371696a8beSPeter Zijlstra /** 14381696a8beSPeter Zijlstra * rt_mutex_trylock - try to lock a rt_mutex 14391696a8beSPeter Zijlstra * 14401696a8beSPeter Zijlstra * @lock: the rt_mutex to be locked 14411696a8beSPeter Zijlstra * 14421696a8beSPeter Zijlstra * Returns 1 on success and 0 on contention 14431696a8beSPeter Zijlstra */ 14441696a8beSPeter Zijlstra int __sched rt_mutex_trylock(struct rt_mutex *lock) 14451696a8beSPeter Zijlstra { 14461696a8beSPeter Zijlstra return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); 14471696a8beSPeter Zijlstra } 14481696a8beSPeter Zijlstra EXPORT_SYMBOL_GPL(rt_mutex_trylock); 14491696a8beSPeter Zijlstra 14501696a8beSPeter Zijlstra /** 14511696a8beSPeter Zijlstra * rt_mutex_unlock - unlock a rt_mutex 14521696a8beSPeter Zijlstra * 14531696a8beSPeter Zijlstra * @lock: the rt_mutex to be unlocked 14541696a8beSPeter Zijlstra */ 14551696a8beSPeter Zijlstra void __sched rt_mutex_unlock(struct rt_mutex *lock) 14561696a8beSPeter Zijlstra { 14571696a8beSPeter Zijlstra rt_mutex_fastunlock(lock,
rt_mutex_slowunlock); 14581696a8beSPeter Zijlstra } 14591696a8beSPeter Zijlstra EXPORT_SYMBOL_GPL(rt_mutex_unlock); 14601696a8beSPeter Zijlstra 14611696a8beSPeter Zijlstra /** 14621696a8beSPeter Zijlstra * rt_mutex_destroy - mark a mutex unusable 14631696a8beSPeter Zijlstra * @lock: the mutex to be destroyed 14641696a8beSPeter Zijlstra * 14651696a8beSPeter Zijlstra * This function marks the mutex uninitialized, and any subsequent 14661696a8beSPeter Zijlstra * use of the mutex is forbidden. The mutex must not be locked when 14671696a8beSPeter Zijlstra * this function is called. 14681696a8beSPeter Zijlstra */ 14691696a8beSPeter Zijlstra void rt_mutex_destroy(struct rt_mutex *lock) 14701696a8beSPeter Zijlstra { 14711696a8beSPeter Zijlstra WARN_ON(rt_mutex_is_locked(lock)); 14721696a8beSPeter Zijlstra #ifdef CONFIG_DEBUG_RT_MUTEXES 14731696a8beSPeter Zijlstra lock->magic = NULL; 14741696a8beSPeter Zijlstra #endif 14751696a8beSPeter Zijlstra } 14761696a8beSPeter Zijlstra 14771696a8beSPeter Zijlstra EXPORT_SYMBOL_GPL(rt_mutex_destroy); 14781696a8beSPeter Zijlstra 14791696a8beSPeter Zijlstra /** 14801696a8beSPeter Zijlstra * __rt_mutex_init - initialize the rt lock 14811696a8beSPeter Zijlstra * 14821696a8beSPeter Zijlstra * @lock: the rt lock to be initialized 14831696a8beSPeter Zijlstra * 14841696a8beSPeter Zijlstra * Initialize the rt lock to unlocked state. 14851696a8beSPeter Zijlstra * 14861696a8beSPeter Zijlstra * Initializing a locked rt lock is not allowed 14871696a8beSPeter Zijlstra */ 14881696a8beSPeter Zijlstra void __rt_mutex_init(struct rt_mutex *lock, const char *name) 14891696a8beSPeter Zijlstra { 14901696a8beSPeter Zijlstra lock->owner = NULL; 14911696a8beSPeter Zijlstra raw_spin_lock_init(&lock->wait_lock); 1492fb00aca4SPeter Zijlstra lock->waiters = RB_ROOT; 1493fb00aca4SPeter Zijlstra lock->waiters_leftmost = NULL; 14941696a8beSPeter Zijlstra 14951696a8beSPeter Zijlstra debug_rt_mutex_init(lock, name); 14961696a8beSPeter Zijlstra } 14971696a8beSPeter Zijlstra EXPORT_SYMBOL_GPL(__rt_mutex_init); 14981696a8beSPeter Zijlstra 14991696a8beSPeter Zijlstra /** 15001696a8beSPeter Zijlstra * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a 15011696a8beSPeter Zijlstra * proxy owner 15021696a8beSPeter Zijlstra * 15031696a8beSPeter Zijlstra * @lock: the rt_mutex to be locked 15041696a8beSPeter Zijlstra * @proxy_owner: the task to set as owner 15051696a8beSPeter Zijlstra * 15061696a8beSPeter Zijlstra * No locking. The caller has to do the serializing itself 15071696a8beSPeter Zijlstra * Special API call for PI-futex support 15081696a8beSPeter Zijlstra */ 15091696a8beSPeter Zijlstra void rt_mutex_init_proxy_locked(struct rt_mutex *lock, 15101696a8beSPeter Zijlstra struct task_struct *proxy_owner) 15111696a8beSPeter Zijlstra { 15121696a8beSPeter Zijlstra __rt_mutex_init(lock, NULL); 15131696a8beSPeter Zijlstra debug_rt_mutex_proxy_lock(lock, proxy_owner); 15141696a8beSPeter Zijlstra rt_mutex_set_owner(lock, proxy_owner); 15151696a8beSPeter Zijlstra rt_mutex_deadlock_account_lock(lock, proxy_owner); 15161696a8beSPeter Zijlstra } 15171696a8beSPeter Zijlstra 15181696a8beSPeter Zijlstra /** 15191696a8beSPeter Zijlstra * rt_mutex_proxy_unlock - release a lock on behalf of owner 15201696a8beSPeter Zijlstra * 15211696a8beSPeter Zijlstra * @lock: the rt_mutex to be unlocked 15221696a8beSPeter Zijlstra * 15231696a8beSPeter Zijlstra * No locking. The caller has to do the serializing itself
15241696a8beSPeter Zijlstra * Special API call for PI-futex support 15251696a8beSPeter Zijlstra */ 15261696a8beSPeter Zijlstra void rt_mutex_proxy_unlock(struct rt_mutex *lock, 15271696a8beSPeter Zijlstra struct task_struct *proxy_owner) 15281696a8beSPeter Zijlstra { 15291696a8beSPeter Zijlstra debug_rt_mutex_proxy_unlock(lock); 15301696a8beSPeter Zijlstra rt_mutex_set_owner(lock, NULL); 15311696a8beSPeter Zijlstra rt_mutex_deadlock_account_unlock(proxy_owner); 15321696a8beSPeter Zijlstra } 15331696a8beSPeter Zijlstra 15341696a8beSPeter Zijlstra /** 15351696a8beSPeter Zijlstra * rt_mutex_start_proxy_lock() - Start lock acquisition for another task 15361696a8beSPeter Zijlstra * @lock: the rt_mutex to take 15371696a8beSPeter Zijlstra * @waiter: the pre-initialized rt_mutex_waiter 15381696a8beSPeter Zijlstra * @task: the task to prepare 15391696a8beSPeter Zijlstra * 15401696a8beSPeter Zijlstra * Returns: 15411696a8beSPeter Zijlstra * 0 - task blocked on lock 15421696a8beSPeter Zijlstra * 1 - acquired the lock for task, caller should wake it up 15431696a8beSPeter Zijlstra * <0 - error 15441696a8beSPeter Zijlstra * 15451696a8beSPeter Zijlstra * Special API call for FUTEX_REQUEUE_PI support. 15461696a8beSPeter Zijlstra */ 15471696a8beSPeter Zijlstra int rt_mutex_start_proxy_lock(struct rt_mutex *lock, 15481696a8beSPeter Zijlstra struct rt_mutex_waiter *waiter, 1549c051b21fSThomas Gleixner struct task_struct *task) 15501696a8beSPeter Zijlstra { 15511696a8beSPeter Zijlstra int ret; 15521696a8beSPeter Zijlstra 15531696a8beSPeter Zijlstra raw_spin_lock(&lock->wait_lock); 15541696a8beSPeter Zijlstra 15551696a8beSPeter Zijlstra if (try_to_take_rt_mutex(lock, task, NULL)) { 15561696a8beSPeter Zijlstra raw_spin_unlock(&lock->wait_lock); 15571696a8beSPeter Zijlstra return 1; 15581696a8beSPeter Zijlstra } 15591696a8beSPeter Zijlstra 15603d5c9340SThomas Gleixner /* We enforce deadlock detection for futexes */ 15618930ed80SThomas Gleixner ret = task_blocks_on_rt_mutex(lock, waiter, task, 15628930ed80SThomas Gleixner RT_MUTEX_FULL_CHAINWALK); 15631696a8beSPeter Zijlstra 15641696a8beSPeter Zijlstra if (ret && !rt_mutex_owner(lock)) { 15651696a8beSPeter Zijlstra /* 15661696a8beSPeter Zijlstra * Reset the return value. We might have 15671696a8beSPeter Zijlstra * returned with -EDEADLK and the owner 15681696a8beSPeter Zijlstra * released the lock while we were walking the 15691696a8beSPeter Zijlstra * pi chain. Let the waiter sort it out. 15701696a8beSPeter Zijlstra */ 15711696a8beSPeter Zijlstra ret = 0; 15721696a8beSPeter Zijlstra } 15731696a8beSPeter Zijlstra 15741696a8beSPeter Zijlstra if (unlikely(ret)) 15751696a8beSPeter Zijlstra remove_waiter(lock, waiter); 15761696a8beSPeter Zijlstra 15771696a8beSPeter Zijlstra raw_spin_unlock(&lock->wait_lock); 15781696a8beSPeter Zijlstra 15791696a8beSPeter Zijlstra debug_rt_mutex_print_deadlock(waiter); 15801696a8beSPeter Zijlstra 15811696a8beSPeter Zijlstra return ret; 15821696a8beSPeter Zijlstra } 15831696a8beSPeter Zijlstra 15841696a8beSPeter Zijlstra /** 15851696a8beSPeter Zijlstra * rt_mutex_next_owner - return the next owner of the lock 15861696a8beSPeter Zijlstra * 15871696a8beSPeter Zijlstra * @lock: the rt lock to query 15881696a8beSPeter Zijlstra * 15891696a8beSPeter Zijlstra * Returns the next owner of the lock or NULL 15901696a8beSPeter Zijlstra * 15911696a8beSPeter Zijlstra * Caller has to serialize against other accessors to the lock 15921696a8beSPeter Zijlstra * itself.
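 * (The next owner is simply the top waiter, i.e. the leftmost, highest
 * priority waiter enqueued on @lock.)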
15931696a8beSPeter Zijlstra * 15941696a8beSPeter Zijlstra * Special API call for PI-futex support 15951696a8beSPeter Zijlstra */ 15961696a8beSPeter Zijlstra struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock) 15971696a8beSPeter Zijlstra { 15981696a8beSPeter Zijlstra if (!rt_mutex_has_waiters(lock)) 15991696a8beSPeter Zijlstra return NULL; 16001696a8beSPeter Zijlstra 16011696a8beSPeter Zijlstra return rt_mutex_top_waiter(lock)->task; 16021696a8beSPeter Zijlstra } 16031696a8beSPeter Zijlstra 16041696a8beSPeter Zijlstra /** 16051696a8beSPeter Zijlstra * rt_mutex_finish_proxy_lock() - Complete lock acquisition 16061696a8beSPeter Zijlstra * @lock: the rt_mutex we were woken on 16071696a8beSPeter Zijlstra * @to: the timeout, NULL if none. The hrtimer should already have 16081696a8beSPeter Zijlstra * been started. 16091696a8beSPeter Zijlstra * @waiter: the pre-initialized rt_mutex_waiter 16101696a8beSPeter Zijlstra * 16111696a8beSPeter Zijlstra * Complete the lock acquisition started on our behalf by another thread. 16121696a8beSPeter Zijlstra * 16131696a8beSPeter Zijlstra * Returns: 16141696a8beSPeter Zijlstra * 0 - success 1615c051b21fSThomas Gleixner * <0 - error, one of -EINTR, -ETIMEDOUT 16161696a8beSPeter Zijlstra * 16171696a8beSPeter Zijlstra * Special API call for PI-futex requeue support 16181696a8beSPeter Zijlstra */ 16191696a8beSPeter Zijlstra int rt_mutex_finish_proxy_lock(struct rt_mutex *lock, 16201696a8beSPeter Zijlstra struct hrtimer_sleeper *to, 1621c051b21fSThomas Gleixner struct rt_mutex_waiter *waiter) 16221696a8beSPeter Zijlstra { 16231696a8beSPeter Zijlstra int ret; 16241696a8beSPeter Zijlstra 16251696a8beSPeter Zijlstra raw_spin_lock(&lock->wait_lock); 16261696a8beSPeter Zijlstra 16271696a8beSPeter Zijlstra set_current_state(TASK_INTERRUPTIBLE); 16281696a8beSPeter Zijlstra 16291696a8beSPeter Zijlstra ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); 16301696a8beSPeter Zijlstra 16311696a8beSPeter Zijlstra set_current_state(TASK_RUNNING); 16321696a8beSPeter Zijlstra 16331696a8beSPeter Zijlstra if (unlikely(ret)) 16341696a8beSPeter Zijlstra remove_waiter(lock, waiter); 16351696a8beSPeter Zijlstra 16361696a8beSPeter Zijlstra /* 16371696a8beSPeter Zijlstra * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might 16381696a8beSPeter Zijlstra * have to fix that up. 16391696a8beSPeter Zijlstra */ 16401696a8beSPeter Zijlstra fixup_rt_mutex_waiters(lock); 16411696a8beSPeter Zijlstra 16421696a8beSPeter Zijlstra raw_spin_unlock(&lock->wait_lock); 16431696a8beSPeter Zijlstra 16441696a8beSPeter Zijlstra return ret; 16451696a8beSPeter Zijlstra } 1646
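/*
 * A minimal usage sketch of the core API above. demo_lock and demo() are
 * hypothetical names for illustration only; they are not part of this file.
 */
static struct rt_mutex demo_lock;

static void demo(void)
{
	__rt_mutex_init(&demo_lock, "demo_lock");

	rt_mutex_lock(&demo_lock);	/* may sleep; waiters boost us via PI */
	/* ... critical section ... */
	rt_mutex_unlock(&demo_lock);	/* undoes any PI boost */

	rt_mutex_destroy(&demo_lock);
}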