/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 * Copyright (C) 2006 Esben Nielsen
 *
 * See Documentation/rt-mutex-design.txt for details.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/timer.h>

#include "rtmutex_common.h"

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0
 * is used to keep track of the "lock has waiters" state.
 *
 * owner	bit0
 * NULL		0	lock is free (fast acquire possible)
 * NULL		1	lock is free and has waiters and the top waiter
 *			is going to take the lock*
 * taskpointer	0	lock is held (fast release possible)
 * taskpointer	1	lock is held and has waiters**
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 of lock->owner is 0.
 *
 * (*) It also can be a transitional state when grabbing the lock
 * while ->wait_lock is held. To prevent any fast path cmpxchg to the lock,
 * we need to set bit 0 before looking at the lock, and the owner may be
 * NULL during this small time, hence this can be a transitional state.
 *
 * (**) There is a small time when bit 0 is set but there are no
 * waiters. This can happen when grabbing the lock in the slow path.
 * To prevent a cmpxchg of the owner releasing the lock, we need to
 * set this bit before looking at the lock.
 */

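/*
 * Update lock->owner, re-encoding the RT_MUTEX_HAS_WAITERS bit from the
 * current state of the waiter tree. Callers must serialize the update,
 * normally by holding lock->wait_lock.
 */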
static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner;

	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		clear_rt_mutex_waiters(lock);
}

/*
 * We can speed up the acquire/release if the architecture
 * supports cmpxchg and if there's no debugging state to be set up.
 */
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	do {
		owner = *p;
	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}
#else
# define rt_mutex_cmpxchg(l,c,n)	(0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}
#endif

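/*
 * Ordering function for the waiter rb-trees (lock->waiters and
 * task->pi_waiters): a lower prio value means a higher priority and
 * sorts first; deadline tasks tie-break on the earlier deadline.
 */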
static inline int
rt_mutex_waiter_less(struct rt_mutex_waiter *left,
		     struct rt_mutex_waiter *right)
{
	if (left->prio < right->prio)
		return 1;

	/*
	 * If both waiters have dl_prio(), we check the deadlines of the
	 * associated tasks.
	 * If left waiter has a dl_prio(), and we didn't return 1 above,
	 * then right waiter has a dl_prio() too.
	 */
	if (dl_prio(left->prio))
		return (left->task->dl.deadline < right->task->dl.deadline);

	return 0;
}

static void
rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
	struct rb_node **link = &lock->waiters.rb_node;
	struct rb_node *parent = NULL;
	struct rt_mutex_waiter *entry;
	int leftmost = 1;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
		if (rt_mutex_waiter_less(waiter, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		lock->waiters_leftmost = &waiter->tree_entry;

	rb_link_node(&waiter->tree_entry, parent, link);
	rb_insert_color(&waiter->tree_entry, &lock->waiters);
}

static void
rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
	if (RB_EMPTY_NODE(&waiter->tree_entry))
		return;

	if (lock->waiters_leftmost == &waiter->tree_entry)
		lock->waiters_leftmost = rb_next(&waiter->tree_entry);

	rb_erase(&waiter->tree_entry, &lock->waiters);
	RB_CLEAR_NODE(&waiter->tree_entry);
}

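/*
 * task->pi_waiters does not hold every waiter, only the top waiter of
 * each rt_mutex the task owns; it is the tree the priority boosting of
 * the owner is computed from.
 */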
static void
rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
	struct rb_node **link = &task->pi_waiters.rb_node;
	struct rb_node *parent = NULL;
	struct rt_mutex_waiter *entry;
	int leftmost = 1;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
		if (rt_mutex_waiter_less(waiter, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		task->pi_waiters_leftmost = &waiter->pi_tree_entry;

	rb_link_node(&waiter->pi_tree_entry, parent, link);
	rb_insert_color(&waiter->pi_tree_entry, &task->pi_waiters);
}

static void
rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
	if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
		return;

	if (task->pi_waiters_leftmost == &waiter->pi_tree_entry)
		task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry);

	rb_erase(&waiter->pi_tree_entry, &task->pi_waiters);
	RB_CLEAR_NODE(&waiter->pi_tree_entry);
}

/*
 * Calculate task priority from the waiter tree priority
 *
 * Return task->normal_prio when the waiter tree is empty or when
 * the waiter is not allowed to do priority boosting
 */
int rt_mutex_getprio(struct task_struct *task)
{
	if (likely(!task_has_pi_waiters(task)))
		return task->normal_prio;

	return min(task_top_pi_waiter(task)->prio,
		   task->normal_prio);
}

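/*
 * Return the task of @task's top pi waiter, i.e. the waiter donating the
 * highest priority, or NULL when there are no pi waiters.
 */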
struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
{
	if (likely(!task_has_pi_waiters(task)))
		return NULL;

	return task_top_pi_waiter(task)->task;
}

/*
 * Called by sched_setscheduler() to check whether the priority change
 * is overruled by a possible priority boosting.
 */
int rt_mutex_check_prio(struct task_struct *task, int newprio)
{
	if (!task_has_pi_waiters(task))
		return 0;

	return task_top_pi_waiter(task)->task->prio <= newprio;
}

/*
 * Adjust the priority of a task, after its pi_waiters got modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
	int prio = rt_mutex_getprio(task);

	if (task->prio != prio || dl_prio(prio))
		rt_mutex_setprio(task, prio);
}

/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task. We do not use the spin_xx_mutex() variants here as we are
 * outside of the debug path.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 *
 * @task:	the task owning the mutex (owner) for which a chain walk is
 *		probably needed
 * @deadlock_detect: do we have to carry out deadlock detection?
 * @orig_lock:	the mutex (can be NULL if we are walking the chain to recheck
 *		things for a task that has just got its priority adjusted, and
 *		is waiting on a mutex)
 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
 *		its priority to the mutex owner (can be NULL in the case
 *		depicted above or if the top waiter has gone away and we are
 *		actually deboosting the owner)
 * @top_task:	the current top waiter
 *
 * Returns 0 or -EDEADLK.
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
				      int deadlock_detect,
				      struct rt_mutex *orig_lock,
				      struct rt_mutex_waiter *orig_waiter,
				      struct task_struct *top_task)
{
	struct rt_mutex *lock;
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	int detect_deadlock, ret = 0, depth = 0;
	unsigned long flags;

	detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
							 deadlock_detect);

	/*
	 * The (de)boosting is a step by step approach with a lot of
	 * pitfalls. We want this to be preemptible and we want to hold a
	 * maximum of two locks per step. So we have to check
	 * carefully whether things change under us.
	 */
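	/*
	 * Each round of the loop below handles one (waiter, lock, owner)
	 * step of the chain and then advances to the lock the owner is
	 * itself blocked on, if any.
	 */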
again:
	if (++depth > max_lock_depth) {
		static int prev_max;

		/*
		 * Print this only once. If the admin changes the limit,
		 * print a new message when reaching the limit again.
		 */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       top_task->comm, task_pid_nr(top_task));
		}
		put_task_struct(task);

		return -EDEADLK;
	}
retry:
	/*
	 * Task can not go away as we did a get_task_struct() before!
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	/*
	 * Check whether the end of the boosting chain has been
	 * reached or the state of the chain has changed while we
	 * dropped the locks.
	 */
	if (!waiter)
		goto out_unlock_pi;

	/*
	 * Check the orig_waiter state. After we dropped the locks,
	 * the previous owner of the lock might have released the lock.
	 */
	if (orig_waiter && !rt_mutex_owner(orig_lock))
		goto out_unlock_pi;

	/*
	 * Drop out, when the task has no waiters. Note,
	 * top_waiter can be NULL, when we are in the deboosting
	 * mode!
	 */
	if (top_waiter) {
		if (!task_has_pi_waiters(task))
			goto out_unlock_pi;
		/*
		 * If deadlock detection is off, we stop here if we
		 * are not the top pi waiter of the task.
		 */
		if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
			goto out_unlock_pi;
	}

	/*
	 * When deadlock detection is off, we check whether further
	 * priority adjustment is necessary.
	 */
	if (!detect_deadlock && waiter->prio == task->prio)
		goto out_unlock_pi;

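	/*
	 * We are holding task->pi_lock, so lock->wait_lock can only be
	 * trylocked here: blocking on it would invert the lock order. If
	 * the trylock fails, drop pi_lock and start the step over.
	 */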
	lock = waiter->lock;
	if (!raw_spin_trylock(&lock->wait_lock)) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		cpu_relax();
		goto retry;
	}

	/*
	 * Deadlock detection. If the lock is the same as the original
	 * lock which caused us to walk the lock chain or if the
	 * current lock is owned by the task which initiated the chain
	 * walk, we detected a deadlock.
	 */
	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
		raw_spin_unlock(&lock->wait_lock);
		ret = -EDEADLK;
		goto out_unlock_pi;
	}

	top_waiter = rt_mutex_top_waiter(lock);

	/* Requeue the waiter */
	rt_mutex_dequeue(lock, waiter);
	waiter->prio = task->prio;
	rt_mutex_enqueue(lock, waiter);

	/* Release the task */
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	if (!rt_mutex_owner(lock)) {
		/*
		 * If the requeue above changed the top waiter, then we need
		 * to wake the new top waiter up to try to get the lock.
		 */
		if (top_waiter != rt_mutex_top_waiter(lock))
			wake_up_process(rt_mutex_top_waiter(lock)->task);
		raw_spin_unlock(&lock->wait_lock);
		goto out_put_task;
	}
	put_task_struct(task);

	/* Grab the next task */
	task = rt_mutex_owner(lock);
	get_task_struct(task);
	raw_spin_lock_irqsave(&task->pi_lock, flags);

	if (waiter == rt_mutex_top_waiter(lock)) {
		/* Boost the owner */
		rt_mutex_dequeue_pi(task, top_waiter);
		rt_mutex_enqueue_pi(task, waiter);
		__rt_mutex_adjust_prio(task);

	} else if (top_waiter == waiter) {
		/* Deboost the owner */
		rt_mutex_dequeue_pi(task, waiter);
		waiter = rt_mutex_top_waiter(lock);
		rt_mutex_enqueue_pi(task, waiter);
		__rt_mutex_adjust_prio(task);
	}

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	top_waiter = rt_mutex_top_waiter(lock);
	raw_spin_unlock(&lock->wait_lock);

	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;

out_unlock_pi:
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
out_put_task:
	put_task_struct(task);

	return ret;
}

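/*
 * Chain walk example (lower prio value == higher priority): when T1
 * (prio 10) blocks on a lock owned by T2 (prio 20), which is itself
 * blocked on a lock owned by T3 (prio 30), T2 is boosted to 10 when T1
 * enqueues itself, and the walk above then propagates the boost to T3.
 */
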
/*
 * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held.
 *
 * @lock:   the lock to be acquired.
 * @task:   the task which wants to acquire the lock
 * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
				struct rt_mutex_waiter *waiter)
{
	/*
	 * We have to be careful here if the atomic speedups are
	 * enabled, such that, when
	 *  - no other waiter is on the lock
	 *  - the lock has been released since we did the cmpxchg
	 * the lock can be released or taken while we are doing the
	 * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
	 *
	 * The atomic acquire/release aware variant of
	 * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
	 * the WAITERS bit, the atomic release / acquire can not
	 * happen anymore and lock->wait_lock protects us from the
	 * non-atomic case.
	 *
	 * Note, that this might set lock->owner =
	 * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
	 * any more. This is fixed up when we take the ownership.
	 * This is the transitional state explained at the top of this file.
	 */
	mark_rt_mutex_waiters(lock);

	if (rt_mutex_owner(lock))
		return 0;

	/*
	 * The task will get the lock if one of these conditions holds:
	 * 1) there is no waiter
	 * 2) it has higher priority than all waiters
	 * 3) it is the top waiter
	 */
	if (rt_mutex_has_waiters(lock)) {
		if (task->prio >= rt_mutex_top_waiter(lock)->prio) {
			if (!waiter || waiter != rt_mutex_top_waiter(lock))
				return 0;
		}
	}

	if (waiter || rt_mutex_has_waiters(lock)) {
		unsigned long flags;
		struct rt_mutex_waiter *top;

		raw_spin_lock_irqsave(&task->pi_lock, flags);

		/* remove the queued waiter. */
		if (waiter) {
			rt_mutex_dequeue(lock, waiter);
			task->pi_blocked_on = NULL;
		}

		/*
		 * We have to enqueue the top waiter (if it exists) into
		 * the task->pi_waiters tree.
		 */
		if (rt_mutex_has_waiters(lock)) {
			top = rt_mutex_top_waiter(lock);
			rt_mutex_enqueue_pi(task, top);
		}
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	}

	/* We got the lock. */
	debug_rt_mutex_lock(lock);

	rt_mutex_set_owner(lock, task);

	rt_mutex_deadlock_account_lock(lock, task);

	return 1;
}

/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
				   struct rt_mutex_waiter *waiter,
				   struct task_struct *task,
				   int detect_deadlock)
{
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *top_waiter = waiter;
	unsigned long flags;
	int chain_walk = 0, res;

	/*
	 * Early deadlock detection. We really don't want the task to
	 * enqueue on itself just to untangle the mess later. It's not
	 * only an optimization. We drop the locks, so another waiter
	 * can come in before the chain walk detects the deadlock. So
	 * the other will detect the deadlock and return -EDEADLOCK,
	 * which is wrong, as the other waiter is not in a deadlock
	 * situation.
	 */
	if (owner == task)
		return -EDEADLK;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	waiter->task = task;
	waiter->lock = lock;
	waiter->prio = task->prio;

	/* Get the top priority waiter on the lock */
	if (rt_mutex_has_waiters(lock))
		top_waiter = rt_mutex_top_waiter(lock);
	rt_mutex_enqueue(lock, waiter);

	task->pi_blocked_on = waiter;

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

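	/*
	 * No owner means the lock is in the transitional state described
	 * at the top of this file: there is no owner to boost, so we are
	 * done after queueing ourselves.
	 */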
	if (!owner)
		return 0;

	if (waiter == rt_mutex_top_waiter(lock)) {
		raw_spin_lock_irqsave(&owner->pi_lock, flags);
		rt_mutex_dequeue_pi(owner, top_waiter);
		rt_mutex_enqueue_pi(owner, waiter);

		__rt_mutex_adjust_prio(owner);
		if (owner->pi_blocked_on)
			chain_walk = 1;
		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
	}
	else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
		chain_walk = 1;

	if (!chain_walk)
		return 0;

	/*
	 * The owner can't disappear while holding a lock,
	 * so the owner struct is protected by wait_lock.
	 * Gets dropped in rt_mutex_adjust_prio_chain()!
	 */
	get_task_struct(owner);

	raw_spin_unlock(&lock->wait_lock);

	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
					 task);

	raw_spin_lock(&lock->wait_lock);

	return res;
}

/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current task's waiter list and wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
	struct rt_mutex_waiter *waiter;
	unsigned long flags;

	raw_spin_lock_irqsave(&current->pi_lock, flags);

	waiter = rt_mutex_top_waiter(lock);

	/*
	 * Remove it from current->pi_waiters. We do not adjust a
	 * possible priority boost right now. We execute wakeup in the
	 * boosted mode and go back to normal after releasing
	 * lock->wait_lock.
	 */
	rt_mutex_dequeue_pi(current, waiter);

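	/*
	 * Per the state table at the top of this file, owner == NULL with
	 * the HAS_WAITERS bit set means the woken top waiter is going to
	 * take the lock.
	 */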
	rt_mutex_set_owner(lock, NULL);

	raw_spin_unlock_irqrestore(&current->pi_lock, flags);

	wake_up_process(waiter->task);
}

/*
 * Remove a waiter from a lock and give up
 *
 * Must be called with lock->wait_lock held and
 * have just failed to try_to_take_rt_mutex().
 */
static void remove_waiter(struct rt_mutex *lock,
			  struct rt_mutex_waiter *waiter)
{
	int first = (waiter == rt_mutex_top_waiter(lock));
	struct task_struct *owner = rt_mutex_owner(lock);
	unsigned long flags;
	int chain_walk = 0;

	raw_spin_lock_irqsave(&current->pi_lock, flags);
	rt_mutex_dequeue(lock, waiter);
	current->pi_blocked_on = NULL;
	raw_spin_unlock_irqrestore(&current->pi_lock, flags);

	if (!owner)
		return;

	if (first) {

		raw_spin_lock_irqsave(&owner->pi_lock, flags);

		rt_mutex_dequeue_pi(owner, waiter);

		if (rt_mutex_has_waiters(lock)) {
			struct rt_mutex_waiter *next;

			next = rt_mutex_top_waiter(lock);
			rt_mutex_enqueue_pi(owner, next);
		}
		__rt_mutex_adjust_prio(owner);

		if (owner->pi_blocked_on)
			chain_walk = 1;

		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
	}

	if (!chain_walk)
		return;

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(owner);

	raw_spin_unlock(&lock->wait_lock);

	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);

	raw_spin_lock(&lock->wait_lock);
}

/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || (waiter->prio == task->prio &&
			!dl_prio(task->prio))) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);
	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
}

/**
 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
 * @lock:	 the rt_mutex to take
 * @state:	 the state the task should block in (TASK_INTERRUPTIBLE
 *		 or TASK_UNINTERRUPTIBLE)
 * @timeout:	 the pre-initialized and started timer, or NULL for none
 * @waiter:	 the pre-initialized rt_mutex_waiter
 *
 * lock->wait_lock must be held by the caller.
 */
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
		    struct hrtimer_sleeper *timeout,
		    struct rt_mutex_waiter *waiter)
{
	int ret = 0;

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock, current, waiter))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		raw_spin_unlock(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(waiter);

		schedule_rt_mutex(lock);

		raw_spin_lock(&lock->wait_lock);
		set_current_state(state);
	}

	return ret;
}

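/*
 * Handle the case where the chain walk reported a deadlock but the
 * caller did not ask for deadlock detection: print the report and park
 * the task, as there is no way to make forward progress.
 */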
static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
				     struct rt_mutex_waiter *w)
{
	/*
	 * If the result is not -EDEADLOCK or the caller requested
	 * deadlock detection, nothing to do here.
	 */
	if (res != -EDEADLOCK || detect_deadlock)
		return;

	/*
	 * Yell loudly and stop the task right here.
	 */
	rt_mutex_print_deadlock(w);
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
}

/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  int detect_deadlock)
{
	struct rt_mutex_waiter waiter;
	int ret = 0;

	debug_rt_mutex_init_waiter(&waiter);
	RB_CLEAR_NODE(&waiter.pi_tree_entry);
	RB_CLEAR_NODE(&waiter.tree_entry);

	raw_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock, current, NULL)) {
		raw_spin_unlock(&lock->wait_lock);
		return 0;
	}

	set_current_state(state);

	/* Setup the timer, when timeout != NULL */
	if (unlikely(timeout)) {
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
		if (!hrtimer_active(&timeout->timer))
			timeout->task = NULL;
	}

	ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);

	if (likely(!ret))
		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);

	set_current_state(TASK_RUNNING);

	if (unlikely(ret)) {
		remove_waiter(lock, &waiter);
		rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
	}

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock(&lock->wait_lock);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}

/*
 * Slow path try-lock function:
 */
static inline int
rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	int ret = 0;

	raw_spin_lock(&lock->wait_lock);

	if (likely(rt_mutex_owner(lock) != current)) {

		ret = try_to_take_rt_mutex(lock, current, NULL);
		/*
		 * try_to_take_rt_mutex() sets the lock waiters
		 * bit unconditionally. Clean this up.
		 */
		fixup_rt_mutex_waiters(lock);
	}

	raw_spin_unlock(&lock->wait_lock);

	return ret;
}

/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
	raw_spin_lock(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	rt_mutex_deadlock_account_unlock(current);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		raw_spin_unlock(&lock->wait_lock);
		return;
	}

	wakeup_next_waiter(lock);

	raw_spin_unlock(&lock->wait_lock);

	/* Undo pi boosting if necessary: */
	rt_mutex_adjust_prio(current);
}

/*
 * debug aware fast / slowpath lock, trylock, unlock
 *
 * The atomic acquire/release ops are compiled away, when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
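/*
 * Note that a lock request with deadlock detection enabled always takes
 * the slow path, as only the slow path walks the lock chain.
 */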
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
		  int detect_deadlock,
		  int (*slowfn)(struct rt_mutex *lock, int state,
				struct hrtimer_sleeper *timeout,
				int detect_deadlock))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, NULL, detect_deadlock);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
			struct hrtimer_sleeper *timeout, int detect_deadlock,
			int (*slowfn)(struct rt_mutex *lock, int state,
				      struct hrtimer_sleeper *timeout,
				      int detect_deadlock))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, timeout, detect_deadlock);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
		     int (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 1;
	}
	return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
		    void (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
		rt_mutex_deadlock_account_unlock(current);
	else
		slowfn(lock);
}

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	might_sleep();

	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);

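/*
 * Minimal usage sketch (hypothetical caller):
 *
 *	static DEFINE_RT_MUTEX(example_lock);
 *
 *	rt_mutex_lock(&example_lock);
 *	... critical section, runs priority-boosted against waiters ...
 *	rt_mutex_unlock(&example_lock);
 */
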
/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptibly
 *
 * @lock:		the rt_mutex to be locked
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
					int detect_deadlock)
{
	might_sleep();

	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
				 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptibly, with a timeout
 *			 structure provided by the caller
 *
 * @lock:		the rt_mutex to be locked
 * @timeout:		timeout structure or NULL (no timeout)
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -ETIMEDOUT	when the timeout expired
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
		    int detect_deadlock)
{
	might_sleep();

	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
				       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);

/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
	WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
	lock->magic = NULL;
#endif
}

EXPORT_SYMBOL_GPL(rt_mutex_destroy);

/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing a locked rt lock is not allowed.
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
	lock->owner = NULL;
	raw_spin_lock_init(&lock->wait_lock);
	lock->waiters = RB_ROOT;
	lock->waiters_leftmost = NULL;

	debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock:	 the rt_mutex to be locked
 * @proxy_owner: the task to set as owner
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				struct task_struct *proxy_owner)
{
	__rt_mutex_init(lock, NULL);
	debug_rt_mutex_proxy_lock(lock, proxy_owner);
	rt_mutex_set_owner(lock, proxy_owner);
	rt_mutex_deadlock_account_lock(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock: the rt_mutex to be locked
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
			   struct task_struct *proxy_owner)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL);
	rt_mutex_deadlock_account_unlock(proxy_owner);
}

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @task:		the task to prepare
 * @detect_deadlock:	perform deadlock detection (1) or not (0)
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for FUTEX_REQUEUE_PI support.
 */
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
			      struct rt_mutex_waiter *waiter,
			      struct task_struct *task, int detect_deadlock)
{
	int ret;

	raw_spin_lock(&lock->wait_lock);

	if (try_to_take_rt_mutex(lock, task, NULL)) {
		raw_spin_unlock(&lock->wait_lock);
		return 1;
	}

	/* We enforce deadlock detection for futexes */
	ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);

	if (ret && !rt_mutex_owner(lock)) {
		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain. Let the waiter sort it out.
		 */
		ret = 0;
	}

	if (unlikely(ret))
		remove_waiter(lock, waiter);

	raw_spin_unlock(&lock->wait_lock);

	debug_rt_mutex_print_deadlock(waiter);

	return ret;
}

/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock to query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		return NULL;

	return rt_mutex_top_waiter(lock)->task;
}

/**
 * rt_mutex_finish_proxy_lock() - Complete lock acquisition
 * @lock:		the rt_mutex we were woken on
 * @to:			the timeout, null if none. hrtimer should already have
 *			been started.
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @detect_deadlock:	perform deadlock detection (1) or not (0)
 *
 * Complete the lock acquisition started on our behalf by another thread.
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT, or -EDEADLK
 *
 * Special API call for PI-futex requeue support
 */
int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
			       struct hrtimer_sleeper *to,
			       struct rt_mutex_waiter *waiter,
			       int detect_deadlock)
{
	int ret;

	raw_spin_lock(&lock->wait_lock);

	set_current_state(TASK_INTERRUPTIBLE);

	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);

	set_current_state(TASK_RUNNING);

	if (unlikely(ret))
		remove_waiter(lock, waiter);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock(&lock->wait_lock);

	return ret;
}