/* SPDX-License-Identifier: GPL-2.0 */
/*
 * RT Mutexes: blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner:
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * This file contains the private data structure and API definitions.
 */

#ifndef __KERNEL_RTMUTEX_COMMON_H
#define __KERNEL_RTMUTEX_COMMON_H

#include <linux/debug_locks.h>
#include <linux/rtmutex.h>
#include <linux/sched/wake_q.h>


/*
 * This is a helper for the struct rt_mutex_waiter below. A waiter goes in two
 * separate trees and they need their own copy of the sort keys because of
 * different locking requirements.
 *
 * @entry:		rbtree node to enqueue into the waiters tree
 * @prio:		Priority of the waiter
 * @deadline:		Deadline of the waiter if applicable
 *
 * See rt_waiter_node_less() and waiter_*_prio().
 */
struct rt_waiter_node {
	struct rb_node	entry;
	int		prio;
	u64		deadline;
};

/*
 * This is the control structure for tasks blocked on a rt_mutex,
 * which is allocated on the kernel stack of the blocked task.
 *
 * @tree:		node to enqueue into the mutex waiters tree
 * @pi_tree:		node to enqueue into the mutex owner waiters tree
 * @task:		task reference to the blocked task
 * @lock:		Pointer to the rt_mutex on which the waiter blocks
 * @wake_state:		Wakeup state to use (TASK_NORMAL or TASK_RTLOCK_WAIT)
 * @ww_ctx:		WW context pointer
 *
 * @tree is ordered by @lock->wait_lock
 * @pi_tree is ordered by rt_mutex_owner(@lock)->pi_lock
 */
struct rt_mutex_waiter {
	struct rt_waiter_node	tree;
	struct rt_waiter_node	pi_tree;
	struct task_struct	*task;
	struct rt_mutex_base	*lock;
	unsigned int		wake_state;
	struct ww_acquire_ctx	*ww_ctx;
};

/**
 * rt_wake_q_head - Wrapper around regular wake_q_head to support
 *		    "sleeping" spinlocks on RT
 * @head:		The regular wake_q_head for sleeping lock variants
 * @rtlock_task:	Task pointer for RT lock (spin/rwlock) wakeups
 */
struct rt_wake_q_head {
	struct wake_q_head	head;
	struct task_struct	*rtlock_task;
};

#define DEFINE_RT_WAKE_Q(name)						\
	struct rt_wake_q_head name = {					\
		.head		= WAKE_Q_HEAD_INITIALIZER(name.head),	\
		.rtlock_task	= NULL,					\
	}

/*
 * PI-futex support (proxy locking functions, etc.):
 */
extern void rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
				       struct task_struct *proxy_owner);
extern void rt_mutex_proxy_unlock(struct rt_mutex_base *lock);
extern int __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
				       struct rt_mutex_waiter *waiter,
				       struct task_struct *task);
extern int rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
				     struct rt_mutex_waiter *waiter,
				     struct task_struct *task);
extern int rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
				    struct hrtimer_sleeper *to,
				    struct rt_mutex_waiter *waiter);
extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
					struct rt_mutex_waiter *waiter);

extern int rt_mutex_futex_trylock(struct rt_mutex_base *l);
extern int __rt_mutex_futex_trylock(struct rt_mutex_base *l);

extern void rt_mutex_futex_unlock(struct rt_mutex_base *lock);
extern bool __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
				    struct rt_wake_q_head *wqh);

extern void rt_mutex_postunlock(struct rt_wake_q_head *wqh);
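
/*
 * Usage sketch (illustrative, not part of the API surface declared here):
 * the PI-futex unlock path in kernel/futex/pi.c drives the deferred-wakeup
 * helpers above roughly like this. The function name below is made up for
 * illustration.
 */
static inline void example_futex_unlock_path(struct rt_mutex_base *lock)
{
	DEFINE_RT_WAKE_Q(wqh);
	bool postunlock;

	/* __rt_mutex_futex_unlock() requires @lock->wait_lock to be held. */
	raw_spin_lock_irq(&lock->wait_lock);
	postunlock = __rt_mutex_futex_unlock(lock, &wqh);
	raw_spin_unlock_irq(&lock->wait_lock);

	/* Wakeups are issued only after all locks have been dropped. */
	if (postunlock)
		rt_mutex_postunlock(&wqh);
}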

/*
 * Must be guarded because this header is included from rcu/tree_plugin.h
 * unconditionally.
 */
#ifdef CONFIG_RT_MUTEXES
static inline int rt_mutex_has_waiters(struct rt_mutex_base *lock)
{
	return !RB_EMPTY_ROOT(&lock->waiters.rb_root);
}

/*
 * Lockless speculative check whether @waiter is still the top waiter on
 * @lock. This is solely comparing pointers and not dereferencing the
 * leftmost entry which might be about to vanish.
 */
static inline bool rt_mutex_waiter_is_top_waiter(struct rt_mutex_base *lock,
						 struct rt_mutex_waiter *waiter)
{
	struct rb_node *leftmost = rb_first_cached(&lock->waiters);

	return rb_entry(leftmost, struct rt_mutex_waiter, tree.entry) == waiter;
}

static inline struct rt_mutex_waiter *rt_mutex_top_waiter(struct rt_mutex_base *lock)
{
	struct rb_node *leftmost = rb_first_cached(&lock->waiters);
	struct rt_mutex_waiter *w = NULL;

	lockdep_assert_held(&lock->wait_lock);

	if (leftmost) {
		w = rb_entry(leftmost, struct rt_mutex_waiter, tree.entry);
		BUG_ON(w->lock != lock);
	}
	return w;
}

static inline int task_has_pi_waiters(struct task_struct *p)
{
	return !RB_EMPTY_ROOT(&p->pi_waiters.rb_root);
}

static inline struct rt_mutex_waiter *task_top_pi_waiter(struct task_struct *p)
{
	lockdep_assert_held(&p->pi_lock);

	return rb_entry(p->pi_waiters.rb_leftmost, struct rt_mutex_waiter,
			pi_tree.entry);
}

#define RT_MUTEX_HAS_WAITERS	1UL

static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
{
	unsigned long owner = (unsigned long) READ_ONCE(lock->owner);

	return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
}
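
/*
 * Illustration (not a function declared in this header; the real
 * counterpart lives in rtmutex.c as rt_mutex_set_owner()): how the lowest
 * bit of ->owner carries the RT_MUTEX_HAS_WAITERS flag that
 * rt_mutex_owner() masks off above.
 */
static inline void example_set_owner(struct rt_mutex_base *lock,
				     struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner;

	/* Encode "waiters tree is non-empty" into the owner pointer. */
	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	WRITE_ONCE(lock->owner, (struct task_struct *)val);
}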

/*
 * Constants for rt mutex functions which have a selectable deadlock
 * detection.
 *
 * RT_MUTEX_MIN_CHAINWALK:	Stops the lock chain walk when there are
 *				no further PI adjustments to be made.
 *
 * RT_MUTEX_FULL_CHAINWALK:	Invoke deadlock detection with a full
 *				walk of the lock chain.
 */
enum rtmutex_chainwalk {
	RT_MUTEX_MIN_CHAINWALK,
	RT_MUTEX_FULL_CHAINWALK,
};

static inline void __rt_mutex_base_init(struct rt_mutex_base *lock)
{
	raw_spin_lock_init(&lock->wait_lock);
	lock->waiters = RB_ROOT_CACHED;
	lock->owner = NULL;
}

/* Debug functions */
static inline void debug_rt_mutex_unlock(struct rt_mutex_base *lock)
{
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current);
}

static inline void debug_rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
{
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock));
}

static inline void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
{
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		memset(waiter, 0x11, sizeof(*waiter));
}

static inline void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
{
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		memset(waiter, 0x22, sizeof(*waiter));
}

static inline void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
{
	debug_rt_mutex_init_waiter(waiter);
	RB_CLEAR_NODE(&waiter->pi_tree.entry);
	RB_CLEAR_NODE(&waiter->tree.entry);
	waiter->wake_state = TASK_NORMAL;
	waiter->task = NULL;
}

static inline void rt_mutex_init_rtlock_waiter(struct rt_mutex_waiter *waiter)
{
	rt_mutex_init_waiter(waiter);
	waiter->wake_state = TASK_RTLOCK_WAIT;
}

#else /* CONFIG_RT_MUTEXES */
/* Used in rcu/tree_plugin.h */
static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
{
	return NULL;
}
#endif /* !CONFIG_RT_MUTEXES */

#endif
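
/*
 * Usage sketch, kept outside the include guard and disabled behind a
 * made-up config symbol so it is never built: how the PI-futex requeue
 * code combines rt_mutex_init_waiter() with the proxy API declared above.
 * In the real futex code the start and wait halves run in different task
 * contexts; they are compressed into one function here for illustration.
 */
#ifdef CONFIG_RTMUTEX_COMMON_EXAMPLE
static int example_proxy_wait(struct rt_mutex_base *lock,
			      struct hrtimer_sleeper *to)
{
	struct rt_mutex_waiter waiter;
	int ret;

	rt_mutex_init_waiter(&waiter);

	/* Enqueue current as a waiter and kick off the PI chain walk. */
	ret = rt_mutex_start_proxy_lock(lock, &waiter, current);
	if (ret)
		return ret;

	/* Block until the lock is acquired or @to expires. */
	ret = rt_mutex_wait_proxy_lock(lock, to, &waiter);

	/*
	 * On failure the waiter may still be enqueued; if cleanup reports
	 * that it was not, the lock was acquired after all.
	 */
	if (ret && !rt_mutex_cleanup_proxy_lock(lock, &waiter))
		ret = 0;

	debug_rt_mutex_free_waiter(&waiter);
	return ret;
}
#endif /* CONFIG_RTMUTEX_COMMON_EXAMPLE */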