/* SPDX-License-Identifier: GPL-2.0 */
/*
 * RT Mutexes: blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner:
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * This file contains the private data structure and API definitions.
 */

#ifndef __KERNEL_RTMUTEX_COMMON_H
#define __KERNEL_RTMUTEX_COMMON_H

#include <linux/debug_locks.h>
#include <linux/rtmutex.h>
#include <linux/sched/wake_q.h>


/*
 * This is a helper for the struct rt_mutex_waiter below. A waiter goes into
 * two separate trees and needs its own copy of the sort keys because of
 * different locking requirements.
 *
 * @entry:	rbtree node to enqueue into the waiters tree
 * @prio:	Priority of the waiter
 * @deadline:	Deadline of the waiter if applicable
 *
 * See rt_waiter_node_less() and waiter_*_prio().
 */
struct rt_waiter_node {
	struct rb_node entry;
	int prio;
	u64 deadline;
};

/*
 * This is the control structure for tasks blocked on an rt_mutex,
 * which is allocated on the kernel stack of the blocked task.
 *
 * @tree:	node to enqueue into the mutex waiters tree
 * @pi_tree:	node to enqueue into the mutex owner waiters tree
 * @task:	task reference to the blocked task
 * @lock:	Pointer to the rt_mutex on which the waiter blocks
 * @wake_state:	Wakeup state to use (TASK_NORMAL or TASK_RTLOCK_WAIT)
 * @ww_ctx:	WW context pointer
 *
 * @tree is ordered by @lock->wait_lock
 * @pi_tree is ordered by rt_mutex_owner(@lock)->pi_lock
 */
struct rt_mutex_waiter {
	struct rt_waiter_node tree;
	struct rt_waiter_node pi_tree;
	struct task_struct *task;
	struct rt_mutex_base *lock;
	unsigned int wake_state;
	struct ww_acquire_ctx *ww_ctx;
};

/**
 * rt_wake_q_head - Wrapper around regular wake_q_head to support
 *		    "sleeping" spinlocks on RT
 * @head:		The regular wake_q_head for sleeping lock variants
 * @rtlock_task:	Task pointer for RT lock (spin/rwlock) wakeups
 */
struct rt_wake_q_head {
	struct wake_q_head head;
	struct task_struct *rtlock_task;
};

#define DEFINE_RT_WAKE_Q(name)						\
	struct rt_wake_q_head name = {					\
		.head = WAKE_Q_HEAD_INITIALIZER(name.head),		\
		.rtlock_task = NULL,					\
	}

/*
 * PI-futex support (proxy locking functions, etc.):
 */
extern void rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
				       struct task_struct *proxy_owner);
extern void rt_mutex_proxy_unlock(struct rt_mutex_base *lock);
extern int __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
				       struct rt_mutex_waiter *waiter,
				       struct task_struct *task,
				       struct wake_q_head *);
extern int rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
				     struct rt_mutex_waiter *waiter,
				     struct task_struct *task);
extern int rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
				    struct hrtimer_sleeper *to,
				    struct rt_mutex_waiter *waiter);
extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
					struct rt_mutex_waiter *waiter);

extern int rt_mutex_futex_trylock(struct rt_mutex_base *l);
extern int __rt_mutex_futex_trylock(struct rt_mutex_base *l);

extern void rt_mutex_futex_unlock(struct rt_mutex_base *lock);
extern bool __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
				    struct rt_wake_q_head *wqh);

extern void rt_mutex_postunlock(struct rt_wake_q_head *wqh);
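/*
 * Illustrative only: a rough sketch of how the proxy-lock API above is
 * chained together. The shape loosely follows the PI-futex code (see
 * kernel/futex/pi.c); "pi_mutex", "to" and the error handling below are
 * simplified placeholders, not the real call site:
 *
 *	struct rt_mutex_waiter rt_waiter;
 *	int ret;
 *
 *	rt_mutex_init_waiter(&rt_waiter);
 *
 *	// Enqueue current as a waiter on behalf of the lock owner:
 *	// 0 means blocked, 1 means the lock was acquired, < 0 is an
 *	// error (e.g. deadlock detected).
 *	ret = rt_mutex_start_proxy_lock(pi_mutex, &rt_waiter, current);
 *
 *	if (!ret) {
 *		// Sleep until acquisition, or until @to expires.
 *		ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
 *
 *		// On failure the task may have acquired the lock anyway;
 *		// cleanup returns false in that case, so treat it as
 *		// success.
 *		if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
 *			ret = 0;
 *	}
 */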
/*
 * Must be guarded because this header is included from rcu/tree_plugin.h
 * unconditionally.
 */
#ifdef CONFIG_RT_MUTEXES
static inline int rt_mutex_has_waiters(struct rt_mutex_base *lock)
{
	return !RB_EMPTY_ROOT(&lock->waiters.rb_root);
}

/*
 * Lockless speculative check whether @waiter is still the top waiter on
 * @lock. This is solely comparing pointers and not dereferencing the
 * leftmost entry, which might be about to vanish.
 */
static inline bool rt_mutex_waiter_is_top_waiter(struct rt_mutex_base *lock,
						 struct rt_mutex_waiter *waiter)
{
	struct rb_node *leftmost = rb_first_cached(&lock->waiters);

	return rb_entry(leftmost, struct rt_mutex_waiter, tree.entry) == waiter;
}

static inline struct rt_mutex_waiter *rt_mutex_top_waiter(struct rt_mutex_base *lock)
{
	struct rb_node *leftmost = rb_first_cached(&lock->waiters);
	struct rt_mutex_waiter *w = NULL;

	lockdep_assert_held(&lock->wait_lock);

	if (leftmost) {
		w = rb_entry(leftmost, struct rt_mutex_waiter, tree.entry);
		BUG_ON(w->lock != lock);
	}
	return w;
}

static inline int task_has_pi_waiters(struct task_struct *p)
{
	return !RB_EMPTY_ROOT(&p->pi_waiters.rb_root);
}

static inline struct rt_mutex_waiter *task_top_pi_waiter(struct task_struct *p)
{
	lockdep_assert_held(&p->pi_lock);

	return rb_entry(p->pi_waiters.rb_leftmost, struct rt_mutex_waiter,
			pi_tree.entry);
}

#define RT_MUTEX_HAS_WAITERS	1UL

static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
{
	unsigned long owner = (unsigned long) READ_ONCE(lock->owner);

	return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
}
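/*
 * Illustrative only: the core code (kernel/locking/rtmutex.c) keeps the
 * "has waiters" state in bit 0 of lock->owner, so a single word encodes
 * both the owning task and whether the lockless fast path may release
 * the lock. A hypothetical sketch of the encoding, not a helper defined
 * in this header:
 *
 *	// Unowned lock, no waiters:
 *	lock->owner == NULL
 *
 *	// Owned by @task, no waiters (fast unlock possible):
 *	lock->owner == (struct task_struct *)task
 *
 *	// Owned by @task with enqueued waiters (slow path required):
 *	lock->owner == (struct task_struct *)
 *			((unsigned long)task | RT_MUTEX_HAS_WAITERS)
 *
 * rt_mutex_owner() above masks RT_MUTEX_HAS_WAITERS off, so callers
 * always see a plain task pointer (or NULL for an unowned lock).
 */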
/*
 * Constants for rt mutex functions which have a selectable deadlock
 * detection.
 *
 * RT_MUTEX_MIN_CHAINWALK:	Stops the lock chain walk when there are
 *				no further PI adjustments to be made.
 *
 * RT_MUTEX_FULL_CHAINWALK:	Invoke deadlock detection with a full
 *				walk of the lock chain.
 */
enum rtmutex_chainwalk {
	RT_MUTEX_MIN_CHAINWALK,
	RT_MUTEX_FULL_CHAINWALK,
};

static inline void __rt_mutex_base_init(struct rt_mutex_base *lock)
{
	raw_spin_lock_init(&lock->wait_lock);
	lock->waiters = RB_ROOT_CACHED;
	lock->owner = NULL;
}

/* Debug functions */
static inline void debug_rt_mutex_unlock(struct rt_mutex_base *lock)
{
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current);
}

static inline void debug_rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
{
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock));
}

static inline void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
{
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		memset(waiter, 0x11, sizeof(*waiter));
}

static inline void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
{
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		memset(waiter, 0x22, sizeof(*waiter));
}

static inline void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
{
	debug_rt_mutex_init_waiter(waiter);
	RB_CLEAR_NODE(&waiter->pi_tree.entry);
	RB_CLEAR_NODE(&waiter->tree.entry);
	waiter->wake_state = TASK_NORMAL;
	waiter->task = NULL;
}

static inline void rt_mutex_init_rtlock_waiter(struct rt_mutex_waiter *waiter)
{
	rt_mutex_init_waiter(waiter);
	waiter->wake_state = TASK_RTLOCK_WAIT;
}

#else /* CONFIG_RT_MUTEXES */
/* Used in rcu/tree_plugin.h */
static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
{
	return NULL;
}
#endif /* !CONFIG_RT_MUTEXES */

#endif /* __KERNEL_RTMUTEX_COMMON_H */