1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
4 *
5 * started by Ingo Molnar and Thomas Gleixner.
6 *
7 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
8 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
9 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
10 * Copyright (C) 2006 Esben Nielsen
11 * Adaptive Spinlocks:
12 * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
13 * and Peter Morreale,
14 * Adaptive Spinlocks simplification:
15 * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
16 *
17 * See Documentation/locking/rt-mutex-design.rst for details.
18 */
19 #include <linux/sched.h>
20 #include <linux/sched/debug.h>
21 #include <linux/sched/deadline.h>
22 #include <linux/sched/signal.h>
23 #include <linux/sched/rt.h>
24 #include <linux/sched/wake_q.h>
25 #include <linux/ww_mutex.h>
26
27 #include <trace/events/lock.h>
28
29 #include "rtmutex_common.h"
30
31 #ifndef WW_RT
32 # define build_ww_mutex() (false)
33 # define ww_container_of(rtm) NULL
34
35 static inline int __ww_mutex_add_waiter(struct rt_mutex_waiter *waiter,
36 struct rt_mutex *lock,
37 struct ww_acquire_ctx *ww_ctx,
38 struct wake_q_head *wake_q)
39 {
40 return 0;
41 }
42
43 static inline void __ww_mutex_check_waiters(struct rt_mutex *lock,
44 struct ww_acquire_ctx *ww_ctx,
45 struct wake_q_head *wake_q)
46 {
47 }
48
49 static inline void ww_mutex_lock_acquired(struct ww_mutex *lock,
50 struct ww_acquire_ctx *ww_ctx)
51 {
52 }
53
54 static inline int __ww_mutex_check_kill(struct rt_mutex *lock,
55 struct rt_mutex_waiter *waiter,
56 struct ww_acquire_ctx *ww_ctx)
57 {
58 return 0;
59 }
60
61 #else
62 # define build_ww_mutex() (true)
63 # define ww_container_of(rtm) container_of(rtm, struct ww_mutex, base)
64 # include "ww_mutex.h"
65 #endif
66
67 /*
68 * lock->owner state tracking:
69 *
70 * lock->owner holds the task_struct pointer of the owner. Bit 0
71 * is used to keep track of the "lock has waiters" state.
72 *
73 * owner bit0
74 * NULL 0 lock is free (fast acquire possible)
75 * NULL 1 lock is free and has waiters and the top waiter
76 * is going to take the lock*
77 * taskpointer 0 lock is held (fast release possible)
78 * taskpointer 1 lock is held and has waiters**
79 *
80 * The fast atomic compare exchange based acquire and release is only
81 * possible when bit 0 of lock->owner is 0.
82 *
83 * (*) It also can be a transitional state when grabbing the lock
84 * with ->wait_lock held. To prevent any fast path cmpxchg to the lock,
85 * we need to set bit 0 before looking at the lock, and the owner may be
86 * NULL in this small time, hence this can be a transitional state.
87 *
88 * (**) There is a small time when bit 0 is set but there are no
89 * waiters. This can happen when grabbing the lock in the slow path.
90 * To prevent a cmpxchg of the owner releasing the lock, we need to
91 * set this bit before looking at the lock.
92 */
93
94 static __always_inline struct task_struct *
95 rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner)
96 {
97 unsigned long val = (unsigned long)owner;
98
99 if (rt_mutex_has_waiters(lock))
100 val |= RT_MUTEX_HAS_WAITERS;
101
102 return (struct task_struct *)val;
103 }
104
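/*
 * Decode side, as a minimal sketch of what the real helper
 * rt_mutex_owner() in rtmutex_common.h does:
 *
 *	val         = (unsigned long)READ_ONCE(lock->owner);
 *	has_waiters = val & RT_MUTEX_HAS_WAITERS;
 *	owner       = (struct task_struct *)(val & ~RT_MUTEX_HAS_WAITERS);
 */
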
105 static __always_inline void
106 rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
107 {
108 /*
109 * lock->wait_lock is held but explicit acquire semantics are needed
110 * for a new lock owner so WRITE_ONCE is insufficient.
111 */
112 xchg_acquire(&lock->owner, rt_mutex_owner_encode(lock, owner));
113 }
114
115 static __always_inline void rt_mutex_clear_owner(struct rt_mutex_base *lock)
116 {
117 /* lock->wait_lock is held so the unlock provides release semantics. */
118 WRITE_ONCE(lock->owner, rt_mutex_owner_encode(lock, NULL));
119 }
120
121 static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
122 {
123 lock->owner = (struct task_struct *)
124 ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
125 }
126
127 static __always_inline void
128 fixup_rt_mutex_waiters(struct rt_mutex_base *lock, bool acquire_lock)
129 {
130 unsigned long owner, *p = (unsigned long *) &lock->owner;
131
132 if (rt_mutex_has_waiters(lock))
133 return;
134
135 /*
136 * The rbtree has no waiters enqueued, now make sure that the
137 * lock->owner still has the waiters bit set, otherwise the
138 * following can happen:
139 *
140 * CPU 0 CPU 1 CPU2
141 * l->owner=T1
142 * rt_mutex_lock(l)
143 * lock(l->lock)
144 * l->owner = T1 | HAS_WAITERS;
145 * enqueue(T2)
146 * boost()
147 * unlock(l->lock)
148 * block()
149 *
150 * rt_mutex_lock(l)
151 * lock(l->lock)
152 * l->owner = T1 | HAS_WAITERS;
153 * enqueue(T3)
154 * boost()
155 * unlock(l->lock)
156 * block()
157 * signal(->T2) signal(->T3)
158 * lock(l->lock)
159 * dequeue(T2)
160 * deboost()
161 * unlock(l->lock)
162 * lock(l->lock)
163 * dequeue(T3)
164 * ==> wait list is empty
165 * deboost()
166 * unlock(l->lock)
167 * lock(l->lock)
168 * fixup_rt_mutex_waiters()
169 * if (wait_list_empty(l)) {
170 * l->owner = owner
171 * owner = l->owner & ~HAS_WAITERS;
172 * ==> l->owner = T1
173 * }
174 * lock(l->lock)
175 * rt_mutex_unlock(l) fixup_rt_mutex_waiters()
176 * if (wait_list_empty(l)) {
177 * owner = l->owner & ~HAS_WAITERS;
178 * cmpxchg(l->owner, T1, NULL)
179 * ===> Success (l->owner = NULL)
180 *
181 * l->owner = owner
182 * ==> l->owner = T1
183 * }
184 *
185 * With the check for the waiter bit in place T3 on CPU2 will not
186 * overwrite. All tasks fiddling with the waiters bit are
187 * serialized by l->lock, so nothing else can modify the waiters
188 * bit. If the bit is set then nothing can change l->owner either
189 * so the simple RMW is safe. The cmpxchg() will simply fail if it
190 * happens in the middle of the RMW because the waiters bit is
191 * still set.
192 */
193 owner = READ_ONCE(*p);
194 if (owner & RT_MUTEX_HAS_WAITERS) {
195 /*
196 * See rt_mutex_set_owner() and rt_mutex_clear_owner() on
197 * why xchg_acquire() is used for updating owner for
198 * locking and WRITE_ONCE() for unlocking.
199 *
200 * WRITE_ONCE() would work for the acquire case too, but
201 * in case that the lock acquisition failed it might
202 * force other lockers into the slow path unnecessarily.
203 */
204 if (acquire_lock)
205 xchg_acquire(p, owner & ~RT_MUTEX_HAS_WAITERS);
206 else
207 WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
208 }
209 }
210
211 /*
212 * We can speed up the acquire/release, if there's no debugging state to be
213 * set up.
214 */
215 #ifndef CONFIG_DEBUG_RT_MUTEXES
216 static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock,
217 struct task_struct *old,
218 struct task_struct *new)
219 {
220 return try_cmpxchg_acquire(&lock->owner, &old, new);
221 }
222
223 static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock)
224 {
225 return rt_mutex_cmpxchg_acquire(lock, NULL, current);
226 }
227
228 static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
229 struct task_struct *old,
230 struct task_struct *new)
231 {
232 return try_cmpxchg_release(&lock->owner, &old, new);
233 }
234
235 /*
236 * Callers must hold the ->wait_lock -- which is the whole purpose as we force
237 * all future threads that attempt to [Rmw] the lock to the slowpath. As such
238 * relaxed semantics suffice.
239 */
240 static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
241 {
242 unsigned long *p = (unsigned long *) &lock->owner;
243 unsigned long owner, new;
244
245 owner = READ_ONCE(*p);
246 do {
247 new = owner | RT_MUTEX_HAS_WAITERS;
248 } while (!try_cmpxchg_relaxed(p, &owner, new));
249
250 /*
251 * The cmpxchg loop above is relaxed to avoid back-to-back ACQUIRE
252 * operations in the event of contention. Ensure the successful
253 * cmpxchg is visible.
254 */
255 smp_mb__after_atomic();
256 }
257
258 /*
259 * Safe fastpath aware unlock:
260 * 1) Clear the waiters bit
261 * 2) Drop lock->wait_lock
262 * 3) Try to unlock the lock with cmpxchg
263 */
264 static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock,
265 unsigned long flags)
266 __releases(lock->wait_lock)
267 {
268 struct task_struct *owner = rt_mutex_owner(lock);
269
270 clear_rt_mutex_waiters(lock);
271 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
272 /*
273 * If a new waiter comes in between the unlock and the cmpxchg
274 * we have two situations:
275 *
276 * unlock(wait_lock);
277 * lock(wait_lock);
278 * cmpxchg(p, owner, 0) == owner
279 * mark_rt_mutex_waiters(lock);
280 * acquire(lock);
281 * or:
282 *
283 * unlock(wait_lock);
284 * lock(wait_lock);
285 * mark_rt_mutex_waiters(lock);
286 *
287 * cmpxchg(p, owner, 0) != owner
288 * enqueue_waiter();
289 * unlock(wait_lock);
290 * lock(wait_lock);
291 * wake waiter();
292 * unlock(wait_lock);
293 * lock(wait_lock);
294 * acquire(lock);
295 */
296 return rt_mutex_cmpxchg_release(lock, owner, NULL);
297 }
298
299 #else
300 static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock,
301 struct task_struct *old,
302 struct task_struct *new)
303 {
304 return false;
305
306 }
307
308 static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock);
309
310 static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock)
311 {
312 /*
313 * With debug enabled the rt_mutex_cmpxchg_acquire() based trylock always fails.
314 *
315 * Avoid unconditionally taking the slow path by using
316 * rt_mutex_slowtrylock() which is covered by the debug code and can
317 * acquire a non-contended rtmutex.
318 */
319 return rt_mutex_slowtrylock(lock);
320 }
321
322 static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
323 struct task_struct *old,
324 struct task_struct *new)
325 {
326 return false;
327 }
328
329 static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
330 {
331 lock->owner = (struct task_struct *)
332 ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
333 }
334
335 /*
336 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
337 */
338 static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock,
339 unsigned long flags)
340 __releases(lock->wait_lock)
341 {
342 lock->owner = NULL;
343 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
344 return true;
345 }
346 #endif
347
348 static __always_inline int __waiter_prio(struct task_struct *task)
349 {
350 int prio = task->prio;
351
352 if (!rt_or_dl_prio(prio))
353 return DEFAULT_PRIO;
354
355 return prio;
356 }
357
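/*
 * Illustrative effect, assuming the usual kernel prio mapping: a
 * SCHED_FIFO prio 99 task maps to kernel prio 0, SCHED_FIFO prio 1 to
 * 98 and SCHED_DEADLINE to -1, while every SCHED_OTHER task - whatever
 * its nice level - is squashed to DEFAULT_PRIO (120). Nice values thus
 * never influence rtmutex waiter ordering.
 */
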
358 /*
359 * Update the waiter->tree copy of the sort keys.
360 */
361 static __always_inline void
362 waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
363 {
364 lockdep_assert_held(&waiter->lock->wait_lock);
365 lockdep_assert(RB_EMPTY_NODE(&waiter->tree.entry));
366
367 waiter->tree.prio = __waiter_prio(task);
368 waiter->tree.deadline = task->dl.deadline;
369 }
370
371 /*
372 * Update the waiter->pi_tree copy of the sort keys (from the tree copy).
373 */
374 static __always_inline void
375 waiter_clone_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
376 {
377 lockdep_assert_held(&waiter->lock->wait_lock);
378 lockdep_assert_held(&task->pi_lock);
379 lockdep_assert(RB_EMPTY_NODE(&waiter->pi_tree.entry));
380
381 waiter->pi_tree.prio = waiter->tree.prio;
382 waiter->pi_tree.deadline = waiter->tree.deadline;
383 }
384
385 /*
386 * Only use with rt_waiter_node_{less,equal}()
387 */
388 #define task_to_waiter_node(p) \
389 &(struct rt_waiter_node){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline }
390 #define task_to_waiter(p) \
391 &(struct rt_mutex_waiter){ .tree = *task_to_waiter_node(p) }
392
393 static __always_inline int rt_waiter_node_less(struct rt_waiter_node *left,
394 struct rt_waiter_node *right)
395 {
396 if (left->prio < right->prio)
397 return 1;
398
399 /*
400 * If both waiters have dl_prio(), we check the deadlines of the
401 * associated tasks.
402 * If left waiter has a dl_prio(), and we didn't return 1 above,
403 * then right waiter has a dl_prio() too.
404 */
405 if (dl_prio(left->prio))
406 return dl_time_before(left->deadline, right->deadline);
407
408 return 0;
409 }
410
411 static __always_inline int rt_waiter_node_equal(struct rt_waiter_node *left,
412 struct rt_waiter_node *right)
413 {
414 if (left->prio != right->prio)
415 return 0;
416
417 /*
418 * If both waiters have dl_prio(), we check the deadlines of the
419 * associated tasks.
420 * If left waiter has a dl_prio(), and we didn't return 0 above,
421 * then right waiter has a dl_prio() too.
422 */
423 if (dl_prio(left->prio))
424 return left->deadline == right->deadline;
425
426 return 1;
427 }
428
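/*
 * Example, using the mapping above: a waiter with prio 10 sorts before
 * one with prio 20; two SCHED_DEADLINE waiters both carry prio -1 and
 * fall through to the dl_time_before() comparison of their absolute
 * deadlines.
 */
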
429 static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
430 struct rt_mutex_waiter *top_waiter)
431 {
432 if (rt_waiter_node_less(&waiter->tree, &top_waiter->tree))
433 return true;
434
435 #ifdef RT_MUTEX_BUILD_SPINLOCKS
436 /*
437 * Note that RT tasks are excluded from same priority (lateral)
438 * steals to prevent the introduction of an unbounded latency.
439 */
440 if (rt_or_dl_prio(waiter->tree.prio))
441 return false;
442
443 return rt_waiter_node_equal(&waiter->tree, &top_waiter->tree);
444 #else
445 return false;
446 #endif
447 }
448
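/*
 * Example: with the PREEMPT_RT spinlock build, a SCHED_OTHER waiter may
 * laterally steal the lock from an equal-priority SCHED_OTHER top
 * waiter, while an RT or DL waiter never steals from an equal-priority
 * top waiter, keeping the top waiter's wakeup latency bounded.
 */
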
449 #define __node_2_waiter(node) \
450 rb_entry((node), struct rt_mutex_waiter, tree.entry)
451
452 static __always_inline bool __waiter_less(struct rb_node *a, const struct rb_node *b)
453 {
454 struct rt_mutex_waiter *aw = __node_2_waiter(a);
455 struct rt_mutex_waiter *bw = __node_2_waiter(b);
456
457 if (rt_waiter_node_less(&aw->tree, &bw->tree))
458 return 1;
459
460 if (!build_ww_mutex())
461 return 0;
462
463 if (rt_waiter_node_less(&bw->tree, &aw->tree))
464 return 0;
465
466 /* NOTE: relies on waiter->ww_ctx being set before insertion */
467 if (aw->ww_ctx) {
468 if (!bw->ww_ctx)
469 return 1;
470
471 return (signed long)(aw->ww_ctx->stamp -
472 bw->ww_ctx->stamp) < 0;
473 }
474
475 return 0;
476 }
477
478 static __always_inline void
479 rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
480 {
481 lockdep_assert_held(&lock->wait_lock);
482
483 rb_add_cached(&waiter->tree.entry, &lock->waiters, __waiter_less);
484 }
485
486 static __always_inline void
487 rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
488 {
489 lockdep_assert_held(&lock->wait_lock);
490
491 if (RB_EMPTY_NODE(&waiter->tree.entry))
492 return;
493
494 rb_erase_cached(&waiter->tree.entry, &lock->waiters);
495 RB_CLEAR_NODE(&waiter->tree.entry);
496 }
497
498 #define __node_2_rt_node(node) \
499 rb_entry((node), struct rt_waiter_node, entry)
500
501 static __always_inline bool __pi_waiter_less(struct rb_node *a, const struct rb_node *b)
502 {
503 return rt_waiter_node_less(__node_2_rt_node(a), __node_2_rt_node(b));
504 }
505
506 static __always_inline void
507 rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
508 {
509 lockdep_assert_held(&task->pi_lock);
510
511 rb_add_cached(&waiter->pi_tree.entry, &task->pi_waiters, __pi_waiter_less);
512 }
513
514 static __always_inline void
515 rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
516 {
517 lockdep_assert_held(&task->pi_lock);
518
519 if (RB_EMPTY_NODE(&waiter->pi_tree.entry))
520 return;
521
522 rb_erase_cached(&waiter->pi_tree.entry, &task->pi_waiters);
523 RB_CLEAR_NODE(&waiter->pi_tree.entry);
524 }
525
526 static __always_inline void rt_mutex_adjust_prio(struct rt_mutex_base *lock,
527 struct task_struct *p)
528 {
529 struct task_struct *pi_task = NULL;
530
531 lockdep_assert_held(&lock->wait_lock);
532 lockdep_assert(rt_mutex_owner(lock) == p);
533 lockdep_assert_held(&p->pi_lock);
534
535 if (task_has_pi_waiters(p))
536 pi_task = task_top_pi_waiter(p)->task;
537
538 rt_mutex_setprio(p, pi_task);
539 }
540
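/*
 * Example: if owner p runs SCHED_OTHER and its top pi waiter is a
 * SCHED_FIFO prio 50 task (kernel prio 49), rt_mutex_setprio() boosts p
 * to an effective prio of 49; once the last pi waiter is gone, pi_task
 * is NULL and p falls back to its normal priority.
 */
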
541 /* RT mutex specific wake_q wrappers */
542 static __always_inline void rt_mutex_wake_q_add_task(struct rt_wake_q_head *wqh,
543 struct task_struct *task,
544 unsigned int wake_state)
545 {
546 if (IS_ENABLED(CONFIG_PREEMPT_RT) && wake_state == TASK_RTLOCK_WAIT) {
547 if (IS_ENABLED(CONFIG_PROVE_LOCKING))
548 WARN_ON_ONCE(wqh->rtlock_task);
549 get_task_struct(task);
550 wqh->rtlock_task = task;
551 } else {
552 wake_q_add(&wqh->head, task);
553 }
554 }
555
556 static __always_inline void rt_mutex_wake_q_add(struct rt_wake_q_head *wqh,
557 struct rt_mutex_waiter *w)
558 {
559 rt_mutex_wake_q_add_task(wqh, w->task, w->wake_state);
560 }
561
562 static __always_inline void rt_mutex_wake_up_q(struct rt_wake_q_head *wqh)
563 {
564 if (IS_ENABLED(CONFIG_PREEMPT_RT) && wqh->rtlock_task) {
565 wake_up_state(wqh->rtlock_task, TASK_RTLOCK_WAIT);
566 put_task_struct(wqh->rtlock_task);
567 wqh->rtlock_task = NULL;
568 }
569
570 if (!wake_q_empty(&wqh->head))
571 wake_up_q(&wqh->head);
572
573 /* Pairs with preempt_disable() in mark_wakeup_next_waiter() */
574 preempt_enable();
575 }
576
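/*
 * Typical pairing, mirroring rt_mutex_slowunlock() below:
 *
 *	mark_wakeup_next_waiter(&wqh, lock);		<- disables preemption
 *	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 *	rt_mutex_wake_up_q(&wqh);			<- wakeups + preempt_enable()
 */
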
577 /*
578 * Deadlock detection is conditional:
579 *
580 * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
581 * if the detect argument is == RT_MUTEX_FULL_CHAINWALK.
582 *
583 * If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always
584 * conducted independent of the detect argument.
585 *
586 * If the waiter argument is NULL this indicates the deboost path and
587 * deadlock detection is disabled independent of the detect argument
588 * and the config settings.
589 */
590 static __always_inline bool
591 rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
592 enum rtmutex_chainwalk chwalk)
593 {
594 if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
595 return waiter != NULL;
596 return chwalk == RT_MUTEX_FULL_CHAINWALK;
597 }
598
599 static __always_inline struct rt_mutex_base *task_blocked_on_lock(struct task_struct *p)
600 {
601 return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
602 }
603
604 /*
605 * Adjust the priority chain. Also used for deadlock detection.
606 * Decreases task's usage by one - may thus free the task.
607 *
608 * @task: the task owning the mutex (owner) for which a chain walk is
609 * probably needed
610 * @chwalk: do we have to carry out deadlock detection?
611 * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck
612 * things for a task that has just got its priority adjusted, and
613 * is waiting on a mutex)
614 * @next_lock: the mutex on which the owner of @orig_lock was blocked before
615 * we dropped its pi_lock. Is never dereferenced, only used for
616 * comparison to detect lock chain changes.
617 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
618 * its priority to the mutex owner (can be NULL in the case
619 * depicted above or if the top waiter is gone away and we are
620 * actually deboosting the owner)
621 * @top_task: the current top waiter
622 *
623 * Returns 0 or -EDEADLK.
624 *
625 * Chain walk basics and protection scope
626 *
627 * [R] refcount on task
628 * [Pn] task->pi_lock held
629 * [L] rtmutex->wait_lock held
630 *
631 * Normal locking order:
632 *
633 * rtmutex->wait_lock
634 * task->pi_lock
635 *
636 * Step Description Protected by
637 * function arguments:
638 * @task [R]
639 * @orig_lock if != NULL @top_task is blocked on it
640 * @next_lock Unprotected. Cannot be
641 * dereferenced. Only used for
642 * comparison.
643 * @orig_waiter if != NULL @top_task is blocked on it
644 * @top_task current, or in case of proxy
645 * locking protected by calling
646 * code
647 * again:
648 * loop_sanity_check();
649 * retry:
650 * [1] lock(task->pi_lock); [R] acquire [P1]
651 * [2] waiter = task->pi_blocked_on; [P1]
652 * [3] check_exit_conditions_1(); [P1]
653 * [4] lock = waiter->lock; [P1]
654 * [5] if (!try_lock(lock->wait_lock)) { [P1] try to acquire [L]
655 * unlock(task->pi_lock); release [P1]
656 * goto retry;
657 * }
658 * [6] check_exit_conditions_2(); [P1] + [L]
659 * [7] requeue_lock_waiter(lock, waiter); [P1] + [L]
660 * [8] unlock(task->pi_lock); release [P1]
661 * put_task_struct(task); release [R]
662 * [9] check_exit_conditions_3(); [L]
663 * [10] task = owner(lock); [L]
664 * get_task_struct(task); [L] acquire [R]
665 * lock(task->pi_lock); [L] acquire [P2]
666 * [11] requeue_pi_waiter(tsk, waiters(lock));[P2] + [L]
667 * [12] check_exit_conditions_4(); [P2] + [L]
668 * [13] unlock(task->pi_lock); release [P2]
669 * unlock(lock->wait_lock); release [L]
670 * goto again;
671 *
672 * Where P1 is the blocking task and P2 is the lock owner; going up one step
673 * the owner becomes the next blocked task etc..
674 *
675 *
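 * Illustrative two-step walk: T3 blocks on L2 owned by T2, and T2 is in
 * turn blocked on L1 owned by T1. Blocking T3 boosts T2 directly in
 * task_blocks_on_rt_mutex(); the chain walk then starts at T2, requeues
 * T2's waiter on L1, boosts T1 via T1->pi_waiters and stops because T1
 * is not blocked on anything (next_lock == NULL).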
676 */
677 static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
678 enum rtmutex_chainwalk chwalk,
679 struct rt_mutex_base *orig_lock,
680 struct rt_mutex_base *next_lock,
681 struct rt_mutex_waiter *orig_waiter,
682 struct task_struct *top_task)
683 {
684 struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
685 struct rt_mutex_waiter *prerequeue_top_waiter;
686 int ret = 0, depth = 0;
687 struct rt_mutex_base *lock;
688 bool detect_deadlock;
689 bool requeue = true;
690
691 detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);
692
693 /*
694 * The (de)boosting is a step by step approach with a lot of
695 * pitfalls. We want this to be preemptible and we want to hold a
696 * maximum of two locks per step. So we have to check
697 * carefully whether things change under us.
698 */
699 again:
700 /*
701 * We limit the lock chain length for each invocation.
702 */
703 if (++depth > max_lock_depth) {
704 static int prev_max;
705
706 /*
707 * Print this only once. If the admin changes the limit,
708 * print a new message when reaching the limit again.
709 */
710 if (prev_max != max_lock_depth) {
711 prev_max = max_lock_depth;
712 printk(KERN_WARNING "Maximum lock depth %d reached "
713 "task: %s (%d)\n", max_lock_depth,
714 top_task->comm, task_pid_nr(top_task));
715 }
716 put_task_struct(task);
717
718 return -EDEADLK;
719 }
720
721 /*
722 * We are fully preemptible here and only hold the refcount on
723 * @task. So everything can have changed under us since the
724 * caller or our own code below (goto retry/again) dropped all
725 * locks.
726 */
727 retry:
728 /*
729 * [1] Task cannot go away as we did a get_task() before !
730 */
731 raw_spin_lock_irq(&task->pi_lock);
732
733 /*
734 * [2] Get the waiter on which @task is blocked on.
735 */
736 waiter = task->pi_blocked_on;
737
738 /*
739 * [3] check_exit_conditions_1() protected by task->pi_lock.
740 */
741
742 /*
743 * Check whether the end of the boosting chain has been
744 * reached or the state of the chain has changed while we
745 * dropped the locks.
746 */
747 if (!waiter)
748 goto out_unlock_pi;
749
750 /*
751 * Check the orig_waiter state. After we dropped the locks,
752 * the previous owner of the lock might have released the lock.
753 */
754 if (orig_waiter && !rt_mutex_owner(orig_lock))
755 goto out_unlock_pi;
756
757 /*
758 * We dropped all locks after taking a refcount on @task, so
759 * the task might have moved on in the lock chain or even left
760 * the chain completely and blocks now on an unrelated lock or
761 * on @orig_lock.
762 *
763 * We stored the lock on which @task was blocked in @next_lock,
764 * so we can detect the chain change.
765 */
766 if (next_lock != waiter->lock)
767 goto out_unlock_pi;
768
769 /*
770 * There could be 'spurious' loops in the lock graph due to ww_mutex,
771 * consider:
772 *
773 * P1: A, ww_A, ww_B
774 * P2: ww_B, ww_A
775 * P3: A
776 *
777 * P3 should not return -EDEADLK because it gets trapped in the cycle
778 * created by P1 and P2 (which will resolve -- and runs into
779 * max_lock_depth above). Therefore disable detect_deadlock such that
780 * the below termination condition can trigger once all relevant tasks
781 * are boosted.
782 *
783 * Even when we start with ww_mutex we can disable deadlock detection,
784 * since we would suppress a ww_mutex induced deadlock at [6] anyway.
785 * Suppressing it here however is not sufficient since we might still
786 * hit [6] due to adjustment driven iteration.
787 *
788 * NOTE: if someone were to create a deadlock between 2 ww_classes we'd
789 * utterly fail to report it; lockdep should.
790 */
791 if (IS_ENABLED(CONFIG_PREEMPT_RT) && waiter->ww_ctx && detect_deadlock)
792 detect_deadlock = false;
793
794 /*
795 * Drop out, when the task has no waiters. Note,
796 * top_waiter can be NULL, when we are in the deboosting
797 * mode!
798 */
799 if (top_waiter) {
800 if (!task_has_pi_waiters(task))
801 goto out_unlock_pi;
802 /*
803 * If deadlock detection is off, we stop here if we
804 * are not the top pi waiter of the task. If deadlock
805 * detection is enabled we continue, but stop the
806 * requeueing in the chain walk.
807 */
808 if (top_waiter != task_top_pi_waiter(task)) {
809 if (!detect_deadlock)
810 goto out_unlock_pi;
811 else
812 requeue = false;
813 }
814 }
815
816 /*
817 * If the waiter priority is the same as the task priority
818 * then there is no further priority adjustment necessary. If
819 * deadlock detection is off, we stop the chain walk. If it's
820 * enabled we continue, but stop the requeueing in the chain
821 * walk.
822 */
823 if (rt_waiter_node_equal(&waiter->tree, task_to_waiter_node(task))) {
824 if (!detect_deadlock)
825 goto out_unlock_pi;
826 else
827 requeue = false;
828 }
829
830 /*
831 * [4] Get the next lock; per holding task->pi_lock the task cannot
832 * unblock, which guarantees @lock's existence.
833 */
834 lock = waiter->lock;
835 /*
836 * [5] We need to trylock here as we are holding task->pi_lock,
837 * which is the reverse lock order versus the other rtmutex
838 * operations.
839 *
840 * Per the above, holding task->pi_lock guarantees lock exists, so
841 * inverting this lock order is infeasible from a life-time
842 * perspective.
843 */
844 if (!raw_spin_trylock(&lock->wait_lock)) {
845 raw_spin_unlock_irq(&task->pi_lock);
846 cpu_relax();
847 goto retry;
848 }
849
850 /*
851 * [6] check_exit_conditions_2() protected by task->pi_lock and
852 * lock->wait_lock.
853 *
854 * Deadlock detection. If the lock is the same as the original
855 * lock which caused us to walk the lock chain or if the
856 * current lock is owned by the task which initiated the chain
857 * walk, we detected a deadlock.
858 */
859 if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
860 ret = -EDEADLK;
861
862 /*
863 * When the deadlock is due to ww_mutex; also see above. Don't
864 * report the deadlock and instead let the ww_mutex wound/die
865 * logic pick which of the contending threads gets -EDEADLK.
866 *
867 * NOTE: assumes the cycle only contains a single ww_class; any
868 * other configuration and we fail to report; also, see
869 * lockdep.
870 */
871 if (IS_ENABLED(CONFIG_PREEMPT_RT) && orig_waiter && orig_waiter->ww_ctx)
872 ret = 0;
873
874 raw_spin_unlock(&lock->wait_lock);
875 goto out_unlock_pi;
876 }
877
878 /*
879 * If we just follow the lock chain for deadlock detection, no
880 * need to do all the requeue operations. To avoid a truckload
881 * of conditionals around the various places below, just do the
882 * minimum chain walk checks.
883 */
884 if (!requeue) {
885 /*
886 * No requeue[7] here. Just release @task [8]
887 */
888 raw_spin_unlock(&task->pi_lock);
889 put_task_struct(task);
890
891 /*
892 * [9] check_exit_conditions_3 protected by lock->wait_lock.
893 * If there is no owner of the lock, end of chain.
894 */
895 if (!rt_mutex_owner(lock)) {
896 raw_spin_unlock_irq(&lock->wait_lock);
897 return 0;
898 }
899
900 /* [10] Grab the next task, i.e. owner of @lock */
901 task = get_task_struct(rt_mutex_owner(lock));
902 raw_spin_lock(&task->pi_lock);
903
904 /*
905 * No requeue [11] here. We just do deadlock detection.
906 *
907 * [12] Store whether owner is blocked
908 * itself. Decision is made after dropping the locks
909 */
910 next_lock = task_blocked_on_lock(task);
911 /*
912 * Get the top waiter for the next iteration
913 */
914 top_waiter = rt_mutex_top_waiter(lock);
915
916 /* [13] Drop locks */
917 raw_spin_unlock(&task->pi_lock);
918 raw_spin_unlock_irq(&lock->wait_lock);
919
920 /* If owner is not blocked, end of chain. */
921 if (!next_lock)
922 goto out_put_task;
923 goto again;
924 }
925
926 /*
927 * Store the current top waiter before doing the requeue
928 * operation on @lock. We need it for the boost/deboost
929 * decision below.
930 */
931 prerequeue_top_waiter = rt_mutex_top_waiter(lock);
932
933 /* [7] Requeue the waiter in the lock waiter tree. */
934 rt_mutex_dequeue(lock, waiter);
935
936 /*
937 * Update the waiter prio fields now that we're dequeued.
938 *
939 * These values can have changed through either:
940 *
941 * sys_sched_set_scheduler() / sys_sched_setattr()
942 *
943 * or
944 *
945 * DL CBS enforcement advancing the effective deadline.
946 */
947 waiter_update_prio(waiter, task);
948
949 rt_mutex_enqueue(lock, waiter);
950
951 /*
952 * [8] Release the (blocking) task in preparation for
953 * taking the owner task in [10].
954 *
955 * Since we hold lock->wait_lock, task cannot unblock, even if we
956 * release task->pi_lock.
957 */
958 raw_spin_unlock(&task->pi_lock);
959 put_task_struct(task);
960
961 /*
962 * [9] check_exit_conditions_3 protected by lock->wait_lock.
963 *
964 * We must abort the chain walk if there is no lock owner even
965 * in the deadlock detection case, as we have nothing to
966 * follow here. This is the end of the chain we are walking.
967 */
968 if (!rt_mutex_owner(lock)) {
969 /*
970 * If the requeue [7] above changed the top waiter,
971 * then we need to wake the new top waiter up to try
972 * to get the lock.
973 */
974 top_waiter = rt_mutex_top_waiter(lock);
975 if (prerequeue_top_waiter != top_waiter)
976 wake_up_state(top_waiter->task, top_waiter->wake_state);
977 raw_spin_unlock_irq(&lock->wait_lock);
978 return 0;
979 }
980
981 /*
982 * [10] Grab the next task, i.e. the owner of @lock
983 *
984 * Per holding lock->wait_lock and checking for !owner above, there
985 * must be an owner and it cannot go away.
986 */
987 task = get_task_struct(rt_mutex_owner(lock));
988 raw_spin_lock(&task->pi_lock);
989
990 /* [11] requeue the pi waiters if necessary */
991 if (waiter == rt_mutex_top_waiter(lock)) {
992 /*
993 * The waiter became the new top (highest priority)
994 * waiter on the lock. Replace the previous top waiter
995 * in the owner tasks pi waiters tree with this waiter
996 * and adjust the priority of the owner.
997 */
998 rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
999 waiter_clone_prio(waiter, task);
1000 rt_mutex_enqueue_pi(task, waiter);
1001 rt_mutex_adjust_prio(lock, task);
1002
1003 } else if (prerequeue_top_waiter == waiter) {
1004 /*
1005 * The waiter was the top waiter on the lock, but is
1006 * no longer the top priority waiter. Replace waiter in
1007 * the owner tasks pi waiters tree with the new top
1008 * (highest priority) waiter and adjust the priority
1009 * of the owner.
1010 * The new top waiter is stored in @waiter so that
1011 * @waiter == @top_waiter evaluates to true below and
1012 * we continue to deboost the rest of the chain.
1013 */
1014 rt_mutex_dequeue_pi(task, waiter);
1015 waiter = rt_mutex_top_waiter(lock);
1016 waiter_clone_prio(waiter, task);
1017 rt_mutex_enqueue_pi(task, waiter);
1018 rt_mutex_adjust_prio(lock, task);
1019 } else {
1020 /*
1021 * Nothing changed. No need to do any priority
1022 * adjustment.
1023 */
1024 }
1025
1026 /*
1027 * [12] check_exit_conditions_4() protected by task->pi_lock
1028 * and lock->wait_lock. The actual decisions are made after we
1029 * dropped the locks.
1030 *
1031 * Check whether the task which owns the current lock is pi
1032 * blocked itself. If yes we store a pointer to the lock for
1033 * the lock chain change detection above. After we dropped
1034 * task->pi_lock next_lock cannot be dereferenced anymore.
1035 */
1036 next_lock = task_blocked_on_lock(task);
1037 /*
1038 * Store the top waiter of @lock for the end of chain walk
1039 * decision below.
1040 */
1041 top_waiter = rt_mutex_top_waiter(lock);
1042
1043 /* [13] Drop the locks */
1044 raw_spin_unlock(&task->pi_lock);
1045 raw_spin_unlock_irq(&lock->wait_lock);
1046
1047 /*
1048 * Make the actual exit decisions [12], based on the stored
1049 * values.
1050 *
1051 * We reached the end of the lock chain. Stop right here. No
1052 * point to go back just to figure that out.
1053 */
1054 if (!next_lock)
1055 goto out_put_task;
1056
1057 /*
1058 * If the current waiter is not the top waiter on the lock,
1059 * then we can stop the chain walk here if we are not in full
1060 * deadlock detection mode.
1061 */
1062 if (!detect_deadlock && waiter != top_waiter)
1063 goto out_put_task;
1064
1065 goto again;
1066
1067 out_unlock_pi:
1068 raw_spin_unlock_irq(&task->pi_lock);
1069 out_put_task:
1070 put_task_struct(task);
1071
1072 return ret;
1073 }
1074
1075 /*
1076 * Try to take an rt-mutex
1077 *
1078 * Must be called with lock->wait_lock held and interrupts disabled
1079 *
1080 * @lock: The lock to be acquired.
1081 * @task: The task which wants to acquire the lock
1082 * @waiter: The waiter that is queued to the lock's wait tree if the
1083 * callsite called task_blocks_on_rt_mutex(), otherwise NULL
1084 */
1085 static int __sched
1086 try_to_take_rt_mutex(struct rt_mutex_base *lock, struct task_struct *task,
1087 struct rt_mutex_waiter *waiter)
1088 {
1089 lockdep_assert_held(&lock->wait_lock);
1090
1091 /*
1092 * Before testing whether we can acquire @lock, we set the
1093 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
1094 * other tasks which try to modify @lock into the slow path
1095 * and they serialize on @lock->wait_lock.
1096 *
1097 * The RT_MUTEX_HAS_WAITERS bit can have a transitional state
1098 * as explained at the top of this file if and only if:
1099 *
1100 * - There is a lock owner. The caller must fixup the
1101 * transient state if it does a trylock or leaves the lock
1102 * function due to a signal or timeout.
1103 *
1104 * - @task acquires the lock and there are no other
1105 * waiters. This is undone in rt_mutex_set_owner(@task) at
1106 * the end of this function.
1107 */
1108 mark_rt_mutex_waiters(lock);
1109
1110 /*
1111 * If @lock has an owner, give up.
1112 */
1113 if (rt_mutex_owner(lock))
1114 return 0;
1115
1116 /*
1117 * If @waiter != NULL, @task has already enqueued the waiter
1118 * into @lock waiter tree. If @waiter == NULL then this is a
1119 * trylock attempt.
1120 */
1121 if (waiter) {
1122 struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock);
1123
1124 /*
1125 * If waiter is the highest priority waiter of @lock,
1126 * or allowed to steal it, take it over.
1127 */
1128 if (waiter == top_waiter || rt_mutex_steal(waiter, top_waiter)) {
1129 /*
1130 * We can acquire the lock. Remove the waiter from the
1131 * lock waiters tree.
1132 */
1133 rt_mutex_dequeue(lock, waiter);
1134 } else {
1135 return 0;
1136 }
1137 } else {
1138 /*
1139 * If the lock has waiters already we check whether @task is
1140 * eligible to take over the lock.
1141 *
1142 * If there are no other waiters, @task can acquire
1143 * the lock. @task->pi_blocked_on is NULL, so it does
1144 * not need to be dequeued.
1145 */
1146 if (rt_mutex_has_waiters(lock)) {
1147 /* Check whether the trylock can steal it. */
1148 if (!rt_mutex_steal(task_to_waiter(task),
1149 rt_mutex_top_waiter(lock)))
1150 return 0;
1151
1152 /*
1153 * The current top waiter stays enqueued. We
1154 * don't have to change anything in the lock
1155 * waiters order.
1156 */
1157 } else {
1158 /*
1159 * No waiters. Take the lock without the
1160 * pi_lock dance. @task->pi_blocked_on is NULL
1161 * and we have no waiters to enqueue in @task
1162 * pi waiters tree.
1163 */
1164 goto takeit;
1165 }
1166 }
1167
1168 /*
1169 * Clear @task->pi_blocked_on. Requires protection by
1170 * @task->pi_lock. Redundant operation for the @waiter == NULL
1171 * case, but conditionals are more expensive than a redundant
1172 * store.
1173 */
1174 raw_spin_lock(&task->pi_lock);
1175 task->pi_blocked_on = NULL;
1176 /*
1177 * Finish the lock acquisition. @task is the new owner. If
1178 * other waiters exist we have to insert the highest priority
1179 * waiter into @task->pi_waiters tree.
1180 */
1181 if (rt_mutex_has_waiters(lock))
1182 rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
1183 raw_spin_unlock(&task->pi_lock);
1184
1185 takeit:
1186 /*
1187 * This either preserves the RT_MUTEX_HAS_WAITERS bit if there
1188 * are still waiters or clears it.
1189 */
1190 rt_mutex_set_owner(lock, task);
1191
1192 return 1;
1193 }
1194
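/*
 * Within this file the two call patterns are (illustrative summary):
 *
 *	try_to_take_rt_mutex(lock, current, NULL);	<- trylock / first
 *							   slowpath attempt
 *	try_to_take_rt_mutex(lock, current, waiter);	<- retries from the
 *							   wait loops
 *
 * and the caller cleans up the unconditionally set waiters bit with
 * fixup_rt_mutex_waiters() once it is done.
 */
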
1195 /*
1196 * Task blocks on lock.
1197 *
1198 * Prepare waiter and propagate pi chain
1199 *
1200 * This must be called with lock->wait_lock held and interrupts disabled
1201 */
1202 static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
1203 struct rt_mutex_waiter *waiter,
1204 struct task_struct *task,
1205 struct ww_acquire_ctx *ww_ctx,
1206 enum rtmutex_chainwalk chwalk,
1207 struct wake_q_head *wake_q)
1208 {
1209 struct task_struct *owner = rt_mutex_owner(lock);
1210 struct rt_mutex_waiter *top_waiter = waiter;
1211 struct rt_mutex_base *next_lock;
1212 int chain_walk = 0, res;
1213
1214 lockdep_assert_held(&lock->wait_lock);
1215
1216 /*
1217 * Early deadlock detection. We really don't want the task to
1218 * enqueue on itself just to untangle the mess later. It's not
1219 * only an optimization. We drop the locks, so another waiter
1220 * can come in before the chain walk detects the deadlock. So
1221 * the other will detect the deadlock and return -EDEADLOCK,
1222 * which is wrong, as the other waiter is not in a deadlock
1223 * situation.
1224 *
1225 * Except for ww_mutex, in that case the chain walk must already deal
1226 * with spurious cycles, see the comments at [3] and [6].
1227 */
1228 if (owner == task && !(build_ww_mutex() && ww_ctx))
1229 return -EDEADLK;
1230
1231 raw_spin_lock(&task->pi_lock);
1232 waiter->task = task;
1233 waiter->lock = lock;
1234 waiter_update_prio(waiter, task);
1235 waiter_clone_prio(waiter, task);
1236
1237 /* Get the top priority waiter on the lock */
1238 if (rt_mutex_has_waiters(lock))
1239 top_waiter = rt_mutex_top_waiter(lock);
1240 rt_mutex_enqueue(lock, waiter);
1241
1242 task->pi_blocked_on = waiter;
1243
1244 raw_spin_unlock(&task->pi_lock);
1245
1246 if (build_ww_mutex() && ww_ctx) {
1247 struct rt_mutex *rtm;
1248
1249 /* Check whether the waiter should back out immediately */
1250 rtm = container_of(lock, struct rt_mutex, rtmutex);
1251 res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx, wake_q);
1252 if (res) {
1253 raw_spin_lock(&task->pi_lock);
1254 rt_mutex_dequeue(lock, waiter);
1255 task->pi_blocked_on = NULL;
1256 raw_spin_unlock(&task->pi_lock);
1257 return res;
1258 }
1259 }
1260
1261 if (!owner)
1262 return 0;
1263
1264 raw_spin_lock(&owner->pi_lock);
1265 if (waiter == rt_mutex_top_waiter(lock)) {
1266 rt_mutex_dequeue_pi(owner, top_waiter);
1267 rt_mutex_enqueue_pi(owner, waiter);
1268
1269 rt_mutex_adjust_prio(lock, owner);
1270 if (owner->pi_blocked_on)
1271 chain_walk = 1;
1272 } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
1273 chain_walk = 1;
1274 }
1275
1276 /* Store the lock on which owner is blocked or NULL */
1277 next_lock = task_blocked_on_lock(owner);
1278
1279 raw_spin_unlock(&owner->pi_lock);
1280 /*
1281 * Even if full deadlock detection is on, if the owner is not
1282 * blocked itself, we can avoid finding this out in the chain
1283 * walk.
1284 */
1285 if (!chain_walk || !next_lock)
1286 return 0;
1287
1288 /*
1289 * The owner can't disappear while holding a lock,
1290 * so the owner struct is protected by wait_lock.
1291 * Gets dropped in rt_mutex_adjust_prio_chain()!
1292 */
1293 get_task_struct(owner);
1294
1295 preempt_disable();
1296 raw_spin_unlock_irq(&lock->wait_lock);
1297 /* wake up any tasks on the wake_q before calling rt_mutex_adjust_prio_chain */
1298 wake_up_q(wake_q);
1299 wake_q_init(wake_q);
1300 preempt_enable();
1301
1302
1303 res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
1304 next_lock, waiter, task);
1305
1306 raw_spin_lock_irq(&lock->wait_lock);
1307
1308 return res;
1309 }
1310
1311 /*
1312 * Remove the top waiter from the current tasks pi waiter tree and
1313 * queue it up.
1314 *
1315 * Called with lock->wait_lock held and interrupts disabled.
1316 */
1317 static void __sched mark_wakeup_next_waiter(struct rt_wake_q_head *wqh,
1318 struct rt_mutex_base *lock)
1319 {
1320 struct rt_mutex_waiter *waiter;
1321
1322 lockdep_assert_held(&lock->wait_lock);
1323
1324 raw_spin_lock(&current->pi_lock);
1325
1326 waiter = rt_mutex_top_waiter(lock);
1327
1328 /*
1329 * Remove it from current->pi_waiters and deboost.
1330 *
1331 * We must in fact deboost here in order to ensure we call
1332 * rt_mutex_setprio() to update p->pi_top_task before the
1333 * task unblocks.
1334 */
1335 rt_mutex_dequeue_pi(current, waiter);
1336 rt_mutex_adjust_prio(lock, current);
1337
1338 /*
1339 * As we are waking up the top waiter, and the waiter stays
1340 * queued on the lock until it gets the lock, this lock
1341 * obviously has waiters. Just set the bit here and this has
1342 * the added benefit of forcing all new tasks into the
1343 * slow path making sure no task of lower priority than
1344 * the top waiter can steal this lock.
1345 */
1346 lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
1347
1348 /*
1349 * We deboosted before waking the top waiter task such that we don't
1350 * run two tasks with the 'same' priority (and ensure the
1351 * p->pi_top_task pointer points to a blocked task). This however can
1352 * lead to priority inversion if we would get preempted after the
1353 * deboost but before waking our donor task, hence the preempt_disable()
1354 * before unlock.
1355 *
1356 * Pairs with preempt_enable() in rt_mutex_wake_up_q();
1357 */
1358 preempt_disable();
1359 rt_mutex_wake_q_add(wqh, waiter);
1360 raw_spin_unlock(&current->pi_lock);
1361 }
1362
1363 static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock)
1364 {
1365 int ret = try_to_take_rt_mutex(lock, current, NULL);
1366
1367 /*
1368 * try_to_take_rt_mutex() sets the lock waiters bit
1369 * unconditionally. Clean this up.
1370 */
1371 fixup_rt_mutex_waiters(lock, true);
1372
1373 return ret;
1374 }
1375
1376 /*
1377 * Slow path try-lock function:
1378 */
1379 static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock)
1380 {
1381 unsigned long flags;
1382 int ret;
1383
1384 /*
1385 * If the lock already has an owner we fail to get the lock.
1386 * This can be done without taking the @lock->wait_lock as
1387 * it is only being read, and this is a trylock anyway.
1388 */
1389 if (rt_mutex_owner(lock))
1390 return 0;
1391
1392 /*
1393 * The mutex has currently no owner. Lock the wait lock and try to
1394 * acquire the lock. We use irqsave here to support early boot calls.
1395 */
1396 raw_spin_lock_irqsave(&lock->wait_lock, flags);
1397
1398 ret = __rt_mutex_slowtrylock(lock);
1399
1400 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
1401
1402 return ret;
1403 }
1404
1405 static __always_inline int __rt_mutex_trylock(struct rt_mutex_base *lock)
1406 {
1407 if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
1408 return 1;
1409
1410 return rt_mutex_slowtrylock(lock);
1411 }
1412
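/*
 * Usage sketch with a hypothetical caller (not part of this file): an
 * uncontended trylock is a single cmpxchg on lock->owner and only the
 * contended case falls back to the slowpath above:
 *
 *	if (__rt_mutex_trylock(&m->rtmutex)) {
 *		... critical section ...
 *		__rt_mutex_unlock(&m->rtmutex);
 *	}
 */
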
1413 /*
1414 * Slow path to release a rt-mutex.
1415 */
1416 static void __sched rt_mutex_slowunlock(struct rt_mutex_base *lock)
1417 {
1418 DEFINE_RT_WAKE_Q(wqh);
1419 unsigned long flags;
1420
1421 /* irqsave required to support early boot calls */
1422 raw_spin_lock_irqsave(&lock->wait_lock, flags);
1423
1424 debug_rt_mutex_unlock(lock);
1425
1426 /*
1427 * We must be careful here if the fast path is enabled. If we
1428 * have no waiters queued we cannot set owner to NULL here
1429 * because of:
1430 *
1431 * foo->lock->owner = NULL;
1432 * rtmutex_lock(foo->lock); <- fast path
1433 * free = atomic_dec_and_test(foo->refcnt);
1434 * rtmutex_unlock(foo->lock); <- fast path
1435 * if (free)
1436 * kfree(foo);
1437 * raw_spin_unlock(foo->lock->wait_lock);
1438 *
1439 * So for the fastpath enabled kernel:
1440 *
1441 * Nothing can set the waiters bit as long as we hold
1442 * lock->wait_lock. So we do the following sequence:
1443 *
1444 * owner = rt_mutex_owner(lock);
1445 * clear_rt_mutex_waiters(lock);
1446 * raw_spin_unlock(&lock->wait_lock);
1447 * if (cmpxchg(&lock->owner, owner, 0) == owner)
1448 * return;
1449 * goto retry;
1450 *
1451 * The fastpath disabled variant is simple as all access to
1452 * lock->owner is serialized by lock->wait_lock:
1453 *
1454 * lock->owner = NULL;
1455 * raw_spin_unlock(&lock->wait_lock);
1456 */
1457 while (!rt_mutex_has_waiters(lock)) {
1458 /* Drops lock->wait_lock ! */
1459 if (unlock_rt_mutex_safe(lock, flags) == true)
1460 return;
1461 /* Relock the rtmutex and try again */
1462 raw_spin_lock_irqsave(&lock->wait_lock, flags);
1463 }
1464
1465 /*
1466 * The wakeup next waiter path does not suffer from the above
1467 * race. See the comments there.
1468 *
1469 * Queue the next waiter for wakeup once we release the wait_lock.
1470 */
1471 mark_wakeup_next_waiter(&wqh, lock);
1472 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
1473
1474 rt_mutex_wake_up_q(&wqh);
1475 }
1476
1477 static __always_inline void __rt_mutex_unlock(struct rt_mutex_base *lock)
1478 {
1479 if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
1480 return;
1481
1482 rt_mutex_slowunlock(lock);
1483 }
1484
1485 #ifdef CONFIG_SMP
1486 static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
1487 struct rt_mutex_waiter *waiter,
1488 struct task_struct *owner)
1489 {
1490 bool res = true;
1491
1492 rcu_read_lock();
1493 for (;;) {
1494 /* If owner changed, trylock again. */
1495 if (owner != rt_mutex_owner(lock))
1496 break;
1497 /*
1498 * Ensure that @owner is dereferenced after checking that
1499 * the lock owner still matches @owner. If that fails,
1500 * @owner might point to freed memory. If it still matches,
1501 * the rcu_read_lock() ensures the memory stays valid.
1502 */
1503 barrier();
1504 /*
1505 * Stop spinning when:
1506 * - the lock owner has been scheduled out
1507 * - current is no longer the top waiter
1508 * - current is requested to reschedule (redundant
1509 * for CONFIG_PREEMPT_RCU=y)
1510 * - the VCPU on which owner runs is preempted
1511 */
1512 if (!owner_on_cpu(owner) || need_resched() ||
1513 !rt_mutex_waiter_is_top_waiter(lock, waiter)) {
1514 res = false;
1515 break;
1516 }
1517 cpu_relax();
1518 }
1519 rcu_read_unlock();
1520 return res;
1521 }
1522 #else
1523 static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
1524 struct rt_mutex_waiter *waiter,
1525 struct task_struct *owner)
1526 {
1527 return false;
1528 }
1529 #endif
1530
1531 #ifdef RT_MUTEX_BUILD_MUTEX
1532 /*
1533 * Functions required for:
1534 * - rtmutex, futex on all kernels
1535 * - mutex and rwsem substitutions on RT kernels
1536 */
1537
1538 /*
1539 * Remove a waiter from a lock and give up
1540 *
1541 * Must be called with lock->wait_lock held and interrupts disabled. It must
1542 * have just failed to try_to_take_rt_mutex().
1543 */
1544 static void __sched remove_waiter(struct rt_mutex_base *lock,
1545 struct rt_mutex_waiter *waiter)
1546 {
1547 bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
1548 struct task_struct *owner = rt_mutex_owner(lock);
1549 struct rt_mutex_base *next_lock;
1550
1551 lockdep_assert_held(&lock->wait_lock);
1552
1553 raw_spin_lock(&current->pi_lock);
1554 rt_mutex_dequeue(lock, waiter);
1555 current->pi_blocked_on = NULL;
1556 raw_spin_unlock(&current->pi_lock);
1557
1558 /*
1559 * Only update priority if the waiter was the highest priority
1560 * waiter of the lock and there is an owner to update.
1561 */
1562 if (!owner || !is_top_waiter)
1563 return;
1564
1565 raw_spin_lock(&owner->pi_lock);
1566
1567 rt_mutex_dequeue_pi(owner, waiter);
1568
1569 if (rt_mutex_has_waiters(lock))
1570 rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));
1571
1572 rt_mutex_adjust_prio(lock, owner);
1573
1574 /* Store the lock on which owner is blocked or NULL */
1575 next_lock = task_blocked_on_lock(owner);
1576
1577 raw_spin_unlock(&owner->pi_lock);
1578
1579 /*
1580 * Don't walk the chain, if the owner task is not blocked
1581 * itself.
1582 */
1583 if (!next_lock)
1584 return;
1585
1586 /* gets dropped in rt_mutex_adjust_prio_chain()! */
1587 get_task_struct(owner);
1588
1589 raw_spin_unlock_irq(&lock->wait_lock);
1590
1591 rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
1592 next_lock, NULL, current);
1593
1594 raw_spin_lock_irq(&lock->wait_lock);
1595 }
1596
1597 /**
1598 * rt_mutex_slowlock_block() - Perform the wait-wake-try-to-take loop
1599 * @lock: the rt_mutex to take
1600 * @ww_ctx: WW mutex context pointer
1601 * @state: the state the task should block in (TASK_INTERRUPTIBLE
1602 * or TASK_UNINTERRUPTIBLE)
1603 * @timeout: the pre-initialized and started timer, or NULL for none
1604 * @waiter: the pre-initialized rt_mutex_waiter
1605 * @wake_q: wake_q of tasks to wake when we drop the lock->wait_lock
1606 *
1607 * Must be called with lock->wait_lock held and interrupts disabled
1608 */
1609 static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
1610 struct ww_acquire_ctx *ww_ctx,
1611 unsigned int state,
1612 struct hrtimer_sleeper *timeout,
1613 struct rt_mutex_waiter *waiter,
1614 struct wake_q_head *wake_q)
1615 __releases(&lock->wait_lock) __acquires(&lock->wait_lock)
1616 {
1617 struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
1618 struct task_struct *owner;
1619 int ret = 0;
1620
1621 for (;;) {
1622 /* Try to acquire the lock: */
1623 if (try_to_take_rt_mutex(lock, current, waiter))
1624 break;
1625
1626 if (timeout && !timeout->task) {
1627 ret = -ETIMEDOUT;
1628 break;
1629 }
1630 if (signal_pending_state(state, current)) {
1631 ret = -EINTR;
1632 break;
1633 }
1634
1635 if (build_ww_mutex() && ww_ctx) {
1636 ret = __ww_mutex_check_kill(rtm, waiter, ww_ctx);
1637 if (ret)
1638 break;
1639 }
1640
1641 if (waiter == rt_mutex_top_waiter(lock))
1642 owner = rt_mutex_owner(lock);
1643 else
1644 owner = NULL;
1645 preempt_disable();
1646 raw_spin_unlock_irq(&lock->wait_lock);
1647 if (wake_q) {
1648 wake_up_q(wake_q);
1649 wake_q_init(wake_q);
1650 }
1651 preempt_enable();
1652
1653 if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
1654 rt_mutex_schedule();
1655
1656 raw_spin_lock_irq(&lock->wait_lock);
1657 set_current_state(state);
1658 }
1659
1660 __set_current_state(TASK_RUNNING);
1661 return ret;
1662 }
1663
1664 static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
1665 struct rt_mutex_base *lock,
1666 struct rt_mutex_waiter *w)
1667 {
1668 /*
1669 * If the result is not -EDEADLOCK or the caller requested
1670 * deadlock detection, nothing to do here.
1671 */
1672 if (res != -EDEADLOCK || detect_deadlock)
1673 return;
1674
1675 if (build_ww_mutex() && w->ww_ctx)
1676 return;
1677
1678 raw_spin_unlock_irq(&lock->wait_lock);
1679
1680 WARN(1, "rtmutex deadlock detected\n");
1681
1682 while (1) {
1683 set_current_state(TASK_INTERRUPTIBLE);
1684 rt_mutex_schedule();
1685 }
1686 }
1687
1688 /**
1689 * __rt_mutex_slowlock - Locking slowpath invoked with lock::wait_lock held
1690 * @lock: The rtmutex to block on
1691 * @ww_ctx: WW mutex context pointer
1692 * @state: The task state for sleeping
1693 * @chwalk: Indicator whether full or partial chainwalk is requested
1694 * @waiter: Initialized waiter for blocking
1695 * @wake_q: The wake_q to wake tasks after we release the wait_lock
1696 */
1697 static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
1698 struct ww_acquire_ctx *ww_ctx,
1699 unsigned int state,
1700 enum rtmutex_chainwalk chwalk,
1701 struct rt_mutex_waiter *waiter,
1702 struct wake_q_head *wake_q)
1703 {
1704 struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
1705 struct ww_mutex *ww = ww_container_of(rtm);
1706 int ret;
1707
1708 lockdep_assert_held(&lock->wait_lock);
1709
1710 /* Try to acquire the lock again: */
1711 if (try_to_take_rt_mutex(lock, current, NULL)) {
1712 if (build_ww_mutex() && ww_ctx) {
1713 __ww_mutex_check_waiters(rtm, ww_ctx, wake_q);
1714 ww_mutex_lock_acquired(ww, ww_ctx);
1715 }
1716 return 0;
1717 }
1718
1719 set_current_state(state);
1720
1721 trace_contention_begin(lock, LCB_F_RT);
1722
1723 ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk, wake_q);
1724 if (likely(!ret))
1725 ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter, wake_q);
1726
1727 if (likely(!ret)) {
1728 /* acquired the lock */
1729 if (build_ww_mutex() && ww_ctx) {
1730 if (!ww_ctx->is_wait_die)
1731 __ww_mutex_check_waiters(rtm, ww_ctx, wake_q);
1732 ww_mutex_lock_acquired(ww, ww_ctx);
1733 }
1734 } else {
1735 __set_current_state(TASK_RUNNING);
1736 remove_waiter(lock, waiter);
1737 rt_mutex_handle_deadlock(ret, chwalk, lock, waiter);
1738 }
1739
1740 /*
1741 * try_to_take_rt_mutex() sets the waiter bit
1742 * unconditionally. We might have to fix that up.
1743 */
1744 fixup_rt_mutex_waiters(lock, true);
1745
1746 trace_contention_end(lock, ret);
1747
1748 return ret;
1749 }
1750
1751 static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock,
1752 struct ww_acquire_ctx *ww_ctx,
1753 unsigned int state,
1754 struct wake_q_head *wake_q)
1755 {
1756 struct rt_mutex_waiter waiter;
1757 int ret;
1758
1759 rt_mutex_init_waiter(&waiter);
1760 waiter.ww_ctx = ww_ctx;
1761
1762 ret = __rt_mutex_slowlock(lock, ww_ctx, state, RT_MUTEX_MIN_CHAINWALK,
1763 &waiter, wake_q);
1764
1765 debug_rt_mutex_free_waiter(&waiter);
1766 return ret;
1767 }
1768
1769 /*
1770 * rt_mutex_slowlock - Locking slowpath invoked when fast path fails
1771 * @lock: The rtmutex to block on
1772 * @ww_ctx: WW mutex context pointer
1773 * @state: The task state for sleeping
1774 */
1775 static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
1776 struct ww_acquire_ctx *ww_ctx,
1777 unsigned int state)
1778 {
1779 DEFINE_WAKE_Q(wake_q);
1780 unsigned long flags;
1781 int ret;
1782
1783 /*
1784 * Do all pre-schedule work here, before we queue a waiter and invoke
1785 * PI -- any such work that trips on rtlock (PREEMPT_RT spinlock) would
1786 * otherwise recurse back into task_blocks_on_rt_mutex() through
1787 * rtlock_slowlock() and will then enqueue a second waiter for this
1788 * same task and things get really confusing real fast.
1789 */
1790 rt_mutex_pre_schedule();
1791
1792 /*
1793 * Technically we could use raw_spin_[un]lock_irq() here, but this can
1794 * be called in early boot if the cmpxchg() fast path is disabled
1795 * (debug, no architecture support). In this case we will acquire the
1796 * rtmutex with lock->wait_lock held. But we cannot unconditionally
1797 * enable interrupts in that early boot case. So we need to use the
1798 * irqsave/restore variants.
1799 */
1800 raw_spin_lock_irqsave(&lock->wait_lock, flags);
1801 ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state, &wake_q);
1802 preempt_disable();
1803 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
1804 wake_up_q(&wake_q);
1805 preempt_enable();
1806 rt_mutex_post_schedule();
1807
1808 return ret;
1809 }
1810
1811 static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
1812 unsigned int state)
1813 {
1814 lockdep_assert(!current->pi_blocked_on);
1815
1816 if (likely(rt_mutex_try_acquire(lock)))
1817 return 0;
1818
1819 return rt_mutex_slowlock(lock, NULL, state);
1820 }
1821 #endif /* RT_MUTEX_BUILD_MUTEX */
1822
1823 #ifdef RT_MUTEX_BUILD_SPINLOCKS
1824 /*
1825 * Functions required for spin/rw_lock substitution on RT kernels
1826 */
1827
1828 /**
1829 * rtlock_slowlock_locked - Slow path lock acquisition for RT locks
1830 * @lock: The underlying RT mutex
1831 * @wake_q: The wake_q to wake tasks after we release the wait_lock
1832 */
1833 static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock,
1834 struct wake_q_head *wake_q)
1835 __releases(&lock->wait_lock) __acquires(&lock->wait_lock)
1836 {
1837 struct rt_mutex_waiter waiter;
1838 struct task_struct *owner;
1839
1840 lockdep_assert_held(&lock->wait_lock);
1841
1842 if (try_to_take_rt_mutex(lock, current, NULL))
1843 return;
1844
1845 rt_mutex_init_rtlock_waiter(&waiter);
1846
1847 /* Save current state and set state to TASK_RTLOCK_WAIT */
1848 current_save_and_set_rtlock_wait_state();
1849
1850 trace_contention_begin(lock, LCB_F_RT);
1851
1852 task_blocks_on_rt_mutex(lock, &waiter, current, NULL, RT_MUTEX_MIN_CHAINWALK, wake_q);
1853
1854 for (;;) {
1855 /* Try to acquire the lock again */
1856 if (try_to_take_rt_mutex(lock, current, &waiter))
1857 break;
1858
1859 if (&waiter == rt_mutex_top_waiter(lock))
1860 owner = rt_mutex_owner(lock);
1861 else
1862 owner = NULL;
1863 preempt_disable();
1864 raw_spin_unlock_irq(&lock->wait_lock);
1865 wake_up_q(wake_q);
1866 wake_q_init(wake_q);
1867 preempt_enable();
1868
1869 if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner))
1870 schedule_rtlock();
1871
1872 raw_spin_lock_irq(&lock->wait_lock);
1873 set_current_state(TASK_RTLOCK_WAIT);
1874 }
1875
1876 /* Restore the task state */
1877 current_restore_rtlock_saved_state();
1878
1879 /*
1880 * try_to_take_rt_mutex() sets the waiter bit unconditionally.
1881 * We might have to fix that up:
1882 */
1883 fixup_rt_mutex_waiters(lock, true);
1884 debug_rt_mutex_free_waiter(&waiter);
1885
1886 trace_contention_end(lock, 0);
1887 }
1888
1889 static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock)
1890 {
1891 unsigned long flags;
1892 DEFINE_WAKE_Q(wake_q);
1893
1894 raw_spin_lock_irqsave(&lock->wait_lock, flags);
1895 rtlock_slowlock_locked(lock, &wake_q);
1896 preempt_disable();
1897 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
1898 wake_up_q(&wake_q);
1899 preempt_enable();
1900 }
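
/*
 * The PREEMPT_RT spinlock substitution (kernel/locking/spinlock_rt.c) is
 * expected to reach this slow path only after its inline cmpxchg fast
 * path failed, roughly (a sketch, not the exact code):
 *
 *	if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
 *		rtlock_slowlock(rtm);
 */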
1901
1902 #endif /* RT_MUTEX_BUILD_SPINLOCKS */
1903