1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * kernel/locking/mutex.c
4  *
5  * Mutexes: blocking mutual exclusion locks
6  *
7  * Started by Ingo Molnar:
8  *
9  *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
10  *
11  * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
12  * David Howells for suggestions and improvements.
13  *
14  *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
15  *    from the -rt tree, where it was originally implemented for rtmutexes
16  *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
17  *    and Sven Dietrich.
18  *
19  * Also see Documentation/locking/mutex-design.rst.
20  */
21 #include <linux/mutex.h>
22 #include <linux/ww_mutex.h>
23 #include <linux/sched/signal.h>
24 #include <linux/sched/rt.h>
25 #include <linux/sched/wake_q.h>
26 #include <linux/sched/debug.h>
27 #include <linux/export.h>
28 #include <linux/spinlock.h>
29 #include <linux/interrupt.h>
30 #include <linux/debug_locks.h>
31 #include <linux/osq_lock.h>
32 #include <linux/hung_task.h>
33 
34 #define CREATE_TRACE_POINTS
35 #include <trace/events/lock.h>
36 
37 #ifndef CONFIG_PREEMPT_RT
38 #include "mutex.h"
39 
40 #ifdef CONFIG_DEBUG_MUTEXES
41 # define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
42 #else
43 # define MUTEX_WARN_ON(cond)
44 #endif
45 
46 void
47 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
48 {
49 	atomic_long_set(&lock->owner, 0);
50 	raw_spin_lock_init(&lock->wait_lock);
51 	INIT_LIST_HEAD(&lock->wait_list);
52 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
53 	osq_lock_init(&lock->osq);
54 #endif
55 
56 	debug_mutex_init(lock, name, key);
57 }
58 EXPORT_SYMBOL(__mutex_init);
59 
60 static inline struct task_struct *__owner_task(unsigned long owner)
61 {
62 	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
63 }
64 
65 bool mutex_is_locked(struct mutex *lock)
66 {
67 	return __mutex_owner(lock) != NULL;
68 }
69 EXPORT_SYMBOL(mutex_is_locked);
70 
71 static inline unsigned long __owner_flags(unsigned long owner)
72 {
73 	return owner & MUTEX_FLAGS;
74 }
75 
76 /* Do not use the return value as a pointer directly. */
77 unsigned long mutex_get_owner(struct mutex *lock)
78 {
79 	unsigned long owner = atomic_long_read(&lock->owner);
80 
81 	return (unsigned long)__owner_task(owner);
82 }
83 
84 /*
85  * Returns: __mutex_owner(lock) on failure or NULL on success.
86  */
87 static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
88 {
89 	unsigned long owner, curr = (unsigned long)current;
90 
91 	owner = atomic_long_read(&lock->owner);
92 	for (;;) { /* must loop, can race against a flag */
93 		unsigned long flags = __owner_flags(owner);
94 		unsigned long task = owner & ~MUTEX_FLAGS;
95 
96 		if (task) {
97 			if (flags & MUTEX_FLAG_PICKUP) {
98 				if (task != curr)
99 					break;
100 				flags &= ~MUTEX_FLAG_PICKUP;
101 			} else if (handoff) {
102 				if (flags & MUTEX_FLAG_HANDOFF)
103 					break;
104 				flags |= MUTEX_FLAG_HANDOFF;
105 			} else {
106 				break;
107 			}
108 		} else {
109 			MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
110 			task = curr;
111 		}
112 
113 		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
114 			if (task == curr)
115 				return NULL;
116 			break;
117 		}
118 	}
119 
120 	return __owner_task(owner);
121 }
122 
123 /*
124  * Trylock or set HANDOFF
125  */
126 static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
127 {
128 	return !__mutex_trylock_common(lock, handoff);
129 }
130 
131 /*
132  * Actual trylock that will work on any unlocked state.
133  */
134 static inline bool __mutex_trylock(struct mutex *lock)
135 {
136 	return !__mutex_trylock_common(lock, false);
137 }
138 
139 #ifndef CONFIG_DEBUG_LOCK_ALLOC
140 /*
141  * Lockdep annotations are contained to the slow paths for simplicity.
142  * There is nothing that would stop spreading the lockdep annotations outwards
143  * except more code.
144  */
145 
146 /*
147  * Optimistic trylock that only works in the uncontended case. Make sure to
148  * follow with a __mutex_trylock() before failing.
149  */
150 static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
151 {
152 	unsigned long curr = (unsigned long)current;
153 	unsigned long zero = 0UL;
154 
155 	MUTEX_WARN_ON(lock->magic != lock);
156 
157 	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
158 		return true;
159 
160 	return false;
161 }
162 
163 static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
164 {
165 	unsigned long curr = (unsigned long)current;
166 
167 	return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
168 }
169 #endif
170 
171 static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
172 {
173 	atomic_long_or(flag, &lock->owner);
174 }
175 
176 static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
177 {
178 	atomic_long_andnot(flag, &lock->owner);
179 }
180 
181 static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
182 {
183 	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
184 }
185 
186 /*
187  * Add @waiter to a given location in the lock wait_list and set the
188  * FLAG_WAITERS flag if it's the first waiter.
189  */
190 static void
191 __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
192 		   struct list_head *list)
193 {
194 #ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
195 	hung_task_set_blocker(lock, BLOCKER_TYPE_MUTEX);
196 #endif
197 	debug_mutex_add_waiter(lock, waiter, current);
198 
199 	list_add_tail(&waiter->list, list);
200 	if (__mutex_waiter_is_first(lock, waiter))
201 		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
202 }
203 
204 static void
205 __mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
206 {
207 	list_del(&waiter->list);
208 	if (likely(list_empty(&lock->wait_list)))
209 		__mutex_clear_flag(lock, MUTEX_FLAGS);
210 
211 	debug_mutex_remove_waiter(lock, waiter, current);
212 #ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
213 	hung_task_clear_blocker();
214 #endif
215 }
216 
217 /*
218  * Give up ownership to a specific task; when @task = NULL, this is equivalent
219  * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, and preserves
220  * WAITERS. Provides RELEASE semantics like a regular unlock; the matching
221  * ACQUIRE semantics for the handoff are provided by __mutex_trylock().
222  */
223 static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
224 {
225 	unsigned long owner = atomic_long_read(&lock->owner);
226 
227 	for (;;) {
228 		unsigned long new;
229 
230 		MUTEX_WARN_ON(__owner_task(owner) != current);
231 		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
232 
233 		new = (owner & MUTEX_FLAG_WAITERS);
234 		new |= (unsigned long)task;
235 		if (task)
236 			new |= MUTEX_FLAG_PICKUP;
237 
238 		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
239 			break;
240 	}
241 }
242 
243 #ifndef CONFIG_DEBUG_LOCK_ALLOC
244 /*
245  * We split the mutex lock/unlock logic into separate fastpath and
246  * slowpath functions, to reduce the register pressure on the fastpath.
247  * We also put the fastpath first in the kernel image, to make sure the
248  * branch is predicted by the CPU as default-untaken.
249  */
250 static void __sched __mutex_lock_slowpath(struct mutex *lock);
251 
252 /**
253  * mutex_lock - acquire the mutex
254  * @lock: the mutex to be acquired
255  *
256  * Lock the mutex exclusively for this task. If the mutex is not
257  * available right now, it will sleep until it can get it.
258  *
259  * The mutex must later on be released by the same task that
260  * acquired it. Recursive locking is not allowed. The task
261  * may not exit without first unlocking the mutex. Also, kernel
262  * memory where the mutex resides must not be freed with
263  * the mutex still locked. The mutex must first be initialized
264  * (or statically defined) before it can be locked. memset()-ing
265  * the mutex to 0 is not allowed.
266  *
267  * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
268  * checks that will enforce the restrictions and will also do
269  * deadlock debugging)
270  *
271  * This function is similar to (but not equivalent to) down().
272  */
273 void __sched mutex_lock(struct mutex *lock)
274 {
275 	might_sleep();
276 
277 	if (!__mutex_trylock_fast(lock))
278 		__mutex_lock_slowpath(lock);
279 }
280 EXPORT_SYMBOL(mutex_lock);
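
/*
 * Illustrative usage sketch only; the structure and helper below are
 * hypothetical and merely show the usual lock/unlock pairing around a
 * shared-data update, in process context:
 *
 *	struct example_counter {
 *		struct mutex lock;
 *		long value;
 *	};
 *
 *	static void example_counter_add(struct example_counter *c, long delta)
 *	{
 *		mutex_lock(&c->lock);
 *		c->value += delta;
 *		mutex_unlock(&c->lock);
 *	}
 */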
281 #endif
282 
283 #include "ww_mutex.h"
284 
285 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
286 
287 /*
288  * Trylock variant that returns the owning task on failure.
289  */
290 static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
291 {
292 	return __mutex_trylock_common(lock, false);
293 }
294 
295 static inline
296 bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
297 			    struct mutex_waiter *waiter)
298 {
299 	struct ww_mutex *ww;
300 
301 	ww = container_of(lock, struct ww_mutex, base);
302 
303 	/*
304 	 * If ww->ctx is set, its contents are undefined; only by
305 	 * acquiring wait_lock is there a guarantee that they are
306 	 * not invalid when read.
307 	 *
308 	 * As such, when deadlock detection needs to be
309 	 * performed the optimistic spinning cannot be done.
310 	 *
311 	 * Check this in every inner iteration because we may
312 	 * be racing against another thread's ww_mutex_lock.
313 	 */
314 	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
315 		return false;
316 
317 	/*
318 	 * If we aren't on the wait list yet, cancel the spin
319 	 * if there are waiters. We want to avoid stealing the
320 	 * lock from a waiter with an earlier stamp, since the
321 	 * other thread may already own a lock that we also
322 	 * need.
323 	 */
324 	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
325 		return false;
326 
327 	/*
328 	 * Similarly, stop spinning if we are no longer the
329 	 * first waiter.
330 	 */
331 	if (waiter && !__mutex_waiter_is_first(lock, waiter))
332 		return false;
333 
334 	return true;
335 }
336 
337 /*
338  * Look out! "owner" is an entirely speculative pointer access and not
339  * reliable.
340  *
341  * "noinline" so that this function shows up on perf profiles.
342  */
343 static noinline
344 bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
345 			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
346 {
347 	bool ret = true;
348 
349 	lockdep_assert_preemption_disabled();
350 
351 	while (__mutex_owner(lock) == owner) {
352 		/*
353 		 * Ensure we emit the owner->on_cpu dereference _after_
354 		 * checking that lock->owner still matches owner. We have
355 		 * already disabled preemption, which is equivalent to an RCU
356 		 * read-side critical section in the optimistic spinning code,
357 		 * so the task_struct won't go away during the spinning
358 		 * period.
359 		 */
360 		barrier();
361 
362 		/*
363 		 * Use vcpu_is_preempted() to detect lock holder preemption.
364 		 */
365 		if (!owner_on_cpu(owner) || need_resched()) {
366 			ret = false;
367 			break;
368 		}
369 
370 		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
371 			ret = false;
372 			break;
373 		}
374 
375 		cpu_relax();
376 	}
377 
378 	return ret;
379 }
380 
381 /*
382  * Initial check for entering the mutex spinning loop
383  */
384 static inline int mutex_can_spin_on_owner(struct mutex *lock)
385 {
386 	struct task_struct *owner;
387 	int retval = 1;
388 
389 	lockdep_assert_preemption_disabled();
390 
391 	if (need_resched())
392 		return 0;
393 
394 	/*
395 	 * We already disabled preemption, which is equivalent to an RCU read-side
396 	 * critical section in the optimistic spinning code. Thus the task_struct
397 	 * won't go away during the spinning period.
398 	 */
399 	owner = __mutex_owner(lock);
400 	if (owner)
401 		retval = owner_on_cpu(owner);
402 
403 	/*
404 	 * If lock->owner is not set, the mutex has been released. Return true
405 	 * such that we'll trylock in the spin path, which is a faster option
406 	 * than the blocking slow path.
407 	 */
408 	return retval;
409 }
410 
411 /*
412  * Optimistic spinning.
413  *
414  * We try to spin for acquisition when we find that the lock owner
415  * is currently running on a (different) CPU and while we don't
416  * need to reschedule. The rationale is that if the lock owner is
417  * running, it is likely to release the lock soon.
418  *
419  * The mutex spinners are queued up using an MCS lock so that only one
420  * spinner can compete for the mutex. However, if mutex spinning isn't
421  * going to happen, there is no point in going through the lock/unlock
422  * overhead.
423  *
424  * Returns true when the lock was taken, otherwise false, indicating
425  * that we need to jump to the slowpath and sleep.
426  *
427  * @waiter is non-NULL if the spinner is already a waiter in the wait
428  * queue. Such a waiter-spinner will spin on the lock directly and concurrently
429  * with the spinner at the head of the OSQ, if present, until the owner is
430  * changed to itself.
431  */
432 static __always_inline bool
433 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
434 		      struct mutex_waiter *waiter)
435 {
436 	if (!waiter) {
437 		/*
438 		 * The purpose of the mutex_can_spin_on_owner() function is
439 		 * to eliminate the overhead of osq_lock() and osq_unlock()
440 		 * in case spinning isn't possible. As a waiter-spinner
441 		 * is not going to take OSQ lock anyway, there is no need
442 		 * to call mutex_can_spin_on_owner().
443 		 */
444 		if (!mutex_can_spin_on_owner(lock))
445 			goto fail;
446 
447 		/*
448 		 * In order to avoid a stampede of mutex spinners trying to
449 		 * acquire the mutex all at once, the spinners need to take an
450 		 * MCS (queued) lock first before spinning on the owner field.
451 		 */
452 		if (!osq_lock(&lock->osq))
453 			goto fail;
454 	}
455 
456 	for (;;) {
457 		struct task_struct *owner;
458 
459 		/* Try to acquire the mutex... */
460 		owner = __mutex_trylock_or_owner(lock);
461 		if (!owner)
462 			break;
463 
464 		/*
465 		 * There's an owner, wait for it to either
466 		 * release the lock or go to sleep.
467 		 */
468 		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
469 			goto fail_unlock;
470 
471 		/*
472 		 * The cpu_relax() call is a compiler barrier which forces
473 		 * everything in this loop to be re-loaded. We don't need
474 		 * memory barriers as we'll eventually observe the right
475 		 * values at the cost of a few extra spins.
476 		 */
477 		cpu_relax();
478 	}
479 
480 	if (!waiter)
481 		osq_unlock(&lock->osq);
482 
483 	return true;
484 
485 
486 fail_unlock:
487 	if (!waiter)
488 		osq_unlock(&lock->osq);
489 
490 fail:
491 	/*
492 	 * If we fell out of the spin path because of need_resched(),
493 	 * reschedule now, before we try-lock the mutex. This avoids getting
494 	 * scheduled out right after we obtained the mutex.
495 	 */
496 	if (need_resched()) {
497 		/*
498 		 * We _should_ have TASK_RUNNING here, but just in case
499 		 * we do not, make it so, otherwise we might get stuck.
500 		 */
501 		__set_current_state(TASK_RUNNING);
502 		schedule_preempt_disabled();
503 	}
504 
505 	return false;
506 }
507 #else
508 static __always_inline bool
509 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
510 		      struct mutex_waiter *waiter)
511 {
512 	return false;
513 }
514 #endif
515 
516 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
517 
518 /**
519  * mutex_unlock - release the mutex
520  * @lock: the mutex to be released
521  *
522  * Unlock a mutex that has been locked by this task previously.
523  *
524  * This function must not be used in interrupt context. Unlocking
525  * a mutex that is not locked is not allowed.
526  *
527  * The caller must ensure that the mutex stays alive until this function has
528  * returned - mutex_unlock() can NOT directly be used to release an object such
529  * that another concurrent task can free it.
530  * Mutexes are different from spinlocks & refcounts in this aspect.
531  *
532  * This function is similar to (but not equivalent to) up().
533  */
534 void __sched mutex_unlock(struct mutex *lock)
535 {
536 #ifndef CONFIG_DEBUG_LOCK_ALLOC
537 	if (__mutex_unlock_fast(lock))
538 		return;
539 #endif
540 	__mutex_unlock_slowpath(lock, _RET_IP_);
541 }
542 EXPORT_SYMBOL(mutex_unlock);
543 
544 /**
545  * ww_mutex_unlock - release the w/w mutex
546  * @lock: the mutex to be released
547  *
548  * Unlock a mutex that has been locked by this task previously with any of the
549  * ww_mutex_lock* functions (with or without an acquire context). It is
550  * forbidden to release the locks after releasing the acquire context.
551  *
552  * This function must not be used in interrupt context. Unlocking
553  * an unlocked mutex is not allowed.
554  */
555 void __sched ww_mutex_unlock(struct ww_mutex *lock)
556 {
557 	__ww_mutex_unlock(lock);
558 	mutex_unlock(&lock->base);
559 }
560 EXPORT_SYMBOL(ww_mutex_unlock);
561 
562 /*
563  * Lock a mutex (possibly interruptible), slowpath:
564  */
565 static __always_inline int __sched
566 __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
567 		    struct lockdep_map *nest_lock, unsigned long ip,
568 		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
569 {
570 	DEFINE_WAKE_Q(wake_q);
571 	struct mutex_waiter waiter;
572 	struct ww_mutex *ww;
573 	unsigned long flags;
574 	int ret;
575 
576 	if (!use_ww_ctx)
577 		ww_ctx = NULL;
578 
579 	might_sleep();
580 
581 	MUTEX_WARN_ON(lock->magic != lock);
582 
583 	ww = container_of(lock, struct ww_mutex, base);
584 	if (ww_ctx) {
585 		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
586 			return -EALREADY;
587 
588 		/*
589 		 * Reset the wounded flag after a kill. No other process can
590 		 * race and wound us here since they can't have a valid owner
591 		 * pointer if we don't have any locks held.
592 		 */
593 		if (ww_ctx->acquired == 0)
594 			ww_ctx->wounded = 0;
595 
596 #ifdef CONFIG_DEBUG_LOCK_ALLOC
597 		nest_lock = &ww_ctx->dep_map;
598 #endif
599 	}
600 
601 	preempt_disable();
602 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
603 
604 	trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
605 	if (__mutex_trylock(lock) ||
606 	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
607 		/* got the lock, yay! */
608 		lock_acquired(&lock->dep_map, ip);
609 		if (ww_ctx)
610 			ww_mutex_set_context_fastpath(ww, ww_ctx);
611 		trace_contention_end(lock, 0);
612 		preempt_enable();
613 		return 0;
614 	}
615 
616 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
617 	/*
618 	 * After waiting to acquire the wait_lock, try again.
619 	 */
620 	if (__mutex_trylock(lock)) {
621 		if (ww_ctx)
622 			__ww_mutex_check_waiters(lock, ww_ctx, &wake_q);
623 
624 		goto skip_wait;
625 	}
626 
627 	debug_mutex_lock_common(lock, &waiter);
628 	waiter.task = current;
629 	if (use_ww_ctx)
630 		waiter.ww_ctx = ww_ctx;
631 
632 	lock_contended(&lock->dep_map, ip);
633 
634 	if (!use_ww_ctx) {
635 		/* add waiting tasks to the end of the waitqueue (FIFO): */
636 		__mutex_add_waiter(lock, &waiter, &lock->wait_list);
637 	} else {
638 		/*
639 		 * Add in stamp order, waking up waiters that must kill
640 		 * themselves.
641 		 */
642 		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx, &wake_q);
643 		if (ret)
644 			goto err_early_kill;
645 	}
646 
647 	set_current_state(state);
648 	trace_contention_begin(lock, LCB_F_MUTEX);
649 	for (;;) {
650 		bool first;
651 
652 		/*
653 		 * Once we hold wait_lock, we're serialized against
654 		 * mutex_unlock() handing the lock off to us; do a trylock
655 		 * before testing the error conditions to make sure we pick up
656 		 * the handoff.
657 		 */
658 		if (__mutex_trylock(lock))
659 			goto acquired;
660 
661 		/*
662 		 * Check for signals and kill conditions while holding
663 		 * wait_lock. This ensures the lock cancellation is ordered
664 		 * against mutex_unlock() and wake-ups do not go missing.
665 		 */
666 		if (signal_pending_state(state, current)) {
667 			ret = -EINTR;
668 			goto err;
669 		}
670 
671 		if (ww_ctx) {
672 			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
673 			if (ret)
674 				goto err;
675 		}
676 
677 		raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
678 
679 		schedule_preempt_disabled();
680 
681 		first = __mutex_waiter_is_first(lock, &waiter);
682 
683 		set_current_state(state);
684 		/*
685 		 * Here we order against unlock; we must either see it change
686 		 * state back to RUNNING and fall through the next schedule(),
687 		 * or we must see its unlock and acquire.
688 		 */
689 		if (__mutex_trylock_or_handoff(lock, first))
690 			break;
691 
692 		if (first) {
693 			trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
694 			if (mutex_optimistic_spin(lock, ww_ctx, &waiter))
695 				break;
696 			trace_contention_begin(lock, LCB_F_MUTEX);
697 		}
698 
699 		raw_spin_lock_irqsave(&lock->wait_lock, flags);
700 	}
701 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
702 acquired:
703 	__set_current_state(TASK_RUNNING);
704 
705 	if (ww_ctx) {
706 		/*
707 		 * Wound-Wait; we stole the lock (!first_waiter), check the
708 		 * waiters as anyone might want to wound us.
709 		 */
710 		if (!ww_ctx->is_wait_die &&
711 		    !__mutex_waiter_is_first(lock, &waiter))
712 			__ww_mutex_check_waiters(lock, ww_ctx, &wake_q);
713 	}
714 
715 	__mutex_remove_waiter(lock, &waiter);
716 
717 	debug_mutex_free_waiter(&waiter);
718 
719 skip_wait:
720 	/* got the lock - cleanup and rejoice! */
721 	lock_acquired(&lock->dep_map, ip);
722 	trace_contention_end(lock, 0);
723 
724 	if (ww_ctx)
725 		ww_mutex_lock_acquired(ww, ww_ctx);
726 
727 	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
728 	preempt_enable();
729 	return 0;
730 
731 err:
732 	__set_current_state(TASK_RUNNING);
733 	__mutex_remove_waiter(lock, &waiter);
734 err_early_kill:
735 	trace_contention_end(lock, ret);
736 	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
737 	debug_mutex_free_waiter(&waiter);
738 	mutex_release(&lock->dep_map, ip);
739 	preempt_enable();
740 	return ret;
741 }
742 
743 static int __sched
744 __mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
745 	     struct lockdep_map *nest_lock, unsigned long ip)
746 {
747 	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
748 }
749 
750 static int __sched
751 __ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
752 		unsigned long ip, struct ww_acquire_ctx *ww_ctx)
753 {
754 	return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
755 }
756 
757 /**
758  * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
759  * @ww: mutex to lock
760  * @ww_ctx: optional w/w acquire context
761  *
762  * Trylocks a mutex with the optional acquire context; no deadlock detection is
763  * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
764  *
765  * Unlike ww_mutex_lock, no deadlock handling is performed. However, if @ww_ctx is
766  * specified, -EALREADY handling may happen in calls to ww_mutex_trylock.
767  *
768  * A mutex acquired with this function must be released with ww_mutex_unlock.
769  */
770 int ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
771 {
772 	if (!ww_ctx)
773 		return mutex_trylock(&ww->base);
774 
775 	MUTEX_WARN_ON(ww->base.magic != &ww->base);
776 
777 	/*
778 	 * Reset the wounded flag after a kill. No other process can
779 	 * race and wound us here, since they can't have a valid owner
780 	 * pointer if we don't have any locks held.
781 	 */
782 	if (ww_ctx->acquired == 0)
783 		ww_ctx->wounded = 0;
784 
785 	if (__mutex_trylock(&ww->base)) {
786 		ww_mutex_set_context_fastpath(ww, ww_ctx);
787 		mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
788 		return 1;
789 	}
790 
791 	return 0;
792 }
793 EXPORT_SYMBOL(ww_mutex_trylock);
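
/*
 * Illustrative sketch only (hypothetical helper): ww_mutex_trylock() lets a
 * task that already holds locks of the same class under @ww_ctx pick up one
 * more lock opportunistically, without blocking or -EDEADLK backoff.
 *
 *	static bool example_try_pin(struct ww_mutex *lock,
 *				    struct ww_acquire_ctx *ctx)
 *	{
 *		if (!ww_mutex_trylock(lock, ctx))
 *			return false;
 *		return true;
 *	}
 *
 * On success the lock is held and must later be released with
 * ww_mutex_unlock().
 */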
794 
795 #ifdef CONFIG_DEBUG_LOCK_ALLOC
796 void __sched
797 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
798 {
799 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
800 }
801 
802 EXPORT_SYMBOL_GPL(mutex_lock_nested);
803 
804 void __sched
805 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
806 {
807 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
808 }
809 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
810 
811 int __sched
812 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
813 {
814 	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
815 }
816 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
817 
818 int __sched
819 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
820 {
821 	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
822 }
823 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
824 
825 void __sched
826 mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
827 {
828 	int token;
829 
830 	might_sleep();
831 
832 	token = io_schedule_prepare();
833 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
834 			    subclass, NULL, _RET_IP_, NULL, 0);
835 	io_schedule_finish(token);
836 }
837 EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
838 
839 static inline int
840 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
841 {
842 #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
843 	unsigned tmp;
844 
845 	if (ctx->deadlock_inject_countdown-- == 0) {
846 		tmp = ctx->deadlock_inject_interval;
847 		if (tmp > UINT_MAX/4)
848 			tmp = UINT_MAX;
849 		else
850 			tmp = tmp*2 + tmp + tmp/2;
851 
852 		ctx->deadlock_inject_interval = tmp;
853 		ctx->deadlock_inject_countdown = tmp;
854 		ctx->contending_lock = lock;
855 
856 		ww_mutex_unlock(lock);
857 
858 		return -EDEADLK;
859 	}
860 #endif
861 
862 	return 0;
863 }
864 
865 int __sched
866 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
867 {
868 	int ret;
869 
870 	might_sleep();
871 	ret =  __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
872 			       0, _RET_IP_, ctx);
873 	if (!ret && ctx && ctx->acquired > 1)
874 		return ww_mutex_deadlock_injection(lock, ctx);
875 
876 	return ret;
877 }
878 EXPORT_SYMBOL_GPL(ww_mutex_lock);
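
/*
 * Illustrative sketch only; the class, helper and "do work" step are
 * hypothetical, and Documentation/locking/ww-mutex-design.rst has the
 * complete patterns. The point shown: acquire two mutexes of one ww class
 * under an acquire context and back off on -EDEADLK.
 *
 *	DEFINE_WW_CLASS(example_ww_class);
 *
 *	static int example_lock_pair(struct ww_mutex *a, struct ww_mutex *b)
 *	{
 *		struct ww_acquire_ctx ctx;
 *		int ret;
 *
 *		ww_acquire_init(&ctx, &example_ww_class);
 *		ret = ww_mutex_lock(a, &ctx);
 *		if (!ret) {
 *			ret = ww_mutex_lock(b, &ctx);
 *			if (ret == -EDEADLK) {
 *				ww_mutex_unlock(a);
 *				ww_mutex_lock_slow(b, &ctx);
 *				ret = ww_mutex_lock(a, &ctx);
 *				if (ret)
 *					ww_mutex_unlock(b);
 *			}
 *		}
 *		if (!ret) {
 *			ww_acquire_done(&ctx);
 *			... work with both locks held ...
 *			ww_mutex_unlock(b);
 *			ww_mutex_unlock(a);
 *		}
 *		ww_acquire_fini(&ctx);
 *		return ret;
 *	}
 *
 * A caller that still sees -EDEADLK would typically retry the whole sequence.
 */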
879 
880 int __sched
881 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
882 {
883 	int ret;
884 
885 	might_sleep();
886 	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
887 			      0, _RET_IP_, ctx);
888 
889 	if (!ret && ctx && ctx->acquired > 1)
890 		return ww_mutex_deadlock_injection(lock, ctx);
891 
892 	return ret;
893 }
894 EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
895 
896 #endif
897 
898 /*
899  * Release the lock, slowpath:
900  */
901 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
902 {
903 	struct task_struct *next = NULL;
904 	DEFINE_WAKE_Q(wake_q);
905 	unsigned long owner;
906 	unsigned long flags;
907 
908 	mutex_release(&lock->dep_map, ip);
909 
910 	/*
911 	 * Release the lock before (potentially) taking the spinlock such that
912 	 * other contenders can get on with things ASAP.
913 	 *
914 	 * Except when HANDOFF is set; in that case we must not clear the owner
915 	 * field, but instead set it to the top waiter.
916 	 */
917 	owner = atomic_long_read(&lock->owner);
918 	for (;;) {
919 		MUTEX_WARN_ON(__owner_task(owner) != current);
920 		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);
921 
922 		if (owner & MUTEX_FLAG_HANDOFF)
923 			break;
924 
925 		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
926 			if (owner & MUTEX_FLAG_WAITERS)
927 				break;
928 
929 			return;
930 		}
931 	}
932 
933 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
934 	debug_mutex_unlock(lock);
935 	if (!list_empty(&lock->wait_list)) {
936 		/* get the first entry from the wait-list: */
937 		struct mutex_waiter *waiter =
938 			list_first_entry(&lock->wait_list,
939 					 struct mutex_waiter, list);
940 
941 		next = waiter->task;
942 
943 		debug_mutex_wake_waiter(lock, waiter);
944 		wake_q_add(&wake_q, next);
945 	}
946 
947 	if (owner & MUTEX_FLAG_HANDOFF)
948 		__mutex_handoff(lock, next);
949 
950 	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
951 }
952 
953 #ifndef CONFIG_DEBUG_LOCK_ALLOC
954 /*
955  * Here come the less common (and hence less performance-critical) APIs:
956  * mutex_lock_interruptible() and mutex_trylock().
957  */
958 static noinline int __sched
959 __mutex_lock_killable_slowpath(struct mutex *lock);
960 
961 static noinline int __sched
962 __mutex_lock_interruptible_slowpath(struct mutex *lock);
963 
964 /**
965  * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
966  * @lock: The mutex to be acquired.
967  *
968  * Lock the mutex like mutex_lock().  If a signal is delivered while the
969  * process is sleeping, this function will return without acquiring the
970  * mutex.
971  *
972  * Context: Process context.
973  * Return: 0 if the lock was successfully acquired or %-EINTR if a
974  * signal arrived.
975  */
976 int __sched mutex_lock_interruptible(struct mutex *lock)
977 {
978 	might_sleep();
979 
980 	if (__mutex_trylock_fast(lock))
981 		return 0;
982 
983 	return __mutex_lock_interruptible_slowpath(lock);
984 }
985 
986 EXPORT_SYMBOL(mutex_lock_interruptible);
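
/*
 * Illustrative sketch only (hypothetical helper): a syscall-path caller that
 * would rather return -EINTR than keep sleeping once a signal is pending.
 * When mutex_lock_interruptible() returns non-zero the mutex is not held.
 *
 *	static int example_locked_update(struct mutex *lock)
 *	{
 *		int ret;
 *
 *		ret = mutex_lock_interruptible(lock);
 *		if (ret)
 *			return ret;
 *		... critical section ...
 *		mutex_unlock(lock);
 *		return 0;
 *	}
 */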
987 
988 /**
989  * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
990  * @lock: The mutex to be acquired.
991  *
992  * Lock the mutex like mutex_lock().  If a signal which will be fatal to
993  * the current process is delivered while the process is sleeping, this
994  * function will return without acquiring the mutex.
995  *
996  * Context: Process context.
997  * Return: 0 if the lock was successfully acquired or %-EINTR if a
998  * fatal signal arrived.
999  */
1000 int __sched mutex_lock_killable(struct mutex *lock)
1001 {
1002 	might_sleep();
1003 
1004 	if (__mutex_trylock_fast(lock))
1005 		return 0;
1006 
1007 	return __mutex_lock_killable_slowpath(lock);
1008 }
1009 EXPORT_SYMBOL(mutex_lock_killable);
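
/*
 * Illustrative sketch only (hypothetical helper): same shape as the
 * interruptible variant above, but only a fatal signal aborts the wait,
 * which suits callers that cannot sensibly restart on ordinary signals.
 *
 *	static int example_locked_update_killable(struct mutex *lock)
 *	{
 *		int ret;
 *
 *		ret = mutex_lock_killable(lock);
 *		if (ret)
 *			return ret;
 *		... critical section ...
 *		mutex_unlock(lock);
 *		return 0;
 *	}
 */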
1010 
1011 /**
1012  * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
1013  * @lock: The mutex to be acquired.
1014  *
1015  * Lock the mutex like mutex_lock().  While the task is waiting for this
1016  * mutex, it will be accounted as being in the IO wait state by the
1017  * scheduler.
1018  *
1019  * Context: Process context.
1020  */
1021 void __sched mutex_lock_io(struct mutex *lock)
1022 {
1023 	int token;
1024 
1025 	token = io_schedule_prepare();
1026 	mutex_lock(lock);
1027 	io_schedule_finish(token);
1028 }
1029 EXPORT_SYMBOL_GPL(mutex_lock_io);
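
/*
 * Illustrative sketch only (hypothetical helper): sleeping on a mutex that
 * guards an I/O resource, with the wait accounted as iowait.
 *
 *	static void example_issue_io(struct mutex *io_lock)
 *	{
 *		mutex_lock_io(io_lock);
 *		... submit and wait for I/O ...
 *		mutex_unlock(io_lock);
 *	}
 */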
1030 
1031 static noinline void __sched
1032 __mutex_lock_slowpath(struct mutex *lock)
1033 {
1034 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
1035 }
1036 
1037 static noinline int __sched
1038 __mutex_lock_killable_slowpath(struct mutex *lock)
1039 {
1040 	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
1041 }
1042 
1043 static noinline int __sched
1044 __mutex_lock_interruptible_slowpath(struct mutex *lock)
1045 {
1046 	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
1047 }
1048 
1049 static noinline int __sched
1050 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1051 {
1052 	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
1053 			       _RET_IP_, ctx);
1054 }
1055 
1056 static noinline int __sched
1057 __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
1058 					    struct ww_acquire_ctx *ctx)
1059 {
1060 	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
1061 			       _RET_IP_, ctx);
1062 }
1063 
1064 #endif
1065 
1066 /**
1067  * mutex_trylock - try to acquire the mutex, without waiting
1068  * @lock: the mutex to be acquired
1069  *
1070  * Try to acquire the mutex atomically. Returns 1 if the mutex
1071  * has been acquired successfully, and 0 on contention.
1072  *
1073  * NOTE: this function follows the spin_trylock() convention, so
1074  * it is negated from the down_trylock() return values! Be careful
1075  * about this when converting semaphore users to mutexes.
1076  *
1077  * This function must not be used in interrupt context. The
1078  * mutex must be released by the same task that acquired it.
1079  */
1080 int __sched mutex_trylock(struct mutex *lock)
1081 {
1082 	bool locked;
1083 
1084 	MUTEX_WARN_ON(lock->magic != lock);
1085 
1086 	locked = __mutex_trylock(lock);
1087 	if (locked)
1088 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1089 
1090 	return locked;
1091 }
1092 EXPORT_SYMBOL(mutex_trylock);
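
/*
 * Illustrative sketch only (hypothetical helper): opportunistic work that is
 * simply skipped when the mutex is contended; note the spin_trylock()-style
 * return convention described above (1 on success, 0 on contention).
 *
 *	static bool example_try_flush(struct mutex *lock)
 *	{
 *		if (!mutex_trylock(lock))
 *			return false;
 *		... flush work ...
 *		mutex_unlock(lock);
 *		return true;
 *	}
 */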
1093 
1094 #ifndef CONFIG_DEBUG_LOCK_ALLOC
1095 int __sched
1096 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1097 {
1098 	might_sleep();
1099 
1100 	if (__mutex_trylock_fast(&lock->base)) {
1101 		if (ctx)
1102 			ww_mutex_set_context_fastpath(lock, ctx);
1103 		return 0;
1104 	}
1105 
1106 	return __ww_mutex_lock_slowpath(lock, ctx);
1107 }
1108 EXPORT_SYMBOL(ww_mutex_lock);
1109 
1110 int __sched
1111 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1112 {
1113 	might_sleep();
1114 
1115 	if (__mutex_trylock_fast(&lock->base)) {
1116 		if (ctx)
1117 			ww_mutex_set_context_fastpath(lock, ctx);
1118 		return 0;
1119 	}
1120 
1121 	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
1122 }
1123 EXPORT_SYMBOL(ww_mutex_lock_interruptible);
1124 
1125 #endif /* !CONFIG_DEBUG_LOCK_ALLOC */
1126 #endif /* !CONFIG_PREEMPT_RT */
1127 
1128 EXPORT_TRACEPOINT_SYMBOL_GPL(contention_begin);
1129 EXPORT_TRACEPOINT_SYMBOL_GPL(contention_end);
1130 
1131 /**
1132  * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
1133  * @cnt: the atomic counter to decrement
1134  * @lock: the mutex to return holding if we dec to 0
1135  *
1136  * Return: 1 if we decremented to 0, in which case the mutex is held; 0 otherwise.
1137  */
1138 int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
1139 {
1140 	/* dec if we can't possibly hit 0 */
1141 	if (atomic_add_unless(cnt, -1, 1))
1142 		return 0;
1143 	/* we might hit 0, so take the lock */
1144 	mutex_lock(lock);
1145 	if (!atomic_dec_and_test(cnt)) {
1146 		/* when we actually did the dec, we didn't hit 0 */
1147 		mutex_unlock(lock);
1148 		return 0;
1149 	}
1150 	/* we hit 0, and we hold the lock */
1151 	return 1;
1152 }
1153 EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
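
/*
 * Illustrative sketch only (hypothetical object, list lock and helper): the
 * usual pattern of dropping a reference and, only when the last reference
 * went away, unlinking and freeing the object under the mutex. Note that the
 * mutex lives outside the object being freed.
 *
 *	struct example_obj {
 *		atomic_t refcnt;
 *		struct list_head node;
 *	};
 *
 *	static void example_put(struct example_obj *obj, struct mutex *list_lock)
 *	{
 *		if (!atomic_dec_and_mutex_lock(&obj->refcnt, list_lock))
 *			return;
 *		list_del(&obj->node);
 *		mutex_unlock(list_lock);
 *		kfree(obj);
 *	}
 */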
1154