// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/locking/mutex-design.rst.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>
#include <linux/hung_task.h>

#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>

#ifndef CONFIG_PREEMPT_RT
#include "mutex.h"

#ifdef CONFIG_DEBUG_MUTEXES
# define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
#else
# define MUTEX_WARN_ON(cond)
#endif

static void __mutex_init_generic(struct mutex *lock)
{
	atomic_long_set(&lock->owner, 0);
	scoped_guard (raw_spinlock_init, &lock->wait_lock) {
		lock->first_waiter = NULL;
	}
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif
	debug_mutex_init(lock);
}

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

bool mutex_is_locked(struct mutex *lock)
{
	return __mutex_owner(lock) != NULL;
}
EXPORT_SYMBOL(mutex_is_locked);

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}

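/*
 * Summary note (describes existing behaviour, added for orientation):
 * lock->owner packs the owning task's task_struct pointer together with
 * the MUTEX_FLAG_* bits in the pointer's low bits (task_struct alignment
 * leaves them free), which is why the helpers above mask with MUTEX_FLAGS
 * to split the word into its task and flag parts.
 */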

/* Do not use the return value as a pointer directly. */
unsigned long mutex_get_owner(struct mutex *lock)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	return (unsigned long)__owner_task(owner);
}

/*
 * Returns: __mutex_owner(lock) on failure or NULL on success.
 */
static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (flags & MUTEX_FLAG_PICKUP) {
				if (task != curr)
					break;
				flags &= ~MUTEX_FLAG_PICKUP;
			} else if (handoff) {
				if (flags & MUTEX_FLAG_HANDOFF)
					break;
				flags |= MUTEX_FLAG_HANDOFF;
			} else {
				break;
			}
		} else {
			MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
			task = curr;
		}

		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
			if (task == curr)
				return NULL;
			break;
		}
	}

	return __owner_task(owner);
}

/*
 * Trylock or set HANDOFF
 */
static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
{
	return !__mutex_trylock_common(lock, handoff);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_common(lock, false);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */
void mutex_init_generic(struct mutex *lock)
{
	__mutex_init_generic(lock);
}
EXPORT_SYMBOL(mutex_init_generic);

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
	__cond_acquires(true, lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	MUTEX_WARN_ON(lock->magic != lock);

	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
	__cond_releases(true, lock)
{
	unsigned long curr = (unsigned long)current;

	return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
}

#else /* !CONFIG_DEBUG_LOCK_ALLOC */

void mutex_init_lockdep(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	__mutex_init_generic(lock);

	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
}
EXPORT_SYMBOL(mutex_init_lockdep);
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

/*
 * Add @waiter to the @lock wait_list and set the FLAG_WAITERS flag if it's
 * the first waiter.
 *
 * When @pos is non-NULL, @waiter is added before the waiter indicated by
 * @pos. Otherwise @waiter is added to the tail of the list.
 */
static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
		   struct mutex_waiter *pos)
	__must_hold(&lock->wait_lock)
{
	struct mutex_waiter *first = lock->first_waiter;

	hung_task_set_blocker(lock, BLOCKER_TYPE_MUTEX);
	debug_mutex_add_waiter(lock, waiter, current);

	if (pos) {
		/*
		 * Insert @waiter before @pos.
		 */
		list_add_tail(&waiter->list, &pos->list);
		/*
		 * If @pos == @first, then @waiter will be the new first.
		 */
		if (pos == first)
			lock->first_waiter = waiter;
		return;
	}

	if (first) {
		list_add_tail(&waiter->list, &first->list);
		return;
	}

	INIT_LIST_HEAD(&waiter->list);
	lock->first_waiter = waiter;
	__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

static void
__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
	__must_hold(&lock->wait_lock)
{
	if (list_empty(&waiter->list)) {
		__mutex_clear_flag(lock, MUTEX_FLAGS);
		lock->first_waiter = NULL;
	} else {
		if (lock->first_waiter == waiter)
			lock->first_waiter = list_next_entry(waiter, list);
		list_del(&waiter->list);
	}

	debug_mutex_remove_waiter(lock, waiter, current);
	hung_task_clear_blocker();
}

/*
 * Give up ownership to a specific task; when @task = NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock; the
 * __mutex_trylock() provides matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long new;

		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
			break;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock)
	__acquires(lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
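
/*
 * Minimal usage sketch (illustrative only, not part of this file; the
 * my_dev/my_dev_add names are hypothetical):
 *
 *	struct my_dev {
 *		struct mutex lock;
 *		u64 count;
 *	};
 *
 *	static void my_dev_add(struct my_dev *dev, u64 n)
 *	{
 *		mutex_lock(&dev->lock);
 *		dev->count += n;
 *		mutex_unlock(&dev->lock);
 *	}
 *
 * The unlock must come from the same task; the fastpath above only covers
 * the uncontended case, everything else falls into the slowpath.
 */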
#endif

#include "ww_mutex.h"

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	return __mutex_trylock_common(lock, false);
}

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set the contents are undefined; only by
	 * acquiring wait_lock is there a guarantee that they are
	 * valid when read.
	 *
	 * As such, when deadlock detection needs to be
	 * performed the optimistic spinning cannot be done.
	 *
	 * Check this in every inner iteration because we may
	 * be racing against another thread's ww_mutex_lock.
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin
	 * if there are waiters. We want to avoid stealing the
	 * lock from a waiter with an earlier stamp, since the
	 * other thread may already own a lock that we also
	 * need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the
	 * first waiter.
	 */
	if (waiter && data_race(lock->first_waiter != waiter))
		return false;

	return true;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	lockdep_assert_preemption_disabled();

	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking lock->owner still matches owner. And we already
		 * disabled preemption, which is equivalent to the RCU
		 * read-side critical section in optimistic spinning code.
		 * Thus the task_struct won't go away during the spinning
		 * period.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted to detect lock holder preemption issue.
		 */
		if (!owner_on_cpu(owner) || need_resched()) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	lockdep_assert_preemption_disabled();

	if (need_resched())
		return 0;

	/*
	 * We already disabled preemption, which is equivalent to the RCU
	 * read-side critical section in optimistic spinning code. Thus the
	 * task_struct won't go away during the spinning period.
	 */
	owner = __mutex_owner(lock);
	if (owner)
		retval = owner_on_cpu(owner);

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;

fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
	__releases(lock);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * The caller must ensure that the mutex stays alive until this function has
 * returned - mutex_unlock() can NOT directly be used to release an object such
 * that another concurrent task can free it.
 * Mutexes are different from spinlocks & refcounts in this aspect.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);
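
/*
 * Hedged illustration of the lifetime rule above (all names hypothetical).
 * The pattern below is broken: task B may free @obj while task A's
 * mutex_unlock() is still operating on obj->lock internals:
 *
 *	// Task A			// Task B
 *	mutex_lock(&obj->lock);
 *	...
 *	mutex_unlock(&obj->lock);	mutex_lock(&obj->lock);
 *					...
 *					mutex_unlock(&obj->lock);
 *					kfree(obj);
 *
 * Keep the object alive until all lockers are done, e.g. with a reference
 * count (see atomic_dec_and_mutex_lock() at the bottom of this file).
 */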

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
	__no_context_analysis
{
	__ww_mutex_unlock(lock);
	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
	__cond_acquires(0, lock)
{
	DEFINE_WAKE_Q(wake_q);
	struct mutex_waiter waiter;
	struct ww_mutex *ww;
	unsigned long flags;
	int ret;

	if (!use_ww_ctx)
		ww_ctx = NULL;

	might_sleep();

	MUTEX_WARN_ON(lock->magic != lock);

	ww = container_of(lock, struct ww_mutex, base);
	if (ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
		nest_lock = &ww_ctx->dep_map;
#endif
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		trace_contention_end(lock, 0);
		preempt_enable();
		return 0;
	}

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (ww_ctx)
			__ww_mutex_check_waiters(lock, ww_ctx, &wake_q);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);
	waiter.task = current;
	if (use_ww_ctx)
		waiter.ww_ctx = ww_ctx;

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		__mutex_add_waiter(lock, &waiter, NULL);
	} else {
		/*
		 * Add in stamp order, waking up waiters that must kill
		 * themselves.
		 */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx, &wake_q);
		if (ret)
			goto err_early_kill;
	}

	raw_spin_lock(&current->blocked_lock);
	__set_task_blocked_on(current, lock);
	set_current_state(state);
	trace_contention_begin(lock, LCB_F_MUTEX);
	for (;;) {
		bool first;

		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us; do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			break;

		raw_spin_unlock(&current->blocked_lock);
		/*
		 * Check for signals and kill conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			goto err;
		}

		if (ww_ctx) {
			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);

		schedule_preempt_disabled();

		first = lock->first_waiter == &waiter;

		raw_spin_lock_irqsave(&lock->wait_lock, flags);
		raw_spin_lock(&current->blocked_lock);
		/*
		 * As we have likely been woken up by the task that cleared
		 * our blocked_on state, re-set it to the lock we are trying
		 * to acquire.
		 */
		__set_task_blocked_on(current, lock);
		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock_or_handoff(lock, first))
			break;

		if (first) {
			bool opt_acquired;

			/*
			 * mutex_optimistic_spin() can call schedule(), so
			 * we need to release these locks before calling it,
			 * and clear blocked_on so we don't become unselectable
			 * to run.
			 */
			__clear_task_blocked_on(current, lock);
			raw_spin_unlock(&current->blocked_lock);
			raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

			trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
			opt_acquired = mutex_optimistic_spin(lock, ww_ctx, &waiter);

			raw_spin_lock_irqsave(&lock->wait_lock, flags);
			raw_spin_lock(&current->blocked_lock);
			__set_task_blocked_on(current, lock);

			if (opt_acquired)
				break;
			trace_contention_begin(lock, LCB_F_MUTEX);
		}
	}
	__clear_task_blocked_on(current, lock);
	__set_current_state(TASK_RUNNING);
	raw_spin_unlock(&current->blocked_lock);

	if (ww_ctx) {
		/*
		 * Wound-Wait: we stole the lock (!first_waiter); check the
		 * waiters, as anyone might want to wound us.
		 */
		if (!ww_ctx->is_wait_die && lock->first_waiter != &waiter)
			__ww_mutex_check_waiters(lock, ww_ctx, &wake_q);
	}

	__mutex_remove_waiter(lock, &waiter);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);
	trace_contention_end(lock, 0);

	if (ww_ctx)
		ww_mutex_lock_acquired(ww, ww_ctx);

	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
	preempt_enable();
	return 0;

err:
	clear_task_blocked_on(current, lock);
	__set_current_state(TASK_RUNNING);
	__mutex_remove_waiter(lock, &waiter);
err_early_kill:
	WARN_ON(get_task_blocked_on(current));
	trace_contention_end(lock, ret);
	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, ip);
	preempt_enable();
	return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
	__cond_acquires(0, lock)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
		unsigned long ip, struct ww_acquire_ctx *ww_ctx)
	__cond_acquires(0, lock)
{
	return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
}

/**
 * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
 * @ww: mutex to lock
 * @ww_ctx: optional w/w acquire context
 *
 * Trylocks a mutex with the optional acquire context; no deadlock detection is
 * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
 *
 * Unlike ww_mutex_lock, no deadlock handling is performed. However, if @ww_ctx
 * is specified, -EALREADY handling may happen in calls to ww_mutex_trylock.
 *
 * A mutex acquired with this function must be released with ww_mutex_unlock.
 */
int ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx)
		return mutex_trylock(&ww->base);

	MUTEX_WARN_ON(ww->base.magic != &ww->base);

	/*
	 * Reset the wounded flag after a kill. No other process can
	 * race and wound us here, since they can't have a valid owner
	 * pointer if we don't have any locks held.
	 */
	if (ww_ctx->acquired == 0)
		ww_ctx->wounded = 0;

	if (__mutex_trylock(&ww->base)) {
		ww_mutex_set_context_fastpath(ww, ww_ctx);
		mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(ww_mutex_trylock);
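
/*
 * Minimal sketch of mixing trylock into a w/w acquire sequence (hypothetical
 * names; assumes a ww_class defined elsewhere and ignores ww_mutex_lock()'s
 * -EDEADLK/-EALREADY returns for brevity):
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &my_ww_class);
 *	ww_mutex_lock(&a->lock, &ctx);
 *	if (!ww_mutex_trylock(&b->lock, &ctx)) {
 *		// contended: back off, or fall back to ww_mutex_lock()
 *		// and its wait/die / wound/wait protocol
 *	}
 *	...
 *	ww_mutex_unlock(&b->lock);
 *	ww_mutex_unlock(&a->lock);
 *	ww_acquire_fini(&ctx);
 */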

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
	__acquire(lock);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
	__acquire(lock);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
_mutex_lock_killable(struct mutex *lock, unsigned int subclass,
		     struct lockdep_map *nest)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_killable);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
	__acquire(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
	__cond_releases(nonzero, lock)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, _RET_IP_, ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, _RET_IP_, ctx);

	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
	__releases(lock)
{
	struct task_struct *next = NULL;
	struct mutex_waiter *waiter;
	DEFINE_WAKE_Q(wake_q);
	unsigned long owner;
	unsigned long flags;

	mutex_release(&lock->dep_map, ip);
	__release(lock);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF, in that case we must not clear the owner field,
	 * but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}
	}

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	debug_mutex_unlock(lock);
	waiter = lock->first_waiter;
	if (waiter) {
		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		set_task_blocked_on_waking(next, lock);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock().  If a signal is delivered while the
 * process is sleeping, this function will return without acquiring the
 * mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * signal arrived.
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
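
/*
 * Typical call pattern (sketch, hypothetical names): check the return
 * value and bail out without touching the protected data; callers often
 * translate the interruption into -ERESTARTSYS for userspace:
 *
 *	ret = mutex_lock_interruptible(&dev->lock);
 *	if (ret)
 *		return ret;	// interrupted, lock NOT held
 *	...
 *	mutex_unlock(&dev->lock);
 */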

/**
 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock().  If a signal which will be fatal to
 * the current process is delivered while the process is sleeping, this
 * function will return without acquiring the mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * fatal signal arrived.
 */
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

/**
 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock().  While the task is waiting for this
 * mutex, it will be accounted as being in the IO wait state by the
 * scheduler.
 *
 * Context: Process context.
 */
void __sched mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();
	mutex_lock(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
	__acquires(lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
	__acquire(lock);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
	__cond_acquires(0, lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
	__cond_acquires(0, lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
	__cond_acquires(0, lock)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
	__cond_acquires(0, lock)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

#endif

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	MUTEX_WARN_ON(lock->magic != lock);
	return __mutex_trylock(lock);
}
EXPORT_SYMBOL(mutex_trylock);
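
/*
 * Sketch of the spin_trylock()-style convention noted above (hypothetical
 * names): nonzero means the lock is now held, 0 means it is not:
 *
 *	if (mutex_trylock(&dev->lock)) {
 *		...				// got it, must unlock
 *		mutex_unlock(&dev->lock);
 *	} else {
 *		...				// contended, lock NOT held
 *	}
 */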
#else
int __sched _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock)
{
	bool locked;

	MUTEX_WARN_ON(lock->magic != lock);
	locked = __mutex_trylock(lock);
	if (locked)
		mutex_acquire_nest(&lock->dep_map, 0, 1, nest_lock, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(_mutex_trylock_nest_lock);
#endif

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
#endif /* !CONFIG_PREEMPT_RT */

EXPORT_TRACEPOINT_SYMBOL_GPL(contention_begin);
EXPORT_TRACEPOINT_SYMBOL_GPL(contention_end);

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
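
/*
 * Sketch of the intended use (hypothetical names): drop a reference and,
 * only when it was the last one, tear the object down under @lock:
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refs, &obj_list_lock)) {
 *		list_del(&obj->node);	// last reference: unlink under the lock
 *		mutex_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */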