/* xref: /linux/kernel/locking/ww_mutex.h (revision 892c894b4ba4e4eb835f99de6fe7c41871e6d4f8) */
/* SPDX-License-Identifier: GPL-2.0-only */
2 
#ifndef WW_RT

/*
 * Aliases so the common ww_mutex code below can operate on either a
 * regular mutex (!WW_RT) or an rt_mutex (WW_RT).
 */
#define MUTEX		mutex
#define MUTEX_WAITER	mutex_waiter
#define WAIT_LOCK	wait_lock
/*
 * !WW_RT wait-list layout: a circular doubly linked list with an external
 * pointer to the first (oldest, smallest stamp) waiter:
 *
 *           +--------+
 *           | first  |
 *           +--------+
 *                |
 *                v
 *  +----+     +----+     +----+
 *  | W3 | <-> | W1 | <-> | W2 |
 *  +----+     +----+     +----+
 *    ^                     ^
 *    +---------------------+
 */

static inline struct mutex_waiter *
__ww_waiter_first(struct mutex *lock)
	__must_hold(&lock->wait_lock)
{
	/* NULL when the wait list is empty, see __ww_waiter_last(). */
	return lock->first_waiter;
}
28 
/*
 * for (cur = __ww_waiter_first(); cur; cur = __ww_waiter_next())
 *
 * Should iterate like: W1, W2, W3
 */
static inline struct mutex_waiter *
__ww_waiter_next(struct mutex *lock, struct mutex_waiter *w)
	__must_hold(&lock->wait_lock)
{
	w = list_next_entry(w, list);
	/*
	 * The list is circular; terminate if the next entry is the first
	 * again, that has already been observed.
	 */
	if (lock->first_waiter == w)
		return NULL;

	return w;
}
48 
/*
 * for (cur = __ww_waiter_last(); cur; cur = __ww_waiter_prev())
 *
 * Should iterate like: W3, W2, W1
 */
static inline struct mutex_waiter *
__ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)
	__must_hold(&lock->wait_lock)
{
	/*
	 * Terminate at the first entry; on the circular list the previous
	 * entry of first is the last, and that has already been observed.
	 */
	if (lock->first_waiter == w)
		return NULL;

	return list_prev_entry(w, list);
}
67 
68 static inline struct mutex_waiter *
__ww_waiter_last(struct mutex * lock)69 __ww_waiter_last(struct mutex *lock)
70 	__must_hold(&lock->wait_lock)
71 {
72 	struct mutex_waiter *w = lock->first_waiter;
73 
74 	if (w)
75 		w = list_prev_entry(w, list);
76 	return w;
77 }
78 
static inline void
__ww_waiter_add(struct mutex *lock, struct mutex_waiter *waiter, struct mutex_waiter *pos)
	__must_hold(&lock->wait_lock)
{
	/* Delegate to the mutex code; @pos semantics are those of __mutex_add_waiter(). */
	__mutex_add_waiter(lock, waiter, pos);
}
85 
static inline struct task_struct *
__ww_mutex_owner(struct mutex *lock)
{
	/* May be NULL; see the !owner handling in __ww_mutex_wound(). */
	return __mutex_owner(lock);
}
91 
static inline bool
__ww_mutex_has_waiters(struct mutex *lock)
{
	/* Lockless read of the MUTEX_FLAG_WAITERS bit in the owner word. */
	return atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS;
}
97 
static inline void lock_wait_lock(struct mutex *lock, unsigned long *flags)
	__acquires(&lock->wait_lock)
{
	/* IRQ-safe; the saved interrupt state is returned via @flags. */
	raw_spin_lock_irqsave(&lock->wait_lock, *flags);
}
103 
static inline void unlock_wait_lock(struct mutex *lock, unsigned long *flags)
	__releases(&lock->wait_lock)
{
	/* Restores the interrupt state saved by lock_wait_lock(). */
	raw_spin_unlock_irqrestore(&lock->wait_lock, *flags);
}
109 
static inline void lockdep_assert_wait_lock_held(struct mutex *lock)
	__must_hold(&lock->wait_lock)
{
	/* Runtime (lockdep) counterpart of the __must_hold() annotation. */
	lockdep_assert_held(&lock->wait_lock);
}
115 
#else /* WW_RT */

/* rt_mutex flavour of the type/member aliases used by the common code. */
#define MUTEX		rt_mutex
#define MUTEX_WAITER	rt_mutex_waiter
#define WAIT_LOCK	rtmutex.wait_lock
121 
122 static inline struct rt_mutex_waiter *
__ww_waiter_first(struct rt_mutex * lock)123 __ww_waiter_first(struct rt_mutex *lock)
124 	__must_hold(&lock->rtmutex.wait_lock)
125 {
126 	struct rb_node *n = rb_first(&lock->rtmutex.waiters.rb_root);
127 	if (!n)
128 		return NULL;
129 	return rb_entry(n, struct rt_mutex_waiter, tree.entry);
130 }
131 
132 static inline struct rt_mutex_waiter *
__ww_waiter_next(struct rt_mutex * lock,struct rt_mutex_waiter * w)133 __ww_waiter_next(struct rt_mutex *lock, struct rt_mutex_waiter *w)
134 {
135 	struct rb_node *n = rb_next(&w->tree.entry);
136 	if (!n)
137 		return NULL;
138 	return rb_entry(n, struct rt_mutex_waiter, tree.entry);
139 }
140 
141 static inline struct rt_mutex_waiter *
__ww_waiter_prev(struct rt_mutex * lock,struct rt_mutex_waiter * w)142 __ww_waiter_prev(struct rt_mutex *lock, struct rt_mutex_waiter *w)
143 {
144 	struct rb_node *n = rb_prev(&w->tree.entry);
145 	if (!n)
146 		return NULL;
147 	return rb_entry(n, struct rt_mutex_waiter, tree.entry);
148 }
149 
150 static inline struct rt_mutex_waiter *
__ww_waiter_last(struct rt_mutex * lock)151 __ww_waiter_last(struct rt_mutex *lock)
152 	__must_hold(&lock->rtmutex.wait_lock)
153 {
154 	struct rb_node *n = rb_last(&lock->rtmutex.waiters.rb_root);
155 	if (!n)
156 		return NULL;
157 	return rb_entry(n, struct rt_mutex_waiter, tree.entry);
158 }
159 
static inline void
__ww_waiter_add(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct rt_mutex_waiter *pos)
	/* Intentionally a no-op: RT unconditionally adds the waiter first and then removes it on error. */
{
	/* RT unconditionally adds the waiter first and then removes it on error */
}
165 
static inline struct task_struct *
__ww_mutex_owner(struct rt_mutex *lock)
{
	/* Thin wrapper over the rt_mutex owner accessor. */
	return rt_mutex_owner(&lock->rtmutex);
}
171 
static inline bool
__ww_mutex_has_waiters(struct rt_mutex *lock)
	__must_hold(&lock->rtmutex.wait_lock)
{
	/* Unlike the !WW_RT variant this needs wait_lock, hence __must_hold(). */
	return rt_mutex_has_waiters(&lock->rtmutex);
}
178 
static inline void lock_wait_lock(struct rt_mutex *lock, unsigned long *flags)
	__acquires(&lock->rtmutex.wait_lock)
{
	/* IRQ-safe; the saved interrupt state is returned via @flags. */
	raw_spin_lock_irqsave(&lock->rtmutex.wait_lock, *flags);
}
184 
static inline void unlock_wait_lock(struct rt_mutex *lock, unsigned long *flags)
	__releases(&lock->rtmutex.wait_lock)
{
	/* Restores the interrupt state saved by lock_wait_lock(). */
	raw_spin_unlock_irqrestore(&lock->rtmutex.wait_lock, *flags);
}
190 
static inline void lockdep_assert_wait_lock_held(struct rt_mutex *lock)
	__must_hold(&lock->rtmutex.wait_lock)
{
	/* Runtime (lockdep) counterpart of the __must_hold() annotation. */
	lockdep_assert_held(&lock->rtmutex.wait_lock);
}
196 
197 #endif /* WW_RT */
198 
/*
 * Wait-Die:
 *   The newer transactions are killed when:
 *     It (the new transaction) makes a request for a lock being held
 *     by an older transaction.
 *
 * Wound-Wait:
 *   The newer transactions are wounded when:
 *     An older transaction makes a request for a lock being held by
 *     the newer transaction.
 */
210 
/*
 * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
 * it.
 */
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef DEBUG_WW_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	/* Paired with the decrement in __ww_mutex_unlock(). */
	ww_ctx->acquired++;
	ww->ctx = ww_ctx;
}
255 
/*
 * Determine if @a is 'less' than @b. IOW, either @a is a lower priority task
 * or, when of equal priority, a younger transaction than @b.
 *
 * Depending on the algorithm, @a will either need to wait for @b, or die.
 */
static inline bool
__ww_ctx_less(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
/*
 * Can only do the RT prio for WW_RT, because task->prio isn't stable due to PI,
 * so the wait_list ordering will go wobbly. rt_mutex re-queues the waiter and
 * isn't affected by this.
 */
#ifdef WW_RT
	/* kernel prio; less is more */
	int a_prio = a->task->prio;
	int b_prio = b->task->prio;

	if (rt_or_dl_prio(a_prio) || rt_or_dl_prio(b_prio)) {

		if (a_prio > b_prio)
			return true;

		if (a_prio < b_prio)
			return false;

		/* equal static prio */

		if (dl_prio(a_prio)) {
			/* Deadline scheduling: an earlier deadline is 'more'. */
			if (dl_time_before(b->task->dl.deadline,
					   a->task->dl.deadline))
				return true;

			if (dl_time_before(a->task->dl.deadline,
					   b->task->dl.deadline))
				return false;
		}

		/* equal prio */
	}
#endif

	/* FIFO order tie break -- bigger is younger; signed delta is wraparound-safe */
	return (signed long)(a->stamp - b->stamp) > 0;
}
302 
/*
 * Wait-Die; wake a lesser waiter context (when locks held) such that it can
 * die.
 *
 * Among waiters with context, only the first one can have other locks acquired
 * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
 * __ww_mutex_check_kill() wake any but the earliest context.
 *
 * Returns true whenever @ww_ctx uses Wait-Die -- i.e. the case has been
 * handled here, whether or not a wakeup was queued -- so callers only fall
 * through to __ww_mutex_wound() for Wound-Wait contexts.
 */
static bool
__ww_mutex_die(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
	       struct ww_acquire_ctx *ww_ctx, struct wake_q_head *wake_q)
{
	if (!ww_ctx->is_wait_die)
		return false;

	/* Only wake @waiter when it holds locks and is less than @ww_ctx. */
	if (waiter->ww_ctx->acquired > 0 && __ww_ctx_less(waiter->ww_ctx, ww_ctx)) {
#ifndef WW_RT
		debug_mutex_wake_waiter(lock, waiter);
#endif
		/*
		 * When waking up the task to die, be sure to set the
		 * blocked_on to PROXY_WAKING. Otherwise we can see
		 * circular blocked_on relationships that can't resolve.
		 */
		set_task_blocked_on_waking(waiter->task, lock);
		wake_q_add(wake_q, waiter->task);
	}

	return true;
}
333 
/*
 * Wound-Wait; wound a lesser @hold_ctx if it holds the lock.
 *
 * Wound the lock holder if there are waiters with more important transactions
 * than the lock holders. Even if multiple waiters may wound the lock holder,
 * it's sufficient that only one does.
 *
 * Returns true if @hold_ctx was wounded, false otherwise.
 */
static bool __ww_mutex_wound(struct MUTEX *lock,
			     struct ww_acquire_ctx *ww_ctx,
			     struct ww_acquire_ctx *hold_ctx,
			     struct wake_q_head *wake_q)
	__must_hold(&lock->WAIT_LOCK)
{
	struct task_struct *owner = __ww_mutex_owner(lock);

	lockdep_assert_wait_lock_held(lock);

	/*
	 * Possible through __ww_mutex_add_waiter() when we race with
	 * ww_mutex_set_context_fastpath(). In that case we'll get here again
	 * through __ww_mutex_check_waiters().
	 */
	if (!hold_ctx)
		return false;

	/*
	 * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
	 * it cannot go away because we'll have FLAG_WAITERS set and hold
	 * wait_lock.
	 */
	if (!owner)
		return false;

	/* Only a context that itself holds locks may wound the holder. */
	if (ww_ctx->acquired > 0 && __ww_ctx_less(hold_ctx, ww_ctx)) {
		hold_ctx->wounded = 1;

		/*
		 * wake_up_process() paired with set_current_state()
		 * inserts sufficient barriers to make sure @owner either sees
		 * it's wounded in __ww_mutex_check_kill() or has a
		 * wakeup pending to re-read the wounded state.
		 */
		if (owner != current) {
			/*
			 * When waking up the task to wound, be sure to set the
			 * blocked_on to PROXY_WAKING. Otherwise we can see
			 * circular blocked_on relationships that can't resolve.
			 *
			 * NOTE: We pass NULL here instead of lock, because we
			 * are waking the mutex owner, who may be currently
			 * blocked on a different mutex.
			 */
			set_task_blocked_on_waking(owner, NULL);
			wake_q_add(wake_q, owner);
		}
		return true;
	}

	return false;
}
394 
/*
 * We just acquired @lock under @ww_ctx, if there are more important contexts
 * waiting behind us on the wait-list, check if they need to die, or wound us.
 *
 * See __ww_mutex_add_waiter() for the list-order construction; basically the
 * list is ordered by stamp, smallest (oldest) first.
 *
 * This relies on never mixing wait-die/wound-wait on the same wait-list;
 * which is currently ensured by that being a ww_class property.
 *
 * The current task must not be on the wait list.
 */
static void
__ww_mutex_check_waiters(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx,
			 struct wake_q_head *wake_q)
	__must_hold(&lock->WAIT_LOCK)
{
	struct MUTEX_WAITER *cur;

	lockdep_assert_wait_lock_held(lock);

	for (cur = __ww_waiter_first(lock); cur;
	     cur = __ww_waiter_next(lock, cur)) {

		/* Waiters without a context are interspersed FIFO; skip them. */
		if (!cur->ww_ctx)
			continue;

		/* Either algorithm handles the first contexted waiter; done. */
		if (__ww_mutex_die(lock, cur, ww_ctx, wake_q) ||
		    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx, wake_q))
			break;
	}
}
427 
/*
 * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
 * and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	DEFINE_WAKE_Q(wake_q);
	unsigned long flags;
	bool has_waiters;

	ww_mutex_lock_acquired(lock, ctx);

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the WAITERS check is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or it will acquire wait_lock, add itself
	 * to waiter list and sleep.
	 */
	smp_mb(); /* See comments above and below. */

	/*
	 * [W] ww->ctx = ctx	    [W] MUTEX_FLAG_WAITERS
	 *     MB		        MB
	 * [R] MUTEX_FLAG_WAITERS   [R] ww->ctx
	 *
	 * The memory barrier above pairs with the memory barrier in
	 * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
	 * and/or !empty list.
	 */
	has_waiters = data_race(__ww_mutex_has_waiters(&lock->base));
	if (likely(!has_waiters))
		return;

	/*
	 * Uh oh, we raced in fastpath, check if any of the waiters need to
	 * die or wound us.
	 */
	lock_wait_lock(&lock->base, &flags);
	__ww_mutex_check_waiters(&lock->base, ctx, &wake_q);
	/*
	 * NOTE(review): preemption stays disabled across the unlock and
	 * wake_up_q(), presumably so a woken task cannot preempt us before
	 * all queued wakeups are issued -- confirm against wake_q callers.
	 */
	preempt_disable();
	unlock_wait_lock(&lock->base, &flags);
	wake_up_q(&wake_q);
	preempt_enable();
}
474 
475 static __always_inline int
__ww_mutex_kill(struct MUTEX * lock,struct ww_acquire_ctx * ww_ctx)476 __ww_mutex_kill(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
477 {
478 	if (ww_ctx->acquired > 0) {
479 #ifdef DEBUG_WW_MUTEXES
480 		struct ww_mutex *ww;
481 
482 		ww = container_of(lock, struct ww_mutex, base);
483 		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
484 		ww_ctx->contending_lock = ww;
485 #endif
486 		return -EDEADLK;
487 	}
488 
489 	return 0;
490 }
491 
/*
 * Check the wound condition for the current lock acquire.
 *
 * Wound-Wait: If we're wounded, kill ourself.
 *
 * Wait-Die: If we're trying to acquire a lock already held by an older
 *           context, kill ourselves.
 *
 * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
 * look at waiters before us in the wait-list.
 */
static inline int
__ww_mutex_check_kill(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
		      struct ww_acquire_ctx *ctx)
	__must_hold(&lock->WAIT_LOCK)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
	struct MUTEX_WAITER *cur;

	/* A context that holds no other locks never needs to die. */
	if (ctx->acquired == 0)
		return 0;

	/* Wound-Wait: we only die when somebody wounded us. */
	if (!ctx->is_wait_die) {
		if (ctx->wounded)
			return __ww_mutex_kill(lock, ctx);

		return 0;
	}

	/* Wait-Die: die when the current lock holder is an older context. */
	if (hold_ctx && __ww_ctx_less(ctx, hold_ctx))
		return __ww_mutex_kill(lock, ctx);

	/*
	 * If there is a waiter in front of us that has a context, then its
	 * stamp is earlier than ours and we must kill ourself.
	 */
	for (cur = __ww_waiter_prev(lock, waiter); cur;
	     cur = __ww_waiter_prev(lock, cur)) {

		if (!cur->ww_ctx)
			continue;

		return __ww_mutex_kill(lock, ctx);
	}

	return 0;
}
540 
/*
 * Add @waiter to the wait-list, keep the wait-list ordered by stamp, smallest
 * first. Such that older contexts are preferred to acquire the lock over
 * younger contexts.
 *
 * Waiters without context are interspersed in FIFO order.
 *
 * Furthermore, for Wait-Die kill ourself immediately when possible (there are
 * older contexts already waiting) to avoid unnecessary waiting and for
 * Wound-Wait ensure we wound the owning context when it is younger.
 *
 * Returns 0 on success, or -EDEADLK (via __ww_mutex_kill()) when a Wait-Die
 * context must back off before queueing.
 */
static inline int
__ww_mutex_add_waiter(struct MUTEX_WAITER *waiter,
		      struct MUTEX *lock,
		      struct ww_acquire_ctx *ww_ctx,
		      struct wake_q_head *wake_q)
	__must_hold(&lock->WAIT_LOCK)
{
	/* @pos: insertion hint handed to __ww_waiter_add(); NULL is the default spot. */
	struct MUTEX_WAITER *cur, *pos = NULL;
	bool is_wait_die;

	/* Contextless waiters are added FIFO, no ordering to maintain. */
	if (!ww_ctx) {
		__ww_waiter_add(lock, waiter, NULL);
		return 0;
	}

	is_wait_die = ww_ctx->is_wait_die;

	/*
	 * Add the waiter before the first waiter with a higher stamp.
	 * Waiters without a context are skipped to avoid starving
	 * them. Wait-Die waiters may die here. Wound-Wait waiters
	 * never die here, but they are sorted in stamp order and
	 * may wound the lock holder.
	 */
	for (cur = __ww_waiter_last(lock); cur;
	     cur = __ww_waiter_prev(lock, cur)) {

		if (!cur->ww_ctx)
			continue;

		if (__ww_ctx_less(ww_ctx, cur->ww_ctx)) {
			/*
			 * Wait-Die: if we find an older context waiting, there
			 * is no point in queueing behind it, as we'd have to
			 * die the moment it would acquire the lock.
			 */
			if (is_wait_die) {
				int ret = __ww_mutex_kill(lock, ww_ctx);

				if (ret)
					return ret;
			}

			break;
		}

		pos = cur;

		/* Wait-Die: ensure younger waiters die. */
		__ww_mutex_die(lock, cur, ww_ctx, wake_q);
	}

	__ww_waiter_add(lock, waiter, pos);

	/*
	 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
	 * wound that such that we might proceed.
	 */
	if (!is_wait_die) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);

		/*
		 * See ww_mutex_set_context_fastpath(). Orders setting
		 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
		 * such that either we or the fastpath will wound @ww->ctx.
		 */
		smp_mb();
		__ww_mutex_wound(lock, ww_ctx, ww->ctx, wake_q);
	}

	return 0;
}
624 
__ww_mutex_unlock(struct ww_mutex * lock)625 static inline void __ww_mutex_unlock(struct ww_mutex *lock)
626 {
627 	if (lock->ctx) {
628 #ifdef DEBUG_WW_MUTEXES
629 		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
630 #endif
631 		if (lock->ctx->acquired > 0)
632 			lock->ctx->acquired--;
633 		lock->ctx = NULL;
634 	}
635 }
636