/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
 */

#ifndef _SYS_MUTEX_H_
#define _SYS_MUTEX_H_

#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#ifdef _KERNEL
#include <sys/pcpu.h>
#include <sys/lock_profile.h>
#include <sys/lockstat.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>

/*
 * Mutex types and options passed to mtx_init().  MTX_QUIET and MTX_DUPOK
 * can also be passed in.
 */
#define	MTX_DEF		0x00000000	/* DEFAULT (sleep) lock */
#define	MTX_SPIN	0x00000001	/* Spin lock (disables interrupts) */
#define	MTX_RECURSE	0x00000004	/* Option: lock allowed to recurse */
#define	MTX_NOWITNESS	0x00000008	/* Don't do any witness checking. */
#define	MTX_NOPROFILE	0x00000020	/* Don't profile this lock */
#define	MTX_NEW		0x00000040	/* Don't check for double-init */

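/*
 * Illustrative sketch (not part of the KPI): a typical MTX_DEF mutex is
 * declared, initialized once, and destroyed when its enclosing object goes
 * away.  The names `foo_mtx' and "foo" below are hypothetical.
 *
 *	static struct mtx foo_mtx;
 *
 *	mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
 *	...
 *	mtx_destroy(&foo_mtx);
 */
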
/*
 * Option flags passed to certain lock/unlock routines, through the use
 * of corresponding mtx_{lock,unlock}_flags() interface macros.
 */
#define	MTX_QUIET	LOP_QUIET	/* Don't log a mutex event */
#define	MTX_DUPOK	LOP_DUPOK	/* Don't log a duplicate acquire */

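/*
 * Illustrative sketch: these flags are passed to the _flags() variants of
 * the lock and unlock routines, e.g. to suppress logging on a hypothetical
 * `foo_mtx':
 *
 *	mtx_lock_flags(&foo_mtx, MTX_QUIET);
 *	mtx_unlock_flags(&foo_mtx, MTX_QUIET);
 */
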
/*
 * State bits kept in mutex->mtx_lock, for the DEFAULT lock type. None of this,
 * with the exception of MTX_UNOWNED, applies to spin locks.
 */
#define	MTX_UNOWNED	0x00000000	/* Cookie for free mutex */
#define	MTX_RECURSED	0x00000001	/* lock recursed (for MTX_DEF only) */
#define	MTX_CONTESTED	0x00000002	/* lock contested (for MTX_DEF only) */
#define	MTX_DESTROYED	0x00000004	/* lock destroyed */
#define	MTX_FLAGMASK	(MTX_RECURSED | MTX_CONTESTED | MTX_DESTROYED)

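/*
 * Illustrative sketch: for an owned MTX_DEF lock, mtx_lock holds the owning
 * thread pointer with the low flag bits OR'ed in, so the owner is recovered
 * by masking them off (see lv_mtx_owner() below).  In effect:
 *
 *	owner = (struct thread *)(m->mtx_lock & ~MTX_FLAGMASK);
 */
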
/*
 * Prototypes
 *
 * NOTE: Functions prefixed with `_' (underscore) are exported to other parts
 *	 of the kernel via macros, thus allowing us to use the cpp LOCK_FILE
 *	 and LOCK_LINE macros and to hide the lock cookie crunching from the
 *	 consumers.  These functions should not be called directly by any
 *	 code using the API; their macros cover their functionality.
 *	 Functions with a `_' suffix are the entry points for the common
 *	 KPI covering both compat shims and the fast path case.  They can be
 *	 used by consumers willing to pass options, file and line
 *	 information, in an option-independent way.
 *
 * [See below for descriptions]
 *
 */
void	_mtx_init(volatile uintptr_t *c, const char *name, const char *type,
	    int opts);
void	_mtx_destroy(volatile uintptr_t *c);
void	mtx_sysinit(void *arg);
int	_mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF);
int	_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file,
	    int line);
void	mutex_init(void);
#if LOCK_DEBUG > 0
void	__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
	    const char *file, int line);
void	__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
	    const char *file, int line);
#else
void	__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v);
void	__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v);
#endif
void	mtx_wait_unlocked(struct mtx *m);

#ifdef SMP
#if LOCK_DEBUG > 0
void	_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts,
	    const char *file, int line);
#else
void	_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v);
#endif
#endif
void	__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file,
	    int line);
void	__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file,
	    int line);
void	__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
	    int line);
int	__mtx_trylock_spin_flags(volatile uintptr_t *c, int opts,
	    const char *file, int line);
void	__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts,
	    const char *file, int line);
void	mtx_spin_wait_unlocked(struct mtx *m);

#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void	__mtx_assert(const volatile uintptr_t *c, int what, const char *file,
	    int line);
#endif
void	thread_lock_flags_(struct thread *, int, const char *, int);
#if LOCK_DEBUG > 0
void	_thread_lock(struct thread *td, int opts, const char *file, int line);
#else
void	_thread_lock(struct thread *);
#endif

#if defined(LOCK_PROFILING) || (defined(KLD_MODULE) && !defined(KLD_TIED))
#define	thread_lock(tdp)						\
	thread_lock_flags_((tdp), 0, __FILE__, __LINE__)
#elif LOCK_DEBUG > 0
#define	thread_lock(tdp)						\
	_thread_lock((tdp), 0, __FILE__, __LINE__)
#else
#define	thread_lock(tdp)						\
	_thread_lock((tdp))
#endif

#if LOCK_DEBUG > 0
#define	thread_lock_flags(tdp, opt)					\
	thread_lock_flags_((tdp), (opt), __FILE__, __LINE__)
#else
#define	thread_lock_flags(tdp, opt)					\
	_thread_lock(tdp)
#endif

#define	thread_unlock(tdp)						\
	mtx_unlock_spin((tdp)->td_lock)

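/*
 * Illustrative sketch: thread_lock()/thread_unlock() bracket accesses to
 * scheduler-protected fields of a thread; `td' is a hypothetical pointer.
 *
 *	thread_lock(td);
 *	... inspect or modify td's scheduler state ...
 *	thread_unlock(td);
 */
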
/*
 * Top-level macros to provide lock cookie once the actual mtx is passed.
 * They also prevent passing a malformed object to the mtx KPI: compilation
 * fails if the reserved mtx_lock member cannot be found.
 */
#define	mtx_init(m, n, t, o)						\
	_mtx_init(&(m)->mtx_lock, n, t, o)
#define	mtx_destroy(m)							\
	_mtx_destroy(&(m)->mtx_lock)
#define	mtx_trylock_flags_(m, o, f, l)					\
	_mtx_trylock_flags_(&(m)->mtx_lock, o, f, l)
#if LOCK_DEBUG > 0
#define	_mtx_lock_sleep(m, v, o, f, l)					\
	__mtx_lock_sleep(&(m)->mtx_lock, v, o, f, l)
#define	_mtx_unlock_sleep(m, v, o, f, l)				\
	__mtx_unlock_sleep(&(m)->mtx_lock, v, o, f, l)
#else
#define	_mtx_lock_sleep(m, v, o, f, l)					\
	__mtx_lock_sleep(&(m)->mtx_lock, v)
#define	_mtx_unlock_sleep(m, v, o, f, l)				\
	__mtx_unlock_sleep(&(m)->mtx_lock, v)
#endif
#ifdef SMP
#if LOCK_DEBUG > 0
#define	_mtx_lock_spin(m, v, o, f, l)					\
	_mtx_lock_spin_cookie(&(m)->mtx_lock, v, o, f, l)
#else
#define	_mtx_lock_spin(m, v, o, f, l)					\
	_mtx_lock_spin_cookie(&(m)->mtx_lock, v)
#endif
#endif
#define	_mtx_lock_flags(m, o, f, l)					\
	__mtx_lock_flags(&(m)->mtx_lock, o, f, l)
#define	_mtx_unlock_flags(m, o, f, l)					\
	__mtx_unlock_flags(&(m)->mtx_lock, o, f, l)
#define	_mtx_lock_spin_flags(m, o, f, l)				\
	__mtx_lock_spin_flags(&(m)->mtx_lock, o, f, l)
#define	_mtx_trylock_spin_flags(m, o, f, l)				\
	__mtx_trylock_spin_flags(&(m)->mtx_lock, o, f, l)
#define	_mtx_unlock_spin_flags(m, o, f, l)				\
	__mtx_unlock_spin_flags(&(m)->mtx_lock, o, f, l)
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define	_mtx_assert(m, w, f, l)						\
	__mtx_assert(&(m)->mtx_lock, w, f, l)
#endif

#define	mtx_recurse	lock_object.lo_data

/* Very simple operations on mtx_lock. */

/* Try to obtain mtx_lock once. */
#define	_mtx_obtain_lock(mp, tid)					\
	atomic_cmpset_acq_ptr(&(mp)->mtx_lock, MTX_UNOWNED, (tid))

#define	_mtx_obtain_lock_fetch(mp, vp, tid)				\
	atomic_fcmpset_acq_ptr(&(mp)->mtx_lock, vp, (tid))

/* Try to release mtx_lock if it is unrecursed and uncontested. */
#define	_mtx_release_lock(mp, tid)					\
	atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), MTX_UNOWNED)

/* Release mtx_lock quickly, assuming we own it. */
#define	_mtx_release_lock_quick(mp)					\
	atomic_store_rel_ptr(&(mp)->mtx_lock, MTX_UNOWNED)

#define	_mtx_release_lock_fetch(mp, vp)					\
	atomic_fcmpset_rel_ptr(&(mp)->mtx_lock, (vp), MTX_UNOWNED)

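/*
 * Illustrative sketch: the uncontested fast path is a single compare-and-set
 * on mtx_lock, from the MTX_UNOWNED cookie to the owning thread pointer;
 * only on failure (or when lockstat probes are enabled) is the slow path
 * taken.  In effect:
 *
 *	if (!atomic_cmpset_acq_ptr(&m->mtx_lock, MTX_UNOWNED,
 *	    (uintptr_t)curthread))
 *		... fall back to _mtx_lock_sleep() ...
 */
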
/*
 * Full lock operations that are suitable to be inlined in non-debug
 * kernels.  If the lock cannot be acquired or released trivially then
 * the work is deferred to another function.
 */

/* Lock a normal mutex. */
#define	__mtx_lock(mp, tid, opts, file, line) __extension__ ({		\
	uintptr_t _tid = (uintptr_t)(tid);				\
	uintptr_t _v = MTX_UNOWNED;					\
									\
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(adaptive__acquire) ||\
	    !_mtx_obtain_lock_fetch((mp), &_v, _tid)))			\
		_mtx_lock_sleep((mp), _v, (opts), (file), (line));	\
	(void)0; /* ensure void type for expression */			\
})

/*
 * Lock a spin mutex.  For spinlocks, we handle recursion inline (it
 * turns out that function calls can be significantly expensive on
 * some architectures).  Since spin locks are not _too_ common,
 * inlining this code is not too big a deal.
 */
#ifdef SMP
#define	__mtx_lock_spin(mp, tid, opts, file, line) __extension__ ({	\
	uintptr_t _tid = (uintptr_t)(tid);				\
	uintptr_t _v = MTX_UNOWNED;					\
									\
	spinlock_enter();						\
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(spin__acquire) ||	\
	    !_mtx_obtain_lock_fetch((mp), &_v, _tid)))			\
		_mtx_lock_spin((mp), _v, (opts), (file), (line));	\
	(void)0; /* ensure void type for expression */			\
})
#define	__mtx_trylock_spin(mp, tid, opts, file, line) __extension__ ({	\
	uintptr_t _tid = (uintptr_t)(tid);				\
	int _ret;							\
									\
	spinlock_enter();						\
	if (((mp)->mtx_lock != MTX_UNOWNED || !_mtx_obtain_lock((mp), _tid))) {\
		spinlock_exit();					\
		_ret = 0;						\
	} else {							\
		LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire,	\
		    mp, 0, 0, file, line);				\
		_ret = 1;						\
	}								\
	_ret;								\
})
#else /* SMP */
#define	__mtx_lock_spin(mp, tid, opts, file, line) __extension__ ({	\
	uintptr_t _tid = (uintptr_t)(tid);				\
									\
	spinlock_enter();						\
	if ((mp)->mtx_lock == _tid)					\
		(mp)->mtx_recurse++;					\
	else {								\
		KASSERT((mp)->mtx_lock == MTX_UNOWNED, ("corrupt spinlock")); \
		(mp)->mtx_lock = _tid;					\
	}								\
	(void)0; /* ensure void type for expression */			\
})
#define	__mtx_trylock_spin(mp, tid, opts, file, line) __extension__ ({	\
	uintptr_t _tid = (uintptr_t)(tid);				\
	int _ret;							\
									\
	spinlock_enter();						\
	if ((mp)->mtx_lock != MTX_UNOWNED) {				\
		spinlock_exit();					\
		_ret = 0;						\
	} else {							\
		(mp)->mtx_lock = _tid;					\
		_ret = 1;						\
	}								\
	_ret;								\
})
#endif /* SMP */

/* Unlock a normal mutex. */
#define	__mtx_unlock(mp, tid, opts, file, line) __extension__ ({	\
	uintptr_t _v = (uintptr_t)(tid);				\
									\
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(adaptive__release) ||\
	    !_mtx_release_lock_fetch((mp), &_v)))			\
		_mtx_unlock_sleep((mp), _v, (opts), (file), (line));	\
	(void)0; /* ensure void type for expression */			\
})

/*
 * Unlock a spin mutex.  For spinlocks, we can handle everything
 * inline, as it's pretty simple and a function call would be too
 * expensive (at least on some architectures).  Since spin locks are
 * not _too_ common, inlining this code is not too big a deal.
 *
 * Since we always perform a spinlock_enter() when attempting to acquire a
 * spin lock, we need to always perform a matching spinlock_exit() when
 * releasing a spin lock.  This includes the recursion cases.
 */
#ifdef SMP
#define	__mtx_unlock_spin(mp) __extension__ ({				\
	if (mtx_recursed((mp)))						\
		(mp)->mtx_recurse--;					\
	else {								\
		LOCKSTAT_PROFILE_RELEASE_SPIN_LOCK(spin__release, mp);	\
		_mtx_release_lock_quick((mp));				\
	}								\
	spinlock_exit();						\
})
#else /* SMP */
#define	__mtx_unlock_spin(mp) __extension__ ({				\
	if (mtx_recursed((mp)))						\
		(mp)->mtx_recurse--;					\
	else {								\
		LOCKSTAT_PROFILE_RELEASE_SPIN_LOCK(spin__release, mp);	\
		(mp)->mtx_lock = MTX_UNOWNED;				\
	}								\
	spinlock_exit();						\
})
#endif /* SMP */

/*
 * Exported lock manipulation interface.
 *
 * mtx_lock(m) locks MTX_DEF mutex `m'
 *
 * mtx_lock_spin(m) locks MTX_SPIN mutex `m'
 *
 * mtx_unlock(m) unlocks MTX_DEF mutex `m'
 *
 * mtx_unlock_spin(m) unlocks MTX_SPIN mutex `m'
 *
 * mtx_lock_spin_flags(m, opts) and mtx_lock_flags(m, opts) lock mutex `m'
 *     and pass option flags `opts' to the "hard" function, if required.
 *     With these routines, it is possible to pass flags such as MTX_QUIET
 *     to the appropriate lock manipulation routines.
 *
 * mtx_trylock(m) attempts to acquire MTX_DEF mutex `m' but doesn't sleep if
 *     it cannot.  Rather, it returns 0 on failure and non-zero on success.
 *     It does NOT handle recursion: we assume that a caller properly using
 *     this part of the interface knows that the lock in question is _not_
 *     recursed.
 *
 * mtx_trylock_flags(m, opts) is used the same way as mtx_trylock() but accepts
 *     relevant option flags `opts.'
 *
 * mtx_trylock_spin(m) attempts to acquire MTX_SPIN mutex `m' but doesn't
 *     spin if it cannot.  Rather, it returns 0 on failure and non-zero on
 *     success.  It always returns failure for recursed lock attempts.
 *
 * mtx_initialized(m) returns non-zero if the lock `m' has been initialized.
 *
 * mtx_owned(m) returns non-zero if the current thread owns the lock `m'
 *
 * mtx_recursed(m) returns non-zero if the lock `m' is presently recursed.
 */
#define	mtx_lock(m)		mtx_lock_flags((m), 0)
#define	mtx_lock_spin(m)	mtx_lock_spin_flags((m), 0)
#define	mtx_trylock(m)		mtx_trylock_flags((m), 0)
#define	mtx_trylock_spin(m)	mtx_trylock_spin_flags((m), 0)
#define	mtx_unlock(m)		mtx_unlock_flags((m), 0)
#define	mtx_unlock_spin(m)	mtx_unlock_spin_flags((m), 0)

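/*
 * Illustrative sketch: the canonical usage pattern, with a hypothetical
 * `foo_mtx' protecting a hypothetical counter `foo_count':
 *
 *	mtx_lock(&foo_mtx);
 *	foo_count++;
 *	mtx_unlock(&foo_mtx);
 *
 *	if (mtx_trylock(&foo_mtx)) {
 *		foo_count++;
 *		mtx_unlock(&foo_mtx);
 *	}
 */
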
struct mtx_pool;

struct mtx_pool *mtx_pool_create(const char *mtx_name, int pool_size, int opts);
void mtx_pool_destroy(struct mtx_pool **poolp);
struct mtx *mtx_pool_find(struct mtx_pool *pool, void *ptr);
struct mtx *mtx_pool_alloc(struct mtx_pool *pool);
#define	mtx_pool_lock(pool, ptr)					\
	mtx_lock(mtx_pool_find((pool), (ptr)))
#define	mtx_pool_lock_spin(pool, ptr)					\
	mtx_lock_spin(mtx_pool_find((pool), (ptr)))
#define	mtx_pool_unlock(pool, ptr)					\
	mtx_unlock(mtx_pool_find((pool), (ptr)))
#define	mtx_pool_unlock_spin(pool, ptr)					\
	mtx_unlock_spin(mtx_pool_find((pool), (ptr)))

/*
 * mtxpool_sleep is a general purpose pool of sleep mutexes.
 */
extern struct mtx_pool *mtxpool_sleep;

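/*
 * Illustrative sketch: pool mutexes hash an arbitrary pointer to one of the
 * pool's locks, so callers need not embed a mutex in every object; `obj' is
 * a hypothetical pointer.
 *
 *	mtx_pool_lock(mtxpool_sleep, obj);
 *	... obj is protected here ...
 *	mtx_pool_unlock(mtxpool_sleep, obj);
 */
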
#ifndef LOCK_DEBUG
#error LOCK_DEBUG not defined, include <sys/lock.h> before <sys/mutex.h>
#endif
#if LOCK_DEBUG > 0 || defined(MUTEX_NOINLINE)
#define	mtx_lock_flags_(m, opts, file, line)				\
	_mtx_lock_flags((m), (opts), (file), (line))
#define	mtx_unlock_flags_(m, opts, file, line)				\
	_mtx_unlock_flags((m), (opts), (file), (line))
#define	mtx_lock_spin_flags_(m, opts, file, line)			\
	_mtx_lock_spin_flags((m), (opts), (file), (line))
#define	mtx_trylock_spin_flags_(m, opts, file, line)			\
	_mtx_trylock_spin_flags((m), (opts), (file), (line))
#define	mtx_unlock_spin_flags_(m, opts, file, line)			\
	_mtx_unlock_spin_flags((m), (opts), (file), (line))
#else	/* LOCK_DEBUG == 0 && !MUTEX_NOINLINE */
#define	mtx_lock_flags_(m, opts, file, line)				\
	__mtx_lock((m), curthread, (opts), (file), (line))
#define	mtx_unlock_flags_(m, opts, file, line)				\
	__mtx_unlock((m), curthread, (opts), (file), (line))
#define	mtx_lock_spin_flags_(m, opts, file, line)			\
	__mtx_lock_spin((m), curthread, (opts), (file), (line))
#define	mtx_trylock_spin_flags_(m, opts, file, line)			\
	__mtx_trylock_spin((m), curthread, (opts), (file), (line))
#define	mtx_unlock_spin_flags_(m, opts, file, line)			\
	__mtx_unlock_spin((m))
#endif	/* LOCK_DEBUG > 0 || MUTEX_NOINLINE */

#ifdef INVARIANTS
#define	mtx_assert_(m, what, file, line)				\
	_mtx_assert((m), (what), (file), (line))

#define	GIANT_REQUIRED	mtx_assert_(&Giant, MA_OWNED, __FILE__, __LINE__)

#else	/* INVARIANTS */
#define	mtx_assert_(m, what, file, line)	(void)0
#define	GIANT_REQUIRED
#endif	/* INVARIANTS */

#define	mtx_lock_flags(m, opts)						\
	mtx_lock_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_unlock_flags(m, opts)					\
	mtx_unlock_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_lock_spin_flags(m, opts)					\
	mtx_lock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_unlock_spin_flags(m, opts)					\
	mtx_unlock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_trylock_flags(m, opts)					\
	mtx_trylock_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_trylock_spin_flags(m, opts)					\
	mtx_trylock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_assert(m, what)						\
	mtx_assert_((m), (what), __FILE__, __LINE__)

#define	mtx_sleep(chan, mtx, pri, wmesg, timo)				\
	_sleep((chan), &(mtx)->lock_object, (pri), (wmesg),		\
	    tick_sbt * (timo), 0, C_HARDCLOCK)

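/*
 * Illustrative sketch: mtx_sleep() atomically releases the mutex and blocks,
 * reacquiring it before returning, so condition waits take the usual form.
 * `foo_mtx', `foo_ready' and the wait channel &foo_ready are hypothetical.
 *
 *	mtx_lock(&foo_mtx);
 *	while (!foo_ready)
 *		mtx_sleep(&foo_ready, &foo_mtx, PRIBIO, "foowt", 0);
 *	mtx_unlock(&foo_mtx);
 */
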
#define	MTX_READ_VALUE(m)	((m)->mtx_lock)

#define	mtx_initialized(m)	lock_initialized(&(m)->lock_object)

#define	lv_mtx_owner(v)	((struct thread *)((v) & ~MTX_FLAGMASK))

#define	mtx_owner(m)	lv_mtx_owner(MTX_READ_VALUE(m))

#define	mtx_owned(m)	(mtx_owner(m) == curthread)

#define	mtx_recursed(m)	((m)->mtx_recurse != 0)

#define	mtx_name(m)	((m)->lock_object.lo_name)

/*
 * Global locks.
 */
extern struct mtx Giant;
extern struct mtx blocked_lock;

491  * Giant lock manipulation and clean exit macros.
492  * Used to replace return with an exit Giant and return.
493  *
494  * Note that DROP_GIANT*() needs to be paired with PICKUP_GIANT()
495  * The #ifndef is to allow lint-like tools to redefine DROP_GIANT.
496  */
#ifndef DROP_GIANT
#define	DROP_GIANT()							\
do {									\
	int _giantcnt = 0;						\
	WITNESS_SAVE_DECL(Giant);					\
									\
	if (__predict_false(mtx_owned(&Giant))) {			\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		for (_giantcnt = 0; mtx_owned(&Giant) &&		\
		    !SCHEDULER_STOPPED(); _giantcnt++)			\
			mtx_unlock(&Giant);				\
	}

#define	PICKUP_GIANT()							\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	if (__predict_false(_giantcnt > 0)) {				\
		while (_giantcnt--)					\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
#endif

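/*
 * Illustrative sketch: DROP_GIANT() opens a block (note the unbalanced
 * braces in its definition) that PICKUP_GIANT() closes, so the two must be
 * paired in the same scope:
 *
 *	DROP_GIANT();
 *	... code that must run without Giant held ...
 *	PICKUP_GIANT();
 */
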
struct mtx_args {
	void		*ma_mtx;
	const char	*ma_desc;
	int		 ma_opts;
};

#define	MTX_SYSINIT(name, mtx, desc, opts)				\
	static struct mtx_args name##_args = {				\
		(mtx),							\
		(desc),							\
		(opts)							\
	};								\
	SYSINIT(name##_mtx_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    mtx_sysinit, &name##_args);					\
	SYSUNINIT(name##_mtx_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    _mtx_destroy, __DEVOLATILE(void *, &(mtx)->mtx_lock))

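/*
 * Illustrative sketch: MTX_SYSINIT() arranges for a mutex to be initialized
 * at boot via SYSINIT(9) and destroyed on unload, here with a hypothetical
 * global `foo_mtx':
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo_mtx, &foo_mtx, "foo global lock", MTX_DEF);
 */
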
/*
 * The INVARIANTS-enabled mtx_assert() functionality.
 *
 * The constants need to be defined for INVARIANT_SUPPORT infrastructure
 * support as _mtx_assert() itself uses them and the latter implies that
 * _mtx_assert() must build.
 */
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define	MA_OWNED	LA_XLOCKED
#define	MA_NOTOWNED	LA_UNLOCKED
#define	MA_RECURSED	LA_RECURSED
#define	MA_NOTRECURSED	LA_NOTRECURSED
#endif

/*
 * Common lock type names.
 */
#define	MTX_NETWORK_LOCK	"network driver"

#endif	/* _KERNEL */
#endif	/* _SYS_MUTEX_H_ */