xref: /freebsd/sys/kern/kern_lock.c (revision 995dc984471c92c03daad19a1d35af46c086ef3e)
1 /*-
2  * Copyright (c) 1995
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Copyright (C) 1997
6  *	John S. Dyson.  All rights reserved.
7  *
8  * This code contains ideas from software contributed to Berkeley by
9  * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
10  * System project at Carnegie-Mellon University.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the University of
23  *	California, Berkeley and its contributors.
24  * 4. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
41  */
42 
43 #include <sys/cdefs.h>
44 __FBSDID("$FreeBSD$");
45 
46 #include "opt_ddb.h"
47 #include "opt_global.h"
48 
49 #include <sys/param.h>
50 #include <sys/kdb.h>
51 #include <sys/kernel.h>
52 #include <sys/ktr.h>
53 #include <sys/lock.h>
54 #include <sys/lockmgr.h>
55 #include <sys/mutex.h>
56 #include <sys/proc.h>
57 #include <sys/systm.h>
58 #include <sys/lock_profile.h>
59 #ifdef DEBUG_LOCKS
60 #include <sys/stack.h>
61 #endif
62 
63 #define	LOCKMGR_TRYOP(x)	((x) & LK_NOWAIT)
64 #define	LOCKMGR_TRYW(x)		(LOCKMGR_TRYOP((x)) ? LOP_TRYLOCK : 0)
65 #define	LOCKMGR_UNHELD(x)	(((x) & (LK_HAVE_EXCL | LK_SHARE_NONZERO)) == 0)
66 #define	LOCKMGR_NOTOWNER(td)	((td) != curthread && (td) != LK_KERNPROC)
67 
68 static void	assert_lockmgr(struct lock_object *lock, int what);
69 #ifdef DDB
70 #include <ddb/ddb.h>
71 static void	db_show_lockmgr(struct lock_object *lock);
72 #endif
73 static void	lock_lockmgr(struct lock_object *lock, int how);
74 static int	unlock_lockmgr(struct lock_object *lock);
75 
76 struct lock_class lock_class_lockmgr = {
77 	.lc_name = "lockmgr",
78 	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
79 	.lc_assert = assert_lockmgr,
80 #ifdef DDB
81 	.lc_ddb_show = db_show_lockmgr,
82 #endif
83 	.lc_lock = lock_lockmgr,
84 	.lc_unlock = unlock_lockmgr,
85 };
86 
87 #ifndef INVARIANTS
88 #define	_lockmgr_assert(lkp, what, file, line)
89 #endif
90 
91 /*
92  * Locking primitives implementation.
93  * Locks provide shared/exclusive synchronization.
94  */
95 
96 void
97 assert_lockmgr(struct lock_object *lock, int what)
98 {
99 
100 	panic("lockmgr locks do not support assertions");
101 }
102 
103 void
104 lock_lockmgr(struct lock_object *lock, int how)
105 {
106 
107 	panic("lockmgr locks do not support sleep interlocking");
108 }
109 
110 int
111 unlock_lockmgr(struct lock_object *lock)
112 {
113 
114 	panic("lockmgr locks do not support sleep interlocking");
115 }
116 
117 #define	COUNT(td, x)	((td)->td_locks += (x))
118 #define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
119 	LK_SHARE_NONZERO | LK_WAIT_NONZERO)
120 
121 static int acquire(struct lock **lkpp, int extflags, int wanted,
122     const char *wmesg, int prio, int timo, int *contested, uint64_t *waittime);
123 static int acquiredrain(struct lock *lkp, int extflags, const char *wmesg,
124     int prio, int timo);
125 
126 static __inline void
127 sharelock(struct thread *td, struct lock *lkp, int incr) {
128 	lkp->lk_flags |= LK_SHARE_NONZERO;
129 	lkp->lk_sharecount += incr;
130 	COUNT(td, incr);
131 }
132 
133 static __inline void
134 shareunlock(struct thread *td, struct lock *lkp, int decr) {
135 
136 	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));
137 
138 	COUNT(td, -decr);
139 	if (lkp->lk_sharecount == decr) {
140 		lkp->lk_flags &= ~LK_SHARE_NONZERO;
141 		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
142 			wakeup(lkp);
143 		}
144 		lkp->lk_sharecount = 0;
145 	} else {
146 		lkp->lk_sharecount -= decr;
147 	}
148 }
149 
150 static int
151 acquire(struct lock **lkpp, int extflags, int wanted, const char *wmesg,
152     int prio, int timo, int *contested, uint64_t *waittime)
153 {
154 	struct lock *lkp = *lkpp;
155 	const char *iwmesg;
156 	int error, iprio, itimo;
157 
158 	iwmesg = (wmesg != LK_WMESG_DEFAULT) ? wmesg : lkp->lk_wmesg;
159 	iprio = (prio != LK_PRIO_DEFAULT) ? prio : lkp->lk_prio;
160 	itimo = (timo != LK_TIMO_DEFAULT) ? timo : lkp->lk_timo;
161 
162 	CTR3(KTR_LOCK,
163 	    "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x",
164 	    lkp, extflags, wanted);
165 
166 	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted))
167 		return (EBUSY);
168 	error = 0;
169 	if ((lkp->lk_flags & wanted) != 0)
170 		lock_profile_obtain_lock_failed(&lkp->lk_object, contested, waittime);
171 
172 	while ((lkp->lk_flags & wanted) != 0) {
173 		CTR2(KTR_LOCK,
174 		    "acquire(): lkp == %p, lk_flags == 0x%x sleeping",
175 		    lkp, lkp->lk_flags);
176 		lkp->lk_flags |= LK_WAIT_NONZERO;
177 		lkp->lk_waitcount++;
178 		error = msleep(lkp, lkp->lk_interlock, iprio, iwmesg,
179 		    ((extflags & LK_TIMELOCK) ? itimo : 0));
180 		lkp->lk_waitcount--;
181 		if (lkp->lk_waitcount == 0)
182 			lkp->lk_flags &= ~LK_WAIT_NONZERO;
183 		if (error)
184 			break;
185 		if (extflags & LK_SLEEPFAIL) {
186 			error = ENOLCK;
187 			break;
188 		}
189 		if (lkp->lk_newlock != NULL) {
190 			mtx_lock(lkp->lk_newlock->lk_interlock);
191 			mtx_unlock(lkp->lk_interlock);
192 			if (lkp->lk_waitcount == 0)
193 				wakeup((void *)(&lkp->lk_newlock));
194 			*lkpp = lkp = lkp->lk_newlock;
195 		}
196 	}
197 	mtx_assert(lkp->lk_interlock, MA_OWNED);
198 	return (error);
199 }
200 
201 /*
202  * Set, change, or release a lock.
203  *
204  * Shared requests increment the shared count. Exclusive requests set the
205  * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
206  * accepted shared locks and shared-to-exclusive upgrades to go away.
207  */
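
/*
 * Usage sketch (illustrative only, not part of the original sources),
 * assuming the lockmgr() convenience macro from <sys/lockmgr.h> in its
 * (lock, flags, interlock) form.  The "foo" structure and foo_read()
 * below are hypothetical.
 *
 *	struct foo {
 *		struct lock	f_lock;
 *		int		f_data;
 *	};
 *
 *	static int
 *	foo_read(struct foo *fp)
 *	{
 *		int v;
 *
 *		lockmgr(&fp->f_lock, LK_SHARED, NULL);
 *		v = fp->f_data;
 *		lockmgr(&fp->f_lock, LK_RELEASE, NULL);
 *		return (v);
 *	}
 *
 * An exclusive request passes LK_EXCLUSIVE instead; adding LK_NOWAIT makes
 * the request fail with EBUSY rather than sleep.
 */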
208 int
209 _lockmgr_args(struct lock *lkp, u_int flags, struct mtx *interlkp,
210     const char *wmesg, int prio, int timo, char *file, int line)
212 {
213 	struct thread *td;
214 	int error;
215 	int extflags, lockflags;
216 	int contested = 0;
217 	uint64_t waitstart = 0;
218 
219 	error = 0;
220 	td = curthread;
221 
222 #ifdef INVARIANTS
223 	if (lkp->lk_flags & LK_DESTROYED) {
224 		if (flags & LK_INTERLOCK)
225 			mtx_unlock(interlkp);
226 		if (panicstr != NULL)
227 			return (0);
228 		panic("%s: %p lockmgr is destroyed", __func__, lkp);
229 	}
230 #endif
231 	mtx_lock(lkp->lk_interlock);
232 	CTR6(KTR_LOCK,
233 	    "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), owner == %p, exclusivecount == %d, flags == 0x%x, "
234 	    "td == %p", lkp, (wmesg != LK_WMESG_DEFAULT) ? wmesg :
235 	    lkp->lk_wmesg, lkp->lk_lockholder, lkp->lk_exclusivecount, flags,
236 	    td);
237 #ifdef DEBUG_LOCKS
238 	{
239 		struct stack stack; /* XXX */
240 		stack_save(&stack);
241 		CTRSTACK(KTR_LOCK, &stack, 0, 1);
242 	}
243 #endif
244 
245 	if (flags & LK_INTERLOCK) {
246 		mtx_assert(interlkp, MA_OWNED | MA_NOTRECURSED);
247 		mtx_unlock(interlkp);
248 	}
249 
250 	if ((flags & (LK_NOWAIT|LK_RELEASE)) == 0)
251 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
252 		    &lkp->lk_interlock->lock_object,
253 		    "Acquiring lockmgr lock \"%s\"",
254 		    (wmesg != LK_WMESG_DEFAULT) ? wmesg : lkp->lk_wmesg);
255 
256 	if (panicstr != NULL) {
257 		mtx_unlock(lkp->lk_interlock);
258 		return (0);
259 	}
260 	if ((lkp->lk_flags & LK_NOSHARE) &&
261 	    (flags & LK_TYPE_MASK) == LK_SHARED) {
262 		flags &= ~LK_TYPE_MASK;
263 		flags |= LK_EXCLUSIVE;
264 	}
265 	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
266 
267 	switch (flags & LK_TYPE_MASK) {
268 
269 	case LK_SHARED:
270 		if (!LOCKMGR_TRYOP(extflags))
271 			WITNESS_CHECKORDER(&lkp->lk_object, LOP_NEWORDER, file,
272 			    line);
273 		/*
274 		 * If we are not the exclusive lock holder, we have to block
275 		 * while there is an exclusive lock holder or while an
276 		 * exclusive lock request or upgrade request is in progress.
277 		 *
278 		 * However, if TDP_DEADLKTREAT is set, we override exclusive
279 		 * lock requests or upgrade requests (but not the exclusive
280 		 * lock itself).
281 		 */
282 		if (lkp->lk_lockholder != td) {
283 			lockflags = LK_HAVE_EXCL;
284 			if (!(td->td_pflags & TDP_DEADLKTREAT))
285 				lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE;
286 			error = acquire(&lkp, extflags, lockflags, wmesg,
287 			    prio, timo, &contested, &waitstart);
288 			if (error)
289 				break;
290 			sharelock(td, lkp, 1);
291 			if (lkp->lk_sharecount == 1)
292 				lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
293 			WITNESS_LOCK(&lkp->lk_object, LOCKMGR_TRYW(extflags),
294 			    file, line);
295 
296 #if defined(DEBUG_LOCKS)
297 			stack_save(&lkp->lk_stack);
298 #endif
299 			break;
300 		}
301 		/*
302 		 * We hold an exclusive lock, so downgrade it to shared.
303 		 * An alternative would be to fail with EDEADLK.
304 		 */
305 		/* FALLTHROUGH downgrade */
306 
307 	case LK_DOWNGRADE:
308 		_lockmgr_assert(lkp, KA_XLOCKED, file, line);
309 		sharelock(td, lkp, lkp->lk_exclusivecount);
310 		WITNESS_DOWNGRADE(&lkp->lk_object, 0, file, line);
311 		COUNT(td, -lkp->lk_exclusivecount);
312 		lkp->lk_exclusivecount = 0;
313 		lkp->lk_flags &= ~LK_HAVE_EXCL;
314 		lkp->lk_lockholder = LK_NOPROC;
315 		if (lkp->lk_waitcount)
316 			wakeup((void *)lkp);
317 		break;
318 
319 	case LK_UPGRADE:
320 		/*
321 		 * Upgrade a shared lock to an exclusive one. If another
322 		 * shared lock has already requested an upgrade to an
323 		 * exclusive lock, our shared lock is released and an
324 		 * exclusive lock is requested (which will be granted
325 		 * after the upgrade). If we return an error, the lock
326 		 * will always have been released.
327 		 */
328 		_lockmgr_assert(lkp, KA_SLOCKED, file, line);
329 		shareunlock(td, lkp, 1);
330 		if (lkp->lk_sharecount == 0)
331 			lock_profile_release_lock(&lkp->lk_object);
332 		/*
333 		 * If we are just polling, check to see if we will block.
334 		 */
335 		if ((extflags & LK_NOWAIT) &&
336 		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
337 		     lkp->lk_sharecount > 1)) {
338 			error = EBUSY;
339 			WITNESS_UNLOCK(&lkp->lk_object, 0, file, line);
340 			break;
341 		}
342 		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
343 			/*
344 			 * We are the first shared lock to request an upgrade, so
345 			 * request the upgrade and wait for the shared count to
346 			 * drop to zero, then take the exclusive lock.
347 			 */
348 			lkp->lk_flags |= LK_WANT_UPGRADE;
349 			error = acquire(&lkp, extflags, LK_SHARE_NONZERO, wmesg,
350 			    prio, timo, &contested, &waitstart);
351 			lkp->lk_flags &= ~LK_WANT_UPGRADE;
352 
353 			if (error) {
354 				if ((lkp->lk_flags & (LK_WANT_EXCL | LK_WAIT_NONZERO)) == (LK_WANT_EXCL | LK_WAIT_NONZERO))
355 					wakeup((void *)lkp);
356 				WITNESS_UNLOCK(&lkp->lk_object, 0, file, line);
357 				break;
358 			}
359 			if (lkp->lk_exclusivecount != 0)
360 				panic("lockmgr: non-zero exclusive count");
361 			lkp->lk_flags |= LK_HAVE_EXCL;
362 			lkp->lk_lockholder = td;
363 			lkp->lk_exclusivecount = 1;
364 			WITNESS_UPGRADE(&lkp->lk_object, LOP_EXCLUSIVE |
365 			    LOP_TRYLOCK, file, line);
366 			COUNT(td, 1);
367 			lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
368 #if defined(DEBUG_LOCKS)
369 			stack_save(&lkp->lk_stack);
370 #endif
371 			break;
372 		}
373 		/*
374 		 * Someone else has already requested an upgrade. Release our
375 		 * shared lock, wake up the upgrade requestor if we are the
376 		 * last shared lock, then request an exclusive lock.
377 		 */
378 		WITNESS_UNLOCK(&lkp->lk_object, 0, file, line);
379 		if ((lkp->lk_flags & (LK_SHARE_NONZERO | LK_WAIT_NONZERO)) ==
380 		    LK_WAIT_NONZERO)
381 			wakeup((void *)lkp);
382 		/* FALLTHROUGH exclusive request */
383 
384 	case LK_EXCLUSIVE:
385 		if (!LOCKMGR_TRYOP(extflags))
386 			WITNESS_CHECKORDER(&lkp->lk_object, LOP_NEWORDER |
387 			    LOP_EXCLUSIVE, file, line);
388 		if (lkp->lk_lockholder == td) {
389 			/*
390 			 *	Recursive lock.
391 			 */
392 			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
393 				panic("lockmgr: locking against myself");
394 			if ((extflags & LK_CANRECURSE) != 0) {
395 				lkp->lk_exclusivecount++;
396 				WITNESS_LOCK(&lkp->lk_object, LOP_EXCLUSIVE |
397 				    LOCKMGR_TRYW(extflags), file, line);
398 				COUNT(td, 1);
399 				break;
400 			}
401 		}
402 		/*
403 		 * If we are just polling, check to see if we will sleep.
404 		 */
405 		if ((extflags & LK_NOWAIT) &&
406 		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
407 			error = EBUSY;
408 			break;
409 		}
410 		/*
411 		 * Try to acquire the want_exclusive flag.
412 		 */
413 		error = acquire(&lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL),
414 		    wmesg, prio, timo, &contested, &waitstart);
415 		if (error)
416 			break;
417 		lkp->lk_flags |= LK_WANT_EXCL;
418 		/*
419 		 * Wait for shared locks and upgrades to finish.
420 		 */
421 		error = acquire(&lkp, extflags, LK_HAVE_EXCL | LK_WANT_UPGRADE |
422 		    LK_SHARE_NONZERO, wmesg, prio, timo,
423 		    &contested, &waitstart);
424 		lkp->lk_flags &= ~LK_WANT_EXCL;
425 		if (error) {
426 			if (lkp->lk_flags & LK_WAIT_NONZERO)
427 				wakeup((void *)lkp);
428 			break;
429 		}
430 		lkp->lk_flags |= LK_HAVE_EXCL;
431 		lkp->lk_lockholder = td;
432 		if (lkp->lk_exclusivecount != 0)
433 			panic("lockmgr: non-zero exclusive count");
434 		lkp->lk_exclusivecount = 1;
435 		WITNESS_LOCK(&lkp->lk_object, LOP_EXCLUSIVE |
436 		    LOCKMGR_TRYW(extflags), file, line);
437 		COUNT(td, 1);
438 		lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line);
439 #if defined(DEBUG_LOCKS)
440 		stack_save(&lkp->lk_stack);
441 #endif
442 		break;
443 
444 	case LK_RELEASE:
445 		_lockmgr_assert(lkp, KA_LOCKED, file, line);
446 		if (lkp->lk_exclusivecount != 0) {
447 			if (lkp->lk_lockholder != LK_KERNPROC) {
448 				WITNESS_UNLOCK(&lkp->lk_object, LOP_EXCLUSIVE,
449 				    file, line);
450 				COUNT(td, -1);
451 			}
452 			if (lkp->lk_exclusivecount-- == 1) {
453 				lkp->lk_flags &= ~LK_HAVE_EXCL;
454 				lkp->lk_lockholder = LK_NOPROC;
455 				lock_profile_release_lock(&lkp->lk_object);
456 			}
457 		} else if (lkp->lk_flags & LK_SHARE_NONZERO) {
458 			WITNESS_UNLOCK(&lkp->lk_object, 0, file, line);
459 			shareunlock(td, lkp, 1);
460 		}
461 
462 		if (lkp->lk_flags & LK_WAIT_NONZERO)
463 			wakeup((void *)lkp);
464 		break;
465 
466 	case LK_DRAIN:
467 		/*
468 		 * Check that we do not already hold the lock, as it can
469 		 * never drain if we do. Unfortunately, we have no way to
470 		 * check for holding a shared lock, but at least we can
471 		 * check for an exclusive one.
472 		 */
473 		if (!LOCKMGR_TRYOP(extflags))
474 			WITNESS_CHECKORDER(&lkp->lk_object, LOP_NEWORDER |
475 			    LOP_EXCLUSIVE, file, line);
476 		if (lkp->lk_lockholder == td)
477 			panic("lockmgr: draining against myself");
478 
479 		error = acquiredrain(lkp, extflags, wmesg, prio, timo);
480 		if (error)
481 			break;
482 		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
483 		lkp->lk_lockholder = td;
484 		lkp->lk_exclusivecount = 1;
485 		WITNESS_LOCK(&lkp->lk_object, LOP_EXCLUSIVE |
486 		    LOCKMGR_TRYW(extflags), file, line);
487 		COUNT(td, 1);
488 #if defined(DEBUG_LOCKS)
489 		stack_save(&lkp->lk_stack);
490 #endif
491 		break;
492 
493 	default:
494 		mtx_unlock(lkp->lk_interlock);
495 		panic("lockmgr: unknown locktype request %d",
496 		    flags & LK_TYPE_MASK);
497 		/* NOTREACHED */
498 	}
499 	if ((lkp->lk_flags & LK_WAITDRAIN) &&
500 	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
501 		LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
502 		lkp->lk_flags &= ~LK_WAITDRAIN;
503 		wakeup((void *)&lkp->lk_flags);
504 	}
505 	mtx_unlock(lkp->lk_interlock);
506 	return (error);
507 }
508 
509 static int
510 acquiredrain(struct lock *lkp, int extflags, const char *wmesg, int prio,
511     int timo)
512 {
513 	const char *iwmesg;
514 	int error, iprio, itimo;
515 
516 	iwmesg = (wmesg != LK_WMESG_DEFAULT) ? wmesg : lkp->lk_wmesg;
517 	iprio = (prio != LK_PRIO_DEFAULT) ? prio : lkp->lk_prio;
518 	itimo = (timo != LK_TIMO_DEFAULT) ? timo : lkp->lk_timo;
519 
520 	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
521 		return (EBUSY);
522 	}
523 	while (lkp->lk_flags & LK_ALL) {
524 		lkp->lk_flags |= LK_WAITDRAIN;
525 		error = msleep(&lkp->lk_flags, lkp->lk_interlock, iprio, iwmesg,
526 			((extflags & LK_TIMELOCK) ? itimo : 0));
527 		if (error)
528 			return (error);
529 		if (extflags & LK_SLEEPFAIL) {
530 			return (ENOLCK);
531 		}
532 	}
533 	return (0);
534 }
535 
536 /*
537  * Initialize a lock; required before use.
538  */
539 void
540 lockinit(lkp, prio, wmesg, timo, flags)
541 	struct lock *lkp;
542 	int prio;
543 	const char *wmesg;
544 	int timo;
545 	int flags;
546 {
547 	int iflags;
548 
549 	CTR5(KTR_LOCK, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
550 	    "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);
551 
552 	lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder);
553 	lkp->lk_flags = (flags & LK_EXTFLG_MASK) & ~(LK_NOWITNESS | LK_NODUP);
554 	lkp->lk_sharecount = 0;
555 	lkp->lk_waitcount = 0;
556 	lkp->lk_exclusivecount = 0;
557 	lkp->lk_prio = prio;
558 	lkp->lk_timo = timo;
559 	lkp->lk_lockholder = LK_NOPROC;
560 	lkp->lk_newlock = NULL;
561 	iflags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
562 	if (!(flags & LK_NODUP))
563 		iflags |= LO_DUPOK;
564 	if (!(flags & LK_NOWITNESS))
565 		iflags |= LO_WITNESS;
566 #ifdef DEBUG_LOCKS
567 	stack_zero(&lkp->lk_stack);
568 #endif
569 	lock_init(&lkp->lk_object, &lock_class_lockmgr, wmesg, NULL, iflags);
570 }
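
/*
 * Lifecycle sketch (illustrative only, not part of the original sources):
 * a lockmgr lock must be set up with lockinit() before first use and torn
 * down with lockdestroy() once nothing can reference it any more.  The
 * PVFS priority and the wait message below are example choices, and the
 * lockmgr() convenience macro is assumed.
 *
 *	static struct lock foo_lock;
 *
 *	lockinit(&foo_lock, PVFS, "foolck", 0, 0);
 *	...
 *	lockmgr(&foo_lock, LK_EXCLUSIVE, NULL);
 *	lockmgr(&foo_lock, LK_RELEASE, NULL);
 *	...
 *	lockdestroy(&foo_lock);
 */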
571 
572 /*
573  * Destroy a lock.
574  */
575 void
576 lockdestroy(lkp)
577 	struct lock *lkp;
578 {
579 
580 	CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
581 	    lkp, lkp->lk_wmesg);
582 	KASSERT((lkp->lk_flags & (LK_HAVE_EXCL | LK_SHARE_NONZERO)) == 0,
583 	    ("lockmgr still held"));
584 	KASSERT(lkp->lk_exclusivecount == 0, ("lockmgr still recursed"));
585 	lkp->lk_flags = LK_DESTROYED;
586 	lock_destroy(&lkp->lk_object);
587 }
588 
589 /*
590  * Disown the lockmgr: hand an exclusively held lock over to LK_KERNPROC.
591  */
592 void
593 _lockmgr_disown(struct lock *lkp, const char *file, int line)
594 {
595 	struct thread *td;
596 
597 	td = curthread;
598 	KASSERT(panicstr != NULL || (lkp->lk_flags & LK_DESTROYED) == 0,
599 	    ("%s: %p lockmgr is destroyed", __func__, lkp));
600 	_lockmgr_assert(lkp, KA_XLOCKED | KA_NOTRECURSED, file, line);
601 
602 	/*
603 	 * Drop the lock reference and switch the owner.  This is effectively
604 	 * atomic, since td_locks is only accessed by curthread and
605 	 * lk_lockholder only needs one write.  Note also that the lock
606 	 * owner may already be LK_KERNPROC, in which case the decrement is
607 	 * simply skipped.
608 	 */
609 	if (lkp->lk_lockholder == td) {
610 		WITNESS_UNLOCK(&lkp->lk_object, LOP_EXCLUSIVE, file, line);
611 		td->td_locks--;
612 	}
613 	lkp->lk_lockholder = LK_KERNPROC;
614 }
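
/*
 * Illustrative only: disowning is typically reached through the
 * lockmgr_disown() wrapper (mentioned in the assertion code below) and
 * hands an exclusively held lock over to LK_KERNPROC so that a thread
 * other than the original owner may release it later, e.g. across an
 * asynchronous hand-off.  Sketch, with a hypothetical "fp->f_lock":
 *
 *	lockmgr(&fp->f_lock, LK_EXCLUSIVE, NULL);
 *	lockmgr_disown(&fp->f_lock);
 *
 * From here on the owner is LK_KERNPROC and another context may issue the
 * matching lockmgr(&fp->f_lock, LK_RELEASE, NULL).
 */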
615 
616 /*
617  * Determine the status of a lock.
618  */
619 int
620 lockstatus(lkp, td)
621 	struct lock *lkp;
622 	struct thread *td;
623 {
624 	int lock_type = 0;
625 	int interlocked;
626 
627 	KASSERT(td == curthread,
628 	    ("%s: thread argument (%p) is not curthread", __func__, td));
629 	KASSERT((lkp->lk_flags & LK_DESTROYED) == 0,
630 	    ("%s: %p lockmgr is destroyed", __func__, lkp));
631 
632 	if (!kdb_active) {
633 		interlocked = 1;
634 		mtx_lock(lkp->lk_interlock);
635 	} else
636 		interlocked = 0;
637 	if (lkp->lk_exclusivecount != 0) {
638 		if (lkp->lk_lockholder == td)
639 			lock_type = LK_EXCLUSIVE;
640 		else
641 			lock_type = LK_EXCLOTHER;
642 	} else if (lkp->lk_sharecount != 0)
643 		lock_type = LK_SHARED;
644 	if (interlocked)
645 		mtx_unlock(lkp->lk_interlock);
646 	return (lock_type);
647 }
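
/*
 * Illustrative only: lockstatus() is typically used in assertions to check
 * how the calling thread holds a lock, for example (with a hypothetical
 * "fp->f_lock"):
 *
 *	KASSERT(lockstatus(&fp->f_lock, curthread) == LK_EXCLUSIVE,
 *	    ("foo_write: lock not exclusively held"));
 *
 * A return value of LK_EXCLOTHER means another thread holds the exclusive
 * lock, LK_SHARED means the lock is held shared, and 0 means it is not
 * held at all.
 */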
648 
649 /*
650  * Determine the number of waiters on a lock.
651  */
652 int
653 lockwaiters(lkp)
654 	struct lock *lkp;
655 {
656 	int count;
657 
658 	KASSERT((lkp->lk_flags & LK_DESTROYED) == 0,
659 	    ("%s: %p lockmgr is destroyed", __func__, lkp));
660 	mtx_lock(lkp->lk_interlock);
661 	count = lkp->lk_waitcount;
662 	mtx_unlock(lkp->lk_interlock);
663 	return (count);
664 }
665 
666 /*
667  * Print out information about the state of a lock. Used by VOP_PRINT
668  * routines to display status about contained locks.
669  */
670 void
671 lockmgr_printinfo(lkp)
672 	struct lock *lkp;
673 {
674 
675 	if (lkp->lk_sharecount)
676 		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
677 		    lkp->lk_sharecount);
678 	else if (lkp->lk_flags & LK_HAVE_EXCL)
679 		printf(" lock type %s: EXCL (count %d) by thread %p (pid %d)",
680 		    lkp->lk_wmesg, lkp->lk_exclusivecount,
681 		    lkp->lk_lockholder, lkp->lk_lockholder->td_proc->p_pid);
682 	if (lkp->lk_waitcount > 0)
683 		printf(" with %d pending", lkp->lk_waitcount);
684 #ifdef DEBUG_LOCKS
685 	stack_print_ddb(&lkp->lk_stack);
686 #endif
687 }
688 
689 #ifdef INVARIANT_SUPPORT
690 #ifndef INVARIANTS
691 #undef _lockmgr_assert
692 #endif
693 
694 void
695 _lockmgr_assert(struct lock *lkp, int what, const char *file, int line)
696 {
697 	struct thread *td;
698 	u_int x;
699 	int slocked = 0;
700 
701 	x = lkp->lk_flags;
702 	td = lkp->lk_lockholder;
703 	if (panicstr != NULL)
704 		return;
705 	switch (what) {
706 	case KA_SLOCKED:
707 	case KA_SLOCKED | KA_NOTRECURSED:
708 	case KA_SLOCKED | KA_RECURSED:
709 		slocked = 1;	/* FALLTHROUGH */
710 	case KA_LOCKED:
711 	case KA_LOCKED | KA_NOTRECURSED:
712 	case KA_LOCKED | KA_RECURSED:
713 #ifdef WITNESS
714 		/*
715 		 * We cannot trust WITNESS if the lock is held in
716 		 * exclusive mode and a call to lockmgr_disown() happened.
717 		 * Work around this by skipping the check if the lock is
718 		 * held in exclusive mode, even for the KA_LOCKED case.
719 		 */
720 		if (slocked || (x & LK_HAVE_EXCL) == 0) {
721 			witness_assert(&lkp->lk_object, what, file, line);
722 			break;
723 		}
724 #endif
725 		if (LOCKMGR_UNHELD(x) || ((x & LK_SHARE_NONZERO) == 0 &&
726 		    (slocked || LOCKMGR_NOTOWNER(td))))
727 			panic("Lock %s not %slocked @ %s:%d\n",
728 			    lkp->lk_object.lo_name, slocked ? "share " : "",
729 			    file, line);
730 		if ((x & LK_SHARE_NONZERO) == 0) {
731 			if (lockmgr_recursed(lkp)) {
732 				if (what & KA_NOTRECURSED)
733 					panic("Lock %s recursed @ %s:%d\n",
734 					    lkp->lk_object.lo_name, file, line);
735 			} else if (what & KA_RECURSED)
736 				panic("Lock %s not recursed @ %s:%d\n",
737 				    lkp->lk_object.lo_name, file, line);
738 		}
739 		break;
740 	case KA_XLOCKED:
741 	case KA_XLOCKED | KA_NOTRECURSED:
742 	case KA_XLOCKED | KA_RECURSED:
743 		if ((x & LK_HAVE_EXCL) == 0 || LOCKMGR_NOTOWNER(td))
744 			panic("Lock %s not exclusively locked @ %s:%d\n",
745 			    lkp->lk_object.lo_name, file, line);
746 		if (lockmgr_recursed(lkp)) {
747 			if (what & KA_NOTRECURSED)
748 				panic("Lock %s recursed @ %s:%d\n",
749 				    lkp->lk_object.lo_name, file, line);
750 		} else if (what & KA_RECURSED)
751 			panic("Lock %s not recursed @ %s:%d\n",
752 			    lkp->lk_object.lo_name, file, line);
753 		break;
754 	case KA_UNLOCKED:
755 		if (td == curthread || td == LK_KERNPROC)
756 			panic("Lock %s exclusively locked @ %s:%d\n",
757 			    lkp->lk_object.lo_name, file, line);
758 		break;
759 	case KA_HELD:
760 	case KA_UNHELD:
761 		if (LOCKMGR_UNHELD(x)) {
762 			if (what & KA_HELD)
763 				panic("Lock %s not locked by anyone @ %s:%d\n",
764 				    lkp->lk_object.lo_name, file, line);
765 		} else if (what & KA_UNHELD)
766 			panic("Lock %s locked by someone @ %s:%d\n",
767 			    lkp->lk_object.lo_name, file, line);
768 		break;
769 	default:
770 		panic("Unknown lockmgr lock assertion: 0x%x @ %s:%d", what,
771 		    file, line);
772 	}
773 }
774 #endif	/* INVARIANT_SUPPORT */
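
/*
 * Illustrative only: with INVARIANTS enabled, subsystems embedding a
 * lockmgr lock can sanity-check ownership with the KA_* flags, for
 * instance (using a hypothetical "fp->f_lock"):
 *
 *	_lockmgr_assert(&fp->f_lock, KA_XLOCKED | KA_NOTRECURSED,
 *	    __FILE__, __LINE__);
 *
 * This panics unless the lock is exclusively held (by the caller or by
 * LK_KERNPROC) and not recursed.
 */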
775 
776 #ifdef DDB
777 /*
778  * Check to see if a thread that is blocked on a sleep queue is actually
779  * blocked on a 'struct lock'.  If so, output some details and return true.
780  * If the lock has an exclusive owner, return that in *ownerp.
781  */
782 int
783 lockmgr_chain(struct thread *td, struct thread **ownerp)
784 {
785 	struct lock *lkp;
786 
787 	lkp = td->td_wchan;
788 
789 	/* Simple test to see if wchan points to a lockmgr lock. */
790 	if (LOCK_CLASS(&lkp->lk_object) == &lock_class_lockmgr &&
791 	    lkp->lk_wmesg == td->td_wmesg)
792 		goto ok;
793 
794 	/*
795 	 * If this thread is doing a DRAIN, then it would be asleep on
796 	 * &lkp->lk_flags rather than lkp.
797 	 */
798 	lkp = (struct lock *)((char *)td->td_wchan -
799 	    offsetof(struct lock, lk_flags));
800 	if (LOCK_CLASS(&lkp->lk_object) == &lock_class_lockmgr &&
801 	    lkp->lk_wmesg == td->td_wmesg && (lkp->lk_flags & LK_WAITDRAIN))
802 		goto ok;
803 
804 	/* Doesn't seem to be a lockmgr lock. */
805 	return (0);
806 
807 ok:
808 	/* Ok, we think we have a lockmgr lock, so output some details. */
809 	db_printf("blocked on lk \"%s\" ", lkp->lk_wmesg);
810 	if (lkp->lk_sharecount) {
811 		db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
812 		*ownerp = NULL;
813 	} else {
814 		db_printf("EXCL (count %d)\n", lkp->lk_exclusivecount);
815 		*ownerp = lkp->lk_lockholder;
816 	}
817 	return (1);
818 }
819 
820 void
821 db_show_lockmgr(struct lock_object *lock)
822 {
823 	struct thread *td;
824 	struct lock *lkp;
825 
826 	lkp = (struct lock *)lock;
827 
828 	db_printf(" lock type: %s\n", lkp->lk_wmesg);
829 	db_printf(" state: ");
830 	if (lkp->lk_sharecount)
831 		db_printf("SHARED (count %d)\n", lkp->lk_sharecount);
832 	else if (lkp->lk_flags & LK_HAVE_EXCL) {
833 		td = lkp->lk_lockholder;
834 		db_printf("EXCL (count %d) %p ", lkp->lk_exclusivecount, td);
835 		db_printf("(tid %d, pid %d, \"%s\")\n", td->td_tid,
836 		    td->td_proc->p_pid, td->td_name);
837 	} else
838 		db_printf("UNLOCKED\n");
839 	if (lkp->lk_waitcount > 0)
840 		db_printf(" waiters: %d\n", lkp->lk_waitcount);
841 }
842 #endif
843