/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */
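
/*
 * Example (illustrative sketch only, not a definitive pattern): typical
 * lockmgr usage, assuming a "struct lock" embedded in some object and PVFS
 * as a sleep priority suitable for the caller:
 *
 *	struct lock lk;
 *
 *	lockinit(&lk, PVFS, "examplk", 0, 0);
 *	if (lockmgr(&lk, LK_EXCLUSIVE, NULL, curthread) == 0) {
 *		... exclusive critical section ...
 *		lockmgr(&lk, LK_RELEASE, NULL, curthread);
 *	}
 */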

#define LOCK_WAIT_TIME 100
#define LOCK_SAMPLE_WAIT 7

#if defined(DIAGNOSTIC)
#define LOCK_INLINE
#else
#define LOCK_INLINE __inline
#endif

#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
	LK_SHARE_NONZERO | LK_WAIT_NONZERO)

/*
 * Mutex array variables.  Rather than each lockmgr lock having its own mutex,
 * share a fixed (at boot time) number of mutexes across all lockmgr locks in
 * order to keep sizeof(struct lock) down.
 */
int lock_mtx_valid;
static struct mtx lock_mtx;

static int acquire(struct lock **lkpp, int extflags, int wanted);
static int apause(struct lock *lkp, int flags);
static int acquiredrain(struct lock *lkp, int extflags);

static void
lockmgr_init(void *dummy __unused)
{
	/*
	 * Initialize the lockmgr protection mutex if it hasn't already been
	 * done.  Unless something changes about kernel startup order, VM
	 * initialization will always cause this mutex to already be
	 * initialized in a call to lockinit().
	 */
	if (lock_mtx_valid == 0) {
		mtx_init(&lock_mtx, "lockmgr", NULL, MTX_DEF);
		lock_mtx_valid = 1;
	}
}
SYSINIT(lmgrinit, SI_SUB_LOCK, SI_ORDER_FIRST, lockmgr_init, NULL)

static LOCK_INLINE void
sharelock(struct lock *lkp, int incr)
{
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
}

static LOCK_INLINE void
shareunlock(struct lock *lkp, int decr)
{

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			wakeup(lkp);
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
}

/*
 * The waitloop optimization: on SMP, briefly poll for the requested flag
 * bits to clear before going to sleep.  The unlocked reads of lk_flags in
 * the inner loop are only a heuristic; the result is re-checked with the
 * interlock held before success is reported.
 */
static int
apause(struct lock *lkp, int flags)
{
#ifdef SMP
	int i, lock_wait;
#endif

	if ((lkp->lk_flags & flags) == 0)
		return 0;
#ifdef SMP
	for (lock_wait = LOCK_WAIT_TIME; lock_wait > 0; lock_wait--) {
		mtx_unlock(lkp->lk_interlock);
		for (i = LOCK_SAMPLE_WAIT; i > 0; i--)
			if ((lkp->lk_flags & flags) == 0)
				break;
		mtx_lock(lkp->lk_interlock);
		if ((lkp->lk_flags & flags) == 0)
			return 0;
	}
#endif
	return 1;
}

static int
acquire(struct lock **lkpp, int extflags, int wanted)
{
	struct lock *lkp = *lkpp;
	int s, error;

	CTR3(KTR_LOCK,
	    "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x\n",
	    lkp, extflags, wanted);

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
		return EBUSY;
	}

	if (((lkp->lk_flags | extflags) & LK_NOPAUSE) == 0) {
		error = apause(lkp, wanted);
		if (error == 0)
			return 0;
	}

	s = splhigh();
	while ((lkp->lk_flags & wanted) != 0) {
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;
		error = msleep(lkp, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (lkp->lk_waitcount == 1) {
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
			lkp->lk_waitcount = 0;
		} else {
			lkp->lk_waitcount--;
		}
		if (error) {
			splx(s);
			return error;
		}
		if (extflags & LK_SLEEPFAIL) {
			splx(s);
			return ENOLCK;
		}
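		/*
		 * The lock's waiters may be in the middle of being handed
		 * off to a replacement lock (see transferlockers()).  If
		 * so, retarget this waiter at the new lock and wake the
		 * transferring thread once the last waiter has moved over.
		 */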
		if (lkp->lk_newlock != NULL) {
			mtx_lock(lkp->lk_newlock->lk_interlock);
			mtx_unlock(lkp->lk_interlock);
			if (lkp->lk_waitcount == 0)
				wakeup((void *)(&lkp->lk_newlock));
			*lkpp = lkp = lkp->lk_newlock;
		}
	}
	splx(s);
	return 0;
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
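/*
 * Illustrative sketch: a caller that covers its own state with a mutex can
 * pass that mutex via LK_INTERLOCK and have it dropped once the lockmgr
 * interlock is held, e.g. (assuming a hypothetical object "obj" with fields
 * "mtx" and "lk"):
 *
 *	mtx_lock(&obj->mtx);
 *	... examine object state ...
 *	lockmgr(&obj->lk, LK_EXCLUSIVE | LK_INTERLOCK, &obj->mtx, td);
 */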
int
#ifndef	DEBUG_LOCKS
lockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
    struct thread *td)
#else
debuglockmgr(struct lock *lkp, u_int flags, struct mtx *interlkp,
    struct thread *td, const char *name, const char *file, int line)
#endif
{
	int error;
	struct thread *thr;
	int extflags, lockflags;

	CTR5(KTR_LOCK,
	    "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), flags == 0x%x, "
	    "interlkp == %p, td == %p", lkp, lkp->lk_wmesg, flags, interlkp, td);

	error = 0;
	if (td == NULL)
		thr = LK_KERNPROC;
	else
		thr = td;

	if ((flags & LK_INTERNAL) == 0)
		mtx_lock(lkp->lk_interlock);
	if (flags & LK_INTERLOCK) {
		mtx_assert(interlkp, MA_OWNED | MA_NOTRECURSED);
		mtx_unlock(interlkp);
	}

	if ((flags & (LK_NOWAIT|LK_RELEASE)) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
		    &lkp->lk_interlock->mtx_object,
		    "Acquiring lockmgr lock \"%s\"", lkp->lk_wmesg);

	if (panicstr != NULL) {
		mtx_unlock(lkp->lk_interlock);
		return (0);
	}

	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if TDF_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests (but not the exclusive
		 * lock itself).
		 */
		if (lkp->lk_lockholder != thr) {
			lockflags = LK_HAVE_EXCL;
			mtx_lock_spin(&sched_lock);
			if (td != NULL && !(td->td_flags & TDF_DEADLKTREAT))
				lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE;
			mtx_unlock_spin(&sched_lock);
			error = acquire(&lkp, extflags, lockflags);
			if (error)
				break;
			sharelock(lkp, 1);
#if defined(DEBUG_LOCKS)
			lkp->lk_slockholder = thr;
			lkp->lk_sfilename = file;
			lkp->lk_slineno = line;
			lkp->lk_slockername = name;
#endif
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(lkp, 1);
		/* FALLTHROUGH downgrade */

	case LK_DOWNGRADE:
		KASSERT(lkp->lk_lockholder == thr && lkp->lk_exclusivecount != 0,
			("lockmgr: not holding exclusive lock "
			"(owner thread (%p) != thread (%p), exclusive count (%d) != 0)",
			lkp->lk_lockholder, thr, lkp->lk_exclusivecount));
		sharelock(lkp, lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another thread is ahead of us requesting an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			shareunlock(lkp, 1);
			error = EBUSY;
			break;
		}
		/* FALLTHROUGH normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one.  If another
		 * shared holder has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade).  If we return an error, the lock
		 * will always have been released.
		 */
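		/*
		 * Illustrative pattern (sketch), assuming the caller already
		 * holds a shared lock on a hypothetical "lk":
		 *
		 *	if (lockmgr(&lk, LK_UPGRADE, NULL, td) != 0) {
		 *		(shared lock is gone; reacquire from scratch)
		 *		lockmgr(&lk, LK_EXCLUSIVE, NULL, td);
		 *	}
		 */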
		if ((lkp->lk_lockholder == thr) || (lkp->lk_sharecount <= 0))
			panic("lockmgr: upgrade exclusive lock");
		shareunlock(lkp, 1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared holder to request an
			 * upgrade, so request the upgrade and wait for the
			 * shared count to drop to zero, then take the
			 * exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = thr;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
			lkp->lk_filename = file;
			lkp->lk_lineno = line;
			lkp->lk_lockername = name;
#endif
			break;
		}
		/*
		 * Someone else has requested an upgrade.  Release our shared
		 * lock, awaken the upgrade requestor if we are the last
		 * shared lock, then request an exclusive lock.
		 */
		if ((lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
			LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		/* FALLTHROUGH exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == thr && thr != LK_KERNPROC) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
				panic("lockmgr: locking against myself");
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
		     LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(&lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(&lkp, extflags, LK_WANT_UPGRADE | LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
		lkp->lk_filename = file;
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;
#endif
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (lkp->lk_lockholder != thr &&
			    lkp->lk_lockholder != LK_KERNPROC) {
				panic("lockmgr: thread %p, not %s %p unlocking",
				    thr, "exclusive lock holder",
				    lkp->lk_lockholder);
			}
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
				lkp->lk_exclusivecount = 0;
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO)
			shareunlock(lkp, 1);
		if (lkp->lk_flags & LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do.  Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
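		/*
		 * Typical use (illustrative sketch only): drain out all
		 * users before tearing a lock down, e.g.
		 *
		 *	lockmgr(&lk, LK_DRAIN, NULL, td);
		 *	lockmgr(&lk, LK_RELEASE, NULL, td);
		 *	lockdestroy(&lk);
		 */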
		if (lkp->lk_lockholder == thr)
			panic("lockmgr: draining against myself");

		error = acquiredrain(lkp, extflags);
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
		lkp->lk_filename = file;
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;
#endif
		break;

	default:
		mtx_unlock(lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) &&
	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	mtx_unlock(lkp->lk_interlock);
	return (error);
}

static int
acquiredrain(struct lock *lkp, int extflags)
{
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
		return EBUSY;
	}

	error = apause(lkp, LK_ALL);
	if (error == 0)
		return 0;

	while (lkp->lk_flags & LK_ALL) {
		lkp->lk_flags |= LK_WAITDRAIN;
		error = msleep(&lkp->lk_flags, lkp->lk_interlock, lkp->lk_prio,
			lkp->lk_wmesg,
			((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (error)
			return error;
		if (extflags & LK_SLEEPFAIL) {
			return ENOLCK;
		}
	}
	return 0;
}

/*
 * Transfer any waiting processes from one lock to another.
 */
void
transferlockers(struct lock *from, struct lock *to)
{

	KASSERT(from != to, ("lock transfer to self"));
	KASSERT((from->lk_flags & LK_WAITDRAIN) == 0,
	    ("transfer draining lock"));
	if (from->lk_waitcount == 0)
		return;
	from->lk_newlock = to;
	wakeup((void *)from);
	msleep(&from->lk_newlock, NULL, from->lk_prio, "lkxfer", 0);
	from->lk_newlock = NULL;
	from->lk_flags &= ~(LK_WANT_EXCL | LK_WANT_UPGRADE);
	KASSERT(from->lk_waitcount == 0, ("active lock"));
}

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, int prio, const char *wmesg, int timo, int flags)
{
	CTR5(KTR_LOCK, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
	    "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);

	if (lock_mtx_valid == 0) {
		mtx_init(&lock_mtx, "lockmgr", NULL, MTX_DEF);
		lock_mtx_valid = 1;
	}
	/*
	 * XXX cleanup - make sure mtxpool is always initialized before
	 * this is ever called.
	 */
	if (mtx_pool_valid) {
		mtx_lock(&lock_mtx);
		lkp->lk_interlock = mtx_pool_alloc();
		mtx_unlock(&lock_mtx);
	} else {
		lkp->lk_interlock = &lock_mtx;
	}
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_prio = prio;
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOPROC;
	lkp->lk_newlock = NULL;
#ifdef DEBUG_LOCKS
	lkp->lk_filename = "none";
	lkp->lk_lockername = "never exclusive locked";
	lkp->lk_lineno = 0;
	lkp->lk_slockholder = LK_NOPROC;
	lkp->lk_sfilename = "none";
	lkp->lk_slockername = "never share locked";
	lkp->lk_slineno = 0;
#endif
}

/*
 * Destroy a lock.
 */
void
lockdestroy(struct lock *lkp)
{
	CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
	    lkp, lkp->lk_wmesg);
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp, struct thread *td)
{
	int lock_type = 0;

	mtx_lock(lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0) {
		if (td == NULL || lkp->lk_lockholder == td)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	mtx_unlock(lkp->lk_interlock);
	return (lock_type);
}
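
/*
 * Example (illustrative sketch): lockstatus() can back an assertion that
 * the calling thread holds a hypothetical lock "lk" exclusively, e.g.
 *
 *	KASSERT(lockstatus(&lk, curthread) == LK_EXCLUSIVE,
 *	    ("caller does not hold the lock exclusively"));
 */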

/*
 * Determine the number of holders of a lock.
 */
int
lockcount(struct lock *lkp)
{
	int count;

	mtx_lock(lkp->lk_interlock);
	count = lkp->lk_exclusivecount + lkp->lk_sharecount;
	mtx_unlock(lkp->lk_interlock);
	return (count);
}

/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display the status of contained locks.
 */
void
lockmgr_printinfo(struct lock *lkp)
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by thread %p",
		    lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}