/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */
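
/*
 * Illustrative only (a hypothetical caller; "obj_lock", the PVFS priority
 * and the "objlk" wait message are made up for this sketch): the usual
 * shared/exclusive lifecycle is
 *
 *	struct lock obj_lock;
 *
 *	lockinit(&obj_lock, PVFS, "objlk", 0, 0);
 *	...
 *	lockmgr(&obj_lock, LK_SHARED, NULL, curthread);		readers
 *	lockmgr(&obj_lock, LK_RELEASE, NULL, curthread);
 *	...
 *	lockmgr(&obj_lock, LK_EXCLUSIVE, NULL, curthread);	a writer
 *	lockmgr(&obj_lock, LK_RELEASE, NULL, curthread);
 *	...
 *	lockdestroy(&obj_lock);
 */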

#define LOCK_WAIT_TIME 100
#define LOCK_SAMPLE_WAIT 7

#if defined(DIAGNOSTIC)
#define LOCK_INLINE
#else
#define LOCK_INLINE __inline
#endif

#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
	LK_SHARE_NONZERO | LK_WAIT_NONZERO)

/*
 * Mutex array variables.  Rather than each lockmgr lock having its own mutex,
 * share a fixed (at boot time) number of mutexes across all lockmgr locks in
 * order to keep sizeof(struct lock) down.
 */
int lock_mtx_valid;
static struct mtx lock_mtx;

static int acquire(struct lock **lkpp, int extflags, int wanted);
static int apause(struct lock *lkp, int flags);
static int acquiredrain(struct lock *lkp, int extflags);

static void
lockmgr_init(void *dummy __unused)
{
	/*
	 * Initialize the lockmgr protection mutex if it hasn't already been
	 * done.  Unless something changes about kernel startup order, VM
	 * initialization will always cause this mutex to already be
	 * initialized in a call to lockinit().
	 */
	if (lock_mtx_valid == 0) {
		mtx_init(&lock_mtx, "lockmgr", NULL, MTX_DEF);
		lock_mtx_valid = 1;
	}
}
SYSINIT(lmgrinit, SI_SUB_LOCK, SI_ORDER_FIRST, lockmgr_init, NULL)

static LOCK_INLINE void
sharelock(struct lock *lkp, int incr)
{
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
}

static LOCK_INLINE void
shareunlock(struct lock *lkp, int decr)
{

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			wakeup(lkp);
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
}

/*
 * The waitloop optimization: on SMP, briefly spin-poll for the wanted
 * flags to clear before committing to a sleep, on the theory that lockmgr
 * locks are usually held only briefly.  Returns 0 if the flags cleared
 * while we polled, 1 if the caller should block.
 */
static int
apause(struct lock *lkp, int flags)
{
#ifdef SMP
	int i, lock_wait;
#endif

	if ((lkp->lk_flags & flags) == 0)
		return 0;
#ifdef SMP
	for (lock_wait = LOCK_WAIT_TIME; lock_wait > 0; lock_wait--) {
		mtx_unlock(lkp->lk_interlock);
		for (i = LOCK_SAMPLE_WAIT; i > 0; i--)
			if ((lkp->lk_flags & flags) == 0)
				break;
		mtx_lock(lkp->lk_interlock);
		if ((lkp->lk_flags & flags) == 0)
			return 0;
	}
#endif
	return 1;
}

static int
acquire(struct lock **lkpp, int extflags, int wanted)
{
	struct lock *lkp = *lkpp;
	int s, error;

	CTR3(KTR_LOCK,
	    "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x\n",
	    lkp, extflags, wanted);

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
		return EBUSY;
	}

	if (((lkp->lk_flags | extflags) & LK_NOPAUSE) == 0) {
		error = apause(lkp, wanted);
		if (error == 0)
			return 0;
	}

	s = splhigh();
	while ((lkp->lk_flags & wanted) != 0) {
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;
		error = msleep(lkp, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (lkp->lk_waitcount == 1) {
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
			lkp->lk_waitcount = 0;
		} else {
			lkp->lk_waitcount--;
		}
		if (error) {
			splx(s);
			return error;
		}
		if (extflags & LK_SLEEPFAIL) {
			splx(s);
			return ENOLCK;
		}
		if (lkp->lk_newlock != NULL) {
			mtx_lock(lkp->lk_newlock->lk_interlock);
			mtx_unlock(lkp->lk_interlock);
			if (lkp->lk_waitcount == 0)
				wakeup((void *)(&lkp->lk_newlock));
			*lkpp = lkp = lkp->lk_newlock;
		}
	}
	splx(s);
	return 0;
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
#ifndef	DEBUG_LOCKS
lockmgr(lkp, flags, interlkp, td)
#else
debuglockmgr(lkp, flags, interlkp, td, name, file, line)
#endif
	struct lock *lkp;
	u_int flags;
	struct mtx *interlkp;
	struct thread *td;
#ifdef	DEBUG_LOCKS
	const char *name;	/* Name of lock function */
	const char *file;	/* Name of file call is from */
	int line;		/* Line number in file */
#endif
{
	int error;
	struct thread *thr;
	int extflags, lockflags;

	CTR5(KTR_LOCK,
	    "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), flags == 0x%x, "
	    "interlkp == %p, td == %p", lkp, lkp->lk_wmesg, flags, interlkp, td);

	error = 0;
	if (td == NULL)
		thr = LK_KERNPROC;
	else
		thr = td;

	if ((flags & LK_INTERNAL) == 0)
		mtx_lock(lkp->lk_interlock);
	if (flags & LK_INTERLOCK) {
		mtx_assert(interlkp, MA_OWNED | MA_NOTRECURSED);
		mtx_unlock(interlkp);
	}

	if ((flags & (LK_NOWAIT | LK_RELEASE)) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
		    &lkp->lk_interlock->mtx_object,
		    "Acquiring lockmgr lock \"%s\"", lkp->lk_wmesg);

	if (panicstr != NULL) {
		mtx_unlock(lkp->lk_interlock);
		return (0);
	}

	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if TDF_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests (but not the exclusive
		 * lock itself).
		 */
		if (lkp->lk_lockholder != thr) {
			lockflags = LK_HAVE_EXCL;
			mtx_lock_spin(&sched_lock);
			if (td != NULL && !(td->td_flags & TDF_DEADLKTREAT))
				lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE;
			mtx_unlock_spin(&sched_lock);
			error = acquire(&lkp, extflags, lockflags);
			if (error)
				break;
			sharelock(lkp, 1);
#if defined(DEBUG_LOCKS)
			lkp->lk_slockholder = thr;
			lkp->lk_sfilename = file;
			lkp->lk_slineno = line;
			lkp->lk_slockername = name;
#endif
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(lkp, 1);
		/* FALLTHROUGH downgrade */

	case LK_DOWNGRADE:
		KASSERT(lkp->lk_lockholder == thr && lkp->lk_exclusivecount != 0,
			("lockmgr: not holding exclusive lock "
			"(owner thread (%p) != thread (%p), exclcnt (%d) != 0)",
			lkp->lk_lockholder, thr, lkp->lk_exclusivecount));
		sharelock(lkp, lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us in requesting an
		 * upgrade, fail rather than allow an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			shareunlock(lkp, 1);
			error = EBUSY;
			break;
		}
		/* FALLTHROUGH normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one.  If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade).  If we return an error, the lock is
		 * always released.
		 */
		if ((lkp->lk_lockholder == thr) || (lkp->lk_sharecount <= 0))
			panic("lockmgr: upgrade exclusive lock");
		shareunlock(lkp, 1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request the upgrade and wait for the shared
			 * count to drop to zero, then take the exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = thr;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
			lkp->lk_filename = file;
			lkp->lk_lineno = line;
			lkp->lk_lockername = name;
#endif
			break;
		}
		/*
		 * Someone else has requested an upgrade.  Release our shared
		 * lock, awaken the upgrade requestor if we are the last
		 * shared lock, then request an exclusive lock.
		 */
		if ((lkp->lk_flags & (LK_SHARE_NONZERO | LK_WAIT_NONZERO)) ==
		    LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		/* FALLTHROUGH exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == thr && thr != LK_KERNPROC) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
				panic("lockmgr: locking against myself");
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
		     LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(&lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(&lkp, extflags, LK_WANT_UPGRADE | LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
		lkp->lk_filename = file;
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;
#endif
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (lkp->lk_lockholder != thr &&
			    lkp->lk_lockholder != LK_KERNPROC) {
				panic("lockmgr: thread %p, not %s %p unlocking",
				    thr, "exclusive lock holder",
				    lkp->lk_lockholder);
			}
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
				lkp->lk_exclusivecount = 0;
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO)
			shareunlock(lkp, 1);
		if (lkp->lk_flags & LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do.  Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == thr)
			panic("lockmgr: draining against myself");

		error = acquiredrain(lkp, extflags);
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
		lkp->lk_filename = file;
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;
#endif
		break;

	default:
		mtx_unlock(lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) &&
	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	mtx_unlock(lkp->lk_interlock);
	return (error);
}
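
/*
 * Illustrative only (a hypothetical caller): exclusive requests can fail,
 * e.g. with EBUSY under LK_NOWAIT or ENOLCK under LK_SLEEPFAIL, so the
 * return value must be checked:
 *
 *	if (lockmgr(&lk, LK_EXCLUSIVE | LK_NOWAIT, NULL, curthread) != 0)
 *		return (EBUSY);		could not get the lock; bail out
 *	...modify the protected object...
 *	lockmgr(&lk, LK_RELEASE, NULL, curthread);
 *
 * With LK_INTERLOCK the caller passes in a held mutex, which is dropped
 * above once the request has been registered under lk_interlock.
 */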

static int
acquiredrain(struct lock *lkp, int extflags)
{
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
		return EBUSY;
	}

	error = apause(lkp, LK_ALL);
	if (error == 0)
		return 0;

	while (lkp->lk_flags & LK_ALL) {
		lkp->lk_flags |= LK_WAITDRAIN;
		error = msleep(&lkp->lk_flags, lkp->lk_interlock, lkp->lk_prio,
			lkp->lk_wmesg,
			((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (error)
			return error;
		if (extflags & LK_SLEEPFAIL) {
			return ENOLCK;
		}
	}
	return 0;
}
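
/*
 * Illustrative only (a hypothetical caller): LK_DRAIN is the usual prelude
 * to tearing a lock down, since it waits out every holder and waiter and
 * leaves the lock held exclusively:
 *
 *	lockmgr(&lk, LK_DRAIN, NULL, curthread);
 *	lockmgr(&lk, LK_RELEASE, NULL, curthread);
 *	lockdestroy(&lk);
 */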

/*
 * Transfer any waiting processes from one lock to another.
 */
void
transferlockers(from, to)
	struct lock *from;
	struct lock *to;
{

	KASSERT(from != to, ("lock transfer to self"));
	KASSERT((from->lk_flags & LK_WAITDRAIN) == 0, ("transfer draining lock"));
	if (from->lk_waitcount == 0)
		return;
	from->lk_newlock = to;
	wakeup((void *)from);
	msleep(&from->lk_newlock, NULL, from->lk_prio, "lkxfer", 0);
	from->lk_newlock = NULL;
	from->lk_flags &= ~(LK_WANT_EXCL | LK_WANT_UPGRADE);
	KASSERT(from->lk_waitcount == 0, ("active lock"));
}
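
/*
 * The transfer leans on the sleep loop in acquire() above: a waiter that
 * wakes up and finds lk_newlock != NULL migrates itself,
 *
 *	if (lkp->lk_newlock != NULL) {
 *		mtx_lock(lkp->lk_newlock->lk_interlock);
 *		mtx_unlock(lkp->lk_interlock);
 *		if (lkp->lk_waitcount == 0)
 *			wakeup((void *)&lkp->lk_newlock);
 *		*lkpp = lkp = lkp->lk_newlock;
 *	}
 *
 * and the last waiter to leave is the one that ends the msleep() in
 * transferlockers().
 */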

/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	const char *wmesg;
	int timo;
	int flags;
{
	CTR5(KTR_LOCK, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
	    "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);

	if (lock_mtx_valid == 0) {
		mtx_init(&lock_mtx, "lockmgr", NULL, MTX_DEF);
		lock_mtx_valid = 1;
	}
	/*
	 * XXX cleanup - make sure mtxpool is always initialized before
	 * this is ever called.
	 */
	if (mtx_pool_valid) {
		mtx_lock(&lock_mtx);
		lkp->lk_interlock = mtx_pool_alloc();
		mtx_unlock(&lock_mtx);
	} else {
		lkp->lk_interlock = &lock_mtx;
	}
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_prio = prio;
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOPROC;
	lkp->lk_newlock = NULL;
#ifdef DEBUG_LOCKS
	lkp->lk_filename = "none";
	lkp->lk_lockername = "never exclusive locked";
	lkp->lk_lineno = 0;
	lkp->lk_slockholder = LK_NOPROC;
	lkp->lk_sfilename = "none";
	lkp->lk_slockername = "never share locked";
	lkp->lk_slineno = 0;
#endif
}
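
/*
 * Illustrative only (hypothetical values): a vnode-style caller might do
 *
 *	lockinit(&lk, PVFS, "vnlock", 0, LK_NOPAUSE);
 *
 * where PVFS is the priority later passed to msleep(), "vnlock" is the
 * wait message, 0 disables the sleep timeout, and only the LK_EXTFLG_MASK
 * bits of the flags are retained in lk_flags above.
 */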

/*
 * Destroy a lock.
 */
void
lockdestroy(lkp)
	struct lock *lkp;
{
	CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
	    lkp, lkp->lk_wmesg);
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp, td)
	struct lock *lkp;
	struct thread *td;
{
	int lock_type = 0;

	mtx_lock(lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0) {
		if (td == NULL || lkp->lk_lockholder == td)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	mtx_unlock(lkp->lk_interlock);
	return (lock_type);
}
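
/*
 * Illustrative only (a hypothetical caller): the LK_EXCLOTHER return lets
 * a caller distinguish its own exclusive hold from someone else's:
 *
 *	switch (lockstatus(&lk, curthread)) {
 *	case LK_EXCLUSIVE:	we hold it exclusively
 *		break;
 *	case LK_EXCLOTHER:	another thread holds it exclusively
 *		break;
 *	case LK_SHARED:		held shared by someone
 *		break;
 *	}
 */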

/*
 * Determine the number of holders of a lock.
 */
int
lockcount(lkp)
	struct lock *lkp;
{
	int count;

	mtx_lock(lkp->lk_interlock);
	count = lkp->lk_exclusivecount + lkp->lk_sharecount;
	mtx_unlock(lkp->lk_interlock);
	return (count);
}

/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by thread %p",
		    lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}
643