xref: /freebsd/sys/kern/kern_lock.c (revision ceaec73d406831b1251babb61675df0a1aa54a31)
/*-
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */
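
/*
 * Illustrative sketch (not compiled): the basic life cycle of a lockmgr
 * lock as implemented below -- initialize it, then take and release shared
 * and exclusive holds.  The lock variable, wait message, and function name
 * are hypothetical; the calls are the API defined in this file.
 */
#if 0
static struct lock example_lk;		/* hypothetical lock */

static void
example_basic_usage(struct thread *td)
{

	lockinit(&example_lk, PVFS, "exmplk", 0, 0);

	/* Shared hold: sleeps while an exclusive holder or waiter exists. */
	if (lockmgr(&example_lk, LK_SHARED, NULL, td) == 0) {
		/* ... read-only access to the protected object ... */
		lockmgr(&example_lk, LK_RELEASE, NULL, td);
	}

	/* Exclusive hold: sleeps until all shared holders have gone away. */
	if (lockmgr(&example_lk, LK_EXCLUSIVE, NULL, td) == 0) {
		/* ... modify the protected object ... */
		lockmgr(&example_lk, LK_RELEASE, NULL, td);
	}
}
#endif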

#define	COUNT(td, x)	if ((td)) (td)->td_locks += (x)
#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
	LK_SHARE_NONZERO | LK_WAIT_NONZERO)

static int acquire(struct lock **lkpp, int extflags, int wanted);
static int acquiredrain(struct lock *lkp, int extflags);

static __inline void
sharelock(struct thread *td, struct lock *lkp, int incr)
{

	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
	COUNT(td, incr);
}

static __inline void
shareunlock(struct thread *td, struct lock *lkp, int decr)
{

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	COUNT(td, -decr);
	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			wakeup(lkp);
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
}

static int
acquire(struct lock **lkpp, int extflags, int wanted)
{
	struct lock *lkp = *lkpp;
	int error;
	CTR3(KTR_LOCK,
	    "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x",
	    lkp, extflags, wanted);

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted))
		return EBUSY;
	error = 0;
	while ((lkp->lk_flags & wanted) != 0) {
		CTR2(KTR_LOCK,
		    "acquire(): lkp == %p, lk_flags == 0x%x sleeping",
		    lkp, lkp->lk_flags);
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;
		error = msleep(lkp, lkp->lk_interlock, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		lkp->lk_waitcount--;
		if (lkp->lk_waitcount == 0)
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
		if (error)
			break;
		if (extflags & LK_SLEEPFAIL) {
			error = ENOLCK;
			break;
		}
		if (lkp->lk_newlock != NULL) {
			mtx_lock(lkp->lk_newlock->lk_interlock);
			mtx_unlock(lkp->lk_interlock);
			if (lkp->lk_waitcount == 0)
				wakeup((void *)(&lkp->lk_newlock));
			*lkpp = lkp = lkp->lk_newlock;
		}
	}
	mtx_assert(lkp->lk_interlock, MA_OWNED);
	return (error);
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
#ifndef	DEBUG_LOCKS
lockmgr(lkp, flags, interlkp, td)
#else
debuglockmgr(lkp, flags, interlkp, td, name, file, line)
#endif
	struct lock *lkp;
	u_int flags;
	struct mtx *interlkp;
	struct thread *td;
#ifdef	DEBUG_LOCKS
	const char *name;	/* Name of lock function */
	const char *file;	/* Name of file call is from */
	int line;		/* Line number in file */
#endif
{
	int error;
	struct thread *thr;
	int extflags, lockflags;

	error = 0;
	if (td == NULL)
		thr = LK_KERNPROC;
	else
		thr = td;

	if ((flags & LK_INTERNAL) == 0)
		mtx_lock(lkp->lk_interlock);
#ifdef DEBUG_LOCKS
	CTR6(KTR_LOCK,
	    "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), flags == 0x%x, "
	    "td == %p %s:%d", lkp, lkp->lk_wmesg, flags, td, file, line);
#else
	CTR6(KTR_LOCK,
	    "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), owner == %p, exclusivecount == %d, flags == 0x%x, "
	    "td == %p", lkp, lkp->lk_wmesg, lkp->lk_lockholder,
	    lkp->lk_exclusivecount, flags, td);
#endif

	if (flags & LK_INTERLOCK) {
		mtx_assert(interlkp, MA_OWNED | MA_NOTRECURSED);
		mtx_unlock(interlkp);
	}

	if ((flags & (LK_NOWAIT|LK_RELEASE)) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
		    &lkp->lk_interlock->mtx_object,
		    "Acquiring lockmgr lock \"%s\"", lkp->lk_wmesg);

	if (panicstr != NULL) {
		mtx_unlock(lkp->lk_interlock);
		return (0);
	}
	if ((lkp->lk_flags & LK_NOSHARE) &&
	    (flags & LK_TYPE_MASK) == LK_SHARED) {
		flags &= ~LK_TYPE_MASK;
		flags |= LK_EXCLUSIVE;
	}
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if TDP_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests (but not the exclusive
		 * lock itself).
		 */
		if (lkp->lk_lockholder != thr) {
			lockflags = LK_HAVE_EXCL;
			if (td != NULL && !(td->td_pflags & TDP_DEADLKTREAT))
				lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, lockflags);
			if (error)
				break;
			sharelock(td, lkp, 1);
#if defined(DEBUG_LOCKS)
			lkp->lk_slockholder = thr;
			lkp->lk_sfilename = file;
			lkp->lk_slineno = line;
			lkp->lk_slockername = name;
#endif
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(td, lkp, 1);
		/* FALLTHROUGH downgrade */

	case LK_DOWNGRADE:
		KASSERT(lkp->lk_lockholder == thr && lkp->lk_exclusivecount != 0,
			("lockmgr: not holding exclusive lock "
			"(owner thread (%p) != thread (%p), exclcnt (%d) != 0)",
			lkp->lk_lockholder, thr, lkp->lk_exclusivecount));
		sharelock(td, lkp, lkp->lk_exclusivecount);
		COUNT(td, -lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			shareunlock(td, lkp, 1);
			error = EBUSY;
			break;
		}
		/* FALLTHROUGH normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if (lkp->lk_lockholder == thr)
			panic("lockmgr: upgrade exclusive lock");
		if (lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade without shared");
		shareunlock(td, lkp, 1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request an upgrade and wait for the shared count
			 * to drop to zero, then take the exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(&lkp, extflags, LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error) {
				if ((lkp->lk_flags & (LK_WANT_EXCL |
				    LK_WAIT_NONZERO)) ==
				    (LK_WANT_EXCL | LK_WAIT_NONZERO))
					wakeup((void *)lkp);
				break;
			}
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = thr;
			lkp->lk_exclusivecount = 1;
			COUNT(td, 1);
#if defined(DEBUG_LOCKS)
			lkp->lk_filename = file;
			lkp->lk_lineno = line;
			lkp->lk_lockername = name;
#endif
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if ((lkp->lk_flags & (LK_SHARE_NONZERO | LK_WAIT_NONZERO)) ==
		    LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		/* FALLTHROUGH exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == thr && thr != LK_KERNPROC) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
				panic("lockmgr: locking against myself");
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				COUNT(td, 1);
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
		    LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(&lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(&lkp, extflags,
		    LK_HAVE_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error) {
			if (lkp->lk_flags & LK_WAIT_NONZERO)
				wakeup((void *)lkp);
			break;
		}
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		COUNT(td, 1);
#if defined(DEBUG_LOCKS)
		lkp->lk_filename = file;
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;
#endif
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (lkp->lk_lockholder != thr &&
			    lkp->lk_lockholder != LK_KERNPROC) {
				panic("lockmgr: thread %p, not %s %p unlocking",
				    thr, "exclusive lock holder",
				    lkp->lk_lockholder);
			}
			if (lkp->lk_lockholder != LK_KERNPROC)
				COUNT(td, -1);
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
				lkp->lk_exclusivecount = 0;
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO)
			shareunlock(td, lkp, 1);
		if (lkp->lk_flags & LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == thr)
			panic("lockmgr: draining against myself");

		error = acquiredrain(lkp, extflags);
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = thr;
		lkp->lk_exclusivecount = 1;
		COUNT(td, 1);
#if defined(DEBUG_LOCKS)
		lkp->lk_filename = file;
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;
#endif
		break;

	default:
		mtx_unlock(lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) &&
	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	mtx_unlock(lkp->lk_interlock);
	return (error);
}
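
/*
 * Illustrative sketch (not compiled) of common lockmgr() call patterns
 * handled above: an exclusive request combined with LK_INTERLOCK, and a
 * shared-to-exclusive upgrade followed by a downgrade.  The lock, the
 * interlock mutex, and the function name are hypothetical.
 */
#if 0
static void
example_excl_and_upgrade(struct lock *lkp, struct mtx *ilk, struct thread *td)
{

	/*
	 * LK_INTERLOCK: 'ilk' is held on entry and is dropped by lockmgr()
	 * before it sleeps, closing the race between the caller's check of
	 * the protected state and the lock request.
	 */
	mtx_lock(ilk);
	if (lockmgr(lkp, LK_EXCLUSIVE | LK_INTERLOCK, ilk, td) == 0) {
		/* ... modify the protected object ... */
		lockmgr(lkp, LK_RELEASE, NULL, td);
	}

	/*
	 * Upgrade an existing shared hold; on failure the shared lock has
	 * already been released, as the LK_UPGRADE comment above notes.
	 */
	if (lockmgr(lkp, LK_SHARED, NULL, td) == 0) {
		if (lockmgr(lkp, LK_UPGRADE, NULL, td) != 0)
			return;		/* shared lock already dropped */
		/* ... exclusive access ... */
		lockmgr(lkp, LK_DOWNGRADE, NULL, td);
		lockmgr(lkp, LK_RELEASE, NULL, td);
	}
}
#endif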

static int
acquiredrain(struct lock *lkp, int extflags)
{
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
		return EBUSY;
	}
	while (lkp->lk_flags & LK_ALL) {
		lkp->lk_flags |= LK_WAITDRAIN;
		error = msleep(&lkp->lk_flags, lkp->lk_interlock, lkp->lk_prio,
			lkp->lk_wmesg,
			((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		if (error)
			return error;
		if (extflags & LK_SLEEPFAIL) {
			return ENOLCK;
		}
	}
	return 0;
}

/*
 * Transfer any waiting processes from one lock to another.
 */
void
transferlockers(from, to)
	struct lock *from;
	struct lock *to;
{

	KASSERT(from != to, ("lock transfer to self"));
	KASSERT((from->lk_flags & LK_WAITDRAIN) == 0, ("transfer draining lock"));

	mtx_lock(from->lk_interlock);
	if (from->lk_waitcount == 0) {
		mtx_unlock(from->lk_interlock);
		return;
	}
	from->lk_newlock = to;
	wakeup((void *)from);
	msleep(&from->lk_newlock, from->lk_interlock, from->lk_prio,
	    "lkxfer", 0);
	from->lk_newlock = NULL;
	from->lk_flags &= ~(LK_WANT_EXCL | LK_WANT_UPGRADE);
	KASSERT(from->lk_waitcount == 0, ("active lock"));
	mtx_unlock(from->lk_interlock);
}
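
/*
 * Illustrative sketch (not compiled): moving sleepers from one lock to
 * another with transferlockers(), for the case where the object a thread
 * is waiting on ends up protected by a different lock while it sleeps.
 * The two lock pointers and the function name are hypothetical.
 */
#if 0
static void
example_transfer(struct lock *oldlk, struct lock *newlk)
{

	/* Any thread sleeping in lockmgr() on oldlk resumes on newlk. */
	transferlockers(oldlk, newlk);
}
#endif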

/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	const char *wmesg;
	int timo;
	int flags;
{
	CTR5(KTR_LOCK, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", "
	    "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags);

	lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder);
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_prio = prio;
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOPROC;
	lkp->lk_newlock = NULL;
#ifdef DEBUG_LOCKS
	lkp->lk_filename = "none";
	lkp->lk_lockername = "never exclusive locked";
	lkp->lk_lineno = 0;
	lkp->lk_slockholder = LK_NOPROC;
	lkp->lk_sfilename = "none";
	lkp->lk_slockername = "never share locked";
	lkp->lk_slineno = 0;
#endif
}
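
/*
 * Illustrative sketch (not compiled): initializing a lock with extension
 * flags and making a non-blocking and a timed request against it.  The
 * lock variable, wait message, and timeout are hypothetical; the flags are
 * the LK_EXTFLG_MASK bits that lockinit() above records in lk_flags.
 */
#if 0
static struct lock example_timed_lk;		/* hypothetical lock */

static void
example_lockinit_usage(struct thread *td)
{
	int error;

	/* Recursion allowed; timed requests sleep for at most 5 seconds. */
	lockinit(&example_timed_lk, PVFS, "extimlk", 5 * hz,
	    LK_CANRECURSE | LK_TIMELOCK);

	/* LK_NOWAIT: fail with EBUSY instead of sleeping. */
	error = lockmgr(&example_timed_lk, LK_EXCLUSIVE | LK_NOWAIT, NULL, td);
	if (error == 0)
		lockmgr(&example_timed_lk, LK_RELEASE, NULL, td);

	/* With LK_TIMELOCK set at lockinit() time, this sleep is bounded. */
	error = lockmgr(&example_timed_lk, LK_SHARED, NULL, td);
	if (error == 0)
		lockmgr(&example_timed_lk, LK_RELEASE, NULL, td);
}
#endif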

/*
 * Destroy a lock.
 */
void
lockdestroy(lkp)
	struct lock *lkp;
{
	CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
	    lkp, lkp->lk_wmesg);
}
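
/*
 * Illustrative sketch (not compiled): tearing down a lock.  LK_DRAIN waits
 * for every current holder and waiter to go away and leaves the caller
 * holding the lock exclusively, so the release and lockdestroy() cannot
 * race with late lockmgr() calls.  The lock pointer and function name are
 * hypothetical.
 */
#if 0
static void
example_teardown(struct lock *lkp, struct thread *td)
{

	if (lockmgr(lkp, LK_DRAIN, NULL, td) == 0)
		lockmgr(lkp, LK_RELEASE, NULL, td);
	lockdestroy(lkp);
}
#endif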

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp, td)
	struct lock *lkp;
	struct thread *td;
{
	int lock_type = 0;

	mtx_lock(lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0) {
		if (td == NULL || lkp->lk_lockholder == td)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	mtx_unlock(lkp->lk_interlock);
	return (lock_type);
}
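
/*
 * Illustrative sketch (not compiled): using lockstatus() to assert that
 * the calling thread holds a lock, in the style of the vnode locking
 * assertions.  The wrapper function is hypothetical.
 */
#if 0
static void
example_assert_locked(struct lock *lkp, struct thread *td)
{
	int status;

	status = lockstatus(lkp, td);
	KASSERT(status == LK_EXCLUSIVE || status == LK_SHARED,
	    ("example_assert_locked: lock %p not held (status %d)",
	    lkp, status));
}
#endif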

/*
 * Determine the number of holders of a lock.
 */
int
lockcount(lkp)
	struct lock *lkp;
{
	int count;

	mtx_lock(lkp->lk_interlock);
	count = lkp->lk_exclusivecount + lkp->lk_sharecount;
	mtx_unlock(lkp->lk_interlock);
	return (count);
}

/*
 * Print out information about the state of a lock. Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by thread %p (pid %d)",
		    lkp->lk_wmesg, lkp->lk_exclusivecount,
		    lkp->lk_lockholder, lkp->lk_lockholder->td_proc->p_pid);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}