/*-
 * See the file LICENSE for redistribution information.
 *
 * Copyright (c) 1996, 1997, 1998
 *	Sleepycat Software.  All rights reserved.
 */

#include "config.h"

#ifndef lint
static const char sccsid[] = "@(#)lock_region.c	10.21 (Sleepycat) 10/19/98";
#endif /* not lint */

#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>

#include <ctype.h>
#include <errno.h>
#include <string.h>
#endif

#include "db_int.h"
#include "shqueue.h"
#include "db_shash.h"
#include "lock.h"
#include "common_ext.h"

static u_int32_t __lock_count_locks __P((DB_LOCKREGION *));
static u_int32_t __lock_count_objs __P((DB_LOCKREGION *));
static void	 __lock_dump_locker __P((DB_LOCKTAB *, DB_LOCKOBJ *, FILE *));
static void	 __lock_dump_object __P((DB_LOCKTAB *, DB_LOCKOBJ *, FILE *));
static const char *
		 __lock_dump_status __P((db_status_t));
static void	 __lock_reset_region __P((DB_LOCKTAB *));
static int	 __lock_tabinit __P((DB_ENV *, DB_LOCKREGION *));

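/*
 * lock_open --
 *	Create or join a lock region, returning a handle to it in "ltp".
 */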
int
lock_open(path, flags, mode, dbenv, ltp)
	const char *path;
	u_int32_t flags;
	int mode;
	DB_ENV *dbenv;
	DB_LOCKTAB **ltp;
{
	DB_LOCKTAB *lt;
	u_int32_t lock_modes, maxlocks, regflags;
	int ret;

	/* Validate arguments. */
#ifdef HAVE_SPINLOCKS
#define	OKFLAGS	(DB_CREATE | DB_THREAD)
#else
#define	OKFLAGS	(DB_CREATE)
#endif
	if ((ret = __db_fchk(dbenv, "lock_open", flags, OKFLAGS)) != 0)
		return (ret);

	/* Create the lock table structure. */
	if ((ret = __os_calloc(1, sizeof(DB_LOCKTAB), &lt)) != 0)
		return (ret);
	lt->dbenv = dbenv;

	/* Grab the values that we need to compute the region size. */
	lock_modes = DB_LOCK_RW_N;
	maxlocks = DB_LOCK_DEFAULT_N;
	regflags = REGION_SIZEDEF;
	if (dbenv != NULL) {
		if (dbenv->lk_modes != 0) {
			lock_modes = dbenv->lk_modes;
			regflags = 0;
		}
		if (dbenv->lk_max != 0) {
			maxlocks = dbenv->lk_max;
			regflags = 0;
		}
	}

	/* Join/create the lock region. */
	lt->reginfo.dbenv = dbenv;
	lt->reginfo.appname = DB_APP_NONE;
	if (path == NULL)
		lt->reginfo.path = NULL;
	else
		if ((ret = __os_strdup(path, &lt->reginfo.path)) != 0)
			goto err;
	lt->reginfo.file = DB_DEFAULT_LOCK_FILE;
	lt->reginfo.mode = mode;
	lt->reginfo.size =
	    LOCK_REGION_SIZE(lock_modes, maxlocks, __db_tablesize(maxlocks));
	lt->reginfo.dbflags = flags;
	lt->reginfo.addr = NULL;
	lt->reginfo.fd = -1;
	lt->reginfo.flags = regflags;

	if ((ret = __db_rattach(&lt->reginfo)) != 0)
		goto err;

	/* Now set up the pointer to the region. */
	lt->region = lt->reginfo.addr;

	/* Initialize the region if we created it. */
	if (F_ISSET(&lt->reginfo, REGION_CREATED)) {
		lt->region->maxlocks = maxlocks;
		lt->region->nmodes = lock_modes;
		if ((ret = __lock_tabinit(dbenv, lt->region)) != 0)
			goto err;
	} else {
		/* Check for an unexpected region. */
		if (lt->region->magic != DB_LOCKMAGIC) {
			__db_err(dbenv,
			    "lock_open: %s: bad magic number", path);
			ret = EINVAL;
			goto err;
		}
	}

	/* Check for automatic deadlock detection. */
	if (dbenv != NULL && dbenv->lk_detect != DB_LOCK_NORUN) {
		if (lt->region->detect != DB_LOCK_NORUN &&
		    dbenv->lk_detect != DB_LOCK_DEFAULT &&
		    lt->region->detect != dbenv->lk_detect) {
			__db_err(dbenv,
		    "lock_open: incompatible deadlock detector mode");
			ret = EINVAL;
			goto err;
		}
		if (lt->region->detect == DB_LOCK_NORUN)
			lt->region->detect = dbenv->lk_detect;
	}

	/* Set up remaining pointers into region. */
	lt->conflicts = (u_int8_t *)lt->region + sizeof(DB_LOCKREGION);
	lt->hashtab =
	    (DB_HASHTAB *)((u_int8_t *)lt->region + lt->region->hash_off);
	lt->mem = (void *)((u_int8_t *)lt->region + lt->region->mem_off);

	UNLOCK_LOCKREGION(lt);
	*ltp = lt;
	return (0);

err:	if (lt->reginfo.addr != NULL) {
		UNLOCK_LOCKREGION(lt);
		(void)__db_rdetach(&lt->reginfo);
		if (F_ISSET(&lt->reginfo, REGION_CREATED))
			(void)lock_unlink(path, 1, dbenv);
	}

	if (lt->reginfo.path != NULL)
		__os_freestr(lt->reginfo.path);
	__os_free(lt, sizeof(*lt));
	return (ret);
}

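/*
 * Example use of lock_open (a minimal sketch; the path, mode and
 * flag choices are illustrative only):
 *
 *	DB_LOCKTAB *lt;
 *	int ret;
 *
 *	if ((ret = lock_open("envdir", DB_CREATE, 0660, dbenv, &lt)) != 0)
 *		return (ret);
 */
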
/*
 * __lock_panic --
 *	Panic a lock region.
 *
 * PUBLIC: void __lock_panic __P((DB_ENV *));
 */
void
__lock_panic(dbenv)
	DB_ENV *dbenv;
{
	if (dbenv->lk_info != NULL)
		dbenv->lk_info->region->hdr.panic = 1;
}

/*
 * __lock_tabinit --
 *	Initialize the lock region.
 */
static int
__lock_tabinit(dbenv, lrp)
	DB_ENV *dbenv;
	DB_LOCKREGION *lrp;
{
	struct __db_lock *lp;
	struct lock_header *tq_head;
	struct obj_header *obj_head;
	DB_LOCKOBJ *op;
	u_int32_t i, nelements;
	const u_int8_t *conflicts;
	u_int8_t *curaddr;

	conflicts = dbenv == NULL || dbenv->lk_conflicts == NULL ?
	    db_rw_conflicts : dbenv->lk_conflicts;

	lrp->table_size = __db_tablesize(lrp->maxlocks);
	lrp->magic = DB_LOCKMAGIC;
	lrp->version = DB_LOCKVERSION;
	lrp->id = 0;
	/*
	 * These fields (lrp->maxlocks, lrp->nmodes) are initialized
	 * in the caller, since we had to grab those values to size
	 * the region.
	 */
	lrp->need_dd = 0;
	lrp->detect = DB_LOCK_NORUN;
	lrp->numobjs = lrp->maxlocks;
	lrp->nlockers = 0;
	lrp->mem_bytes = ALIGN(STRING_SIZE(lrp->maxlocks), sizeof(size_t));
	lrp->increment = lrp->hdr.size / 2;
	lrp->nconflicts = 0;
	lrp->nrequests = 0;
	lrp->nreleases = 0;
	lrp->ndeadlocks = 0;

	/*
	 * As we write the region, we've got to maintain the alignment
	 * for the structures that follow each chunk.  This information
	 * ends up being encapsulated both in here as well as in the
	 * lock.h file for the XXX_SIZE macros.
	 */
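	/*
	 * The layout, in address order, is:
	 *
	 *	DB_LOCKREGION structure
	 *	conflict matrix	(nmodes * nmodes bytes)
	 *	hash table	(table_size buckets, LOCK_HASH_ALIGN aligned)
	 *	locks		(maxlocks entries, MUTEX_ALIGNMENT aligned)
	 *	lock objects	(maxlocks DB_LOCKOBJ entries)
	 *	string space	(mem_bytes bytes, size_t aligned)
	 */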
	/* Initialize conflict matrix. */
	curaddr = (u_int8_t *)lrp + sizeof(DB_LOCKREGION);
	memcpy(curaddr, conflicts, lrp->nmodes * lrp->nmodes);
	curaddr += lrp->nmodes * lrp->nmodes;

	/*
	 * Initialize hash table.
	 */
	curaddr = (u_int8_t *)ALIGNP(curaddr, LOCK_HASH_ALIGN);
	lrp->hash_off = curaddr - (u_int8_t *)lrp;
	nelements = lrp->table_size;
	__db_hashinit(curaddr, nelements);
	curaddr += nelements * sizeof(DB_HASHTAB);

	/*
	 * Initialize locks onto a free list.  Since locks contain mutexes,
	 * we need to make sure that each lock is aligned on a MUTEX_ALIGNMENT
	 * boundary.
	 */
	curaddr = (u_int8_t *)ALIGNP(curaddr, MUTEX_ALIGNMENT);
	tq_head = &lrp->free_locks;
	SH_TAILQ_INIT(tq_head);

	for (i = 0; i++ < lrp->maxlocks;
	    curaddr += ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT)) {
		lp = (struct __db_lock *)curaddr;
		lp->status = DB_LSTAT_FREE;
		SH_TAILQ_INSERT_HEAD(tq_head, lp, links, __db_lock);
	}

	/* Initialize objects onto a free list. */
	obj_head = &lrp->free_objs;
	SH_TAILQ_INIT(obj_head);

	for (i = 0; i++ < lrp->maxlocks; curaddr += sizeof(DB_LOCKOBJ)) {
		op = (DB_LOCKOBJ *)curaddr;
		SH_TAILQ_INSERT_HEAD(obj_head, op, links, __db_lockobj);
	}

	/*
	 * Initialize the string space; as for all shared memory allocation
	 * regions, this requires size_t alignment, since we store the
	 * lengths of malloc'd areas in the area.
	 */
	curaddr = (u_int8_t *)ALIGNP(curaddr, sizeof(size_t));
	lrp->mem_off = curaddr - (u_int8_t *)lrp;
	__db_shalloc_init(curaddr, lrp->mem_bytes);
	return (0);
}

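/*
 * lock_close --
 *	Detach from the lock region and discard the handle.
 */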
int
lock_close(lt)
	DB_LOCKTAB *lt;
{
	int ret;

	LOCK_PANIC_CHECK(lt);

	if ((ret = __db_rdetach(&lt->reginfo)) != 0)
		return (ret);

	if (lt->reginfo.path != NULL)
		__os_freestr(lt->reginfo.path);
	__os_free(lt, sizeof(*lt));

	return (0);
}

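/*
 * lock_unlink --
 *	Remove the lock region; "force" is passed through to __db_runlink.
 */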
int
lock_unlink(path, force, dbenv)
	const char *path;
	int force;
	DB_ENV *dbenv;
{
	REGINFO reginfo;
	int ret;

	memset(&reginfo, 0, sizeof(reginfo));
	reginfo.dbenv = dbenv;
	reginfo.appname = DB_APP_NONE;
	if (path != NULL && (ret = __os_strdup(path, &reginfo.path)) != 0)
		return (ret);
	reginfo.file = DB_DEFAULT_LOCK_FILE;
	ret = __db_runlink(&reginfo, force);
	if (reginfo.path != NULL)
		__os_freestr(reginfo.path);
	return (ret);
}

/*
 * __lock_validate_region --
 *	Called at every interface to check whether the region has changed
 *	size, and if so, to remap it and reset the process' pointers.
 *
 * PUBLIC: int __lock_validate_region __P((DB_LOCKTAB *));
 */
int
__lock_validate_region(lt)
	DB_LOCKTAB *lt;
{
	int ret;

	if (lt->reginfo.size == lt->region->hdr.size)
		return (0);

	/* Detach/reattach the region. */
	if ((ret = __db_rreattach(&lt->reginfo, lt->region->hdr.size)) != 0)
		return (ret);

	/* Reset region information. */
	lt->region = lt->reginfo.addr;
	__lock_reset_region(lt);

	return (0);
}

/*
 * __lock_grow_region --
 *	We have run out of space; time to grow the region.
 *
 * PUBLIC: int __lock_grow_region __P((DB_LOCKTAB *, int, size_t));
 */
int
__lock_grow_region(lt, which, howmuch)
	DB_LOCKTAB *lt;
	int which;
	size_t howmuch;
{
	struct __db_lock *newl;
	struct lock_header *lock_head;
	struct obj_header *obj_head;
	DB_LOCKOBJ *op;
	DB_LOCKREGION *lrp;
	float lock_ratio, obj_ratio;
	size_t incr, oldsize, used, usedmem;
	u_int32_t i, newlocks, newmem, newobjs, usedlocks, usedobjs;
	u_int8_t *curaddr;
	int ret;

	lrp = lt->region;
	oldsize = lrp->hdr.size;
	incr = lrp->increment;

	/* Figure out how much of each sort of space we have. */
	usedmem = lrp->mem_bytes - __db_shalloc_count(lt->mem);
	usedobjs = lrp->numobjs - __lock_count_objs(lrp);
	usedlocks = lrp->maxlocks - __lock_count_locks(lrp);

	/*
	 * Figure out what fraction of the used space belongs to each
	 * different type of "thing" in the region.  Then partition the
	 * new space up according to this ratio.
	 */
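	/*
	 * For example, if locks account for half of the used bytes and
	 * objects for a quarter, roughly half of the increment becomes
	 * new locks and a quarter new objects; whatever remains is new
	 * string space.
	 */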
	used = usedmem +
	    usedlocks * ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT) +
	    usedobjs * sizeof(DB_LOCKOBJ);

	lock_ratio = usedlocks *
	    ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT) / (float)used;
	obj_ratio = usedobjs * sizeof(DB_LOCKOBJ) / (float)used;

	newlocks = (u_int32_t)(lock_ratio *
	    incr / ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT));
	newobjs = (u_int32_t)(obj_ratio * incr / sizeof(DB_LOCKOBJ));
	newmem = incr -
	    (newobjs * sizeof(DB_LOCKOBJ) +
	    newlocks * ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT));

	/*
	 * Make sure we allocate enough memory for the object being
	 * requested.
	 */
	switch (which) {
	case DB_LOCK_LOCK:
		if (newlocks == 0) {
			newlocks = 10;
			incr += newlocks * sizeof(struct __db_lock);
		}
		break;
	case DB_LOCK_OBJ:
		if (newobjs == 0) {
			newobjs = 10;
			incr += newobjs * sizeof(DB_LOCKOBJ);
		}
		break;
	case DB_LOCK_MEM:
		if (newmem < howmuch * 2) {
			incr += howmuch * 2 - newmem;
			newmem = howmuch * 2;
		}
		break;
	}

	newmem += ALIGN(incr, sizeof(size_t)) - incr;
	incr = ALIGN(incr, sizeof(size_t));

	/*
	 * Since we are going to be allocating locks at the beginning of the
	 * new chunk, we need to make sure that the chunk is MUTEX_ALIGNMENT
	 * aligned.  We did not guarantee this when we created the region, so
	 * we may need to pad the old region by extra bytes to ensure this
	 * alignment.
	 */
	incr += ALIGN(oldsize, MUTEX_ALIGNMENT) - oldsize;

	__db_err(lt->dbenv,
	    "Growing lock region: %lu locks %lu objs %lu bytes",
	    (u_long)newlocks, (u_long)newobjs, (u_long)newmem);

	if ((ret = __db_rgrow(&lt->reginfo, oldsize + incr)) != 0)
		return (ret);
	lt->region = lt->reginfo.addr;
	__lock_reset_region(lt);

	/* Update region parameters. */
	lrp = lt->region;
	lrp->increment = incr << 1;
	lrp->maxlocks += newlocks;
	lrp->numobjs += newobjs;
	lrp->mem_bytes += newmem;

	curaddr = (u_int8_t *)lrp + oldsize;
	curaddr = (u_int8_t *)ALIGNP(curaddr, MUTEX_ALIGNMENT);

	/* Put new locks onto the free list. */
	lock_head = &lrp->free_locks;
	for (i = 0; i++ < newlocks;
	    curaddr += ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT)) {
		newl = (struct __db_lock *)curaddr;
		SH_TAILQ_INSERT_HEAD(lock_head, newl, links, __db_lock);
	}

	/* Put new objects onto the free list. */
	obj_head = &lrp->free_objs;
	for (i = 0; i++ < newobjs; curaddr += sizeof(DB_LOCKOBJ)) {
		op = (DB_LOCKOBJ *)curaddr;
		SH_TAILQ_INSERT_HEAD(obj_head, op, links, __db_lockobj);
	}

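	/*
	 * Treat the remaining new memory as a shalloc chunk: store its
	 * length, then "free" it into the string-space pool.
	 */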
	*((size_t *)curaddr) = newmem - sizeof(size_t);
	curaddr += sizeof(size_t);
	__db_shalloc_free(lt->mem, curaddr);

	return (0);
}

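/*
 * __lock_reset_region --
 *	Recompute the process-local pointers into the region.
 */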
static void
__lock_reset_region(lt)
	DB_LOCKTAB *lt;
{
	lt->conflicts = (u_int8_t *)lt->region + sizeof(DB_LOCKREGION);
	lt->hashtab =
	    (DB_HASHTAB *)((u_int8_t *)lt->region + lt->region->hash_off);
	lt->mem = (void *)((u_int8_t *)lt->region + lt->region->mem_off);
}

/*
 * lock_stat --
 *	Return LOCK statistics.
 */
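/*
 * Example use (a minimal sketch; passing NULL for db_malloc selects
 * the default allocator, and the caller owns the returned structure):
 *
 *	DB_LOCK_STAT *sp;
 *	int ret;
 *
 *	if ((ret = lock_stat(lt, &sp, NULL)) != 0)
 *		return (ret);
 *	printf("lock requests: %lu\n", (u_long)sp->st_nrequests);
 */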
int
lock_stat(lt, gspp, db_malloc)
	DB_LOCKTAB *lt;
	DB_LOCK_STAT **gspp;
	void *(*db_malloc) __P((size_t));
{
	DB_LOCKREGION *rp;
	int ret;

	*gspp = NULL;

	LOCK_PANIC_CHECK(lt);

	if ((ret = __os_malloc(sizeof(**gspp), db_malloc, gspp)) != 0)
		return (ret);

	/* Copy out the global statistics. */
	LOCK_LOCKREGION(lt);

	rp = lt->region;
	(*gspp)->st_magic = rp->magic;
	(*gspp)->st_version = rp->version;
	(*gspp)->st_maxlocks = rp->maxlocks;
	(*gspp)->st_nmodes = rp->nmodes;
	(*gspp)->st_numobjs = rp->numobjs;
	(*gspp)->st_nlockers = rp->nlockers;
	(*gspp)->st_nconflicts = rp->nconflicts;
	(*gspp)->st_nrequests = rp->nrequests;
	(*gspp)->st_nreleases = rp->nreleases;
	(*gspp)->st_ndeadlocks = rp->ndeadlocks;
	(*gspp)->st_region_nowait = rp->hdr.lock.mutex_set_nowait;
	(*gspp)->st_region_wait = rp->hdr.lock.mutex_set_wait;
	(*gspp)->st_refcnt = rp->hdr.refcnt;
	(*gspp)->st_regsize = rp->hdr.size;

	UNLOCK_LOCKREGION(lt);

	return (0);
}

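/*
 * __lock_count_locks --
 *	Return the number of locks on the free list.
 */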
static u_int32_t
__lock_count_locks(lrp)
	DB_LOCKREGION *lrp;
{
	struct __db_lock *newl;
	u_int32_t count;

	count = 0;
	for (newl = SH_TAILQ_FIRST(&lrp->free_locks, __db_lock);
	    newl != NULL;
	    newl = SH_TAILQ_NEXT(newl, links, __db_lock))
		count++;

	return (count);
}

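/*
 * __lock_count_objs --
 *	Return the number of objects on the free list.
 */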
static u_int32_t
__lock_count_objs(lrp)
	DB_LOCKREGION *lrp;
{
	DB_LOCKOBJ *obj;
	u_int32_t count;

	count = 0;
	for (obj = SH_TAILQ_FIRST(&lrp->free_objs, __db_lockobj);
	    obj != NULL;
	    obj = SH_TAILQ_NEXT(obj, links, __db_lockobj))
		count++;

	return (count);
}

#define	LOCK_DUMP_CONF		0x001		/* Conflict matrix. */
#define	LOCK_DUMP_FREE		0x002		/* Display lock free list. */
#define	LOCK_DUMP_LOCKERS	0x004		/* Display lockers. */
#define	LOCK_DUMP_MEM		0x008		/* Display region memory. */
#define	LOCK_DUMP_OBJECTS	0x010		/* Display objects. */
#define	LOCK_DUMP_ALL		0x01f		/* Display all. */

/*
 * __lock_dump_region --
 *	Display the contents of the lock region; "area" is a string of
 *	the characters handled in the switch below, e.g., "A" to dump
 *	everything or "lo" for just lockers and objects.
 *
 * PUBLIC: void __lock_dump_region __P((DB_LOCKTAB *, char *, FILE *));
 */
void
__lock_dump_region(lt, area, fp)
	DB_LOCKTAB *lt;
	char *area;
	FILE *fp;
{
	struct __db_lock *lp;
	DB_LOCKOBJ *op;
	DB_LOCKREGION *lrp;
	u_int32_t flags, i, j;
	int label;

	/* Make it easy to call from the debugger. */
	if (fp == NULL)
		fp = stderr;

	for (flags = 0; *area != '\0'; ++area)
		switch (*area) {
		case 'A':
			LF_SET(LOCK_DUMP_ALL);
			break;
		case 'c':
			LF_SET(LOCK_DUMP_CONF);
			break;
		case 'f':
			LF_SET(LOCK_DUMP_FREE);
			break;
		case 'l':
			LF_SET(LOCK_DUMP_LOCKERS);
			break;
		case 'm':
			LF_SET(LOCK_DUMP_MEM);
			break;
		case 'o':
			LF_SET(LOCK_DUMP_OBJECTS);
			break;
		}

	lrp = lt->region;

	fprintf(fp, "%s\nLock region parameters\n", DB_LINE);
	fprintf(fp, "%s: %lu, %s: %lu, %s: %lu, %s: %lu\n%s: %lu, %s: %lu\n",
	    "table size", (u_long)lrp->table_size,
	    "hash_off", (u_long)lrp->hash_off,
	    "increment", (u_long)lrp->increment,
	    "mem_off", (u_long)lrp->mem_off,
	    "mem_bytes", (u_long)lrp->mem_bytes,
	    "need_dd", (u_long)lrp->need_dd);

	if (LF_ISSET(LOCK_DUMP_CONF)) {
		fprintf(fp, "\n%s\nConflict matrix\n", DB_LINE);
		for (i = 0; i < lrp->nmodes; i++) {
			for (j = 0; j < lrp->nmodes; j++)
				fprintf(fp, "%lu\t",
				    (u_long)lt->conflicts[i * lrp->nmodes + j]);
			fprintf(fp, "\n");
		}
	}

	if (LF_ISSET(LOCK_DUMP_LOCKERS | LOCK_DUMP_OBJECTS)) {
		fprintf(fp, "%s\nLock hash buckets\n", DB_LINE);
		for (i = 0; i < lrp->table_size; i++) {
			label = 1;
			for (op = SH_TAILQ_FIRST(&lt->hashtab[i], __db_lockobj);
			    op != NULL;
			    op = SH_TAILQ_NEXT(op, links, __db_lockobj)) {
				if (LF_ISSET(LOCK_DUMP_LOCKERS) &&
				    op->type == DB_LOCK_LOCKER) {
					if (label) {
						fprintf(fp,
						    "Bucket %lu:\n", (u_long)i);
						label = 0;
					}
					__lock_dump_locker(lt, op, fp);
				}
				if (LF_ISSET(LOCK_DUMP_OBJECTS) &&
				    op->type == DB_LOCK_OBJTYPE) {
					if (label) {
						fprintf(fp,
						    "Bucket %lu:\n", (u_long)i);
						label = 0;
					}
					__lock_dump_object(lt, op, fp);
				}
			}
		}
	}

	if (LF_ISSET(LOCK_DUMP_FREE)) {
		fprintf(fp, "%s\nLock free list\n", DB_LINE);
		for (lp = SH_TAILQ_FIRST(&lrp->free_locks, __db_lock);
		    lp != NULL;
		    lp = SH_TAILQ_NEXT(lp, links, __db_lock))
			fprintf(fp, "0x%lx: %lu\t%lu\t%s\t0x%lx\n", (u_long)lp,
			    (u_long)lp->holder, (u_long)lp->mode,
			    __lock_dump_status(lp->status), (u_long)lp->obj);

		fprintf(fp, "%s\nObject free list\n", DB_LINE);
		for (op = SH_TAILQ_FIRST(&lrp->free_objs, __db_lockobj);
		    op != NULL;
		    op = SH_TAILQ_NEXT(op, links, __db_lockobj))
			fprintf(fp, "0x%lx\n", (u_long)op);
	}

	if (LF_ISSET(LOCK_DUMP_MEM))
		__db_shalloc_dump(lt->mem, fp);
}

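/*
 * __lock_dump_locker --
 *	Display a locker ID and the locks it holds.
 */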
static void
__lock_dump_locker(lt, op, fp)
	DB_LOCKTAB *lt;
	DB_LOCKOBJ *op;
	FILE *fp;
{
	struct __db_lock *lp;
	u_int32_t locker;
	void *ptr;

	ptr = SH_DBT_PTR(&op->lockobj);
	memcpy(&locker, ptr, sizeof(u_int32_t));
	fprintf(fp, "L %lx", (u_long)locker);

	lp = SH_LIST_FIRST(&op->heldby, __db_lock);
	if (lp == NULL) {
		fprintf(fp, "\n");
		return;
	}
	for (; lp != NULL; lp = SH_LIST_NEXT(lp, locker_links, __db_lock))
		__lock_printlock(lt, lp, 0);
}

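/*
 * __lock_dump_object --
 *	Display a locked object along with its holder and waiter lists.
 */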
static void
__lock_dump_object(lt, op, fp)
	DB_LOCKTAB *lt;
	DB_LOCKOBJ *op;
	FILE *fp;
{
	struct __db_lock *lp;
	u_int32_t j;
	u_int8_t *ptr;
	u_int ch;

	ptr = SH_DBT_PTR(&op->lockobj);
	for (j = 0; j < op->lockobj.size; ptr++, j++) {
		ch = *ptr;
		fprintf(fp, isprint(ch) ? "%c" : "\\%o", ch);
	}
	fprintf(fp, "\n");

	fprintf(fp, "H:");
	for (lp = SH_TAILQ_FIRST(&op->holders, __db_lock);
	    lp != NULL;
	    lp = SH_TAILQ_NEXT(lp, links, __db_lock))
		__lock_printlock(lt, lp, 0);
	lp = SH_TAILQ_FIRST(&op->waiters, __db_lock);
	if (lp != NULL) {
		fprintf(fp, "\nW:");
		for (; lp != NULL; lp = SH_TAILQ_NEXT(lp, links, __db_lock))
			__lock_printlock(lt, lp, 0);
	}
}

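/*
 * __lock_dump_status --
 *	Return a printable string for a lock status code.
 */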
static const char *
__lock_dump_status(status)
	db_status_t status;
{
	switch (status) {
	case DB_LSTAT_ABORTED:
		return ("aborted");
	case DB_LSTAT_ERR:
		return ("err");
	case DB_LSTAT_FREE:
		return ("free");
	case DB_LSTAT_HELD:
		return ("held");
	case DB_LSTAT_NOGRANT:
		return ("nogrant");
	case DB_LSTAT_PENDING:
		return ("pending");
	case DB_LSTAT_WAITING:
		return ("waiting");
	}
	return ("unknown status");
}
744