/*-
 * See the file LICENSE for redistribution information.
 *
 * Copyright (c) 1996, 1997, 1998
 *	Sleepycat Software.  All rights reserved.
 */

#pragma ident "%Z%%M% %I% %E% SMI"

#include "config.h"

#ifndef lint
static const char sccsid[] = "@(#)lock_region.c	10.21 (Sleepycat) 10/19/98";
#endif /* not lint */

#ifndef NO_SYSTEM_INCLUDES
#include <sys/types.h>

#include <ctype.h>
#include <errno.h>
#include <string.h>
#endif

#include "db_int.h"
#include "shqueue.h"
#include "db_shash.h"
#include "lock.h"
#include "common_ext.h"

static u_int32_t __lock_count_locks __P((DB_LOCKREGION *));
static u_int32_t __lock_count_objs __P((DB_LOCKREGION *));
static void	 __lock_dump_locker __P((DB_LOCKTAB *, DB_LOCKOBJ *, FILE *));
static void	 __lock_dump_object __P((DB_LOCKTAB *, DB_LOCKOBJ *, FILE *));
static const char *
		 __lock_dump_status __P((db_status_t));
static void	 __lock_reset_region __P((DB_LOCKTAB *));
static int	 __lock_tabinit __P((DB_ENV *, DB_LOCKREGION *));

int
lock_open(path, flags, mode, dbenv, ltp)
	const char *path;
	u_int32_t flags;
	int mode;
	DB_ENV *dbenv;
	DB_LOCKTAB **ltp;
{
	DB_LOCKTAB *lt;
	u_int32_t lock_modes, maxlocks, regflags;
	int ret;

	/* Validate arguments. */
#ifdef HAVE_SPINLOCKS
#define	OKFLAGS	(DB_CREATE | DB_THREAD)
#else
#define	OKFLAGS	(DB_CREATE)
#endif
	if ((ret = __db_fchk(dbenv, "lock_open", flags, OKFLAGS)) != 0)
		return (ret);

	/* Create the lock table structure. */
	if ((ret = __os_calloc(1, sizeof(DB_LOCKTAB), &lt)) != 0)
		return (ret);
	lt->dbenv = dbenv;

	/* Grab the values that we need to compute the region size. */
	lock_modes = DB_LOCK_RW_N;
	maxlocks = DB_LOCK_DEFAULT_N;
	regflags = REGION_SIZEDEF;
	if (dbenv != NULL) {
		if (dbenv->lk_modes != 0) {
			lock_modes = dbenv->lk_modes;
			regflags = 0;
		}
		if (dbenv->lk_max != 0) {
			maxlocks = dbenv->lk_max;
			regflags = 0;
		}
	}

	/* Join/create the lock region. */
	lt->reginfo.dbenv = dbenv;
	lt->reginfo.appname = DB_APP_NONE;
	if (path == NULL)
		lt->reginfo.path = NULL;
	else
		if ((ret = __os_strdup(path, &lt->reginfo.path)) != 0)
			goto err;
	lt->reginfo.file = DB_DEFAULT_LOCK_FILE;
	lt->reginfo.mode = mode;
	lt->reginfo.size =
	    LOCK_REGION_SIZE(lock_modes, maxlocks, __db_tablesize(maxlocks));
	lt->reginfo.dbflags = flags;
	lt->reginfo.addr = NULL;
	lt->reginfo.fd = -1;
	lt->reginfo.flags = regflags;

	if ((ret = __db_rattach(&lt->reginfo)) != 0)
		goto err;

	/* Now set up the pointer to the region. */
	lt->region = lt->reginfo.addr;

	/* Initialize the region if we created it. */
	if (F_ISSET(&lt->reginfo, REGION_CREATED)) {
		lt->region->maxlocks = maxlocks;
		lt->region->nmodes = lock_modes;
		if ((ret = __lock_tabinit(dbenv, lt->region)) != 0)
			goto err;
	} else {
		/* Check for an unexpected region. */
		if (lt->region->magic != DB_LOCKMAGIC) {
			__db_err(dbenv,
			    "lock_open: %s: bad magic number", path);
			ret = EINVAL;
			goto err;
		}
	}

	/* Check for automatic deadlock detection. */
	if (dbenv != NULL && dbenv->lk_detect != DB_LOCK_NORUN) {
		if (lt->region->detect != DB_LOCK_NORUN &&
		    dbenv->lk_detect != DB_LOCK_DEFAULT &&
		    lt->region->detect != dbenv->lk_detect) {
			__db_err(dbenv,
			    "lock_open: incompatible deadlock detector mode");
			ret = EINVAL;
			goto err;
		}
		if (lt->region->detect == DB_LOCK_NORUN)
			lt->region->detect = dbenv->lk_detect;
	}

	/* Set up remaining pointers into region. */
	lt->conflicts = (u_int8_t *)lt->region + sizeof(DB_LOCKREGION);
	lt->hashtab =
	    (DB_HASHTAB *)((u_int8_t *)lt->region + lt->region->hash_off);
	lt->mem = (void *)((u_int8_t *)lt->region + lt->region->mem_off);

	UNLOCK_LOCKREGION(lt);
	*ltp = lt;
	return (0);

err:	if (lt->reginfo.addr != NULL) {
		UNLOCK_LOCKREGION(lt);
		(void)__db_rdetach(&lt->reginfo);
		if (F_ISSET(&lt->reginfo, REGION_CREATED))
			(void)lock_unlink(path, 1, dbenv);
	}

	if (lt->reginfo.path != NULL)
		__os_freestr(lt->reginfo.path);
	__os_free(lt, sizeof(*lt));
	return (ret);
}

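/*
 * Illustrative usage (not part of this file): given a configured
 * DB_ENV *dbenv, a caller joins or creates the lock region, then
 * detaches when done.  The intervening lock_get/lock_put traffic
 * is elided.
 *
 *	DB_LOCKTAB *lt;
 *	int ret;
 *
 *	if ((ret = lock_open(NULL, DB_CREATE, 0660, dbenv, &lt)) != 0)
 *		return (ret);
 *	...
 *	(void)lock_close(lt);
 */
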
/*
 * __lock_panic --
 *	Panic a lock region.
 *
 * PUBLIC: void __lock_panic __P((DB_ENV *));
 */
void
__lock_panic(dbenv)
	DB_ENV *dbenv;
{
	if (dbenv->lk_info != NULL)
		dbenv->lk_info->region->hdr.panic = 1;
}

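/*
 * Layout summary (derived from __lock_tabinit below): the region is
 * one contiguous allocation,
 *
 *	DB_LOCKREGION header
 *	conflict matrix		(nmodes * nmodes bytes)
 *	hash table		(table_size buckets, LOCK_HASH_ALIGN'd)
 *	locks			(maxlocks entries, MUTEX_ALIGNMENT'd)
 *	objects			(numobjs DB_LOCKOBJs)
 *	string space		(mem_bytes, size_t-aligned)
 *
 * with hash_off and mem_off recording the offsets from which the
 * per-process pointers are rebuilt.
 */
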
/*
 * __lock_tabinit --
 *	Initialize the lock region.
 */
static int
__lock_tabinit(dbenv, lrp)
	DB_ENV *dbenv;
	DB_LOCKREGION *lrp;
{
	struct __db_lock *lp;
	struct lock_header *tq_head;
	struct obj_header *obj_head;
	DB_LOCKOBJ *op;
	u_int32_t i, nelements;
	const u_int8_t *conflicts;
	u_int8_t *curaddr;

	conflicts = dbenv == NULL || dbenv->lk_conflicts == NULL ?
	    db_rw_conflicts : dbenv->lk_conflicts;

	lrp->table_size = __db_tablesize(lrp->maxlocks);
	lrp->magic = DB_LOCKMAGIC;
	lrp->version = DB_LOCKVERSION;
	lrp->id = 0;
	/*
	 * These fields (lrp->maxlocks, lrp->nmodes) are initialized
	 * in the caller, since we had to grab those values to size
	 * the region.
	 */
	lrp->need_dd = 0;
	lrp->detect = DB_LOCK_NORUN;
	lrp->numobjs = lrp->maxlocks;
	lrp->nlockers = 0;
	lrp->mem_bytes = ALIGN(STRING_SIZE(lrp->maxlocks), sizeof(size_t));
	lrp->increment = lrp->hdr.size / 2;
	lrp->nconflicts = 0;
	lrp->nrequests = 0;
	lrp->nreleases = 0;
	lrp->ndeadlocks = 0;

	/*
	 * As we write the region, we've got to maintain the alignment
	 * for the structures that follow each chunk.  This information
	 * ends up being encapsulated both in here as well as in the
	 * lock.h file for the XXX_SIZE macros.
	 */
	/* Initialize conflict matrix. */
	curaddr = (u_int8_t *)lrp + sizeof(DB_LOCKREGION);
	memcpy(curaddr, conflicts, lrp->nmodes * lrp->nmodes);
	curaddr += lrp->nmodes * lrp->nmodes;

	/*
	 * Initialize hash table.
	 */
	curaddr = (u_int8_t *)ALIGNP(curaddr, LOCK_HASH_ALIGN);
	lrp->hash_off = curaddr - (u_int8_t *)lrp;
	nelements = lrp->table_size;
	__db_hashinit(curaddr, nelements);
	curaddr += nelements * sizeof(DB_HASHTAB);

	/*
	 * Initialize locks onto a free list.  Since locks contain mutexes,
	 * we need to make sure that each lock is aligned on a
	 * MUTEX_ALIGNMENT boundary.
	 */
	curaddr = (u_int8_t *)ALIGNP(curaddr, MUTEX_ALIGNMENT);
	tq_head = &lrp->free_locks;
	SH_TAILQ_INIT(tq_head);

	for (i = 0; i++ < lrp->maxlocks;
	    curaddr += ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT)) {
		lp = (struct __db_lock *)curaddr;
		lp->status = DB_LSTAT_FREE;
		SH_TAILQ_INSERT_HEAD(tq_head, lp, links, __db_lock);
	}

	/* Initialize objects onto a free list. */
	obj_head = &lrp->free_objs;
	SH_TAILQ_INIT(obj_head);

	for (i = 0; i++ < lrp->maxlocks; curaddr += sizeof(DB_LOCKOBJ)) {
		op = (DB_LOCKOBJ *)curaddr;
		SH_TAILQ_INSERT_HEAD(obj_head, op, links, __db_lockobj);
	}

	/*
	 * Initialize the string space; as for all shared memory allocation
	 * regions, this requires size_t alignment, since we store the
	 * lengths of malloc'd areas in the area.
	 */
	curaddr = (u_int8_t *)ALIGNP(curaddr, sizeof(size_t));
	lrp->mem_off = curaddr - (u_int8_t *)lrp;
	__db_shalloc_init(curaddr, lrp->mem_bytes);
	return (0);
}

int
lock_close(lt)
	DB_LOCKTAB *lt;
{
	int ret;

	LOCK_PANIC_CHECK(lt);

	if ((ret = __db_rdetach(&lt->reginfo)) != 0)
		return (ret);

	if (lt->reginfo.path != NULL)
		__os_freestr(lt->reginfo.path);
	__os_free(lt, sizeof(*lt));

	return (0);
}

int
lock_unlink(path, force, dbenv)
	const char *path;
	int force;
	DB_ENV *dbenv;
{
	REGINFO reginfo;
	int ret;

	memset(&reginfo, 0, sizeof(reginfo));
	reginfo.dbenv = dbenv;
	reginfo.appname = DB_APP_NONE;
	if (path != NULL && (ret = __os_strdup(path, &reginfo.path)) != 0)
		return (ret);
	reginfo.file = DB_DEFAULT_LOCK_FILE;
	ret = __db_runlink(&reginfo, force);
	if (reginfo.path != NULL)
		__os_freestr(reginfo.path);
	return (ret);
}

/*
 * __lock_validate_region --
 *	Called at every interface to check whether the region has changed
 *	size, and if so, to remap the region and reset the process'
 *	pointers.
 *
 * PUBLIC: int __lock_validate_region __P((DB_LOCKTAB *));
 */
int
__lock_validate_region(lt)
	DB_LOCKTAB *lt;
{
	int ret;

	if (lt->reginfo.size == lt->region->hdr.size)
		return (0);

	/* Detach/reattach the region. */
	if ((ret = __db_rreattach(&lt->reginfo, lt->region->hdr.size)) != 0)
		return (ret);

	/* Reset region information. */
	lt->region = lt->reginfo.addr;
	__lock_reset_region(lt);

	return (0);
}

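/*
 * A worked example of the partitioning done below (illustrative
 * numbers): if the used space is 60% locks, 30% objects and 10%
 * string memory, a 64KB increment is split roughly 38KB/19KB/6KB the
 * same way, so the region keeps growing in the proportions the
 * application actually uses.
 */
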
/*
 * __lock_grow_region --
 *	We have run out of space; time to grow the region.
 *
 * PUBLIC: int __lock_grow_region __P((DB_LOCKTAB *, int, size_t));
 */
int
__lock_grow_region(lt, which, howmuch)
	DB_LOCKTAB *lt;
	int which;
	size_t howmuch;
{
	struct __db_lock *newl;
	struct lock_header *lock_head;
	struct obj_header *obj_head;
	DB_LOCKOBJ *op;
	DB_LOCKREGION *lrp;
	float lock_ratio, obj_ratio;
	size_t incr, oldsize, used, usedmem;
	u_int32_t i, newlocks, newmem, newobjs, usedlocks, usedobjs;
	u_int8_t *curaddr;
	int ret;

	lrp = lt->region;
	oldsize = lrp->hdr.size;
	incr = lrp->increment;

	/* Figure out how much of each sort of space we have. */
	usedmem = lrp->mem_bytes - __db_shalloc_count(lt->mem);
	usedobjs = lrp->numobjs - __lock_count_objs(lrp);
	usedlocks = lrp->maxlocks - __lock_count_locks(lrp);

	/*
	 * Figure out what fraction of the used space belongs to each
	 * different type of "thing" in the region.  Then partition the
	 * new space up according to this ratio.
	 */
	used = usedmem +
	    usedlocks * ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT) +
	    usedobjs * sizeof(DB_LOCKOBJ);

	lock_ratio = usedlocks *
	    ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT) / (float)used;
	obj_ratio = usedobjs * sizeof(DB_LOCKOBJ) / (float)used;

	newlocks = (u_int32_t)(lock_ratio *
	    incr / ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT));
	newobjs = (u_int32_t)(obj_ratio * incr / sizeof(DB_LOCKOBJ));
	newmem = incr -
	    (newobjs * sizeof(DB_LOCKOBJ) +
	    newlocks * ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT));

	/*
	 * Make sure we allocate enough memory for the object being
	 * requested.
	 */
	switch (which) {
	case DB_LOCK_LOCK:
		if (newlocks == 0) {
			newlocks = 10;
			incr += newlocks * sizeof(struct __db_lock);
		}
		break;
	case DB_LOCK_OBJ:
		if (newobjs == 0) {
			newobjs = 10;
			incr += newobjs * sizeof(DB_LOCKOBJ);
		}
		break;
	case DB_LOCK_MEM:
		if (newmem < howmuch * 2) {
			incr += howmuch * 2 - newmem;
			newmem = howmuch * 2;
		}
		break;
	}

	newmem += ALIGN(incr, sizeof(size_t)) - incr;
	incr = ALIGN(incr, sizeof(size_t));

	/*
	 * Since we are going to be allocating locks at the beginning of the
	 * new chunk, we need to make sure that the chunk is MUTEX_ALIGNMENT
	 * aligned.  We did not guarantee this when we created the region, so
	 * we may need to pad the old region by extra bytes to ensure this
	 * alignment.
	 */
	incr += ALIGN(oldsize, MUTEX_ALIGNMENT) - oldsize;

	__db_err(lt->dbenv,
	    "Growing lock region: %lu locks %lu objs %lu bytes",
	    (u_long)newlocks, (u_long)newobjs, (u_long)newmem);

	if ((ret = __db_rgrow(&lt->reginfo, oldsize + incr)) != 0)
		return (ret);
	lt->region = lt->reginfo.addr;
	__lock_reset_region(lt);

	/* Update region parameters; double the increment for next time. */
	lrp = lt->region;
	lrp->increment = incr << 1;
	lrp->maxlocks += newlocks;
	lrp->numobjs += newobjs;
	lrp->mem_bytes += newmem;

	curaddr = (u_int8_t *)lrp + oldsize;
	curaddr = (u_int8_t *)ALIGNP(curaddr, MUTEX_ALIGNMENT);

	/* Put new locks onto the free list. */
	lock_head = &lrp->free_locks;
	for (i = 0; i++ < newlocks;
	    curaddr += ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT)) {
		newl = (struct __db_lock *)curaddr;
		SH_TAILQ_INSERT_HEAD(lock_head, newl, links, __db_lock);
	}

	/* Put new objects onto the free list. */
	obj_head = &lrp->free_objs;
	for (i = 0; i++ < newobjs; curaddr += sizeof(DB_LOCKOBJ)) {
		op = (DB_LOCKOBJ *)curaddr;
		SH_TAILQ_INSERT_HEAD(obj_head, op, links, __db_lockobj);
	}

	/*
	 * Hand-build a shared-memory allocation chunk from the remaining
	 * new space: store the usable length in the leading size_t, then
	 * free the chunk into the region's memory pool.
	 */
	*((size_t *)curaddr) = newmem - sizeof(size_t);
	curaddr += sizeof(size_t);
	__db_shalloc_free(lt->mem, curaddr);

	return (0);
}

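/*
 * __lock_reset_region --
 *	Reset the per-process pointers into the (possibly remapped) region.
 */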
static void
__lock_reset_region(lt)
	DB_LOCKTAB *lt;
{
	lt->conflicts = (u_int8_t *)lt->region + sizeof(DB_LOCKREGION);
	lt->hashtab =
	    (DB_HASHTAB *)((u_int8_t *)lt->region + lt->region->hash_off);
	lt->mem = (void *)((u_int8_t *)lt->region + lt->region->mem_off);
}

/*
 * lock_stat --
 *	Return LOCK statistics.
 */
int
lock_stat(lt, gspp, db_malloc)
	DB_LOCKTAB *lt;
	DB_LOCK_STAT **gspp;
	void *(*db_malloc) __P((size_t));
{
	DB_LOCKREGION *rp;
	int ret;

	*gspp = NULL;

	LOCK_PANIC_CHECK(lt);

	if ((ret = __os_malloc(sizeof(**gspp), db_malloc, gspp)) != 0)
		return (ret);

	/* Copy out the global statistics. */
	LOCK_LOCKREGION(lt);

	rp = lt->region;
	(*gspp)->st_magic = rp->magic;
	(*gspp)->st_version = rp->version;
	(*gspp)->st_maxlocks = rp->maxlocks;
	(*gspp)->st_nmodes = rp->nmodes;
	(*gspp)->st_numobjs = rp->numobjs;
	(*gspp)->st_nlockers = rp->nlockers;
	(*gspp)->st_nconflicts = rp->nconflicts;
	(*gspp)->st_nrequests = rp->nrequests;
	(*gspp)->st_nreleases = rp->nreleases;
	(*gspp)->st_ndeadlocks = rp->ndeadlocks;
	(*gspp)->st_region_nowait = rp->hdr.lock.mutex_set_nowait;
	(*gspp)->st_region_wait = rp->hdr.lock.mutex_set_wait;
	(*gspp)->st_refcnt = rp->hdr.refcnt;
	(*gspp)->st_regsize = rp->hdr.size;

	UNLOCK_LOCKREGION(lt);

	return (0);
}

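/*
 * Illustrative caller (not part of this file; assumes the documented
 * convention that a NULL db_malloc falls back to malloc, so free()
 * releases the result):
 *
 *	DB_LOCK_STAT *sp;
 *
 *	if ((ret = lock_stat(lt, &sp, NULL)) == 0) {
 *		printf("deadlocks: %lu\n", (u_long)sp->st_ndeadlocks);
 *		free(sp);
 *	}
 */
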
static u_int32_t
__lock_count_locks(lrp)
	DB_LOCKREGION *lrp;
{
	struct __db_lock *newl;
	u_int32_t count;

	count = 0;
	for (newl = SH_TAILQ_FIRST(&lrp->free_locks, __db_lock);
	    newl != NULL;
	    newl = SH_TAILQ_NEXT(newl, links, __db_lock))
		count++;

	return (count);
}

static u_int32_t
__lock_count_objs(lrp)
	DB_LOCKREGION *lrp;
{
	DB_LOCKOBJ *obj;
	u_int32_t count;

	count = 0;
	for (obj = SH_TAILQ_FIRST(&lrp->free_objs, __db_lockobj);
	    obj != NULL;
	    obj = SH_TAILQ_NEXT(obj, links, __db_lockobj))
		count++;

	return (count);
}

#define	LOCK_DUMP_CONF		0x001	/* Conflict matrix. */
#define	LOCK_DUMP_FREE		0x002	/* Display lock free list. */
#define	LOCK_DUMP_LOCKERS	0x004	/* Display lockers. */
#define	LOCK_DUMP_MEM		0x008	/* Display region memory. */
#define	LOCK_DUMP_OBJECTS	0x010	/* Display objects. */
#define	LOCK_DUMP_ALL		0x01f	/* Display all. */

/*
 * __lock_dump_region --
 *
 * PUBLIC: void __lock_dump_region __P((DB_LOCKTAB *, char *, FILE *));
 */
void
__lock_dump_region(lt, area, fp)
	DB_LOCKTAB *lt;
	char *area;
	FILE *fp;
{
	struct __db_lock *lp;
	DB_LOCKOBJ *op;
	DB_LOCKREGION *lrp;
	u_int32_t flags, i, j;
	int label;

	/* Make it easy to call from the debugger. */
	if (fp == NULL)
		fp = stderr;

	for (flags = 0; *area != '\0'; ++area)
		switch (*area) {
		case 'A':
			LF_SET(LOCK_DUMP_ALL);
			break;
		case 'c':
			LF_SET(LOCK_DUMP_CONF);
			break;
		case 'f':
			LF_SET(LOCK_DUMP_FREE);
			break;
		case 'l':
			LF_SET(LOCK_DUMP_LOCKERS);
			break;
		case 'm':
			LF_SET(LOCK_DUMP_MEM);
			break;
		case 'o':
			LF_SET(LOCK_DUMP_OBJECTS);
			break;
		}

	lrp = lt->region;

	fprintf(fp, "%s\nLock region parameters\n", DB_LINE);
	fprintf(fp, "%s: %lu, %s: %lu, %s: %lu, %s: %lu\n%s: %lu, %s: %lu\n",
	    "table size", (u_long)lrp->table_size,
	    "hash_off", (u_long)lrp->hash_off,
	    "increment", (u_long)lrp->increment,
	    "mem_off", (u_long)lrp->mem_off,
	    "mem_bytes", (u_long)lrp->mem_bytes,
	    "need_dd", (u_long)lrp->need_dd);

	if (LF_ISSET(LOCK_DUMP_CONF)) {
		fprintf(fp, "\n%s\nConflict matrix\n", DB_LINE);
		for (i = 0; i < lrp->nmodes; i++) {
			for (j = 0; j < lrp->nmodes; j++)
				fprintf(fp, "%lu\t",
				    (u_long)lt->conflicts[i * lrp->nmodes + j]);
			fprintf(fp, "\n");
		}
	}

	if (LF_ISSET(LOCK_DUMP_LOCKERS | LOCK_DUMP_OBJECTS)) {
		fprintf(fp, "%s\nLock hash buckets\n", DB_LINE);
		for (i = 0; i < lrp->table_size; i++) {
			label = 1;
			for (op = SH_TAILQ_FIRST(&lt->hashtab[i], __db_lockobj);
			    op != NULL;
			    op = SH_TAILQ_NEXT(op, links, __db_lockobj)) {
				if (LF_ISSET(LOCK_DUMP_LOCKERS) &&
				    op->type == DB_LOCK_LOCKER) {
					if (label) {
						fprintf(fp,
						    "Bucket %lu:\n", (u_long)i);
						label = 0;
					}
					__lock_dump_locker(lt, op, fp);
				}
				if (LF_ISSET(LOCK_DUMP_OBJECTS) &&
				    op->type == DB_LOCK_OBJTYPE) {
					if (label) {
						fprintf(fp,
						    "Bucket %lu:\n", (u_long)i);
						label = 0;
					}
					__lock_dump_object(lt, op, fp);
				}
			}
		}
	}

	if (LF_ISSET(LOCK_DUMP_FREE)) {
		fprintf(fp, "%s\nLock free list\n", DB_LINE);
		for (lp = SH_TAILQ_FIRST(&lrp->free_locks, __db_lock);
		    lp != NULL;
		    lp = SH_TAILQ_NEXT(lp, links, __db_lock))
			fprintf(fp, "0x%lx: %lu\t%lu\t%s\t0x%lx\n", (u_long)lp,
			    (u_long)lp->holder, (u_long)lp->mode,
			    __lock_dump_status(lp->status), (u_long)lp->obj);

		fprintf(fp, "%s\nObject free list\n", DB_LINE);
		for (op = SH_TAILQ_FIRST(&lrp->free_objs, __db_lockobj);
		    op != NULL;
		    op = SH_TAILQ_NEXT(op, links, __db_lockobj))
			fprintf(fp, "0x%lx\n", (u_long)op);
	}

	if (LF_ISSET(LOCK_DUMP_MEM))
		__db_shalloc_dump(lt->mem, fp);
}

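/*
 * For example (illustrative): __lock_dump_region(lt, "co", NULL)
 * writes the conflict matrix and the objects to stderr; "A" selects
 * everything.  From a debugger:
 *
 *	(gdb) call __lock_dump_region(lt, "A", 0)
 */
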
"%c" : "\\%o", ch); 707 } 708 fprintf(fp, "\n"); 709 710 fprintf(fp, "H:"); 711 for (lp = 712 SH_TAILQ_FIRST(&op->holders, __db_lock); 713 lp != NULL; 714 lp = SH_TAILQ_NEXT(lp, links, __db_lock)) 715 __lock_printlock(lt, lp, 0); 716 lp = SH_TAILQ_FIRST(&op->waiters, __db_lock); 717 if (lp != NULL) { 718 fprintf(fp, "\nW:"); 719 for (; lp != NULL; lp = SH_TAILQ_NEXT(lp, links, __db_lock)) 720 __lock_printlock(lt, lp, 0); 721 } 722 } 723 724 static const char * 725 __lock_dump_status(status) 726 db_status_t status; 727 { 728 switch (status) { 729 case DB_LSTAT_ABORTED: 730 return ("aborted"); 731 case DB_LSTAT_ERR: 732 return ("err"); 733 case DB_LSTAT_FREE: 734 return ("free"); 735 case DB_LSTAT_HELD: 736 return ("held"); 737 case DB_LSTAT_NOGRANT: 738 return ("nogrant"); 739 case DB_LSTAT_PENDING: 740 return ("pending"); 741 case DB_LSTAT_WAITING: 742 return ("waiting"); 743 } 744 return ("unknown status"); 745 } 746