/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module holds the global variables and functions used to maintain
 * lock_object structures.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_mprof.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <machine/cpufunc.h>

SDT_PROVIDER_DEFINE(lock);
SDT_PROBE_DEFINE1(lock, , , starvation, "u_int");

CTASSERT(LOCK_CLASS_MAX == 15);

struct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = {
	&lock_class_mtx_spin,
	&lock_class_mtx_sleep,
	&lock_class_sx,
	&lock_class_rm,
	&lock_class_rm_sleepable,
	&lock_class_rw,
	&lock_class_lockmgr,
};

void
lock_init(struct lock_object *lock, struct lock_class *class, const char *name,
    const char *type, int flags)
{
	int i;

	/* Check for double-init and zero object. */
	KASSERT(flags & LO_NEW || !lock_initialized(lock),
	    ("lock \"%s\" %p already initialized", name, lock));

	/* Look up lock class to find its index. */
	for (i = 0; i < LOCK_CLASS_MAX; i++)
		if (lock_classes[i] == class) {
			lock->lo_flags = i << LO_CLASSSHIFT;
			break;
		}
	KASSERT(i < LOCK_CLASS_MAX, ("unknown lock class %p", class));

	/* Initialize the lock object. */
	lock->lo_name = name;
	lock->lo_flags |= flags | LO_INITIALIZED;
	LOCK_LOG_INIT(lock, 0);
	WITNESS_INIT(lock, (type != NULL) ? type : name);
}
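
/*
 * Illustrative sketch: a lock implementation hands lock_init() the
 * lock_object embedded in its own structure together with one of the
 * classes from lock_classes[] above; a sleep mutex, for example, is
 * set up roughly as
 *
 *	lock_init(&m->lock_object, &lock_class_mtx_sleep, name, type,
 *	    flags);
 *
 * Because lock_init() stores only the class *index* in lo_flags, the
 * class pointer must be one of the lock_classes[] entries.
 */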

void
lock_destroy(struct lock_object *lock)
{

	KASSERT(lock_initialized(lock), ("lock %p is not initialized", lock));
	WITNESS_DESTROY(lock);
	LOCK_LOG_DESTROY(lock, 0);
	lock->lo_flags &= ~LO_INITIALIZED;
}

static SYSCTL_NODE(_debug, OID_AUTO, lock, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "lock debugging");
static SYSCTL_NODE(_debug_lock, OID_AUTO, delay,
    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "lock delay");

static u_int __read_mostly starvation_limit = 131072;
SYSCTL_INT(_debug_lock_delay, OID_AUTO, starvation_limit, CTLFLAG_RW,
    &starvation_limit, 0,
    "Total spin count after which a waiter is considered starving");

static u_int __read_mostly restrict_starvation = 0;
SYSCTL_INT(_debug_lock_delay, OID_AUTO, restrict_starvation, CTLFLAG_RW,
    &restrict_starvation, 0,
    "Reset the delay of a starving waiter back to its base value");

void
lock_delay(struct lock_delay_arg *la)
{
	struct lock_delay_config *lc = la->config;
	u_short i;

	/* Spin for the current delay, then double it up to the cap. */
	for (i = la->delay; i > 0; i--)
		cpu_spinwait();
	la->spin_cnt += la->delay;

	la->delay <<= 1;
	if (__predict_false(la->delay > lc->max))
		la->delay = lc->max;

	if (__predict_false(la->spin_cnt > starvation_limit)) {
		SDT_PROBE1(lock, , , starvation, la->delay);
		if (restrict_starvation)
			la->delay = lc->base;
	}
}
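
/*
 * Illustrative sketch of how a primitive consumes lock_delay():
 * capped exponential backoff for a contended spin loop.  The owner
 * word and cmpset below are hypothetical; real consumers are the
 * mutex, rwlock and sx slow paths.
 *
 *	struct lock_delay_arg lda;
 *
 *	lock_delay_arg_init(&lda, &locks_delay);
 *	while (!atomic_cmpset_acq_ptr(&lk->lk_owner, 0, tid))
 *		lock_delay(&lda);
 *
 * Each call busy-waits for "delay" rounds of cpu_spinwait() and then
 * doubles the delay, so waiters back off quickly under heavy
 * contention while the cap keeps the worst-case latency bounded.
 */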

static u_int
lock_roundup_2(u_int val)
{
	u_int res;

	/* Compute the smallest power of two strictly greater than val. */
	for (res = 1; res <= val; res <<= 1)
		continue;

	return (res);
}

void
lock_delay_default_init(struct lock_delay_config *lc)
{
	u_int v;

	lc->base = 1;
	/*
	 * Scale the cap with the CPU count, computing in u_int so the
	 * intermediate value cannot wrap the u_short "max" field, and
	 * clamp to 32768 (2^15) to stay well within its range.
	 */
	v = lock_roundup_2(mp_ncpus) * 256;
	if (v > 32768)
		v = 32768;
	lc->max = v;
}

struct lock_delay_config __read_frequently locks_delay;
u_short __read_frequently locks_delay_retries;
u_short __read_frequently locks_delay_loops;

SYSCTL_U16(_debug_lock, OID_AUTO, delay_base, CTLFLAG_RW, &locks_delay.base,
    0, "Initial spin count used by lock_delay()");
SYSCTL_U16(_debug_lock, OID_AUTO, delay_max, CTLFLAG_RW, &locks_delay.max,
    0, "Maximum spin count used by lock_delay()");
SYSCTL_U16(_debug_lock, OID_AUTO, delay_retries, CTLFLAG_RW, &locks_delay_retries,
    0, "");
SYSCTL_U16(_debug_lock, OID_AUTO, delay_loops, CTLFLAG_RW, &locks_delay_loops,
    0, "");

static void
locks_delay_init(void *arg __unused)
{

	lock_delay_default_init(&locks_delay);
	locks_delay_retries = 10;
	locks_delay_loops = max(10000, locks_delay.max);
}
LOCK_DELAY_SYSINIT(locks_delay_init);

#ifdef DDB
DB_SHOW_COMMAND(lock, db_show_lock)
{
	struct lock_object *lock;
	struct lock_class *class;

	if (!have_addr)
		return;
	lock = (struct lock_object *)addr;
	if (LO_CLASSINDEX(lock) > LOCK_CLASS_MAX) {
		db_printf("Unknown lock class: %d\n", LO_CLASSINDEX(lock));
		return;
	}
	class = LOCK_CLASS(lock);
	db_printf(" class: %s\n", class->lc_name);
	db_printf(" name: %s\n", lock->lo_name);
	class->lc_ddb_show(lock);
}
#endif
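
/*
 * Example of the ddb command defined above (the address and lock
 * shown are hypothetical):
 *
 *	db> show lock 0xfffff800035abc00
 *	 class: sleep mutex
 *	 name: process lock
 *	 ...
 *
 * The remaining lines come from the class-specific lc_ddb_show
 * callback, e.g. owner and flag details for a mutex.
 */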

#ifdef LOCK_PROFILING

/*
 * One object per-thread for each lock the thread owns.  Tracks individual
 * lock instances.
 */
struct lock_profile_object {
	LIST_ENTRY(lock_profile_object) lpo_link;
	struct lock_object *lpo_obj;
	const char	*lpo_file;
	int		lpo_line;
	uint16_t	lpo_ref;
	uint16_t	lpo_cnt;
	uint64_t	lpo_acqtime;
	uint64_t	lpo_waittime;
	u_int		lpo_contest_locking;
};

/*
 * One lock_prof for each (file, line, lock object) triple.
 */
struct lock_prof {
	SLIST_ENTRY(lock_prof) link;
	struct lock_class *class;
	const char	*file;
	const char	*name;
	int		line;
	int		ticks;
	uintmax_t	cnt_wait_max;
	uintmax_t	cnt_max;
	uintmax_t	cnt_tot;
	uintmax_t	cnt_wait;
	uintmax_t	cnt_cur;
	uintmax_t	cnt_contest_locking;
};

SLIST_HEAD(lphead, lock_prof);

#define	LPROF_HASH_SIZE		4096
#define	LPROF_HASH_MASK		(LPROF_HASH_SIZE - 1)
#define	LPROF_CACHE_SIZE	4096

/*
 * Array of objects and profs for each type of object for each cpu.  Spinlocks
 * are handled separately because a thread may be preempted and acquire a
 * spinlock while in the lock profiling code of a non-spinlock.  In this way
 * we only need a critical section to protect the per-cpu lists.
 */
struct lock_prof_type {
	struct lphead		lpt_lpalloc;
	struct lpohead		lpt_lpoalloc;
	struct lphead		lpt_hash[LPROF_HASH_SIZE];
	struct lock_prof	lpt_prof[LPROF_CACHE_SIZE];
	struct lock_profile_object lpt_objs[LPROF_CACHE_SIZE];
};

struct lock_prof_cpu {
	struct lock_prof_type	lpc_types[2]; /* One for spin, one for other. */
};

DPCPU_DEFINE_STATIC(struct lock_prof_cpu, lp);
#define	LP_CPU_SELF	(DPCPU_PTR(lp))
#define	LP_CPU(cpu)	(DPCPU_ID_PTR((cpu), lp))

volatile int __read_mostly lock_prof_enable;
static volatile int lock_prof_resetting;

#define	LPROF_SBUF_SIZE	256

static int lock_prof_rejected;
static int lock_prof_skipspin;
static int lock_prof_skipcount;

#ifndef USE_CPU_NANOSECONDS
uint64_t
nanoseconds(void)
{
	struct bintime bt;
	uint64_t ns;

	binuptime(&bt);
	/*
	 * From bintime2timespec: bt.frac is a 64-bit binary fraction of
	 * a second (units of 2^-64 s), so this approximates
	 * frac * 10^9 / 2^64 while staying within 64-bit arithmetic.
	 */
	ns = bt.sec * (uint64_t)1000000000;
	ns += ((uint64_t)1000000000 * (uint32_t)(bt.frac >> 32)) >> 32;
	return (ns);
}
#endif

static void
lock_prof_init_type(struct lock_prof_type *type)
{
	int i;

	SLIST_INIT(&type->lpt_lpalloc);
	LIST_INIT(&type->lpt_lpoalloc);
	for (i = 0; i < LPROF_CACHE_SIZE; i++) {
		SLIST_INSERT_HEAD(&type->lpt_lpalloc, &type->lpt_prof[i],
		    link);
		LIST_INSERT_HEAD(&type->lpt_lpoalloc, &type->lpt_objs[i],
		    lpo_link);
	}
}

static void
lock_prof_init(void *arg)
{
	int cpu;

	CPU_FOREACH(cpu) {
		lock_prof_init_type(&LP_CPU(cpu)->lpc_types[0]);
		lock_prof_init_type(&LP_CPU(cpu)->lpc_types[1]);
	}
}
SYSINIT(lockprof, SI_SUB_SMP, SI_ORDER_ANY, lock_prof_init, NULL);

static void
lock_prof_reset_wait(void)
{

	/*
	 * Spin relinquishing our cpu so that lock_prof_reset() may
	 * complete.
	 */
	while (lock_prof_resetting)
		sched_relinquish(curthread);
}

static void
lock_prof_reset(void)
{
	struct lock_prof_cpu *lpc;
	int enabled, i, cpu;

	/*
	 * We race not only with lock acquisition and release but also
	 * with thread exit.  To guarantee that exiting threads do not
	 * leave dangling head pointers behind, they must observe
	 * "resetting" set before "enabled" is cleared.  Otherwise a
	 * releasing thread could skip unlinking an object from its
	 * per-thread list (because profiling looks disabled) without
	 * waiting for the reset below to unlink it either.
	 */
	atomic_store_rel_int(&lock_prof_resetting, 1);
	enabled = lock_prof_enable;
	lock_prof_enable = 0;
	/*
	 * This both publishes lock_prof_enable as disabled and makes
	 * sure every other CPU observes the store before we continue;
	 * threads that have already passed the check are waited out
	 * below.
	 */
	cpus_fence_seq_cst();
	quiesce_all_critical();
	/*
	 * Some objects may have migrated between CPUs.  Clear all links
	 * before we zero the structures.  Some items may still be linked
	 * into per-thread lists as well.
	 */
	CPU_FOREACH(cpu) {
		lpc = LP_CPU(cpu);
		for (i = 0; i < LPROF_CACHE_SIZE; i++) {
			LIST_REMOVE(&lpc->lpc_types[0].lpt_objs[i], lpo_link);
			LIST_REMOVE(&lpc->lpc_types[1].lpt_objs[i], lpo_link);
		}
	}
	CPU_FOREACH(cpu) {
		lpc = LP_CPU(cpu);
		bzero(lpc, sizeof(*lpc));
		lock_prof_init_type(&lpc->lpc_types[0]);
		lock_prof_init_type(&lpc->lpc_types[1]);
	}
	/*
	 * Paired with the fence from cpus_fence_seq_cst().
	 */
	atomic_store_rel_int(&lock_prof_resetting, 0);
	lock_prof_enable = enabled;
}
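
/*
 * Ordering summary for the reset protocol above:
 *
 *	1. resetting = 1	(release store)
 *	2. enable = 0, system-wide seq_cst fence, quiesce
 *	3. unlink and zero all per-CPU state
 *	4. resetting = 0	(release store), restore enable
 *
 * lock_profile_release_lock() only skips touching its per-thread list
 * when it sees profiling disabled *and* resetting in progress, and
 * lock_profile_thread_exit() spins via lock_prof_reset_wait() until
 * resetting clears, so neither can be left holding a pointer into a
 * structure that step 3 is about to zero.
 */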

static void
lock_prof_output(struct lock_prof *lp, struct sbuf *sb)
{
	const char *p;

	/* Strip any leading "../" components from the file name. */
	for (p = lp->file; p != NULL && strncmp(p, "../", 3) == 0; p += 3)
		continue;
	sbuf_printf(sb,
	    "%8ju %9ju %11ju %11ju %11ju %6ju %6ju %2ju %6ju %s:%d (%s:%s)\n",
	    lp->cnt_max / 1000, lp->cnt_wait_max / 1000, lp->cnt_tot / 1000,
	    lp->cnt_wait / 1000, lp->cnt_cur,
	    lp->cnt_cur == 0 ? (uintmax_t)0 :
	    lp->cnt_tot / (lp->cnt_cur * 1000),
	    lp->cnt_cur == 0 ? (uintmax_t)0 :
	    lp->cnt_wait / (lp->cnt_cur * 1000),
	    (uintmax_t)0, lp->cnt_contest_locking,
	    p, lp->line, lp->class->lc_name, lp->name);
}

static void
lock_prof_sum(struct lock_prof *match, struct lock_prof *dst, int hash,
    int spin, int t)
{
	struct lock_prof_type *type;
	struct lock_prof *l;
	int cpu;

	dst->file = match->file;
	dst->line = match->line;
	dst->class = match->class;
	dst->name = match->name;

	CPU_FOREACH(cpu) {
		type = &LP_CPU(cpu)->lpc_types[spin];
		SLIST_FOREACH(l, &type->lpt_hash[hash], link) {
			/* Skip entries already merged during this dump. */
			if (l->ticks == t)
				continue;
			if (l->file != match->file || l->line != match->line ||
			    l->name != match->name)
				continue;
			/* Mark the entry as merged. */
			l->ticks = t;
			if (l->cnt_max > dst->cnt_max)
				dst->cnt_max = l->cnt_max;
			if (l->cnt_wait_max > dst->cnt_wait_max)
				dst->cnt_wait_max = l->cnt_wait_max;
			dst->cnt_tot += l->cnt_tot;
			dst->cnt_wait += l->cnt_wait;
			dst->cnt_cur += l->cnt_cur;
			dst->cnt_contest_locking += l->cnt_contest_locking;
		}
	}
}

static void
lock_prof_type_stats(struct lock_prof_type *type, struct sbuf *sb, int spin,
    int t)
{
	struct lock_prof *l;
	int i;

	for (i = 0; i < LPROF_HASH_SIZE; ++i) {
		SLIST_FOREACH(l, &type->lpt_hash[i], link) {
			struct lock_prof lp = {};

			if (l->ticks == t)
				continue;
			lock_prof_sum(l, &lp, i, spin, t);
			lock_prof_output(&lp, sb);
		}
	}
}

static int
dump_lock_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int error, cpu, t;
	int enabled;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sb = sbuf_new_for_sysctl(NULL, NULL, LPROF_SBUF_SIZE, req);
	sbuf_printf(sb, "\n%8s %9s %11s %11s %11s %6s %6s %2s %6s %s\n",
	    "max", "wait_max", "total", "wait_total", "count", "avg",
	    "wait_avg", "cnt_hold", "cnt_lock", "name");
	enabled = lock_prof_enable;
	lock_prof_enable = 0;
	/*
	 * See the comment in lock_prof_reset().
	 */
	cpus_fence_seq_cst();
	quiesce_all_critical();
	t = ticks;
	CPU_FOREACH(cpu) {
		lock_prof_type_stats(&LP_CPU(cpu)->lpc_types[0], sb, 0, t);
		lock_prof_type_stats(&LP_CPU(cpu)->lpc_types[1], sb, 1, t);
	}
	atomic_thread_fence_rel();
	lock_prof_enable = enabled;

	error = sbuf_finish(sb);
	/* Output a trailing NUL. */
	if (error == 0)
		error = SYSCTL_OUT(req, "", 1);
	sbuf_delete(sb);
	return (error);
}
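
/*
 * Layout of the lines emitted above, for reference; all time columns
 * are accumulated in nanoseconds and printed divided by 1000, i.e. in
 * microseconds:
 *
 *	max		longest single hold time
 *	wait_max	longest single wait for the lock
 *	total		cumulative hold time
 *	wait_total	cumulative wait time
 *	count		number of acquisitions
 *	avg		total / count
 *	wait_avg	wait_total / count
 *	cnt_hold	historical column, always printed as 0
 *	cnt_lock	acquisitions that found the lock contested
 *	name		file:line (class:lockname) of the acquisition site
 */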

static int
enable_lock_prof(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = lock_prof_enable;
	error = sysctl_handle_int(oidp, &v, v, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == lock_prof_enable)
		return (0);
	/* Start each new profiling run from a clean slate. */
	if (v == 1)
		lock_prof_reset();
	lock_prof_enable = !!v;

	return (0);
}

static int
reset_lock_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);
	lock_prof_reset();

	return (0);
}

static struct lock_prof *
lock_profile_lookup(struct lock_object *lo, int spin, const char *file,
    int line)
{
	const char *unknown = "(unknown)";
	struct lock_prof_type *type;
	struct lock_prof *lp;
	struct lphead *head;
	const char *p;
	u_int hash;

	p = file;
	if (p == NULL || *p == '\0')
		p = unknown;
	/* Simple hash over the name and file pointers and the line. */
	hash = (uintptr_t)lo->lo_name * 31 + (uintptr_t)p * 31 + line;
	hash &= LPROF_HASH_MASK;
	type = &LP_CPU_SELF->lpc_types[spin];
	head = &type->lpt_hash[hash];
	SLIST_FOREACH(lp, head, link) {
		if (lp->line == line && lp->file == p &&
		    lp->name == lo->lo_name)
			return (lp);
	}
	lp = SLIST_FIRST(&type->lpt_lpalloc);
	if (lp == NULL) {
		lock_prof_rejected++;
		return (lp);
	}
	SLIST_REMOVE_HEAD(&type->lpt_lpalloc, link);
	lp->file = p;
	lp->line = line;
	lp->class = LOCK_CLASS(lo);
	lp->name = lo->lo_name;
	SLIST_INSERT_HEAD(&type->lpt_hash[hash], lp, link);
	return (lp);
}

static struct lock_profile_object *
lock_profile_object_lookup(struct lock_object *lo, int spin, const char *file,
    int line)
{
	struct lock_profile_object *l;
	struct lock_prof_type *type;
	struct lpohead *head;

	head = &curthread->td_lprof[spin];
	LIST_FOREACH(l, head, lpo_link)
		if (l->lpo_obj == lo && l->lpo_file == file &&
		    l->lpo_line == line)
			return (l);
	type = &LP_CPU_SELF->lpc_types[spin];
	l = LIST_FIRST(&type->lpt_lpoalloc);
	if (l == NULL) {
		lock_prof_rejected++;
		return (NULL);
	}
	LIST_REMOVE(l, lpo_link);
	l->lpo_obj = lo;
	l->lpo_file = file;
	l->lpo_line = line;
	l->lpo_cnt = 0;
	LIST_INSERT_HEAD(head, l, lpo_link);

	return (l);
}

void
lock_profile_obtain_lock_success(struct lock_object *lo, int contested,
    uint64_t waittime, const char *file, int line)
{
	static int lock_prof_count;
	struct lock_profile_object *l;
	int spin;

	if (SCHEDULER_STOPPED())
		return;

	/* Don't reset the timer when/if recursing. */
	if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE))
		return;
	if (lock_prof_skipcount &&
	    (++lock_prof_count % lock_prof_skipcount) != 0)
		return;
	spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
	if (spin && lock_prof_skipspin == 1)
		return;
	critical_enter();
	/* Recheck enabled now that we're in a critical section. */
	if (lock_prof_enable == 0)
		goto out;
	l = lock_profile_object_lookup(lo, spin, file, line);
	if (l == NULL)
		goto out;
	l->lpo_cnt++;
	/* A recursive acquisition keeps the original timestamps. */
	if (++l->lpo_ref > 1)
		goto out;
	l->lpo_contest_locking = contested;
	l->lpo_acqtime = nanoseconds();
	if (waittime && (l->lpo_acqtime > waittime))
		l->lpo_waittime = l->lpo_acqtime - waittime;
	else
		l->lpo_waittime = 0;
out:
	/*
	 * Paired with cpus_fence_seq_cst().
	 */
	atomic_thread_fence_rel();
	critical_exit();
}
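
/*
 * Illustrative call pattern for the hooks in this file; the fast-path
 * helper below is hypothetical, but the mutex, rwlock and sx slow
 * paths follow this shape:
 *
 *	int contested = 0;
 *	uint64_t waittime = 0;
 *
 *	if (!try_fast_acquire(lo)) {
 *		lock_profile_obtain_lock_failed(lo, &contested,
 *		    &waittime);
 *		... spin or block until the lock is acquired ...
 *	}
 *	lock_profile_obtain_lock_success(lo, contested, waittime,
 *	    file, line);
 *
 * lock_profile_release_lock() below then computes the hold time from
 * lpo_acqtime and folds it, together with the recorded wait time,
 * into the matching lock_prof bucket.
 */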

void
lock_profile_thread_exit(struct thread *td)
{
#ifdef INVARIANTS
	struct lock_profile_object *l;

	MPASS(curthread->td_critnest == 0);
#endif
	/*
	 * If lock profiling was disabled we have to wait for reset to
	 * clear our pointers before we can exit safely.
	 */
	lock_prof_reset_wait();
#ifdef INVARIANTS
	LIST_FOREACH(l, &td->td_lprof[0], lpo_link)
		printf("thread still holds lock acquired at %s:%d\n",
		    l->lpo_file, l->lpo_line);
	LIST_FOREACH(l, &td->td_lprof[1], lpo_link)
		printf("thread still holds lock acquired at %s:%d\n",
		    l->lpo_file, l->lpo_line);
#endif
	MPASS(LIST_FIRST(&td->td_lprof[0]) == NULL);
	MPASS(LIST_FIRST(&td->td_lprof[1]) == NULL);
}

void
lock_profile_release_lock(struct lock_object *lo)
{
	struct lock_profile_object *l;
	struct lock_prof_type *type;
	struct lock_prof *lp;
	uint64_t curtime, holdtime;
	struct lpohead *head;
	int spin;

	if (SCHEDULER_STOPPED())
		return;
	if (lo->lo_flags & LO_NOPROFILE)
		return;
	spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
	head = &curthread->td_lprof[spin];
	if (LIST_FIRST(head) == NULL)
		return;
	critical_enter();
	/* Recheck enabled now that we're in a critical section. */
	if (lock_prof_enable == 0 && lock_prof_resetting == 1)
		goto out;
	/*
	 * If lock profiling is not enabled we still want to remove the
	 * lpo from our queue.
	 */
	LIST_FOREACH(l, head, lpo_link)
		if (l->lpo_obj == lo)
			break;
	if (l == NULL)
		goto out;
	if (--l->lpo_ref > 0)
		goto out;
	lp = lock_profile_lookup(lo, spin, l->lpo_file, l->lpo_line);
	if (lp == NULL)
		goto release;
	curtime = nanoseconds();
	if (curtime < l->lpo_acqtime)
		goto release;
	holdtime = curtime - l->lpo_acqtime;

	/*
	 * Record if the lock has been held longer now than ever
	 * before.
	 */
	if (holdtime > lp->cnt_max)
		lp->cnt_max = holdtime;
	if (l->lpo_waittime > lp->cnt_wait_max)
		lp->cnt_wait_max = l->lpo_waittime;
	lp->cnt_tot += holdtime;
	lp->cnt_wait += l->lpo_waittime;
	lp->cnt_contest_locking += l->lpo_contest_locking;
	lp->cnt_cur += l->lpo_cnt;
release:
	LIST_REMOVE(l, lpo_link);
	type = &LP_CPU_SELF->lpc_types[spin];
	LIST_INSERT_HEAD(&type->lpt_lpoalloc, l, lpo_link);
out:
	/*
	 * Paired with cpus_fence_seq_cst().
	 */
	atomic_thread_fence_rel();
	critical_exit();
}

static SYSCTL_NODE(_debug_lock, OID_AUTO, prof,
    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "lock profiling");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipspin, CTLFLAG_RW,
    &lock_prof_skipspin, 0, "Skip profiling on spinlocks.");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipcount, CTLFLAG_RW,
    &lock_prof_skipcount, 0, "Sample approximately every N lock acquisitions.");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &lock_prof_rejected, 0, "Number of rejected profiling records");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, stats,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    dump_lock_prof_stats, "A",
    "Lock profiling statistics");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, reset,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0,
    reset_lock_prof_stats, "I",
    "Reset lock profiling statistics");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, enable,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
    enable_lock_prof, "I",
    "Enable lock profiling");

#endif
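
/*
 * Typical workflow, per LOCK_PROFILING(9): build a kernel with
 * "options LOCK_PROFILING", then
 *
 *	# sysctl debug.lock.prof.enable=1
 *	... exercise the workload of interest ...
 *	# sysctl debug.lock.prof.enable=0
 *	# sysctl debug.lock.prof.stats
 *
 * debug.lock.prof.reset=1 clears the accumulated statistics, and the
 * skipspin/skipcount knobs trade accuracy for lower overhead by
 * sampling only a subset of acquisitions.
 */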