/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module holds the global variables and functions used to maintain
 * lock_object structures.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_mprof.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker_set.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <machine/cpufunc.h>

CTASSERT(LOCK_CLASS_MAX == 15);

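/*
 * The position of each class in this array must match the class index
 * that lock_init() encodes into lo_flags at LO_CLASSSHIFT; LOCK_CLASS()
 * uses that index to look the class back up here.
 */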
struct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = {
	&lock_class_mtx_spin,
	&lock_class_mtx_sleep,
	&lock_class_sx,
	&lock_class_rm,
	&lock_class_rw,
	&lock_class_lockmgr,
};

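/*
 * Initialize a lock object: record its class index in lo_flags, set its
 * name, and register it with witness.  The caller-supplied flags are
 * merged with LO_INITIALIZED so that double initialization can be
 * detected.
 */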
void
lock_init(struct lock_object *lock, struct lock_class *class, const char *name,
    const char *type, int flags)
{
	int i;

	/* Check for double-init and zero object. */
	KASSERT(!lock_initalized(lock), ("lock \"%s\" %p already initialized",
	    name, lock));

	/* Look up lock class to find its index. */
	for (i = 0; i < LOCK_CLASS_MAX; i++)
		if (lock_classes[i] == class) {
			lock->lo_flags = i << LO_CLASSSHIFT;
			break;
		}
	KASSERT(i < LOCK_CLASS_MAX, ("unknown lock class %p", class));

	/* Initialize the lock object. */
	lock->lo_name = name;
	lock->lo_flags |= flags | LO_INITIALIZED;
	LOCK_LOG_INIT(lock, 0);
	WITNESS_INIT(lock, (type != NULL) ? type : name);
}

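/*
 * Tear down a lock object, unregistering it from witness and clearing
 * LO_INITIALIZED so that stale use of the lock can be caught.
 */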
void
lock_destroy(struct lock_object *lock)
{

	KASSERT(lock_initalized(lock), ("lock %p is not initialized", lock));
	WITNESS_DESTROY(lock);
	LOCK_LOG_DESTROY(lock, 0);
	lock->lo_flags &= ~LO_INITIALIZED;
}

#ifdef DDB
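/*
 * "show lock <addr>" prints the class and name of a lock object and
 * then hands off to the class-specific ddb show routine.
 */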
DB_SHOW_COMMAND(lock, db_show_lock)
{
	struct lock_object *lock;
	struct lock_class *class;

	if (!have_addr)
		return;
	lock = (struct lock_object *)addr;
	if (LO_CLASSINDEX(lock) > LOCK_CLASS_MAX) {
		db_printf("Unknown lock class: %d\n", LO_CLASSINDEX(lock));
		return;
	}
	class = LOCK_CLASS(lock);
	db_printf(" class: %s\n", class->lc_name);
	db_printf(" name: %s\n", lock->lo_name);
	class->lc_ddb_show(lock);
}
#endif

#ifdef LOCK_PROFILING

/*
 * One object per-thread for each lock the thread owns.  Tracks individual
 * lock instances.
 */
struct lock_profile_object {
	LIST_ENTRY(lock_profile_object) lpo_link;
	struct lock_object *lpo_obj;
	const char	*lpo_file;
	int		lpo_line;
	uint16_t	lpo_ref;
	uint16_t	lpo_cnt;
	uint64_t	lpo_acqtime;
	uint64_t	lpo_waittime;
	u_int		lpo_contest_locking;
};

/*
 * One lock_prof for each (file, line, lock object) triple.
 */
struct lock_prof {
	SLIST_ENTRY(lock_prof) link;
	struct lock_class *class;
	const char	*file;
	const char	*name;
	int		line;
	int		ticks;
	uintmax_t	cnt_wait_max;
	uintmax_t	cnt_max;
	uintmax_t	cnt_tot;
	uintmax_t	cnt_wait;
	uintmax_t	cnt_cur;
	uintmax_t	cnt_contest_locking;
};

SLIST_HEAD(lphead, lock_prof);

#define	LPROF_HASH_SIZE		4096
#define	LPROF_HASH_MASK		(LPROF_HASH_SIZE - 1)
#define	LPROF_CACHE_SIZE	4096

/*
 * Array of objects and profs for each type of object for each cpu.  Spinlocks
 * are handled separately because a thread may be preempted and acquire a
 * spinlock while in the lock profiling code of a non-spinlock.  In this way
 * we only need a critical section to protect the per-cpu lists.
 */
struct lock_prof_type {
	struct lphead		lpt_lpalloc;
	struct lpohead		lpt_lpoalloc;
	struct lphead		lpt_hash[LPROF_HASH_SIZE];
	struct lock_prof	lpt_prof[LPROF_CACHE_SIZE];
	struct lock_profile_object lpt_objs[LPROF_CACHE_SIZE];
};

struct lock_prof_cpu {
	struct lock_prof_type	lpc_types[2]; /* One for spin one for other. */
};

struct lock_prof_cpu *lp_cpu[MAXCPU];

volatile int lock_prof_enable = 0;
static volatile int lock_prof_resetting;

/* SWAG: sbuf size = avg stat. line size * number of locks */
#define	LPROF_SBUF_SIZE		(256 * 400)

static int lock_prof_rejected;
static int lock_prof_skipspin;
static int lock_prof_skipcount;

#ifndef USE_CPU_NANOSECONDS
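/*
 * Convert the current binuptime() to nanoseconds.  bt.frac is a 64-bit
 * binary fraction of a second, so scaling its upper 32 bits by 10^9 and
 * shifting back down yields the fractional nanoseconds.
 */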
uint64_t
nanoseconds(void)
{
	struct bintime bt;
	uint64_t ns;

	binuptime(&bt);
	/* From bintime2timespec */
	ns = bt.sec * (uint64_t)1000000000;
	ns += ((uint64_t)1000000000 * (uint32_t)(bt.frac >> 32)) >> 32;
	return (ns);
}
#endif

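/*
 * Link every statically allocated lock_prof and lock_profile_object
 * for this type into its free lists so they can be handed out later.
 */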
static void
lock_prof_init_type(struct lock_prof_type *type)
{
	int i;

	SLIST_INIT(&type->lpt_lpalloc);
	LIST_INIT(&type->lpt_lpoalloc);
	for (i = 0; i < LPROF_CACHE_SIZE; i++) {
		SLIST_INSERT_HEAD(&type->lpt_lpalloc, &type->lpt_prof[i],
		    link);
		LIST_INSERT_HEAD(&type->lpt_lpoalloc, &type->lpt_objs[i],
		    lpo_link);
	}
}

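/*
 * Allocate and initialize the per-cpu profiling state once the other
 * cpus have launched.
 */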
static void
lock_prof_init(void *arg)
{
	int cpu;

	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		lp_cpu[cpu] = malloc(sizeof(*lp_cpu[cpu]), M_DEVBUF,
		    M_WAITOK | M_ZERO);
		lock_prof_init_type(&lp_cpu[cpu]->lpc_types[0]);
		lock_prof_init_type(&lp_cpu[cpu]->lpc_types[1]);
	}
}
SYSINIT(lockprof, SI_SUB_SMP, SI_ORDER_ANY, lock_prof_init, NULL);

/*
 * To be certain that lock profiling has idled on all cpus before we
 * reset, we schedule the resetting thread on all active cpus.  Since
 * all operations happen within critical sections we can be sure that
 * it is safe to zero the profiling structures.
 */
static void
lock_prof_idle(void)
{
	struct thread *td;
	int cpu;

	td = curthread;
	thread_lock(td);
	CPU_FOREACH(cpu) {
		sched_bind(td, cpu);
	}
	sched_unbind(td);
	thread_unlock(td);
}

static void
lock_prof_reset_wait(void)
{

	/*
	 * Spin relinquishing our cpu so that lock_prof_idle may
	 * run on it.
	 */
	while (lock_prof_resetting)
		sched_relinquish(curthread);
}

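/*
 * Disable profiling, quiesce all cpus, and then unlink and zero every
 * per-cpu structure before rebuilding the free lists.
 */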
static void
lock_prof_reset(void)
{
	struct lock_prof_cpu *lpc;
	int enabled, i, cpu;

	/*
	 * We not only race with acquiring and releasing locks but also
	 * thread exit.  To be certain that threads exit without valid head
	 * pointers they must see resetting set before enabled is cleared.
	 * Otherwise an exiting thread could see profiling disabled, skip
	 * removing a lock from its per-thread list, and yet never wait for
	 * the reset below to remove it.
	 */
	atomic_store_rel_int(&lock_prof_resetting, 1);
	enabled = lock_prof_enable;
	lock_prof_enable = 0;
	lock_prof_idle();
	/*
	 * Some objects may have migrated between CPUs.  Clear all links
	 * before we zero the structures.  Some items may still be linked
	 * into per-thread lists as well.
	 */
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		lpc = lp_cpu[cpu];
		for (i = 0; i < LPROF_CACHE_SIZE; i++) {
			LIST_REMOVE(&lpc->lpc_types[0].lpt_objs[i], lpo_link);
			LIST_REMOVE(&lpc->lpc_types[1].lpt_objs[i], lpo_link);
		}
	}
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		lpc = lp_cpu[cpu];
		bzero(lpc, sizeof(*lpc));
		lock_prof_init_type(&lpc->lpc_types[0]);
		lock_prof_init_type(&lpc->lpc_types[1]);
	}
	atomic_store_rel_int(&lock_prof_resetting, 0);
	lock_prof_enable = enabled;
}

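/*
 * Emit one row of statistics for a lock_prof entry, trimming any
 * leading "../" components from the file name.
 */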
static void
lock_prof_output(struct lock_prof *lp, struct sbuf *sb)
{
	const char *p;

	for (p = lp->file; p != NULL && strncmp(p, "../", 3) == 0; p += 3);
	sbuf_printf(sb,
	    "%8ju %9ju %11ju %11ju %11ju %6ju %6ju %2ju %6ju %s:%d (%s:%s)\n",
	    lp->cnt_max / 1000, lp->cnt_wait_max / 1000, lp->cnt_tot / 1000,
	    lp->cnt_wait / 1000, lp->cnt_cur,
	    lp->cnt_cur == 0 ? (uintmax_t)0 :
	    lp->cnt_tot / (lp->cnt_cur * 1000),
	    lp->cnt_cur == 0 ? (uintmax_t)0 :
	    lp->cnt_wait / (lp->cnt_cur * 1000),
	    (uintmax_t)0, lp->cnt_contest_locking,
	    p, lp->line, lp->class->lc_name, lp->name);
}

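/*
 * Accumulate the per-cpu entries matching (file, line, name) into dst,
 * stamping each visited entry with the current ticks value so that it
 * is only counted once per dump.
 */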
static void
lock_prof_sum(struct lock_prof *match, struct lock_prof *dst, int hash,
    int spin, int t)
{
	struct lock_prof_type *type;
	struct lock_prof *l;
	int cpu;

	dst->file = match->file;
	dst->line = match->line;
	dst->class = match->class;
	dst->name = match->name;

	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (lp_cpu[cpu] == NULL)
			continue;
		type = &lp_cpu[cpu]->lpc_types[spin];
		SLIST_FOREACH(l, &type->lpt_hash[hash], link) {
			if (l->ticks == t)
				continue;
			if (l->file != match->file || l->line != match->line ||
			    l->name != match->name)
				continue;
			l->ticks = t;
			if (l->cnt_max > dst->cnt_max)
				dst->cnt_max = l->cnt_max;
			if (l->cnt_wait_max > dst->cnt_wait_max)
				dst->cnt_wait_max = l->cnt_wait_max;
			dst->cnt_tot += l->cnt_tot;
			dst->cnt_wait += l->cnt_wait;
			dst->cnt_cur += l->cnt_cur;
			dst->cnt_contest_locking += l->cnt_contest_locking;
		}
	}
}

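/*
 * Walk one type's hash table and output a summed record for each
 * distinct (file, line, name) triple not yet visited in this pass.
 */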
static void
lock_prof_type_stats(struct lock_prof_type *type, struct sbuf *sb, int spin,
    int t)
{
	struct lock_prof *l;
	int i;

	for (i = 0; i < LPROF_HASH_SIZE; ++i) {
		SLIST_FOREACH(l, &type->lpt_hash[i], link) {
			struct lock_prof lp = {};

			if (l->ticks == t)
				continue;
			lock_prof_sum(l, &lp, i, spin, t);
			lock_prof_output(&lp, sb);
			if (sbuf_overflowed(sb))
				return;
		}
	}
}

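/*
 * Sysctl handler for debug.lock.prof.stats.  Profiling is paused while
 * the per-cpu tables are dumped; if the sbuf overflows, the dump is
 * retried with a larger buffer.
 */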
static int
dump_lock_prof_stats(SYSCTL_HANDLER_ARGS)
{
	static int multiplier = 1;
	struct sbuf *sb;
	int error, cpu, t;
	int enabled;

retry_sbufops:
	sb = sbuf_new(NULL, NULL, LPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
	sbuf_printf(sb, "\n%8s %9s %11s %11s %11s %6s %6s %2s %6s %s\n",
	    "max", "wait_max", "total", "wait_total", "count", "avg",
	    "wait_avg", "cnt_hold", "cnt_lock", "name");
	enabled = lock_prof_enable;
	lock_prof_enable = 0;
	lock_prof_idle();
	t = ticks;
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (lp_cpu[cpu] == NULL)
			continue;
		lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[0], sb, 0, t);
		lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[1], sb, 1, t);
		if (sbuf_overflowed(sb)) {
			sbuf_delete(sb);
			multiplier++;
			goto retry_sbufops;
		}
	}
	lock_prof_enable = enabled;

	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return (error);
}

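/*
 * Sysctl handler for debug.lock.prof.enable.  Writing 1 first resets
 * any statistics gathered during a previous run.
 */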
static int
enable_lock_prof(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = lock_prof_enable;
	error = sysctl_handle_int(oidp, &v, v, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == lock_prof_enable)
		return (0);
	if (v == 1)
		lock_prof_reset();
	lock_prof_enable = !!v;

	return (0);
}

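/*
 * Sysctl handler for debug.lock.prof.reset.  Writing a non-zero value
 * discards all gathered statistics.
 */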
static int
reset_lock_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);
	lock_prof_reset();

	return (0);
}

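/*
 * Find the lock_prof for this (lock name, file, line) triple on the
 * current cpu, allocating a new one from the free list on first use.
 */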
static struct lock_prof *
lock_profile_lookup(struct lock_object *lo, int spin, const char *file,
    int line)
{
	const char *unknown = "(unknown)";
	struct lock_prof_type *type;
	struct lock_prof *lp;
	struct lphead *head;
	const char *p;
	u_int hash;

	p = file;
	if (p == NULL || *p == '\0')
		p = unknown;
	hash = (uintptr_t)lo->lo_name * 31 + (uintptr_t)p * 31 + line;
	hash &= LPROF_HASH_MASK;
	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
	head = &type->lpt_hash[hash];
	SLIST_FOREACH(lp, head, link) {
		if (lp->line == line && lp->file == p &&
		    lp->name == lo->lo_name)
			return (lp);
	}
	lp = SLIST_FIRST(&type->lpt_lpalloc);
	if (lp == NULL) {
		lock_prof_rejected++;
		return (lp);
	}
	SLIST_REMOVE_HEAD(&type->lpt_lpalloc, link);
	lp->file = p;
	lp->line = line;
	lp->class = LOCK_CLASS(lo);
	lp->name = lo->lo_name;
	SLIST_INSERT_HEAD(&type->lpt_hash[hash], lp, link);
	return (lp);
}

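/*
 * Find the per-thread lock_profile_object tracking this acquisition
 * point, allocating one from the current cpu's free list if the lock
 * is not already held from the same file and line.
 */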
static struct lock_profile_object *
lock_profile_object_lookup(struct lock_object *lo, int spin, const char *file,
    int line)
{
	struct lock_profile_object *l;
	struct lock_prof_type *type;
	struct lpohead *head;

	head = &curthread->td_lprof[spin];
	LIST_FOREACH(l, head, lpo_link)
		if (l->lpo_obj == lo && l->lpo_file == file &&
		    l->lpo_line == line)
			return (l);
	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
	l = LIST_FIRST(&type->lpt_lpoalloc);
	if (l == NULL) {
		lock_prof_rejected++;
		return (NULL);
	}
	LIST_REMOVE(l, lpo_link);
	l->lpo_obj = lo;
	l->lpo_file = file;
	l->lpo_line = line;
	l->lpo_cnt = 0;
	LIST_INSERT_HEAD(head, l, lpo_link);

	return (l);
}

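/*
 * Record a successful lock acquisition: note the acquisition time and
 * any time spent waiting.  Recursive acquisitions bump lpo_ref and
 * lpo_cnt without resetting the timestamps.
 */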
void
lock_profile_obtain_lock_success(struct lock_object *lo, int contested,
    uint64_t waittime, const char *file, int line)
{
	static int lock_prof_count;
	struct lock_profile_object *l;
	int spin;

	/* don't reset the timer when/if recursing */
	if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE))
		return;
	if (lock_prof_skipcount &&
	    (++lock_prof_count % lock_prof_skipcount) != 0)
		return;
	spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
	if (spin && lock_prof_skipspin == 1)
		return;
	critical_enter();
	/* Recheck enabled now that we're in a critical section. */
	if (lock_prof_enable == 0)
		goto out;
	l = lock_profile_object_lookup(lo, spin, file, line);
	if (l == NULL)
		goto out;
	l->lpo_cnt++;
	if (++l->lpo_ref > 1)
		goto out;
	l->lpo_contest_locking = contested;
	l->lpo_acqtime = nanoseconds();
	if (waittime && (l->lpo_acqtime > waittime))
		l->lpo_waittime = l->lpo_acqtime - waittime;
	else
		l->lpo_waittime = 0;
out:
	critical_exit();
}

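/*
 * Called when a thread exits to make sure no lock_profile_objects are
 * left on its per-thread lists once any pending reset has drained.
 */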
void
lock_profile_thread_exit(struct thread *td)
{
#ifdef INVARIANTS
	struct lock_profile_object *l;

	MPASS(curthread->td_critnest == 0);
#endif
	/*
	 * If lock profiling was disabled we have to wait for reset to
	 * clear our pointers before we can exit safely.
	 */
	lock_prof_reset_wait();
#ifdef INVARIANTS
	LIST_FOREACH(l, &td->td_lprof[0], lpo_link)
		printf("thread still holds lock acquired at %s:%d\n",
		    l->lpo_file, l->lpo_line);
	LIST_FOREACH(l, &td->td_lprof[1], lpo_link)
		printf("thread still holds lock acquired at %s:%d\n",
		    l->lpo_file, l->lpo_line);
#endif
	MPASS(LIST_FIRST(&td->td_lprof[0]) == NULL);
	MPASS(LIST_FIRST(&td->td_lprof[1]) == NULL);
}

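/*
 * Record a lock release: fold the hold and wait times for this
 * acquisition point into the matching lock_prof and return the
 * lock_profile_object to the per-cpu free list.
 */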
void
lock_profile_release_lock(struct lock_object *lo)
{
	struct lock_profile_object *l;
	struct lock_prof_type *type;
	struct lock_prof *lp;
	uint64_t curtime, holdtime;
	struct lpohead *head;
	int spin;

	if (lo->lo_flags & LO_NOPROFILE)
		return;
	spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
	head = &curthread->td_lprof[spin];
	if (LIST_FIRST(head) == NULL)
		return;
	critical_enter();
	/* Recheck enabled now that we're in a critical section. */
	if (lock_prof_enable == 0 && lock_prof_resetting == 1)
		goto out;
	/*
	 * If lock profiling is not enabled we still want to remove the
	 * lpo from our queue.
	 */
	LIST_FOREACH(l, head, lpo_link)
		if (l->lpo_obj == lo)
			break;
	if (l == NULL)
		goto out;
	if (--l->lpo_ref > 0)
		goto out;
	lp = lock_profile_lookup(lo, spin, l->lpo_file, l->lpo_line);
	if (lp == NULL)
		goto release;
	curtime = nanoseconds();
	if (curtime < l->lpo_acqtime)
		goto release;
	holdtime = curtime - l->lpo_acqtime;

	/*
	 * Record if the lock has been held longer now than ever
	 * before.
	 */
	if (holdtime > lp->cnt_max)
		lp->cnt_max = holdtime;
	if (l->lpo_waittime > lp->cnt_wait_max)
		lp->cnt_wait_max = l->lpo_waittime;
	lp->cnt_tot += holdtime;
	lp->cnt_wait += l->lpo_waittime;
	lp->cnt_contest_locking += l->lpo_contest_locking;
	lp->cnt_cur += l->lpo_cnt;
release:
	LIST_REMOVE(l, lpo_link);
	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
	LIST_INSERT_HEAD(&type->lpt_lpoalloc, l, lpo_link);
out:
	critical_exit();
}

SYSCTL_NODE(_debug, OID_AUTO, lock, CTLFLAG_RD, NULL, "lock debugging");
SYSCTL_NODE(_debug_lock, OID_AUTO, prof, CTLFLAG_RD, NULL, "lock profiling");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipspin, CTLFLAG_RW,
    &lock_prof_skipspin, 0, "Skip profiling on spinlocks.");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipcount, CTLFLAG_RW,
    &lock_prof_skipcount, 0, "Sample approximately every N lock acquisitions.");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &lock_prof_rejected, 0, "Number of rejected profiling records");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_lock_prof_stats, "A", "Lock profiling statistics");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_lock_prof_stats, "I", "Reset lock profiling statistics");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, enable_lock_prof, "I", "Enable lock profiling");

#endif