/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module holds the global variables and functions used to maintain
 * lock_object structures.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_mprof.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <machine/cpufunc.h>

CTASSERT(LOCK_CLASS_MAX == 15);

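/*
 * Built-in lock classes.  A lock's class index, stored in lo_flags at
 * LO_CLASSSHIFT by lock_init(), is an index into this table; LOCK_CLASS()
 * uses it to recover the class from a lock_object.
 */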
struct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = {
	&lock_class_mtx_spin,
	&lock_class_mtx_sleep,
	&lock_class_sx,
	&lock_class_rm,
	&lock_class_rw,
	&lock_class_lockmgr,
};

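/*
 * Initialize a lock_object: record the lock class index in lo_flags,
 * set the name and caller-supplied flags, and register the lock with
 * WITNESS under its type (or its name, if no type was given).
 */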
void
lock_init(struct lock_object *lock, struct lock_class *class, const char *name,
    const char *type, int flags)
{
	int i;

	/* Check for double-init and zero object. */
	KASSERT(!lock_initalized(lock), ("lock \"%s\" %p already initialized",
	    name, lock));

	/* Look up lock class to find its index. */
	for (i = 0; i < LOCK_CLASS_MAX; i++)
		if (lock_classes[i] == class) {
			lock->lo_flags = i << LO_CLASSSHIFT;
			break;
		}
	KASSERT(i < LOCK_CLASS_MAX, ("unknown lock class %p", class));

	/* Initialize the lock object. */
	lock->lo_name = name;
	lock->lo_flags |= flags | LO_INITIALIZED;
	LOCK_LOG_INIT(lock, 0);
	WITNESS_INIT(lock, (type != NULL) ? type : name);
}

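/*
 * Tear down a lock_object: unregister it from WITNESS and clear
 * LO_INITIALIZED, so the object may be reinitialized and a stray double
 * destroy trips the KASSERT below.
 */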
void
lock_destroy(struct lock_object *lock)
{

	KASSERT(lock_initalized(lock), ("lock %p is not initialized", lock));
	WITNESS_DESTROY(lock);
	LOCK_LOG_DESTROY(lock, 0);
	lock->lo_flags &= ~LO_INITIALIZED;
}

#ifdef DDB
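/*
 * DDB "show lock <address>" command: decode a lock_object's class and
 * name, then hand off to the class's own lc_ddb_show() routine for
 * class-specific detail.
 */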
DB_SHOW_COMMAND(lock, db_show_lock)
{
	struct lock_object *lock;
	struct lock_class *class;

	if (!have_addr)
		return;
	lock = (struct lock_object *)addr;
	if (LO_CLASSINDEX(lock) > LOCK_CLASS_MAX) {
		db_printf("Unknown lock class: %d\n", LO_CLASSINDEX(lock));
		return;
	}
	class = LOCK_CLASS(lock);
	db_printf(" class: %s\n", class->lc_name);
	db_printf(" name: %s\n", lock->lo_name);
	class->lc_ddb_show(lock);
}
#endif

#ifdef LOCK_PROFILING

/*
 * One object per thread for each lock the thread owns.  Tracks individual
 * lock instances.
 */
struct lock_profile_object {
	LIST_ENTRY(lock_profile_object) lpo_link;
	struct lock_object *lpo_obj;
	const char	*lpo_file;
	int		lpo_line;
	uint16_t	lpo_ref;
	uint16_t	lpo_cnt;
	uint64_t	lpo_acqtime;
	uint64_t	lpo_waittime;
	u_int		lpo_contest_locking;
};

/*
 * One lock_prof for each (file, line, lock object) triple.
 */
struct lock_prof {
	SLIST_ENTRY(lock_prof) link;
	struct lock_class *class;
	const char	*file;
	const char	*name;
	int		line;
	int		ticks;
	uintmax_t	cnt_wait_max;
	uintmax_t	cnt_max;
	uintmax_t	cnt_tot;
	uintmax_t	cnt_wait;
	uintmax_t	cnt_cur;
	uintmax_t	cnt_contest_locking;
};

SLIST_HEAD(lphead, lock_prof);

#define	LPROF_HASH_SIZE		4096
#define	LPROF_HASH_MASK		(LPROF_HASH_SIZE - 1)
#define	LPROF_CACHE_SIZE	4096

/*
 * Array of objects and profs for each type of object for each cpu.  Spinlocks
 * are handled separately because a thread may be preempted and acquire a
 * spinlock while in the lock profiling code of a non-spinlock.  In this way
 * we only need a critical section to protect the per-cpu lists.
 */
struct lock_prof_type {
	struct lphead		lpt_lpalloc;
	struct lpohead		lpt_lpoalloc;
	struct lphead		lpt_hash[LPROF_HASH_SIZE];
	struct lock_prof	lpt_prof[LPROF_CACHE_SIZE];
	struct lock_profile_object lpt_objs[LPROF_CACHE_SIZE];
};

struct lock_prof_cpu {
	struct lock_prof_type	lpc_types[2]; /* One for spin, one for other. */
};

struct lock_prof_cpu *lp_cpu[MAXCPU];

volatile int lock_prof_enable = 0;
static volatile int lock_prof_resetting;

#define	LPROF_SBUF_SIZE		256

static int lock_prof_rejected;
static int lock_prof_skipspin;
static int lock_prof_skipcount;

#ifndef USE_CPU_NANOSECONDS
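/*
 * Fallback time source, used unless the MD code supplies its own
 * nanoseconds() (USE_CPU_NANOSECONDS).  A bintime carries seconds plus a
 * 64-bit binary fraction; scaling the fraction's upper 32 bits by 10^9
 * and shifting back down converts it to nanoseconds.
 */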
uint64_t
nanoseconds(void)
{
	struct bintime bt;
	uint64_t ns;

	binuptime(&bt);
	/* From bintime2timespec */
	ns = bt.sec * (uint64_t)1000000000;
	ns += ((uint64_t)1000000000 * (uint32_t)(bt.frac >> 32)) >> 32;
	return (ns);
}
#endif

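/*
 * Seed one lock_prof_type's free lists with its statically sized caches
 * of profiling structures.  All later "allocations" pop from these lists,
 * so the profiling hot path never calls into malloc().
 */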
static void
lock_prof_init_type(struct lock_prof_type *type)
{
	int i;

	SLIST_INIT(&type->lpt_lpalloc);
	LIST_INIT(&type->lpt_lpoalloc);
	for (i = 0; i < LPROF_CACHE_SIZE; i++) {
		SLIST_INSERT_HEAD(&type->lpt_lpalloc, &type->lpt_prof[i],
		    link);
		LIST_INSERT_HEAD(&type->lpt_lpoalloc, &type->lpt_objs[i],
		    lpo_link);
	}
}

static void
lock_prof_init(void *arg)
{
	int cpu;

	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		lp_cpu[cpu] = malloc(sizeof(*lp_cpu[cpu]), M_DEVBUF,
		    M_WAITOK | M_ZERO);
		lock_prof_init_type(&lp_cpu[cpu]->lpc_types[0]);
		lock_prof_init_type(&lp_cpu[cpu]->lpc_types[1]);
	}
}
SYSINIT(lockprof, SI_SUB_SMP, SI_ORDER_ANY, lock_prof_init, NULL);

/*
 * To be certain that lock profiling has idled on all cpus before we
 * reset, we schedule the resetting thread on all active cpus.  Since
 * all operations happen within critical sections we can be sure that
 * it is safe to zero the profiling structures.
 */
static void
lock_prof_idle(void)
{
	struct thread *td;
	int cpu;

	td = curthread;
	thread_lock(td);
	CPU_FOREACH(cpu) {
		sched_bind(td, cpu);
	}
	sched_unbind(td);
	thread_unlock(td);
}

static void
lock_prof_reset_wait(void)
{

	/*
	 * Spin relinquishing our cpu so that lock_prof_idle may
	 * run on it.
	 */
	while (lock_prof_resetting)
		sched_relinquish(curthread);
}

static void
lock_prof_reset(void)
{
	struct lock_prof_cpu *lpc;
	int enabled, i, cpu;

	/*
	 * We race not only with lock acquisition and release but also with
	 * thread exit.  Exiting threads must see lock_prof_resetting set
	 * before lock_prof_enable is cleared; otherwise a thread could
	 * observe profiling disabled, leave an object linked on its
	 * per-thread list, and exit without waiting for this function to
	 * unlink it below.
	 */
	atomic_store_rel_int(&lock_prof_resetting, 1);
	enabled = lock_prof_enable;
	lock_prof_enable = 0;
	lock_prof_idle();
	/*
	 * Some objects may have migrated between CPUs.  Clear all links
	 * before we zero the structures.  Some items may still be linked
	 * into per-thread lists as well.
	 */
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		lpc = lp_cpu[cpu];
		for (i = 0; i < LPROF_CACHE_SIZE; i++) {
			LIST_REMOVE(&lpc->lpc_types[0].lpt_objs[i], lpo_link);
			LIST_REMOVE(&lpc->lpc_types[1].lpt_objs[i], lpo_link);
		}
	}
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		lpc = lp_cpu[cpu];
		bzero(lpc, sizeof(*lpc));
		lock_prof_init_type(&lpc->lpc_types[0]);
		lock_prof_init_type(&lpc->lpc_types[1]);
	}
	atomic_store_rel_int(&lock_prof_resetting, 0);
	lock_prof_enable = enabled;
}

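/*
 * Emit one row of statistics.  Hold and wait times are accumulated in
 * nanoseconds but reported in microseconds, hence the divisions by 1000;
 * the cnt_hold column is always printed as 0 here.
 */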
static void
lock_prof_output(struct lock_prof *lp, struct sbuf *sb)
{
	const char *p;

	/* Skip any leading "../" components in the recorded file name. */
	for (p = lp->file; p != NULL && strncmp(p, "../", 3) == 0; p += 3)
		continue;
	sbuf_printf(sb,
	    "%8ju %9ju %11ju %11ju %11ju %6ju %6ju %2ju %6ju %s:%d (%s:%s)\n",
	    lp->cnt_max / 1000, lp->cnt_wait_max / 1000, lp->cnt_tot / 1000,
	    lp->cnt_wait / 1000, lp->cnt_cur,
	    lp->cnt_cur == 0 ? (uintmax_t)0 :
	    lp->cnt_tot / (lp->cnt_cur * 1000),
	    lp->cnt_cur == 0 ? (uintmax_t)0 :
	    lp->cnt_wait / (lp->cnt_cur * 1000),
	    (uintmax_t)0, lp->cnt_contest_locking,
	    p, lp->line, lp->class->lc_name, lp->name);
}

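/*
 * Fold every per-CPU lock_prof entry that matches 'match' (same file,
 * line, and lock name) into 'dst'.  Visited entries are stamped with the
 * ticks value 't' so that lock_prof_type_stats() reports each
 * (file, line, lock) triple exactly once per dump.
 */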
static void
lock_prof_sum(struct lock_prof *match, struct lock_prof *dst, int hash,
    int spin, int t)
{
	struct lock_prof_type *type;
	struct lock_prof *l;
	int cpu;

	dst->file = match->file;
	dst->line = match->line;
	dst->class = match->class;
	dst->name = match->name;

	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (lp_cpu[cpu] == NULL)
			continue;
		type = &lp_cpu[cpu]->lpc_types[spin];
		SLIST_FOREACH(l, &type->lpt_hash[hash], link) {
			if (l->ticks == t)
				continue;
			if (l->file != match->file || l->line != match->line ||
			    l->name != match->name)
				continue;
			l->ticks = t;
			if (l->cnt_max > dst->cnt_max)
				dst->cnt_max = l->cnt_max;
			if (l->cnt_wait_max > dst->cnt_wait_max)
				dst->cnt_wait_max = l->cnt_wait_max;
			dst->cnt_tot += l->cnt_tot;
			dst->cnt_wait += l->cnt_wait;
			dst->cnt_cur += l->cnt_cur;
			dst->cnt_contest_locking += l->cnt_contest_locking;
		}
	}
}

static void
lock_prof_type_stats(struct lock_prof_type *type, struct sbuf *sb, int spin,
    int t)
{
	struct lock_prof *l;
	int i;

	for (i = 0; i < LPROF_HASH_SIZE; ++i) {
		SLIST_FOREACH(l, &type->lpt_hash[i], link) {
			struct lock_prof lp = {};

			if (l->ticks == t)
				continue;
			lock_prof_sum(l, &lp, i, spin, t);
			lock_prof_output(&lp, sb);
		}
	}
}

static int
dump_lock_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int error, cpu, t;
	int enabled;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sb = sbuf_new_for_sysctl(NULL, NULL, LPROF_SBUF_SIZE, req);
	sbuf_printf(sb, "\n%8s %9s %11s %11s %11s %6s %6s %2s %6s %s\n",
	    "max", "wait_max", "total", "wait_total", "count", "avg",
	    "wait_avg", "cnt_hold", "cnt_lock", "name");
	enabled = lock_prof_enable;
	lock_prof_enable = 0;
	lock_prof_idle();
	t = ticks;
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (lp_cpu[cpu] == NULL)
			continue;
		lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[0], sb, 0, t);
		lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[1], sb, 1, t);
	}
	lock_prof_enable = enabled;

	error = sbuf_finish(sb);
	/* Output a trailing NUL. */
	if (error == 0)
		error = SYSCTL_OUT(req, "", 1);
	sbuf_delete(sb);
	return (error);
}

static int
enable_lock_prof(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = lock_prof_enable;
	error = sysctl_handle_int(oidp, &v, v, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == lock_prof_enable)
		return (0);
	if (v == 1)
		lock_prof_reset();
	lock_prof_enable = !!v;

	return (0);
}

static int
reset_lock_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);
	lock_prof_reset();

	return (0);
}

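/*
 * Find the lock_prof for a (file, line, lock name) triple in this CPU's
 * hash table, or pull a fresh one off the free list.  Hashing and
 * comparison use the pointers themselves: __FILE__ strings and lock
 * names are stable for the kernel's lifetime, so pointer equality is
 * sufficient.  Must be called within a critical section.
 */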
static struct lock_prof *
lock_profile_lookup(struct lock_object *lo, int spin, const char *file,
    int line)
{
	const char *unknown = "(unknown)";
	struct lock_prof_type *type;
	struct lock_prof *lp;
	struct lphead *head;
	const char *p;
	u_int hash;

	p = file;
	if (p == NULL || *p == '\0')
		p = unknown;
	hash = (uintptr_t)lo->lo_name * 31 + (uintptr_t)p * 31 + line;
	hash &= LPROF_HASH_MASK;
	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
	head = &type->lpt_hash[hash];
	SLIST_FOREACH(lp, head, link) {
		if (lp->line == line && lp->file == p &&
		    lp->name == lo->lo_name)
			return (lp);
	}
	lp = SLIST_FIRST(&type->lpt_lpalloc);
	if (lp == NULL) {
		lock_prof_rejected++;
		return (lp);
	}
	SLIST_REMOVE_HEAD(&type->lpt_lpalloc, link);
	lp->file = p;
	lp->line = line;
	lp->class = LOCK_CLASS(lo);
	lp->name = lo->lo_name;
	SLIST_INSERT_HEAD(&type->lpt_hash[hash], lp, link);
	return (lp);
}

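/*
 * Find the lock_profile_object tracking this (lock, file, line)
 * acquisition on the current thread, or take a fresh one from the
 * current CPU's free list and link it onto the thread's list.  Must be
 * called within a critical section.
 */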
static struct lock_profile_object *
lock_profile_object_lookup(struct lock_object *lo, int spin, const char *file,
    int line)
{
	struct lock_profile_object *l;
	struct lock_prof_type *type;
	struct lpohead *head;

	head = &curthread->td_lprof[spin];
	LIST_FOREACH(l, head, lpo_link)
		if (l->lpo_obj == lo && l->lpo_file == file &&
		    l->lpo_line == line)
			return (l);
	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
	l = LIST_FIRST(&type->lpt_lpoalloc);
	if (l == NULL) {
		lock_prof_rejected++;
		return (NULL);
	}
	LIST_REMOVE(l, lpo_link);
	l->lpo_obj = lo;
	l->lpo_file = file;
	l->lpo_line = line;
	l->lpo_cnt = 0;
	LIST_INSERT_HEAD(head, l, lpo_link);

	return (l);
}

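/*
 * Record a successful lock acquisition: bump the per-object counters
 * and, on the first (non-recursive) reference, stamp the acquisition
 * time along with any time spent waiting for the lock.
 */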
void
lock_profile_obtain_lock_success(struct lock_object *lo, int contested,
    uint64_t waittime, const char *file, int line)
{
	static int lock_prof_count;
	struct lock_profile_object *l;
	int spin;

	if (SCHEDULER_STOPPED())
		return;

	/* Don't reset the timer when/if recursing. */
	if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE))
		return;
	if (lock_prof_skipcount &&
	    (++lock_prof_count % lock_prof_skipcount) != 0)
		return;
	spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
	if (spin && lock_prof_skipspin == 1)
		return;
	critical_enter();
	/* Recheck enabled now that we're in a critical section. */
	if (lock_prof_enable == 0)
		goto out;
	l = lock_profile_object_lookup(lo, spin, file, line);
	if (l == NULL)
		goto out;
	l->lpo_cnt++;
	if (++l->lpo_ref > 1)
		goto out;
	l->lpo_contest_locking = contested;
	l->lpo_acqtime = nanoseconds();
	if (waittime && (l->lpo_acqtime > waittime))
		l->lpo_waittime = l->lpo_acqtime - waittime;
	else
		l->lpo_waittime = 0;
out:
	critical_exit();
}

void
lock_profile_thread_exit(struct thread *td)
{
#ifdef INVARIANTS
	struct lock_profile_object *l;

	MPASS(curthread->td_critnest == 0);
#endif
	/*
	 * If lock profiling was disabled we have to wait for reset to
	 * clear our pointers before we can exit safely.
	 */
	lock_prof_reset_wait();
#ifdef INVARIANTS
	LIST_FOREACH(l, &td->td_lprof[0], lpo_link)
		printf("thread still holds lock acquired at %s:%d\n",
		    l->lpo_file, l->lpo_line);
	LIST_FOREACH(l, &td->td_lprof[1], lpo_link)
		printf("thread still holds lock acquired at %s:%d\n",
		    l->lpo_file, l->lpo_line);
#endif
	MPASS(LIST_FIRST(&td->td_lprof[0]) == NULL);
	MPASS(LIST_FIRST(&td->td_lprof[1]) == NULL);
}

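/*
 * On release, find the acquisition recorded above, fold its hold and
 * wait times into the matching (file, line, lock) lock_prof, and return
 * the tracking object to the current CPU's free list.  The object may
 * well be freed to a different CPU than it was allocated from, which is
 * why lock_prof_reset() unlinks everything before zeroing.
 */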
void
lock_profile_release_lock(struct lock_object *lo)
{
	struct lock_profile_object *l;
	struct lock_prof_type *type;
	struct lock_prof *lp;
	uint64_t curtime, holdtime;
	struct lpohead *head;
	int spin;

	if (SCHEDULER_STOPPED())
		return;
	if (lo->lo_flags & LO_NOPROFILE)
		return;
	spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
	head = &curthread->td_lprof[spin];
	if (LIST_FIRST(head) == NULL)
		return;
	critical_enter();
	/* Recheck enabled now that we're in a critical section. */
	if (lock_prof_enable == 0 && lock_prof_resetting == 1)
		goto out;
	/*
	 * If lock profiling is not enabled we still want to remove the
	 * lpo from our queue.
	 */
	LIST_FOREACH(l, head, lpo_link)
		if (l->lpo_obj == lo)
			break;
	if (l == NULL)
		goto out;
	if (--l->lpo_ref > 0)
		goto out;
	lp = lock_profile_lookup(lo, spin, l->lpo_file, l->lpo_line);
	if (lp == NULL)
		goto release;
	curtime = nanoseconds();
	if (curtime < l->lpo_acqtime)
		goto release;
	holdtime = curtime - l->lpo_acqtime;

	/*
	 * Record if the lock has been held longer now than ever
	 * before.
	 */
	if (holdtime > lp->cnt_max)
		lp->cnt_max = holdtime;
	if (l->lpo_waittime > lp->cnt_wait_max)
		lp->cnt_wait_max = l->lpo_waittime;
	lp->cnt_tot += holdtime;
	lp->cnt_wait += l->lpo_waittime;
	lp->cnt_contest_locking += l->lpo_contest_locking;
	lp->cnt_cur += l->lpo_cnt;
release:
	LIST_REMOVE(l, lpo_link);
	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
	LIST_INSERT_HEAD(&type->lpt_lpoalloc, l, lpo_link);
out:
	critical_exit();
}

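/*
 * Control knobs, exported under debug.lock.prof.  For example, on a
 * kernel built with "options LOCK_PROFILING", a profiling session might
 * look like:
 *
 *	sysctl debug.lock.prof.enable=1
 *	(run the workload of interest)
 *	sysctl debug.lock.prof.enable=0
 *	sysctl debug.lock.prof.stats
 *	sysctl debug.lock.prof.reset=1
 */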
static SYSCTL_NODE(_debug, OID_AUTO, lock, CTLFLAG_RD, NULL, "lock debugging");
static SYSCTL_NODE(_debug_lock, OID_AUTO, prof, CTLFLAG_RD, NULL,
    "lock profiling");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipspin, CTLFLAG_RW,
    &lock_prof_skipspin, 0, "Skip profiling on spinlocks.");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipcount, CTLFLAG_RW,
    &lock_prof_skipcount, 0, "Sample approximately every N lock acquisitions.");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &lock_prof_rejected, 0, "Number of rejected profiling records");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_lock_prof_stats, "A", "Lock profiling statistics");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_lock_prof_stats, "I", "Reset lock profiling statistics");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, enable_lock_prof, "I", "Enable lock profiling");

#endif