xref: /freebsd/sys/kern/subr_lock.c (revision a3cf0ef5a295c885c895fabfd56470c0d1db322d)
/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module holds the global variables and functions used to maintain
 * lock_object structures.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_mprof.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker_set.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <machine/cpufunc.h>

CTASSERT(LOCK_CLASS_MAX == 15);

struct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = {
	&lock_class_mtx_spin,
	&lock_class_mtx_sleep,
	&lock_class_sx,
	&lock_class_rm,
	&lock_class_rw,
	&lock_class_lockmgr,
};
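
/*
 * Illustrative note (not from the original source): a lock's class is
 * recorded as an index into lock_classes[], packed into lo_flags by
 * lock_init() below.  Assuming the definitions in sys/lock.h, the round
 * trip looks roughly like:
 *
 *	lock->lo_flags = i << LO_CLASSSHIFT;	// store index i
 *	i = LO_CLASSINDEX(lock);		// recover index
 *	class = LOCK_CLASS(lock);	// lock_classes[LO_CLASSINDEX(lock)]
 *
 * With LOCK_CLASS_MAX == 15 the index fits in four bits of lo_flags,
 * which is what the CTASSERT above pins down.
 */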

void
lock_init(struct lock_object *lock, struct lock_class *class, const char *name,
    const char *type, int flags)
{
	int i;

	/* Check for double-init and zero object. */
	KASSERT(!lock_initalized(lock), ("lock \"%s\" %p already initialized",
	    name, lock));

	/* Look up lock class to find its index. */
	for (i = 0; i < LOCK_CLASS_MAX; i++)
		if (lock_classes[i] == class) {
			lock->lo_flags = i << LO_CLASSSHIFT;
			break;
		}
	KASSERT(i < LOCK_CLASS_MAX, ("unknown lock class %p", class));

	/* Initialize the lock object. */
	lock->lo_name = name;
	lock->lo_flags |= flags | LO_INITIALIZED;
	LOCK_LOG_INIT(lock, 0);
	WITNESS_INIT(lock, (type != NULL) ? type : name);
}
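
/*
 * Sketch of a typical caller (illustrative only; the real consumers are
 * the individual lock implementations, e.g. kern_mutex.c):
 *
 *	struct mtx m;
 *
 *	lock_init(&m.lock_object, &lock_class_mtx_sleep,
 *	    "example lock", NULL, LO_WITNESS);
 *	...
 *	lock_destroy(&m.lock_object);
 *
 * "type" names a witness class shared by related locks; passing NULL
 * falls back to the lock's own name.
 */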

void
lock_destroy(struct lock_object *lock)
{

	KASSERT(lock_initalized(lock), ("lock %p is not initialized", lock));
	WITNESS_DESTROY(lock);
	LOCK_LOG_DESTROY(lock, 0);
	lock->lo_flags &= ~LO_INITIALIZED;
}

#ifdef DDB
DB_SHOW_COMMAND(lock, db_show_lock)
{
	struct lock_object *lock;
	struct lock_class *class;

	if (!have_addr)
		return;
	lock = (struct lock_object *)addr;
	if (LO_CLASSINDEX(lock) > LOCK_CLASS_MAX) {
		db_printf("Unknown lock class: %d\n", LO_CLASSINDEX(lock));
		return;
	}
	class = LOCK_CLASS(lock);
	db_printf(" class: %s\n", class->lc_name);
	db_printf(" name: %s\n", lock->lo_name);
	class->lc_ddb_show(lock);
}
#endif
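
/*
 * Example DDB session (illustrative; the trailing lines depend on the
 * lock class's lc_ddb_show routine):
 *
 *	db> show lock 0xc0abc123
 *	 class: sleep mutex
 *	 name: Giant
 *	 ...
 */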

#ifdef LOCK_PROFILING

/*
 * One object per-thread for each lock the thread owns.  Tracks individual
 * lock instances.
 */
struct lock_profile_object {
	LIST_ENTRY(lock_profile_object) lpo_link;
	struct lock_object *lpo_obj;
	const char	*lpo_file;
	int		lpo_line;
	uint16_t	lpo_ref;
	uint16_t	lpo_cnt;
	uint64_t	lpo_acqtime;
	uint64_t	lpo_waittime;
	u_int		lpo_contest_locking;
};

/*
 * One lock_prof for each (file, line, lock object) triple.
 */
struct lock_prof {
	SLIST_ENTRY(lock_prof) link;
	struct lock_class *class;
	const char	*file;
	const char	*name;
	int		line;
	int		ticks;
	uintmax_t	cnt_wait_max;
	uintmax_t	cnt_max;
	uintmax_t	cnt_tot;
	uintmax_t	cnt_wait;
	uintmax_t	cnt_cur;
	uintmax_t	cnt_contest_locking;
};
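
/*
 * Illustrative note (not from the original source): lock_prof records
 * are keyed by (name, file, line) and live in per-CPU hash tables, so
 * several instances of the same acquisition point can accumulate counts
 * in parallel.  They are only merged, by lock_prof_sum() below, when the
 * statistics are dumped.
 */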

SLIST_HEAD(lphead, lock_prof);

#define	LPROF_HASH_SIZE		4096
#define	LPROF_HASH_MASK		(LPROF_HASH_SIZE - 1)
#define	LPROF_CACHE_SIZE	4096

/*
 * Array of objects and profs for each type of object for each cpu.  Spinlocks
 * are handled separately because a thread may be preempted and acquire a
 * spinlock while in the lock profiling code of a non-spinlock.  In this way
 * we only need a critical section to protect the per-cpu lists.
 */
struct lock_prof_type {
	struct lphead		lpt_lpalloc;
	struct lpohead		lpt_lpoalloc;
	struct lphead		lpt_hash[LPROF_HASH_SIZE];
	struct lock_prof	lpt_prof[LPROF_CACHE_SIZE];
	struct lock_profile_object lpt_objs[LPROF_CACHE_SIZE];
};

struct lock_prof_cpu {
	struct lock_prof_type	lpc_types[2]; /* One for spin one for other. */
};

struct lock_prof_cpu *lp_cpu[MAXCPU];

volatile int lock_prof_enable = 0;
static volatile int lock_prof_resetting;

#define LPROF_SBUF_SIZE		256

static int lock_prof_rejected;
static int lock_prof_skipspin;
static int lock_prof_skipcount;

#ifndef USE_CPU_NANOSECONDS
uint64_t
nanoseconds(void)
{
	struct bintime bt;
	uint64_t ns;

	binuptime(&bt);
	/* From bintime2timespec */
	ns = bt.sec * (uint64_t)1000000000;
	ns += ((uint64_t)1000000000 * (uint32_t)(bt.frac >> 32)) >> 32;
	return (ns);
}
#endif
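
/*
 * Illustrative derivation (not from the original source): bt.frac is a
 * 64-bit binary fraction of a second, so the exact conversion would be
 * ns = (frac * 10^9) >> 64, which overflows 64-bit arithmetic.  The
 * expression above computes the same value to within a few nanoseconds
 * using only the top 32 bits of the fraction:
 *
 *	ns ~= (10^9 * (frac >> 32)) >> 32
 *	    = frac * 10^9 / 2^64, with the low 32 bits of frac dropped
 */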

static void
lock_prof_init_type(struct lock_prof_type *type)
{
	int i;

	SLIST_INIT(&type->lpt_lpalloc);
	LIST_INIT(&type->lpt_lpoalloc);
	for (i = 0; i < LPROF_CACHE_SIZE; i++) {
		SLIST_INSERT_HEAD(&type->lpt_lpalloc, &type->lpt_prof[i],
		    link);
		LIST_INSERT_HEAD(&type->lpt_lpoalloc, &type->lpt_objs[i],
		    lpo_link);
	}
}

static void
lock_prof_init(void *arg)
{
	int cpu;

	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		lp_cpu[cpu] = malloc(sizeof(*lp_cpu[cpu]), M_DEVBUF,
		    M_WAITOK | M_ZERO);
		lock_prof_init_type(&lp_cpu[cpu]->lpc_types[0]);
		lock_prof_init_type(&lp_cpu[cpu]->lpc_types[1]);
	}
}
SYSINIT(lockprof, SI_SUB_SMP, SI_ORDER_ANY, lock_prof_init, NULL);
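
/*
 * Illustrative note (not from the original source): running this at
 * SI_SUB_SMP/SI_ORDER_ANY places the allocation after SMP startup, by
 * which point mp_maxid is final, so every CPU slot that can ever be
 * used gets a lock_prof_cpu structure.
 */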

/*
 * To be certain that lock profiling has idled on all cpus before we
 * reset, we schedule the resetting thread on all active cpus.  Since
 * all operations happen within critical sections we can be sure that
 * it is safe to zero the profiling structures.
 */
static void
lock_prof_idle(void)
{
	struct thread *td;
	int cpu;

	td = curthread;
	thread_lock(td);
	CPU_FOREACH(cpu) {
		sched_bind(td, cpu);
	}
	sched_unbind(td);
	thread_unlock(td);
}

static void
lock_prof_reset_wait(void)
{

	/*
	 * Spin relinquishing our cpu so that lock_prof_idle may
	 * run on it.
	 */
	while (lock_prof_resetting)
		sched_relinquish(curthread);
}

static void
lock_prof_reset(void)
{
	struct lock_prof_cpu *lpc;
	int enabled, i, cpu;

	/*
	 * We race not only with lock acquire and release but also with
	 * thread exit.  To be certain that exiting threads do not leave
	 * stale pointers behind, they must see resetting set before
	 * enabled is cleared.  Otherwise a thread could observe profiling
	 * as disabled, skip removing a lock from its per-thread list, and
	 * yet fail to wait for the reset below to remove it.
	 */
	atomic_store_rel_int(&lock_prof_resetting, 1);
	enabled = lock_prof_enable;
	lock_prof_enable = 0;
	lock_prof_idle();
	/*
	 * Some objects may have migrated between CPUs.  Clear all links
	 * before we zero the structures.  Some items may still be linked
	 * into per-thread lists as well.
	 */
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		lpc = lp_cpu[cpu];
		for (i = 0; i < LPROF_CACHE_SIZE; i++) {
			LIST_REMOVE(&lpc->lpc_types[0].lpt_objs[i], lpo_link);
			LIST_REMOVE(&lpc->lpc_types[1].lpt_objs[i], lpo_link);
		}
	}
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		lpc = lp_cpu[cpu];
		bzero(lpc, sizeof(*lpc));
		lock_prof_init_type(&lpc->lpc_types[0]);
		lock_prof_init_type(&lpc->lpc_types[1]);
	}
	atomic_store_rel_int(&lock_prof_resetting, 0);
	lock_prof_enable = enabled;
}

static void
lock_prof_output(struct lock_prof *lp, struct sbuf *sb)
{
	const char *p;

	for (p = lp->file; p != NULL && strncmp(p, "../", 3) == 0; p += 3);
	sbuf_printf(sb,
	    "%8ju %9ju %11ju %11ju %11ju %6ju %6ju %2ju %6ju %s:%d (%s:%s)\n",
	    lp->cnt_max / 1000, lp->cnt_wait_max / 1000, lp->cnt_tot / 1000,
	    lp->cnt_wait / 1000, lp->cnt_cur,
	    lp->cnt_cur == 0 ? (uintmax_t)0 :
	    lp->cnt_tot / (lp->cnt_cur * 1000),
	    lp->cnt_cur == 0 ? (uintmax_t)0 :
	    lp->cnt_wait / (lp->cnt_cur * 1000),
	    (uintmax_t)0, lp->cnt_contest_locking,
	    p, lp->line, lp->class->lc_name, lp->name);
}

static void
lock_prof_sum(struct lock_prof *match, struct lock_prof *dst, int hash,
    int spin, int t)
{
	struct lock_prof_type *type;
	struct lock_prof *l;
	int cpu;

	dst->file = match->file;
	dst->line = match->line;
	dst->class = match->class;
	dst->name = match->name;

	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (lp_cpu[cpu] == NULL)
			continue;
		type = &lp_cpu[cpu]->lpc_types[spin];
		SLIST_FOREACH(l, &type->lpt_hash[hash], link) {
			if (l->ticks == t)
				continue;
			if (l->file != match->file || l->line != match->line ||
			    l->name != match->name)
				continue;
			l->ticks = t;
			if (l->cnt_max > dst->cnt_max)
				dst->cnt_max = l->cnt_max;
			if (l->cnt_wait_max > dst->cnt_wait_max)
				dst->cnt_wait_max = l->cnt_wait_max;
			dst->cnt_tot += l->cnt_tot;
			dst->cnt_wait += l->cnt_wait;
			dst->cnt_cur += l->cnt_cur;
			dst->cnt_contest_locking += l->cnt_contest_locking;
		}
	}
}
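
/*
 * Illustrative note (not from the original source): the ticks stamp is
 * the dedup mechanism for a dump.  Every record folded into a sum is
 * tagged with the dump's ticks value, so when the outer walk in
 * lock_prof_type_stats() later reaches a record that was already merged
 * into an earlier sum it is skipped rather than reported twice.
 */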

static void
lock_prof_type_stats(struct lock_prof_type *type, struct sbuf *sb, int spin,
    int t)
{
	struct lock_prof *l;
	int i;

	for (i = 0; i < LPROF_HASH_SIZE; ++i) {
		SLIST_FOREACH(l, &type->lpt_hash[i], link) {
			struct lock_prof lp = {};

			if (l->ticks == t)
				continue;
			lock_prof_sum(l, &lp, i, spin, t);
			lock_prof_output(&lp, sb);
		}
	}
}

static int
dump_lock_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int error, cpu, t;
	int enabled;

	sb = sbuf_new_for_sysctl(NULL, NULL, LPROF_SBUF_SIZE, req);
	sbuf_printf(sb, "\n%8s %9s %11s %11s %11s %6s %6s %2s %6s %s\n",
	    "max", "wait_max", "total", "wait_total", "count", "avg",
	    "wait_avg", "cnt_hold", "cnt_lock", "name");
	enabled = lock_prof_enable;
	lock_prof_enable = 0;
	lock_prof_idle();
	t = ticks;
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (lp_cpu[cpu] == NULL)
			continue;
		lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[0], sb, 0, t);
		lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[1], sb, 1, t);
	}
	lock_prof_enable = enabled;

	error = sbuf_finish(sb);
	/* Output a trailing NUL. */
	if (error == 0)
		error = SYSCTL_OUT(req, "", 1);
	sbuf_delete(sb);
	return (error);
}

static int
enable_lock_prof(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = lock_prof_enable;
	error = sysctl_handle_int(oidp, &v, v, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == lock_prof_enable)
		return (0);
	if (v == 1)
		lock_prof_reset();
	lock_prof_enable = !!v;

	return (0);
}

static int
reset_lock_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);
	lock_prof_reset();

	return (0);
}

static struct lock_prof *
lock_profile_lookup(struct lock_object *lo, int spin, const char *file,
    int line)
{
	const char *unknown = "(unknown)";
	struct lock_prof_type *type;
	struct lock_prof *lp;
	struct lphead *head;
	const char *p;
	u_int hash;

	p = file;
	if (p == NULL || *p == '\0')
		p = unknown;
	hash = (uintptr_t)lo->lo_name * 31 + (uintptr_t)p * 31 + line;
	hash &= LPROF_HASH_MASK;
	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
	head = &type->lpt_hash[hash];
	SLIST_FOREACH(lp, head, link) {
		if (lp->line == line && lp->file == p &&
		    lp->name == lo->lo_name)
			return (lp);
	}
	lp = SLIST_FIRST(&type->lpt_lpalloc);
	if (lp == NULL) {
		lock_prof_rejected++;
		return (lp);
	}
	SLIST_REMOVE_HEAD(&type->lpt_lpalloc, link);
	lp->file = p;
	lp->line = line;
	lp->class = LOCK_CLASS(lo);
	lp->name = lo->lo_name;
	SLIST_INSERT_HEAD(&type->lpt_hash[hash], lp, link);
	return (lp);
}
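
/*
 * Illustrative note (not from the original source): the lookup compares
 * file and name by pointer, not with strcmp().  That is sound here
 * because the file strings come from __FILE__ and the names from the
 * lock's lo_name, so a given acquisition point always passes the same
 * pointers; it is also why the hash can mix the pointer values
 * directly.
 */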

static struct lock_profile_object *
lock_profile_object_lookup(struct lock_object *lo, int spin, const char *file,
    int line)
{
	struct lock_profile_object *l;
	struct lock_prof_type *type;
	struct lpohead *head;

	head = &curthread->td_lprof[spin];
	LIST_FOREACH(l, head, lpo_link)
		if (l->lpo_obj == lo && l->lpo_file == file &&
		    l->lpo_line == line)
			return (l);
	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
	l = LIST_FIRST(&type->lpt_lpoalloc);
	if (l == NULL) {
		lock_prof_rejected++;
		return (NULL);
	}
	LIST_REMOVE(l, lpo_link);
	l->lpo_obj = lo;
	l->lpo_file = file;
	l->lpo_line = line;
	l->lpo_cnt = 0;
	LIST_INSERT_HEAD(head, l, lpo_link);

	return (l);
}

void
lock_profile_obtain_lock_success(struct lock_object *lo, int contested,
    uint64_t waittime, const char *file, int line)
{
	static int lock_prof_count;
	struct lock_profile_object *l;
	int spin;

	/* don't reset the timer when/if recursing */
	if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE))
		return;
	if (lock_prof_skipcount &&
	    (++lock_prof_count % lock_prof_skipcount) != 0)
		return;
	spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
	if (spin && lock_prof_skipspin == 1)
		return;
	critical_enter();
	/* Recheck enabled now that we're in a critical section. */
	if (lock_prof_enable == 0)
		goto out;
	l = lock_profile_object_lookup(lo, spin, file, line);
	if (l == NULL)
		goto out;
	l->lpo_cnt++;
	if (++l->lpo_ref > 1)
		goto out;
	l->lpo_contest_locking = contested;
	l->lpo_acqtime = nanoseconds();
	if (waittime && (l->lpo_acqtime > waittime))
		l->lpo_waittime = l->lpo_acqtime - waittime;
	else
		l->lpo_waittime = 0;
out:
	critical_exit();
}
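
/*
 * Sketch of the expected caller pattern (illustrative only; the real
 * consumers are the lock_profile_* macros used by the individual lock
 * implementations):
 *
 *	uint64_t waittime = 0;
 *	int contested = 0;
 *
 *	if (!_trylock(lo)) {
 *		lock_profile_obtain_lock_failed(lo, &contested, &waittime);
 *		_lock_hard(lo);		// block or spin for the lock
 *	}
 *	lock_profile_obtain_lock_success(lo, contested, waittime,
 *	    __FILE__, __LINE__);
 *
 * _trylock and _lock_hard are hypothetical stand-ins for a lock
 * implementation's fast and slow acquisition paths.
 */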

void
lock_profile_thread_exit(struct thread *td)
{
#ifdef INVARIANTS
	struct lock_profile_object *l;

	MPASS(curthread->td_critnest == 0);
#endif
	/*
	 * If lock profiling was disabled we have to wait for reset to
	 * clear our pointers before we can exit safely.
	 */
	lock_prof_reset_wait();
#ifdef INVARIANTS
	LIST_FOREACH(l, &td->td_lprof[0], lpo_link)
		printf("thread still holds lock acquired at %s:%d\n",
		    l->lpo_file, l->lpo_line);
	LIST_FOREACH(l, &td->td_lprof[1], lpo_link)
		printf("thread still holds lock acquired at %s:%d\n",
		    l->lpo_file, l->lpo_line);
#endif
	MPASS(LIST_FIRST(&td->td_lprof[0]) == NULL);
	MPASS(LIST_FIRST(&td->td_lprof[1]) == NULL);
}

void
lock_profile_release_lock(struct lock_object *lo)
{
	struct lock_profile_object *l;
	struct lock_prof_type *type;
	struct lock_prof *lp;
	uint64_t curtime, holdtime;
	struct lpohead *head;
	int spin;

	if (lo->lo_flags & LO_NOPROFILE)
		return;
	spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
	head = &curthread->td_lprof[spin];
	if (LIST_FIRST(head) == NULL)
		return;
	critical_enter();
	/* Recheck enabled now that we're in a critical section. */
	if (lock_prof_enable == 0 && lock_prof_resetting == 1)
		goto out;
	/*
	 * If lock profiling is not enabled we still want to remove the
	 * lpo from our queue.
	 */
	LIST_FOREACH(l, head, lpo_link)
		if (l->lpo_obj == lo)
			break;
	if (l == NULL)
		goto out;
	if (--l->lpo_ref > 0)
		goto out;
	lp = lock_profile_lookup(lo, spin, l->lpo_file, l->lpo_line);
	if (lp == NULL)
		goto release;
	curtime = nanoseconds();
	if (curtime < l->lpo_acqtime)
		goto release;
	holdtime = curtime - l->lpo_acqtime;

	/*
	 * Record if the lock has been held longer now than ever
	 * before.
	 */
	if (holdtime > lp->cnt_max)
		lp->cnt_max = holdtime;
	if (l->lpo_waittime > lp->cnt_wait_max)
		lp->cnt_wait_max = l->lpo_waittime;
	lp->cnt_tot += holdtime;
	lp->cnt_wait += l->lpo_waittime;
	lp->cnt_contest_locking += l->lpo_contest_locking;
	lp->cnt_cur += l->lpo_cnt;
release:
	LIST_REMOVE(l, lpo_link);
	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
	LIST_INSERT_HEAD(&type->lpt_lpoalloc, l, lpo_link);
out:
	critical_exit();
}

SYSCTL_NODE(_debug, OID_AUTO, lock, CTLFLAG_RD, NULL, "lock debugging");
SYSCTL_NODE(_debug_lock, OID_AUTO, prof, CTLFLAG_RD, NULL, "lock profiling");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipspin, CTLFLAG_RW,
    &lock_prof_skipspin, 0, "Skip profiling on spinlocks.");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipcount, CTLFLAG_RW,
    &lock_prof_skipcount, 0, "Sample approximately every N lock acquisitions.");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &lock_prof_rejected, 0, "Number of rejected profiling records");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_lock_prof_stats, "A", "Lock profiling statistics");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_lock_prof_stats, "I", "Reset lock profiling statistics");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, enable_lock_prof, "I", "Enable lock profiling");
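
/*
 * Example userland session (illustrative):
 *
 *	# sysctl debug.lock.prof.enable=1
 *	# ... run the workload of interest ...
 *	# sysctl debug.lock.prof.enable=0
 *	# sysctl debug.lock.prof.stats
 *	# sysctl debug.lock.prof.reset=1	(discard accumulated data)
 *
 * Times in the stats output are in microseconds: the nanosecond
 * counters are divided by 1000 in lock_prof_output().
 */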

#endif