xref: /freebsd/sys/kern/subr_lock.c (revision 7029da5c36f2d3cf6bb6c81bf551229f416399e8)
183a81bcbSJohn Baldwin /*-
28a36da99SPedro F. Giffuni  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
38a36da99SPedro F. Giffuni  *
483a81bcbSJohn Baldwin  * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
583a81bcbSJohn Baldwin  *
683a81bcbSJohn Baldwin  * Redistribution and use in source and binary forms, with or without
783a81bcbSJohn Baldwin  * modification, are permitted provided that the following conditions
883a81bcbSJohn Baldwin  * are met:
983a81bcbSJohn Baldwin  * 1. Redistributions of source code must retain the above copyright
1083a81bcbSJohn Baldwin  *    notice, this list of conditions and the following disclaimer.
1183a81bcbSJohn Baldwin  * 2. Redistributions in binary form must reproduce the above copyright
1283a81bcbSJohn Baldwin  *    notice, this list of conditions and the following disclaimer in the
1383a81bcbSJohn Baldwin  *    documentation and/or other materials provided with the distribution.
1483a81bcbSJohn Baldwin  *
1583a81bcbSJohn Baldwin  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
1683a81bcbSJohn Baldwin  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1783a81bcbSJohn Baldwin  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1883a81bcbSJohn Baldwin  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
1983a81bcbSJohn Baldwin  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
2083a81bcbSJohn Baldwin  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2183a81bcbSJohn Baldwin  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2283a81bcbSJohn Baldwin  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2383a81bcbSJohn Baldwin  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2483a81bcbSJohn Baldwin  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2583a81bcbSJohn Baldwin  * SUCH DAMAGE.
2683a81bcbSJohn Baldwin  */
2783a81bcbSJohn Baldwin 
2883a81bcbSJohn Baldwin /*
2983a81bcbSJohn Baldwin  * This module holds the global variables and functions used to maintain
3083a81bcbSJohn Baldwin  * lock_object structures.
3183a81bcbSJohn Baldwin  */
3283a81bcbSJohn Baldwin 
3383a81bcbSJohn Baldwin #include <sys/cdefs.h>
3483a81bcbSJohn Baldwin __FBSDID("$FreeBSD$");
3583a81bcbSJohn Baldwin 
366ef970a9SJohn Baldwin #include "opt_ddb.h"
377c0435b9SKip Macy #include "opt_mprof.h"
386ef970a9SJohn Baldwin 
3983a81bcbSJohn Baldwin #include <sys/param.h>
4083a81bcbSJohn Baldwin #include <sys/systm.h>
41eea4f254SJeff Roberson #include <sys/kernel.h>
4283a81bcbSJohn Baldwin #include <sys/ktr.h>
4383a81bcbSJohn Baldwin #include <sys/lock.h>
447c0435b9SKip Macy #include <sys/lock_profile.h>
45eea4f254SJeff Roberson #include <sys/malloc.h>
462e6b8de4SJeff Roberson #include <sys/mutex.h>
47eea4f254SJeff Roberson #include <sys/pcpu.h>
48eea4f254SJeff Roberson #include <sys/proc.h>
49eea4f254SJeff Roberson #include <sys/sbuf.h>
502e6b8de4SJeff Roberson #include <sys/sched.h>
51eea4f254SJeff Roberson #include <sys/smp.h>
52eea4f254SJeff Roberson #include <sys/sysctl.h>
5383a81bcbSJohn Baldwin 
5483a81bcbSJohn Baldwin #ifdef DDB
5583a81bcbSJohn Baldwin #include <ddb/ddb.h>
5683a81bcbSJohn Baldwin #endif
5783a81bcbSJohn Baldwin 
58eea4f254SJeff Roberson #include <machine/cpufunc.h>
59eea4f254SJeff Roberson 
608e5a3e9aSMateusz Guzik SDT_PROVIDER_DEFINE(lock);
618e5a3e9aSMateusz Guzik SDT_PROBE_DEFINE1(lock, , , starvation, "u_int");
628e5a3e9aSMateusz Guzik 
6383a81bcbSJohn Baldwin CTASSERT(LOCK_CLASS_MAX == 15);
6483a81bcbSJohn Baldwin 
/*
 * Table of known lock classes.  A lock's class index is encoded into its
 * lo_flags by lock_init() (see LO_CLASSSHIFT), so existing entries must
 * keep their positions; new classes are appended.  Unused slots are NULL.
 */
6583a81bcbSJohn Baldwin struct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = {
6683a81bcbSJohn Baldwin 	&lock_class_mtx_spin,
6783a81bcbSJohn Baldwin 	&lock_class_mtx_sleep,
6883a81bcbSJohn Baldwin 	&lock_class_sx,
69f53d15feSStephan Uphoff 	&lock_class_rm,
70cd32bd7aSJohn Baldwin 	&lock_class_rm_sleepable,
713f08bd8bSJohn Baldwin 	&lock_class_rw,
7261bd5e21SKip Macy 	&lock_class_lockmgr,
7383a81bcbSJohn Baldwin };
7483a81bcbSJohn Baldwin 
/*
 * Initialize a lock_object.  'class' must be one of the entries in
 * lock_classes[]; its table index is stored in lo_flags.  'name' is the
 * lock's display name and 'type' (if not NULL) is used as the witness
 * type name instead.  Passing LO_NEW in 'flags' skips the double-init
 * check so memory of unknown content may be (re)initialized.
 */
7583a81bcbSJohn Baldwin void
7683a81bcbSJohn Baldwin lock_init(struct lock_object *lock, struct lock_class *class, const char *name,
7783a81bcbSJohn Baldwin     const char *type, int flags)
7883a81bcbSJohn Baldwin {
7983a81bcbSJohn Baldwin 	int i;
8083a81bcbSJohn Baldwin 
8183a81bcbSJohn Baldwin 	/* Check for double-init and zero object. */
82fd07ddcfSDmitry Chagin 	KASSERT(flags & LO_NEW || !lock_initialized(lock),
83fd07ddcfSDmitry Chagin 	    ("lock \"%s\" %p already initialized", name, lock));
8483a81bcbSJohn Baldwin 
8583a81bcbSJohn Baldwin 	/* Look up lock class to find its index. */
8683a81bcbSJohn Baldwin 	for (i = 0; i < LOCK_CLASS_MAX; i++)
8783a81bcbSJohn Baldwin 		if (lock_classes[i] == class) {
8883a81bcbSJohn Baldwin 			lock->lo_flags = i << LO_CLASSSHIFT;
8983a81bcbSJohn Baldwin 			break;
9083a81bcbSJohn Baldwin 		}
9183a81bcbSJohn Baldwin 	KASSERT(i < LOCK_CLASS_MAX, ("unknown lock class %p", class));
9283a81bcbSJohn Baldwin 
9383a81bcbSJohn Baldwin 	/* Initialize the lock object. */
9483a81bcbSJohn Baldwin 	lock->lo_name = name;
9583a81bcbSJohn Baldwin 	lock->lo_flags |= flags | LO_INITIALIZED;
9683a81bcbSJohn Baldwin 	LOCK_LOG_INIT(lock, 0);
9790356491SAttilio Rao 	WITNESS_INIT(lock, (type != NULL) ? type : name);
9883a81bcbSJohn Baldwin }
9983a81bcbSJohn Baldwin 
/*
 * Tear down a lock_object: notify witness and the KTR log, then clear
 * LO_INITIALIZED so a later lock_init() (without LO_NEW) will not trip
 * the double-init assertion.
 */
10083a81bcbSJohn Baldwin void
10183a81bcbSJohn Baldwin lock_destroy(struct lock_object *lock)
10283a81bcbSJohn Baldwin {
10383a81bcbSJohn Baldwin 
1043a6cdc4eSJohn-Mark Gurney 	KASSERT(lock_initialized(lock), ("lock %p is not initialized", lock));
10583a81bcbSJohn Baldwin 	WITNESS_DESTROY(lock);
10683a81bcbSJohn Baldwin 	LOCK_LOG_DESTROY(lock, 0);
10783a81bcbSJohn Baldwin 	lock->lo_flags &= ~LO_INITIALIZED;
10883a81bcbSJohn Baldwin }
10983a81bcbSJohn Baldwin 
110*7029da5cSPawel Biernacki static SYSCTL_NODE(_debug, OID_AUTO, lock, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
111*7029da5cSPawel Biernacki     "lock debugging");
112*7029da5cSPawel Biernacki static SYSCTL_NODE(_debug_lock, OID_AUTO, delay,
113*7029da5cSPawel Biernacki     CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
1148e5a3e9aSMateusz Guzik     "lock delay");
1158e5a3e9aSMateusz Guzik 
/*
 * Once a spinner's accumulated spin count exceeds this limit, lock_delay()
 * fires the lock:::starvation SDT probe; if restrict_starvation is also
 * set, the spinner's backoff is reset to the configured base.
 */
1168e5a3e9aSMateusz Guzik static u_int __read_mostly starvation_limit = 131072;
1178e5a3e9aSMateusz Guzik SYSCTL_INT(_debug_lock_delay, OID_AUTO, starvation_limit, CTLFLAG_RW,
1188e5a3e9aSMateusz Guzik     &starvation_limit, 0, "");
1198e5a3e9aSMateusz Guzik 
1208e5a3e9aSMateusz Guzik static u_int __read_mostly restrict_starvation = 0;
1218e5a3e9aSMateusz Guzik SYSCTL_INT(_debug_lock_delay, OID_AUTO, restrict_starvation, CTLFLAG_RW,
1228e5a3e9aSMateusz Guzik     &restrict_starvation, 0, "");
1238e5a3e9aSMateusz Guzik 
/*
 * Spin for a while, as dictated by the caller's lock_delay_config.
 * The per-call delay doubles on every invocation, saturating at lc->max;
 * la->spin_cnt accumulates the total number of spins so starvation can
 * be detected across calls.
 */
1241ada9041SMateusz Guzik void
1251ada9041SMateusz Guzik lock_delay(struct lock_delay_arg *la)
1261ada9041SMateusz Guzik {
1271ada9041SMateusz Guzik 	struct lock_delay_config *lc = la->config;
1286b8dd26eSMateusz Guzik 	u_short i;
1291ada9041SMateusz Guzik 
	/* Exponential backoff, capped at the configured maximum. */
1308e5a3e9aSMateusz Guzik 	la->delay <<= 1;
1318e5a3e9aSMateusz Guzik 	if (__predict_false(la->delay > lc->max))
1328e5a3e9aSMateusz Guzik 		la->delay = lc->max;
1331ada9041SMateusz Guzik 
1343c798b2bSMateusz Guzik 	for (i = la->delay; i > 0; i--)
1351ada9041SMateusz Guzik 		cpu_spinwait();
1361ada9041SMateusz Guzik 
1378e5a3e9aSMateusz Guzik 	la->spin_cnt += la->delay;
1388e5a3e9aSMateusz Guzik 	if (__predict_false(la->spin_cnt > starvation_limit)) {
1398e5a3e9aSMateusz Guzik 		SDT_PROBE1(lock, , , starvation, la->delay);
1408e5a3e9aSMateusz Guzik 		if (restrict_starvation)
1418e5a3e9aSMateusz Guzik 			la->delay = lc->base;
1428e5a3e9aSMateusz Guzik 	}
1438e5a3e9aSMateusz Guzik }
1448e5a3e9aSMateusz Guzik 
1458e5a3e9aSMateusz Guzik static u_int
1468e5a3e9aSMateusz Guzik lock_roundup_2(u_int val)
1478e5a3e9aSMateusz Guzik {
1488e5a3e9aSMateusz Guzik 	u_int res;
1498e5a3e9aSMateusz Guzik 
1508e5a3e9aSMateusz Guzik 	for (res = 1; res <= val; res <<= 1)
1518e5a3e9aSMateusz Guzik 		continue;
1528e5a3e9aSMateusz Guzik 
1538e5a3e9aSMateusz Guzik 	return (res);
1548e5a3e9aSMateusz Guzik }
1558e5a3e9aSMateusz Guzik 
1568e5a3e9aSMateusz Guzik void
1578e5a3e9aSMateusz Guzik lock_delay_default_init(struct lock_delay_config *lc)
1588e5a3e9aSMateusz Guzik {
1598e5a3e9aSMateusz Guzik 
160a045941bSMateusz Guzik 	lc->base = 1;
161a045941bSMateusz Guzik 	lc->max = lock_roundup_2(mp_ncpus) * 256;
162a045941bSMateusz Guzik 	if (lc->max > 32678)
163a045941bSMateusz Guzik 		lc->max = 32678;
1641ada9041SMateusz Guzik }
1651ada9041SMateusz Guzik 
/*
 * Shared delay configuration and retry/loop tunables used by the lock
 * primitives, exposed read-write under debug.lock.*.
 */
1662e77cad1SMateusz Guzik struct lock_delay_config __read_frequently locks_delay;
1672e77cad1SMateusz Guzik u_short __read_frequently locks_delay_retries;
1682e77cad1SMateusz Guzik u_short __read_frequently locks_delay_loops;
1692e77cad1SMateusz Guzik 
1702e77cad1SMateusz Guzik SYSCTL_U16(_debug_lock, OID_AUTO, delay_base, CTLFLAG_RW, &locks_delay.base,
1712e77cad1SMateusz Guzik     0, "");
1722e77cad1SMateusz Guzik SYSCTL_U16(_debug_lock, OID_AUTO, delay_max, CTLFLAG_RW, &locks_delay.max,
1732e77cad1SMateusz Guzik     0, "");
1742e77cad1SMateusz Guzik SYSCTL_U16(_debug_lock, OID_AUTO, delay_retries, CTLFLAG_RW, &locks_delay_retries,
1752e77cad1SMateusz Guzik     0, "");
1762e77cad1SMateusz Guzik SYSCTL_U16(_debug_lock, OID_AUTO, delay_loops, CTLFLAG_RW, &locks_delay_loops,
1772e77cad1SMateusz Guzik     0, "");
1782e77cad1SMateusz Guzik 
/* Populate the shared delay configuration and tunables early in boot. */
1792e77cad1SMateusz Guzik static void
1802e77cad1SMateusz Guzik locks_delay_init(void *arg __unused)
1812e77cad1SMateusz Guzik {
1822e77cad1SMateusz Guzik 
1832e77cad1SMateusz Guzik 	lock_delay_default_init(&locks_delay);
1842e77cad1SMateusz Guzik 	locks_delay_retries = 10;
1852e77cad1SMateusz Guzik 	locks_delay_loops = max(10000, locks_delay.max);
1862e77cad1SMateusz Guzik }
1872e77cad1SMateusz Guzik LOCK_DELAY_SYSINIT(locks_delay_init);
1882e77cad1SMateusz Guzik 
18883a81bcbSJohn Baldwin #ifdef DDB
/*
 * DDB "show lock <addr>" command: print the class and name of the lock
 * object at the given address and dispatch to the class-specific show
 * routine.
 */
18983a81bcbSJohn Baldwin DB_SHOW_COMMAND(lock, db_show_lock)
19083a81bcbSJohn Baldwin {
19183a81bcbSJohn Baldwin 	struct lock_object *lock;
19283a81bcbSJohn Baldwin 	struct lock_class *class;
19383a81bcbSJohn Baldwin 
19483a81bcbSJohn Baldwin 	if (!have_addr)
19583a81bcbSJohn Baldwin 		return;
19683a81bcbSJohn Baldwin 	lock = (struct lock_object *)addr;
19783a81bcbSJohn Baldwin 	if (LO_CLASSINDEX(lock) > LOCK_CLASS_MAX) {
19883a81bcbSJohn Baldwin 		db_printf("Unknown lock class: %d\n", LO_CLASSINDEX(lock));
19983a81bcbSJohn Baldwin 		return;
20083a81bcbSJohn Baldwin 	}
	/*
	 * NOTE(review): an in-range class index may still name an unused
	 * (NULL) slot in lock_classes[], in which case garbage input would
	 * fault here in the debugger -- confirm whether that is acceptable.
	 */
20183a81bcbSJohn Baldwin 	class = LOCK_CLASS(lock);
20283a81bcbSJohn Baldwin 	db_printf(" class: %s\n", class->lc_name);
20383a81bcbSJohn Baldwin 	db_printf(" name: %s\n", lock->lo_name);
20483a81bcbSJohn Baldwin 	class->lc_ddb_show(lock);
20583a81bcbSJohn Baldwin }
20683a81bcbSJohn Baldwin #endif
2087c0435b9SKip Macy 
2097c0435b9SKip Macy #ifdef LOCK_PROFILING
210eea4f254SJeff Roberson 
211eea4f254SJeff Roberson /*
212eea4f254SJeff Roberson  * One object per-thread for each lock the thread owns.  Tracks individual
213eea4f254SJeff Roberson  * lock instances.
214eea4f254SJeff Roberson  */
215eea4f254SJeff Roberson struct lock_profile_object {
216eea4f254SJeff Roberson 	LIST_ENTRY(lock_profile_object) lpo_link;	/* Per-thread list. */
217eea4f254SJeff Roberson 	struct lock_object *lpo_obj;	/* Lock instance being tracked. */
218eea4f254SJeff Roberson 	const char	*lpo_file;	/* File of the acquisition site. */
219eea4f254SJeff Roberson 	int		lpo_line;	/* Line of the acquisition site. */
220eea4f254SJeff Roberson 	uint16_t	lpo_ref;	/* Reference (recursion) count. */
221eea4f254SJeff Roberson 	uint16_t	lpo_cnt;	/* Acquisitions recorded. */
22260ae52f7SEd Schouten 	uint64_t	lpo_acqtime;	/* Acquisition time, nanoseconds(). */
22360ae52f7SEd Schouten 	uint64_t	lpo_waittime;	/* Time spent waiting, in ns. */
224eea4f254SJeff Roberson 	u_int		lpo_contest_locking;	/* Acquired while contested. */
225eea4f254SJeff Roberson };
226eea4f254SJeff Roberson 
227eea4f254SJeff Roberson /*
228eea4f254SJeff Roberson  * One lock_prof for each (file, line, lock object) triple.
229eea4f254SJeff Roberson  */
230eea4f254SJeff Roberson struct lock_prof {
231eea4f254SJeff Roberson 	SLIST_ENTRY(lock_prof) link;	/* Hash bucket linkage. */
2320c66dc67SJeff Roberson 	struct lock_class *class;	/* Class of the profiled lock. */
233eea4f254SJeff Roberson 	const char	*file;		/* Acquisition file. */
234eea4f254SJeff Roberson 	const char	*name;		/* Lock name (lo_name). */
235eea4f254SJeff Roberson 	int		line;		/* Acquisition line. */
236eea4f254SJeff Roberson 	int		ticks;		/* Stamp to dedupe during dumps. */
237947265b6SKip Macy 	uintmax_t	cnt_wait_max;	/* "wait_max" column (ns). */
238eea4f254SJeff Roberson 	uintmax_t	cnt_max;	/* "max" column (ns). */
239eea4f254SJeff Roberson 	uintmax_t	cnt_tot;	/* "total" column (ns). */
240eea4f254SJeff Roberson 	uintmax_t	cnt_wait;	/* "wait_total" column (ns). */
241eea4f254SJeff Roberson 	uintmax_t	cnt_cur;	/* "count" column. */
242eea4f254SJeff Roberson 	uintmax_t	cnt_contest_locking;	/* "cnt_lock" column. */
243eea4f254SJeff Roberson };
244eea4f254SJeff Roberson 
245eea4f254SJeff Roberson SLIST_HEAD(lphead, lock_prof);
246eea4f254SJeff Roberson 
247eea4f254SJeff Roberson #define	LPROF_HASH_SIZE		4096
248eea4f254SJeff Roberson #define	LPROF_HASH_MASK		(LPROF_HASH_SIZE - 1)
249eea4f254SJeff Roberson #define	LPROF_CACHE_SIZE	4096
250eea4f254SJeff Roberson 
251eea4f254SJeff Roberson /*
252eea4f254SJeff Roberson  * Array of objects and profs for each type of object for each cpu.  Spinlocks
253b1ce21c6SRebecca Cran  * are handled separately because a thread may be preempted and acquire a
254eea4f254SJeff Roberson  * spinlock while in the lock profiling code of a non-spinlock.  In this way
255eea4f254SJeff Roberson  * we only need a critical section to protect the per-cpu lists.
256eea4f254SJeff Roberson  */
257eea4f254SJeff Roberson struct lock_prof_type {
258eea4f254SJeff Roberson 	struct lphead		lpt_lpalloc;	/* Free lock_prof entries. */
259eea4f254SJeff Roberson 	struct lpohead		lpt_lpoalloc;	/* Free profile objects. */
260eea4f254SJeff Roberson 	struct lphead		lpt_hash[LPROF_HASH_SIZE];	/* Active. */
261eea4f254SJeff Roberson 	struct lock_prof	lpt_prof[LPROF_CACHE_SIZE];
262eea4f254SJeff Roberson 	struct lock_profile_object lpt_objs[LPROF_CACHE_SIZE];
263eea4f254SJeff Roberson };
264eea4f254SJeff Roberson 
265eea4f254SJeff Roberson struct lock_prof_cpu {
266eea4f254SJeff Roberson 	struct lock_prof_type	lpc_types[2]; /* One for spin one for other. */
267eea4f254SJeff Roberson };
268eea4f254SJeff Roberson 
269d2be3ef0SMateusz Guzik DPCPU_DEFINE_STATIC(struct lock_prof_cpu, lp);
270d2be3ef0SMateusz Guzik #define	LP_CPU_SELF	(DPCPU_PTR(lp))
271d2be3ef0SMateusz Guzik #define	LP_CPU(cpu)	(DPCPU_ID_PTR((cpu), lp))
272eea4f254SJeff Roberson 
27329051116SMateusz Guzik volatile int __read_mostly lock_prof_enable;
2742e6b8de4SJeff Roberson static volatile int lock_prof_resetting;
275eea4f254SJeff Roberson 
2764e657159SMatthew D Fleming #define LPROF_SBUF_SIZE		256
277eea4f254SJeff Roberson 
278eea4f254SJeff Roberson static int lock_prof_rejected;
279eea4f254SJeff Roberson static int lock_prof_skipspin;
280eea4f254SJeff Roberson static int lock_prof_skipcount;
281eea4f254SJeff Roberson 
282eea4f254SJeff Roberson #ifndef USE_CPU_NANOSECONDS
/*
 * Uptime-based timestamp in nanoseconds, derived from binuptime() by
 * fixed-point conversion of the bintime fraction (only the upper 32 bits
 * of the 64-bit fraction are used).
 */
28360ae52f7SEd Schouten uint64_t
284eea4f254SJeff Roberson nanoseconds(void)
2857c0435b9SKip Macy {
286eea4f254SJeff Roberson 	struct bintime bt;
28760ae52f7SEd Schouten 	uint64_t ns;
2887c0435b9SKip Macy 
289eea4f254SJeff Roberson 	binuptime(&bt);
290eea4f254SJeff Roberson 	/* From bintime2timespec */
29160ae52f7SEd Schouten 	ns = bt.sec * (uint64_t)1000000000;
292eea4f254SJeff Roberson 	ns += ((uint64_t)1000000000 * (uint32_t)(bt.frac >> 32)) >> 32;
293eea4f254SJeff Roberson 	return (ns);
294eea4f254SJeff Roberson }
295eea4f254SJeff Roberson #endif
296fe68a916SKip Macy 
297eea4f254SJeff Roberson static void
298eea4f254SJeff Roberson lock_prof_init_type(struct lock_prof_type *type)
299eea4f254SJeff Roberson {
300eea4f254SJeff Roberson 	int i;
301fe68a916SKip Macy 
302eea4f254SJeff Roberson 	SLIST_INIT(&type->lpt_lpalloc);
303eea4f254SJeff Roberson 	LIST_INIT(&type->lpt_lpoalloc);
304eea4f254SJeff Roberson 	for (i = 0; i < LPROF_CACHE_SIZE; i++) {
305eea4f254SJeff Roberson 		SLIST_INSERT_HEAD(&type->lpt_lpalloc, &type->lpt_prof[i],
306eea4f254SJeff Roberson 		    link);
307eea4f254SJeff Roberson 		LIST_INSERT_HEAD(&type->lpt_lpoalloc, &type->lpt_objs[i],
308eea4f254SJeff Roberson 		    lpo_link);
309eea4f254SJeff Roberson 	}
310eea4f254SJeff Roberson }
311eea4f254SJeff Roberson 
312eea4f254SJeff Roberson static void
313eea4f254SJeff Roberson lock_prof_init(void *arg)
314eea4f254SJeff Roberson {
315eea4f254SJeff Roberson 	int cpu;
316eea4f254SJeff Roberson 
317cbba2cb3SMateusz Guzik 	CPU_FOREACH(cpu) {
318d2be3ef0SMateusz Guzik 		lock_prof_init_type(&LP_CPU(cpu)->lpc_types[0]);
319d2be3ef0SMateusz Guzik 		lock_prof_init_type(&LP_CPU(cpu)->lpc_types[1]);
320eea4f254SJeff Roberson 	}
321eea4f254SJeff Roberson }
322eea4f254SJeff Roberson SYSINIT(lockprof, SI_SUB_SMP, SI_ORDER_ANY, lock_prof_init, NULL);
323eea4f254SJeff Roberson 
/*
 * Block the caller (used from lock_profile_thread_exit()) until any
 * in-progress lock_prof_reset() has completed.
 */
3242e6b8de4SJeff Roberson static void
3252e6b8de4SJeff Roberson lock_prof_reset_wait(void)
3262e6b8de4SJeff Roberson {
3272e6b8de4SJeff Roberson 
3282e6b8de4SJeff Roberson 	/*
32928d91af3SJeff Roberson 	 * Spin relinquishing our cpu so that quiesce_all_cpus may
33028d91af3SJeff Roberson 	 * complete.
3312e6b8de4SJeff Roberson 	 */
3322e6b8de4SJeff Roberson 	while (lock_prof_resetting)
3332e6b8de4SJeff Roberson 		sched_relinquish(curthread);
3342e6b8de4SJeff Roberson }
3352e6b8de4SJeff Roberson 
/*
 * Discard all collected statistics: temporarily disable profiling,
 * quiesce every CPU, unlink and re-zero all per-CPU profiling state,
 * then restore the previous enable setting.  The order of operations
 * here is load-bearing; see the inline comments.
 */
336eea4f254SJeff Roberson static void
337eea4f254SJeff Roberson lock_prof_reset(void)
338eea4f254SJeff Roberson {
339eea4f254SJeff Roberson 	struct lock_prof_cpu *lpc;
340eea4f254SJeff Roberson 	int enabled, i, cpu;
341eea4f254SJeff Roberson 
3422e6b8de4SJeff Roberson 	/*
3432e6b8de4SJeff Roberson 	 * We not only race with acquiring and releasing locks but also
3442e6b8de4SJeff Roberson 	 * thread exit.  To be certain that threads exit without valid head
3452e6b8de4SJeff Roberson 	 * pointers they must see resetting set before enabled is cleared.
3462e6b8de4SJeff Roberson 	 * Otherwise a lock may not be removed from a per-thread list due
3472e6b8de4SJeff Roberson 	 * to disabled being set but not wait for reset() to remove it below.
3482e6b8de4SJeff Roberson 	 */
3492e6b8de4SJeff Roberson 	atomic_store_rel_int(&lock_prof_resetting, 1);
350eea4f254SJeff Roberson 	enabled = lock_prof_enable;
351eea4f254SJeff Roberson 	lock_prof_enable = 0;
3523ac2ac2eSMateusz Guzik 	/*
3533ac2ac2eSMateusz Guzik 	 * This both publishes lock_prof_enable as disabled and makes sure
3543ac2ac2eSMateusz Guzik 	 * everyone else reads it if they are not far enough. We wait for the
3553ac2ac2eSMateusz Guzik 	 * rest down below.
3563ac2ac2eSMateusz Guzik 	 */
3573ac2ac2eSMateusz Guzik 	cpus_fence_seq_cst();
3583ac2ac2eSMateusz Guzik 	quiesce_all_critical();
3592e6b8de4SJeff Roberson 	/*
3602e6b8de4SJeff Roberson 	 * Some objects may have migrated between CPUs.  Clear all links
3612e6b8de4SJeff Roberson 	 * before we zero the structures.  Some items may still be linked
3622e6b8de4SJeff Roberson 	 * into per-thread lists as well.
3632e6b8de4SJeff Roberson 	 */
364cbba2cb3SMateusz Guzik 	CPU_FOREACH(cpu) {
365d2be3ef0SMateusz Guzik 		lpc = LP_CPU(cpu);
366eea4f254SJeff Roberson 		for (i = 0; i < LPROF_CACHE_SIZE; i++) {
367eea4f254SJeff Roberson 			LIST_REMOVE(&lpc->lpc_types[0].lpt_objs[i], lpo_link);
368eea4f254SJeff Roberson 			LIST_REMOVE(&lpc->lpc_types[1].lpt_objs[i], lpo_link);
369eea4f254SJeff Roberson 		}
3702e6b8de4SJeff Roberson 	}
371cbba2cb3SMateusz Guzik 	CPU_FOREACH(cpu) {
372d2be3ef0SMateusz Guzik 		lpc = LP_CPU(cpu);
373eea4f254SJeff Roberson 		bzero(lpc, sizeof(*lpc));
374eea4f254SJeff Roberson 		lock_prof_init_type(&lpc->lpc_types[0]);
375eea4f254SJeff Roberson 		lock_prof_init_type(&lpc->lpc_types[1]);
376eea4f254SJeff Roberson 	}
3773ac2ac2eSMateusz Guzik 	/*
3783ac2ac2eSMateusz Guzik 	 * Paired with the fence from cpus_fence_seq_cst()
3793ac2ac2eSMateusz Guzik 	 */
3802e6b8de4SJeff Roberson 	atomic_store_rel_int(&lock_prof_resetting, 0);
381eea4f254SJeff Roberson 	lock_prof_enable = enabled;
382eea4f254SJeff Roberson }
383eea4f254SJeff Roberson 
/*
 * Emit one formatted statistics line for the aggregated prof 'lp'.
 * Leading "../" path components are stripped from the file name and the
 * nanosecond counters are reported divided by 1000 (microseconds).
 */
384eea4f254SJeff Roberson static void
385eea4f254SJeff Roberson lock_prof_output(struct lock_prof *lp, struct sbuf *sb)
386eea4f254SJeff Roberson {
387eea4f254SJeff Roberson 	const char *p;
388eea4f254SJeff Roberson 
389eea4f254SJeff Roberson 	for (p = lp->file; p != NULL && strncmp(p, "../", 3) == 0; p += 3);
390eea4f254SJeff Roberson 	sbuf_printf(sb,
391947265b6SKip Macy 	    "%8ju %9ju %11ju %11ju %11ju %6ju %6ju %2ju %6ju %s:%d (%s:%s)\n",
392947265b6SKip Macy 	    lp->cnt_max / 1000, lp->cnt_wait_max / 1000, lp->cnt_tot / 1000,
393eea4f254SJeff Roberson 	    lp->cnt_wait / 1000, lp->cnt_cur,
394eea4f254SJeff Roberson 	    lp->cnt_cur == 0 ? (uintmax_t)0 :
395eea4f254SJeff Roberson 	    lp->cnt_tot / (lp->cnt_cur * 1000),
396eea4f254SJeff Roberson 	    lp->cnt_cur == 0 ? (uintmax_t)0 :
397eea4f254SJeff Roberson 	    lp->cnt_wait / (lp->cnt_cur * 1000),
398eea4f254SJeff Roberson 	    (uintmax_t)0, lp->cnt_contest_locking,
3990c66dc67SJeff Roberson 	    p, lp->line, lp->class->lc_name, lp->name);
400eea4f254SJeff Roberson }
401eea4f254SJeff Roberson 
/*
 * Aggregate into *dst the counters of every per-CPU lock_prof in bucket
 * 'hash' that matches 'match' (same file/line/name).  Entries already
 * stamped with the ticks value 't' are skipped, and visited entries are
 * stamped, so each prof is counted exactly once per dump.
 */
402eea4f254SJeff Roberson static void
403eea4f254SJeff Roberson lock_prof_sum(struct lock_prof *match, struct lock_prof *dst, int hash,
404eea4f254SJeff Roberson     int spin, int t)
405eea4f254SJeff Roberson {
406eea4f254SJeff Roberson 	struct lock_prof_type *type;
407eea4f254SJeff Roberson 	struct lock_prof *l;
408eea4f254SJeff Roberson 	int cpu;
409eea4f254SJeff Roberson 
410eea4f254SJeff Roberson 	dst->file = match->file;
411eea4f254SJeff Roberson 	dst->line = match->line;
4120c66dc67SJeff Roberson 	dst->class = match->class;
413eea4f254SJeff Roberson 	dst->name = match->name;
414eea4f254SJeff Roberson 
415cbba2cb3SMateusz Guzik 	CPU_FOREACH(cpu) {
416d2be3ef0SMateusz Guzik 		type = &LP_CPU(cpu)->lpc_types[spin];
417eea4f254SJeff Roberson 		SLIST_FOREACH(l, &type->lpt_hash[hash], link) {
418eea4f254SJeff Roberson 			if (l->ticks == t)
419eea4f254SJeff Roberson 				continue;
420eea4f254SJeff Roberson 			if (l->file != match->file || l->line != match->line ||
4210c66dc67SJeff Roberson 			    l->name != match->name)
422eea4f254SJeff Roberson 				continue;
423eea4f254SJeff Roberson 			l->ticks = t;
424eea4f254SJeff Roberson 			if (l->cnt_max > dst->cnt_max)
425eea4f254SJeff Roberson 				dst->cnt_max = l->cnt_max;
426947265b6SKip Macy 			if (l->cnt_wait_max > dst->cnt_wait_max)
427947265b6SKip Macy 				dst->cnt_wait_max = l->cnt_wait_max;
428eea4f254SJeff Roberson 			dst->cnt_tot += l->cnt_tot;
429eea4f254SJeff Roberson 			dst->cnt_wait += l->cnt_wait;
430eea4f254SJeff Roberson 			dst->cnt_cur += l->cnt_cur;
431eea4f254SJeff Roberson 			dst->cnt_contest_locking += l->cnt_contest_locking;
432eea4f254SJeff Roberson 		}
433eea4f254SJeff Roberson 	}
434eea4f254SJeff Roberson }
435eea4f254SJeff Roberson 
/*
 * Walk every hash bucket of one per-CPU type, aggregating each prof not
 * yet stamped with 't' across all CPUs and printing one line for it.
 */
436eea4f254SJeff Roberson static void
437eea4f254SJeff Roberson lock_prof_type_stats(struct lock_prof_type *type, struct sbuf *sb, int spin,
438eea4f254SJeff Roberson     int t)
439eea4f254SJeff Roberson {
440eea4f254SJeff Roberson 	struct lock_prof *l;
441eea4f254SJeff Roberson 	int i;
442eea4f254SJeff Roberson 
443eea4f254SJeff Roberson 	for (i = 0; i < LPROF_HASH_SIZE; ++i) {
444eea4f254SJeff Roberson 		SLIST_FOREACH(l, &type->lpt_hash[i], link) {
445eea4f254SJeff Roberson 			struct lock_prof lp = {};
446eea4f254SJeff Roberson 
447eea4f254SJeff Roberson 			if (l->ticks == t)
448eea4f254SJeff Roberson 				continue;
449eea4f254SJeff Roberson 			lock_prof_sum(l, &lp, i, spin, t);
450eea4f254SJeff Roberson 			lock_prof_output(&lp, sb);
451eea4f254SJeff Roberson 		}
452eea4f254SJeff Roberson 	}
453eea4f254SJeff Roberson }
454eea4f254SJeff Roberson 
/*
 * Sysctl handler producing the lock profiling report.  Profiling is
 * paused and all CPUs quiesced while the per-CPU data is aggregated and
 * formatted into an sbuf; the 'ticks' value taken here is the dedupe
 * stamp used by lock_prof_sum().
 */
455eea4f254SJeff Roberson static int
456eea4f254SJeff Roberson dump_lock_prof_stats(SYSCTL_HANDLER_ARGS)
457eea4f254SJeff Roberson {
458eea4f254SJeff Roberson 	struct sbuf *sb;
459eea4f254SJeff Roberson 	int error, cpu, t;
4600c66dc67SJeff Roberson 	int enabled;
461eea4f254SJeff Roberson 
46200f0e671SMatthew D Fleming 	error = sysctl_wire_old_buffer(req, 0);
46300f0e671SMatthew D Fleming 	if (error != 0)
46400f0e671SMatthew D Fleming 		return (error);
4654e657159SMatthew D Fleming 	sb = sbuf_new_for_sysctl(NULL, NULL, LPROF_SBUF_SIZE, req);
466947265b6SKip Macy 	sbuf_printf(sb, "\n%8s %9s %11s %11s %11s %6s %6s %2s %6s %s\n",
467947265b6SKip Macy 	    "max", "wait_max", "total", "wait_total", "count", "avg", "wait_avg", "cnt_hold", "cnt_lock", "name");
4680c66dc67SJeff Roberson 	enabled = lock_prof_enable;
4690c66dc67SJeff Roberson 	lock_prof_enable = 0;
4703ac2ac2eSMateusz Guzik 	/*
4713ac2ac2eSMateusz Guzik 	 * See the comment in lock_prof_reset
4723ac2ac2eSMateusz Guzik 	 */
4733ac2ac2eSMateusz Guzik 	cpus_fence_seq_cst();
4743ac2ac2eSMateusz Guzik 	quiesce_all_critical();
475eea4f254SJeff Roberson 	t = ticks;
476cbba2cb3SMateusz Guzik 	CPU_FOREACH(cpu) {
477d2be3ef0SMateusz Guzik 		lock_prof_type_stats(&LP_CPU(cpu)->lpc_types[0], sb, 0, t);
478d2be3ef0SMateusz Guzik 		lock_prof_type_stats(&LP_CPU(cpu)->lpc_types[1], sb, 1, t);
479eea4f254SJeff Roberson 	}
4803ac2ac2eSMateusz Guzik 	atomic_thread_fence_rel();
4810c66dc67SJeff Roberson 	lock_prof_enable = enabled;
482eea4f254SJeff Roberson 
4834e657159SMatthew D Fleming 	error = sbuf_finish(sb);
4844e657159SMatthew D Fleming 	/* Output a trailing NUL. */
4854e657159SMatthew D Fleming 	if (error == 0)
4864e657159SMatthew D Fleming 		error = SYSCTL_OUT(req, "", 1);
487eea4f254SJeff Roberson 	sbuf_delete(sb);
488eea4f254SJeff Roberson 	return (error);
489eea4f254SJeff Roberson }
490eea4f254SJeff Roberson 
491eea4f254SJeff Roberson static int
492eea4f254SJeff Roberson enable_lock_prof(SYSCTL_HANDLER_ARGS)
493eea4f254SJeff Roberson {
494eea4f254SJeff Roberson 	int error, v;
495eea4f254SJeff Roberson 
496eea4f254SJeff Roberson 	v = lock_prof_enable;
497eea4f254SJeff Roberson 	error = sysctl_handle_int(oidp, &v, v, req);
498eea4f254SJeff Roberson 	if (error)
499eea4f254SJeff Roberson 		return (error);
500eea4f254SJeff Roberson 	if (req->newptr == NULL)
501eea4f254SJeff Roberson 		return (error);
502eea4f254SJeff Roberson 	if (v == lock_prof_enable)
503eea4f254SJeff Roberson 		return (0);
504eea4f254SJeff Roberson 	if (v == 1)
505eea4f254SJeff Roberson 		lock_prof_reset();
506eea4f254SJeff Roberson 	lock_prof_enable = !!v;
507eea4f254SJeff Roberson 
508eea4f254SJeff Roberson 	return (0);
509eea4f254SJeff Roberson }
510eea4f254SJeff Roberson 
511eea4f254SJeff Roberson static int
512eea4f254SJeff Roberson reset_lock_prof_stats(SYSCTL_HANDLER_ARGS)
513eea4f254SJeff Roberson {
514eea4f254SJeff Roberson 	int error, v;
515eea4f254SJeff Roberson 
516eea4f254SJeff Roberson 	v = 0;
517eea4f254SJeff Roberson 	error = sysctl_handle_int(oidp, &v, 0, req);
518eea4f254SJeff Roberson 	if (error)
519eea4f254SJeff Roberson 		return (error);
520eea4f254SJeff Roberson 	if (req->newptr == NULL)
521eea4f254SJeff Roberson 		return (error);
522eea4f254SJeff Roberson 	if (v == 0)
523eea4f254SJeff Roberson 		return (0);
524eea4f254SJeff Roberson 	lock_prof_reset();
525eea4f254SJeff Roberson 
526eea4f254SJeff Roberson 	return (0);
527eea4f254SJeff Roberson }
528eea4f254SJeff Roberson 
/*
 * Find or allocate the current CPU's lock_prof entry for the triple
 * (lo, file, line).  Runs on this CPU's cache (LP_CPU_SELF); callers in
 * this file invoke it from within a critical section.  Returns NULL and
 * bumps lock_prof_rejected when the per-CPU cache is exhausted.
 */
529eea4f254SJeff Roberson static struct lock_prof *
530eea4f254SJeff Roberson lock_profile_lookup(struct lock_object *lo, int spin, const char *file,
531eea4f254SJeff Roberson     int line)
532eea4f254SJeff Roberson {
533eea4f254SJeff Roberson 	const char *unknown = "(unknown)";
534eea4f254SJeff Roberson 	struct lock_prof_type *type;
535eea4f254SJeff Roberson 	struct lock_prof *lp;
536eea4f254SJeff Roberson 	struct lphead *head;
537eea4f254SJeff Roberson 	const char *p;
538eea4f254SJeff Roberson 	u_int hash;
539eea4f254SJeff Roberson 
540eea4f254SJeff Roberson 	p = file;
541eea4f254SJeff Roberson 	if (p == NULL || *p == '\0')
542eea4f254SJeff Roberson 		p = unknown;
	/* Hash on the pointer identities of the name/file strings. */
543eea4f254SJeff Roberson 	hash = (uintptr_t)lo->lo_name * 31 + (uintptr_t)p * 31 + line;
544eea4f254SJeff Roberson 	hash &= LPROF_HASH_MASK;
545d2be3ef0SMateusz Guzik 	type = &LP_CPU_SELF->lpc_types[spin];
546eea4f254SJeff Roberson 	head = &type->lpt_hash[hash];
547eea4f254SJeff Roberson 	SLIST_FOREACH(lp, head, link) {
548eea4f254SJeff Roberson 		if (lp->line == line && lp->file == p &&
549eea4f254SJeff Roberson 		    lp->name == lo->lo_name)
550eea4f254SJeff Roberson 			return (lp);
551eea4f254SJeff Roberson 
552eea4f254SJeff Roberson 	}
553eea4f254SJeff Roberson 	lp = SLIST_FIRST(&type->lpt_lpalloc);
554eea4f254SJeff Roberson 	if (lp == NULL) {
555eea4f254SJeff Roberson 		lock_prof_rejected++;
556eea4f254SJeff Roberson 		return (lp);
557eea4f254SJeff Roberson 	}
558eea4f254SJeff Roberson 	SLIST_REMOVE_HEAD(&type->lpt_lpalloc, link);
559eea4f254SJeff Roberson 	lp->file = p;
560eea4f254SJeff Roberson 	lp->line = line;
5610c66dc67SJeff Roberson 	lp->class = LOCK_CLASS(lo);
562eea4f254SJeff Roberson 	lp->name = lo->lo_name;
563eea4f254SJeff Roberson 	SLIST_INSERT_HEAD(&type->lpt_hash[hash], lp, link);
564eea4f254SJeff Roberson 	return (lp);
565eea4f254SJeff Roberson }
566eea4f254SJeff Roberson 
/*
 * Find the current thread's lock_profile_object for (lo, file, line), or
 * allocate a fresh one from the current CPU's free list and link it onto
 * the thread's td_lprof list.  Returns NULL (and bumps
 * lock_prof_rejected) when the per-CPU pool is exhausted.
 */
567eea4f254SJeff Roberson static struct lock_profile_object *
568eea4f254SJeff Roberson lock_profile_object_lookup(struct lock_object *lo, int spin, const char *file,
569eea4f254SJeff Roberson     int line)
570eea4f254SJeff Roberson {
571eea4f254SJeff Roberson 	struct lock_profile_object *l;
572eea4f254SJeff Roberson 	struct lock_prof_type *type;
573eea4f254SJeff Roberson 	struct lpohead *head;
574eea4f254SJeff Roberson 
575eea4f254SJeff Roberson 	head = &curthread->td_lprof[spin];
576eea4f254SJeff Roberson 	LIST_FOREACH(l, head, lpo_link)
577eea4f254SJeff Roberson 		if (l->lpo_obj == lo && l->lpo_file == file &&
578eea4f254SJeff Roberson 		    l->lpo_line == line)
579eea4f254SJeff Roberson 			return (l);
580d2be3ef0SMateusz Guzik 	type = &LP_CPU_SELF->lpc_types[spin];
581eea4f254SJeff Roberson 	l = LIST_FIRST(&type->lpt_lpoalloc);
582eea4f254SJeff Roberson 	if (l == NULL) {
583eea4f254SJeff Roberson 		lock_prof_rejected++;
584eea4f254SJeff Roberson 		return (NULL);
585eea4f254SJeff Roberson 	}
586eea4f254SJeff Roberson 	LIST_REMOVE(l, lpo_link);
587eea4f254SJeff Roberson 	l->lpo_obj = lo;
588eea4f254SJeff Roberson 	l->lpo_file = file;
589eea4f254SJeff Roberson 	l->lpo_line = line;
590eea4f254SJeff Roberson 	l->lpo_cnt = 0;
591eea4f254SJeff Roberson 	LIST_INSERT_HEAD(head, l, lpo_link);
592eea4f254SJeff Roberson 
593eea4f254SJeff Roberson 	return (l);
594eea4f254SJeff Roberson }
595eea4f254SJeff Roberson 
/*
 * Record a successful lock acquisition.  'contested'/'waittime' come
 * from the acquire path.  Recording is skipped when profiling is off,
 * the lock is LO_NOPROFILE, the skipcount sampling filter rejects this
 * event, or spinlocks are being skipped.  The critical section pins us
 * to a CPU and, with the trailing release fence, synchronizes against
 * lock_prof_reset().
 */
596eea4f254SJeff Roberson void
597eea4f254SJeff Roberson lock_profile_obtain_lock_success(struct lock_object *lo, int contested,
598eea4f254SJeff Roberson     uint64_t waittime, const char *file, int line)
599eea4f254SJeff Roberson {
600eea4f254SJeff Roberson 	static int lock_prof_count;
601eea4f254SJeff Roberson 	struct lock_profile_object *l;
602eea4f254SJeff Roberson 	int spin;
603eea4f254SJeff Roberson 
60435370593SAndriy Gapon 	if (SCHEDULER_STOPPED())
60535370593SAndriy Gapon 		return;
60635370593SAndriy Gapon 
607eea4f254SJeff Roberson 	/* don't reset the timer when/if recursing */
608eea4f254SJeff Roberson 	if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE))
609eea4f254SJeff Roberson 		return;
610eea4f254SJeff Roberson 	if (lock_prof_skipcount &&
611357911ceSKris Kennaway 	    (++lock_prof_count % lock_prof_skipcount) != 0)
612eea4f254SJeff Roberson 		return;
61313ddf72dSAttilio Rao 	spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
614eea4f254SJeff Roberson 	if (spin && lock_prof_skipspin == 1)
615eea4f254SJeff Roberson 		return;
6162e6b8de4SJeff Roberson 	critical_enter();
6172e6b8de4SJeff Roberson 	/* Recheck enabled now that we're in a critical section. */
6182e6b8de4SJeff Roberson 	if (lock_prof_enable == 0)
6192e6b8de4SJeff Roberson 		goto out;
620eea4f254SJeff Roberson 	l = lock_profile_object_lookup(lo, spin, file, line);
621eea4f254SJeff Roberson 	if (l == NULL)
6222e6b8de4SJeff Roberson 		goto out;
623eea4f254SJeff Roberson 	l->lpo_cnt++;
	/* A ref > 1 means a recursive acquire; keep the original times. */
624eea4f254SJeff Roberson 	if (++l->lpo_ref > 1)
6252e6b8de4SJeff Roberson 		goto out;
626eea4f254SJeff Roberson 	l->lpo_contest_locking = contested;
6277c0435b9SKip Macy 	l->lpo_acqtime = nanoseconds();
628aa077979SKip Macy 	if (waittime && (l->lpo_acqtime > waittime))
6297c0435b9SKip Macy 		l->lpo_waittime = l->lpo_acqtime - waittime;
630aa077979SKip Macy 	else
631aa077979SKip Macy 		l->lpo_waittime = 0;
6322e6b8de4SJeff Roberson out:
6333ac2ac2eSMateusz Guzik 	/*
6343ac2ac2eSMateusz Guzik 	 * Paired with cpus_fence_seq_cst().
6353ac2ac2eSMateusz Guzik 	 */
6363ac2ac2eSMateusz Guzik 	atomic_thread_fence_rel();
6372e6b8de4SJeff Roberson 	critical_exit();
6382e6b8de4SJeff Roberson }
6392e6b8de4SJeff Roberson 
/*
 * Called when a thread exits to make sure it no longer references any
 * lock_profile_objects.  Must be called with td_critnest == 0 since it
 * may need to sleep in lock_prof_reset_wait().
 */
void
lock_profile_thread_exit(struct thread *td)
{
#ifdef INVARIANTS
	struct lock_profile_object *l;

	MPASS(curthread->td_critnest == 0);
#endif
	/*
	 * If lock profiling was disabled we have to wait for reset to
	 * clear our pointers before we can exit safely.
	 */
	lock_prof_reset_wait();
#ifdef INVARIANTS
	/*
	 * Diagnostic only: report any locks the thread still appears to
	 * hold.  td_lprof[0] tracks sleepable locks, td_lprof[1] spin locks.
	 */
	LIST_FOREACH(l, &td->td_lprof[0], lpo_link)
		printf("thread still holds lock acquired at %s:%d\n",
		    l->lpo_file, l->lpo_line);
	LIST_FOREACH(l, &td->td_lprof[1], lpo_link)
		printf("thread still holds lock acquired at %s:%d\n",
		    l->lpo_file, l->lpo_line);
#endif
	MPASS(LIST_FIRST(&td->td_lprof[0]) == NULL);
	MPASS(LIST_FIRST(&td->td_lprof[1]) == NULL);
}
6647c0435b9SKip Macy 
/*
 * Record a lock release: fold the hold time and wait time accumulated in
 * the thread's lock_profile_object into the per-CPU lock_prof statistics
 * row for this acquisition site, then return the lpo to the per-CPU free
 * list.  Must be called even when profiling has been disabled so that the
 * lpo is removed from the thread's queue.
 */
void
lock_profile_release_lock(struct lock_object *lo)
{
	struct lock_profile_object *l;
	struct lock_prof_type *type;
	struct lock_prof *lp;
	uint64_t curtime, holdtime;
	struct lpohead *head;
	int spin;

	if (SCHEDULER_STOPPED())
		return;
	if (lo->lo_flags & LO_NOPROFILE)
		return;
	spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
	head = &curthread->td_lprof[spin];
	/* Fast path: nothing tracked for this thread/class at all. */
	if (LIST_FIRST(head) == NULL)
		return;
	/* Pin to this CPU; per-CPU structures are lock-free within it. */
	critical_enter();
	/* Recheck enabled now that we're in a critical section. */
	if (lock_prof_enable == 0 && lock_prof_resetting == 1)
		goto out;
	/*
	 * If lock profiling is not enabled we still want to remove the
	 * lpo from our queue.
	 */
	LIST_FOREACH(l, head, lpo_link)
		if (l->lpo_obj == lo)
			break;
	/* Not found: lock was acquired before profiling was enabled. */
	if (l == NULL)
		goto out;
	/* Recursive release; statistics are folded in on the final one. */
	if (--l->lpo_ref > 0)
		goto out;
	lp = lock_profile_lookup(lo, spin, l->lpo_file, l->lpo_line);
	if (lp == NULL)
		goto release;
	curtime = nanoseconds();
	/* Guard against a clock running backwards across CPUs. */
	if (curtime < l->lpo_acqtime)
		goto release;
	holdtime = curtime - l->lpo_acqtime;

	/*
	 * Record if the lock has been held longer now than ever
	 * before.
	 */
	if (holdtime > lp->cnt_max)
		lp->cnt_max = holdtime;
	if (l->lpo_waittime > lp->cnt_wait_max)
		lp->cnt_wait_max = l->lpo_waittime;
	lp->cnt_tot += holdtime;
	lp->cnt_wait += l->lpo_waittime;
	lp->cnt_contest_locking += l->lpo_contest_locking;
	lp->cnt_cur += l->lpo_cnt;
release:
	/* Return the lpo to this CPU's free list for reuse. */
	LIST_REMOVE(l, lpo_link);
	type = &LP_CPU_SELF->lpc_types[spin];
	LIST_INSERT_HEAD(&type->lpt_lpoalloc, l, lpo_link);
out:
	/*
	 * Paired with cpus_fence_seq_cst().
	 */
	atomic_thread_fence_rel();
	critical_exit();
}
7297c0435b9SKip Macy 
/*
 * sysctl knobs under debug.lock.prof for controlling and reading the
 * lock profiling machinery.
 */
static SYSCTL_NODE(_debug_lock, OID_AUTO, prof,
    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "lock profiling");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipspin, CTLFLAG_RW,
    &lock_prof_skipspin, 0, "Skip profiling on spinlocks.");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipcount, CTLFLAG_RW,
    &lock_prof_skipcount, 0, "Sample approximately every N lock acquisitions.");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &lock_prof_rejected, 0, "Number of rejected profiling records");
/* Read-only string handler that formats the accumulated statistics. */
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, stats,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    dump_lock_prof_stats, "A",
    "Lock profiling statistics");
/* Writing here resets all counters; runs under Giant (CTLFLAG_NEEDGIANT). */
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, reset,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0,
    reset_lock_prof_stats, "I",
    "Reset lock profiling statistics");
/* Toggle profiling on/off at run time. */
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, enable,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
    enable_lock_prof, "I",
    "Enable lock profiling");
751eea4f254SJeff Roberson 
7527c0435b9SKip Macy #endif
753