xref: /freebsd/sys/kern/subr_lock.c (revision ae2cbf4c649fecd3302a3bea16672345582d2562)
1 /*-
2  * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. Neither the name of the author nor the names of any co-contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 /*
31  * This module holds the global variables and functions used to maintain
32  * lock_object structures.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include "opt_ddb.h"
39 #include "opt_mprof.h"
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/ktr.h>
45 #include <sys/linker_set.h>
46 #include <sys/lock.h>
47 #include <sys/lock_profile.h>
48 #include <sys/malloc.h>
49 #include <sys/pcpu.h>
50 #include <sys/proc.h>
51 #include <sys/sbuf.h>
52 #include <sys/smp.h>
53 #include <sys/sysctl.h>
54 
55 #ifdef DDB
56 #include <ddb/ddb.h>
57 #endif
58 
59 #include <machine/cpufunc.h>
60 
61 CTASSERT(LOCK_CLASS_MAX == 15);
62 
63 struct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = {
64 	&lock_class_mtx_spin,
65 	&lock_class_mtx_sleep,
66 	&lock_class_sx,
67 	&lock_class_rm,
68 	&lock_class_rw,
69 	&lock_class_lockmgr,
70 };
71 
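/*
 * Initialize a lock object: look up the index of its class in
 * lock_classes[], record it together with the caller's flags, set the
 * name and register the lock with WITNESS.
 */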
72 void
73 lock_init(struct lock_object *lock, struct lock_class *class, const char *name,
74     const char *type, int flags)
75 {
76 	int i;
77 
78 	/* Check for double-init and zero object. */
79 	KASSERT(!lock_initalized(lock), ("lock \"%s\" %p already initialized",
80 	    name, lock));
81 
82 	/* Look up lock class to find its index. */
83 	for (i = 0; i < LOCK_CLASS_MAX; i++)
84 		if (lock_classes[i] == class) {
85 			lock->lo_flags = i << LO_CLASSSHIFT;
86 			break;
87 		}
88 	KASSERT(i < LOCK_CLASS_MAX, ("unknown lock class %p", class));
89 
90 	/* Initialize the lock object. */
91 	lock->lo_name = name;
92 	lock->lo_flags |= flags | LO_INITIALIZED;
93 	LOCK_LOG_INIT(lock, 0);
94 	WITNESS_INIT(lock, (type != NULL) ? type : name);
95 }
96 
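/*
 * Tear down a lock object: notify WITNESS and clear the initialized flag.
 */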
97 void
98 lock_destroy(struct lock_object *lock)
99 {
100 
101 	KASSERT(lock_initalized(lock), ("lock %p is not initialized", lock));
102 	WITNESS_DESTROY(lock);
103 	LOCK_LOG_DESTROY(lock, 0);
104 	lock->lo_flags &= ~LO_INITIALIZED;
105 }
106 
107 #ifdef DDB
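/*
 * DDB "show lock <address>" command: print the class and name of the
 * lock object at the given address and hand off to the class-specific
 * lc_ddb_show routine.
 */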
108 DB_SHOW_COMMAND(lock, db_show_lock)
109 {
110 	struct lock_object *lock;
111 	struct lock_class *class;
112 
113 	if (!have_addr)
114 		return;
115 	lock = (struct lock_object *)addr;
116 	if (LO_CLASSINDEX(lock) > LOCK_CLASS_MAX) {
117 		db_printf("Unknown lock class: %d\n", LO_CLASSINDEX(lock));
118 		return;
119 	}
120 	class = LOCK_CLASS(lock);
121 	db_printf(" class: %s\n", class->lc_name);
122 	db_printf(" name: %s\n", lock->lo_name);
123 	class->lc_ddb_show(lock);
124 }
125 #endif
126 
127 #ifdef LOCK_PROFILING
128 
129 /*
130  * One object per-thread for each lock the thread owns.  Tracks individual
131  * lock instances.
132  */
133 struct lock_profile_object {
134 	LIST_ENTRY(lock_profile_object) lpo_link;
135 	struct lock_object *lpo_obj;
136 	const char	*lpo_file;
137 	int		lpo_line;
138 	uint16_t	lpo_ref;
139 	uint16_t	lpo_cnt;
140 	u_int64_t	lpo_acqtime;
141 	u_int64_t	lpo_waittime;
142 	u_int		lpo_contest_locking;
143 };
144 
145 /*
146  * One lock_prof for each (lock name, file, line) triple.
147  */
148 struct lock_prof {
149 	SLIST_ENTRY(lock_prof) link;
150 	struct lock_class *class;
151 	const char	*file;
152 	const char	*name;
153 	int		line;
154 	int		ticks;
155 	uintmax_t	cnt_wait_max;
156 	uintmax_t	cnt_max;
157 	uintmax_t	cnt_tot;
158 	uintmax_t	cnt_wait;
159 	uintmax_t	cnt_cur;
160 	uintmax_t	cnt_contest_locking;
161 };
162 
163 SLIST_HEAD(lphead, lock_prof);
164 
165 #define	LPROF_HASH_SIZE		4096
166 #define	LPROF_HASH_MASK		(LPROF_HASH_SIZE - 1)
167 #define	LPROF_CACHE_SIZE	4096
168 
169 /*
170  * Array of objects and profs for each type of object for each cpu.  Spinlocks
171  * are handled separately because a thread may be preempted and acquire a
172  * spinlock while in the lock profiling code of a non-spinlock.  In this way
173  * we only need a critical section to protect the per-cpu lists.
174  */
175 struct lock_prof_type {
176 	struct lphead		lpt_lpalloc;
177 	struct lpohead		lpt_lpoalloc;
178 	struct lphead		lpt_hash[LPROF_HASH_SIZE];
179 	struct lock_prof	lpt_prof[LPROF_CACHE_SIZE];
180 	struct lock_profile_object lpt_objs[LPROF_CACHE_SIZE];
181 };
182 
183 struct lock_prof_cpu {
184 	struct lock_prof_type	lpc_types[2]; /* One for spin one for other. */
185 };
186 
187 struct lock_prof_cpu *lp_cpu[MAXCPU];
188 
189 int lock_prof_enable = 0;
190 
191 /* SWAG: sbuf size = avg stat. line size * number of locks */
192 #define LPROF_SBUF_SIZE		(256 * 400)
193 
194 static int lock_prof_rejected;
195 static int lock_prof_skipspin;
196 static int lock_prof_skipcount;
197 
198 #ifndef USE_CPU_NANOSECONDS
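/*
 * Return the system uptime in nanoseconds.  bt.frac is a 64-bit binary
 * fraction of a second, so multiplying its upper 32 bits by 10^9 and
 * shifting right by 32 yields the fractional part in nanoseconds.
 */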
199 u_int64_t
200 nanoseconds(void)
201 {
202 	struct bintime bt;
203 	u_int64_t ns;
204 
205 	binuptime(&bt);
206 	/* From bintime2timespec */
207 	ns = bt.sec * (u_int64_t)1000000000;
208 	ns += ((uint64_t)1000000000 * (uint32_t)(bt.frac >> 32)) >> 32;
209 	return (ns);
210 }
211 #endif
212 
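/*
 * Thread the statically allocated lock_prof and lock_profile_object
 * arrays of a profiling type onto its free lists.
 */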
213 static void
214 lock_prof_init_type(struct lock_prof_type *type)
215 {
216 	int i;
217 
218 	SLIST_INIT(&type->lpt_lpalloc);
219 	LIST_INIT(&type->lpt_lpoalloc);
220 	for (i = 0; i < LPROF_CACHE_SIZE; i++) {
221 		SLIST_INSERT_HEAD(&type->lpt_lpalloc, &type->lpt_prof[i],
222 		    link);
223 		LIST_INSERT_HEAD(&type->lpt_lpoalloc, &type->lpt_objs[i],
224 		    lpo_link);
225 	}
226 }
227 
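/*
 * Allocate and initialize the per-CPU profiling state for every CPU.
 * Runs from SYSINIT once mp_maxid is known.
 */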
228 static void
229 lock_prof_init(void *arg)
230 {
231 	int cpu;
232 
233 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
234 		lp_cpu[cpu] = malloc(sizeof(*lp_cpu[cpu]), M_DEVBUF,
235 		    M_WAITOK | M_ZERO);
236 		lock_prof_init_type(&lp_cpu[cpu]->lpc_types[0]);
237 		lock_prof_init_type(&lp_cpu[cpu]->lpc_types[1]);
238 	}
239 }
240 SYSINIT(lockprof, SI_SUB_SMP, SI_ORDER_ANY, lock_prof_init, NULL);
241 
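/*
 * Discard all accumulated statistics: disable profiling and pause
 * briefly so in-flight updates drain, unlink any outstanding per-thread
 * objects, zero the per-CPU state and rebuild the free lists, then
 * restore the previous enable setting.
 */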
242 static void
243 lock_prof_reset(void)
244 {
245 	struct lock_prof_cpu *lpc;
246 	int enabled, i, cpu;
247 
248 	enabled = lock_prof_enable;
249 	lock_prof_enable = 0;
250 	pause("lpreset", hz / 10);
251 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
252 		lpc = lp_cpu[cpu];
253 		for (i = 0; i < LPROF_CACHE_SIZE; i++) {
254 			LIST_REMOVE(&lpc->lpc_types[0].lpt_objs[i], lpo_link);
255 			LIST_REMOVE(&lpc->lpc_types[1].lpt_objs[i], lpo_link);
256 		}
257 		bzero(lpc, sizeof(*lpc));
258 		lock_prof_init_type(&lpc->lpc_types[0]);
259 		lock_prof_init_type(&lpc->lpc_types[1]);
260 	}
261 	lock_prof_enable = enabled;
262 }
263 
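/*
 * Emit one formatted line of statistics for a summed entry, trimming
 * leading "../" components from the file name.  Accumulated times are
 * scaled from nanoseconds to microseconds for output.
 */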
264 static void
265 lock_prof_output(struct lock_prof *lp, struct sbuf *sb)
266 {
267 	const char *p;
268 
269 	for (p = lp->file; p != NULL && strncmp(p, "../", 3) == 0; p += 3);
270 	sbuf_printf(sb,
271 	    "%8ju %9ju %11ju %11ju %11ju %6ju %6ju %2ju %6ju %s:%d (%s:%s)\n",
272 	    lp->cnt_max / 1000, lp->cnt_wait_max / 1000, lp->cnt_tot / 1000,
273 	    lp->cnt_wait / 1000, lp->cnt_cur,
274 	    lp->cnt_cur == 0 ? (uintmax_t)0 :
275 	    lp->cnt_tot / (lp->cnt_cur * 1000),
276 	    lp->cnt_cur == 0 ? (uintmax_t)0 :
277 	    lp->cnt_wait / (lp->cnt_cur * 1000),
278 	    (uintmax_t)0, lp->cnt_contest_locking,
279 	    p, lp->line, lp->class->lc_name, lp->name);
280 }
281 
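/*
 * Merge the statistics of every per-CPU entry matching 'match' (same
 * file, line and lock name) into 'dst', stamping each merged entry with
 * the ticks value 't' so it is only reported once per pass.
 */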
282 static void
283 lock_prof_sum(struct lock_prof *match, struct lock_prof *dst, int hash,
284     int spin, int t)
285 {
286 	struct lock_prof_type *type;
287 	struct lock_prof *l;
288 	int cpu;
289 
290 	dst->file = match->file;
291 	dst->line = match->line;
292 	dst->class = match->class;
293 	dst->name = match->name;
294 
295 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
296 		if (lp_cpu[cpu] == NULL)
297 			continue;
298 		type = &lp_cpu[cpu]->lpc_types[spin];
299 		SLIST_FOREACH(l, &type->lpt_hash[hash], link) {
300 			if (l->ticks == t)
301 				continue;
302 			if (l->file != match->file || l->line != match->line ||
303 			    l->name != match->name)
304 				continue;
305 			l->ticks = t;
306 			if (l->cnt_max > dst->cnt_max)
307 				dst->cnt_max = l->cnt_max;
308 			if (l->cnt_wait_max > dst->cnt_wait_max)
309 				dst->cnt_wait_max = l->cnt_wait_max;
310 			dst->cnt_tot += l->cnt_tot;
311 			dst->cnt_wait += l->cnt_wait;
312 			dst->cnt_cur += l->cnt_cur;
313 			dst->cnt_contest_locking += l->cnt_contest_locking;
314 		}
315 	}
316 
317 }
318 
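/*
 * Walk every hash bucket of one profiling type, emitting a single summed
 * line for each entry not yet stamped with 't'.  Stop early if the sbuf
 * overflows.
 */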
319 static void
320 lock_prof_type_stats(struct lock_prof_type *type, struct sbuf *sb, int spin,
321     int t)
322 {
323 	struct lock_prof *l;
324 	int i;
325 
326 	for (i = 0; i < LPROF_HASH_SIZE; ++i) {
327 		SLIST_FOREACH(l, &type->lpt_hash[i], link) {
328 			struct lock_prof lp = {};
329 
330 			if (l->ticks == t)
331 				continue;
332 			lock_prof_sum(l, &lp, i, spin, t);
333 			lock_prof_output(&lp, sb);
334 			if (sbuf_overflowed(sb))
335 				return;
336 		}
337 	}
338 }
339 
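/*
 * Handler for the debug.lock.prof.stats sysctl: temporarily disable
 * profiling, render all recorded statistics into an sbuf (growing the
 * buffer and retrying on overflow), restore the previous enable state
 * and copy the text out.
 */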
340 static int
341 dump_lock_prof_stats(SYSCTL_HANDLER_ARGS)
342 {
343 	static int multiplier = 1;
344 	struct sbuf *sb;
345 	int error, cpu, t;
346 	int enabled;
347 
348 retry_sbufops:
349 	sb = sbuf_new(NULL, NULL, LPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
350 	sbuf_printf(sb, "\n%8s %9s %11s %11s %11s %6s %6s %2s %6s %s\n",
351 	    "max", "wait_max", "total", "wait_total", "count", "avg", "wait_avg", "cnt_hold", "cnt_lock", "name");
352 	enabled = lock_prof_enable;
353 	lock_prof_enable = 0;
354 	pause("lpreset", hz / 10);
355 	t = ticks;
356 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
357 		if (lp_cpu[cpu] == NULL)
358 			continue;
359 		lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[0], sb, 0, t);
360 		lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[1], sb, 1, t);
361 		if (sbuf_overflowed(sb)) {
362 			sbuf_delete(sb);
363 			multiplier++;
364 			goto retry_sbufops;
365 		}
366 	}
367 	lock_prof_enable = enabled;
368 
369 	sbuf_finish(sb);
370 	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
371 	sbuf_delete(sb);
372 	return (error);
373 }
374 
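/*
 * Handler for debug.lock.prof.enable: writing 1 resets the accumulated
 * statistics and enables profiling; writing 0 disables it.
 */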
375 static int
376 enable_lock_prof(SYSCTL_HANDLER_ARGS)
377 {
378 	int error, v;
379 
380 	v = lock_prof_enable;
381 	error = sysctl_handle_int(oidp, &v, v, req);
382 	if (error)
383 		return (error);
384 	if (req->newptr == NULL)
385 		return (error);
386 	if (v == lock_prof_enable)
387 		return (0);
388 	if (v == 1)
389 		lock_prof_reset();
390 	lock_prof_enable = !!v;
391 
392 	return (0);
393 }
394 
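/*
 * Handler for debug.lock.prof.reset: any non-zero write discards all
 * accumulated statistics.
 */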
395 static int
396 reset_lock_prof_stats(SYSCTL_HANDLER_ARGS)
397 {
398 	int error, v;
399 
400 	v = 0;
401 	error = sysctl_handle_int(oidp, &v, 0, req);
402 	if (error)
403 		return (error);
404 	if (req->newptr == NULL)
405 		return (error);
406 	if (v == 0)
407 		return (0);
408 	lock_prof_reset();
409 
410 	return (0);
411 }
412 
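/*
 * Find the per-CPU lock_prof entry for this (lock name, file, line)
 * triple, allocating one from the free list and hashing it in if needed.
 * Returns NULL and counts a rejection when the free list is exhausted.
 * The caller is expected to hold a critical section so the per-CPU lists
 * stay consistent.
 */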
413 static struct lock_prof *
414 lock_profile_lookup(struct lock_object *lo, int spin, const char *file,
415     int line)
416 {
417 	const char *unknown = "(unknown)";
418 	struct lock_prof_type *type;
419 	struct lock_prof *lp;
420 	struct lphead *head;
421 	const char *p;
422 	u_int hash;
423 
424 	p = file;
425 	if (p == NULL || *p == '\0')
426 		p = unknown;
427 	hash = (uintptr_t)lo->lo_name * 31 + (uintptr_t)p * 31 + line;
428 	hash &= LPROF_HASH_MASK;
429 	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
430 	head = &type->lpt_hash[hash];
431 	SLIST_FOREACH(lp, head, link) {
432 		if (lp->line == line && lp->file == p &&
433 		    lp->name == lo->lo_name)
434 			return (lp);
435 
436 	}
437 	lp = SLIST_FIRST(&type->lpt_lpalloc);
438 	if (lp == NULL) {
439 		lock_prof_rejected++;
440 		return (lp);
441 	}
442 	SLIST_REMOVE_HEAD(&type->lpt_lpalloc, link);
443 	lp->file = p;
444 	lp->line = line;
445 	lp->class = LOCK_CLASS(lo);
446 	lp->name = lo->lo_name;
447 	SLIST_INSERT_HEAD(&type->lpt_hash[hash], lp, link);
448 	return (lp);
449 }
450 
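/*
 * Find the per-thread object tracking this (lock, file, line)
 * acquisition, or allocate one from the current CPU's free list inside a
 * critical section and link it onto the thread's list.  Returns NULL and
 * counts a rejection if the free list is empty.
 */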
451 static struct lock_profile_object *
452 lock_profile_object_lookup(struct lock_object *lo, int spin, const char *file,
453     int line)
454 {
455 	struct lock_profile_object *l;
456 	struct lock_prof_type *type;
457 	struct lpohead *head;
458 
459 	head = &curthread->td_lprof[spin];
460 	LIST_FOREACH(l, head, lpo_link)
461 		if (l->lpo_obj == lo && l->lpo_file == file &&
462 		    l->lpo_line == line)
463 			return (l);
464 	critical_enter();
465 	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
466 	l = LIST_FIRST(&type->lpt_lpoalloc);
467 	if (l == NULL) {
468 		lock_prof_rejected++;
469 		critical_exit();
470 		return (NULL);
471 	}
472 	LIST_REMOVE(l, lpo_link);
473 	critical_exit();
474 	l->lpo_obj = lo;
475 	l->lpo_file = file;
476 	l->lpo_line = line;
477 	l->lpo_cnt = 0;
478 	LIST_INSERT_HEAD(head, l, lpo_link);
479 
480 	return (l);
481 }
482 
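/*
 * Record a successful lock acquisition.  Honors the skipcount and
 * skipspin knobs; on the first (outermost) hold the acquisition
 * timestamp, contention flag and any time spent waiting are recorded.
 */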
483 void
484 lock_profile_obtain_lock_success(struct lock_object *lo, int contested,
485     uint64_t waittime, const char *file, int line)
486 {
487 	static int lock_prof_count;
488 	struct lock_profile_object *l;
489 	int spin;
490 
491 	/* don't reset the timer when/if recursing */
492 	if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE))
493 		return;
494 	if (lock_prof_skipcount &&
495 	    (++lock_prof_count % lock_prof_skipcount) != 0)
496 		return;
497 	spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
498 	if (spin && lock_prof_skipspin == 1)
499 		return;
500 	l = lock_profile_object_lookup(lo, spin, file, line);
501 	if (l == NULL)
502 		return;
503 	l->lpo_cnt++;
504 	if (++l->lpo_ref > 1)
505 		return;
506 	l->lpo_contest_locking = contested;
507 	l->lpo_acqtime = nanoseconds();
508 	if (waittime && (l->lpo_acqtime > waittime))
509 		l->lpo_waittime = l->lpo_acqtime - waittime;
510 	else
511 		l->lpo_waittime = 0;
512 }
513 
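/*
 * On the final release of a lock, fold the hold time, wait time and
 * contention counts from the per-thread object into the matching per-CPU
 * lock_prof entry and return the object to the free list.
 */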
514 void
515 lock_profile_release_lock(struct lock_object *lo)
516 {
517 	struct lock_profile_object *l;
518 	struct lock_prof_type *type;
519 	struct lock_prof *lp;
520 	u_int64_t holdtime;
521 	struct lpohead *head;
522 	int spin;
523 
524 	if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE))
525 		return;
526 	spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
527 	head = &curthread->td_lprof[spin];
528 	critical_enter();
529 	LIST_FOREACH(l, head, lpo_link)
530 		if (l->lpo_obj == lo)
531 			break;
532 	if (l == NULL)
533 		goto out;
534 	if (--l->lpo_ref > 0)
535 		goto out;
536 	lp = lock_profile_lookup(lo, spin, l->lpo_file, l->lpo_line);
537 	if (lp == NULL)
538 		goto release;
539 	holdtime = nanoseconds() - l->lpo_acqtime;
540 	if ((int64_t)holdtime < 0)
541 		goto release;
542 	/*
543 	 * Record if the lock has been held longer now than ever
544 	 * before.
545 	 */
546 	if (holdtime > lp->cnt_max)
547 		lp->cnt_max = holdtime;
548 	if (l->lpo_waittime > lp->cnt_wait_max)
549 		lp->cnt_wait_max = l->lpo_waittime;
550 	lp->cnt_tot += holdtime;
551 	lp->cnt_wait += l->lpo_waittime;
552 	lp->cnt_contest_locking += l->lpo_contest_locking;
553 	lp->cnt_cur += l->lpo_cnt;
554 release:
555 	LIST_REMOVE(l, lpo_link);
556 	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
557 	LIST_INSERT_HEAD(&type->lpt_lpoalloc, l, lpo_link);
558 out:
559 	critical_exit();
560 }
561 
562 SYSCTL_NODE(_debug, OID_AUTO, lock, CTLFLAG_RD, NULL, "lock debugging");
563 SYSCTL_NODE(_debug_lock, OID_AUTO, prof, CTLFLAG_RD, NULL, "lock profiling");
564 SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipspin, CTLFLAG_RW,
565     &lock_prof_skipspin, 0, "Skip profiling on spinlocks.");
566 SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipcount, CTLFLAG_RW,
567     &lock_prof_skipcount, 0, "Sample approximately every N lock acquisitions.");
568 SYSCTL_INT(_debug_lock_prof, OID_AUTO, rejected, CTLFLAG_RD,
569     &lock_prof_rejected, 0, "Number of rejected profiling records");
570 SYSCTL_PROC(_debug_lock_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
571     NULL, 0, dump_lock_prof_stats, "A", "Lock profiling statistics");
572 SYSCTL_PROC(_debug_lock_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
573     NULL, 0, reset_lock_prof_stats, "I", "Reset lock profiling statistics");
574 SYSCTL_PROC(_debug_lock_prof, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
575     NULL, 0, enable_lock_prof, "I", "Enable lock profiling");
576 
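/*
 * Example usage from userland (illustrative):
 *
 *	sysctl debug.lock.prof.enable=1
 *	<run the workload of interest>
 *	sysctl debug.lock.prof.enable=0
 *	sysctl debug.lock.prof.stats
 *	sysctl debug.lock.prof.reset=1
 */
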
577 #endif
578