xref: /illumos-gate/usr/src/cmd/mdb/common/modules/genunix/leaky_subr.c (revision f498645a3eecf2ddd304b4ea9c7f1b4c155ff79e)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <mdb/mdb_param.h>
#include <mdb/mdb_modapi.h>

#include <sys/fs/ufs_inode.h>
#include <sys/kmem_impl.h>
#include <sys/vmem_impl.h>
#include <sys/modctl.h>
#include <sys/kobj.h>
#include <sys/kobj_impl.h>
#include <vm/seg_vn.h>
#include <vm/as.h>
#include <vm/seg_map.h>
#include <mdb/mdb_ctf.h>

#include "kmem.h"
#include "leaky_impl.h"

/*
 * This file defines the genunix target for leaky.c.  There are three types
 * of buffers in the kernel's heap:  TYPE_VMEM, for kmem_oversize allocations,
 * TYPE_KMEM, for kmem_cache_alloc() allocations with bufctl_audit_ts, and
 * TYPE_CACHE, for kmem_cache_alloc() allocations without bufctl_audit_ts.
 *
 * See "leaky_impl.h" for the target interface definition.
 */
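
/*
 * A rough sketch of how the generic driver in leaky.c uses this target
 * (leaky_impl.h holds the authoritative contract):
 *
 *	leaky_subr_estimate(&est);	- upper bound on allocated buffers
 *	leaky_subr_fill(&lmp);		- a leak_mtab_t per allocated buffer
 *	leaky_subr_run();		- grep the roots (modules, threads...)
 *	leaky_subr_add_leak(lmp);	- called for each unreferenced buffer
 *	leaky_subr_dump_*();		- report, coalesced by
 *					  leaky_subr_bufctl_cmp()
 */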

#define	TYPE_VMEM	0		/* lkb_data is the vmem_seg's size */
#define	TYPE_CACHE	1		/* lkb_cid is the bufctl's cache */
#define	TYPE_KMEM	2		/* lkb_cid is the bufctl's cache */

#define	LKM_CTL_BUFCTL	0	/* normal allocation, PTR is bufctl */
#define	LKM_CTL_VMSEG	1	/* oversize allocation, PTR is vmem_seg_t */
#define	LKM_CTL_CACHE	2	/* normal alloc, non-debug, PTR is cache */
#define	LKM_CTL_MASK	3L

#define	LKM_CTL(ptr, type)	(LKM_CTLPTR(ptr) | (type))
#define	LKM_CTLPTR(ctl)		((uintptr_t)(ctl) & ~(LKM_CTL_MASK))
#define	LKM_CTLTYPE(ctl)	((uintptr_t)(ctl) &  (LKM_CTL_MASK))
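
/*
 * The macros above tag the two low bits of an aligned pointer with its
 * type; bufctls, vmem_segs, and caches are all at least 4-byte aligned.
 * A worked example, with a made-up address:
 *
 *	ctl = LKM_CTL(0x30001234560, LKM_CTL_VMSEG);	ctl == 0x30001234561
 *	LKM_CTLTYPE(ctl)				== LKM_CTL_VMSEG
 *	LKM_CTLPTR(ctl)					== 0x30001234560
 */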

static int kmem_lite_count = 0;	/* cache of the kernel's version */

/*ARGSUSED*/
static int
leaky_mtab(uintptr_t addr, const kmem_bufctl_audit_t *bcp, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = (uintptr_t)bcp->bc_addr;
	lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_BUFCTL);

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_mtab_addr(uintptr_t addr, void *ignored, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = addr;

	return (WALK_NEXT);
}

static int
leaky_seg(uintptr_t addr, const vmem_seg_t *seg, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = seg->vs_start;
	lm->lkm_limit = seg->vs_end;
	lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_VMSEG);

	return (WALK_NEXT);
}

static int
leaky_vmem_interested(const vmem_t *vmem)
{
	if (strcmp(vmem->vm_name, "kmem_oversize") != 0 &&
	    strcmp(vmem->vm_name, "static_alloc") != 0)
		return (0);
	return (1);
}

static int
leaky_vmem(uintptr_t addr, const vmem_t *vmem, leak_mtab_t **lmp)
{
	if (!leaky_vmem_interested(vmem))
		return (WALK_NEXT);

	if (mdb_pwalk("vmem_alloc", (mdb_walk_cb_t)leaky_seg, lmp, addr) == -1)
		mdb_warn("can't walk vmem_alloc for %s (%p)", vmem->vm_name,
		    addr);

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_estimate_vmem(uintptr_t addr, const vmem_t *vmem, size_t *est)
{
	if (!leaky_vmem_interested(vmem))
		return (WALK_NEXT);

	*est += (int)(vmem->vm_kstat.vk_alloc.value.ui64 -
	    vmem->vm_kstat.vk_free.value.ui64);

	return (WALK_NEXT);
}

static int
leaky_interested(const kmem_cache_t *c)
{
	vmem_t vmem;

	/*
	 * ignore HAT-related caches that happen to derive from kmem_default
	 */
	if (strcmp(c->cache_name, "sfmmu1_cache") == 0 ||
	    strcmp(c->cache_name, "sf_hment_cache") == 0 ||
	    strcmp(c->cache_name, "pa_hment_cache") == 0)
		return (0);

	if (mdb_vread(&vmem, sizeof (vmem), (uintptr_t)c->cache_arena) == -1) {
		mdb_warn("cannot read arena %p for cache '%s'",
		    (uintptr_t)c->cache_arena, c->cache_name);
		return (0);
	}

	/*
	 * If this cache isn't allocating from the kmem_default,
	 * kmem_firewall, or static vmem arenas, we're not interested.
	 */
	if (strcmp(vmem.vm_name, "kmem_default") != 0 &&
	    strcmp(vmem.vm_name, "kmem_firewall") != 0 &&
	    strcmp(vmem.vm_name, "static") != 0)
		return (0);

	return (1);
}

static int
leaky_estimate(uintptr_t addr, const kmem_cache_t *c, size_t *est)
{
	if (!leaky_interested(c))
		return (WALK_NEXT);

	*est += kmem_estimate_allocated(addr, c);

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_cache(uintptr_t addr, const kmem_cache_t *c, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = *lmp;
	mdb_walk_cb_t cb;
	const char *walk;
	int audit = (c->cache_flags & KMF_AUDIT);

	if (!leaky_interested(c))
		return (WALK_NEXT);

	if (audit) {
		walk = "bufctl";
		cb = (mdb_walk_cb_t)leaky_mtab;
	} else {
		walk = "kmem";
		cb = (mdb_walk_cb_t)leaky_mtab_addr;
	}
	if (mdb_pwalk(walk, cb, lmp, addr) == -1) {
		mdb_warn("can't walk kmem for cache %p (%s)", addr,
		    c->cache_name);
		return (WALK_DONE);
	}

	for (; lm < *lmp; lm++) {
		lm->lkm_limit = lm->lkm_base + c->cache_bufsize;
		if (!audit)
			lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_CACHE);
	}

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_scan_buffer(uintptr_t addr, const void *ignored, const kmem_cache_t *c)
{
	leaky_grep(addr, c->cache_bufsize);

	/*
	 * free, constructed KMF_LITE buffers keep their first uint64_t in
	 * their buftag's redzone.
	 */
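	/*
	 * (For reference, KMEM_BUFTAG(cp, buf) from <sys/kmem_impl.h>
	 * resolves to roughly
	 * (kmem_buftag_t *)((char *)(buf) + (cp)->cache_buftag), so the
	 * redzone we grep sits just past the buffer proper.)
	 */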
	if (c->cache_flags & KMF_LITE) {
		/* LINTED alignment */
		kmem_buftag_t *btp = KMEM_BUFTAG(c, addr);
		leaky_grep((uintptr_t)&btp->bt_redzone,
		    sizeof (btp->bt_redzone));
	}

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_scan_cache(uintptr_t addr, const kmem_cache_t *c, void *ignored)
{
	if (!leaky_interested(c))
		return (WALK_NEXT);

	/*
	 * Scan all of the free, constructed buffers, since they may have
	 * pointers to allocated objects.
	 */
	if (mdb_pwalk("freemem_constructed",
	    (mdb_walk_cb_t)leaky_scan_buffer, (void *)c, addr) == -1) {
		mdb_warn("can't walk freemem_constructed for cache %p (%s)",
		    addr, c->cache_name);
		return (WALK_DONE);
	}

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_modctl(uintptr_t addr, const struct modctl *m, int *ignored)
{
	struct module mod;
	char name[MODMAXNAMELEN];

	if (m->mod_mp == NULL)
		return (WALK_NEXT);

	if (mdb_vread(&mod, sizeof (mod), (uintptr_t)m->mod_mp) == -1) {
		mdb_warn("couldn't read modctl %p's module", addr);
		return (WALK_NEXT);
	}

	if (mdb_readstr(name, sizeof (name), (uintptr_t)m->mod_modname) == -1)
		(void) mdb_snprintf(name, sizeof (name), "0x%p", addr);

	leaky_grep((uintptr_t)m->mod_mp, sizeof (struct module));
	leaky_grep((uintptr_t)mod.data, mod.data_size);
	leaky_grep((uintptr_t)mod.bss, mod.bss_size);

	return (WALK_NEXT);
}

static int
leaky_thread(uintptr_t addr, const kthread_t *t, unsigned long *pagesize)
{
	uintptr_t size, base = (uintptr_t)t->t_stkbase;
	uintptr_t stk = (uintptr_t)t->t_stk;

	/*
	 * If this thread isn't in memory, we can't look at its stack.  This
	 * may result in false positives, so we print a warning.
	 */
	if (!(t->t_schedflag & TS_LOAD)) {
		mdb_printf("findleaks: thread %p's stack swapped out; "
		    "false positives possible\n", addr);
		return (WALK_NEXT);
	}

	if (t->t_state != TS_FREE)
		leaky_grep(base, stk - base);

	/*
	 * There is always gunk hanging out between t_stk and the page
	 * boundary.  If this thread structure wasn't kmem allocated,
	 * this will include the thread structure itself.  If the thread
	 * _is_ kmem allocated, we'll be able to get to it via allthreads.
	 */
	size = *pagesize - (stk & (*pagesize - 1));
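	/*
	 * For example (hypothetical addresses): with an 8K page size
	 * (0x2000) and t_stk == 0x3000a1f80, size == 0x2000 - 0x1f80 ==
	 * 0x80, covering the gunk up to the page boundary at 0x3000a2000.
	 */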

	leaky_grep(stk, size);

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_kstat(uintptr_t addr, vmem_seg_t *seg, void *ignored)
{
	leaky_grep(seg->vs_start, seg->vs_end - seg->vs_start);

	return (WALK_NEXT);
}

static void
leaky_kludge(void)
{
	GElf_Sym sym;
	mdb_ctf_id_t id, rid;

	int max_mem_nodes;
	uintptr_t *counters;
	size_t ncounters;
	ssize_t hwpm_size;
	int idx;

	/*
	 * Because of DR, the page counters (which live in the kmem64 segment)
	 * can point into kmem_alloc()ed memory.  The "page_counters" array
	 * is multi-dimensional, and each entry points to an array of
	 * "hw_page_map_t"s which is "max_mem_nodes" in length.
	 *
	 * To keep this from having too much grotty knowledge of internals,
	 * we use CTF data to get the size of the structure.  For simplicity,
	 * we treat the page_counters array as a flat array of pointers, and
	 * use its size to determine how much to scan.  Unused entries will
	 * be NULL.
	 */
	if (mdb_lookup_by_name("page_counters", &sym) == -1) {
		mdb_warn("unable to lookup page_counters");
		return;
	}

	if (mdb_readvar(&max_mem_nodes, "max_mem_nodes") == -1) {
		mdb_warn("unable to read max_mem_nodes");
		return;
	}

	if (mdb_ctf_lookup_by_name("unix`hw_page_map_t", &id) == -1 ||
	    mdb_ctf_type_resolve(id, &rid) == -1 ||
	    (hwpm_size = mdb_ctf_type_size(rid)) < 0) {
		mdb_warn("unable to lookup unix`hw_page_map_t");
		return;
	}

	counters = mdb_alloc(sym.st_size, UM_SLEEP | UM_GC);

	if (mdb_vread(counters, sym.st_size, (uintptr_t)sym.st_value) == -1) {
		mdb_warn("unable to read page_counters");
		return;
	}

	ncounters = sym.st_size / sizeof (counters[0]);

	for (idx = 0; idx < ncounters; idx++) {
		uintptr_t addr = counters[idx];
		if (addr != 0)
			leaky_grep(addr, hwpm_size * max_mem_nodes);
	}
}

int
leaky_subr_estimate(size_t *estp)
{
	uintptr_t panicstr;
	int state;

	if ((state = mdb_get_state()) == MDB_STATE_RUNNING) {
		mdb_warn("findleaks: can only be run on a system "
		    "dump or under kmdb; see dumpadm(1M)\n");
		return (DCMD_ERR);
	}

	if (mdb_readvar(&panicstr, "panicstr") == -1) {
		mdb_warn("can't read variable 'panicstr'");
		return (DCMD_ERR);
	}

	if (state != MDB_STATE_STOPPED && panicstr == NULL) {
		mdb_warn("findleaks: cannot be run on a live dump.\n");
		return (DCMD_ERR);
	}

	if (mdb_walk("kmem_cache", (mdb_walk_cb_t)leaky_estimate, estp) == -1) {
		mdb_warn("couldn't walk 'kmem_cache'");
		return (DCMD_ERR);
	}

	if (*estp == 0) {
		mdb_warn("findleaks: no buffers found\n");
		return (DCMD_ERR);
	}

	if (mdb_walk("vmem", (mdb_walk_cb_t)leaky_estimate_vmem, estp) == -1) {
		mdb_warn("couldn't walk 'vmem'");
		return (DCMD_ERR);
	}

	return (DCMD_OK);
}

int
leaky_subr_fill(leak_mtab_t **lmpp)
{
	if (mdb_walk("vmem", (mdb_walk_cb_t)leaky_vmem, lmpp) == -1) {
		mdb_warn("couldn't walk 'vmem'");
		return (DCMD_ERR);
	}

	if (mdb_walk("kmem_cache", (mdb_walk_cb_t)leaky_cache, lmpp) == -1) {
		mdb_warn("couldn't walk 'kmem_cache'");
		return (DCMD_ERR);
	}

	if (mdb_readvar(&kmem_lite_count, "kmem_lite_count") == -1) {
		mdb_warn("couldn't read 'kmem_lite_count'");
		kmem_lite_count = 0;
	} else if (kmem_lite_count > 16) {
		mdb_warn("kmem_lite_count nonsensical, ignored\n");
		kmem_lite_count = 0;
	}

	return (DCMD_OK);
}

int
leaky_subr_run(void)
{
	unsigned long ps;
	uintptr_t kstat_arena;
	uintptr_t dmods;

	if (mdb_readvar(&ps, "_pagesize") == -1) {
		mdb_warn("couldn't read '_pagesize'");
		return (DCMD_ERR);
	}

	leaky_kludge();

	if (mdb_walk("kmem_cache", (mdb_walk_cb_t)leaky_scan_cache,
	    NULL) == -1) {
		mdb_warn("couldn't walk 'kmem_cache'");
		return (DCMD_ERR);
	}

	if (mdb_walk("modctl", (mdb_walk_cb_t)leaky_modctl, NULL) == -1) {
		mdb_warn("couldn't walk 'modctl'");
		return (DCMD_ERR);
	}

	/*
	 * If kmdb is loaded, we need to walk its module list, since kmdb
	 * modctl structures can reference kmem allocations.
	 */
	if ((mdb_readvar(&dmods, "kdi_dmods") != -1) && (dmods != NULL))
		(void) mdb_pwalk("modctl", (mdb_walk_cb_t)leaky_modctl,
		    NULL, dmods);

	if (mdb_walk("thread", (mdb_walk_cb_t)leaky_thread, &ps) == -1) {
		mdb_warn("couldn't walk 'thread'");
		return (DCMD_ERR);
	}

	if (mdb_walk("deathrow", (mdb_walk_cb_t)leaky_thread, &ps) == -1) {
		mdb_warn("couldn't walk 'deathrow'");
		return (DCMD_ERR);
	}

	if (mdb_readvar(&kstat_arena, "kstat_arena") == -1) {
		mdb_warn("couldn't read 'kstat_arena'");
		return (DCMD_ERR);
	}

	if (mdb_pwalk("vmem_alloc", (mdb_walk_cb_t)leaky_kstat,
	    NULL, kstat_arena) == -1) {
		mdb_warn("couldn't walk kstat vmem arena");
		return (DCMD_ERR);
	}

	return (DCMD_OK);
}

void
leaky_subr_add_leak(leak_mtab_t *lmp)
{
	uintptr_t addr = LKM_CTLPTR(lmp->lkm_bufctl);
	size_t depth;

	switch (LKM_CTLTYPE(lmp->lkm_bufctl)) {
	case LKM_CTL_VMSEG: {
		vmem_seg_t vs;

		if (mdb_vread(&vs, sizeof (vs), addr) == -1) {
			mdb_warn("couldn't read leaked vmem_seg at addr %p",
			    addr);
			return;
		}
		depth = MIN(vs.vs_depth, VMEM_STACK_DEPTH);

		leaky_add_leak(TYPE_VMEM, addr, vs.vs_start, vs.vs_timestamp,
		    vs.vs_stack, depth, 0, (vs.vs_end - vs.vs_start));
		break;
	}
	case LKM_CTL_BUFCTL: {
		kmem_bufctl_audit_t bc;

		if (mdb_vread(&bc, sizeof (bc), addr) == -1) {
			mdb_warn("couldn't read leaked bufctl at addr %p",
			    addr);
			return;
		}

		depth = MIN(bc.bc_depth, KMEM_STACK_DEPTH);

		/*
		 * The top of the stack will be kmem_cache_alloc+offset.
		 * Since the offset in kmem_cache_alloc() isn't interesting,
		 * we skip that frame for the purposes of uniquifying stacks.
		 *
		 * We also use the cache pointer as the leak's cid, to
		 * prevent the coalescing of leaks from different caches.
		 */
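		/*
		 * For example, two (hypothetical) leaked stacks
		 *	kmem_cache_alloc+0x88, ufs_alloc+0x14, ...
		 *	kmem_cache_alloc+0xd0, ufs_alloc+0x14, ...
		 * both pass bc_stack + 1 here, and so coalesce on
		 * ufs_alloc.
		 */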
		if (depth > 0)
			depth--;
		leaky_add_leak(TYPE_KMEM, addr, (uintptr_t)bc.bc_addr,
		    bc.bc_timestamp, bc.bc_stack + 1, depth,
		    (uintptr_t)bc.bc_cache, 0);
		break;
	}
	case LKM_CTL_CACHE: {
		kmem_cache_t cache;
		kmem_buftag_lite_t bt;
		pc_t caller;
		int depth = 0;

		/*
		 * For KMF_LITE caches, we can get the allocation PC
		 * out of the buftag structure.
		 */
		if (mdb_vread(&cache, sizeof (cache), addr) != -1 &&
		    (cache.cache_flags & KMF_LITE) &&
		    kmem_lite_count > 0 &&
		    mdb_vread(&bt, sizeof (bt),
		    /* LINTED alignment */
		    (uintptr_t)KMEM_BUFTAG(&cache, lmp->lkm_base)) != -1) {
			caller = bt.bt_history[0];
			depth = 1;
		}
		leaky_add_leak(TYPE_CACHE, lmp->lkm_base, lmp->lkm_base, 0,
		    &caller, depth, addr, addr);
		break;
	}
	default:
		mdb_warn("internal error: invalid leak_bufctl_t\n");
		break;
	}
}

static void
leaky_subr_caller(const pc_t *stack, uint_t depth, char *buf, uintptr_t *pcp)
{
	int i;
	GElf_Sym sym;
	uintptr_t pc = 0;

	buf[0] = 0;

	for (i = 0; i < depth; i++) {
		pc = stack[i];

		if (mdb_lookup_by_addr(pc,
		    MDB_SYM_FUZZY, buf, MDB_SYM_NAMLEN, &sym) == -1)
			continue;
		if (strncmp(buf, "kmem_", 5) == 0)
			continue;
		if (strncmp(buf, "vmem_", 5) == 0)
			continue;
		*pcp = pc;

		return;
	}

	/*
	 * We're only here if every frame of the call chain is in "kmem_"
	 * or "vmem_" functions; this shouldn't happen, but we'll just use
	 * the last caller.
	 */
	*pcp = pc;
}

int
leaky_subr_bufctl_cmp(const leak_bufctl_t *lhs, const leak_bufctl_t *rhs)
{
	char lbuf[MDB_SYM_NAMLEN], rbuf[MDB_SYM_NAMLEN];
	uintptr_t lcaller, rcaller;
	int rval;

	leaky_subr_caller(lhs->lkb_stack, lhs->lkb_depth, lbuf, &lcaller);
	leaky_subr_caller(rhs->lkb_stack, rhs->lkb_depth, rbuf, &rcaller);

	if (rval = strcmp(lbuf, rbuf))
		return (rval);

	if (lcaller < rcaller)
		return (-1);

	if (lcaller > rcaller)
		return (1);

	if (lhs->lkb_data < rhs->lkb_data)
		return (-1);

	if (lhs->lkb_data > rhs->lkb_data)
		return (1);

	return (0);
}

/*
 * Global state variables used by the leaky_subr_dump_* routines.  Note that
 * they are carefully cleared before use.
 */
static int lk_vmem_seen;
static int lk_cache_seen;
static int lk_kmem_seen;
static size_t lk_ttl;
static size_t lk_bytes;

void
leaky_subr_dump_start(int type)
{
	switch (type) {
	case TYPE_VMEM:
		lk_vmem_seen = 0;
		break;
	case TYPE_CACHE:
		lk_cache_seen = 0;
		break;
	case TYPE_KMEM:
		lk_kmem_seen = 0;
		break;
	default:
		break;
	}

	lk_ttl = 0;
	lk_bytes = 0;
}

void
leaky_subr_dump(const leak_bufctl_t *lkb, int verbose)
{
	const leak_bufctl_t *cur;
	kmem_cache_t cache;
	size_t min, max, size;
	char sz[30];
	char c[MDB_SYM_NAMLEN];
	uintptr_t caller;

	if (verbose) {
		lk_ttl = 0;
		lk_bytes = 0;
	}

	switch (lkb->lkb_type) {
	case TYPE_VMEM:
		if (!verbose && !lk_vmem_seen) {
			lk_vmem_seen = 1;
			mdb_printf("%-16s %7s %?s %s\n",
			    "BYTES", "LEAKED", "VMEM_SEG", "CALLER");
		}

		min = max = lkb->lkb_data;

		for (cur = lkb; cur != NULL; cur = cur->lkb_next) {
			size = cur->lkb_data;

			if (size < min)
				min = size;
			if (size > max)
				max = size;

			lk_ttl++;
			lk_bytes += size;
		}

		if (min == max)
			(void) mdb_snprintf(sz, sizeof (sz), "%ld", min);
		else
			(void) mdb_snprintf(sz, sizeof (sz), "%ld-%ld",
			    min, max);

		if (!verbose) {
			leaky_subr_caller(lkb->lkb_stack, lkb->lkb_depth,
			    c, &caller);

			if (caller != 0) {
				(void) mdb_snprintf(c, sizeof (c),
				    "%a", caller);
			} else {
				(void) mdb_snprintf(c, sizeof (c),
				    "%s", "?");
			}
			mdb_printf("%-16s %7d %?p %s\n", sz, lkb->lkb_dups + 1,
			    lkb->lkb_addr, c);
		} else {
			mdb_arg_t v;

			if (lk_ttl == 1)
				mdb_printf("kmem_oversize leak: 1 vmem_seg, "
				    "%ld bytes\n", lk_bytes);
			else
				mdb_printf("kmem_oversize leak: %d vmem_segs, "
				    "%s bytes each, %ld bytes total\n",
				    lk_ttl, sz, lk_bytes);

			v.a_type = MDB_TYPE_STRING;
			v.a_un.a_str = "-v";

			if (mdb_call_dcmd("vmem_seg", lkb->lkb_addr,
			    DCMD_ADDRSPEC, 1, &v) == -1) {
				mdb_warn("'%p::vmem_seg -v' failed",
				    lkb->lkb_addr);
			}
		}
		return;

	case TYPE_CACHE:
		if (!verbose && !lk_cache_seen) {
			lk_cache_seen = 1;
			if (lk_vmem_seen)
				mdb_printf("\n");
			mdb_printf("%-?s %7s %?s %s\n",
			    "CACHE", "LEAKED", "BUFFER", "CALLER");
		}

		if (mdb_vread(&cache, sizeof (cache), lkb->lkb_data) == -1) {
			/*
			 * This _really_ shouldn't happen; we shouldn't
			 * have been able to get this far if this
			 * cache wasn't readable.
			 */
			mdb_warn("can't read cache %p for leaked "
			    "buffer %p", lkb->lkb_data, lkb->lkb_addr);
			return;
		}

		lk_ttl += lkb->lkb_dups + 1;
		lk_bytes += (lkb->lkb_dups + 1) * cache.cache_bufsize;

		caller = (lkb->lkb_depth == 0) ? 0 : lkb->lkb_stack[0];
		if (caller != 0) {
			(void) mdb_snprintf(c, sizeof (c), "%a", caller);
		} else {
			(void) mdb_snprintf(c, sizeof (c),
			    "%s", (verbose) ? "" : "?");
		}

		if (!verbose) {
			mdb_printf("%0?p %7d %0?p %s\n", lkb->lkb_cid,
			    lkb->lkb_dups + 1, lkb->lkb_addr, c);
		} else {
			if (lk_ttl == 1)
				mdb_printf("%s leak: 1 buffer, %ld bytes,\n",
				    cache.cache_name, lk_bytes);
			else
				mdb_printf("%s leak: %d buffers, "
				    "%ld bytes each, %ld bytes total,\n",
				    cache.cache_name, lk_ttl,
				    cache.cache_bufsize, lk_bytes);

			mdb_printf("    sample addr %p%s%s\n",
			    lkb->lkb_addr, (caller == 0) ? "" : ", caller ", c);
		}
		return;

	case TYPE_KMEM:
		if (!verbose && !lk_kmem_seen) {
			lk_kmem_seen = 1;
			if (lk_vmem_seen || lk_cache_seen)
				mdb_printf("\n");
			mdb_printf("%-?s %7s %?s %s\n",
			    "CACHE", "LEAKED", "BUFCTL", "CALLER");
		}

		if (mdb_vread(&cache, sizeof (cache), lkb->lkb_cid) == -1) {
			/*
			 * This _really_ shouldn't happen; we shouldn't
			 * have been able to get this far if this
			 * cache wasn't readable.
			 */
			mdb_warn("can't read cache %p for leaked "
			    "bufctl %p", lkb->lkb_cid, lkb->lkb_addr);
			return;
		}

		lk_ttl += lkb->lkb_dups + 1;
		lk_bytes += (lkb->lkb_dups + 1) * cache.cache_bufsize;

		if (!verbose) {
			leaky_subr_caller(lkb->lkb_stack, lkb->lkb_depth,
			    c, &caller);

			if (caller != 0) {
				(void) mdb_snprintf(c, sizeof (c),
				    "%a", caller);
			} else {
				(void) mdb_snprintf(c, sizeof (c),
				    "%s", "?");
			}
			mdb_printf("%0?p %7d %0?p %s\n", lkb->lkb_cid,
			    lkb->lkb_dups + 1, lkb->lkb_addr, c);
		} else {
			mdb_arg_t v;

			if (lk_ttl == 1)
				mdb_printf("%s leak: 1 buffer, %ld bytes\n",
				    cache.cache_name, lk_bytes);
			else
				mdb_printf("%s leak: %d buffers, "
				    "%ld bytes each, %ld bytes total\n",
				    cache.cache_name, lk_ttl,
				    cache.cache_bufsize, lk_bytes);

			v.a_type = MDB_TYPE_STRING;
			v.a_un.a_str = "-v";

			if (mdb_call_dcmd("bufctl", lkb->lkb_addr,
			    DCMD_ADDRSPEC, 1, &v) == -1) {
				mdb_warn("'%p::bufctl -v' failed",
				    lkb->lkb_addr);
			}
		}
		return;

	default:
		return;
	}
}

void
leaky_subr_dump_end(int type)
{
	int i;
	int width;
	const char *leaks;

	switch (type) {
	case TYPE_VMEM:
		if (!lk_vmem_seen)
			return;

		width = 16;
		leaks = "kmem_oversize leak";
		break;

	case TYPE_CACHE:
		if (!lk_cache_seen)
			return;

		width = sizeof (uintptr_t) * 2;
		leaks = "buffer";
		break;

	case TYPE_KMEM:
		if (!lk_kmem_seen)
			return;

		width = sizeof (uintptr_t) * 2;
		leaks = "buffer";
		break;

	default:
		return;
	}

	for (i = 0; i < 72; i++)
		mdb_printf("-");
	mdb_printf("\n%*s %7ld %s%s, %ld byte%s\n",
	    width, "Total", lk_ttl, leaks, (lk_ttl == 1) ? "" : "s",
	    lk_bytes, (lk_bytes == 1) ? "" : "s");
}

int
leaky_subr_invoke_callback(const leak_bufctl_t *lkb, mdb_walk_cb_t cb,
    void *cbdata)
{
	kmem_bufctl_audit_t bc;
	vmem_seg_t vs;

	switch (lkb->lkb_type) {
	case TYPE_VMEM:
		if (mdb_vread(&vs, sizeof (vs), lkb->lkb_addr) == -1) {
			mdb_warn("unable to read vmem_seg at %p",
			    lkb->lkb_addr);
			return (WALK_NEXT);
		}
		return (cb(lkb->lkb_addr, &vs, cbdata));

	case TYPE_CACHE:
		return (cb(lkb->lkb_addr, NULL, cbdata));

	case TYPE_KMEM:
		if (mdb_vread(&bc, sizeof (bc), lkb->lkb_addr) == -1) {
			mdb_warn("unable to read bufctl at %p",
			    lkb->lkb_addr);
			return (WALK_NEXT);
		}
		return (cb(lkb->lkb_addr, &bc, cbdata));
	default:
		return (WALK_NEXT);
	}
}
930