xref: /illumos-gate/usr/src/cmd/mdb/common/modules/genunix/leaky_subr.c (revision 66582b606a8194f7f3ba5b3a3a6dca5b0d346361)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <mdb/mdb_param.h>
#include <mdb/mdb_modapi.h>

#include <sys/fs/ufs_inode.h>
#include <sys/kmem_impl.h>
#include <sys/vmem_impl.h>
#include <sys/modctl.h>
#include <sys/kobj.h>
#include <sys/kobj_impl.h>
#include <vm/seg_vn.h>
#include <vm/as.h>
#include <vm/seg_map.h>
#include <mdb/mdb_ctf.h>

#include "kmem.h"
#include "leaky_impl.h"

/*
 * This file defines the genunix target for leaky.c.  There are three types
 * of buffers in the kernel's heap:  TYPE_VMEM, for kmem_oversize allocations,
 * TYPE_KMEM, for kmem_cache_alloc() allocations with bufctl_audit_ts, and
 * TYPE_CACHE, for kmem_cache_alloc() allocations without bufctl_audit_ts.
 *
 * See "leaky_impl.h" for the target interface definition.
 */
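
/*
 * In rough terms, leaky.c drives this target through the leaky_subr_*
 * routines defined below:  leaky_subr_estimate() sizes the leak_mtab_t
 * array, leaky_subr_fill() populates it with the allocated buffers,
 * leaky_subr_run() greps all known-reachable memory, and the
 * leaky_subr_add_leak()/leaky_subr_dump_*() routines report whatever
 * was never reached.  See leaky_impl.h for the authoritative
 * description of this protocol.
 */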

#define	TYPE_VMEM	0		/* lkb_data is the vmem_seg's size */
#define	TYPE_CACHE	1		/* lkb_cid is the bufctl's cache */
#define	TYPE_KMEM	2		/* lkb_cid is the bufctl's cache */

#define	LKM_CTL_BUFCTL	0	/* normal allocation, PTR is bufctl */
#define	LKM_CTL_VMSEG	1	/* oversize allocation, PTR is vmem_seg_t */
#define	LKM_CTL_CACHE	2	/* normal alloc, non-debug, PTR is cache */
#define	LKM_CTL_MASK	3L

#define	LKM_CTL(ptr, type)	(LKM_CTLPTR(ptr) | (type))
#define	LKM_CTLPTR(ctl)		((uintptr_t)(ctl) & ~(LKM_CTL_MASK))
#define	LKM_CTLTYPE(ctl)	((uintptr_t)(ctl) &  (LKM_CTL_MASK))
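
/*
 * Since bufctls, vmem_seg_ts, and kmem_cache_ts are all at least 4-byte
 * aligned, the low two bits of their addresses are free to carry the
 * LKM_CTL_* type.  For example (addresses hypothetical):
 *
 *	LKM_CTL(0x30001234, LKM_CTL_VMSEG)	== 0x30001235
 *	LKM_CTLPTR(0x30001235)			== 0x30001234
 *	LKM_CTLTYPE(0x30001235)			== LKM_CTL_VMSEG
 */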

static int kmem_lite_count = 0;	/* cached copy of the kernel's value */

/*ARGSUSED*/
static int
leaky_mtab(uintptr_t addr, const kmem_bufctl_audit_t *bcp, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = (uintptr_t)bcp->bc_addr;
	lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_BUFCTL);

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_mtab_addr(uintptr_t addr, void *ignored, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = addr;

	return (WALK_NEXT);
}

static int
leaky_seg(uintptr_t addr, const vmem_seg_t *seg, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = seg->vs_start;
	lm->lkm_limit = seg->vs_end;
	lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_VMSEG);

	return (WALK_NEXT);
}

static int
leaky_vmem_interested(const vmem_t *vmem)
{
	if (strcmp(vmem->vm_name, "kmem_oversize") != 0 &&
	    strcmp(vmem->vm_name, "static_alloc") != 0)
		return (0);
	return (1);
}

static int
leaky_vmem(uintptr_t addr, const vmem_t *vmem, leak_mtab_t **lmp)
{
	if (!leaky_vmem_interested(vmem))
		return (WALK_NEXT);

	if (mdb_pwalk("vmem_alloc", (mdb_walk_cb_t)leaky_seg, lmp, addr) == -1)
		mdb_warn("can't walk vmem_alloc for arena %p", addr);

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_estimate_vmem(uintptr_t addr, const vmem_t *vmem, size_t *est)
{
	if (!leaky_vmem_interested(vmem))
		return (WALK_NEXT);

	*est += (int)(vmem->vm_kstat.vk_alloc.value.ui64 -
	    vmem->vm_kstat.vk_free.value.ui64);

	return (WALK_NEXT);
}

static int
leaky_interested(const kmem_cache_t *c)
{
	vmem_t vmem;

	/*
	 * ignore HAT-related caches that happen to derive from kmem_default
	 */
	if (strcmp(c->cache_name, "sfmmu1_cache") == 0 ||
	    strcmp(c->cache_name, "sf_hment_cache") == 0 ||
	    strcmp(c->cache_name, "pa_hment_cache") == 0)
		return (0);

	if (mdb_vread(&vmem, sizeof (vmem), (uintptr_t)c->cache_arena) == -1) {
		mdb_warn("cannot read arena %p for cache '%s'",
		    (uintptr_t)c->cache_arena, c->cache_name);
		return (0);
	}

	/*
	 * If this cache isn't allocating from the kmem_default,
	 * kmem_firewall, or static vmem arenas, we're not interested.
	 */
	if (strcmp(vmem.vm_name, "kmem_default") != 0 &&
	    strcmp(vmem.vm_name, "kmem_firewall") != 0 &&
	    strcmp(vmem.vm_name, "static") != 0)
		return (0);

	return (1);
}

static int
leaky_estimate(uintptr_t addr, const kmem_cache_t *c, size_t *est)
{
	if (!leaky_interested(c))
		return (WALK_NEXT);

	*est += kmem_estimate_allocated(addr, c);

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_cache(uintptr_t addr, const kmem_cache_t *c, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = *lmp;
	mdb_walk_cb_t cb;
	const char *walk;
	int audit = (c->cache_flags & KMF_AUDIT);

	if (!leaky_interested(c))
		return (WALK_NEXT);

	if (audit) {
		walk = "bufctl";
		cb = (mdb_walk_cb_t)leaky_mtab;
	} else {
		walk = "kmem";
		cb = (mdb_walk_cb_t)leaky_mtab_addr;
	}
	if (mdb_pwalk(walk, cb, lmp, addr) == -1) {
		mdb_warn("can't walk kmem for cache %p (%s)", addr,
		    c->cache_name);
		return (WALK_DONE);
	}

	for (; lm < *lmp; lm++) {
		lm->lkm_limit = lm->lkm_base + c->cache_bufsize;
		if (!audit)
			lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_CACHE);
	}

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_scan_buffer(uintptr_t addr, const void *ignored, const kmem_cache_t *c)
{
	leaky_grep(addr, c->cache_bufsize);

	/*
	 * free, constructed KMF_LITE buffers keep their first uint64_t in
	 * their buftag's redzone.
	 */
	if (c->cache_flags & KMF_LITE) {
		/* LINTED alignment */
		kmem_buftag_t *btp = KMEM_BUFTAG(c, addr);
		leaky_grep((uintptr_t)&btp->bt_redzone,
		    sizeof (btp->bt_redzone));
	}

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_scan_cache(uintptr_t addr, const kmem_cache_t *c, void *ignored)
{
	if (!leaky_interested(c))
		return (WALK_NEXT);

	/*
	 * Scan all of the free, constructed buffers, since they may have
	 * pointers to allocated objects.
	 */
	if (mdb_pwalk("freemem_constructed",
	    (mdb_walk_cb_t)leaky_scan_buffer, (void *)c, addr) == -1) {
		mdb_warn("can't walk freemem_constructed for cache %p (%s)",
		    addr, c->cache_name);
		return (WALK_DONE);
	}

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_modctl(uintptr_t addr, const struct modctl *m, int *ignored)
{
	struct module mod;
	char name[MODMAXNAMELEN];

	if (m->mod_mp == NULL)
		return (WALK_NEXT);

	if (mdb_vread(&mod, sizeof (mod), (uintptr_t)m->mod_mp) == -1) {
		mdb_warn("couldn't read modctl %p's module", addr);
		return (WALK_NEXT);
	}

	if (mdb_readstr(name, sizeof (name), (uintptr_t)m->mod_modname) == -1)
		(void) mdb_snprintf(name, sizeof (name), "0x%p", addr);

	leaky_grep((uintptr_t)m->mod_mp, sizeof (struct module));
	leaky_grep((uintptr_t)mod.data, mod.data_size);
	leaky_grep((uintptr_t)mod.bss, mod.bss_size);

	return (WALK_NEXT);
}

static int
leaky_thread(uintptr_t addr, const kthread_t *t, unsigned long *pagesize)
{
	uintptr_t size, base = (uintptr_t)t->t_stkbase;
	uintptr_t stk = (uintptr_t)t->t_stk;

	/*
	 * If this thread isn't in memory, we can't look at its stack.  This
	 * may result in false positives, so we print a warning.
	 */
	if (!(t->t_schedflag & TS_LOAD)) {
		mdb_printf("findleaks: thread %p's stack swapped out; "
		    "false positives possible\n", addr);
		return (WALK_NEXT);
	}

	if (t->t_state != TS_FREE)
		leaky_grep(base, stk - base);

	/*
	 * There is always gunk hanging out between t_stk and the page
	 * boundary.  If this thread structure wasn't kmem allocated,
	 * this will include the thread structure itself.  If the thread
	 * _is_ kmem allocated, we'll be able to get to it via allthreads.
	 */
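	/*
	 * For example, with 8K pages (*pagesize == 0x2000), a t_stk of
	 * (the hypothetical) 0x30001850 leaves 0x2000 - 0x1850 == 0x7b0
	 * bytes of gunk to scan above the stack.
	 */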
	size = *pagesize - (stk & (*pagesize - 1));

	leaky_grep(stk, size);

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_kstat(uintptr_t addr, vmem_seg_t *seg, void *ignored)
{
	leaky_grep(seg->vs_start, seg->vs_end - seg->vs_start);

	return (WALK_NEXT);
}

static void
leaky_kludge(void)
{
	GElf_Sym sym;
	mdb_ctf_id_t id, rid;

	int max_mem_nodes;
	uintptr_t *counters;
	size_t ncounters;
	ssize_t hwpm_size;
	int idx;

	/*
	 * Because of DR, the page counters (which live in the kmem64 segment)
	 * can point into kmem_alloc()ed memory.  The "page_counters" array
	 * is multi-dimensional, and each entry points to an array of
	 * "hw_page_map_t"s which is "max_mem_nodes" in length.
	 *
	 * To keep this from having too much grotty knowledge of internals,
	 * we use CTF data to get the size of the structure.  For simplicity,
	 * we treat the page_counters array as a flat array of pointers, and
	 * use its size to determine how much to scan.  Unused entries will
	 * be NULL.
	 */
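	/*
	 * Concretely, if page_counters were declared as, say (dimensions
	 * hypothetical):
	 *
	 *	hw_page_map_t *page_counters[MAX_MEM_TYPES][MMU_PAGE_SIZES];
	 *
	 * we would scan it as a flat uintptr_t array, handing each
	 * non-NULL entry to leaky_grep() as max_mem_nodes hw_page_map_ts'
	 * worth of memory.
	 */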
	if (mdb_lookup_by_name("page_counters", &sym) == -1) {
		mdb_warn("unable to lookup page_counters");
		return;
	}

	if (mdb_readvar(&max_mem_nodes, "max_mem_nodes") == -1) {
		mdb_warn("unable to read max_mem_nodes");
		return;
	}

	if (mdb_ctf_lookup_by_name("unix`hw_page_map_t", &id) == -1 ||
	    mdb_ctf_type_resolve(id, &rid) == -1 ||
	    (hwpm_size = mdb_ctf_type_size(rid)) < 0) {
		mdb_warn("unable to lookup unix`hw_page_map_t");
		return;
	}

	counters = mdb_alloc(sym.st_size, UM_SLEEP | UM_GC);

	if (mdb_vread(counters, sym.st_size, (uintptr_t)sym.st_value) == -1) {
		mdb_warn("unable to read page_counters");
		return;
	}

	ncounters = sym.st_size / sizeof (*counters);

	for (idx = 0; idx < ncounters; idx++) {
		uintptr_t addr = counters[idx];
		if (addr != 0)
			leaky_grep(addr, hwpm_size * max_mem_nodes);
	}
}

int
leaky_subr_estimate(size_t *estp)
{
	uintptr_t panicstr;
	int state;

	if ((state = mdb_get_state()) == MDB_STATE_RUNNING) {
		mdb_warn("findleaks: can only be run on a system "
		    "dump or under kmdb; see dumpadm(1M)\n");
		return (DCMD_ERR);
	}

	if (mdb_readvar(&panicstr, "panicstr") == -1) {
		mdb_warn("can't read variable 'panicstr'");
		return (DCMD_ERR);
	}

	if (state != MDB_STATE_STOPPED && panicstr == 0) {
		mdb_warn("findleaks: cannot be run on a live dump.\n");
		return (DCMD_ERR);
	}

	if (mdb_walk("kmem_cache", (mdb_walk_cb_t)leaky_estimate, estp) == -1) {
		mdb_warn("couldn't walk 'kmem_cache'");
		return (DCMD_ERR);
	}

	if (*estp == 0) {
		mdb_warn("findleaks: no buffers found\n");
		return (DCMD_ERR);
	}

	if (mdb_walk("vmem", (mdb_walk_cb_t)leaky_estimate_vmem, estp) == -1) {
		mdb_warn("couldn't walk 'vmem'");
		return (DCMD_ERR);
	}

	return (DCMD_OK);
}

int
leaky_subr_fill(leak_mtab_t **lmpp)
{
	if (mdb_walk("vmem", (mdb_walk_cb_t)leaky_vmem, lmpp) == -1) {
		mdb_warn("couldn't walk 'vmem'");
		return (DCMD_ERR);
	}

	if (mdb_walk("kmem_cache", (mdb_walk_cb_t)leaky_cache, lmpp) == -1) {
		mdb_warn("couldn't walk 'kmem_cache'");
		return (DCMD_ERR);
	}

	if (mdb_readvar(&kmem_lite_count, "kmem_lite_count") == -1) {
		mdb_warn("couldn't read 'kmem_lite_count'");
		kmem_lite_count = 0;
	} else if (kmem_lite_count > 16) {
		mdb_warn("kmem_lite_count nonsensical, ignored\n");
		kmem_lite_count = 0;
	}

	return (DCMD_OK);
}

int
leaky_subr_run(void)
{
	unsigned long ps = PAGESIZE;
	uintptr_t kstat_arena;
	uintptr_t dmods;

	leaky_kludge();

	if (mdb_walk("kmem_cache", (mdb_walk_cb_t)leaky_scan_cache,
	    NULL) == -1) {
		mdb_warn("couldn't walk 'kmem_cache'");
		return (DCMD_ERR);
	}

	if (mdb_walk("modctl", (mdb_walk_cb_t)leaky_modctl, NULL) == -1) {
		mdb_warn("couldn't walk 'modctl'");
		return (DCMD_ERR);
	}

	/*
	 * If kmdb is loaded, we need to walk its module list, since kmdb
	 * modctl structures can reference kmem allocations.
	 */
	if ((mdb_readvar(&dmods, "kdi_dmods") != -1) && (dmods != 0))
		(void) mdb_pwalk("modctl", (mdb_walk_cb_t)leaky_modctl,
		    NULL, dmods);

	if (mdb_walk("thread", (mdb_walk_cb_t)leaky_thread, &ps) == -1) {
		mdb_warn("couldn't walk 'thread'");
		return (DCMD_ERR);
	}

	if (mdb_walk("deathrow", (mdb_walk_cb_t)leaky_thread, &ps) == -1) {
		mdb_warn("couldn't walk 'deathrow'");
		return (DCMD_ERR);
	}

	if (mdb_readvar(&kstat_arena, "kstat_arena") == -1) {
		mdb_warn("couldn't read 'kstat_arena'");
		return (DCMD_ERR);
	}

	if (mdb_pwalk("vmem_alloc", (mdb_walk_cb_t)leaky_kstat,
	    NULL, kstat_arena) == -1) {
		mdb_warn("couldn't walk kstat vmem arena");
		return (DCMD_ERR);
	}

	return (DCMD_OK);
}

void
leaky_subr_add_leak(leak_mtab_t *lmp)
{
	uintptr_t addr = LKM_CTLPTR(lmp->lkm_bufctl);
	size_t depth;

	switch (LKM_CTLTYPE(lmp->lkm_bufctl)) {
	case LKM_CTL_VMSEG: {
		vmem_seg_t vs;

		if (mdb_vread(&vs, sizeof (vs), addr) == -1) {
			mdb_warn("couldn't read leaked vmem_seg at addr %p",
			    addr);
			return;
		}
		depth = MIN(vs.vs_depth, VMEM_STACK_DEPTH);

		leaky_add_leak(TYPE_VMEM, addr, vs.vs_start, vs.vs_timestamp,
		    vs.vs_stack, depth, 0, (vs.vs_end - vs.vs_start));
		break;
	}
	case LKM_CTL_BUFCTL: {
		kmem_bufctl_audit_t bc;

		if (mdb_vread(&bc, sizeof (bc), addr) == -1) {
			mdb_warn("couldn't read leaked bufctl at addr %p",
			    addr);
			return;
		}

		depth = MIN(bc.bc_depth, KMEM_STACK_DEPTH);

		/*
		 * The top of the stack will be kmem_cache_alloc+offset.
		 * Since the offset in kmem_cache_alloc() isn't interesting
		 * we skip that frame for the purposes of uniquifying stacks.
		 *
		 * We also use the cache pointer as the leak's cid, to
		 * prevent the coalescing of leaks from different caches.
		 */
		if (depth > 0)
			depth--;
		leaky_add_leak(TYPE_KMEM, addr, (uintptr_t)bc.bc_addr,
		    bc.bc_timestamp, bc.bc_stack + 1, depth,
		    (uintptr_t)bc.bc_cache, 0);
		break;
	}
	case LKM_CTL_CACHE: {
		kmem_cache_t cache;
		kmem_buftag_lite_t bt;
		pc_t caller = 0;
		int depth = 0;

		/*
		 * For KMF_LITE caches, we can get the allocation PC
		 * out of the buftag structure.
		 */
		if (mdb_vread(&cache, sizeof (cache), addr) != -1 &&
		    (cache.cache_flags & KMF_LITE) &&
		    kmem_lite_count > 0 &&
		    mdb_vread(&bt, sizeof (bt),
		    /* LINTED alignment */
		    (uintptr_t)KMEM_BUFTAG(&cache, lmp->lkm_base)) != -1) {
			caller = bt.bt_history[0];
			depth = 1;
		}
		leaky_add_leak(TYPE_CACHE, lmp->lkm_base, lmp->lkm_base, 0,
		    &caller, depth, addr, addr);
		break;
	}
	default:
		mdb_warn("internal error: invalid leak_bufctl_t\n");
		break;
	}
}

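/*
 * Find the first frame in the stack that isn't in the allocator itself
 * (i.e., that doesn't resolve to a kmem_* or vmem_* symbol); leaks are
 * named and sorted by that caller.
 */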
static void
leaky_subr_caller(const pc_t *stack, uint_t depth, char *buf, uintptr_t *pcp)
{
	int i;
	GElf_Sym sym;
	uintptr_t pc = 0;

	buf[0] = 0;

	for (i = 0; i < depth; i++) {
		pc = stack[i];

		if (mdb_lookup_by_addr(pc,
		    MDB_SYM_FUZZY, buf, MDB_SYM_NAMLEN, &sym) == -1)
			continue;
		if (strncmp(buf, "kmem_", 5) == 0)
			continue;
		if (strncmp(buf, "vmem_", 5) == 0)
			continue;
		*pcp = pc;

		return;
	}

	/*
	 * We're only here if the entire call chain resolved to kmem_* or
	 * vmem_* symbols (or to no symbols at all); this shouldn't happen,
	 * but we'll just use the last caller.
	 */
	*pcp = pc;
}

int
leaky_subr_bufctl_cmp(const leak_bufctl_t *lhs, const leak_bufctl_t *rhs)
{
	char lbuf[MDB_SYM_NAMLEN], rbuf[MDB_SYM_NAMLEN];
	uintptr_t lcaller, rcaller;
	int rval;

	leaky_subr_caller(lhs->lkb_stack, lhs->lkb_depth, lbuf, &lcaller);
	leaky_subr_caller(rhs->lkb_stack, rhs->lkb_depth, rbuf, &rcaller);

	if ((rval = strcmp(lbuf, rbuf)) != 0)
		return (rval);

	if (lcaller < rcaller)
		return (-1);

	if (lcaller > rcaller)
		return (1);

	if (lhs->lkb_data < rhs->lkb_data)
		return (-1);

	if (lhs->lkb_data > rhs->lkb_data)
		return (1);

	return (0);
}

/*
 * Global state variables used by the leaky_subr_dump_* routines.  Note that
 * they are carefully cleared before use.
 */
static int lk_vmem_seen;
static int lk_cache_seen;
static int lk_kmem_seen;
static size_t lk_ttl;
static size_t lk_bytes;

void
leaky_subr_dump_start(int type)
{
	switch (type) {
	case TYPE_VMEM:
		lk_vmem_seen = 0;
		break;
	case TYPE_CACHE:
		lk_cache_seen = 0;
		break;
	case TYPE_KMEM:
		lk_kmem_seen = 0;
		break;
	default:
		break;
	}

	lk_ttl = 0;
	lk_bytes = 0;
}

void
leaky_subr_dump(const leak_bufctl_t *lkb, int verbose)
{
	const leak_bufctl_t *cur;
	kmem_cache_t cache;
	size_t min, max, size;
	char sz[30];
	char c[MDB_SYM_NAMLEN];
	uintptr_t caller;

	if (verbose) {
		lk_ttl = 0;
		lk_bytes = 0;
	}

	switch (lkb->lkb_type) {
	case TYPE_VMEM:
		if (!verbose && !lk_vmem_seen) {
			lk_vmem_seen = 1;
			mdb_printf("%-16s %7s %?s %s\n",
			    "BYTES", "LEAKED", "VMEM_SEG", "CALLER");
		}

		min = max = lkb->lkb_data;

		for (cur = lkb; cur != NULL; cur = cur->lkb_next) {
			size = cur->lkb_data;

			if (size < min)
				min = size;
			if (size > max)
				max = size;

			lk_ttl++;
			lk_bytes += size;
		}

		if (min == max)
			(void) mdb_snprintf(sz, sizeof (sz), "%ld", min);
		else
			(void) mdb_snprintf(sz, sizeof (sz), "%ld-%ld",
			    min, max);

		if (!verbose) {
			leaky_subr_caller(lkb->lkb_stack, lkb->lkb_depth,
			    c, &caller);

			if (caller != 0) {
				(void) mdb_snprintf(c, sizeof (c),
				    "%a", caller);
			} else {
				(void) mdb_snprintf(c, sizeof (c),
				    "%s", "?");
			}
			mdb_printf("%-16s %7d %?p %s\n", sz, lkb->lkb_dups + 1,
			    lkb->lkb_addr, c);
		} else {
			mdb_arg_t v;

			if (lk_ttl == 1)
				mdb_printf("kmem_oversize leak: 1 vmem_seg, "
				    "%ld bytes\n", lk_bytes);
			else
				mdb_printf("kmem_oversize leak: %d vmem_segs, "
				    "%s bytes each, %ld bytes total\n",
				    lk_ttl, sz, lk_bytes);

			v.a_type = MDB_TYPE_STRING;
			v.a_un.a_str = "-v";

			if (mdb_call_dcmd("vmem_seg", lkb->lkb_addr,
			    DCMD_ADDRSPEC, 1, &v) == -1) {
				mdb_warn("'%p::vmem_seg -v' failed",
				    lkb->lkb_addr);
			}
		}
		return;

	case TYPE_CACHE:
		if (!verbose && !lk_cache_seen) {
			lk_cache_seen = 1;
			if (lk_vmem_seen)
				mdb_printf("\n");
			mdb_printf("%-?s %7s %?s %s\n",
			    "CACHE", "LEAKED", "BUFFER", "CALLER");
		}

		if (mdb_vread(&cache, sizeof (cache), lkb->lkb_data) == -1) {
			/*
			 * This _really_ shouldn't happen; we shouldn't
			 * have been able to get this far if this
			 * cache wasn't readable.
			 */
			mdb_warn("can't read cache %p for leaked "
			    "buffer %p", lkb->lkb_data, lkb->lkb_addr);
			return;
		}

		lk_ttl += lkb->lkb_dups + 1;
		lk_bytes += (lkb->lkb_dups + 1) * cache.cache_bufsize;

		caller = (lkb->lkb_depth == 0) ? 0 : lkb->lkb_stack[0];
		if (caller != 0) {
			(void) mdb_snprintf(c, sizeof (c), "%a", caller);
		} else {
			(void) mdb_snprintf(c, sizeof (c),
			    "%s", (verbose) ? "" : "?");
		}

		if (!verbose) {
			mdb_printf("%0?p %7d %0?p %s\n", lkb->lkb_cid,
			    lkb->lkb_dups + 1, lkb->lkb_addr, c);
		} else {
			if (lk_ttl == 1)
				mdb_printf("%s leak: 1 buffer, %ld bytes,\n",
				    cache.cache_name, lk_bytes);
			else
				mdb_printf("%s leak: %d buffers, "
				    "%ld bytes each, %ld bytes total,\n",
				    cache.cache_name, lk_ttl,
				    cache.cache_bufsize, lk_bytes);

			mdb_printf("    sample addr %p%s%s\n",
			    lkb->lkb_addr, (caller == 0) ? "" : ", caller ", c);
		}
		return;

	case TYPE_KMEM:
		if (!verbose && !lk_kmem_seen) {
			lk_kmem_seen = 1;
			if (lk_vmem_seen || lk_cache_seen)
				mdb_printf("\n");
			mdb_printf("%-?s %7s %?s %s\n",
			    "CACHE", "LEAKED", "BUFCTL", "CALLER");
		}

		if (mdb_vread(&cache, sizeof (cache), lkb->lkb_cid) == -1) {
			/*
			 * This _really_ shouldn't happen; we shouldn't
			 * have been able to get this far if this
			 * cache wasn't readable.
			 */
			mdb_warn("can't read cache %p for leaked "
			    "bufctl %p", lkb->lkb_cid, lkb->lkb_addr);
			return;
		}

		lk_ttl += lkb->lkb_dups + 1;
		lk_bytes += (lkb->lkb_dups + 1) * cache.cache_bufsize;

		if (!verbose) {
			leaky_subr_caller(lkb->lkb_stack, lkb->lkb_depth,
			    c, &caller);

			if (caller != 0) {
				(void) mdb_snprintf(c, sizeof (c),
				    "%a", caller);
			} else {
				(void) mdb_snprintf(c, sizeof (c),
				    "%s", "?");
			}
			mdb_printf("%0?p %7d %0?p %s\n", lkb->lkb_cid,
			    lkb->lkb_dups + 1, lkb->lkb_addr, c);
		} else {
			mdb_arg_t v;

			if (lk_ttl == 1)
				mdb_printf("%s leak: 1 buffer, %ld bytes\n",
				    cache.cache_name, lk_bytes);
			else
				mdb_printf("%s leak: %d buffers, "
				    "%ld bytes each, %ld bytes total\n",
				    cache.cache_name, lk_ttl,
				    cache.cache_bufsize, lk_bytes);

			v.a_type = MDB_TYPE_STRING;
			v.a_un.a_str = "-v";

			if (mdb_call_dcmd("bufctl", lkb->lkb_addr,
			    DCMD_ADDRSPEC, 1, &v) == -1) {
				mdb_warn("'%p::bufctl -v' failed",
				    lkb->lkb_addr);
			}
		}
		return;

	default:
		return;
	}
}

void
leaky_subr_dump_end(int type)
{
	int i;
	int width;
	const char *leaks;

	switch (type) {
	case TYPE_VMEM:
		if (!lk_vmem_seen)
			return;

		width = 16;
		leaks = "kmem_oversize leak";
		break;

	case TYPE_CACHE:
		if (!lk_cache_seen)
			return;

		width = sizeof (uintptr_t) * 2;
		leaks = "buffer";
		break;

	case TYPE_KMEM:
		if (!lk_kmem_seen)
			return;

		width = sizeof (uintptr_t) * 2;
		leaks = "buffer";
		break;

	default:
		return;
	}

	for (i = 0; i < 72; i++)
		mdb_printf("-");
	mdb_printf("\n%*s %7ld %s%s, %ld byte%s\n",
	    width, "Total", lk_ttl, leaks, (lk_ttl == 1) ? "" : "s",
	    lk_bytes, (lk_bytes == 1) ? "" : "s");
}

int
leaky_subr_invoke_callback(const leak_bufctl_t *lkb, mdb_walk_cb_t cb,
    void *cbdata)
{
	kmem_bufctl_audit_t bc;
	vmem_seg_t vs;

	switch (lkb->lkb_type) {
	case TYPE_VMEM:
		if (mdb_vread(&vs, sizeof (vs), lkb->lkb_addr) == -1) {
			mdb_warn("unable to read vmem_seg at %p",
			    lkb->lkb_addr);
			return (WALK_NEXT);
		}
		return (cb(lkb->lkb_addr, &vs, cbdata));

	case TYPE_CACHE:
		return (cb(lkb->lkb_addr, NULL, cbdata));

	case TYPE_KMEM:
		if (mdb_vread(&bc, sizeof (bc), lkb->lkb_addr) == -1) {
			mdb_warn("unable to read bufctl at %p",
			    lkb->lkb_addr);
			return (WALK_NEXT);
		}
		return (cb(lkb->lkb_addr, &bc, cbdata));
	default:
		return (WALK_NEXT);
	}
}
923