/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * A generic memory leak detector.  The target interface, defined in
 * <leaky_impl.h>, is implemented by the genunix and libumem dmods to fill
 * in the details of operation.
 */

#include <mdb/mdb_modapi.h>

#include "leaky.h"
#include "leaky_impl.h"

#define	LK_BUFCTLHSIZE	127

/*
 * We re-use the low bit of lkm_base as the 'marked' bit.
 */
#define	LK_MARKED(b)	((uintptr_t)(b) & 1)
#define	LK_MARK(b)	((b) |= 1)
#define	LK_ADDR(b)	((uintptr_t)(b) & ~1UL)
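
/*
 * For example, with lkm_base == 0x1000, LK_MARK() yields 0x1001;
 * LK_MARKED() is then nonzero, and LK_ADDR() recovers 0x1000.
 */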

/*
 * Possible values for lk_state.
 */
#define	LK_CLEAN	0	/* No outstanding mdb_alloc()'s */
#define	LK_SWEEPING	1	/* Potentially some outstanding mdb_alloc()'s */
#define	LK_DONE		2	/* All mdb_alloc()'s complete */
#define	LK_CLEANING	3	/* Currently cleaning prior mdb_alloc()'s */

static volatile int lk_state;

#define	LK_STATE_SIZE	10000	/* completely arbitrary */

typedef int leak_ndx_t;		/* change if >2 billion buffers are needed */

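/*
 * The DFS stack of pending mtab indices is kept as a chain of fixed-size
 * leak_state_t structures; a fresh one is linked on when the current one
 * fills up.
 */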
typedef struct leak_state {
	struct leak_state *lks_next;
	leak_ndx_t lks_stack[LK_STATE_SIZE];
} leak_state_t;

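/*
 * Bean counters accumulated during the scan; the interesting ones are
 * reported (as percentages of the potential pointers examined) when
 * ::findleaks is run with -v.
 */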
typedef struct leak_beans {
	int lkb_dups;
	int lkb_follows;
	int lkb_misses;
	int lkb_dismissals;
	int lkb_pushes;
	int lkb_deepest;
} leak_beans_t;

typedef struct leak_type {
	int		lt_type;
	size_t		lt_leaks;
	leak_bufctl_t	**lt_sorted;
} leak_type_t;

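/* State for the 'leak' and 'leakbuf' walkers. */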
typedef struct leak_walk {
	int lkw_ndx;
	leak_bufctl_t *lkw_current;
	leak_bufctl_t *lkw_hash_next;
} leak_walk_t;

#define	LK_SCAN_BUFFER_SIZE	16384
static uintptr_t *lk_scan_buffer;

static leak_mtab_t *lk_mtab;
static leak_state_t *lk_free_state;
static leak_ndx_t lk_nbuffers;
static leak_beans_t lk_beans;
static leak_bufctl_t *lk_bufctl[LK_BUFCTLHSIZE];
static leak_type_t lk_types[LK_NUM_TYPES];
static size_t lk_memusage;
#ifndef _KMDB
static hrtime_t lk_begin;
static hrtime_t lk_vbegin;
#endif
static uint_t lk_verbose = FALSE;

static void
leaky_verbose(char *str, uint64_t stat)
{
	if (lk_verbose == FALSE)
		return;

	mdb_printf("findleaks: ");

	if (str == NULL) {
		mdb_printf("\n");
		return;
	}

	mdb_printf("%*s => %lld\n", 30, str, stat);
}

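/*
 * Print a statistic along with its percentage (to one decimal place) of
 * 'total'; assumes callers pass a nonzero total.
 */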
static void
leaky_verbose_perc(char *str, uint64_t stat, uint64_t total)
{
	uint_t perc = (stat * 100) / total;
	uint_t tenths = ((stat * 1000) / total) % 10;

	if (lk_verbose == FALSE)
		return;

	mdb_printf("findleaks: %*s => %-13lld (%2d.%1d%%)\n",
	    30, str, stat, perc, tenths);
}

static void
leaky_verbose_begin(void)
{
	/* kmdb can't tell time */
#ifndef _KMDB
	extern hrtime_t gethrvtime(void);
	lk_begin = gethrtime();
	lk_vbegin = gethrvtime();
#endif
	lk_memusage = 0;
}

static void
leaky_verbose_end(void)
{
	/* kmdb can't tell time */
#ifndef _KMDB
	extern hrtime_t gethrvtime(void);

	hrtime_t ts = gethrtime() - lk_begin;
	hrtime_t sec = ts / (hrtime_t)NANOSEC;
	hrtime_t nsec = ts % (hrtime_t)NANOSEC;

	hrtime_t vts = gethrvtime() - lk_vbegin;
	hrtime_t vsec = vts / (hrtime_t)NANOSEC;
	hrtime_t vnsec = vts % (hrtime_t)NANOSEC;
#endif

	if (lk_verbose == FALSE)
		return;

	mdb_printf("findleaks: %*s => %lu kB\n",
	    30, "peak memory usage", (lk_memusage + 1023)/1024);
#ifndef _KMDB
	mdb_printf("findleaks: %*s => %lld.%lld seconds\n",
	    30, "elapsed CPU time", vsec, (vnsec * 10)/(hrtime_t)NANOSEC);
	mdb_printf("findleaks: %*s => %lld.%lld seconds\n",
	    30, "elapsed wall time", sec, (nsec * 10)/(hrtime_t)NANOSEC);
#endif
	leaky_verbose(NULL, 0);
}

static void *
leaky_alloc(size_t sz, uint_t flags)
{
	void *buf = mdb_alloc(sz, flags);

	if (buf != NULL)
		lk_memusage += sz;

	return (buf);
}

static void *
leaky_zalloc(size_t sz, uint_t flags)
{
	void *buf = mdb_zalloc(sz, flags);

	if (buf != NULL)
		lk_memusage += sz;

	return (buf);
}

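/*
 * Comparison routine for sorting the mtab into ascending base address
 * order.
 */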
static int
leaky_mtabcmp(const void *l, const void *r)
{
	const leak_mtab_t *lhs = (const leak_mtab_t *)l;
	const leak_mtab_t *rhs = (const leak_mtab_t *)r;

	if (lhs->lkm_base < rhs->lkm_base)
		return (-1);
	if (lhs->lkm_base > rhs->lkm_base)
		return (1);

	return (0);
}

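/*
 * Binary search the sorted mtab for the buffer whose [base, limit) range
 * contains addr; returns its index, or -1 if no buffer contains it.
 */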
static leak_ndx_t
leaky_search(uintptr_t addr)
{
	leak_ndx_t left = 0, right = lk_nbuffers - 1, guess;

	while (right >= left) {
		guess = (right + left) >> 1;

		if (addr < LK_ADDR(lk_mtab[guess].lkm_base)) {
			right = guess - 1;
			continue;
		}

		if (addr >= lk_mtab[guess].lkm_limit) {
			left = guess + 1;
			continue;
		}

		return (guess);
	}

	return (-1);
}

void
leaky_grep(uintptr_t addr, size_t size)
{
	uintptr_t *buf, *cur, *end;
	size_t bytes, newsz, nptrs;
	leak_state_t *state = NULL, *new_state;
	uint_t state_idx;
	uintptr_t min = LK_ADDR(lk_mtab[0].lkm_base);
	uintptr_t max = lk_mtab[lk_nbuffers - 1].lkm_limit;
	int dups = 0, misses = 0, depth = 0, deepest = 0;
	int follows = 0, dismissals = 0, pushes = 0;
	leak_ndx_t mtab_ndx;
	leak_mtab_t *lmp;
	uintptr_t nbase;
	uintptr_t base;
	size_t base_size;
	const uintptr_t mask = sizeof (uintptr_t) - 1;

	if (addr == 0 || size == 0)
		return;

	state_idx = 0;

	/*
	 * Our main loop, led by the 'pop' label:
	 *	1)  read in a buffer piece by piece,
	 *	2)  mark all unmarked mtab entries reachable from it, and
	 *	    either scan them in-line or push them onto our stack of
	 *	    unfinished work.
	 *	3)  pop the top mtab entry off the stack, and loop.
	 */
pop:
	base = addr;
	base_size = size;

	/*
	 * If our address isn't pointer-aligned, we need to align it and
	 * whack the size appropriately.
	 */
	if (size < mask) {
		size = 0;
	} else if (addr & mask) {
		size -= (mask + 1) - (addr & mask);
		addr += (mask + 1) - (addr & mask);
	}
	size -= (size & mask);

	while (size > 0) {
		buf = lk_scan_buffer;
		end = &buf[LK_SCAN_BUFFER_SIZE / sizeof (uintptr_t)];

		bytes = MIN(size, LK_SCAN_BUFFER_SIZE);
		cur = end - (bytes / sizeof (uintptr_t));

		if (mdb_vread(cur, bytes, addr) == -1) {
			mdb_warn("[%p, %p): couldn't read %ld bytes at %p",
			    base, base + base_size, bytes, addr);
			break;
		}

		addr += bytes;
		size -= bytes;

		/*
		 * The buffer looks like:  ('+'s are unscanned data)
		 *
		 * -----------------------------++++++++++++++++
		 * |				|		|
		 * buf				cur		end
		 *
		 * cur scans forward.  When we encounter a new buffer, and
		 * it will fit behind "cur", we read it in and back up cur,
		 * processing it immediately.
		 */
		while (cur < end) {
			uintptr_t ptr = *cur++;

			if (ptr < min || ptr > max) {
				dismissals++;
				continue;
			}

			if ((mtab_ndx = leaky_search(ptr)) == -1) {
				misses++;
				continue;
			}

			lmp = &lk_mtab[mtab_ndx];
			if (LK_MARKED(lmp->lkm_base)) {
				dups++;			/* already seen */
				continue;
			}

			/*
			 * Found an unmarked buffer.  Mark it, then either
			 * read it in, or add it to the stack of pending work.
			 */
			follows++;
			LK_MARK(lmp->lkm_base);

			nbase = LK_ADDR(lmp->lkm_base);
			newsz = lmp->lkm_limit - nbase;

			nptrs = newsz / sizeof (uintptr_t);
			newsz = nptrs * sizeof (uintptr_t);

			if ((nbase & mask) == 0 && nptrs <= (cur - buf) &&
			    mdb_vread(cur - nptrs, newsz, nbase) != -1) {
				cur -= nptrs;
				continue;
			}

			/*
			 * couldn't process it in-place -- add it to the
			 * stack.
			 */
			if (state == NULL || state_idx == LK_STATE_SIZE) {
				if ((new_state = lk_free_state) != NULL)
					lk_free_state = new_state->lks_next;
				else
					new_state = leaky_zalloc(
					    sizeof (*state), UM_SLEEP | UM_GC);

				new_state->lks_next = state;
				state = new_state;
				state_idx = 0;
			}

			pushes++;
			state->lks_stack[state_idx++] = mtab_ndx;
			if (++depth > deepest)
				deepest = depth;
		}
	}

	/*
	 * Retrieve the next mtab index, extract its info, and loop around
	 * to process it.
	 */
	if (state_idx == 0 && state != NULL) {
		new_state = state->lks_next;

		state->lks_next = lk_free_state;
		lk_free_state = state;

		state = new_state;
		state_idx = LK_STATE_SIZE;
	}

	if (depth > 0) {
		mtab_ndx = state->lks_stack[--state_idx];

		addr = LK_ADDR(lk_mtab[mtab_ndx].lkm_base);
		size = lk_mtab[mtab_ndx].lkm_limit - addr;
		depth--;

		goto pop;
	}

	/*
	 * update the beans
	 */
	lk_beans.lkb_dups += dups;
	lk_beans.lkb_dismissals += dismissals;
	lk_beans.lkb_misses += misses;
	lk_beans.lkb_follows += follows;
	lk_beans.lkb_pushes += pushes;

	if (deepest > lk_beans.lkb_deepest)
		lk_beans.lkb_deepest = deepest;
}

static void
leaky_do_grep_ptr(uintptr_t loc, int process)
{
	leak_ndx_t ndx;
	leak_mtab_t *lkmp;
	size_t sz;

	if (loc < LK_ADDR(lk_mtab[0].lkm_base) ||
	    loc > lk_mtab[lk_nbuffers - 1].lkm_limit) {
		lk_beans.lkb_dismissals++;
		return;
	}
	if ((ndx = leaky_search(loc)) == -1) {
		lk_beans.lkb_misses++;
		return;
	}

	lkmp = &lk_mtab[ndx];
	sz = lkmp->lkm_limit - lkmp->lkm_base;

	if (LK_MARKED(lkmp->lkm_base)) {
		lk_beans.lkb_dups++;
	} else {
		LK_MARK(lkmp->lkm_base);
		lk_beans.lkb_follows++;
		if (process)
			leaky_grep(lkmp->lkm_base, sz);
	}
}

void
leaky_grep_ptr(uintptr_t loc)
{
	leaky_do_grep_ptr(loc, 1);
}

void
leaky_mark_ptr(uintptr_t loc)
{
	leaky_do_grep_ptr(loc, 0);
}

/*
 * This may be used to manually process a marked buffer.
 */
int
leaky_lookup_marked(uintptr_t loc, uintptr_t *addr_out, size_t *size_out)
{
	leak_ndx_t ndx;
	leak_mtab_t *lkmp;

	if ((ndx = leaky_search(loc)) == -1)
		return (0);

	lkmp = &lk_mtab[ndx];
	*addr_out = LK_ADDR(lkmp->lkm_base);
	*size_out = lkmp->lkm_limit - LK_ADDR(lkmp->lkm_base);
	return (1);
}

void
leaky_add_leak(int type, uintptr_t addr, uintptr_t bufaddr, hrtime_t timestamp,
    leak_pc_t *stack, uint_t depth, uintptr_t cid, uintptr_t data)
{
	leak_bufctl_t *nlkb, *lkb;
	uintptr_t total = 0;
	size_t ndx;
	int i;

	if (type < 0 || type >= LK_NUM_TYPES || depth != (uint8_t)depth) {
		mdb_warn("invalid arguments to leaky_add_leak()\n");
		return;
	}

	nlkb = leaky_zalloc(LEAK_BUFCTL_SIZE(depth), UM_SLEEP);
	nlkb->lkb_type = type;
	nlkb->lkb_addr = addr;
	nlkb->lkb_bufaddr = bufaddr;
	nlkb->lkb_cid = cid;
	nlkb->lkb_data = data;
	nlkb->lkb_depth = depth;
	nlkb->lkb_timestamp = timestamp;

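	/*
	 * Hash on the leak's type and stack trace, so that duplicate leaks
	 * land in the same bucket and can be coalesced below.
	 */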
	total = type;
	for (i = 0; i < depth; i++) {
		total += stack[i];
		nlkb->lkb_stack[i] = stack[i];
	}

	ndx = total % LK_BUFCTLHSIZE;

	if ((lkb = lk_bufctl[ndx]) == NULL) {
		lk_types[type].lt_leaks++;
		lk_bufctl[ndx] = nlkb;
		return;
	}

	for (;;) {
		if (lkb->lkb_type != type || lkb->lkb_depth != depth ||
		    lkb->lkb_cid != cid)
			goto no_match;

		for (i = 0; i < depth; i++)
			if (lkb->lkb_stack[i] != stack[i])
				goto no_match;

		/*
		 * If we're here, we've found a matching stack; link it in.
		 * Note that the volatile cast assures that these stores
		 * will occur in program order (thus assuring that we can
		 * take an interrupt and still be in a sane enough state to
		 * throw away the data structure later, in leaky_cleanup()).
		 */
		((volatile leak_bufctl_t *)nlkb)->lkb_next = lkb->lkb_next;
		((volatile leak_bufctl_t *)lkb)->lkb_next = nlkb;
		lkb->lkb_dups++;

		/*
		 * If we're older, swap places so that we are the
		 * representative leak.
		 */
		if (timestamp < lkb->lkb_timestamp) {
			nlkb->lkb_addr = lkb->lkb_addr;
			nlkb->lkb_bufaddr = lkb->lkb_bufaddr;
			nlkb->lkb_data = lkb->lkb_data;
			nlkb->lkb_timestamp = lkb->lkb_timestamp;

			lkb->lkb_addr = addr;
			lkb->lkb_bufaddr = bufaddr;
			lkb->lkb_data = data;
			lkb->lkb_timestamp = timestamp;
		}
		break;

no_match:
		if (lkb->lkb_hash_next == NULL) {
			lkb->lkb_hash_next = nlkb;
			lk_types[type].lt_leaks++;
			break;
		}
		lkb = lkb->lkb_hash_next;
	}
}

int
leaky_ctlcmp(const void *l, const void *r)
{
	const leak_bufctl_t *lhs = *((const leak_bufctl_t **)l);
	const leak_bufctl_t *rhs = *((const leak_bufctl_t **)r);

	return (leaky_subr_bufctl_cmp(lhs, rhs));
}

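/*
 * Gather each type's leaks out of the hash table into a flat array, sorted
 * by the target's leaky_subr_bufctl_cmp() ordering.
 */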
void
leaky_sort(void)
{
	int type, i, j;
	leak_bufctl_t *lkb;
	leak_type_t *ltp;

	for (type = 0; type < LK_NUM_TYPES; type++) {
		ltp = &lk_types[type];

		if (ltp->lt_leaks == 0)
			continue;

		ltp->lt_sorted = leaky_alloc(ltp->lt_leaks *
		    sizeof (leak_bufctl_t *), UM_SLEEP);

		j = 0;
		for (i = 0; i < LK_BUFCTLHSIZE; i++) {
			for (lkb = lk_bufctl[i]; lkb != NULL;
			    lkb = lkb->lkb_hash_next) {
				if (lkb->lkb_type == type)
					ltp->lt_sorted[j++] = lkb;
			}
		}
		if (j != ltp->lt_leaks)
			mdb_warn("expected %d leaks, got %d\n",
			    (int)ltp->lt_leaks, j);

		qsort(ltp->lt_sorted, ltp->lt_leaks, sizeof (leak_bufctl_t *),
		    leaky_ctlcmp);
	}
}

void
leaky_cleanup(int force)
{
	int i;
	leak_bufctl_t *lkb, *l, *next;

	/*
	 * State structures are allocated UM_GC, so we just need to nuke
	 * the freelist pointer.
	 */
	lk_free_state = NULL;

	if (lk_state == LK_CLEANING) {
		mdb_warn("interrupted during ::findleaks cleanup; some mdb "
		    "memory will be leaked\n");

		for (i = 0; i < LK_BUFCTLHSIZE; i++)
			lk_bufctl[i] = NULL;

		for (i = 0; i < LK_NUM_TYPES; i++) {
			lk_types[i].lt_leaks = 0;
			lk_types[i].lt_sorted = NULL;
		}

		bzero(&lk_beans, sizeof (lk_beans));
		lk_state = LK_CLEAN;
		return;
	}

	if (!force && lk_state != LK_SWEEPING)
		return;

	lk_state = LK_CLEANING;

	for (i = 0; i < LK_NUM_TYPES; i++) {
		if (lk_types[i].lt_sorted != NULL) {
			mdb_free(lk_types[i].lt_sorted,
			    lk_types[i].lt_leaks * sizeof (leak_bufctl_t *));
			lk_types[i].lt_sorted = NULL;
		}
		lk_types[i].lt_leaks = 0;
	}

	for (i = 0; i < LK_BUFCTLHSIZE; i++) {
		for (lkb = lk_bufctl[i]; lkb != NULL; lkb = next) {
			for (l = lkb->lkb_next; l != NULL; l = next) {
				next = l->lkb_next;
				mdb_free(l, LEAK_BUFCTL_SIZE(l->lkb_depth));
			}
			next = lkb->lkb_hash_next;
			mdb_free(lkb, LEAK_BUFCTL_SIZE(lkb->lkb_depth));
		}
		lk_bufctl[i] = NULL;
	}

	bzero(&lk_beans, sizeof (lk_beans));
	lk_state = LK_CLEAN;
}

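/*
 * Return nonzero if the given stack trace should be reported: either no
 * filter is set, or 'filter' matches one of the PCs exactly or names the
 * function containing one of them.
 */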
int
leaky_filter(const leak_pc_t *stack, int depth, uintptr_t filter)
{
	int i;
	GElf_Sym sym;
	char c;

	if (filter == 0)
		return (1);

	for (i = 0; i < depth; i++) {
		if (stack[i] == filter)
			return (1);

		if (mdb_lookup_by_addr(stack[i], MDB_SYM_FUZZY,
		    &c, sizeof (c), &sym) == -1)
			continue;

		if ((uintptr_t)sym.st_value == filter)
			return (1);
	}

	return (0);
}

void
leaky_dump(uintptr_t filter, uint_t dump_verbose)
{
	int i;
	size_t leaks;
	leak_bufctl_t **sorted;
	leak_bufctl_t *lkb;
	int seen = 0;

	for (i = 0; i < LK_NUM_TYPES; i++) {
		leaks = lk_types[i].lt_leaks;
		sorted = lk_types[i].lt_sorted;

		leaky_subr_dump_start(i);
		while (leaks-- > 0) {
			lkb = *sorted++;

			if (!leaky_filter(lkb->lkb_stack, lkb->lkb_depth,
			    filter))
				continue;

			seen = 1;
			leaky_subr_dump(lkb, 0);
		}
		leaky_subr_dump_end(i);
	}

	if (!seen) {
		if (filter != 0)
			mdb_printf(
			    "findleaks: no memory leaks matching %a found\n",
			    filter);
		else
			mdb_printf(
			    "findleaks: no memory leaks detected\n");
	}

	if (!dump_verbose || !seen)
		return;

	mdb_printf("\n");

	for (i = 0; i < LK_NUM_TYPES; i++) {
		leaks = lk_types[i].lt_leaks;
		sorted = lk_types[i].lt_sorted;

		while (leaks-- > 0) {
			lkb = *sorted++;

			if (!leaky_filter(lkb->lkb_stack, lkb->lkb_depth,
			    filter))
				continue;

			leaky_subr_dump(lkb, 1);
		}
	}
}

static const char *const findleaks_desc =
	"Does a conservative garbage collection of the heap in order to find\n"
	"potentially leaked buffers.  Similar leaks are coalesced by stack\n"
	"trace, with the oldest leak picked as representative.  The leak\n"
	"table is cached between invocations.\n"
	"\n"
	"addr, if provided, should be a function or PC location.  Reported\n"
	"leaks will then be limited to those with that function or PC in\n"
	"their stack trace.\n"
	"\n"
	"The 'leak' and 'leakbuf' walkers can be used to retrieve coalesced\n"
	"leaks.\n";

static const char *const findleaks_args =
	"  -d    detail each representative leak (long)\n"
	"  -f    throw away cached state, and do a full run\n"
	"  -v    report verbose information about the findleaks run\n";
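
/*
 * Typical invocations (a usage sketch; 'fop_read' is just an example
 * symbol):
 *
 *	> ::findleaks -v		full run with scan statistics
 *	> fop_read::findleaks		only leaks with fop_read in their
 *					stack traces
 */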

void
findleaks_help(void)
{
	mdb_printf("%s\n", findleaks_desc);
	mdb_dec_indent(2);
	mdb_printf("%<b>OPTIONS%</b>\n");
	mdb_inc_indent(2);
	mdb_printf("%s", findleaks_args);
}

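/*
 * Report a single bean counter, by name, as a percentage of the local
 * 'total'; #x stringizes the counter's field name for the verbose output.
 */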
#define	LK_REPORT_BEAN(x) leaky_verbose_perc(#x, lk_beans.lkb_##x, total);

/*ARGSUSED*/
int
findleaks(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	size_t est = 0;
	leak_ndx_t i;
	leak_mtab_t *lmp;
	ssize_t total;
	uintptr_t filter = 0;
	uint_t dump = 0;
	uint_t force = 0;
	uint_t verbose = 0;
	int ret;

	if (flags & DCMD_ADDRSPEC)
		filter = addr;

	if (mdb_getopts(argc, argv,
	    'd', MDB_OPT_SETBITS, TRUE, &dump,
	    'f', MDB_OPT_SETBITS, TRUE, &force,
	    'v', MDB_OPT_SETBITS, TRUE, &verbose, NULL) != argc)
		return (DCMD_USAGE);

	if (verbose || force)
		lk_verbose = verbose;

	/*
	 * Clean any previous ::findleaks.
	 */
	leaky_cleanup(force);

	if (lk_state == LK_DONE) {
		if (lk_verbose)
			mdb_printf("findleaks: using cached results "
			    "(-f will force a full run)\n");
		goto dump;
	}

	leaky_verbose_begin();

	if ((ret = leaky_subr_estimate(&est)) != DCMD_OK)
		return (ret);

	leaky_verbose("maximum buffers", est);

	/*
	 * Now we have an upper bound on the number of buffers.  Allocate
	 * our mtab array.
	 */
	lk_mtab = leaky_zalloc(est * sizeof (leak_mtab_t), UM_SLEEP | UM_GC);
	lmp = lk_mtab;

	if ((ret = leaky_subr_fill(&lmp)) != DCMD_OK)
		return (ret);

	lk_nbuffers = lmp - lk_mtab;

	qsort(lk_mtab, lk_nbuffers, sizeof (leak_mtab_t), leaky_mtabcmp);

	/*
	 * validate the mtab table now that it is sorted
	 */
	for (i = 0; i < lk_nbuffers; i++) {
		if (lk_mtab[i].lkm_base >= lk_mtab[i].lkm_limit) {
			mdb_warn("[%p, %p): invalid mtab\n",
			    lk_mtab[i].lkm_base, lk_mtab[i].lkm_limit);
			return (DCMD_ERR);
		}

		if (i < lk_nbuffers - 1 &&
		    lk_mtab[i].lkm_limit > lk_mtab[i + 1].lkm_base) {
			mdb_warn("[%p, %p) and [%p, %p): overlapping mtabs\n",
			    lk_mtab[i].lkm_base, lk_mtab[i].lkm_limit,
			    lk_mtab[i + 1].lkm_base, lk_mtab[i + 1].lkm_limit);
			return (DCMD_ERR);
		}
	}

	leaky_verbose("actual buffers", lk_nbuffers);

	lk_scan_buffer = leaky_zalloc(LK_SCAN_BUFFER_SIZE, UM_SLEEP | UM_GC);

	if ((ret = leaky_subr_run()) != DCMD_OK)
		return (ret);

	lk_state = LK_SWEEPING;

	for (i = 0; i < lk_nbuffers; i++) {
		if (LK_MARKED(lk_mtab[i].lkm_base))
			continue;
		leaky_subr_add_leak(&lk_mtab[i]);
	}

	total = lk_beans.lkb_dismissals + lk_beans.lkb_misses +
	    lk_beans.lkb_dups + lk_beans.lkb_follows;

	leaky_verbose(NULL, 0);
	leaky_verbose("potential pointers", total);
	LK_REPORT_BEAN(dismissals);
	LK_REPORT_BEAN(misses);
	LK_REPORT_BEAN(dups);
	LK_REPORT_BEAN(follows);

	leaky_verbose(NULL, 0);
	leaky_verbose_end();

	leaky_sort();
	lk_state = LK_DONE;
dump:
	leaky_dump(filter, dump);

	return (DCMD_OK);
}

int
leaky_walk_init(mdb_walk_state_t *wsp)
{
	leak_walk_t *lw;
	leak_bufctl_t *lkb, *cur;

	uintptr_t addr;
	int i;

	if (lk_state != LK_DONE) {
		mdb_warn("::findleaks must be run %sbefore leaks can be"
		    " walked\n", lk_state != LK_CLEAN ? "to completion " : "");
		return (WALK_ERR);
	}

	if (wsp->walk_addr == 0) {
		lkb = NULL;
		goto found;
	}

	addr = wsp->walk_addr;

	/*
	 * Search the representative leaks first, since that's what we
	 * report in the table.  If that fails, search everything.
	 *
	 * Note that we goto found with lkb as the head of the desired
	 * dup list.
	 */
	for (i = 0; i < LK_BUFCTLHSIZE; i++) {
		for (lkb = lk_bufctl[i]; lkb != NULL; lkb = lkb->lkb_hash_next)
			if (lkb->lkb_addr == addr)
				goto found;
	}

	for (i = 0; i < LK_BUFCTLHSIZE; i++) {
		for (lkb = lk_bufctl[i]; lkb != NULL; lkb = lkb->lkb_hash_next)
			for (cur = lkb; cur != NULL; cur = cur->lkb_next)
				if (cur->lkb_addr == addr)
					goto found;
	}

	mdb_warn("%p is not a leaked ctl address\n", addr);
	return (WALK_ERR);

found:
	wsp->walk_data = lw = mdb_zalloc(sizeof (*lw), UM_SLEEP);
	lw->lkw_ndx = 0;
	lw->lkw_current = lkb;
	lw->lkw_hash_next = NULL;

	return (WALK_NEXT);
}

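/*
 * Common stepping logic for the leak walkers: exhaust the current bufctl's
 * dup list, then move to the next entry on its hash chain; if no starting
 * address was given, advance through the hash buckets as each chain is
 * exhausted.
 */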
leak_bufctl_t *
leaky_walk_step_common(mdb_walk_state_t *wsp)
{
	leak_walk_t *lw = wsp->walk_data;
	leak_bufctl_t *lk;

	if ((lk = lw->lkw_current) == NULL) {
		if ((lk = lw->lkw_hash_next) == NULL) {
			if (wsp->walk_addr)
				return (NULL);

			while (lk == NULL && lw->lkw_ndx < LK_BUFCTLHSIZE)
				lk = lk_bufctl[lw->lkw_ndx++];

			/*
			 * lk == NULL here means every remaining bucket was
			 * empty; testing lkw_ndx instead would wrongly skip
			 * a chain found in the final bucket.
			 */
			if (lk == NULL)
				return (NULL);
		}

		lw->lkw_hash_next = lk->lkb_hash_next;
	}

	lw->lkw_current = lk->lkb_next;
	return (lk);
}

int
leaky_walk_step(mdb_walk_state_t *wsp)
{
	leak_bufctl_t *lk;

	if ((lk = leaky_walk_step_common(wsp)) == NULL)
		return (WALK_DONE);

	return (leaky_subr_invoke_callback(lk, wsp->walk_callback,
	    wsp->walk_cbdata));
}

void
leaky_walk_fini(mdb_walk_state_t *wsp)
{
	leak_walk_t *lw = wsp->walk_data;

	mdb_free(lw, sizeof (leak_walk_t));
}

int
leaky_buf_walk_step(mdb_walk_state_t *wsp)
{
	leak_bufctl_t *lk;

	if ((lk = leaky_walk_step_common(wsp)) == NULL)
		return (WALK_DONE);

	return (wsp->walk_callback(lk->lkb_bufaddr, NULL, wsp->walk_cbdata));
}