/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * A generic memory leak detector.  The target interface, defined in
 * <leaky_impl.h>, is implemented by the genunix and libumem dmods to fill
 * in the details of operation.
 */

#include <mdb/mdb_modapi.h>

#include "leaky.h"
#include "leaky_impl.h"

#define	LK_BUFCTLHSIZE	127

/*
 * We re-use the low bit of lkm_base as the 'marked' bit.
 */
#define	LK_MARKED(b)	((uintptr_t)(b) & 1)
#define	LK_MARK(b)	((b) |= 1)
#define	LK_ADDR(b)	((uintptr_t)(b) & ~1UL)
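
/*
 * For example, with an 8-byte-aligned base of 0x300000a20000, LK_MARK()
 * yields 0x300000a20001, LK_MARKED() reads back the low bit, and
 * LK_ADDR() recovers the original address.  This is safe so long as
 * buffer addresses are at least 2-byte aligned, leaving the low bit of
 * lkm_base otherwise unused.
 */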

/*
 * Possible values for lk_state.
 */
#define	LK_CLEAN	0	/* No outstanding mdb_alloc()'s */
#define	LK_SWEEPING	1	/* Potentially some outstanding mdb_alloc()'s */
#define	LK_DONE		2	/* All mdb_alloc()'s complete */
#define	LK_CLEANING	3	/* Currently cleaning prior mdb_alloc()'s */
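
/*
 * A typical run moves through these states as follows:
 *
 *	LK_CLEAN    -> LK_SWEEPING	::findleaks begins making
 *					persistent allocations
 *	LK_SWEEPING -> LK_DONE		results sorted; leak table cached
 *	LK_DONE     -> LK_CLEANING	leaky_cleanup() begins freeing it
 *	LK_CLEANING -> LK_CLEAN		cleanup finished uninterrupted
 */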

static volatile int lk_state;

#define	LK_STATE_SIZE	10000	/* completely arbitrary */

typedef int leak_ndx_t;		/* change if >2 billion buffers are needed */

typedef struct leak_state {
	struct leak_state *lks_next;
	leak_ndx_t lks_stack[LK_STATE_SIZE];
} leak_state_t;

typedef struct leak_beans {
	int lkb_dups;
	int lkb_follows;
	int lkb_misses;
	int lkb_dismissals;
	int lkb_pushes;
	int lkb_deepest;
} leak_beans_t;

typedef struct leak_type {
	int		lt_type;
	size_t		lt_leaks;
	leak_bufctl_t	**lt_sorted;
} leak_type_t;

typedef struct leak_walk {
	int lkw_ndx;
	leak_bufctl_t *lkw_current;
	leak_bufctl_t *lkw_hash_next;
} leak_walk_t;

#define	LK_SCAN_BUFFER_SIZE	16384
static uintptr_t *lk_scan_buffer;

static leak_mtab_t *lk_mtab;
static leak_state_t *lk_free_state;
static leak_ndx_t lk_nbuffers;
static leak_beans_t lk_beans;
static leak_bufctl_t *lk_bufctl[LK_BUFCTLHSIZE];
static leak_type_t lk_types[LK_NUM_TYPES];
static size_t lk_memusage;
#ifndef _KMDB
static hrtime_t lk_begin;
static hrtime_t lk_vbegin;
#endif
static uint_t lk_verbose = FALSE;

static void
leaky_verbose(char *str, uint64_t stat)
{
	if (lk_verbose == FALSE)
		return;

	mdb_printf("findleaks: ");

	if (str == NULL) {
		mdb_printf("\n");
		return;
	}

	mdb_printf("%*s => %lld\n", 30, str, stat);
}

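/*
 * Print stat as a percentage of total, with one decimal digit computed
 * entirely in integer arithmetic: e.g., stat == 1 and total == 8 prints
 * "12.5%", since (1 * 100) / 8 == 12 and ((1 * 1000) / 8) % 10 == 5.
 */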
static void
leaky_verbose_perc(char *str, uint64_t stat, uint64_t total)
{
	uint_t perc;
	uint_t tenths;

	if (lk_verbose == FALSE)
		return;

	perc = (stat * 100) / total;
	tenths = ((stat * 1000) / total) % 10;

	mdb_printf("findleaks: %*s => %-13lld (%2d.%1d%%)\n",
	    30, str, stat, perc, tenths);
}

static void
leaky_verbose_begin(void)
{
	/* kmdb can't tell time */
#ifndef _KMDB
	extern hrtime_t gethrvtime(void);
	lk_begin = gethrtime();
	lk_vbegin = gethrvtime();
#endif
	lk_memusage = 0;
}

static void
leaky_verbose_end(void)
{
	/* kmdb can't tell time */
#ifndef _KMDB
	extern hrtime_t gethrvtime(void);

	hrtime_t ts = gethrtime() - lk_begin;
	hrtime_t sec = ts / (hrtime_t)NANOSEC;
	hrtime_t nsec = ts % (hrtime_t)NANOSEC;

	hrtime_t vts = gethrvtime() - lk_vbegin;
	hrtime_t vsec = vts / (hrtime_t)NANOSEC;
	hrtime_t vnsec = vts % (hrtime_t)NANOSEC;
#endif

	if (lk_verbose == FALSE)
		return;

	mdb_printf("findleaks: %*s => %lu kB\n",
	    30, "peak memory usage", (lk_memusage + 1023)/1024);
#ifndef _KMDB
	mdb_printf("findleaks: %*s => %lld.%lld seconds\n",
	    30, "elapsed CPU time", vsec, (vnsec * 10)/(hrtime_t)NANOSEC);
	mdb_printf("findleaks: %*s => %lld.%lld seconds\n",
	    30, "elapsed wall time", sec, (nsec * 10)/(hrtime_t)NANOSEC);
#endif
	leaky_verbose(NULL, 0);
}

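/*
 * Thin wrappers around mdb_alloc()/mdb_zalloc() which account for our
 * memory usage in lk_memusage, reported by "::findleaks -v" as the peak
 * memory usage of the run.
 */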
static void *
leaky_alloc(size_t sz, uint_t flags)
{
	void *buf = mdb_alloc(sz, flags);

	if (buf != NULL)
		lk_memusage += sz;

	return (buf);
}

static void *
leaky_zalloc(size_t sz, uint_t flags)
{
	void *buf = mdb_zalloc(sz, flags);

	if (buf != NULL)
		lk_memusage += sz;

	return (buf);
}

static int
leaky_mtabcmp(const void *l, const void *r)
{
	const leak_mtab_t *lhs = (const leak_mtab_t *)l;
	const leak_mtab_t *rhs = (const leak_mtab_t *)r;

	if (lhs->lkm_base < rhs->lkm_base)
		return (-1);
	if (lhs->lkm_base > rhs->lkm_base)
		return (1);

	return (0);
}

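/*
 * Binary-search the sorted mtab for the entry whose [lkm_base,
 * lkm_limit) range contains addr, returning its index or -1 if addr
 * does not fall within any buffer.  LK_ADDR() strips the mark bit, so
 * the search works on marked and unmarked entries alike.
 */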
static leak_ndx_t
leaky_search(uintptr_t addr)
{
	leak_ndx_t left = 0, right = lk_nbuffers - 1, guess;

	while (right >= left) {
		guess = (right + left) >> 1;

		if (addr < LK_ADDR(lk_mtab[guess].lkm_base)) {
			right = guess - 1;
			continue;
		}

		if (addr >= lk_mtab[guess].lkm_limit) {
			left = guess + 1;
			continue;
		}

		return (guess);
	}

	return (-1);
}

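/*
 * Conservatively scan [addr, addr + size) for pointers into any known
 * buffer, marking each buffer found and scanning it in turn.  The
 * traversal is iterative: buffers which can't be processed in-line are
 * pushed onto an explicit stack of leak_state_t structures.
 */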
void
leaky_grep(uintptr_t addr, size_t size)
{
	uintptr_t *buf, *cur, *end;
	size_t bytes, newsz, nptrs;
	leak_state_t *state = NULL, *new_state;
	uint_t state_idx;
	uintptr_t min = LK_ADDR(lk_mtab[0].lkm_base);
	uintptr_t max = lk_mtab[lk_nbuffers - 1].lkm_limit;
	int dups = 0, misses = 0, depth = 0, deepest = 0;
	int follows = 0, dismissals = 0, pushes = 0;
	leak_ndx_t mtab_ndx;
	leak_mtab_t *lmp;
	uintptr_t nbase;
	uintptr_t base;
	size_t base_size;
	const uintptr_t mask = sizeof (uintptr_t) - 1;

	if (addr == NULL || size == 0)
		return;

	state_idx = 0;

	/*
	 * Our main loop, led by the 'pop' label:
	 *	1)  read in a buffer piece by piece,
	 *	2)  mark all unmarked mtab entries reachable from it, and
	 *	    either scan them in-line or push them onto our stack of
	 *	    unfinished work.
	 *	3)  pop the top mtab entry off the stack, and loop.
	 */
pop:
	base = addr;
	base_size = size;

	/*
	 * If our address isn't pointer-aligned, we need to align it and
	 * whack the size appropriately.
	 */
	if (size < mask) {
		size = 0;
	} else if (addr & mask) {
		size -= (mask + 1) - (addr & mask);
		addr += (mask + 1) - (addr & mask);
	}
	size -= (size & mask);
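
	/*
	 * For example, on a 64-bit target (mask == 7), addr == 0x1003
	 * and size == 0x20 become addr == 0x1008 and size == 0x18: five
	 * bytes are dropped from the front to align addr, and the final
	 * subtraction rounds the remainder down to a multiple of the
	 * pointer size.
	 */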

	while (size > 0) {
		buf = lk_scan_buffer;
		end = &buf[LK_SCAN_BUFFER_SIZE / sizeof (uintptr_t)];

		bytes = MIN(size, LK_SCAN_BUFFER_SIZE);
		cur = end - (bytes / sizeof (uintptr_t));

		if (mdb_vread(cur, bytes, addr) == -1) {
			mdb_warn("[%p, %p): couldn't read %ld bytes at %p",
			    base, base + base_size, bytes, addr);
			break;
		}

		addr += bytes;
		size -= bytes;

		/*
		 * The buffer looks like:  ('+'s are unscanned data)
		 *
		 * -----------------------------++++++++++++++++
		 * |				|		|
		 * buf				cur		end
		 *
		 * cur scans forward.  When we encounter a new buffer
		 * that fits behind "cur", we read it in and back cur
		 * up, processing it immediately.
		 */
		while (cur < end) {
			uintptr_t ptr = *cur++;

			if (ptr < min || ptr > max) {
				dismissals++;
				continue;
			}

			if ((mtab_ndx = leaky_search(ptr)) == -1) {
				misses++;
				continue;
			}

			lmp = &lk_mtab[mtab_ndx];
			if (LK_MARKED(lmp->lkm_base)) {
				dups++;			/* already seen */
				continue;
			}

			/*
			 * Found an unmarked buffer.  Mark it, then either
			 * read it in, or add it to the stack of pending work.
			 */
			follows++;
			LK_MARK(lmp->lkm_base);

			nbase = LK_ADDR(lmp->lkm_base);
			newsz = lmp->lkm_limit - nbase;

			nptrs = newsz / sizeof (uintptr_t);
			newsz = nptrs * sizeof (uintptr_t);

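			/*
			 * If the new buffer is pointer-aligned and fits
			 * in the already-scanned space behind cur, read
			 * it in there so the scan picks it up next --
			 * no stack push required.
			 */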
			if ((nbase & mask) == 0 && nptrs <= (cur - buf) &&
			    mdb_vread(cur - nptrs, newsz, nbase) != -1) {
				cur -= nptrs;
				continue;
			}

			/*
			 * couldn't process it in-place -- add it to the
			 * stack.
			 */
			if (state == NULL || state_idx == LK_STATE_SIZE) {
				if ((new_state = lk_free_state) != NULL)
					lk_free_state = new_state->lks_next;
				else
					new_state = leaky_zalloc(
					    sizeof (*state), UM_SLEEP | UM_GC);

				new_state->lks_next = state;
				state = new_state;
				state_idx = 0;
			}

			pushes++;
			state->lks_stack[state_idx++] = mtab_ndx;
			if (++depth > deepest)
				deepest = depth;
		}
	}

	/*
	 * Retrieve the next mtab index, extract its info, and loop around
	 * to process it.
	 */
	if (state_idx == 0 && state != NULL) {
		new_state = state->lks_next;

		state->lks_next = lk_free_state;
		lk_free_state = state;

		state = new_state;
		state_idx = LK_STATE_SIZE;
	}

	if (depth > 0) {
		mtab_ndx = state->lks_stack[--state_idx];

		addr = LK_ADDR(lk_mtab[mtab_ndx].lkm_base);
		size = lk_mtab[mtab_ndx].lkm_limit - addr;
		depth--;

		goto pop;
	}

	/*
	 * update the beans
	 */
	lk_beans.lkb_dups += dups;
	lk_beans.lkb_dismissals += dismissals;
	lk_beans.lkb_misses += misses;
	lk_beans.lkb_follows += follows;
	lk_beans.lkb_pushes += pushes;

	if (deepest > lk_beans.lkb_deepest)
		lk_beans.lkb_deepest = deepest;
}

static void
leaky_do_grep_ptr(uintptr_t loc, int process)
{
	leak_ndx_t ndx;
	leak_mtab_t *lkmp;
	size_t sz;

	if (loc < LK_ADDR(lk_mtab[0].lkm_base) ||
	    loc > lk_mtab[lk_nbuffers - 1].lkm_limit) {
		lk_beans.lkb_dismissals++;
		return;
	}
	if ((ndx = leaky_search(loc)) == -1) {
		lk_beans.lkb_misses++;
		return;
	}

	lkmp = &lk_mtab[ndx];
	sz = lkmp->lkm_limit - lkmp->lkm_base;

	if (LK_MARKED(lkmp->lkm_base)) {
		lk_beans.lkb_dups++;
	} else {
		LK_MARK(lkmp->lkm_base);
		lk_beans.lkb_follows++;
		/* strip the mark bit we just set before scanning */
		if (process)
			leaky_grep(LK_ADDR(lkmp->lkm_base), sz);
	}
}
436 
437 void
438 leaky_grep_ptr(uintptr_t loc)
439 {
440 	leaky_do_grep_ptr(loc, 1);
441 }
442 
443 void
444 leaky_mark_ptr(uintptr_t loc)
445 {
446 	leaky_do_grep_ptr(loc, 0);
447 }
448 
449 /*
450  * This may be used to manually process a marked buffer.
451  */
452 int
453 leaky_lookup_marked(uintptr_t loc, uintptr_t *addr_out, size_t *size_out)
454 {
455 	leak_ndx_t ndx;
456 	leak_mtab_t *lkmp;
457 
458 	if ((ndx = leaky_search(loc)) == -1)
459 		return (0);
460 
461 	lkmp = &lk_mtab[ndx];
462 	*addr_out = LK_ADDR(lkmp->lkm_base);
463 	*size_out = lkmp->lkm_limit - LK_ADDR(lkmp->lkm_base);
464 	return (1);
465 }
466 
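/*
 * Record a leaked buffer reported by the target.  Leaks are hashed by
 * type, stack trace, and cache id; matching leaks are coalesced onto a
 * dup list, with the oldest leak (smallest timestamp) kept as the
 * representative for reporting.
 */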
void
leaky_add_leak(int type, uintptr_t addr, uintptr_t bufaddr, hrtime_t timestamp,
    leak_pc_t *stack, uint_t depth, uintptr_t cid, uintptr_t data)
{
	leak_bufctl_t *nlkb, *lkb;
	uintptr_t total = 0;
	size_t ndx;
	int i;

	if (type < 0 || type >= LK_NUM_TYPES || depth != (uint8_t)depth) {
		mdb_warn("invalid arguments to leaky_add_leak()\n");
		return;
	}

	nlkb = leaky_zalloc(LEAK_BUFCTL_SIZE(depth), UM_SLEEP);
	nlkb->lkb_type = type;
	nlkb->lkb_addr = addr;
	nlkb->lkb_bufaddr = bufaddr;
	nlkb->lkb_cid = cid;
	nlkb->lkb_data = data;
	nlkb->lkb_depth = depth;
	nlkb->lkb_timestamp = timestamp;

	total = type;
	for (i = 0; i < depth; i++) {
		total += stack[i];
		nlkb->lkb_stack[i] = stack[i];
	}

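	/*
	 * The hash key is simply the type plus the sum of the stack PCs,
	 * taken modulo the (prime) hash table size; identical stacks
	 * always hash to the same chain, and the full comparison below
	 * decides whether two leaks truly match.
	 */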
	ndx = total % LK_BUFCTLHSIZE;

	if ((lkb = lk_bufctl[ndx]) == NULL) {
		lk_types[type].lt_leaks++;
		lk_bufctl[ndx] = nlkb;
		return;
	}

	for (;;) {
		if (lkb->lkb_type != type || lkb->lkb_depth != depth ||
		    lkb->lkb_cid != cid)
			goto no_match;

		for (i = 0; i < depth; i++)
			if (lkb->lkb_stack[i] != stack[i])
				goto no_match;

		/*
		 * If we're here, we've found a matching stack; link it in.
		 * Note that the volatile cast ensures that these stores
		 * will occur in program order (thus ensuring that we can
		 * take an interrupt and still be in a sane enough state to
		 * throw away the data structure later, in leaky_cleanup()).
		 */
		((volatile leak_bufctl_t *)nlkb)->lkb_next = lkb->lkb_next;
		((volatile leak_bufctl_t *)lkb)->lkb_next = nlkb;
		lkb->lkb_dups++;

		/*
		 * If we're older, swap places so that we are the
		 * representative leak.
		 */
		if (timestamp < lkb->lkb_timestamp) {
			nlkb->lkb_addr = lkb->lkb_addr;
			nlkb->lkb_bufaddr = lkb->lkb_bufaddr;
			nlkb->lkb_data = lkb->lkb_data;
			nlkb->lkb_timestamp = lkb->lkb_timestamp;

			lkb->lkb_addr = addr;
			lkb->lkb_bufaddr = bufaddr;
			lkb->lkb_data = data;
			lkb->lkb_timestamp = timestamp;
		}
		break;

no_match:
		if (lkb->lkb_hash_next == NULL) {
			lkb->lkb_hash_next = nlkb;
			lk_types[type].lt_leaks++;
			break;
		}
		lkb = lkb->lkb_hash_next;
	}
}

int
leaky_ctlcmp(const void *l, const void *r)
{
	const leak_bufctl_t *lhs = *((const leak_bufctl_t **)l);
	const leak_bufctl_t *rhs = *((const leak_bufctl_t **)r);

	return (leaky_subr_bufctl_cmp(lhs, rhs));
}

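/*
 * Gather the coalesced leaks for each type into a flat array and sort
 * it with the target's comparison routine, so that reports appear in a
 * stable, target-defined order.
 */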
void
leaky_sort(void)
{
	int type, i, j;
	leak_bufctl_t *lkb;
	leak_type_t *ltp;

	for (type = 0; type < LK_NUM_TYPES; type++) {
		ltp = &lk_types[type];

		if (ltp->lt_leaks == 0)
			continue;

		ltp->lt_sorted = leaky_alloc(ltp->lt_leaks *
		    sizeof (leak_bufctl_t *), UM_SLEEP);

		j = 0;
		for (i = 0; i < LK_BUFCTLHSIZE; i++) {
			for (lkb = lk_bufctl[i]; lkb != NULL;
			    lkb = lkb->lkb_hash_next) {
				if (lkb->lkb_type == type)
					ltp->lt_sorted[j++] = lkb;
			}
		}
		if (j != ltp->lt_leaks)
			mdb_warn("expected %d leaks, got %d\n", ltp->lt_leaks,
			    j);

		qsort(ltp->lt_sorted, ltp->lt_leaks, sizeof (leak_bufctl_t *),
		    leaky_ctlcmp);
	}
}

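/*
 * Free the results of a prior ::findleaks run.  If we were interrupted
 * during a previous cleanup (LK_CLEANING), we deliberately leak the
 * remaining mdb memory rather than risk freeing a buffer twice.
 */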
void
leaky_cleanup(int force)
{
	int i;
	leak_bufctl_t *lkb, *l, *next;

	/*
	 * State structures are allocated UM_GC, so we just need to nuke
	 * the freelist pointer.
	 */
	lk_free_state = NULL;

	switch (lk_state) {
	case LK_CLEAN:
		return;		/* nothing to do */

	case LK_CLEANING:
		mdb_warn("interrupted during ::findleaks cleanup; some mdb "
		    "memory will be leaked\n");

		for (i = 0; i < LK_BUFCTLHSIZE; i++)
			lk_bufctl[i] = NULL;

		for (i = 0; i < LK_NUM_TYPES; i++) {
			lk_types[i].lt_leaks = 0;
			lk_types[i].lt_sorted = NULL;
		}

		bzero(&lk_beans, sizeof (lk_beans));
		lk_state = LK_CLEAN;
		return;

	case LK_SWEEPING:
		break;		/* must clean up */

	case LK_DONE:
	default:
		if (!force)
			return;
		break;		/* only clean up if forced */
	}

	lk_state = LK_CLEANING;

	for (i = 0; i < LK_NUM_TYPES; i++) {
		if (lk_types[i].lt_sorted != NULL) {
			mdb_free(lk_types[i].lt_sorted,
			    lk_types[i].lt_leaks * sizeof (leak_bufctl_t *));
			lk_types[i].lt_sorted = NULL;
		}
		lk_types[i].lt_leaks = 0;
	}

	for (i = 0; i < LK_BUFCTLHSIZE; i++) {
		for (lkb = lk_bufctl[i]; lkb != NULL; lkb = next) {
			for (l = lkb->lkb_next; l != NULL; l = next) {
				next = l->lkb_next;
				mdb_free(l, LEAK_BUFCTL_SIZE(l->lkb_depth));
			}
			next = lkb->lkb_hash_next;
			mdb_free(lkb, LEAK_BUFCTL_SIZE(lkb->lkb_depth));
		}
		lk_bufctl[i] = NULL;
	}

	bzero(&lk_beans, sizeof (lk_beans));
	lk_state = LK_CLEAN;
}

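/*
 * Return nonzero if the given stack trace matches the filter: either
 * one of its PCs equals the filter exactly, or the filter names the
 * function containing one of the PCs (resolved with a fuzzy symbol
 * lookup).  With no filter, everything matches.
 */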
int
leaky_filter(const leak_pc_t *stack, int depth, uintptr_t filter)
{
	int i;
	GElf_Sym sym;
	char c;

	if (filter == NULL)
		return (1);

	for (i = 0; i < depth; i++) {
		if (stack[i] == filter)
			return (1);

		if (mdb_lookup_by_addr(stack[i], MDB_SYM_FUZZY,
		    &c, sizeof (c), &sym) == -1)
			continue;

		if ((uintptr_t)sym.st_value == filter)
			return (1);
	}

	return (0);
}

void
leaky_dump(uintptr_t filter, uint_t dump_verbose)
{
	int i;
	size_t leaks;
	leak_bufctl_t **sorted;
	leak_bufctl_t *lkb;
	int seen = 0;

	for (i = 0; i < LK_NUM_TYPES; i++) {
		leaks = lk_types[i].lt_leaks;
		sorted = lk_types[i].lt_sorted;

		leaky_subr_dump_start(i);
		while (leaks-- > 0) {
			lkb = *sorted++;

			if (!leaky_filter(lkb->lkb_stack, lkb->lkb_depth,
			    filter))
				continue;

			seen = 1;
			leaky_subr_dump(lkb, 0);
		}
		leaky_subr_dump_end(i);
	}

	if (!seen) {
		if (filter != NULL)
			mdb_printf(
			    "findleaks: no memory leaks matching %a found\n",
			    filter);
		else
			mdb_printf(
			    "findleaks: no memory leaks detected\n");
	}

	if (!dump_verbose || !seen)
		return;

	mdb_printf("\n");

	for (i = 0; i < LK_NUM_TYPES; i++) {
		leaks = lk_types[i].lt_leaks;
		sorted = lk_types[i].lt_sorted;

		while (leaks-- > 0) {
			lkb = *sorted++;

			if (!leaky_filter(lkb->lkb_stack, lkb->lkb_depth,
			    filter))
				continue;

			leaky_subr_dump(lkb, 1);
		}
	}
}

static const char *const findleaks_desc =
	"Does a conservative garbage collection of the heap in order to find\n"
	"potentially leaked buffers.  Similar leaks are coalesced by stack\n"
	"trace, with the oldest leak picked as representative.  The leak\n"
	"table is cached between invocations.\n"
	"\n"
	"addr, if provided, should be a function or PC location.  Reported\n"
	"leaks will then be limited to those with that function or PC in\n"
	"their stack trace.\n"
	"\n"
	"The 'leak' and 'leakbuf' walkers can be used to retrieve coalesced\n"
	"leaks.\n";

static const char *const findleaks_args =
	"  -d    detail each representative leak (long)\n"
	"  -f    throw away cached state, and do a full run\n"
	"  -v    report verbose information about the findleaks run\n";

void
findleaks_help(void)
{
	mdb_printf("%s\n", findleaks_desc);
	mdb_dec_indent(2);
	mdb_printf("%<b>OPTIONS%</b>\n");
	mdb_inc_indent(2);
	mdb_printf("%s", findleaks_args);
}

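/*
 * Report one bean counter as a percentage of the total number of
 * potential pointers scanned; #x stringizes the counter name for its
 * label, and lkb_##x selects the counter itself.
 */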
#define	LK_REPORT_BEAN(x) leaky_verbose_perc(#x, lk_beans.lkb_##x, total);

/*ARGSUSED*/
int
findleaks(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	size_t est = 0;
	leak_ndx_t i;
	leak_mtab_t *lmp;
	ssize_t total;
	uintptr_t filter = NULL;
	uint_t dump = 0;
	uint_t force = 0;
	uint_t verbose = 0;
	int ret;

	if (flags & DCMD_ADDRSPEC)
		filter = addr;

	if (mdb_getopts(argc, argv,
	    'd', MDB_OPT_SETBITS, TRUE, &dump,
	    'f', MDB_OPT_SETBITS, TRUE, &force,
	    'v', MDB_OPT_SETBITS, TRUE, &verbose, NULL) != argc)
		return (DCMD_USAGE);

	if (verbose || force)
		lk_verbose = verbose;

	/*
	 * Clean any previous ::findleaks.
	 */
	leaky_cleanup(force);

	if (lk_state == LK_DONE) {
		if (lk_verbose)
			mdb_printf("findleaks: using cached results "
			    "(use '-f' to force a full run)\n");
		goto dump;
	}

	leaky_verbose_begin();

	if ((ret = leaky_subr_estimate(&est)) != DCMD_OK)
		return (ret);

	leaky_verbose("maximum buffers", est);

	/*
	 * Now we have an upper bound on the number of buffers.  Allocate
	 * our mtab array.
	 */
	lk_mtab = leaky_zalloc(est * sizeof (leak_mtab_t), UM_SLEEP | UM_GC);
	lmp = lk_mtab;

	if ((ret = leaky_subr_fill(&lmp)) != DCMD_OK)
		return (ret);

	lk_nbuffers = lmp - lk_mtab;

	qsort(lk_mtab, lk_nbuffers, sizeof (leak_mtab_t), leaky_mtabcmp);

	/*
	 * validate the mtab table now that it is sorted
	 */
	for (i = 0; i < lk_nbuffers; i++) {
		if (lk_mtab[i].lkm_base >= lk_mtab[i].lkm_limit) {
			mdb_warn("[%p, %p): invalid mtab\n",
			    lk_mtab[i].lkm_base, lk_mtab[i].lkm_limit);
			return (DCMD_ERR);
		}

		if (i < lk_nbuffers - 1 &&
		    lk_mtab[i].lkm_limit > lk_mtab[i + 1].lkm_base) {
			mdb_warn("[%p, %p) and [%p, %p): overlapping mtabs\n",
			    lk_mtab[i].lkm_base, lk_mtab[i].lkm_limit,
			    lk_mtab[i + 1].lkm_base, lk_mtab[i + 1].lkm_limit);
			return (DCMD_ERR);
		}
	}

	leaky_verbose("actual buffers", lk_nbuffers);

	lk_scan_buffer = leaky_zalloc(LK_SCAN_BUFFER_SIZE, UM_SLEEP | UM_GC);

	if ((ret = leaky_subr_run()) != DCMD_OK)
		return (ret);

	lk_state = LK_SWEEPING;

	for (i = 0; i < lk_nbuffers; i++) {
		if (LK_MARKED(lk_mtab[i].lkm_base))
			continue;
		leaky_subr_add_leak(&lk_mtab[i]);
	}

	total = lk_beans.lkb_dismissals + lk_beans.lkb_misses +
	    lk_beans.lkb_dups + lk_beans.lkb_follows;

	leaky_verbose(NULL, 0);
	leaky_verbose("potential pointers", total);
	LK_REPORT_BEAN(dismissals);
	LK_REPORT_BEAN(misses);
	LK_REPORT_BEAN(dups);
	LK_REPORT_BEAN(follows);

	leaky_verbose(NULL, 0);
	leaky_verbose_end();

	leaky_sort();
	lk_state = LK_DONE;
dump:
	leaky_dump(filter, dump);

	return (DCMD_OK);
}

int
leaky_walk_init(mdb_walk_state_t *wsp)
{
	leak_walk_t *lw;
	leak_bufctl_t *lkb, *cur;

	uintptr_t addr;
	int i;

	if (lk_state != LK_DONE) {
		mdb_warn("::findleaks must be run %sbefore leaks can be"
		    " walked\n", lk_state != LK_CLEAN ? "to completion " : "");
		return (WALK_ERR);
	}

	if (wsp->walk_addr == NULL) {
		lkb = NULL;
		goto found;
	}

	addr = wsp->walk_addr;

	/*
	 * Search the representative leaks first, since that's what we
	 * report in the table.  If that fails, search everything.
	 *
	 * Note that we goto found with lkb as the head of the desired
	 * dup list.
	 */
	for (i = 0; i < LK_BUFCTLHSIZE; i++) {
		for (lkb = lk_bufctl[i]; lkb != NULL; lkb = lkb->lkb_hash_next)
			if (lkb->lkb_addr == addr)
				goto found;
	}

	for (i = 0; i < LK_BUFCTLHSIZE; i++) {
		for (lkb = lk_bufctl[i]; lkb != NULL; lkb = lkb->lkb_hash_next)
			for (cur = lkb; cur != NULL; cur = cur->lkb_next)
				if (cur->lkb_addr == addr)
					goto found;
	}

	mdb_warn("%p is not a leaked ctl address\n", addr);
	return (WALK_ERR);

found:
	wsp->walk_data = lw = mdb_zalloc(sizeof (*lw), UM_SLEEP);
	lw->lkw_ndx = 0;
	lw->lkw_current = lkb;
	lw->lkw_hash_next = NULL;

	return (WALK_NEXT);
}

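/*
 * Common stepping logic for the 'leak' and 'leakbuf' walkers.  Each
 * step returns the next element of the current dup list; when that is
 * exhausted we advance down the current hash chain, and then on to the
 * next hash bucket.  A walk begun at a specific leak visits only that
 * leak's dup list.
 */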
leak_bufctl_t *
leaky_walk_step_common(mdb_walk_state_t *wsp)
{
	leak_walk_t *lw = wsp->walk_data;
	leak_bufctl_t *lk;

	if ((lk = lw->lkw_current) == NULL) {
		if ((lk = lw->lkw_hash_next) == NULL) {
			if (wsp->walk_addr)
				return (NULL);

			while (lk == NULL && lw->lkw_ndx < LK_BUFCTLHSIZE)
				lk = lk_bufctl[lw->lkw_ndx++];

			if (lk == NULL)
				return (NULL);
		}

		lw->lkw_hash_next = lk->lkb_hash_next;
	}

	lw->lkw_current = lk->lkb_next;
	return (lk);
}

int
leaky_walk_step(mdb_walk_state_t *wsp)
{
	leak_bufctl_t *lk;

	if ((lk = leaky_walk_step_common(wsp)) == NULL)
		return (WALK_DONE);

	return (leaky_subr_invoke_callback(lk, wsp->walk_callback,
	    wsp->walk_cbdata));
}

void
leaky_walk_fini(mdb_walk_state_t *wsp)
{
	leak_walk_t *lw = wsp->walk_data;

	mdb_free(lw, sizeof (leak_walk_t));
}

int
leaky_buf_walk_step(mdb_walk_state_t *wsp)
{
	leak_bufctl_t *lk;

	if ((lk = leaky_walk_step_common(wsp)) == NULL)
		return (WALK_DONE);

	return (wsp->walk_callback(lk->lkb_bufaddr, NULL, wsp->walk_cbdata));
}
997