/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * A generic memory leak detector.  The target interface, defined in
 * <leaky_impl.h>, is implemented by the genunix and libumem dmods to fill
 * in the details of operation.
 */

#include <mdb/mdb_modapi.h>

#include "leaky.h"
#include "leaky_impl.h"

#define	LK_BUFCTLHSIZE	127
/*
 * We re-use the low bit of the lkm_base as the 'marked' bit.
 */
#define	LK_MARKED(b)	((uintptr_t)(b) & 1)
#define	LK_MARK(b)	((b) |= 1)
#define	LK_ADDR(b)	((uintptr_t)(b) & ~1UL)
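/*
 * (This assumes buffer base addresses are at least two-byte aligned, so
 * that the low bit is otherwise unused; LK_ADDR() recovers the real
 * address.)
 */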

/*
 * Possible values for lk_state.
 */
#define	LK_CLEAN	0	/* No outstanding mdb_alloc()'s */
#define	LK_SWEEPING	1	/* Potentially some outstanding mdb_alloc()'s */
#define	LK_DONE		2	/* All mdb_alloc()'s complete */
#define	LK_CLEANING	3	/* Currently cleaning prior mdb_alloc()'s */

static volatile int lk_state;

#define	LK_STATE_SIZE	10000	/* completely arbitrary */

typedef int leak_ndx_t;		/* change if >2 billion buffers are needed */

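/*
 * Pending-work stack for leaky_grep(), kept as a linked list of
 * fixed-size chunks so that arbitrarily deep reference chains can be
 * followed without recursion.
 */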
typedef struct leak_state {
	struct leak_state *lks_next;
	leak_ndx_t lks_stack[LK_STATE_SIZE];
} leak_state_t;

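/*
 * Scan statistics ("bean counts"), reported by ::findleaks -v.
 */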
typedef struct leak_beans {
	int lkb_dups;
	int lkb_follows;
	int lkb_misses;
	int lkb_dismissals;
	int lkb_pushes;
	int lkb_deepest;
} leak_beans_t;

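/*
 * Per-type summary: the number of coalesced leaks of each type, and an
 * array of them sorted for reporting.
 */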
typedef struct leak_type {
	int		lt_type;
	size_t		lt_leaks;
	leak_bufctl_t	**lt_sorted;
} leak_type_t;

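/*
 * Cursor for the 'leak' and 'leakbuf' walkers.
 */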
typedef struct leak_walk {
	int lkw_ndx;
	leak_bufctl_t *lkw_current;
	leak_bufctl_t *lkw_hash_next;
} leak_walk_t;

#define	LK_SCAN_BUFFER_SIZE	16384
static uintptr_t *lk_scan_buffer;

static leak_mtab_t *lk_mtab;
static leak_state_t *lk_free_state;
static leak_ndx_t lk_nbuffers;
static leak_beans_t lk_beans;
static leak_bufctl_t *lk_bufctl[LK_BUFCTLHSIZE];
static leak_type_t lk_types[LK_NUM_TYPES];
static size_t lk_memusage;
#ifndef _KMDB
static hrtime_t lk_begin;
static hrtime_t lk_vbegin;
#endif
static uint_t lk_verbose = FALSE;

static void
leaky_verbose(char *str, uint64_t stat)
{
	if (lk_verbose == FALSE)
		return;

	mdb_printf("findleaks: ");

	if (str == NULL) {
		mdb_printf("\n");
		return;
	}

	mdb_printf("%*s => %lld\n", 30, str, stat);
}

static void
leaky_verbose_perc(char *str, uint64_t stat, uint64_t total)
{
	uint_t perc = (stat * 100) / total;
	uint_t tenths = ((stat * 1000) / total) % 10;

	if (lk_verbose == FALSE)
		return;

	mdb_printf("findleaks: %*s => %-13lld (%2d.%1d%%)\n",
	    30, str, stat, perc, tenths);
}

static void
leaky_verbose_begin(void)
{
	/* kmdb can't tell time */
#ifndef _KMDB
	extern hrtime_t gethrvtime(void);
	lk_begin = gethrtime();
	lk_vbegin = gethrvtime();
#endif
	lk_memusage = 0;
}

static void
leaky_verbose_end(void)
{
	/* kmdb can't tell time */
#ifndef _KMDB
	extern hrtime_t gethrvtime(void);

	hrtime_t ts = gethrtime() - lk_begin;
	hrtime_t sec = ts / (hrtime_t)NANOSEC;
	hrtime_t nsec = ts % (hrtime_t)NANOSEC;

	hrtime_t vts = gethrvtime() - lk_vbegin;
	hrtime_t vsec = vts / (hrtime_t)NANOSEC;
	hrtime_t vnsec = vts % (hrtime_t)NANOSEC;
#endif

	if (lk_verbose == FALSE)
		return;

	mdb_printf("findleaks: %*s => %lu kB\n",
	    30, "peak memory usage", (lk_memusage + 1023)/1024);
#ifndef _KMDB
	mdb_printf("findleaks: %*s => %lld.%lld seconds\n",
	    30, "elapsed CPU time", vsec, (vnsec * 10)/(hrtime_t)NANOSEC);
	mdb_printf("findleaks: %*s => %lld.%lld seconds\n",
	    30, "elapsed wall time", sec, (nsec * 10)/(hrtime_t)NANOSEC);
#endif
	leaky_verbose(NULL, 0);
}

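/*
 * mdb_alloc()/mdb_zalloc() wrappers that account for the bytes
 * allocated, for the verbose "peak memory usage" report.
 */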
static void *
leaky_alloc(size_t sz, uint_t flags)
{
	void *buf = mdb_alloc(sz, flags);

	if (buf != NULL)
		lk_memusage += sz;

	return (buf);
}

static void *
leaky_zalloc(size_t sz, uint_t flags)
{
	void *buf = mdb_zalloc(sz, flags);

	if (buf != NULL)
		lk_memusage += sz;

	return (buf);
}

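/*
 * qsort comparator: order mtab entries by base address.
 */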
static int
leaky_mtabcmp(const void *l, const void *r)
{
	const leak_mtab_t *lhs = (const leak_mtab_t *)l;
	const leak_mtab_t *rhs = (const leak_mtab_t *)r;

	if (lhs->lkm_base < rhs->lkm_base)
		return (-1);
	if (lhs->lkm_base > rhs->lkm_base)
		return (1);

	return (0);
}

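/*
 * Binary search the sorted mtab for the buffer containing addr;
 * returns its index, or -1 if addr lies within no buffer.
 */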
static leak_ndx_t
leaky_search(uintptr_t addr)
{
	leak_ndx_t left = 0, right = lk_nbuffers - 1, guess;

	while (right >= left) {
		guess = (right + left) >> 1;

		if (addr < LK_ADDR(lk_mtab[guess].lkm_base)) {
			right = guess - 1;
			continue;
		}

		if (addr >= lk_mtab[guess].lkm_limit) {
			left = guess + 1;
			continue;
		}

		return (guess);
	}

	return (-1);
}

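/*
 * Conservatively scan [addr, addr + size): treat every pointer-aligned
 * word as a potential pointer, and mark and scan any unmarked buffer it
 * refers to.  An explicit stack of pending buffers (see the 'pop' label
 * below) substitutes for recursion.
 */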
void
leaky_grep(uintptr_t addr, size_t size)
{
	uintptr_t *buf, *cur, *end;
	size_t bytes, newsz, nptrs;
	leak_state_t *state = NULL, *new_state;
	uint_t state_idx;
	uintptr_t min = LK_ADDR(lk_mtab[0].lkm_base);
	uintptr_t max = lk_mtab[lk_nbuffers - 1].lkm_limit;
	int dups = 0, misses = 0, depth = 0, deepest = 0;
	int follows = 0, dismissals = 0, pushes = 0;
	leak_ndx_t mtab_ndx;
	leak_mtab_t *lmp;
	uintptr_t nbase;
	uintptr_t base;
	size_t base_size;
	const uintptr_t mask = sizeof (uintptr_t) - 1;

	if (addr == 0 || size == 0)
		return;

	state_idx = 0;

	/*
	 * Our main loop, led by the 'pop' label:
	 *	1)  read in a buffer piece by piece,
	 *	2)  mark all unmarked mtab entries reachable from it, and
	 *	    either scan them in-line or push them onto our stack of
	 *	    unfinished work.
	 *	3)  pop the top mtab entry off the stack, and loop.
	 */
pop:
	base = addr;
	base_size = size;

	/*
	 * If our address isn't pointer-aligned, we need to align it and
	 * whack the size appropriately.
	 */
	if (size < mask) {
		size = 0;
	} else if (addr & mask) {
		size -= (mask + 1) - (addr & mask);
		addr += (mask + 1) - (addr & mask);
	}
	size -= (size & mask);
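	/*
	 * For example, on a 64-bit target, addr = 0x1003 and size = 0x20
	 * become addr = 0x1008 and size = 0x18: only whole, aligned words
	 * are scanned.
	 */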

	while (size > 0) {
		buf = lk_scan_buffer;
		end = &buf[LK_SCAN_BUFFER_SIZE / sizeof (uintptr_t)];

		bytes = MIN(size, LK_SCAN_BUFFER_SIZE);
		cur = end - (bytes / sizeof (uintptr_t));

		if (mdb_vread(cur, bytes, addr) == -1) {
			mdb_warn("[%p, %p): couldn't read %ld bytes at %p",
			    base, base + base_size, bytes, addr);
			break;
		}

		addr += bytes;
		size -= bytes;

		/*
		 * The buffer looks like:  ('+'s are unscanned data)
		 *
		 * -----------------------------++++++++++++++++
		 * |				|		|
		 * buf				cur		end
		 *
		 * cur scans forward.  When we encounter a new buffer that
		 * will fit behind "cur", we read it in and back up cur,
		 * processing it immediately.
		 */
		while (cur < end) {
			uintptr_t ptr = *cur++;

			if (ptr < min || ptr > max) {
				dismissals++;
				continue;
			}

			if ((mtab_ndx = leaky_search(ptr)) == -1) {
				misses++;
				continue;
			}

			lmp = &lk_mtab[mtab_ndx];
			if (LK_MARKED(lmp->lkm_base)) {
				dups++;			/* already seen */
				continue;
			}

			/*
			 * Found an unmarked buffer.  Mark it, then either
			 * read it in, or add it to the stack of pending work.
			 */
			follows++;
			LK_MARK(lmp->lkm_base);

			nbase = LK_ADDR(lmp->lkm_base);
			newsz = lmp->lkm_limit - nbase;

			nptrs = newsz / sizeof (uintptr_t);
			newsz = nptrs * sizeof (uintptr_t);

			if ((nbase & mask) == 0 && nptrs <= (cur - buf) &&
			    mdb_vread(cur - nptrs, newsz, nbase) != -1) {
				cur -= nptrs;
				continue;
			}

			/*
			 * couldn't process it in-place -- add it to the
			 * stack.
			 */
			if (state == NULL || state_idx == LK_STATE_SIZE) {
				if ((new_state = lk_free_state) != NULL)
					lk_free_state = new_state->lks_next;
				else
					new_state = leaky_zalloc(
					    sizeof (*state), UM_SLEEP | UM_GC);

				new_state->lks_next = state;
				state = new_state;
				state_idx = 0;
			}

			pushes++;
			state->lks_stack[state_idx++] = mtab_ndx;
			if (++depth > deepest)
				deepest = depth;
		}
	}

	/*
	 * Retrieve the next mtab index, extract its info, and loop around
	 * to process it.
	 */
	if (state_idx == 0 && state != NULL) {
		new_state = state->lks_next;

		state->lks_next = lk_free_state;
		lk_free_state = state;

		state = new_state;
		state_idx = LK_STATE_SIZE;
	}

	if (depth > 0) {
		mtab_ndx = state->lks_stack[--state_idx];

		addr = LK_ADDR(lk_mtab[mtab_ndx].lkm_base);
		size = lk_mtab[mtab_ndx].lkm_limit - addr;
		depth--;

		goto pop;
	}

	/*
	 * update the beans
	 */
	lk_beans.lkb_dups += dups;
	lk_beans.lkb_dismissals += dismissals;
	lk_beans.lkb_misses += misses;
	lk_beans.lkb_follows += follows;
	lk_beans.lkb_pushes += pushes;

	if (deepest > lk_beans.lkb_deepest)
		lk_beans.lkb_deepest = deepest;
}

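/*
 * Process a single potential pointer: if it falls within an unmarked
 * buffer, mark the buffer and, if 'process' is set, scan its contents.
 */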
static void
leaky_do_grep_ptr(uintptr_t loc, int process)
{
	leak_ndx_t ndx;
	leak_mtab_t *lkmp;
	size_t sz;

	if (loc < LK_ADDR(lk_mtab[0].lkm_base) ||
	    loc > lk_mtab[lk_nbuffers - 1].lkm_limit) {
		lk_beans.lkb_dismissals++;
		return;
	}
	if ((ndx = leaky_search(loc)) == -1) {
		lk_beans.lkb_misses++;
		return;
	}

	lkmp = &lk_mtab[ndx];
	sz = lkmp->lkm_limit - LK_ADDR(lkmp->lkm_base);

	if (LK_MARKED(lkmp->lkm_base)) {
		lk_beans.lkb_dups++;
	} else {
		LK_MARK(lkmp->lkm_base);
		lk_beans.lkb_follows++;
		/*
		 * Strip the mark bit (just set above) so the scan starts
		 * at the buffer's true base.
		 */
		if (process)
			leaky_grep(LK_ADDR(lkmp->lkm_base), sz);
	}
}

void
leaky_grep_ptr(uintptr_t loc)
{
	leaky_do_grep_ptr(loc, 1);
}

void
leaky_mark_ptr(uintptr_t loc)
{
	leaky_do_grep_ptr(loc, 0);
}

/*
 * This may be used to manually process a marked buffer.
 */
int
leaky_lookup_marked(uintptr_t loc, uintptr_t *addr_out, size_t *size_out)
{
	leak_ndx_t ndx;
	leak_mtab_t *lkmp;

	if ((ndx = leaky_search(loc)) == -1)
		return (0);

	lkmp = &lk_mtab[ndx];
	*addr_out = LK_ADDR(lkmp->lkm_base);
	*size_out = lkmp->lkm_limit - LK_ADDR(lkmp->lkm_base);
	return (1);
}

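/*
 * Record a leak.  Leaks are hashed by type, stack trace and cid;
 * duplicates are chained onto the matching leak_bufctl_t, and the
 * oldest leak is kept as the representative.
 */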
void
leaky_add_leak(int type, uintptr_t addr, uintptr_t bufaddr, hrtime_t timestamp,
    leak_pc_t *stack, uint_t depth, uintptr_t cid, uintptr_t data)
{
	leak_bufctl_t *nlkb, *lkb;
	uintptr_t total = 0;
	size_t ndx;
	int i;

	if (type < 0 || type >= LK_NUM_TYPES || depth != (uint8_t)depth) {
		mdb_warn("invalid arguments to leaky_add_leak()\n");
		return;
	}

	nlkb = leaky_zalloc(LEAK_BUFCTL_SIZE(depth), UM_SLEEP);
	nlkb->lkb_type = type;
	nlkb->lkb_addr = addr;
	nlkb->lkb_bufaddr = bufaddr;
	nlkb->lkb_cid = cid;
	nlkb->lkb_data = data;
	nlkb->lkb_depth = depth;
	nlkb->lkb_timestamp = timestamp;

	total = type;
	for (i = 0; i < depth; i++) {
		total += stack[i];
		nlkb->lkb_stack[i] = stack[i];
	}

	ndx = total % LK_BUFCTLHSIZE;

	if ((lkb = lk_bufctl[ndx]) == NULL) {
		lk_types[type].lt_leaks++;
		lk_bufctl[ndx] = nlkb;
		return;
	}

	for (;;) {
		if (lkb->lkb_type != type || lkb->lkb_depth != depth ||
		    lkb->lkb_cid != cid)
			goto no_match;

		for (i = 0; i < depth; i++)
			if (lkb->lkb_stack[i] != stack[i])
				goto no_match;

		/*
		 * If we're here, we've found a matching stack; link it in.
		 * Note that the volatile cast assures that these stores
		 * will occur in program order (thus assuring that we can
		 * take an interrupt and still be in a sane enough state to
		 * throw away the data structure later, in leaky_cleanup()).
		 */
		((volatile leak_bufctl_t *)nlkb)->lkb_next = lkb->lkb_next;
		((volatile leak_bufctl_t *)lkb)->lkb_next = nlkb;
		lkb->lkb_dups++;

		/*
		 * If we're older, swap places so that we are the
		 * representative leak.
		 */
		if (timestamp < lkb->lkb_timestamp) {
			nlkb->lkb_addr = lkb->lkb_addr;
			nlkb->lkb_bufaddr = lkb->lkb_bufaddr;
			nlkb->lkb_data = lkb->lkb_data;
			nlkb->lkb_timestamp = lkb->lkb_timestamp;

			lkb->lkb_addr = addr;
			lkb->lkb_bufaddr = bufaddr;
			lkb->lkb_data = data;
			lkb->lkb_timestamp = timestamp;
		}
		break;

no_match:
		if (lkb->lkb_hash_next == NULL) {
			lkb->lkb_hash_next = nlkb;
			lk_types[type].lt_leaks++;
			break;
		}
		lkb = lkb->lkb_hash_next;
	}
}

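/*
 * qsort comparator for the sorted-leak arrays; the ordering itself is
 * supplied by the target's leaky_subr_bufctl_cmp().
 */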
int
leaky_ctlcmp(const void *l, const void *r)
{
	const leak_bufctl_t *lhs = *((const leak_bufctl_t **)l);
	const leak_bufctl_t *rhs = *((const leak_bufctl_t **)r);

	return (leaky_subr_bufctl_cmp(lhs, rhs));
}

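/*
 * Gather each type's coalesced leaks out of the hash table into a
 * sorted array for reporting.
 */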
void
leaky_sort(void)
{
	int type, i, j;
	leak_bufctl_t *lkb;
	leak_type_t *ltp;

	for (type = 0; type < LK_NUM_TYPES; type++) {
		ltp = &lk_types[type];

		if (ltp->lt_leaks == 0)
			continue;

		ltp->lt_sorted = leaky_alloc(ltp->lt_leaks *
		    sizeof (leak_bufctl_t *), UM_SLEEP);

		j = 0;
		for (i = 0; i < LK_BUFCTLHSIZE; i++) {
			for (lkb = lk_bufctl[i]; lkb != NULL;
			    lkb = lkb->lkb_hash_next) {
				if (lkb->lkb_type == type)
					ltp->lt_sorted[j++] = lkb;
			}
		}
		if (j != ltp->lt_leaks)
			mdb_warn("expected %lu leaks, got %d\n",
			    ltp->lt_leaks, j);

		qsort(ltp->lt_sorted, ltp->lt_leaks, sizeof (leak_bufctl_t *),
		    leaky_ctlcmp);
	}
}

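/*
 * Release the leak table.  Results of a completed (LK_DONE) run are
 * cached and only freed when 'force' is set; an interrupted sweep
 * (LK_SWEEPING) is always cleaned up.
 */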
void
leaky_cleanup(int force)
{
	int i;
	leak_bufctl_t *lkb, *l, *next;

	/*
	 * State structures are allocated UM_GC, so we just need to nuke
	 * the freelist pointer.
	 */
	lk_free_state = NULL;

	switch (lk_state) {
	case LK_CLEAN:
		return;		/* nothing to do */

	case LK_CLEANING:
		mdb_warn("interrupted during ::findleaks cleanup; some mdb "
		    "memory will be leaked\n");

		for (i = 0; i < LK_BUFCTLHSIZE; i++)
			lk_bufctl[i] = NULL;

		for (i = 0; i < LK_NUM_TYPES; i++) {
			lk_types[i].lt_leaks = 0;
			lk_types[i].lt_sorted = NULL;
		}

		bzero(&lk_beans, sizeof (lk_beans));
		lk_state = LK_CLEAN;
		return;

	case LK_SWEEPING:
		break;		/* must clean up */

	case LK_DONE:
	default:
		if (!force)
			return;
		break;		/* only clean up if forced */
	}

	lk_state = LK_CLEANING;

	for (i = 0; i < LK_NUM_TYPES; i++) {
		if (lk_types[i].lt_sorted != NULL) {
			mdb_free(lk_types[i].lt_sorted,
			    lk_types[i].lt_leaks * sizeof (leak_bufctl_t *));
			lk_types[i].lt_sorted = NULL;
		}
		lk_types[i].lt_leaks = 0;
	}

	for (i = 0; i < LK_BUFCTLHSIZE; i++) {
		for (lkb = lk_bufctl[i]; lkb != NULL; lkb = next) {
			for (l = lkb->lkb_next; l != NULL; l = next) {
				next = l->lkb_next;
				mdb_free(l, LEAK_BUFCTL_SIZE(l->lkb_depth));
			}
			next = lkb->lkb_hash_next;
			mdb_free(lkb, LEAK_BUFCTL_SIZE(lkb->lkb_depth));
		}
		lk_bufctl[i] = NULL;
	}

	bzero(&lk_beans, sizeof (lk_beans));
	lk_state = LK_CLEAN;
}

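/*
 * Check a stack trace against the filter: with no filter set everything
 * matches; otherwise some frame must equal the filter PC, or fall
 * within the function whose entry point the filter names.
 */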
int
leaky_filter(const leak_pc_t *stack, int depth, uintptr_t filter)
{
	int i;
	GElf_Sym sym;
	char c;

	if (filter == 0)
		return (1);

	for (i = 0; i < depth; i++) {
		if (stack[i] == filter)
			return (1);

		if (mdb_lookup_by_addr(stack[i], MDB_SYM_FUZZY,
		    &c, sizeof (c), &sym) == -1)
			continue;

		if ((uintptr_t)sym.st_value == filter)
			return (1);
	}

	return (0);
}

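/*
 * Report the leak table: a summary line for each coalesced leak that
 * passes the filter, then (if dump_verbose is set) a detailed dump of
 * each.
 */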
void
leaky_dump(uintptr_t filter, uint_t dump_verbose)
{
	int i;
	size_t leaks;
	leak_bufctl_t **sorted;
	leak_bufctl_t *lkb;
	int seen = 0;

	for (i = 0; i < LK_NUM_TYPES; i++) {
		leaks = lk_types[i].lt_leaks;
		sorted = lk_types[i].lt_sorted;

		leaky_subr_dump_start(i);
		while (leaks-- > 0) {
			lkb = *sorted++;

			if (!leaky_filter(lkb->lkb_stack, lkb->lkb_depth,
			    filter))
				continue;

			seen = 1;
			leaky_subr_dump(lkb, 0);
		}
		leaky_subr_dump_end(i);
	}

	if (!seen) {
		if (filter != 0)
			mdb_printf(
			    "findleaks: no memory leaks matching %a found\n",
			    filter);
		else
			mdb_printf(
			    "findleaks: no memory leaks detected\n");
	}

	if (!dump_verbose || !seen)
		return;

	mdb_printf("\n");

	for (i = 0; i < LK_NUM_TYPES; i++) {
		leaks = lk_types[i].lt_leaks;
		sorted = lk_types[i].lt_sorted;

		while (leaks-- > 0) {
			lkb = *sorted++;

			if (!leaky_filter(lkb->lkb_stack, lkb->lkb_depth,
			    filter))
				continue;

			leaky_subr_dump(lkb, 1);
		}
	}
}

static const char *const findleaks_desc =
	"Does a conservative garbage collection of the heap in order to find\n"
	"potentially leaked buffers.  Similar leaks are coalesced by stack\n"
	"trace, with the oldest leak picked as representative.  The leak\n"
	"table is cached between invocations.\n"
	"\n"
	"addr, if provided, should be a function or PC location.  Reported\n"
	"leaks will then be limited to those with that function or PC in\n"
	"their stack trace.\n"
	"\n"
	"The 'leak' and 'leakbuf' walkers can be used to retrieve coalesced\n"
	"leaks.\n";

static const char *const findleaks_args =
	"  -d    detail each representative leak (long)\n"
	"  -f    throw away cached state, and do a full run\n"
	"  -v    report verbose information about the findleaks run\n";

void
findleaks_help(void)
{
	mdb_printf("%s\n", findleaks_desc);
	mdb_dec_indent(2);
	mdb_printf("%<b>OPTIONS%</b>\n");
	mdb_inc_indent(2);
	mdb_printf("%s", findleaks_args);
}

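/*
 * Illustrative usage (my_func is a hypothetical function name):
 *
 *	> ::findleaks -v	(verbose full run)
 *	> ::findleaks -fv	(discard cached results, then run verbosely)
 *	> my_func::findleaks	(restrict the report to leaks with my_func
 *				in their stack traces)
 */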
#define	LK_REPORT_BEAN(x) leaky_verbose_perc(#x, lk_beans.lkb_##x, total);

/*ARGSUSED*/
int
findleaks(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	size_t est = 0;
	leak_ndx_t i;
	leak_mtab_t *lmp;
	ssize_t total;
	uintptr_t filter = 0;
	uint_t dump = 0;
	uint_t force = 0;
	uint_t verbose = 0;
	int ret;

	if (flags & DCMD_ADDRSPEC)
		filter = addr;

	if (mdb_getopts(argc, argv,
	    'd', MDB_OPT_SETBITS, TRUE, &dump,
	    'f', MDB_OPT_SETBITS, TRUE, &force,
	    'v', MDB_OPT_SETBITS, TRUE, &verbose, NULL) != argc)
		return (DCMD_USAGE);

	if (verbose || force)
		lk_verbose = verbose;

	/*
	 * Clean any previous ::findleaks.
	 */
	leaky_cleanup(force);

	if (lk_state == LK_DONE) {
		if (lk_verbose)
			mdb_printf("findleaks: using cached results "
			    "(use '-f' to force a full run)\n");
		goto dump;
	}

	leaky_verbose_begin();

	if ((ret = leaky_subr_estimate(&est)) != DCMD_OK)
		return (ret);

	leaky_verbose("maximum buffers", est);

	/*
	 * Now we have an upper bound on the number of buffers.  Allocate
	 * our mtab array.
	 */
	lk_mtab = leaky_zalloc(est * sizeof (leak_mtab_t), UM_SLEEP | UM_GC);
	lmp = lk_mtab;

	if ((ret = leaky_subr_fill(&lmp)) != DCMD_OK)
		return (ret);

	lk_nbuffers = lmp - lk_mtab;

	qsort(lk_mtab, lk_nbuffers, sizeof (leak_mtab_t), leaky_mtabcmp);

	/*
	 * validate the mtab table now that it is sorted
	 */
	for (i = 0; i < lk_nbuffers; i++) {
		if (lk_mtab[i].lkm_base >= lk_mtab[i].lkm_limit) {
			mdb_warn("[%p, %p): invalid mtab\n",
			    lk_mtab[i].lkm_base, lk_mtab[i].lkm_limit);
			return (DCMD_ERR);
		}

		if (i < lk_nbuffers - 1 &&
		    lk_mtab[i].lkm_limit > lk_mtab[i + 1].lkm_base) {
			mdb_warn("[%p, %p) and [%p, %p): overlapping mtabs\n",
			    lk_mtab[i].lkm_base, lk_mtab[i].lkm_limit,
			    lk_mtab[i + 1].lkm_base, lk_mtab[i + 1].lkm_limit);
			return (DCMD_ERR);
		}
	}

	leaky_verbose("actual buffers", lk_nbuffers);

	lk_scan_buffer = leaky_zalloc(LK_SCAN_BUFFER_SIZE, UM_SLEEP | UM_GC);

	if ((ret = leaky_subr_run()) != DCMD_OK)
		return (ret);

	lk_state = LK_SWEEPING;

	for (i = 0; i < lk_nbuffers; i++) {
		if (LK_MARKED(lk_mtab[i].lkm_base))
			continue;
		leaky_subr_add_leak(&lk_mtab[i]);
	}

	total = lk_beans.lkb_dismissals + lk_beans.lkb_misses +
	    lk_beans.lkb_dups + lk_beans.lkb_follows;

	leaky_verbose(NULL, 0);
	leaky_verbose("potential pointers", total);
	LK_REPORT_BEAN(dismissals);
	LK_REPORT_BEAN(misses);
	LK_REPORT_BEAN(dups);
	LK_REPORT_BEAN(follows);

	leaky_verbose(NULL, 0);
	leaky_verbose_end();

	leaky_sort();
	lk_state = LK_DONE;
dump:
	leaky_dump(filter, dump);

	return (DCMD_OK);
}

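/*
 * Walker initialization: with no address we walk every leak; with an
 * address we walk only the dup list of the matching leak.
 */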
int
leaky_walk_init(mdb_walk_state_t *wsp)
{
	leak_walk_t *lw;
	leak_bufctl_t *lkb, *cur;

	uintptr_t addr;
	int i;

	if (lk_state != LK_DONE) {
		mdb_warn("::findleaks must be run %sbefore leaks can be"
		    " walked\n", lk_state != LK_CLEAN ? "to completion " : "");
		return (WALK_ERR);
	}

	if (wsp->walk_addr == 0) {
		lkb = NULL;
		goto found;
	}

	addr = wsp->walk_addr;

	/*
	 * Search the representative leaks first, since that's what we
	 * report in the table.  If that fails, search everything.
	 *
	 * Note that we goto found with lkb as the head of desired dup list.
	 */
	for (i = 0; i < LK_BUFCTLHSIZE; i++) {
		for (lkb = lk_bufctl[i]; lkb != NULL; lkb = lkb->lkb_hash_next)
			if (lkb->lkb_addr == addr)
				goto found;
	}

	for (i = 0; i < LK_BUFCTLHSIZE; i++) {
		for (lkb = lk_bufctl[i]; lkb != NULL; lkb = lkb->lkb_hash_next)
			for (cur = lkb; cur != NULL; cur = cur->lkb_next)
				if (cur->lkb_addr == addr)
					goto found;
	}

	mdb_warn("%p is not a leaked ctl address\n", addr);
	return (WALK_ERR);

found:
	wsp->walk_data = lw = mdb_zalloc(sizeof (*lw), UM_SLEEP);
	lw->lkw_ndx = 0;
	lw->lkw_current = lkb;
	lw->lkw_hash_next = NULL;

	return (WALK_NEXT);
}

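/*
 * Common stepper: return the next bufctl, draining each leak's dup
 * chain before advancing along the hash chain and, for a global walk,
 * on to the next hash bucket.
 */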
leak_bufctl_t *
leaky_walk_step_common(mdb_walk_state_t *wsp)
{
	leak_walk_t *lw = wsp->walk_data;
	leak_bufctl_t *lk;

	if ((lk = lw->lkw_current) == NULL) {
		if ((lk = lw->lkw_hash_next) == NULL) {
			if (wsp->walk_addr)
				return (NULL);

			while (lk == NULL && lw->lkw_ndx < LK_BUFCTLHSIZE)
				lk = lk_bufctl[lw->lkw_ndx++];

			/*
			 * lk is NULL only when every hash bucket has
			 * been exhausted; testing lkw_ndx here instead
			 * would lose leaks found in the final bucket.
			 */
			if (lk == NULL)
				return (NULL);
		}

		lw->lkw_hash_next = lk->lkb_hash_next;
	}

	lw->lkw_current = lk->lkb_next;
	return (lk);
}

int
leaky_walk_step(mdb_walk_state_t *wsp)
{
	leak_bufctl_t *lk;

	if ((lk = leaky_walk_step_common(wsp)) == NULL)
		return (WALK_DONE);

	return (leaky_subr_invoke_callback(lk, wsp->walk_callback,
	    wsp->walk_cbdata));
}

void
leaky_walk_fini(mdb_walk_state_t *wsp)
{
	leak_walk_t *lw = wsp->walk_data;

	mdb_free(lw, sizeof (leak_walk_t));
}

int
leaky_buf_walk_step(mdb_walk_state_t *wsp)
{
	leak_bufctl_t *lk;

	if ((lk = leaky_walk_step_common(wsp)) == NULL)
		return (WALK_DONE);

	return (wsp->walk_callback(lk->lkb_bufaddr, NULL, wsp->walk_cbdata));
}
995