/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "umem.h"

#include <sys/vmem_impl_user.h>
#include <umem_impl.h>

#include <alloca.h>
#include <libproc.h>
#include <stdio.h>
#include <string.h>
#include <sys/stack.h>

#include "leaky_impl.h"
#include "misc.h"
#include "proc_kludges.h"

#include "umem_pagesize.h"

/*
 * This file defines the libumem target for ../genunix/leaky.c.
 *
 * See ../genunix/leaky_impl.h for the target interface definition.
 */

/*
 * leaky_subr_dump_start()/_end() depend on the ordering of TYPE_VMEM,
 * TYPE_MMAP and TYPE_SBRK.
 */
#define	TYPE_MMAP	0		/* lkb_data is the size */
#define	TYPE_SBRK	1		/* lkb_data is the size */
#define	TYPE_VMEM	2		/* lkb_data is the vmem_seg's size */
#define	TYPE_CACHE	3		/* lkb_cid is the bufctl's cache */
#define	TYPE_UMEM	4		/* lkb_cid is the bufctl's cache */

#define	LKM_CTL_BUFCTL	0	/* normal allocation, PTR is bufctl */
#define	LKM_CTL_VMSEG	1	/* oversize allocation, PTR is vmem_seg_t */
#define	LKM_CTL_MEMORY	2	/* non-umem mmap or brk, PTR is region start */
#define	LKM_CTL_CACHE	3	/* normal alloc, non-debug, PTR is cache */
#define	LKM_CTL_MASK	3L

/*
 * create a lkm_bufctl from a pointer and a type
 */
#define	LKM_CTL(ptr, type)	(LKM_CTLPTR(ptr) | (type))
#define	LKM_CTLPTR(ctl)		((uintptr_t)(ctl) & ~(LKM_CTL_MASK))
#define	LKM_CTLTYPE(ctl)	((uintptr_t)(ctl) &  (LKM_CTL_MASK))
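
/*
 * For illustration: bufctls, vmem_segs, and caches are all at least
 * 4-byte aligned, so the low two bits of the pointer are free to carry
 * the LKM_CTL_* type.  A sketch of the round trip, using a hypothetical
 * vmem_seg_t address of 0x10008:
 *
 *	uintptr_t ctl = LKM_CTL(0x10008, LKM_CTL_VMSEG);
 *
 * yields ctl == 0x10009; LKM_CTLPTR(ctl) masks the tag back off
 * (0x10009 & ~3 == 0x10008), and LKM_CTLTYPE(ctl) recovers the type
 * (0x10009 & 3 == LKM_CTL_VMSEG).
 */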

static uintptr_t leak_brkbase;
static uintptr_t leak_brksize;

#define	LEAKY_INBRK(ptr) \
	(((uintptr_t)(ptr) - leak_brkbase) < leak_brksize)
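
/*
 * Note that LEAKY_INBRK needs only a single comparison: if ptr is below
 * leak_brkbase, the unsigned subtraction wraps around to a value too
 * large to be less than leak_brksize.  For example, with a hypothetical
 * brk of [0x20000, 0x21000), LEAKY_INBRK(0x20800) is true, while
 * LEAKY_INBRK(0x1f000) wraps to (uintptr_t)-0x1000 and is false.
 */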

typedef struct leaky_seg_info {
	uintptr_t ls_start;
	uintptr_t ls_end;
} leaky_seg_info_t;

typedef struct leaky_maps {
	leaky_seg_info_t	*lm_segs;
	uintptr_t		lm_seg_count;
	uintptr_t		lm_seg_max;

	pstatus_t		*lm_pstatus;

	leak_mtab_t		**lm_lmp;
} leaky_maps_t;

/*ARGSUSED*/
static int
leaky_mtab(uintptr_t addr, const umem_bufctl_audit_t *bcp, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = (uintptr_t)bcp->bc_addr;
	lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_BUFCTL);

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_mtab_addr(uintptr_t addr, void *ignored, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = addr;

	return (WALK_NEXT);
}

static int
leaky_seg(uintptr_t addr, const vmem_seg_t *seg, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = seg->vs_start;
	lm->lkm_limit = seg->vs_end;
	lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_VMSEG);
	return (WALK_NEXT);
}

static int
leaky_vmem(uintptr_t addr, const vmem_t *vmem, leak_mtab_t **lmp)
{
	if (strcmp(vmem->vm_name, "umem_oversize") != 0 &&
	    strcmp(vmem->vm_name, "umem_memalign") != 0)
		return (WALK_NEXT);

	if (mdb_pwalk("vmem_alloc", (mdb_walk_cb_t)leaky_seg, lmp, addr) == -1)
		mdb_warn("can't walk vmem_alloc for %s (%p)", vmem->vm_name,
		    addr);

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_estimate_vmem(uintptr_t addr, const vmem_t *vmem, size_t *est)
{
	if (strcmp(vmem->vm_name, "umem_oversize") != 0 &&
	    strcmp(vmem->vm_name, "umem_memalign") != 0)
		return (WALK_NEXT);

	*est += (int)(vmem->vm_kstat.vk_alloc - vmem->vm_kstat.vk_free);

	return (WALK_NEXT);
}

static int
leaky_seg_cmp(const void *l, const void *r)
{
	const leaky_seg_info_t *lhs = (const leaky_seg_info_t *)l;
	const leaky_seg_info_t *rhs = (const leaky_seg_info_t *)r;

	if (lhs->ls_start < rhs->ls_start)
		return (-1);
	if (lhs->ls_start > rhs->ls_start)
		return (1);

	return (0);
}

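/*
 * Binary-search the sorted segment list for the segment containing addr,
 * returning its index or -1.  For example, given the (hypothetical) list
 * { [0x1000, 0x2000), [0x3000, 0x4000) }, an addr of 0x1800 returns 0,
 * while 0x2800 falls between the segments and returns -1.
 */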
static ssize_t
leaky_seg_search(uintptr_t addr, leaky_seg_info_t *listp, unsigned count)
{
	ssize_t left = 0, right = count - 1, guess;

	while (right >= left) {
		guess = (right + left) >> 1;

		if (addr < listp[guess].ls_start) {
			right = guess - 1;
			continue;
		}

		if (addr >= listp[guess].ls_end) {
			left = guess + 1;
			continue;
		}

		return (guess);
	}

	return (-1);
}

/*ARGSUSED*/
static int
leaky_count(uintptr_t addr, void *unused, size_t *total)
{
	++*total;

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_read_segs(uintptr_t addr, const vmem_seg_t *seg, leaky_maps_t *lmp)
{
	leaky_seg_info_t *my_si = lmp->lm_segs + lmp->lm_seg_count;

	if (seg->vs_start == seg->vs_end && seg->vs_start == 0)
		return (WALK_NEXT);

	if (lmp->lm_seg_count++ >= lmp->lm_seg_max)
		return (WALK_ERR);

	my_si->ls_start = seg->vs_start;
	my_si->ls_end = seg->vs_end;

	return (WALK_NEXT);
}

/* ARGSUSED */
static int
leaky_process_anon_mappings(uintptr_t ignored, const prmap_t *pmp,
    leaky_maps_t *lmp)
{
	uintptr_t start = pmp->pr_vaddr;
	uintptr_t end = pmp->pr_vaddr + pmp->pr_size;

	leak_mtab_t *lm;
	pstatus_t *Psp = lmp->lm_pstatus;

	uintptr_t brk_start = Psp->pr_brkbase;
	uintptr_t brk_end = Psp->pr_brkbase + Psp->pr_brksize;

	int has_brk = 0;
	int in_vmem = 0;

	/*
	 * This checks if there is any overlap between the segment and the brk.
	 */
	if (end > brk_start && start < brk_end)
		has_brk = 1;

	if (leaky_seg_search(start, lmp->lm_segs, lmp->lm_seg_count) != -1)
		in_vmem = 1;

	/*
	 * We only want anonymous, mmaped memory.  That means:
	 *
	 * 1. Must be read-write
	 * 2. Cannot be shared
	 * 3. Cannot have backing
	 * 4. Cannot be in the brk
	 * 5. Cannot be part of the vmem heap.
	 */
	if ((pmp->pr_mflags & (MA_READ | MA_WRITE)) == (MA_READ | MA_WRITE) &&
	    (pmp->pr_mflags & MA_SHARED) == 0 &&
	    (pmp->pr_mapname[0] == 0) &&
	    !has_brk &&
	    !in_vmem) {
		dprintf(("mmaped region: [%p, %p)\n", start, end));
		lm = (*lmp->lm_lmp)++;
		lm->lkm_base = start;
		lm->lkm_limit = end;
		lm->lkm_bufctl = LKM_CTL(pmp->pr_vaddr, LKM_CTL_MEMORY);
	}

	return (WALK_NEXT);
}

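/*
 * The brk is a patchwork: parts of it are covered by heap_arena spans
 * (already collected in lm_segs), and the rest was obtained by raw
 * sbrk(2)/brk(2) calls.  Walk the sorted segment list and emit an
 * LKM_CTL_MEMORY entry for each uncovered gap, e.g. for a hypothetical
 * layout:
 *
 *	brkbase                                         brkend
 *	|--gap--|==heap span==|--gap--|==heap span==|--gap--|
 *
 * only the three gaps are added for later scanning.
 */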
static void
leaky_handle_sbrk(leaky_maps_t *lmp)
{
	uintptr_t brkbase = lmp->lm_pstatus->pr_brkbase;
	uintptr_t brkend = brkbase + lmp->lm_pstatus->pr_brksize;

	leak_mtab_t *lm;

	leaky_seg_info_t *segs = lmp->lm_segs;

	int x, first = -1, last = -1;

	dprintf(("brk: [%p, %p)\n", brkbase, brkend));

	for (x = 0; x < lmp->lm_seg_count; x++) {
		if (segs[x].ls_start >= brkbase && segs[x].ls_end <= brkend) {
			if (first == -1)
				first = x;
			last = x;
		}
	}

	if (brkbase == brkend) {
		dprintf(("empty brk -- do nothing\n"));
	} else if (first == -1) {
		dprintf(("adding [%p, %p) whole brk\n", brkbase, brkend));

		lm = (*lmp->lm_lmp)++;
		lm->lkm_base = brkbase;
		lm->lkm_limit = brkend;
		lm->lkm_bufctl = LKM_CTL(brkbase, LKM_CTL_MEMORY);
	} else {
		uintptr_t curbrk = P2ROUNDUP(brkbase, umem_pagesize);

		if (curbrk != segs[first].ls_start) {
			dprintf(("adding [%p, %p) in brk, before first seg\n",
			    brkbase, segs[first].ls_start));

			lm = (*lmp->lm_lmp)++;
			lm->lkm_base = brkbase;
			lm->lkm_limit = segs[first].ls_start;
			lm->lkm_bufctl = LKM_CTL(brkbase, LKM_CTL_MEMORY);

			curbrk = segs[first].ls_start;

		} else if (curbrk != brkbase) {
			dprintf(("ignore [%p, %p) -- realign\n", brkbase,
			    curbrk));
		}

		for (x = first; x <= last; x++) {
			if (curbrk < segs[x].ls_start) {
				dprintf(("adding [%p, %p) in brk\n", curbrk,
				    segs[x].ls_start));

				lm = (*lmp->lm_lmp)++;
				lm->lkm_base = curbrk;
				lm->lkm_limit = segs[x].ls_start;
				lm->lkm_bufctl = LKM_CTL(curbrk,
				    LKM_CTL_MEMORY);
			}
			curbrk = segs[x].ls_end;
		}

		if (curbrk < brkend) {
			dprintf(("adding [%p, %p) in brk, after last seg\n",
			    curbrk, brkend));

			lm = (*lmp->lm_lmp)++;
			lm->lkm_base = curbrk;
			lm->lkm_limit = brkend;
			lm->lkm_bufctl = LKM_CTL(curbrk, LKM_CTL_MEMORY);
		}
	}
}

static int
leaky_handle_anon_mappings(leak_mtab_t **lmp)
{
	leaky_maps_t		lm;

	vmem_t *heap_arena;
	vmem_t *vm_next;
	vmem_t *heap_top;
	vmem_t vmem;

	pstatus_t Ps;

	if (mdb_get_xdata("pstatus", &Ps, sizeof (Ps)) == -1) {
		mdb_warn("couldn't read pstatus xdata");
		return (DCMD_ERR);
	}
	lm.lm_pstatus = &Ps;

	leak_brkbase = Ps.pr_brkbase;
	leak_brksize = Ps.pr_brksize;

	if (umem_readvar(&heap_arena, "heap_arena") == -1) {
		mdb_warn("couldn't read heap_arena");
		return (DCMD_ERR);
	}

	if (heap_arena == NULL) {
		mdb_warn("heap_arena is NULL.\n");
		return (DCMD_ERR);
	}

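	/*
	 * Find the top-level arena by following heap_arena's chain of
	 * source arenas; its spans cover everything the heap has
	 * imported, and are compared against the anonymous mappings
	 * below.
	 */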
	for (vm_next = heap_arena; vm_next != NULL; vm_next = vmem.vm_source) {
		if (mdb_vread(&vmem, sizeof (vmem), (uintptr_t)vm_next) == -1) {
			mdb_warn("couldn't read vmem at %p", vm_next);
			return (DCMD_ERR);
		}
		heap_top = vm_next;
	}

	lm.lm_seg_count = 0;
	lm.lm_seg_max = 0;

	if (mdb_pwalk("vmem_span", (mdb_walk_cb_t)leaky_count,
	    &lm.lm_seg_max, (uintptr_t)heap_top) == -1) {
		mdb_warn("couldn't walk vmem_span for vmem %p", heap_top);
		return (DCMD_ERR);
	}
	lm.lm_segs = mdb_alloc(lm.lm_seg_max * sizeof (*lm.lm_segs),
	    UM_SLEEP | UM_GC);

	if (mdb_pwalk("vmem_span", (mdb_walk_cb_t)leaky_read_segs, &lm,
	    (uintptr_t)heap_top) == -1) {
		mdb_warn("couldn't walk vmem_span for vmem %p",
		    heap_top);
		return (DCMD_ERR);
	}

	if (lm.lm_seg_count > lm.lm_seg_max) {
		mdb_warn("segment list for vmem %p grew\n", heap_top);
		return (DCMD_ERR);
	}

	qsort(lm.lm_segs, lm.lm_seg_count, sizeof (*lm.lm_segs), leaky_seg_cmp);

	lm.lm_lmp = lmp;

	prockludge_add_walkers();

	if (mdb_walk(KLUDGE_MAPWALK_NAME,
	    (mdb_walk_cb_t)leaky_process_anon_mappings, &lm) == -1) {
		mdb_warn("Couldn't walk "KLUDGE_MAPWALK_NAME);
		prockludge_remove_walkers();
		return (DCMD_ERR);
	}

	prockludge_remove_walkers();
	leaky_handle_sbrk(&lm);

	return (DCMD_OK);
}

static int
leaky_interested(const umem_cache_t *c)
{
	vmem_t vmem;

	if (mdb_vread(&vmem, sizeof (vmem), (uintptr_t)c->cache_arena) == -1) {
		mdb_warn("cannot read arena %p for cache '%s'",
		    (uintptr_t)c->cache_arena, c->cache_name);
		return (0);
	}

	/*
	 * If this cache isn't allocating from either the umem_default or
	 * umem_firewall vmem arena, we're not interested.
	 */
	if (strcmp(vmem.vm_name, "umem_default") != 0 &&
	    strcmp(vmem.vm_name, "umem_firewall") != 0) {
		dprintf(("Skipping cache '%s' with arena '%s'\n",
		    c->cache_name, vmem.vm_name));
		return (0);
	}

	return (1);
}

/*ARGSUSED*/
static int
leaky_estimate(uintptr_t addr, const umem_cache_t *c, size_t *est)
{
	if (!leaky_interested(c))
		return (WALK_NEXT);

	*est += umem_estimate_allocated(addr, c);

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_cache(uintptr_t addr, const umem_cache_t *c, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = *lmp;
	mdb_walk_cb_t cb;
	const char *walk;
	int audit = (c->cache_flags & UMF_AUDIT);

	if (!leaky_interested(c))
		return (WALK_NEXT);

	if (audit) {
		walk = "bufctl";
		cb = (mdb_walk_cb_t)leaky_mtab;
	} else {
		walk = "umem";
		cb = (mdb_walk_cb_t)leaky_mtab_addr;
	}
	if (mdb_pwalk(walk, cb, lmp, addr) == -1) {
		mdb_warn("can't walk umem for cache %p (%s)", addr,
		    c->cache_name);
		return (WALK_DONE);
	}

	for (; lm < *lmp; lm++) {
		lm->lkm_limit = lm->lkm_base + c->cache_bufsize;
		if (!audit)
			lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_CACHE);
	}
	return (WALK_NEXT);
}

static char *map_head = "%-?s  %?s  %-10s used reason\n";
static char *map_fmt  = "[%?p,%?p) %-10s ";
#define	BACKING_LEN 10 /* must match the third field's width in map_fmt */

static void
leaky_mappings_header(void)
{
	dprintf((map_head, "mapping", "", "backing"));
}

/* ARGSUSED */
static int
leaky_grep_mappings(uintptr_t ignored, const prmap_t *pmp,
    const pstatus_t *Psp)
{
	const char *map_libname_ptr;
	char db_mp_name[BACKING_LEN+1];

	map_libname_ptr = strrchr(pmp->pr_mapname, '/');
	if (map_libname_ptr != NULL)
		map_libname_ptr++;
	else
		map_libname_ptr = pmp->pr_mapname;

	strlcpy(db_mp_name, map_libname_ptr, sizeof (db_mp_name));

	dprintf((map_fmt, pmp->pr_vaddr, (char *)pmp->pr_vaddr + pmp->pr_size,
	    db_mp_name));

#define	USE(rsn)	dprintf_cont(("yes  %s\n", (rsn)))
#define	IGNORE(rsn)	dprintf_cont(("no   %s\n", (rsn)))

	if (!(pmp->pr_mflags & MA_WRITE) || !(pmp->pr_mflags & MA_READ)) {
		IGNORE("read-only");
	} else if (pmp->pr_vaddr <= Psp->pr_brkbase &&
	    pmp->pr_vaddr + pmp->pr_size > Psp->pr_brkbase) {
		USE("bss");			/* grab up to brkbase */
		leaky_grep(pmp->pr_vaddr, Psp->pr_brkbase - pmp->pr_vaddr);
	} else if (pmp->pr_vaddr >= Psp->pr_brkbase &&
	    pmp->pr_vaddr < Psp->pr_brkbase + Psp->pr_brksize) {
		IGNORE("in brk");
	} else if (pmp->pr_vaddr == Psp->pr_stkbase &&
	    pmp->pr_size == Psp->pr_stksize) {
		IGNORE("stack");
	} else if (0 == strcmp(map_libname_ptr, "a.out")) {
		USE("a.out data");
		leaky_grep(pmp->pr_vaddr, pmp->pr_size);
	} else if (0 == strncmp(map_libname_ptr, "libumem.so", 10)) {
		IGNORE("part of umem");
	} else if (pmp->pr_mapname[0] != 0) {
		USE("lib data");		/* library data/bss */
		leaky_grep(pmp->pr_vaddr, pmp->pr_size);
	} else if ((pmp->pr_mflags & MA_ANON) && pmp->pr_mapname[0] == 0) {
		IGNORE("anon");
	} else {
		IGNORE("");		/* default to ignoring */
	}

#undef	USE
#undef	IGNORE

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_mark_lwp(void *ignored, const lwpstatus_t *lwp)
{
	leaky_mark_ptr(lwp->pr_reg[R_SP] + STACK_BIAS);
	return (0);
}

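/*
 * Scan a single LWP for heap references: every general register is a
 * potential pointer, and the live portion of the stack (from %sp to
 * the end of the region marked by leaky_mark_lwp()) is grepped as a
 * single block.
 */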
/*ARGSUSED*/
static int
leaky_process_lwp(void *ignored, const lwpstatus_t *lwp)
{
	const uintptr_t *regs = (const uintptr_t *)&lwp->pr_reg;
	int i;
	uintptr_t sp;
	uintptr_t addr;
	size_t size;

	for (i = 0; i < R_SP; i++)
		leaky_grep_ptr(regs[i]);

	sp = regs[i++] + STACK_BIAS;
	if (leaky_lookup_marked(sp, &addr, &size))
		leaky_grep(sp, size - (sp - addr));

	for (; i < NPRGREG; i++)
		leaky_grep_ptr(regs[i]);

	return (0);
}

/*
 * Handles processing various proc-related things:
 * 1. calls leaky_process_lwp on each LWP
 * 2. leaky_greps the bss/data of libraries and a.out, and the a.out stack.
 */
static int
leaky_process_proc(void)
{
	pstatus_t Ps;
	struct ps_prochandle *Pr;

	if (mdb_get_xdata("pstatus", &Ps, sizeof (Ps)) == -1) {
		mdb_warn("couldn't read pstatus xdata");
		return (DCMD_ERR);
	}

	dprintf(("pstatus says:\n"));
	dprintf(("\tbrk: base %p size %p\n",
	    Ps.pr_brkbase, Ps.pr_brksize));
	dprintf(("\tstk: base %p size %p\n",
	    Ps.pr_stkbase, Ps.pr_stksize));

	if (mdb_get_xdata("pshandle", &Pr, sizeof (Pr)) == -1) {
		mdb_warn("couldn't read pshandle xdata");
		return (DCMD_ERR);
	}

	if (Plwp_iter(Pr, leaky_mark_lwp, NULL) != 0) {
		mdb_warn("findleaks: Failed to iterate lwps\n");
		return (DCMD_ERR);
	}

	if (Plwp_iter(Pr, leaky_process_lwp, NULL) != 0) {
		mdb_warn("findleaks: Failed to iterate lwps\n");
		return (DCMD_ERR);
	}

	prockludge_add_walkers();

	leaky_mappings_header();

	if (mdb_walk(KLUDGE_MAPWALK_NAME, (mdb_walk_cb_t)leaky_grep_mappings,
	    &Ps) == -1) {
		mdb_warn("Couldn't walk "KLUDGE_MAPWALK_NAME);
		prockludge_remove_walkers();
		return (-1);
	}

	prockludge_remove_walkers();

	return (0);
}

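/*
 * Pick the most interesting caller from a saved stack: the first frame
 * whose symbol does not resolve into libumem.so itself.
 */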
static void
leaky_subr_caller(const uintptr_t *stack, uint_t depth, char *buf,
    uintptr_t *pcp)
{
	int i;
	GElf_Sym sym;
	uintptr_t pc = 0;

	buf[0] = 0;

	for (i = 0; i < depth; i++) {
		pc = stack[i];

		if (mdb_lookup_by_addr(pc,
		    MDB_SYM_FUZZY, buf, MDB_SYM_NAMLEN, &sym) == -1)
			continue;
		if (strncmp(buf, "libumem.so", 10) == 0)
			continue;

		*pcp = pc;
		return;
	}

	/*
	 * We're only here if the entire call chain is in libumem.so;
	 * this shouldn't happen, but we'll just use the last caller.
	 */
	*pcp = pc;
}

int
leaky_subr_bufctl_cmp(const leak_bufctl_t *lhs, const leak_bufctl_t *rhs)
{
	char lbuf[MDB_SYM_NAMLEN], rbuf[MDB_SYM_NAMLEN];
	uintptr_t lcaller, rcaller;
	int rval;

	leaky_subr_caller(lhs->lkb_stack, lhs->lkb_depth, lbuf, &lcaller);
	leaky_subr_caller(rhs->lkb_stack, rhs->lkb_depth, rbuf, &rcaller);

	if ((rval = strcmp(lbuf, rbuf)) != 0)
		return (rval);

	if (lcaller < rcaller)
		return (-1);

	if (lcaller > rcaller)
		return (1);

	if (lhs->lkb_data < rhs->lkb_data)
		return (-1);

	if (lhs->lkb_data > rhs->lkb_data)
		return (1);

	return (0);
}

/*ARGSUSED*/
int
leaky_subr_estimate(size_t *estp)
{
	int umem_flags;
	int umem_ready;

	if (umem_readvar(&umem_ready, "umem_ready") == -1) {
		mdb_warn("couldn't read 'umem_ready'");
		return (DCMD_ERR);
	}

	if (umem_ready != UMEM_READY) {
		mdb_warn("findleaks: No allocations have occurred -- no "
		    "possible leaks.\n");
		return (DCMD_ERR);
	}

	if (umem_readvar(&umem_flags, "umem_flags") == -1) {
		mdb_warn("couldn't read 'umem_flags'");
		return (DCMD_ERR);
	}

	if (umem_flags & UMF_RANDOMIZE) {
		mdb_warn("findleaks: might not work with "
		    "UMEM_DEBUG=randomize\n");
	}

	if (mdb_walk("umem_cache", (mdb_walk_cb_t)leaky_estimate, estp) == -1) {
		mdb_warn("couldn't walk 'umem_cache'");
		return (DCMD_ERR);
	}

	if (mdb_walk("vmem", (mdb_walk_cb_t)leaky_estimate_vmem, estp) == -1) {
		mdb_warn("couldn't walk 'vmem'");
		return (DCMD_ERR);
	}

	if (*estp == 0) {
		mdb_warn("findleaks: No allocated buffers found.\n");
		return (DCMD_ERR);
	}

	prockludge_add_walkers();

	if (mdb_walk(KLUDGE_MAPWALK_NAME, (mdb_walk_cb_t)leaky_count,
	    estp) == -1) {
		mdb_warn("Couldn't walk "KLUDGE_MAPWALK_NAME);
		prockludge_remove_walkers();
		return (DCMD_ERR);
	}

	prockludge_remove_walkers();

	return (DCMD_OK);
}

int
leaky_subr_fill(leak_mtab_t **lmpp)
{
	if (leaky_handle_anon_mappings(lmpp) != DCMD_OK) {
		mdb_warn("unable to process mappings\n");
		return (DCMD_ERR);
	}

	if (mdb_walk("vmem", (mdb_walk_cb_t)leaky_vmem, lmpp) == -1) {
		mdb_warn("couldn't walk 'vmem'");
		return (DCMD_ERR);
	}

	if (mdb_walk("umem_cache", (mdb_walk_cb_t)leaky_cache, lmpp) == -1) {
		mdb_warn("couldn't walk 'umem_cache'");
		return (DCMD_ERR);
	}

	return (DCMD_OK);
}

int
leaky_subr_run(void)
{
	if (leaky_process_proc() == DCMD_ERR) {
		mdb_warn("failed to process proc");
		return (DCMD_ERR);
	}
	return (DCMD_OK);
}

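/*
 * Convert one leak_mtab_t entry into a leak_bufctl_t, decoding the
 * LKM_CTL_* type tag stashed in lkm_bufctl during the fill pass.
 */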
void
leaky_subr_add_leak(leak_mtab_t *lmp)
{
	uintptr_t addr = LKM_CTLPTR(lmp->lkm_bufctl);
	uint_t depth;

	vmem_seg_t vs;
	umem_bufctl_audit_t *bcp;
	UMEM_LOCAL_BUFCTL_AUDIT(&bcp);

	switch (LKM_CTLTYPE(lmp->lkm_bufctl)) {
	case LKM_CTL_BUFCTL:
		if (mdb_vread(bcp, UMEM_BUFCTL_AUDIT_SIZE, addr) == -1) {
			mdb_warn("couldn't read leaked bufctl at addr %p",
			    addr);
			return;
		}

		depth = MIN(bcp->bc_depth, umem_stack_depth);

		/*
		 * The top of the stack will be in umem_cache_alloc().
		 * Since the offset in umem_cache_alloc() isn't interesting
		 * we skip that frame for the purposes of uniquifying stacks.
		 *
		 * Also, we use the cache pointer as the leak's cid, to
		 * prevent the coalescing of leaks from different caches.
		 */
		if (depth > 0)
			depth--;
		leaky_add_leak(TYPE_UMEM, addr, (uintptr_t)bcp->bc_addr,
		    bcp->bc_timestamp, bcp->bc_stack + 1, depth,
		    (uintptr_t)bcp->bc_cache, (uintptr_t)bcp->bc_cache);
		break;
	case LKM_CTL_VMSEG:
		if (mdb_vread(&vs, sizeof (vs), addr) == -1) {
			mdb_warn("couldn't read leaked vmem_seg at addr %p",
			    addr);
			return;
		}
		depth = MIN(vs.vs_depth, VMEM_STACK_DEPTH);

		leaky_add_leak(TYPE_VMEM, addr, vs.vs_start, vs.vs_timestamp,
		    vs.vs_stack, depth, 0, (vs.vs_end - vs.vs_start));
		break;
	case LKM_CTL_MEMORY:
		if (LEAKY_INBRK(addr))
			leaky_add_leak(TYPE_SBRK, addr, addr, 0, NULL, 0, 0,
			    lmp->lkm_limit - addr);
		else
			leaky_add_leak(TYPE_MMAP, addr, addr, 0, NULL, 0, 0,
			    lmp->lkm_limit - addr);
		break;
	case LKM_CTL_CACHE:
		leaky_add_leak(TYPE_CACHE, lmp->lkm_base, lmp->lkm_base, 0,
		    NULL, 0, addr, addr);
		break;
	default:
		mdb_warn("internal error:  invalid leak_bufctl_t\n");
		break;
	}
}

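/*
 * Running state for the dump pass: the lk_*_seen flags record whether
 * each table's header has been printed, while lk_ttl and lk_bytes
 * accumulate the totals reported by leaky_subr_dump_end().
 */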
static int lk_vmem_seen;
static int lk_cache_seen;
static int lk_umem_seen;
static size_t lk_ttl;
static size_t lk_bytes;

void
leaky_subr_dump_start(int type)
{
	switch (type) {
	case TYPE_MMAP:
		lk_vmem_seen = 0;
		break;

	case TYPE_SBRK:
	case TYPE_VMEM:
		return;			/* don't zero counts */

	case TYPE_CACHE:
		lk_cache_seen = 0;
		break;

	case TYPE_UMEM:
		lk_umem_seen = 0;
		break;

	default:
		break;
	}

	lk_ttl = 0;
	lk_bytes = 0;
}

void
leaky_subr_dump(const leak_bufctl_t *lkb, int verbose)
{
	const leak_bufctl_t *cur;
	umem_cache_t cache;
	size_t min, max, size;
	char sz[30];
	char c[MDB_SYM_NAMLEN];
	uintptr_t caller;
	const char *nm, *nm_lc;
	uint8_t type = lkb->lkb_type;

	if (verbose) {
		lk_ttl = 0;
		lk_bytes = 0;
	} else if (!lk_vmem_seen && (type == TYPE_VMEM || type == TYPE_MMAP ||
	    type == TYPE_SBRK)) {
		lk_vmem_seen = 1;
		mdb_printf("%-16s %7s %?s %s\n",
		    "BYTES", "LEAKED", "VMEM_SEG", "CALLER");
	}

	switch (lkb->lkb_type) {
	case TYPE_MMAP:
	case TYPE_SBRK:
		nm = (lkb->lkb_type == TYPE_MMAP) ? "MMAP" : "SBRK";
		nm_lc = (lkb->lkb_type == TYPE_MMAP) ? "mmap(2)" : "sbrk(2)";

		for (; lkb != NULL; lkb = lkb->lkb_next) {
			if (!verbose)
				mdb_printf("%-16d %7d %?p %s\n", lkb->lkb_data,
				    lkb->lkb_dups + 1, lkb->lkb_addr, nm);
			else
				mdb_printf("%s leak: [%p, %p), %ld bytes\n",
				    nm_lc, lkb->lkb_addr,
				    lkb->lkb_addr + lkb->lkb_data,
				    lkb->lkb_data);
			lk_ttl++;
			lk_bytes += lkb->lkb_data;
		}
		return;

	case TYPE_VMEM:
		min = max = lkb->lkb_data;

		for (cur = lkb; cur != NULL; cur = cur->lkb_next) {
			size = cur->lkb_data;

			if (size < min)
				min = size;
			if (size > max)
				max = size;

			lk_ttl++;
			lk_bytes += size;
		}

		if (min == max)
			(void) mdb_snprintf(sz, sizeof (sz), "%ld", min);
		else
			(void) mdb_snprintf(sz, sizeof (sz), "%ld-%ld",
			    min, max);

		if (!verbose) {
			leaky_subr_caller(lkb->lkb_stack, lkb->lkb_depth,
			    c, &caller);

			mdb_printf("%-16s %7d %?p %a\n", sz, lkb->lkb_dups + 1,
			    lkb->lkb_addr, caller);
		} else {
			mdb_arg_t v;

			if (lk_ttl == 1)
				mdb_printf("umem_oversize leak: 1 vmem_seg, "
				    "%ld bytes\n", lk_bytes);
			else
				mdb_printf("umem_oversize leak: %d vmem_segs, "
				    "%s bytes each, %ld bytes total\n",
				    lk_ttl, sz, lk_bytes);

			v.a_type = MDB_TYPE_STRING;
			v.a_un.a_str = "-v";

			if (mdb_call_dcmd("vmem_seg", lkb->lkb_addr,
			    DCMD_ADDRSPEC, 1, &v) == -1) {
				mdb_warn("'%p::vmem_seg -v' failed",
				    lkb->lkb_addr);
			}
		}
		return;

	case TYPE_CACHE:
		if (!lk_cache_seen) {
			lk_cache_seen = 1;
			if (lk_vmem_seen)
				mdb_printf("\n");
			mdb_printf("%-?s %7s %?s %s\n",
			    "CACHE", "LEAKED", "BUFFER", "CALLER");
		}

		if (mdb_vread(&cache, sizeof (cache), lkb->lkb_data) == -1) {
			/*
			 * This _really_ shouldn't happen; we shouldn't
			 * have been able to get this far if this
			 * cache wasn't readable.
			 */
			mdb_warn("can't read cache %p for leaked "
			    "buffer %p", lkb->lkb_data, lkb->lkb_addr);
			return;
		}

		lk_ttl += lkb->lkb_dups + 1;
		lk_bytes += (lkb->lkb_dups + 1) * cache.cache_bufsize;

		caller = (lkb->lkb_depth == 0) ? 0 : lkb->lkb_stack[0];
		if (caller != 0) {
			(void) mdb_snprintf(c, sizeof (c), "%a", caller);
		} else {
			(void) mdb_snprintf(c, sizeof (c), "%s",
			    (verbose) ? "" : "?");
		}

		if (!verbose) {
			mdb_printf("%0?p %7d %0?p %s\n", lkb->lkb_cid,
			    lkb->lkb_dups + 1, lkb->lkb_addr, c);
		} else {
			if (lk_ttl == 1)
				mdb_printf("%s leak: 1 buffer, %ld bytes,\n",
				    cache.cache_name, lk_bytes);
			else
				mdb_printf("%s leak: %d buffers, "
				    "%ld bytes each, %ld bytes total,\n",
				    cache.cache_name, lk_ttl,
				    cache.cache_bufsize, lk_bytes);
			mdb_printf("    %s%s%ssample addr %p\n",
			    (caller == 0) ? "" : "caller ", c,
			    (caller == 0) ? "" : ", ", lkb->lkb_addr);
		}
		return;

	case TYPE_UMEM:
		if (!lk_umem_seen) {
			lk_umem_seen = 1;
			if (lk_vmem_seen || lk_cache_seen)
				mdb_printf("\n");
			mdb_printf("%-?s %7s %?s %s\n",
			    "CACHE", "LEAKED", "BUFCTL", "CALLER");
		}
		if (mdb_vread(&cache, sizeof (cache), lkb->lkb_data) == -1) {
			/*
			 * This _really_ shouldn't happen; we shouldn't
			 * have been able to get this far if this
			 * cache wasn't readable.
			 */
			mdb_warn("can't read cache %p for leaked "
			    "bufctl %p", lkb->lkb_data, lkb->lkb_addr);
			return;
		}

		lk_ttl += lkb->lkb_dups + 1;
		lk_bytes += (lkb->lkb_dups + 1) * cache.cache_bufsize;

		if (!verbose) {
			leaky_subr_caller(lkb->lkb_stack, lkb->lkb_depth, c,
			    &caller);

			mdb_printf("%0?p %7d %0?p %a\n", lkb->lkb_data,
			    lkb->lkb_dups + 1, lkb->lkb_addr, caller);
		} else {
			mdb_arg_t v;

			if (lk_ttl == 1)
				mdb_printf("%s leak: 1 buffer, %ld bytes\n",
				    cache.cache_name, lk_bytes);
			else
				mdb_printf("%s leak: %d buffers, "
				    "%ld bytes each, %ld bytes total\n",
				    cache.cache_name, lk_ttl,
				    cache.cache_bufsize, lk_bytes);

			v.a_type = MDB_TYPE_STRING;
			v.a_un.a_str = "-v";

			if (mdb_call_dcmd("bufctl", lkb->lkb_addr,
			    DCMD_ADDRSPEC, 1, &v) == -1) {
				mdb_warn("'%p::bufctl -v' failed",
				    lkb->lkb_addr);
			}
		}
		return;

	default:
		return;
	}
}

void
leaky_subr_dump_end(int type)
{
	int i;
	int width;
	const char *leak;

	switch (type) {
	case TYPE_VMEM:
		if (!lk_vmem_seen)
			return;

		width = 16;
		leak = "oversized leak";
		break;

	case TYPE_CACHE:
		if (!lk_cache_seen)
			return;

		width = sizeof (uintptr_t) * 2;
		leak = "buffer";
		break;

	case TYPE_UMEM:
		if (!lk_umem_seen)
			return;

		width = sizeof (uintptr_t) * 2;
		leak = "buffer";
		break;

	default:
		return;
	}

	for (i = 0; i < 72; i++)
		mdb_printf("-");
	mdb_printf("\n%*s %7ld %s%s, %ld byte%s\n",
	    width, "Total", lk_ttl, leak, (lk_ttl == 1) ? "" : "s",
	    lk_bytes, (lk_bytes == 1) ? "" : "s");
}

int
leaky_subr_invoke_callback(const leak_bufctl_t *lkb, mdb_walk_cb_t cb,
    void *cbdata)
{
	vmem_seg_t vs;
	umem_bufctl_audit_t *bcp;
	UMEM_LOCAL_BUFCTL_AUDIT(&bcp);

	switch (lkb->lkb_type) {
	case TYPE_VMEM:
		if (mdb_vread(&vs, sizeof (vs), lkb->lkb_addr) == -1) {
			mdb_warn("unable to read vmem_seg at %p",
			    lkb->lkb_addr);
			return (WALK_NEXT);
		}
		return (cb(lkb->lkb_addr, &vs, cbdata));

	case TYPE_UMEM:
		if (mdb_vread(bcp, UMEM_BUFCTL_AUDIT_SIZE,
		    lkb->lkb_addr) == -1) {
			mdb_warn("unable to read bufctl at %p",
			    lkb->lkb_addr);
			return (WALK_NEXT);
		}
		return (cb(lkb->lkb_addr, bcp, cbdata));

	default:
		return (cb(lkb->lkb_addr, NULL, cbdata));
	}
}