/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "umem.h"

#include <sys/vmem_impl_user.h>
#include <umem_impl.h>

#include <alloca.h>
#include <libproc.h>
#include <stdio.h>
#include <string.h>
#include <sys/stack.h>

#include "leaky_impl.h"
#include "misc.h"
#include "proc_kludges.h"

#include "umem_pagesize.h"

/*
 * This file defines the libumem target for ../genunix/leaky.c.
 *
 * See ../genunix/leaky_impl.h for the target interface definition.
 */

/*
 * leaky_subr_dump_start()/_end() depend on the ordering of TYPE_VMEM,
 * TYPE_MMAP and TYPE_SBRK.
 */
#define	TYPE_MMAP	0		/* lkb_data is the size */
#define	TYPE_SBRK	1		/* lkb_data is the size */
#define	TYPE_VMEM	2		/* lkb_data is the vmem_seg's size */
#define	TYPE_CACHE	3		/* lkb_cid is the bufctl's cache */
#define	TYPE_UMEM	4		/* lkb_cid is the bufctl's cache */

#define	LKM_CTL_BUFCTL	0	/* normal allocation, PTR is bufctl */
#define	LKM_CTL_VMSEG	1	/* oversize allocation, PTR is vmem_seg_t */
#define	LKM_CTL_MEMORY	2	/* non-umem mmap or brk, PTR is region start */
#define	LKM_CTL_CACHE	3	/* normal alloc, non-debug, PTR is cache */
#define	LKM_CTL_MASK	3L

/*
 * create a lkm_bufctl from a pointer and a type
 */
#define	LKM_CTL(ptr, type)	(LKM_CTLPTR(ptr) | (type))
#define	LKM_CTLPTR(ctl)		((uintptr_t)(ctl) & ~(LKM_CTL_MASK))
#define	LKM_CTLTYPE(ctl)	((uintptr_t)(ctl) &  (LKM_CTL_MASK))
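
/*
 * The type is stored in the low two bits of the pointer, which works
 * because bufctls, vmem_segs, and region start addresses are all at
 * least 4-byte aligned; e.g. LKM_CTL(bcp, LKM_CTL_BUFCTL) tags a
 * bufctl pointer.
 */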

static uintptr_t leak_brkbase;
static uintptr_t leak_brksize;

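/*
 * LEAKY_INBRK: the unsigned subtraction wraps around for pointers below
 * leak_brkbase, so a single comparison checks
 * leak_brkbase <= ptr < leak_brkbase + leak_brksize.
 */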
#define	LEAKY_INBRK(ptr) \
	(((uintptr_t)(ptr) - leak_brkbase) < leak_brksize)

typedef struct leaky_seg_info {
	uintptr_t ls_start;
	uintptr_t ls_end;
} leaky_seg_info_t;

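/*
 * leaky_maps_t bundles the state for the anonymous-mapping pass: the
 * sorted list of heap spans, the target's pstatus, and the cursor into
 * the leak mtab being filled.
 */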
typedef struct leaky_maps {
	leaky_seg_info_t	*lm_segs;
	uintptr_t		lm_seg_count;
	uintptr_t		lm_seg_max;

	pstatus_t		*lm_pstatus;

	leak_mtab_t		**lm_lmp;
} leaky_maps_t;

/*ARGSUSED*/
static int
leaky_mtab(uintptr_t addr, const umem_bufctl_audit_t *bcp, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = (uintptr_t)bcp->bc_addr;
	lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_BUFCTL);

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_mtab_addr(uintptr_t addr, void *ignored, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = addr;

	return (WALK_NEXT);
}

static int
leaky_seg(uintptr_t addr, const vmem_seg_t *seg, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = seg->vs_start;
	lm->lkm_limit = seg->vs_end;
	lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_VMSEG);
	return (WALK_NEXT);
}

static int
leaky_vmem(uintptr_t addr, const vmem_t *vmem, leak_mtab_t **lmp)
{
	if (strcmp(vmem->vm_name, "umem_oversize") != 0 &&
	    strcmp(vmem->vm_name, "umem_memalign") != 0)
		return (WALK_NEXT);

	if (mdb_pwalk("vmem_alloc", (mdb_walk_cb_t)leaky_seg, lmp, addr) == -1)
		mdb_warn("can't walk vmem_alloc for %s (%p)", vmem->vm_name,
		    addr);

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_estimate_vmem(uintptr_t addr, const vmem_t *vmem, size_t *est)
{
	if (strcmp(vmem->vm_name, "umem_oversize") != 0 &&
	    strcmp(vmem->vm_name, "umem_memalign") != 0)
		return (WALK_NEXT);

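	/* outstanding segments: lifetime allocations minus lifetime frees */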
	*est += (int)(vmem->vm_kstat.vk_alloc - vmem->vm_kstat.vk_free);

	return (WALK_NEXT);
}

static int
leaky_seg_cmp(const void *l, const void *r)
{
	const leaky_seg_info_t *lhs = (const leaky_seg_info_t *)l;
	const leaky_seg_info_t *rhs = (const leaky_seg_info_t *)r;

	if (lhs->ls_start < rhs->ls_start)
		return (-1);
	if (lhs->ls_start > rhs->ls_start)
		return (1);

	return (0);
}

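/*
 * Binary-search the sorted span list for the span containing addr;
 * returns its index, or -1 if addr falls in no span.
 */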
static ssize_t
leaky_seg_search(uintptr_t addr, leaky_seg_info_t *listp, unsigned count)
{
	ssize_t left = 0, right = count - 1, guess;

	while (right >= left) {
		guess = (right + left) >> 1;

		if (addr < listp[guess].ls_start) {
			right = guess - 1;
			continue;
		}

		if (addr >= listp[guess].ls_end) {
			left = guess + 1;
			continue;
		}

		return (guess);
	}

	return (-1);
}

/*ARGSUSED*/
static int
leaky_count(uintptr_t addr, void *unused, size_t *total)
{
	++*total;

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_read_segs(uintptr_t addr, const vmem_seg_t *seg, leaky_maps_t *lmp)
{
	leaky_seg_info_t *my_si = lmp->lm_segs + lmp->lm_seg_count;

	if (seg->vs_start == seg->vs_end && seg->vs_start == 0)
		return (WALK_NEXT);

	if (lmp->lm_seg_count++ >= lmp->lm_seg_max)
		return (WALK_ERR);

	my_si->ls_start = seg->vs_start;
	my_si->ls_end = seg->vs_end;

	return (WALK_NEXT);
}

/* ARGSUSED */
static int
leaky_process_anon_mappings(uintptr_t ignored, const prmap_t *pmp,
    leaky_maps_t *lmp)
{
	uintptr_t start = pmp->pr_vaddr;
	uintptr_t end = pmp->pr_vaddr + pmp->pr_size;

	leak_mtab_t *lm;
	pstatus_t *Psp = lmp->lm_pstatus;

	uintptr_t brk_start = Psp->pr_brkbase;
	uintptr_t brk_end = Psp->pr_brkbase + Psp->pr_brksize;

	int has_brk = 0;
	int in_vmem = 0;

	/*
	 * This checks if there is any overlap between the segment and the brk.
	 */
	if (end > brk_start && start < brk_end)
		has_brk = 1;

	if (leaky_seg_search(start, lmp->lm_segs, lmp->lm_seg_count) != -1)
		in_vmem = 1;

	/*
	 * We only want anonymous, mmaped memory.  That means:
	 *
	 * 1. Must be read-write
	 * 2. Cannot be shared
	 * 3. Cannot have backing
	 * 4. Cannot be in the brk
	 * 5. Cannot be part of the vmem heap.
	 */
	if ((pmp->pr_mflags & (MA_READ | MA_WRITE)) == (MA_READ | MA_WRITE) &&
	    (pmp->pr_mflags & MA_SHARED) == 0 &&
	    (pmp->pr_mapname[0] == 0) &&
	    !has_brk &&
	    !in_vmem) {
		dprintf(("mmaped region: [%p, %p)\n", start, end));
		lm = (*lmp->lm_lmp)++;
		lm->lkm_base = start;
		lm->lkm_limit = end;
		lm->lkm_bufctl = LKM_CTL(pmp->pr_vaddr, LKM_CTL_MEMORY);
	}

	return (WALK_NEXT);
}

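/*
 * The brk may be partially carved up into heap spans (vmem spans backed
 * by sbrk).  Add only the gaps between those spans within
 * [brkbase, brkend) as candidate regions, so brk memory is not counted
 * twice.
 */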
static void
leaky_handle_sbrk(leaky_maps_t *lmp)
{
	uintptr_t brkbase = lmp->lm_pstatus->pr_brkbase;
	uintptr_t brkend = brkbase + lmp->lm_pstatus->pr_brksize;

	leak_mtab_t *lm;

	leaky_seg_info_t *segs = lmp->lm_segs;

	int x, first = -1, last = -1;

	dprintf(("brk: [%p, %p)\n", brkbase, brkend));

	for (x = 0; x < lmp->lm_seg_count; x++) {
		if (segs[x].ls_start >= brkbase && segs[x].ls_end <= brkend) {
			if (first == -1)
				first = x;
			last = x;
		}
	}

	if (brkbase == brkend) {
		dprintf(("empty brk -- do nothing\n"));
	} else if (first == -1) {
		dprintf(("adding [%p, %p) whole brk\n", brkbase, brkend));

		lm = (*lmp->lm_lmp)++;
		lm->lkm_base = brkbase;
		lm->lkm_limit = brkend;
		lm->lkm_bufctl = LKM_CTL(brkbase, LKM_CTL_MEMORY);
	} else {
		uintptr_t curbrk = P2ROUNDUP(brkbase, umem_pagesize);

		if (curbrk != segs[first].ls_start) {
			dprintf(("adding [%p, %p) in brk, before first seg\n",
			    brkbase, segs[first].ls_start));

			lm = (*lmp->lm_lmp)++;
			lm->lkm_base = brkbase;
			lm->lkm_limit = segs[first].ls_start;
			lm->lkm_bufctl = LKM_CTL(brkbase, LKM_CTL_MEMORY);

			curbrk = segs[first].ls_start;

		} else if (curbrk != brkbase) {
			dprintf(("ignore [%p, %p) -- realign\n", brkbase,
			    curbrk));
		}

		for (x = first; x <= last; x++) {
			if (curbrk < segs[x].ls_start) {
				dprintf(("adding [%p, %p) in brk\n", curbrk,
				    segs[x].ls_start));

				lm = (*lmp->lm_lmp)++;
				lm->lkm_base = curbrk;
				lm->lkm_limit = segs[x].ls_start;
				lm->lkm_bufctl = LKM_CTL(curbrk,
				    LKM_CTL_MEMORY);
			}
			curbrk = segs[x].ls_end;
		}

		if (curbrk < brkend) {
			dprintf(("adding [%p, %p) in brk, after last seg\n",
			    curbrk, brkend));

			lm = (*lmp->lm_lmp)++;
			lm->lkm_base = curbrk;
			lm->lkm_limit = brkend;
			lm->lkm_bufctl = LKM_CTL(curbrk, LKM_CTL_MEMORY);
		}
	}
}

static int
leaky_handle_anon_mappings(leak_mtab_t **lmp)
{
	leaky_maps_t		lm;

	vmem_t *heap_arena;
	vmem_t *vm_next;
	vmem_t *heap_top;
	vmem_t vmem;

	pstatus_t Ps;

	if (mdb_get_xdata("pstatus", &Ps, sizeof (Ps)) == -1) {
		mdb_warn("couldn't read pstatus xdata");
		return (DCMD_ERR);
	}
	lm.lm_pstatus = &Ps;

	leak_brkbase = Ps.pr_brkbase;
	leak_brksize = Ps.pr_brksize;

	if (umem_readvar(&heap_arena, "heap_arena") == -1) {
		mdb_warn("couldn't read heap_arena");
		return (DCMD_ERR);
	}

	if (heap_arena == NULL) {
		mdb_warn("heap_arena is NULL.\n");
		return (DCMD_ERR);
	}

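	/*
	 * Follow the arena's source chain to its root; the root arena's
	 * spans cover the address ranges the heap has imported from the
	 * system.
	 */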
	for (vm_next = heap_arena; vm_next != NULL; vm_next = vmem.vm_source) {
		if (mdb_vread(&vmem, sizeof (vmem), (uintptr_t)vm_next) == -1) {
			mdb_warn("couldn't read vmem at %p", vm_next);
			return (DCMD_ERR);
		}
		heap_top = vm_next;
	}

	lm.lm_seg_count = 0;
	lm.lm_seg_max = 0;

	if (mdb_pwalk("vmem_span", (mdb_walk_cb_t)leaky_count,
	    &lm.lm_seg_max, (uintptr_t)heap_top) == -1) {
		mdb_warn("couldn't walk vmem_span for vmem %p", heap_top);
		return (DCMD_ERR);
	}
	lm.lm_segs = mdb_alloc(lm.lm_seg_max * sizeof (*lm.lm_segs),
	    UM_SLEEP | UM_GC);

	if (mdb_pwalk("vmem_span", (mdb_walk_cb_t)leaky_read_segs, &lm,
	    (uintptr_t)heap_top) == -1) {
		mdb_warn("couldn't walk vmem_span for vmem %p",
		    heap_top);
		return (DCMD_ERR);
	}

	if (lm.lm_seg_count > lm.lm_seg_max) {
		mdb_warn("segment list for vmem %p grew\n", heap_top);
		return (DCMD_ERR);
	}

	qsort(lm.lm_segs, lm.lm_seg_count, sizeof (*lm.lm_segs), leaky_seg_cmp);

	lm.lm_lmp = lmp;

	prockludge_add_walkers();

	if (mdb_walk(KLUDGE_MAPWALK_NAME,
	    (mdb_walk_cb_t)leaky_process_anon_mappings, &lm) == -1) {
		mdb_warn("Couldn't walk "KLUDGE_MAPWALK_NAME);
		prockludge_remove_walkers();
		return (DCMD_ERR);
	}

	prockludge_remove_walkers();
	leaky_handle_sbrk(&lm);

	return (DCMD_OK);
}

static int
leaky_interested(const umem_cache_t *c)
{
	vmem_t vmem;

	if (mdb_vread(&vmem, sizeof (vmem), (uintptr_t)c->cache_arena) == -1) {
		mdb_warn("cannot read arena %p for cache '%s'",
		    (uintptr_t)c->cache_arena, c->cache_name);
		return (0);
	}

	/*
	 * If this cache isn't allocating from either the umem_default or
	 * umem_firewall vmem arena, we're not interested.
	 */
	if (strcmp(vmem.vm_name, "umem_default") != 0 &&
	    strcmp(vmem.vm_name, "umem_firewall") != 0) {
		dprintf(("Skipping cache '%s' with arena '%s'\n",
		    c->cache_name, vmem.vm_name));
		return (0);
	}

	return (1);
}

/*ARGSUSED*/
static int
leaky_estimate(uintptr_t addr, const umem_cache_t *c, size_t *est)
{
	if (!leaky_interested(c))
		return (WALK_NEXT);

	*est += umem_estimate_allocated(addr, c);

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_cache(uintptr_t addr, const umem_cache_t *c, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = *lmp;
	mdb_walk_cb_t cb;
	const char *walk;
	int audit = (c->cache_flags & UMF_AUDIT);

	if (!leaky_interested(c))
		return (WALK_NEXT);

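	/*
	 * With UMF_AUDIT, the bufctl walk yields audit bufctls with
	 * recorded stack traces; without it, the umem walk yields only
	 * buffer addresses, so such leaks are tagged with their cache.
	 */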
	if (audit) {
		walk = "bufctl";
		cb = (mdb_walk_cb_t)leaky_mtab;
	} else {
		walk = "umem";
		cb = (mdb_walk_cb_t)leaky_mtab_addr;
	}
	if (mdb_pwalk(walk, cb, lmp, addr) == -1) {
		mdb_warn("can't walk umem for cache %p (%s)", addr,
		    c->cache_name);
		return (WALK_DONE);
	}

	for (; lm < *lmp; lm++) {
		lm->lkm_limit = lm->lkm_base + c->cache_bufsize;
		if (!audit)
			lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_CACHE);
	}
	return (WALK_NEXT);
}

static char *map_head = "%-?s  %?s  %-10s used reason\n";
static char *map_fmt  = "[%?p,%?p) %-10s ";
#define	BACKING_LEN 10 /* must match the third field's width in map_fmt */

static void
leaky_mappings_header(void)
{
	dprintf((map_head, "mapping", "", "backing"));
}

/* ARGSUSED */
static int
leaky_grep_mappings(uintptr_t ignored, const prmap_t *pmp,
    const pstatus_t *Psp)
{
	const char *map_libname_ptr;
	char db_mp_name[BACKING_LEN+1];

	map_libname_ptr = strrchr(pmp->pr_mapname, '/');
	if (map_libname_ptr != NULL)
		map_libname_ptr++;
	else
		map_libname_ptr = pmp->pr_mapname;

	strlcpy(db_mp_name, map_libname_ptr, sizeof (db_mp_name));

	dprintf((map_fmt, pmp->pr_vaddr, (char *)pmp->pr_vaddr + pmp->pr_size,
	    db_mp_name));

#define	USE(rsn)	dprintf_cont(("yes  %s\n", (rsn)))
#define	IGNORE(rsn)	dprintf_cont(("no   %s\n", (rsn)))

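	/*
	 * Classify the mapping; USEd mappings are leaky_grep()ed for
	 * pointers into tracked allocations, IGNOREd ones are not.
	 */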
	if (!(pmp->pr_mflags & MA_WRITE) || !(pmp->pr_mflags & MA_READ)) {
		IGNORE("read-only");
	} else if (pmp->pr_vaddr <= Psp->pr_brkbase &&
	    pmp->pr_vaddr + pmp->pr_size > Psp->pr_brkbase) {
		USE("bss");			/* grab up to brkbase */
		leaky_grep(pmp->pr_vaddr, Psp->pr_brkbase - pmp->pr_vaddr);
	} else if (pmp->pr_vaddr >= Psp->pr_brkbase &&
	    pmp->pr_vaddr < Psp->pr_brkbase + Psp->pr_brksize) {
		IGNORE("in brk");
	} else if (pmp->pr_vaddr == Psp->pr_stkbase &&
	    pmp->pr_size == Psp->pr_stksize) {
		IGNORE("stack");
	} else if (0 == strcmp(map_libname_ptr, "a.out")) {
		USE("a.out data");
		leaky_grep(pmp->pr_vaddr, pmp->pr_size);
	} else if (0 == strncmp(map_libname_ptr, "libumem.so", 10)) {
		IGNORE("part of umem");
	} else if (pmp->pr_mapname[0] != 0) {
		USE("lib data");		/* library data/bss */
		leaky_grep(pmp->pr_vaddr, pmp->pr_size);
	} else if ((pmp->pr_mflags & MA_ANON) && pmp->pr_mapname[0] == 0) {
		IGNORE("anon");
	} else {
		IGNORE("");		/* default to ignoring */
	}

#undef	USE
#undef	IGNORE

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_mark_lwp(void *ignored, const lwpstatus_t *lwp)
{
	leaky_mark_ptr(lwp->pr_reg[R_SP] + STACK_BIAS);
	return (0);
}

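/*
 * Treat every register as a potential pointer.  For the stack pointer,
 * grep the live portion of the stack: from %sp to the end of the
 * mapping that leaky_mark_lwp() marked earlier.
 */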
/*ARGSUSED*/
static int
leaky_process_lwp(void *ignored, const lwpstatus_t *lwp)
{
	const uintptr_t *regs = (const uintptr_t *)&lwp->pr_reg;
	int i;
	uintptr_t sp;
	uintptr_t addr;
	size_t size;

	for (i = 0; i < R_SP; i++)
		leaky_grep_ptr(regs[i]);

	sp = regs[i++] + STACK_BIAS;
	if (leaky_lookup_marked(sp, &addr, &size))
		leaky_grep(sp, size - (sp - addr));

	for (; i < NPRGREG; i++)
		leaky_grep_ptr(regs[i]);

	return (0);
}

/*
 * Handles processing various proc-related things:
 * 1. marks each LWP's stack, then calls leaky_process_lwp on each LWP
 * 2. leaky_greps the bss/data of libraries and a.out, and the a.out stack.
 */
static int
leaky_process_proc(void)
{
	pstatus_t Ps;
	struct ps_prochandle *Pr;

	if (mdb_get_xdata("pstatus", &Ps, sizeof (Ps)) == -1) {
		mdb_warn("couldn't read pstatus xdata");
		return (DCMD_ERR);
	}

	dprintf(("pstatus says:\n"));
	dprintf(("\tbrk: base %p size %p\n",
	    Ps.pr_brkbase, Ps.pr_brksize));
	dprintf(("\tstk: base %p size %p\n",
	    Ps.pr_stkbase, Ps.pr_stksize));

	if (mdb_get_xdata("pshandle", &Pr, sizeof (Pr)) == -1) {
		mdb_warn("couldn't read pshandle xdata");
		return (DCMD_ERR);
	}

	if (Plwp_iter(Pr, leaky_mark_lwp, NULL) != 0) {
		mdb_warn("findleaks: Failed to iterate lwps\n");
		return (DCMD_ERR);
	}

	if (Plwp_iter(Pr, leaky_process_lwp, NULL) != 0) {
		mdb_warn("findleaks: Failed to iterate lwps\n");
		return (DCMD_ERR);
	}

	prockludge_add_walkers();

	leaky_mappings_header();

	if (mdb_walk(KLUDGE_MAPWALK_NAME, (mdb_walk_cb_t)leaky_grep_mappings,
	    &Ps) == -1) {
		mdb_warn("Couldn't walk "KLUDGE_MAPWALK_NAME);
		prockludge_remove_walkers();
		return (-1);
	}

	prockludge_remove_walkers();

	return (0);
}

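/*
 * Find the first frame in the stack trace that is outside libumem.so;
 * that caller is what names a leak and is used to coalesce duplicates.
 */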
static void
leaky_subr_caller(const uintptr_t *stack, uint_t depth, char *buf,
    uintptr_t *pcp)
{
	int i;
	GElf_Sym sym;
	uintptr_t pc = 0;

	buf[0] = 0;

	for (i = 0; i < depth; i++) {
		pc = stack[i];

		if (mdb_lookup_by_addr(pc,
		    MDB_SYM_FUZZY, buf, MDB_SYM_NAMLEN, &sym) == -1)
			continue;
		if (strncmp(buf, "libumem.so", 10) == 0)
			continue;

		*pcp = pc;
		return;
	}

	/*
	 * We're only here if the entire call chain is in libumem.so;
	 * this shouldn't happen, but we'll just use the last caller.
	 */
	*pcp = pc;
}

int
leaky_subr_bufctl_cmp(const leak_bufctl_t *lhs, const leak_bufctl_t *rhs)
{
	char lbuf[MDB_SYM_NAMLEN], rbuf[MDB_SYM_NAMLEN];
	uintptr_t lcaller, rcaller;
	int rval;

	leaky_subr_caller(lhs->lkb_stack, lhs->lkb_depth, lbuf, &lcaller);
	leaky_subr_caller(rhs->lkb_stack, rhs->lkb_depth, rbuf, &rcaller);

	if ((rval = strcmp(lbuf, rbuf)) != 0)
		return (rval);

	if (lcaller < rcaller)
		return (-1);

	if (lcaller > rcaller)
		return (1);

	if (lhs->lkb_data < rhs->lkb_data)
		return (-1);

	if (lhs->lkb_data > rhs->lkb_data)
		return (1);

	return (0);
}

/*ARGSUSED*/
int
leaky_subr_estimate(size_t *estp)
{
	if (umem_ready == 0) {
		mdb_warn(
		    "findleaks: umem is not loaded in the address space\n");
		return (DCMD_ERR);
	}

	if (umem_ready == UMEM_READY_INIT_FAILED) {
		mdb_warn("findleaks: umem initialization failed -- no "
		    "possible leaks.\n");
		return (DCMD_ERR);
	}

	if (umem_ready != UMEM_READY) {
		mdb_warn("findleaks: No allocations have occurred -- no "
		    "possible leaks.\n");
		return (DCMD_ERR);
	}

	if (mdb_walk("umem_cache", (mdb_walk_cb_t)leaky_estimate, estp) == -1) {
		mdb_warn("couldn't walk 'umem_cache'");
		return (DCMD_ERR);
	}

	if (mdb_walk("vmem", (mdb_walk_cb_t)leaky_estimate_vmem, estp) == -1) {
		mdb_warn("couldn't walk 'vmem'");
		return (DCMD_ERR);
	}

	if (*estp == 0) {
		mdb_warn("findleaks: No allocated buffers found.\n");
		return (DCMD_ERR);
	}

	prockludge_add_walkers();

	if (mdb_walk(KLUDGE_MAPWALK_NAME, (mdb_walk_cb_t)leaky_count,
	    estp) == -1) {
		mdb_warn("Couldn't walk "KLUDGE_MAPWALK_NAME);
		prockludge_remove_walkers();
		return (DCMD_ERR);
	}

	prockludge_remove_walkers();

	return (DCMD_OK);
}

int
leaky_subr_fill(leak_mtab_t **lmpp)
{
	if (leaky_handle_anon_mappings(lmpp) != DCMD_OK) {
		mdb_warn("unable to process mappings\n");
		return (DCMD_ERR);
	}

	if (mdb_walk("vmem", (mdb_walk_cb_t)leaky_vmem, lmpp) == -1) {
		mdb_warn("couldn't walk 'vmem'");
		return (DCMD_ERR);
	}

	if (mdb_walk("umem_cache", (mdb_walk_cb_t)leaky_cache, lmpp) == -1) {
		mdb_warn("couldn't walk 'umem_cache'");
		return (DCMD_ERR);
	}

	return (DCMD_OK);
}

int
leaky_subr_run(void)
{
	if (leaky_process_proc() == DCMD_ERR) {
		mdb_warn("failed to process proc");
		return (DCMD_ERR);
	}
	return (DCMD_OK);
}

void
leaky_subr_add_leak(leak_mtab_t *lmp)
{
	uintptr_t addr = LKM_CTLPTR(lmp->lkm_bufctl);
	uint_t depth;

	vmem_seg_t vs;
	umem_bufctl_audit_t *bcp;
	UMEM_LOCAL_BUFCTL_AUDIT(&bcp);

	switch (LKM_CTLTYPE(lmp->lkm_bufctl)) {
	case LKM_CTL_BUFCTL:
		if (mdb_vread(bcp, UMEM_BUFCTL_AUDIT_SIZE, addr) == -1) {
			mdb_warn("couldn't read leaked bufctl at addr %p",
			    addr);
			return;
		}

		depth = MIN(bcp->bc_depth, umem_stack_depth);

		/*
		 * The top of the stack will be in umem_cache_alloc().
		 * Since the offset in umem_cache_alloc() isn't interesting
		 * we skip that frame for the purposes of uniquifying stacks.
		 *
		 * Also, we use the cache pointer as the leak's cid, to
		 * prevent the coalescing of leaks from different caches.
		 */
		if (depth > 0)
			depth--;
		leaky_add_leak(TYPE_UMEM, addr, (uintptr_t)bcp->bc_addr,
		    bcp->bc_timestamp, bcp->bc_stack + 1, depth,
		    (uintptr_t)bcp->bc_cache, (uintptr_t)bcp->bc_cache);
		break;
	case LKM_CTL_VMSEG:
		if (mdb_vread(&vs, sizeof (vs), addr) == -1) {
			mdb_warn("couldn't read leaked vmem_seg at addr %p",
			    addr);
			return;
		}
		depth = MIN(vs.vs_depth, VMEM_STACK_DEPTH);

		leaky_add_leak(TYPE_VMEM, addr, vs.vs_start, vs.vs_timestamp,
		    vs.vs_stack, depth, 0, (vs.vs_end - vs.vs_start));
		break;
	case LKM_CTL_MEMORY:
		if (LEAKY_INBRK(addr))
			leaky_add_leak(TYPE_SBRK, addr, addr, 0, NULL, 0, 0,
			    lmp->lkm_limit - addr);
		else
			leaky_add_leak(TYPE_MMAP, addr, addr, 0, NULL, 0, 0,
			    lmp->lkm_limit - addr);
		break;
	case LKM_CTL_CACHE:
		leaky_add_leak(TYPE_CACHE, lmp->lkm_base, lmp->lkm_base, 0,
		    NULL, 0, addr, addr);
		break;
	default:
		mdb_warn("internal error:  invalid leak_bufctl_t\n");
		break;
	}
}

static int lk_vmem_seen;
static int lk_cache_seen;
static int lk_umem_seen;
static size_t lk_ttl;
static size_t lk_bytes;

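/*
 * TYPE_MMAP, TYPE_SBRK and TYPE_VMEM share a single output section, so
 * the counters are only zeroed for TYPE_MMAP, the first of the three
 * (see the ordering note above the TYPE_* definitions).
 */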
void
leaky_subr_dump_start(int type)
{
	switch (type) {
	case TYPE_MMAP:
		lk_vmem_seen = 0;
		break;

	case TYPE_SBRK:
	case TYPE_VMEM:
		return;			/* don't zero counts */

	case TYPE_CACHE:
		lk_cache_seen = 0;
		break;

	case TYPE_UMEM:
		lk_umem_seen = 0;
		break;

	default:
		break;
	}

	lk_ttl = 0;
	lk_bytes = 0;
}

void
leaky_subr_dump(const leak_bufctl_t *lkb, int verbose)
{
	const leak_bufctl_t *cur;
	umem_cache_t cache;
	size_t min, max, size;
	char sz[30];
	char c[MDB_SYM_NAMLEN];
	uintptr_t caller;
	const char *nm, *nm_lc;
	uint8_t type = lkb->lkb_type;

	if (verbose) {
		lk_ttl = 0;
		lk_bytes = 0;
	} else if (!lk_vmem_seen && (type == TYPE_VMEM || type == TYPE_MMAP ||
	    type == TYPE_SBRK)) {
		lk_vmem_seen = 1;
		mdb_printf("%-16s %7s %?s %s\n",
		    "BYTES", "LEAKED", "VMEM_SEG", "CALLER");
	}

	switch (lkb->lkb_type) {
	case TYPE_MMAP:
	case TYPE_SBRK:
		nm = (lkb->lkb_type == TYPE_MMAP) ? "MMAP" : "SBRK";
		nm_lc = (lkb->lkb_type == TYPE_MMAP) ? "mmap(2)" : "sbrk(2)";

		for (; lkb != NULL; lkb = lkb->lkb_next) {
			if (!verbose)
				mdb_printf("%-16d %7d %?p %s\n", lkb->lkb_data,
				    lkb->lkb_dups + 1, lkb->lkb_addr, nm);
			else
				mdb_printf("%s leak: [%p, %p), %ld bytes\n",
				    nm_lc, lkb->lkb_addr,
				    lkb->lkb_addr + lkb->lkb_data,
				    lkb->lkb_data);
			lk_ttl++;
			lk_bytes += lkb->lkb_data;
		}
		return;

	case TYPE_VMEM:
		min = max = lkb->lkb_data;

		for (cur = lkb; cur != NULL; cur = cur->lkb_next) {
			size = cur->lkb_data;

			if (size < min)
				min = size;
			if (size > max)
				max = size;

			lk_ttl++;
			lk_bytes += size;
		}

		if (min == max)
			(void) mdb_snprintf(sz, sizeof (sz), "%ld", min);
		else
			(void) mdb_snprintf(sz, sizeof (sz), "%ld-%ld",
			    min, max);

		if (!verbose) {
			leaky_subr_caller(lkb->lkb_stack, lkb->lkb_depth,
			    c, &caller);

			mdb_printf("%-16s %7d %?p %a\n", sz, lkb->lkb_dups + 1,
			    lkb->lkb_addr, caller);
		} else {
			mdb_arg_t v;

			if (lk_ttl == 1)
				mdb_printf("umem_oversize leak: 1 vmem_seg, "
				    "%ld bytes\n", lk_bytes);
			else
				mdb_printf("umem_oversize leak: %d vmem_segs, "
				    "%s bytes each, %ld bytes total\n",
				    lk_ttl, sz, lk_bytes);

			v.a_type = MDB_TYPE_STRING;
			v.a_un.a_str = "-v";

			if (mdb_call_dcmd("vmem_seg", lkb->lkb_addr,
			    DCMD_ADDRSPEC, 1, &v) == -1) {
				mdb_warn("'%p::vmem_seg -v' failed",
				    lkb->lkb_addr);
			}
		}
		return;

	case TYPE_CACHE:
		if (!lk_cache_seen) {
			lk_cache_seen = 1;
			if (lk_vmem_seen)
				mdb_printf("\n");
			mdb_printf("%-?s %7s %?s %s\n",
			    "CACHE", "LEAKED", "BUFFER", "CALLER");
		}

		if (mdb_vread(&cache, sizeof (cache), lkb->lkb_data) == -1) {
			/*
			 * This _really_ shouldn't happen; we shouldn't
			 * have been able to get this far if this
			 * cache wasn't readable.
			 */
			mdb_warn("can't read cache %p for leaked "
			    "buffer %p", lkb->lkb_data, lkb->lkb_addr);
			return;
		}

		lk_ttl += lkb->lkb_dups + 1;
		lk_bytes += (lkb->lkb_dups + 1) * cache.cache_bufsize;

		caller = (lkb->lkb_depth == 0) ? 0 : lkb->lkb_stack[0];
		if (caller != 0) {
			(void) mdb_snprintf(c, sizeof (c), "%a", caller);
		} else {
			(void) mdb_snprintf(c, sizeof (c), "%s",
			    (verbose) ? "" : "?");
		}

		if (!verbose) {
			mdb_printf("%0?p %7d %0?p %s\n", lkb->lkb_cid,
			    lkb->lkb_dups + 1, lkb->lkb_addr, c);
		} else {
			if (lk_ttl == 1)
				mdb_printf("%s leak: 1 buffer, %ld bytes,\n",
				    cache.cache_name, lk_bytes);
			else
				mdb_printf("%s leak: %d buffers, "
				    "%ld bytes each, %ld bytes total,\n",
				    cache.cache_name, lk_ttl,
				    cache.cache_bufsize, lk_bytes);
			mdb_printf("    %s%s%ssample addr %p\n",
			    (caller == 0) ? "" : "caller ", c,
			    (caller == 0) ? "" : ", ", lkb->lkb_addr);
		}
		return;

	case TYPE_UMEM:
		if (!lk_umem_seen) {
			lk_umem_seen = 1;
			if (lk_vmem_seen || lk_cache_seen)
				mdb_printf("\n");
			mdb_printf("%-?s %7s %?s %s\n",
			    "CACHE", "LEAKED", "BUFCTL", "CALLER");
		}
		if (mdb_vread(&cache, sizeof (cache), lkb->lkb_data) == -1) {
			/*
			 * This _really_ shouldn't happen; we shouldn't
			 * have been able to get this far if this
			 * cache wasn't readable.
			 */
			mdb_warn("can't read cache %p for leaked "
			    "bufctl %p", lkb->lkb_data, lkb->lkb_addr);
			return;
		}

		lk_ttl += lkb->lkb_dups + 1;
		lk_bytes += (lkb->lkb_dups + 1) * cache.cache_bufsize;

		if (!verbose) {
			leaky_subr_caller(lkb->lkb_stack, lkb->lkb_depth, c,
			    &caller);

			mdb_printf("%0?p %7d %0?p %a\n", lkb->lkb_data,
			    lkb->lkb_dups + 1, lkb->lkb_addr, caller);
		} else {
			mdb_arg_t v;

			if (lk_ttl == 1)
				mdb_printf("%s leak: 1 buffer, %ld bytes\n",
				    cache.cache_name, lk_bytes);
			else
				mdb_printf("%s leak: %d buffers, "
				    "%ld bytes each, %ld bytes total\n",
				    cache.cache_name, lk_ttl,
				    cache.cache_bufsize, lk_bytes);

			v.a_type = MDB_TYPE_STRING;
			v.a_un.a_str = "-v";

			if (mdb_call_dcmd("bufctl", lkb->lkb_addr,
			    DCMD_ADDRSPEC, 1, &v) == -1) {
				mdb_warn("'%p::bufctl -v' failed",
				    lkb->lkb_addr);
			}
		}
		return;

	default:
		return;
	}
}

void
leaky_subr_dump_end(int type)
{
	int i;
	int width;
	const char *leak;

	switch (type) {
	case TYPE_VMEM:
		if (!lk_vmem_seen)
			return;

		width = 16;
		leak = "oversized leak";
		break;

	case TYPE_CACHE:
		if (!lk_cache_seen)
			return;

		width = sizeof (uintptr_t) * 2;
		leak = "buffer";
		break;

	case TYPE_UMEM:
		if (!lk_umem_seen)
			return;

		width = sizeof (uintptr_t) * 2;
		leak = "buffer";
		break;

	default:
		return;
	}

	for (i = 0; i < 72; i++)
		mdb_printf("-");
	mdb_printf("\n%*s %7ld %s%s, %ld byte%s\n",
	    width, "Total", lk_ttl, leak, (lk_ttl == 1) ? "" : "s",
	    lk_bytes, (lk_bytes == 1) ? "" : "s");
}

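/*
 * Hand a leak back to the caller's walker callback, paired with a
 * freshly read vmem_seg or audit bufctl when one exists.
 */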
int
leaky_subr_invoke_callback(const leak_bufctl_t *lkb, mdb_walk_cb_t cb,
    void *cbdata)
{
	vmem_seg_t vs;
	umem_bufctl_audit_t *bcp;
	UMEM_LOCAL_BUFCTL_AUDIT(&bcp);

	switch (lkb->lkb_type) {
	case TYPE_VMEM:
		if (mdb_vread(&vs, sizeof (vs), lkb->lkb_addr) == -1) {
			mdb_warn("unable to read vmem_seg at %p",
			    lkb->lkb_addr);
			return (WALK_NEXT);
		}
		return (cb(lkb->lkb_addr, &vs, cbdata));

	case TYPE_UMEM:
		if (mdb_vread(bcp, UMEM_BUFCTL_AUDIT_SIZE,
		    lkb->lkb_addr) == -1) {
			mdb_warn("unable to read bufctl at %p",
			    lkb->lkb_addr);
			return (WALK_NEXT);
		}
		return (cb(lkb->lkb_addr, bcp, cbdata));

	default:
		return (cb(lkb->lkb_addr, NULL, cbdata));
	}
}