/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright 2018 Joyent, Inc. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <mdb/mdb_param.h>
#include <mdb/mdb_modapi.h>
#include <mdb/mdb_ctf.h>
#include <mdb/mdb_whatis.h>
#include <sys/cpuvar.h>
#include <sys/kmem_impl.h>
#include <sys/vmem_impl.h>
#include <sys/machelf.h>
#include <sys/modctl.h>
#include <sys/kobj.h>
#include <sys/panic.h>
#include <sys/stack.h>
#include <sys/sysmacros.h>
#include <vm/page.h>

#include "avl.h"
#include "combined.h"
#include "dist.h"
#include "kmem.h"
#include "list.h"

#define	dprintf(x) if (mdb_debug_level) { \
	mdb_printf("kmem debug: "); \
	/*CSTYLED*/\
	mdb_printf x ;\
}
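
/*
 * dprintf() takes a doubly-parenthesized argument list so that a variable
 * argument list can be passed straight through to mdb_printf(), e.g.
 * (mirroring the calls later in this file):
 *
 *	dprintf(("walking %p\n", addr));
 */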

#define	KM_ALLOCATED		0x01
#define	KM_FREE			0x02
#define	KM_BUFCTL		0x04
#define	KM_CONSTRUCTED		0x08	/* only constructed free buffers */
#define	KM_HASH			0x10

static int mdb_debug_level = 0;

/*ARGSUSED*/
static int
kmem_init_walkers(uintptr_t addr, const kmem_cache_t *c, void *ignored)
{
	mdb_walker_t w;
	char descr[64];

	(void) mdb_snprintf(descr, sizeof (descr),
	    "walk the %s cache", c->cache_name);

	w.walk_name = c->cache_name;
	w.walk_descr = descr;
	w.walk_init = kmem_walk_init;
	w.walk_step = kmem_walk_step;
	w.walk_fini = kmem_walk_fini;
	w.walk_init_arg = (void *)addr;

	if (mdb_add_walker(&w) == -1)
		mdb_warn("failed to add %s walker", c->cache_name);

	return (WALK_NEXT);
}

/*ARGSUSED*/
int
kmem_debug(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	mdb_debug_level ^= 1;

	mdb_printf("kmem: debugging is now %s\n",
	    mdb_debug_level ? "on" : "off");

	return (DCMD_OK);
}

int
kmem_cache_walk_init(mdb_walk_state_t *wsp)
{
	GElf_Sym sym;

	if (mdb_lookup_by_name("kmem_caches", &sym) == -1) {
		mdb_warn("couldn't find kmem_caches");
		return (WALK_ERR);
	}

	wsp->walk_addr = (uintptr_t)sym.st_value;

	return (list_walk_init_named(wsp, "cache list", "cache"));
}

int
kmem_cpu_cache_walk_init(mdb_walk_state_t *wsp)
{
	if (wsp->walk_addr == NULL) {
		mdb_warn("kmem_cpu_cache doesn't support global walks");
		return (WALK_ERR);
	}

	if (mdb_layered_walk("cpu", wsp) == -1) {
		mdb_warn("couldn't walk 'cpu'");
		return (WALK_ERR);
	}

	wsp->walk_data = (void *)wsp->walk_addr;

	return (WALK_NEXT);
}

int
kmem_cpu_cache_walk_step(mdb_walk_state_t *wsp)
{
	uintptr_t caddr = (uintptr_t)wsp->walk_data;
	const cpu_t *cpu = wsp->walk_layer;
	kmem_cpu_cache_t cc;

	caddr += OFFSETOF(kmem_cache_t, cache_cpu[cpu->cpu_seqid]);

	if (mdb_vread(&cc, sizeof (kmem_cpu_cache_t), caddr) == -1) {
		mdb_warn("couldn't read kmem_cpu_cache at %p", caddr);
		return (WALK_ERR);
	}

	return (wsp->walk_callback(caddr, &cc, wsp->walk_cbdata));
}

static int
kmem_slab_check(void *p, uintptr_t saddr, void *arg)
{
	kmem_slab_t *sp = p;
	uintptr_t caddr = (uintptr_t)arg;
	if ((uintptr_t)sp->slab_cache != caddr) {
		mdb_warn("slab %p isn't in cache %p (in cache %p)\n",
		    saddr, caddr, sp->slab_cache);
		return (-1);
	}

	return (0);
}

static int
kmem_partial_slab_check(void *p, uintptr_t saddr, void *arg)
{
	kmem_slab_t *sp = p;

	int rc = kmem_slab_check(p, saddr, arg);
	if (rc != 0) {
		return (rc);
	}

	if (!KMEM_SLAB_IS_PARTIAL(sp)) {
		mdb_warn("slab %p is not a partial slab\n", saddr);
		return (-1);
	}

	return (0);
}

static int
kmem_complete_slab_check(void *p, uintptr_t saddr, void *arg)
{
	kmem_slab_t *sp = p;

	int rc = kmem_slab_check(p, saddr, arg);
	if (rc != 0) {
		return (rc);
	}

	if (!KMEM_SLAB_IS_ALL_USED(sp)) {
		mdb_warn("slab %p is not completely allocated\n", saddr);
		return (-1);
	}

	return (0);
}

typedef struct {
	uintptr_t kns_cache_addr;
	int kns_nslabs;
} kmem_nth_slab_t;

static int
kmem_nth_slab_check(void *p, uintptr_t saddr, void *arg)
{
	kmem_nth_slab_t *chkp = arg;

	int rc = kmem_slab_check(p, saddr, (void *)chkp->kns_cache_addr);
	if (rc != 0) {
		return (rc);
	}

	return (chkp->kns_nslabs-- == 0 ? 1 : 0);
}

static int
kmem_complete_slab_walk_init(mdb_walk_state_t *wsp)
{
	uintptr_t caddr = wsp->walk_addr;

	wsp->walk_addr = (uintptr_t)(caddr +
	    offsetof(kmem_cache_t, cache_complete_slabs));

	return (list_walk_init_checked(wsp, "slab list", "slab",
	    kmem_complete_slab_check, (void *)caddr));
}

static int
kmem_partial_slab_walk_init(mdb_walk_state_t *wsp)
{
	uintptr_t caddr = wsp->walk_addr;

	wsp->walk_addr = (uintptr_t)(caddr +
	    offsetof(kmem_cache_t, cache_partial_slabs));

	return (avl_walk_init_checked(wsp, "slab list", "slab",
	    kmem_partial_slab_check, (void *)caddr));
}

int
kmem_slab_walk_init(mdb_walk_state_t *wsp)
{
	uintptr_t caddr = wsp->walk_addr;

	if (caddr == NULL) {
		mdb_warn("kmem_slab doesn't support global walks\n");
		return (WALK_ERR);
	}

	combined_walk_init(wsp);
	combined_walk_add(wsp,
	    kmem_complete_slab_walk_init, list_walk_step, list_walk_fini);
	combined_walk_add(wsp,
	    kmem_partial_slab_walk_init, avl_walk_step, avl_walk_fini);

	return (WALK_NEXT);
}

static int
kmem_first_complete_slab_walk_init(mdb_walk_state_t *wsp)
{
	uintptr_t caddr = wsp->walk_addr;
	kmem_nth_slab_t *chk;

	chk = mdb_alloc(sizeof (kmem_nth_slab_t),
	    UM_SLEEP | UM_GC);
	chk->kns_cache_addr = caddr;
	chk->kns_nslabs = 1;
	wsp->walk_addr = (uintptr_t)(caddr +
	    offsetof(kmem_cache_t, cache_complete_slabs));

	return (list_walk_init_checked(wsp, "slab list", "slab",
	    kmem_nth_slab_check, chk));
}

int
kmem_slab_walk_partial_init(mdb_walk_state_t *wsp)
{
	uintptr_t caddr = wsp->walk_addr;
	kmem_cache_t c;

	if (caddr == NULL) {
		mdb_warn("kmem_slab_partial doesn't support global walks\n");
		return (WALK_ERR);
	}

	if (mdb_vread(&c, sizeof (c), caddr) == -1) {
		mdb_warn("couldn't read kmem_cache at %p", caddr);
		return (WALK_ERR);
	}

	combined_walk_init(wsp);

	/*
	 * Some consumers (umem_walk_step(), in particular) require at
	 * least one callback if there are any buffers in the cache. So
	 * if there are *no* partial slabs, report the first full slab, if
	 * any.
	 *
	 * Yes, this is ugly, but it's cleaner than the other possibilities.
	 */
	if (c.cache_partial_slabs.avl_numnodes == 0) {
		combined_walk_add(wsp, kmem_first_complete_slab_walk_init,
		    list_walk_step, list_walk_fini);
	} else {
		combined_walk_add(wsp, kmem_partial_slab_walk_init,
		    avl_walk_step, avl_walk_fini);
	}

	return (WALK_NEXT);
}

int
kmem_cache(uintptr_t addr, uint_t flags, int ac, const mdb_arg_t *argv)
{
	kmem_cache_t c;
	const char *filter = NULL;

	if (mdb_getopts(ac, argv,
	    'n', MDB_OPT_STR, &filter,
	    NULL) != ac) {
		return (DCMD_USAGE);
	}

	if (!(flags & DCMD_ADDRSPEC)) {
		if (mdb_walk_dcmd("kmem_cache", "kmem_cache", ac, argv) == -1) {
			mdb_warn("can't walk kmem_cache");
			return (DCMD_ERR);
		}
		return (DCMD_OK);
	}

	if (DCMD_HDRSPEC(flags))
		mdb_printf("%-?s %-25s %4s %6s %8s %8s\n", "ADDR", "NAME",
		    "FLAG", "CFLAG", "BUFSIZE", "BUFTOTL");

	if (mdb_vread(&c, sizeof (c), addr) == -1) {
		mdb_warn("couldn't read kmem_cache at %p", addr);
		return (DCMD_ERR);
	}

	if ((filter != NULL) && (strstr(c.cache_name, filter) == NULL))
		return (DCMD_OK);

	mdb_printf("%0?p %-25s %04x %06x %8ld %8lld\n", addr, c.cache_name,
	    c.cache_flags, c.cache_cflags, c.cache_bufsize, c.cache_buftotal);

	return (DCMD_OK);
}

void
kmem_cache_help(void)
{
	mdb_printf("%s", "Print kernel memory caches.\n\n");
	mdb_dec_indent(2);
	mdb_printf("%<b>OPTIONS%</b>\n");
	mdb_inc_indent(2);
	mdb_printf("%s",
" -n name\n"
" name of kmem cache (or matching partial name)\n"
"\n"
"Column\tDescription\n"
"\n"
"ADDR\t\taddress of kmem cache\n"
"NAME\t\tname of kmem cache\n"
"FLAG\t\tvarious cache state flags\n"
"CFLAG\t\tcache creation flags\n"
"BUFSIZE\tobject size in bytes\n"
"BUFTOTL\tcurrent total buffers in cache (allocated and free)\n");
}
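
/*
 * Example (cache name, address, and counts are illustrative):
 *
 *	> ffffff01d0b5a008::kmem_cache
 *	ADDR             NAME                      FLAG  CFLAG  BUFSIZE  BUFTOTL
 *	ffffff01d0b5a008 kmem_alloc_8              020f 080000        8    12345
 *
 * Without an address, ::kmem_cache walks every cache; -n selects caches
 * whose name contains the given substring (see the strstr() test above).
 */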

#define	LABEL_WIDTH	11
static void
kmem_slabs_print_dist(uint_t *ks_bucket, size_t buffers_per_slab,
    size_t maxbuckets, size_t minbucketsize)
{
	uint64_t total;
	int buckets;
	int i;
	const int *distarray;
	int complete[2];

	buckets = buffers_per_slab;

	total = 0;
	for (i = 0; i <= buffers_per_slab; i++)
		total += ks_bucket[i];

	if (maxbuckets > 1)
		buckets = MIN(buckets, maxbuckets);

	if (minbucketsize > 1) {
		/*
		 * minbucketsize does not apply to the first bucket reserved
		 * for completely allocated slabs
		 */
		buckets = MIN(buckets, 1 + ((buffers_per_slab - 1) /
		    minbucketsize));
		if ((buckets < 2) && (buffers_per_slab > 1)) {
			buckets = 2;
			minbucketsize = (buffers_per_slab - 1);
		}
	}
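
	/*
	 * For example (illustrative numbers only): with buffers_per_slab
	 * of 32 and -B 8 (minbucketsize == 8), buckets becomes
	 * MIN(32, 1 + (31 / 8)) == 4: one bucket reserved for completely
	 * allocated slabs plus three ranges covering 1..31 allocated
	 * buffers.
	 */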

	/*
	 * The first printed bucket is reserved for completely allocated slabs.
	 * Passing (buckets - 1) excludes that bucket from the generated
	 * distribution, since we're handling it as a special case.
	 */
	complete[0] = buffers_per_slab;
	complete[1] = buffers_per_slab + 1;
	distarray = dist_linear(buckets - 1, 1, buffers_per_slab - 1);

	mdb_printf("%*s\n", LABEL_WIDTH, "Allocated");
	dist_print_header("Buffers", LABEL_WIDTH, "Slabs");

	dist_print_bucket(complete, 0, ks_bucket, total, LABEL_WIDTH);
	/*
	 * Print bucket ranges in descending order after the first bucket for
	 * completely allocated slabs, so a person can see immediately whether
	 * or not there is fragmentation without having to scan possibly
	 * multiple screens of output. Starting at (buckets - 2) excludes the
	 * extra terminating bucket.
	 */
	for (i = buckets - 2; i >= 0; i--) {
		dist_print_bucket(distarray, i, ks_bucket, total, LABEL_WIDTH);
	}
	mdb_printf("\n");
}
#undef LABEL_WIDTH

/*ARGSUSED*/
static int
kmem_first_slab(uintptr_t addr, const kmem_slab_t *sp, boolean_t *is_slab)
{
	*is_slab = B_TRUE;
	return (WALK_DONE);
}

/*ARGSUSED*/
static int
kmem_first_partial_slab(uintptr_t addr, const kmem_slab_t *sp,
    boolean_t *is_slab)
{
	/*
	 * The "kmem_partial_slab" walker reports the first full slab if there
	 * are no partial slabs (for the sake of consumers that require at
	 * least one callback if there are any buffers in the cache).
	 */
	*is_slab = KMEM_SLAB_IS_PARTIAL(sp);
	return (WALK_DONE);
}

typedef struct kmem_slab_usage {
	int ksu_refcnt;		/* count of allocated buffers on slab */
	boolean_t ksu_nomove;	/* slab marked non-reclaimable */
} kmem_slab_usage_t;

typedef struct kmem_slab_stats {
	const kmem_cache_t *ks_cp;
	int ks_slabs;			/* slabs in cache */
	int ks_partial_slabs;		/* partially allocated slabs in cache */
	uint64_t ks_unused_buffers;	/* total unused buffers in cache */
	int ks_max_buffers_per_slab;	/* max buffers per slab */
	int ks_usage_len;		/* ks_usage array length */
	kmem_slab_usage_t *ks_usage;	/* partial slab usage */
	uint_t *ks_bucket;		/* slab usage distribution */
} kmem_slab_stats_t;

/*ARGSUSED*/
static int
kmem_slablist_stat(uintptr_t addr, const kmem_slab_t *sp,
    kmem_slab_stats_t *ks)
{
	kmem_slab_usage_t *ksu;
	long unused;

	ks->ks_slabs++;
	ks->ks_bucket[sp->slab_refcnt]++;

	unused = (sp->slab_chunks - sp->slab_refcnt);
	if (unused == 0) {
		return (WALK_NEXT);
	}

	ks->ks_partial_slabs++;
	ks->ks_unused_buffers += unused;

	if (ks->ks_partial_slabs > ks->ks_usage_len) {
		kmem_slab_usage_t *usage;
		int len = ks->ks_usage_len;

		len = (len == 0 ? 16 : len * 2);
		usage = mdb_zalloc(len * sizeof (kmem_slab_usage_t), UM_SLEEP);
		if (ks->ks_usage != NULL) {
			bcopy(ks->ks_usage, usage,
			    ks->ks_usage_len * sizeof (kmem_slab_usage_t));
			mdb_free(ks->ks_usage,
			    ks->ks_usage_len * sizeof (kmem_slab_usage_t));
		}
		ks->ks_usage = usage;
		ks->ks_usage_len = len;
	}

	ksu = &ks->ks_usage[ks->ks_partial_slabs - 1];
	ksu->ksu_refcnt = sp->slab_refcnt;
	ksu->ksu_nomove = (sp->slab_flags & KMEM_SLAB_NOMOVE);
	return (WALK_NEXT);
}

static void
kmem_slabs_header()
{
	mdb_printf("%-25s %8s %8s %9s %9s %6s\n",
	    "", "", "Partial", "", "Unused", "");
	mdb_printf("%-25s %8s %8s %9s %9s %6s\n",
	    "Cache Name", "Slabs", "Slabs", "Buffers", "Buffers", "Waste");
	mdb_printf("%-25s %8s %8s %9s %9s %6s\n",
	    "-------------------------", "--------", "--------", "---------",
	    "---------", "------");
}

int
kmem_slabs(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	kmem_cache_t c;
	kmem_slab_stats_t stats;
	mdb_walk_cb_t cb;
	int pct;
	int tenths_pct;
	size_t maxbuckets = 1;
	size_t minbucketsize = 0;
	const char *filter = NULL;
	const char *name = NULL;
	uint_t opt_v = FALSE;
	boolean_t buckets = B_FALSE;
	boolean_t skip = B_FALSE;

	if (mdb_getopts(argc, argv,
	    'B', MDB_OPT_UINTPTR, &minbucketsize,
	    'b', MDB_OPT_UINTPTR, &maxbuckets,
	    'n', MDB_OPT_STR, &filter,
	    'N', MDB_OPT_STR, &name,
	    'v', MDB_OPT_SETBITS, TRUE, &opt_v,
	    NULL) != argc) {
		return (DCMD_USAGE);
	}

	if ((maxbuckets != 1) || (minbucketsize != 0)) {
		buckets = B_TRUE;
	}

	if (!(flags & DCMD_ADDRSPEC)) {
		if (mdb_walk_dcmd("kmem_cache", "kmem_slabs", argc,
		    argv) == -1) {
			mdb_warn("can't walk kmem_cache");
			return (DCMD_ERR);
		}
		return (DCMD_OK);
	}

	if (mdb_vread(&c, sizeof (c), addr) == -1) {
		mdb_warn("couldn't read kmem_cache at %p", addr);
		return (DCMD_ERR);
	}

	if (name == NULL) {
		skip = ((filter != NULL) &&
		    (strstr(c.cache_name, filter) == NULL));
	} else if (filter == NULL) {
		skip = (strcmp(c.cache_name, name) != 0);
	} else {
		/* match either -n or -N */
		skip = ((strcmp(c.cache_name, name) != 0) &&
		    (strstr(c.cache_name, filter) == NULL));
	}

	if (!(opt_v || buckets) && DCMD_HDRSPEC(flags)) {
		kmem_slabs_header();
	} else if ((opt_v || buckets) && !skip) {
		if (DCMD_HDRSPEC(flags)) {
			kmem_slabs_header();
		} else {
			boolean_t is_slab = B_FALSE;
			const char *walker_name;
			if (opt_v) {
				cb = (mdb_walk_cb_t)kmem_first_partial_slab;
				walker_name = "kmem_slab_partial";
			} else {
				cb = (mdb_walk_cb_t)kmem_first_slab;
				walker_name = "kmem_slab";
			}
			(void) mdb_pwalk(walker_name, cb, &is_slab, addr);
			if (is_slab) {
				kmem_slabs_header();
			}
		}
	}

	if (skip) {
		return (DCMD_OK);
	}

	bzero(&stats, sizeof (kmem_slab_stats_t));
	stats.ks_cp = &c;
	stats.ks_max_buffers_per_slab = c.cache_maxchunks;
	/* +1 to include a zero bucket */
	stats.ks_bucket = mdb_zalloc((stats.ks_max_buffers_per_slab + 1) *
	    sizeof (*stats.ks_bucket), UM_SLEEP);
	cb = (mdb_walk_cb_t)kmem_slablist_stat;
	(void) mdb_pwalk("kmem_slab", cb, &stats, addr);

	if (c.cache_buftotal == 0) {
		pct = 0;
		tenths_pct = 0;
	} else {
		uint64_t n = stats.ks_unused_buffers * 10000;
		pct = (int)(n / c.cache_buftotal);
		tenths_pct = pct - ((pct / 100) * 100);
		tenths_pct = (tenths_pct + 5) / 10; /* round nearest tenth */
		if (tenths_pct == 10) {
			pct += 100;
			tenths_pct = 0;
		}
	}
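
	/*
	 * Worked example (illustrative numbers only): with 37 unused
	 * buffers out of 1200 total, n is 370000 and pct is 308 (i.e.
	 * 3.08%), so tenths_pct rounds to 1; the division below then
	 * reduces pct to 3 and the line prints as "3.1%".
	 */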

	pct /= 100;
	mdb_printf("%-25s %8d %8d %9lld %9lld %3d.%1d%%\n", c.cache_name,
	    stats.ks_slabs, stats.ks_partial_slabs, c.cache_buftotal,
	    stats.ks_unused_buffers, pct, tenths_pct);

	if (maxbuckets == 0) {
		maxbuckets = stats.ks_max_buffers_per_slab;
	}

	if (((maxbuckets > 1) || (minbucketsize > 0)) &&
	    (stats.ks_slabs > 0)) {
		mdb_printf("\n");
		kmem_slabs_print_dist(stats.ks_bucket,
		    stats.ks_max_buffers_per_slab, maxbuckets, minbucketsize);
	}

	mdb_free(stats.ks_bucket, (stats.ks_max_buffers_per_slab + 1) *
	    sizeof (*stats.ks_bucket));

	if (!opt_v) {
		return (DCMD_OK);
	}

	if (opt_v && (stats.ks_partial_slabs > 0)) {
		int i;
		kmem_slab_usage_t *ksu;

		mdb_printf(" %d complete (%d), %d partial:",
		    (stats.ks_slabs - stats.ks_partial_slabs),
		    stats.ks_max_buffers_per_slab,
		    stats.ks_partial_slabs);

		for (i = 0; i < stats.ks_partial_slabs; i++) {
			ksu = &stats.ks_usage[i];
			mdb_printf(" %d%s", ksu->ksu_refcnt,
			    (ksu->ksu_nomove ? "*" : ""));
		}
		mdb_printf("\n\n");
	}

	if (stats.ks_usage_len > 0) {
		mdb_free(stats.ks_usage,
		    stats.ks_usage_len * sizeof (kmem_slab_usage_t));
	}

	return (DCMD_OK);
}

void
kmem_slabs_help(void)
{
	mdb_printf("%s",
	    "Display slab usage per kmem cache.\n\n");
	mdb_dec_indent(2);
	mdb_printf("%<b>OPTIONS%</b>\n");
	mdb_inc_indent(2);
	mdb_printf("%s",
" -n name\n"
" name of kmem cache (or matching partial name)\n"
" -N name\n"
" exact name of kmem cache\n"
" -b maxbins\n"
" Print a distribution of allocated buffers per slab using at\n"
" most maxbins bins. The first bin is reserved for completely\n"
" allocated slabs. Setting maxbins to zero (-b 0) has the same\n"
" effect as specifying the maximum allocated buffers per slab\n"
" or setting minbinsize to 1 (-B 1).\n"
" -B minbinsize\n"
" Print a distribution of allocated buffers per slab, making\n"
" all bins (except the first, reserved for completely allocated\n"
" slabs) at least minbinsize buffers apart.\n"
" -v verbose output: List the allocated buffer count of each partial\n"
" slab on the free list in order from front to back to show how\n"
" closely the slabs are ordered by usage. For example\n"
"\n"
" 10 complete, 3 partial (8): 7 3 1\n"
"\n"
" means there are thirteen slabs with eight buffers each, including\n"
" three partially allocated slabs with less than all eight buffers\n"
" allocated.\n"
"\n"
" Buffer allocations are always from the front of the partial slab\n"
" list. When a buffer is freed from a completely used slab, that\n"
" slab is added to the front of the partial slab list. Assuming\n"
" that all buffers are equally likely to be freed soon, the\n"
" desired order of partial slabs is most-used at the front of the\n"
" list and least-used at the back (as in the example above).\n"
" However, if a slab contains an allocated buffer that will not\n"
" soon be freed, it would be better for that slab to be at the\n"
" front where all of its buffers can be allocated. Taking a slab\n"
" off the partial slab list (either with all buffers freed or all\n"
" buffers allocated) reduces cache fragmentation.\n"
"\n"
" A slab's allocated buffer count representing a partial slab (9 in\n"
" the example below) may be marked as follows:\n"
"\n"
" 9* An asterisk indicates that kmem has marked the slab non-\n"
" reclaimable because the kmem client refused to move one of the\n"
" slab's buffers. Since kmem does not expect to completely free the\n"
" slab, it moves it to the front of the list in the hope of\n"
" completely allocating it instead. A slab marked with an asterisk\n"
" stays marked for as long as it remains on the partial slab list.\n"
"\n"
"Column\t\tDescription\n"
"\n"
"Cache Name\t\tname of kmem cache\n"
"Slabs\t\t\ttotal slab count\n"
"Partial Slabs\t\tcount of partially allocated slabs on the free list\n"
"Buffers\t\ttotal buffer count (Slabs * (buffers per slab))\n"
"Unused Buffers\tcount of unallocated buffers across all partial slabs\n"
"Waste\t\t\t(Unused Buffers / Buffers) does not include space\n"
"\t\t\t for accounting structures (debug mode), slab\n"
"\t\t\t coloring (incremental small offsets to stagger\n"
"\t\t\t buffer alignment), or the per-CPU magazine layer\n");
}

static int
addrcmp(const void *lhs, const void *rhs)
{
	uintptr_t p1 = *((uintptr_t *)lhs);
	uintptr_t p2 = *((uintptr_t *)rhs);

	if (p1 < p2)
		return (-1);
	if (p1 > p2)
		return (1);
	return (0);
}

static int
bufctlcmp(const kmem_bufctl_audit_t **lhs, const kmem_bufctl_audit_t **rhs)
{
	const kmem_bufctl_audit_t *bcp1 = *lhs;
	const kmem_bufctl_audit_t *bcp2 = *rhs;

	if (bcp1->bc_timestamp > bcp2->bc_timestamp)
		return (-1);

	if (bcp1->bc_timestamp < bcp2->bc_timestamp)
		return (1);

	return (0);
}

typedef struct kmem_hash_walk {
	uintptr_t *kmhw_table;
	size_t kmhw_nelems;
	size_t kmhw_pos;
	kmem_bufctl_t kmhw_cur;
} kmem_hash_walk_t;

int
kmem_hash_walk_init(mdb_walk_state_t *wsp)
{
	kmem_hash_walk_t *kmhw;
	uintptr_t *hash;
	kmem_cache_t c;
	uintptr_t haddr, addr = wsp->walk_addr;
	size_t nelems;
	size_t hsize;

	if (addr == NULL) {
		mdb_warn("kmem_hash doesn't support global walks\n");
		return (WALK_ERR);
	}

	if (mdb_vread(&c, sizeof (c), addr) == -1) {
		mdb_warn("couldn't read cache at addr %p", addr);
		return (WALK_ERR);
	}

	if (!(c.cache_flags & KMF_HASH)) {
		mdb_warn("cache %p doesn't have a hash table\n", addr);
		return (WALK_DONE);	/* nothing to do */
	}

	kmhw = mdb_zalloc(sizeof (kmem_hash_walk_t), UM_SLEEP);
	kmhw->kmhw_cur.bc_next = NULL;
	kmhw->kmhw_pos = 0;

	kmhw->kmhw_nelems = nelems = c.cache_hash_mask + 1;
	hsize = nelems * sizeof (uintptr_t);
	haddr = (uintptr_t)c.cache_hash_table;

	kmhw->kmhw_table = hash = mdb_alloc(hsize, UM_SLEEP);
	if (mdb_vread(hash, hsize, haddr) == -1) {
		mdb_warn("failed to read hash table at %p", haddr);
		mdb_free(hash, hsize);
		mdb_free(kmhw, sizeof (kmem_hash_walk_t));
		return (WALK_ERR);
	}

	wsp->walk_data = kmhw;

	return (WALK_NEXT);
}

int
kmem_hash_walk_step(mdb_walk_state_t *wsp)
{
	kmem_hash_walk_t *kmhw = wsp->walk_data;
	uintptr_t addr = NULL;

	if ((addr = (uintptr_t)kmhw->kmhw_cur.bc_next) == NULL) {
		while (kmhw->kmhw_pos < kmhw->kmhw_nelems) {
			if ((addr = kmhw->kmhw_table[kmhw->kmhw_pos++]) != NULL)
				break;
		}
	}
	if (addr == NULL)
		return (WALK_DONE);

	if (mdb_vread(&kmhw->kmhw_cur, sizeof (kmem_bufctl_t), addr) == -1) {
		mdb_warn("couldn't read kmem_bufctl_t at addr %p", addr);
		return (WALK_ERR);
	}

	return (wsp->walk_callback(addr, &kmhw->kmhw_cur, wsp->walk_cbdata));
}

void
kmem_hash_walk_fini(mdb_walk_state_t *wsp)
{
	kmem_hash_walk_t *kmhw = wsp->walk_data;

	if (kmhw == NULL)
		return;

	mdb_free(kmhw->kmhw_table, kmhw->kmhw_nelems * sizeof (uintptr_t));
	mdb_free(kmhw, sizeof (kmem_hash_walk_t));
}

/*
 * Find the address of the bufctl structure for the address 'buf' in cache
 * 'cp', which is at address caddr, and place it in *out.
 */
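/*
 * This mirrors the kernel's own lookup: KMEM_HASH(cp, buf) selects the hash
 * bucket for the buffer, and the chain is then followed through bc_next
 * until a bufctl whose bc_addr matches 'buf' is found.
 */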
static int
kmem_hash_lookup(kmem_cache_t *cp, uintptr_t caddr, void *buf, uintptr_t *out)
{
	uintptr_t bucket = (uintptr_t)KMEM_HASH(cp, buf);
	kmem_bufctl_t *bcp;
	kmem_bufctl_t bc;

	if (mdb_vread(&bcp, sizeof (kmem_bufctl_t *), bucket) == -1) {
		mdb_warn("unable to read hash bucket for %p in cache %p",
		    buf, caddr);
		return (-1);
	}

	while (bcp != NULL) {
		if (mdb_vread(&bc, sizeof (kmem_bufctl_t),
		    (uintptr_t)bcp) == -1) {
			mdb_warn("unable to read bufctl at %p", bcp);
			return (-1);
		}
		if (bc.bc_addr == buf) {
			*out = (uintptr_t)bcp;
			return (0);
		}
		bcp = bc.bc_next;
	}

	mdb_warn("unable to find bufctl for %p in cache %p\n", buf, caddr);
	return (-1);
}

int
kmem_get_magsize(const kmem_cache_t *cp)
{
	uintptr_t addr = (uintptr_t)cp->cache_magtype;
	GElf_Sym mt_sym;
	kmem_magtype_t mt;
	int res;

	/*
	 * if cpu 0 has a non-zero magsize, it must be correct. caches
	 * with KMF_NOMAGAZINE have disabled their magazine layers, so
	 * it is okay to return 0 for them.
	 */
	if ((res = cp->cache_cpu[0].cc_magsize) != 0 ||
	    (cp->cache_flags & KMF_NOMAGAZINE))
		return (res);

	if (mdb_lookup_by_name("kmem_magtype", &mt_sym) == -1) {
		mdb_warn("unable to read 'kmem_magtype'");
	} else if (addr < mt_sym.st_value ||
	    addr + sizeof (mt) - 1 > mt_sym.st_value + mt_sym.st_size - 1 ||
	    ((addr - mt_sym.st_value) % sizeof (mt)) != 0) {
		mdb_warn("cache '%s' has invalid magtype pointer (%p)\n",
		    cp->cache_name, addr);
		return (0);
	}
	if (mdb_vread(&mt, sizeof (mt), addr) == -1) {
		mdb_warn("unable to read magtype at %a", addr);
		return (0);
	}
	return (mt.mt_magsize);
}

/*ARGSUSED*/
static int
kmem_estimate_slab(uintptr_t addr, const kmem_slab_t *sp, size_t *est)
{
	*est -= (sp->slab_chunks - sp->slab_refcnt);

	return (WALK_NEXT);
}

/*
 * Returns an upper bound on the number of allocated buffers in a given
 * cache.
 */
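/*
 * A rough sketch of the arithmetic (illustrative numbers only): a cache
 * with cache_buftotal == 1000, 40 unused buffers across its partial slabs,
 * and 5 full magazines of 15 rounds in the depot yields an estimate of
 * 1000 - 40 - (5 * 15) == 885 allocated buffers. Buffers sitting in
 * per-CPU loaded magazines are not subtracted, which is one reason the
 * result is only an upper bound.
 */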
size_t
kmem_estimate_allocated(uintptr_t addr, const kmem_cache_t *cp)
{
	int magsize;
	size_t cache_est;

	cache_est = cp->cache_buftotal;

	(void) mdb_pwalk("kmem_slab_partial",
	    (mdb_walk_cb_t)kmem_estimate_slab, &cache_est, addr);

	if ((magsize = kmem_get_magsize(cp)) != 0) {
		size_t mag_est = cp->cache_full.ml_total * magsize;

		if (cache_est >= mag_est) {
			cache_est -= mag_est;
		} else {
			mdb_warn("cache %p's magazine layer holds more buffers "
			    "than the slab layer.\n", addr);
		}
	}
	return (cache_est);
}

#define	READMAG_ROUNDS(rounds) { \
	if (mdb_vread(mp, magbsize, (uintptr_t)kmp) == -1) { \
		mdb_warn("couldn't read magazine at %p", kmp); \
		goto fail; \
	} \
	for (i = 0; i < rounds; i++) { \
		maglist[magcnt++] = mp->mag_round[i]; \
		if (magcnt == magmax) { \
			mdb_warn("%d magazines exceeds fudge factor\n", \
			    magcnt); \
			goto fail; \
		} \
	} \
}
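
/*
 * READMAG_ROUNDS() expands in place inside kmem_read_magazines() below; it
 * relies on that function's locals (kmp, mp, magbsize, maglist, magcnt,
 * magmax, i) and on its "fail" label for error handling.
 */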

int
kmem_read_magazines(kmem_cache_t *cp, uintptr_t addr, int ncpus,
    void ***maglistp, size_t *magcntp, size_t *magmaxp, int alloc_flags)
{
	kmem_magazine_t *kmp, *mp;
	void **maglist = NULL;
	int i, cpu;
	size_t magsize, magmax, magbsize;
	size_t magcnt = 0;

	/*
	 * Read the magtype out of the cache, after verifying the pointer's
	 * correctness.
	 */
	magsize = kmem_get_magsize(cp);
	if (magsize == 0) {
		*maglistp = NULL;
		*magcntp = 0;
		*magmaxp = 0;
		return (WALK_NEXT);
	}

	/*
	 * There are several places where we need to go buffer hunting:
	 * the per-CPU loaded magazine, the per-CPU spare full magazine,
	 * and the full magazine list in the depot.
	 *
	 * For an upper bound on the number of buffers in the magazine
	 * layer, we have the number of magazines on the cache_full
	 * list plus at most two magazines per CPU (the loaded and the
	 * spare). Toss in 100 magazines as a fudge factor in case this
	 * is live (the number "100" comes from the same fudge factor in
	 * crash(1M)).
	 */
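	/*
	 * For example (illustrative numbers only): with 8 CPUs, 20 full
	 * magazines in the depot, and a magazine size of 15 rounds,
	 * magmax below works out to (20 + 2 * 8 + 100) * 15 == 2040
	 * buffer pointers.
	 */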
10157c478bd9Sstevel@tonic-gate magmax = (cp->cache_full.ml_total + 2 * ncpus + 100) * magsize;
10167c478bd9Sstevel@tonic-gate magbsize = offsetof(kmem_magazine_t, mag_round[magsize]);
10177c478bd9Sstevel@tonic-gate
10187c478bd9Sstevel@tonic-gate if (magbsize >= PAGESIZE / 2) {
10197c478bd9Sstevel@tonic-gate mdb_warn("magazine size for cache %p unreasonable (%x)\n",
10207c478bd9Sstevel@tonic-gate addr, magbsize);
1021789d94c2Sjwadams return (WALK_ERR);
10227c478bd9Sstevel@tonic-gate }
10237c478bd9Sstevel@tonic-gate
10247c478bd9Sstevel@tonic-gate maglist = mdb_alloc(magmax * sizeof (void *), alloc_flags);
10257c478bd9Sstevel@tonic-gate mp = mdb_alloc(magbsize, alloc_flags);
10267c478bd9Sstevel@tonic-gate if (mp == NULL || maglist == NULL)
10277c478bd9Sstevel@tonic-gate goto fail;
10287c478bd9Sstevel@tonic-gate
10297c478bd9Sstevel@tonic-gate /*
10307c478bd9Sstevel@tonic-gate * First up: the magazines in the depot (i.e. on the cache_full list).
10317c478bd9Sstevel@tonic-gate */
10327c478bd9Sstevel@tonic-gate for (kmp = cp->cache_full.ml_list; kmp != NULL; ) {
10337c478bd9Sstevel@tonic-gate READMAG_ROUNDS(magsize);
10347c478bd9Sstevel@tonic-gate kmp = mp->mag_next;
10357c478bd9Sstevel@tonic-gate
10367c478bd9Sstevel@tonic-gate if (kmp == cp->cache_full.ml_list)
10377c478bd9Sstevel@tonic-gate break; /* cache_full list loop detected */
10387c478bd9Sstevel@tonic-gate }
10397c478bd9Sstevel@tonic-gate
10407c478bd9Sstevel@tonic-gate dprintf(("cache_full list done\n"));
10417c478bd9Sstevel@tonic-gate
10427c478bd9Sstevel@tonic-gate /*
10437c478bd9Sstevel@tonic-gate * Now whip through the CPUs, snagging the loaded magazines
10447c478bd9Sstevel@tonic-gate * and full spares.
10459dd77bc8SDave Plauger *
10469dd77bc8SDave Plauger * In order to prevent inconsistent dumps, rounds and prounds
10479dd77bc8SDave Plauger * are copied aside before dumping begins.
10487c478bd9Sstevel@tonic-gate */
10497c478bd9Sstevel@tonic-gate for (cpu = 0; cpu < ncpus; cpu++) {
10507c478bd9Sstevel@tonic-gate kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu];
10519dd77bc8SDave Plauger short rounds, prounds;
10529dd77bc8SDave Plauger
10539dd77bc8SDave Plauger if (KMEM_DUMPCC(ccp)) {
10549dd77bc8SDave Plauger rounds = ccp->cc_dump_rounds;
10559dd77bc8SDave Plauger prounds = ccp->cc_dump_prounds;
10569dd77bc8SDave Plauger } else {
10579dd77bc8SDave Plauger rounds = ccp->cc_rounds;
10589dd77bc8SDave Plauger prounds = ccp->cc_prounds;
10599dd77bc8SDave Plauger }
10607c478bd9Sstevel@tonic-gate
10617c478bd9Sstevel@tonic-gate dprintf(("reading cpu cache %p\n",
10627c478bd9Sstevel@tonic-gate (uintptr_t)ccp - (uintptr_t)cp + addr));
10637c478bd9Sstevel@tonic-gate
10649dd77bc8SDave Plauger if (rounds > 0 &&
10657c478bd9Sstevel@tonic-gate (kmp = ccp->cc_loaded) != NULL) {
10669dd77bc8SDave Plauger dprintf(("reading %d loaded rounds\n", rounds));
10679dd77bc8SDave Plauger READMAG_ROUNDS(rounds);
10687c478bd9Sstevel@tonic-gate }
10697c478bd9Sstevel@tonic-gate
10709dd77bc8SDave Plauger if (prounds > 0 &&
10717c478bd9Sstevel@tonic-gate (kmp = ccp->cc_ploaded) != NULL) {
10727c478bd9Sstevel@tonic-gate dprintf(("reading %d previously loaded rounds\n",
10739dd77bc8SDave Plauger prounds));
10749dd77bc8SDave Plauger READMAG_ROUNDS(prounds);
10757c478bd9Sstevel@tonic-gate }
10767c478bd9Sstevel@tonic-gate }
10777c478bd9Sstevel@tonic-gate
10787c478bd9Sstevel@tonic-gate dprintf(("magazine layer: %d buffers\n", magcnt));
10797c478bd9Sstevel@tonic-gate
10807c478bd9Sstevel@tonic-gate if (!(alloc_flags & UM_GC))
10817c478bd9Sstevel@tonic-gate mdb_free(mp, magbsize);
10827c478bd9Sstevel@tonic-gate
10837c478bd9Sstevel@tonic-gate *maglistp = maglist;
10847c478bd9Sstevel@tonic-gate *magcntp = magcnt;
10857c478bd9Sstevel@tonic-gate *magmaxp = magmax;
10867c478bd9Sstevel@tonic-gate
10877c478bd9Sstevel@tonic-gate return (WALK_NEXT);
10887c478bd9Sstevel@tonic-gate
10897c478bd9Sstevel@tonic-gate fail:
10907c478bd9Sstevel@tonic-gate if (!(alloc_flags & UM_GC)) {
10917c478bd9Sstevel@tonic-gate if (mp)
10927c478bd9Sstevel@tonic-gate mdb_free(mp, magbsize);
10937c478bd9Sstevel@tonic-gate if (maglist)
10947c478bd9Sstevel@tonic-gate mdb_free(maglist, magmax * sizeof (void *));
10957c478bd9Sstevel@tonic-gate }
10967c478bd9Sstevel@tonic-gate return (WALK_ERR);
10977c478bd9Sstevel@tonic-gate }
10987c478bd9Sstevel@tonic-gate
10997c478bd9Sstevel@tonic-gate static int
kmem_walk_callback(mdb_walk_state_t * wsp,uintptr_t buf)11007c478bd9Sstevel@tonic-gate kmem_walk_callback(mdb_walk_state_t *wsp, uintptr_t buf)
11017c478bd9Sstevel@tonic-gate {
11027c478bd9Sstevel@tonic-gate return (wsp->walk_callback(buf, NULL, wsp->walk_cbdata));
11037c478bd9Sstevel@tonic-gate }
11047c478bd9Sstevel@tonic-gate
11057c478bd9Sstevel@tonic-gate static int
bufctl_walk_callback(kmem_cache_t * cp,mdb_walk_state_t * wsp,uintptr_t buf)11067c478bd9Sstevel@tonic-gate bufctl_walk_callback(kmem_cache_t *cp, mdb_walk_state_t *wsp, uintptr_t buf)
11077c478bd9Sstevel@tonic-gate {
11087c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t b;
11097c478bd9Sstevel@tonic-gate
11107c478bd9Sstevel@tonic-gate /*
11117c478bd9Sstevel@tonic-gate * if KMF_AUDIT is not set, we know that we're looking at a
11127c478bd9Sstevel@tonic-gate * kmem_bufctl_t.
11137c478bd9Sstevel@tonic-gate */
11147c478bd9Sstevel@tonic-gate if (!(cp->cache_flags & KMF_AUDIT) ||
11157c478bd9Sstevel@tonic-gate mdb_vread(&b, sizeof (kmem_bufctl_audit_t), buf) == -1) {
11167c478bd9Sstevel@tonic-gate (void) memset(&b, 0, sizeof (b));
11177c478bd9Sstevel@tonic-gate if (mdb_vread(&b, sizeof (kmem_bufctl_t), buf) == -1) {
11187c478bd9Sstevel@tonic-gate mdb_warn("unable to read bufctl at %p", buf);
11197c478bd9Sstevel@tonic-gate return (WALK_ERR);
11207c478bd9Sstevel@tonic-gate }
11217c478bd9Sstevel@tonic-gate }
11227c478bd9Sstevel@tonic-gate
11237c478bd9Sstevel@tonic-gate return (wsp->walk_callback(buf, &b, wsp->walk_cbdata));
11247c478bd9Sstevel@tonic-gate }
11257c478bd9Sstevel@tonic-gate
11267c478bd9Sstevel@tonic-gate typedef struct kmem_walk {
11277c478bd9Sstevel@tonic-gate int kmw_type;
11287c478bd9Sstevel@tonic-gate
1129d7dba7e5SBryan Cantrill uintptr_t kmw_addr; /* cache address */
11307c478bd9Sstevel@tonic-gate kmem_cache_t *kmw_cp;
11317c478bd9Sstevel@tonic-gate size_t kmw_csize;
11327c478bd9Sstevel@tonic-gate
11337c478bd9Sstevel@tonic-gate /*
11347c478bd9Sstevel@tonic-gate * magazine layer
11357c478bd9Sstevel@tonic-gate */
11367c478bd9Sstevel@tonic-gate void **kmw_maglist;
11377c478bd9Sstevel@tonic-gate size_t kmw_max;
11387c478bd9Sstevel@tonic-gate size_t kmw_count;
11397c478bd9Sstevel@tonic-gate size_t kmw_pos;
11407c478bd9Sstevel@tonic-gate
11417c478bd9Sstevel@tonic-gate /*
11427c478bd9Sstevel@tonic-gate * slab layer
11437c478bd9Sstevel@tonic-gate */
11447c478bd9Sstevel@tonic-gate char *kmw_valid; /* to keep track of freed buffers */
11457c478bd9Sstevel@tonic-gate char *kmw_ubase; /* buffer for slab data */
11467c478bd9Sstevel@tonic-gate } kmem_walk_t;
11477c478bd9Sstevel@tonic-gate
11487c478bd9Sstevel@tonic-gate static int
11497c478bd9Sstevel@tonic-gate kmem_walk_init_common(mdb_walk_state_t *wsp, int type)
11507c478bd9Sstevel@tonic-gate {
11517c478bd9Sstevel@tonic-gate kmem_walk_t *kmw;
11527c478bd9Sstevel@tonic-gate int ncpus, csize;
11537c478bd9Sstevel@tonic-gate kmem_cache_t *cp;
1154789d94c2Sjwadams size_t vm_quantum;
11557c478bd9Sstevel@tonic-gate
11567c478bd9Sstevel@tonic-gate size_t magmax, magcnt;
11577c478bd9Sstevel@tonic-gate void **maglist = NULL;
11587c478bd9Sstevel@tonic-gate uint_t chunksize, slabsize;
11597c478bd9Sstevel@tonic-gate int status = WALK_ERR;
11607c478bd9Sstevel@tonic-gate uintptr_t addr = wsp->walk_addr;
11617c478bd9Sstevel@tonic-gate const char *layered;
11627c478bd9Sstevel@tonic-gate
11637c478bd9Sstevel@tonic-gate type &= ~KM_HASH;
11647c478bd9Sstevel@tonic-gate
11657c478bd9Sstevel@tonic-gate if (addr == NULL) {
11667c478bd9Sstevel@tonic-gate mdb_warn("kmem walk doesn't support global walks\n");
11677c478bd9Sstevel@tonic-gate return (WALK_ERR);
11687c478bd9Sstevel@tonic-gate }
11697c478bd9Sstevel@tonic-gate
11707c478bd9Sstevel@tonic-gate dprintf(("walking %p\n", addr));
11717c478bd9Sstevel@tonic-gate
11727c478bd9Sstevel@tonic-gate /*
11737c478bd9Sstevel@tonic-gate * First we need to figure out how many CPUs are configured in the
11747c478bd9Sstevel@tonic-gate * system to know how much to slurp out.
11757c478bd9Sstevel@tonic-gate */
11767c478bd9Sstevel@tonic-gate mdb_readvar(&ncpus, "max_ncpus");
11777c478bd9Sstevel@tonic-gate
11787c478bd9Sstevel@tonic-gate csize = KMEM_CACHE_SIZE(ncpus);
11797c478bd9Sstevel@tonic-gate cp = mdb_alloc(csize, UM_SLEEP);
11807c478bd9Sstevel@tonic-gate
11817c478bd9Sstevel@tonic-gate if (mdb_vread(cp, csize, addr) == -1) {
11827c478bd9Sstevel@tonic-gate mdb_warn("couldn't read cache at addr %p", addr);
11837c478bd9Sstevel@tonic-gate goto out2;
11847c478bd9Sstevel@tonic-gate }
11857c478bd9Sstevel@tonic-gate
1186789d94c2Sjwadams /*
1187789d94c2Sjwadams * It's easy for someone to hand us an invalid cache address.
1188789d94c2Sjwadams * Unfortunately, it is hard for this walker to survive an
1189789d94c2Sjwadams * invalid cache cleanly. So we make sure that:
1190789d94c2Sjwadams *
1191789d94c2Sjwadams * 1. the vmem arena for the cache is readable,
1192789d94c2Sjwadams * 2. the vmem arena's quantum is a power of 2,
1193789d94c2Sjwadams * 3. our slabsize is a multiple of the quantum, and
1194789d94c2Sjwadams * 4. our chunksize is >0 and less than our slabsize.
1195789d94c2Sjwadams */
1196789d94c2Sjwadams if (mdb_vread(&vm_quantum, sizeof (vm_quantum),
1197789d94c2Sjwadams (uintptr_t)&cp->cache_arena->vm_quantum) == -1 ||
1198789d94c2Sjwadams vm_quantum == 0 ||
1199789d94c2Sjwadams (vm_quantum & (vm_quantum - 1)) != 0 ||
1200789d94c2Sjwadams cp->cache_slabsize < vm_quantum ||
1201789d94c2Sjwadams P2PHASE(cp->cache_slabsize, vm_quantum) != 0 ||
1202789d94c2Sjwadams cp->cache_chunksize == 0 ||
1203789d94c2Sjwadams cp->cache_chunksize > cp->cache_slabsize) {
1204789d94c2Sjwadams mdb_warn("%p is not a valid kmem_cache_t\n", addr);
1205789d94c2Sjwadams goto out2;
1206789d94c2Sjwadams }
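	/*
	 * Added note (illustration only, not original logic): the
	 * power-of-two test above uses the standard bit trick.  For a sane
	 * arena quantum such as 0x1000, (0x1000 & 0x0fff) == 0 and the
	 * check passes; a garbage value such as 0x1400 yields a nonzero
	 * result, so the would-be cache is rejected before any of its
	 * other fields are trusted.
	 */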
1207789d94c2Sjwadams
12087c478bd9Sstevel@tonic-gate dprintf(("buf total is %d\n", cp->cache_buftotal));
12097c478bd9Sstevel@tonic-gate
12107c478bd9Sstevel@tonic-gate if (cp->cache_buftotal == 0) {
12117c478bd9Sstevel@tonic-gate mdb_free(cp, csize);
12127c478bd9Sstevel@tonic-gate return (WALK_DONE);
12137c478bd9Sstevel@tonic-gate }
12147c478bd9Sstevel@tonic-gate
12157c478bd9Sstevel@tonic-gate /*
12167c478bd9Sstevel@tonic-gate * If they ask for bufctls, but it's a small-slab cache,
12177c478bd9Sstevel@tonic-gate * there is nothing to report.
12187c478bd9Sstevel@tonic-gate */
12197c478bd9Sstevel@tonic-gate if ((type & KM_BUFCTL) && !(cp->cache_flags & KMF_HASH)) {
12207c478bd9Sstevel@tonic-gate dprintf(("bufctl requested, not KMF_HASH (flags: %p)\n",
12217c478bd9Sstevel@tonic-gate cp->cache_flags));
12227c478bd9Sstevel@tonic-gate mdb_free(cp, csize);
12237c478bd9Sstevel@tonic-gate return (WALK_DONE);
12247c478bd9Sstevel@tonic-gate }
12257c478bd9Sstevel@tonic-gate
12267c478bd9Sstevel@tonic-gate /*
12277c478bd9Sstevel@tonic-gate * If they want constructed buffers, but there's no constructor or
12287c478bd9Sstevel@tonic-gate * the cache has DEADBEEF checking enabled, there is nothing to report.
12297c478bd9Sstevel@tonic-gate */
12307c478bd9Sstevel@tonic-gate if ((type & KM_CONSTRUCTED) && (!(type & KM_FREE) ||
12317c478bd9Sstevel@tonic-gate cp->cache_constructor == NULL ||
12327c478bd9Sstevel@tonic-gate (cp->cache_flags & (KMF_DEADBEEF | KMF_LITE)) == KMF_DEADBEEF)) {
12337c478bd9Sstevel@tonic-gate mdb_free(cp, csize);
12347c478bd9Sstevel@tonic-gate return (WALK_DONE);
12357c478bd9Sstevel@tonic-gate }
12367c478bd9Sstevel@tonic-gate
12377c478bd9Sstevel@tonic-gate /*
12387c478bd9Sstevel@tonic-gate * Read in the contents of the magazine layer
12397c478bd9Sstevel@tonic-gate */
12407c478bd9Sstevel@tonic-gate if (kmem_read_magazines(cp, addr, ncpus, &maglist, &magcnt,
12417c478bd9Sstevel@tonic-gate &magmax, UM_SLEEP) == WALK_ERR)
12427c478bd9Sstevel@tonic-gate goto out2;
12437c478bd9Sstevel@tonic-gate
12447c478bd9Sstevel@tonic-gate /*
12457c478bd9Sstevel@tonic-gate * We have all of the buffers from the magazines; if we are walking
12467c478bd9Sstevel@tonic-gate * allocated buffers, sort them so we can bsearch them later.
12477c478bd9Sstevel@tonic-gate */
12487c478bd9Sstevel@tonic-gate if (type & KM_ALLOCATED)
12497c478bd9Sstevel@tonic-gate qsort(maglist, magcnt, sizeof (void *), addrcmp);
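	/*
	 * Added note: sorting with addrcmp() here is what lets
	 * kmem_walk_step() test magazine membership with a simple
	 *
	 *	bsearch(&buf, maglist, magcnt, sizeof (void *), addrcmp)
	 *
	 * so each candidate buffer costs O(log magcnt) to check against
	 * the magazine layer rather than a linear scan.
	 */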
12507c478bd9Sstevel@tonic-gate
12517c478bd9Sstevel@tonic-gate wsp->walk_data = kmw = mdb_zalloc(sizeof (kmem_walk_t), UM_SLEEP);
12527c478bd9Sstevel@tonic-gate
12537c478bd9Sstevel@tonic-gate kmw->kmw_type = type;
12547c478bd9Sstevel@tonic-gate kmw->kmw_addr = addr;
12557c478bd9Sstevel@tonic-gate kmw->kmw_cp = cp;
12567c478bd9Sstevel@tonic-gate kmw->kmw_csize = csize;
12577c478bd9Sstevel@tonic-gate kmw->kmw_maglist = maglist;
12587c478bd9Sstevel@tonic-gate kmw->kmw_max = magmax;
12597c478bd9Sstevel@tonic-gate kmw->kmw_count = magcnt;
12607c478bd9Sstevel@tonic-gate kmw->kmw_pos = 0;
12617c478bd9Sstevel@tonic-gate
12627c478bd9Sstevel@tonic-gate /*
12637c478bd9Sstevel@tonic-gate * When walking allocated buffers in a KMF_HASH cache, we walk the
12647c478bd9Sstevel@tonic-gate * hash table instead of the slab layer.
12657c478bd9Sstevel@tonic-gate */
12667c478bd9Sstevel@tonic-gate if ((cp->cache_flags & KMF_HASH) && (type & KM_ALLOCATED)) {
12677c478bd9Sstevel@tonic-gate layered = "kmem_hash";
12687c478bd9Sstevel@tonic-gate
12697c478bd9Sstevel@tonic-gate kmw->kmw_type |= KM_HASH;
12707c478bd9Sstevel@tonic-gate } else {
12717c478bd9Sstevel@tonic-gate /*
12727c478bd9Sstevel@tonic-gate * If we are walking freed buffers, we only need the
12737c478bd9Sstevel@tonic-gate * magazine layer plus the partially allocated slabs.
12747c478bd9Sstevel@tonic-gate * To walk allocated buffers, we need all of the slabs.
12757c478bd9Sstevel@tonic-gate */
12767c478bd9Sstevel@tonic-gate if (type & KM_ALLOCATED)
12777c478bd9Sstevel@tonic-gate layered = "kmem_slab";
12787c478bd9Sstevel@tonic-gate else
12797c478bd9Sstevel@tonic-gate layered = "kmem_slab_partial";
12807c478bd9Sstevel@tonic-gate
12817c478bd9Sstevel@tonic-gate /*
12827c478bd9Sstevel@tonic-gate * for small-slab caches, we read in the entire slab. For
12837c478bd9Sstevel@tonic-gate * freed buffers, we can just walk the freelist. For
12847c478bd9Sstevel@tonic-gate * allocated buffers, we use a 'valid' array to track
12857c478bd9Sstevel@tonic-gate * the freed buffers.
12867c478bd9Sstevel@tonic-gate */
12877c478bd9Sstevel@tonic-gate if (!(cp->cache_flags & KMF_HASH)) {
12887c478bd9Sstevel@tonic-gate chunksize = cp->cache_chunksize;
12897c478bd9Sstevel@tonic-gate slabsize = cp->cache_slabsize;
12907c478bd9Sstevel@tonic-gate
12917c478bd9Sstevel@tonic-gate kmw->kmw_ubase = mdb_alloc(slabsize +
12927c478bd9Sstevel@tonic-gate sizeof (kmem_bufctl_t), UM_SLEEP);
12937c478bd9Sstevel@tonic-gate
12947c478bd9Sstevel@tonic-gate if (type & KM_ALLOCATED)
12957c478bd9Sstevel@tonic-gate kmw->kmw_valid =
12967c478bd9Sstevel@tonic-gate mdb_alloc(slabsize / chunksize, UM_SLEEP);
12977c478bd9Sstevel@tonic-gate }
12987c478bd9Sstevel@tonic-gate }
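	/*
	 * Added sizing note (hypothetical numbers): for a small-slab
	 * (non-KMF_HASH) cache the 'valid' map is one byte per chunk, i.e.
	 * slabsize / chunksize entries -- an 8K slab of 256-byte chunks
	 * needs a 32-byte map -- while kmw_ubase holds the whole slab plus
	 * sizeof (kmem_bufctl_t) of slop so that a bufctl image read near
	 * the end of the slab cannot run off the end of the local copy.
	 */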
12997c478bd9Sstevel@tonic-gate
13007c478bd9Sstevel@tonic-gate status = WALK_NEXT;
13017c478bd9Sstevel@tonic-gate
13027c478bd9Sstevel@tonic-gate if (mdb_layered_walk(layered, wsp) == -1) {
13037c478bd9Sstevel@tonic-gate mdb_warn("unable to start layered '%s' walk", layered);
13047c478bd9Sstevel@tonic-gate status = WALK_ERR;
13057c478bd9Sstevel@tonic-gate }
13067c478bd9Sstevel@tonic-gate
13077c478bd9Sstevel@tonic-gate out1:
13087c478bd9Sstevel@tonic-gate if (status == WALK_ERR) {
13097c478bd9Sstevel@tonic-gate if (kmw->kmw_valid)
13107c478bd9Sstevel@tonic-gate mdb_free(kmw->kmw_valid, slabsize / chunksize);
13117c478bd9Sstevel@tonic-gate
13127c478bd9Sstevel@tonic-gate if (kmw->kmw_ubase)
13137c478bd9Sstevel@tonic-gate mdb_free(kmw->kmw_ubase, slabsize +
13147c478bd9Sstevel@tonic-gate sizeof (kmem_bufctl_t));
13157c478bd9Sstevel@tonic-gate
1316789d94c2Sjwadams if (kmw->kmw_maglist)
1317789d94c2Sjwadams mdb_free(kmw->kmw_maglist,
1318789d94c2Sjwadams kmw->kmw_max * sizeof (uintptr_t));
1319789d94c2Sjwadams
13207c478bd9Sstevel@tonic-gate mdb_free(kmw, sizeof (kmem_walk_t));
13217c478bd9Sstevel@tonic-gate wsp->walk_data = NULL;
13227c478bd9Sstevel@tonic-gate }
13237c478bd9Sstevel@tonic-gate
13247c478bd9Sstevel@tonic-gate out2:
13257c478bd9Sstevel@tonic-gate if (status == WALK_ERR)
13267c478bd9Sstevel@tonic-gate mdb_free(cp, csize);
13277c478bd9Sstevel@tonic-gate
13287c478bd9Sstevel@tonic-gate return (status);
13297c478bd9Sstevel@tonic-gate }
13307c478bd9Sstevel@tonic-gate
13317c478bd9Sstevel@tonic-gate int
13327c478bd9Sstevel@tonic-gate kmem_walk_step(mdb_walk_state_t *wsp)
13337c478bd9Sstevel@tonic-gate {
13347c478bd9Sstevel@tonic-gate kmem_walk_t *kmw = wsp->walk_data;
13357c478bd9Sstevel@tonic-gate int type = kmw->kmw_type;
13367c478bd9Sstevel@tonic-gate kmem_cache_t *cp = kmw->kmw_cp;
13377c478bd9Sstevel@tonic-gate
13387c478bd9Sstevel@tonic-gate void **maglist = kmw->kmw_maglist;
13397c478bd9Sstevel@tonic-gate int magcnt = kmw->kmw_count;
13407c478bd9Sstevel@tonic-gate
13417c478bd9Sstevel@tonic-gate uintptr_t chunksize, slabsize;
13427c478bd9Sstevel@tonic-gate uintptr_t addr;
13437c478bd9Sstevel@tonic-gate const kmem_slab_t *sp;
13447c478bd9Sstevel@tonic-gate const kmem_bufctl_t *bcp;
13457c478bd9Sstevel@tonic-gate kmem_bufctl_t bc;
13467c478bd9Sstevel@tonic-gate
13477c478bd9Sstevel@tonic-gate int chunks;
13487c478bd9Sstevel@tonic-gate char *kbase;
13497c478bd9Sstevel@tonic-gate void *buf;
13507c478bd9Sstevel@tonic-gate int i, ret;
13517c478bd9Sstevel@tonic-gate
13527c478bd9Sstevel@tonic-gate char *valid, *ubase;
13537c478bd9Sstevel@tonic-gate
13547c478bd9Sstevel@tonic-gate /*
13557c478bd9Sstevel@tonic-gate * first, handle the 'kmem_hash' layered walk case
13567c478bd9Sstevel@tonic-gate */
13577c478bd9Sstevel@tonic-gate if (type & KM_HASH) {
13587c478bd9Sstevel@tonic-gate /*
13597c478bd9Sstevel@tonic-gate * We have a buffer which has been allocated out of the
13607c478bd9Sstevel@tonic-gate * global layer. We need to make sure that it's not
13617c478bd9Sstevel@tonic-gate * actually sitting in a magazine before we report it as
13627c478bd9Sstevel@tonic-gate * an allocated buffer.
13637c478bd9Sstevel@tonic-gate */
13647c478bd9Sstevel@tonic-gate buf = ((const kmem_bufctl_t *)wsp->walk_layer)->bc_addr;
13657c478bd9Sstevel@tonic-gate
13667c478bd9Sstevel@tonic-gate if (magcnt > 0 &&
13677c478bd9Sstevel@tonic-gate bsearch(&buf, maglist, magcnt, sizeof (void *),
13687c478bd9Sstevel@tonic-gate addrcmp) != NULL)
13697c478bd9Sstevel@tonic-gate return (WALK_NEXT);
13707c478bd9Sstevel@tonic-gate
13717c478bd9Sstevel@tonic-gate if (type & KM_BUFCTL)
13727c478bd9Sstevel@tonic-gate return (bufctl_walk_callback(cp, wsp, wsp->walk_addr));
13737c478bd9Sstevel@tonic-gate
13747c478bd9Sstevel@tonic-gate return (kmem_walk_callback(wsp, (uintptr_t)buf));
13757c478bd9Sstevel@tonic-gate }
13767c478bd9Sstevel@tonic-gate
13777c478bd9Sstevel@tonic-gate ret = WALK_NEXT;
13787c478bd9Sstevel@tonic-gate
13797c478bd9Sstevel@tonic-gate addr = kmw->kmw_addr;
13807c478bd9Sstevel@tonic-gate
13817c478bd9Sstevel@tonic-gate /*
13827c478bd9Sstevel@tonic-gate * If we're walking freed buffers, report everything in the
13837c478bd9Sstevel@tonic-gate * magazine layer before processing the first slab.
13847c478bd9Sstevel@tonic-gate */
13857c478bd9Sstevel@tonic-gate if ((type & KM_FREE) && magcnt != 0) {
13867c478bd9Sstevel@tonic-gate kmw->kmw_count = 0; /* only do this once */
13877c478bd9Sstevel@tonic-gate for (i = 0; i < magcnt; i++) {
13887c478bd9Sstevel@tonic-gate buf = maglist[i];
13897c478bd9Sstevel@tonic-gate
13907c478bd9Sstevel@tonic-gate if (type & KM_BUFCTL) {
13917c478bd9Sstevel@tonic-gate uintptr_t out;
13927c478bd9Sstevel@tonic-gate
13937c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_BUFTAG) {
13947c478bd9Sstevel@tonic-gate kmem_buftag_t *btp;
13957c478bd9Sstevel@tonic-gate kmem_buftag_t tag;
13967c478bd9Sstevel@tonic-gate
13977c478bd9Sstevel@tonic-gate /* LINTED - alignment */
13987c478bd9Sstevel@tonic-gate btp = KMEM_BUFTAG(cp, buf);
13997c478bd9Sstevel@tonic-gate if (mdb_vread(&tag, sizeof (tag),
14007c478bd9Sstevel@tonic-gate (uintptr_t)btp) == -1) {
14017c478bd9Sstevel@tonic-gate mdb_warn("reading buftag for "
14027c478bd9Sstevel@tonic-gate "%p at %p", buf, btp);
14037c478bd9Sstevel@tonic-gate continue;
14047c478bd9Sstevel@tonic-gate }
14057c478bd9Sstevel@tonic-gate out = (uintptr_t)tag.bt_bufctl;
14067c478bd9Sstevel@tonic-gate } else {
14077c478bd9Sstevel@tonic-gate if (kmem_hash_lookup(cp, addr, buf,
14087c478bd9Sstevel@tonic-gate &out) == -1)
14097c478bd9Sstevel@tonic-gate continue;
14107c478bd9Sstevel@tonic-gate }
14117c478bd9Sstevel@tonic-gate ret = bufctl_walk_callback(cp, wsp, out);
14127c478bd9Sstevel@tonic-gate } else {
14137c478bd9Sstevel@tonic-gate ret = kmem_walk_callback(wsp, (uintptr_t)buf);
14147c478bd9Sstevel@tonic-gate }
14157c478bd9Sstevel@tonic-gate
14167c478bd9Sstevel@tonic-gate if (ret != WALK_NEXT)
14177c478bd9Sstevel@tonic-gate return (ret);
14187c478bd9Sstevel@tonic-gate }
14197c478bd9Sstevel@tonic-gate }
14207c478bd9Sstevel@tonic-gate
14217c478bd9Sstevel@tonic-gate /*
14227c478bd9Sstevel@tonic-gate * If they want constructed buffers, we're finished, since the
14237c478bd9Sstevel@tonic-gate * magazine layer holds them all.
14247c478bd9Sstevel@tonic-gate */
14257c478bd9Sstevel@tonic-gate if (type & KM_CONSTRUCTED)
14267c478bd9Sstevel@tonic-gate return (WALK_DONE);
14277c478bd9Sstevel@tonic-gate
14287c478bd9Sstevel@tonic-gate /*
14297c478bd9Sstevel@tonic-gate * Handle the buffers in the current slab
14307c478bd9Sstevel@tonic-gate */
14317c478bd9Sstevel@tonic-gate chunksize = cp->cache_chunksize;
14327c478bd9Sstevel@tonic-gate slabsize = cp->cache_slabsize;
14337c478bd9Sstevel@tonic-gate
14347c478bd9Sstevel@tonic-gate sp = wsp->walk_layer;
14357c478bd9Sstevel@tonic-gate chunks = sp->slab_chunks;
14367c478bd9Sstevel@tonic-gate kbase = sp->slab_base;
14377c478bd9Sstevel@tonic-gate
14387c478bd9Sstevel@tonic-gate dprintf(("kbase is %p\n", kbase));
14397c478bd9Sstevel@tonic-gate
14407c478bd9Sstevel@tonic-gate if (!(cp->cache_flags & KMF_HASH)) {
14417c478bd9Sstevel@tonic-gate valid = kmw->kmw_valid;
14427c478bd9Sstevel@tonic-gate ubase = kmw->kmw_ubase;
14437c478bd9Sstevel@tonic-gate
14447c478bd9Sstevel@tonic-gate if (mdb_vread(ubase, chunks * chunksize,
14457c478bd9Sstevel@tonic-gate (uintptr_t)kbase) == -1) {
14467c478bd9Sstevel@tonic-gate mdb_warn("failed to read slab contents at %p", kbase);
14477c478bd9Sstevel@tonic-gate return (WALK_ERR);
14487c478bd9Sstevel@tonic-gate }
14497c478bd9Sstevel@tonic-gate
14507c478bd9Sstevel@tonic-gate /*
14517c478bd9Sstevel@tonic-gate * Set up the valid map as fully allocated -- we'll punch
14527c478bd9Sstevel@tonic-gate * out the freelist.
14537c478bd9Sstevel@tonic-gate */
14547c478bd9Sstevel@tonic-gate if (type & KM_ALLOCATED)
14557c478bd9Sstevel@tonic-gate (void) memset(valid, 1, chunks);
14567c478bd9Sstevel@tonic-gate } else {
14577c478bd9Sstevel@tonic-gate valid = NULL;
14587c478bd9Sstevel@tonic-gate ubase = NULL;
14597c478bd9Sstevel@tonic-gate }
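	/*
	 * Added note: the logic below is "assume allocated, then punch out
	 * the free ones".  With the valid map initialized to all ones,
	 * every bufctl found on the slab's freelist clears its chunk's
	 * entry (valid[ndx] = 0); whatever remains set afterwards is
	 * reported as allocated, subject to the magazine-layer check.
	 */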
14607c478bd9Sstevel@tonic-gate
14617c478bd9Sstevel@tonic-gate /*
14627c478bd9Sstevel@tonic-gate * walk the slab's freelist
14637c478bd9Sstevel@tonic-gate */
14647c478bd9Sstevel@tonic-gate bcp = sp->slab_head;
14657c478bd9Sstevel@tonic-gate
14667c478bd9Sstevel@tonic-gate dprintf(("refcnt is %d; chunks is %d\n", sp->slab_refcnt, chunks));
14677c478bd9Sstevel@tonic-gate
14687c478bd9Sstevel@tonic-gate /*
14697c478bd9Sstevel@tonic-gate 	 * Since we could be in the middle of allocating a buffer,
14707c478bd9Sstevel@tonic-gate 	 * our refcnt could be one higher than it ought to be.  So we
14717c478bd9Sstevel@tonic-gate 	 * check one entry further on the freelist than the count allows.
14727c478bd9Sstevel@tonic-gate */
14737c478bd9Sstevel@tonic-gate for (i = sp->slab_refcnt; i <= chunks; i++) {
14747c478bd9Sstevel@tonic-gate uint_t ndx;
14757c478bd9Sstevel@tonic-gate
14767c478bd9Sstevel@tonic-gate dprintf(("bcp is %p\n", bcp));
14777c478bd9Sstevel@tonic-gate
14787c478bd9Sstevel@tonic-gate if (bcp == NULL) {
14797c478bd9Sstevel@tonic-gate if (i == chunks)
14807c478bd9Sstevel@tonic-gate break;
14817c478bd9Sstevel@tonic-gate mdb_warn(
14827c478bd9Sstevel@tonic-gate "slab %p in cache %p freelist too short by %d\n",
14837c478bd9Sstevel@tonic-gate sp, addr, chunks - i);
14847c478bd9Sstevel@tonic-gate break;
14857c478bd9Sstevel@tonic-gate }
14867c478bd9Sstevel@tonic-gate
14877c478bd9Sstevel@tonic-gate if (cp->cache_flags & KMF_HASH) {
14887c478bd9Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (bc), (uintptr_t)bcp) == -1) {
14897c478bd9Sstevel@tonic-gate mdb_warn("failed to read bufctl ptr at %p",
14907c478bd9Sstevel@tonic-gate bcp);
14917c478bd9Sstevel@tonic-gate break;
14927c478bd9Sstevel@tonic-gate }
14937c478bd9Sstevel@tonic-gate buf = bc.bc_addr;
14947c478bd9Sstevel@tonic-gate } else {
14957c478bd9Sstevel@tonic-gate /*
1496d7dba7e5SBryan Cantrill * Otherwise the buffer is (or should be) in the slab
1497d7dba7e5SBryan Cantrill * that we've read in; determine its offset in the
1498d7dba7e5SBryan Cantrill * slab, validate that it's not corrupt, and add to
1499d7dba7e5SBryan Cantrill 		 * our base address to find the kmem_bufctl_t.  (Note
1500d7dba7e5SBryan Cantrill * that we don't need to add the size of the bufctl
1501d7dba7e5SBryan Cantrill * to our offset calculation because of the slop that's
1502d7dba7e5SBryan Cantrill * allocated for the buffer at ubase.)
15037c478bd9Sstevel@tonic-gate */
1504d7dba7e5SBryan Cantrill uintptr_t offs = (uintptr_t)bcp - (uintptr_t)kbase;
15057c478bd9Sstevel@tonic-gate
1506d7dba7e5SBryan Cantrill if (offs > chunks * chunksize) {
1507d7dba7e5SBryan Cantrill mdb_warn("found corrupt bufctl ptr %p"
1508d7dba7e5SBryan Cantrill " in slab %p in cache %p\n", bcp,
1509d7dba7e5SBryan Cantrill wsp->walk_addr, addr);
1510d7dba7e5SBryan Cantrill break;
1511d7dba7e5SBryan Cantrill }
1512d7dba7e5SBryan Cantrill
1513d7dba7e5SBryan Cantrill bc = *((kmem_bufctl_t *)((uintptr_t)ubase + offs));
15147c478bd9Sstevel@tonic-gate buf = KMEM_BUF(cp, bcp);
15157c478bd9Sstevel@tonic-gate }
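		/*
		 * Added worked example for the non-KMF_HASH branch above
		 * (hypothetical numbers): with kbase == 0x1000 and
		 * chunksize == 0x100, a freelist pointer bcp == 0x1320
		 * gives offs == 0x320, so the bufctl image is taken from
		 * ubase + 0x320 in the local slab copy rather than via
		 * another mdb_vread().
		 */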
15167c478bd9Sstevel@tonic-gate
15177c478bd9Sstevel@tonic-gate ndx = ((uintptr_t)buf - (uintptr_t)kbase) / chunksize;
15187c478bd9Sstevel@tonic-gate
15197c478bd9Sstevel@tonic-gate if (ndx > slabsize / cp->cache_bufsize) {
15207c478bd9Sstevel@tonic-gate /*
15217c478bd9Sstevel@tonic-gate * This is very wrong; we have managed to find
15227c478bd9Sstevel@tonic-gate * a buffer in the slab which shouldn't
15237c478bd9Sstevel@tonic-gate * actually be here. Emit a warning, and
15247c478bd9Sstevel@tonic-gate * try to continue.
15257c478bd9Sstevel@tonic-gate */
15267c478bd9Sstevel@tonic-gate mdb_warn("buf %p is out of range for "
15277c478bd9Sstevel@tonic-gate "slab %p, cache %p\n", buf, sp, addr);
15287c478bd9Sstevel@tonic-gate } else if (type & KM_ALLOCATED) {
15297c478bd9Sstevel@tonic-gate /*
15307c478bd9Sstevel@tonic-gate * we have found a buffer on the slab's freelist;
15317c478bd9Sstevel@tonic-gate * clear its entry
15327c478bd9Sstevel@tonic-gate */
15337c478bd9Sstevel@tonic-gate valid[ndx] = 0;
15347c478bd9Sstevel@tonic-gate } else {
15357c478bd9Sstevel@tonic-gate /*
15367c478bd9Sstevel@tonic-gate * Report this freed buffer
15377c478bd9Sstevel@tonic-gate */
15387c478bd9Sstevel@tonic-gate if (type & KM_BUFCTL) {
15397c478bd9Sstevel@tonic-gate ret = bufctl_walk_callback(cp, wsp,
15407c478bd9Sstevel@tonic-gate (uintptr_t)bcp);
15417c478bd9Sstevel@tonic-gate } else {
15427c478bd9Sstevel@tonic-gate ret = kmem_walk_callback(wsp, (uintptr_t)buf);
15437c478bd9Sstevel@tonic-gate }
15447c478bd9Sstevel@tonic-gate if (ret != WALK_NEXT)
15457c478bd9Sstevel@tonic-gate return (ret);
15467c478bd9Sstevel@tonic-gate }
15477c478bd9Sstevel@tonic-gate
15487c478bd9Sstevel@tonic-gate bcp = bc.bc_next;
15497c478bd9Sstevel@tonic-gate }
15507c478bd9Sstevel@tonic-gate
15517c478bd9Sstevel@tonic-gate if (bcp != NULL) {
15527c478bd9Sstevel@tonic-gate dprintf(("slab %p in cache %p freelist too long (%p)\n",
15537c478bd9Sstevel@tonic-gate sp, addr, bcp));
15547c478bd9Sstevel@tonic-gate }
15557c478bd9Sstevel@tonic-gate
15567c478bd9Sstevel@tonic-gate /*
15577c478bd9Sstevel@tonic-gate * If we are walking freed buffers, the loop above handled reporting
15587c478bd9Sstevel@tonic-gate * them.
15597c478bd9Sstevel@tonic-gate */
15607c478bd9Sstevel@tonic-gate if (type & KM_FREE)
15617c478bd9Sstevel@tonic-gate return (WALK_NEXT);
15627c478bd9Sstevel@tonic-gate
15637c478bd9Sstevel@tonic-gate if (type & KM_BUFCTL) {
15647c478bd9Sstevel@tonic-gate mdb_warn("impossible situation: small-slab KM_BUFCTL walk for "
15657c478bd9Sstevel@tonic-gate "cache %p\n", addr);
15667c478bd9Sstevel@tonic-gate return (WALK_ERR);
15677c478bd9Sstevel@tonic-gate }
15687c478bd9Sstevel@tonic-gate
15697c478bd9Sstevel@tonic-gate /*
15707c478bd9Sstevel@tonic-gate * Report allocated buffers, skipping buffers in the magazine layer.
15717c478bd9Sstevel@tonic-gate * We only get this far for small-slab caches.
15727c478bd9Sstevel@tonic-gate */
15737c478bd9Sstevel@tonic-gate for (i = 0; ret == WALK_NEXT && i < chunks; i++) {
15747c478bd9Sstevel@tonic-gate buf = (char *)kbase + i * chunksize;
15757c478bd9Sstevel@tonic-gate
15767c478bd9Sstevel@tonic-gate if (!valid[i])
15777c478bd9Sstevel@tonic-gate continue; /* on slab freelist */
15787c478bd9Sstevel@tonic-gate
15797c478bd9Sstevel@tonic-gate if (magcnt > 0 &&
15807c478bd9Sstevel@tonic-gate bsearch(&buf, maglist, magcnt, sizeof (void *),
15817c478bd9Sstevel@tonic-gate addrcmp) != NULL)
15827c478bd9Sstevel@tonic-gate continue; /* in magazine layer */
15837c478bd9Sstevel@tonic-gate
15847c478bd9Sstevel@tonic-gate ret = kmem_walk_callback(wsp, (uintptr_t)buf);
15857c478bd9Sstevel@tonic-gate }
15867c478bd9Sstevel@tonic-gate return (ret);
15877c478bd9Sstevel@tonic-gate }
15887c478bd9Sstevel@tonic-gate
15897c478bd9Sstevel@tonic-gate void
15907c478bd9Sstevel@tonic-gate kmem_walk_fini(mdb_walk_state_t *wsp)
15917c478bd9Sstevel@tonic-gate {
15927c478bd9Sstevel@tonic-gate kmem_walk_t *kmw = wsp->walk_data;
15937c478bd9Sstevel@tonic-gate uintptr_t chunksize;
15947c478bd9Sstevel@tonic-gate uintptr_t slabsize;
15957c478bd9Sstevel@tonic-gate
15967c478bd9Sstevel@tonic-gate if (kmw == NULL)
15977c478bd9Sstevel@tonic-gate return;
15987c478bd9Sstevel@tonic-gate
15997c478bd9Sstevel@tonic-gate if (kmw->kmw_maglist != NULL)
16007c478bd9Sstevel@tonic-gate mdb_free(kmw->kmw_maglist, kmw->kmw_max * sizeof (void *));
16017c478bd9Sstevel@tonic-gate
16027c478bd9Sstevel@tonic-gate chunksize = kmw->kmw_cp->cache_chunksize;
16037c478bd9Sstevel@tonic-gate slabsize = kmw->kmw_cp->cache_slabsize;
16047c478bd9Sstevel@tonic-gate
16057c478bd9Sstevel@tonic-gate if (kmw->kmw_valid != NULL)
16067c478bd9Sstevel@tonic-gate mdb_free(kmw->kmw_valid, slabsize / chunksize);
16077c478bd9Sstevel@tonic-gate if (kmw->kmw_ubase != NULL)
16087c478bd9Sstevel@tonic-gate mdb_free(kmw->kmw_ubase, slabsize + sizeof (kmem_bufctl_t));
16097c478bd9Sstevel@tonic-gate
16107c478bd9Sstevel@tonic-gate mdb_free(kmw->kmw_cp, kmw->kmw_csize);
16117c478bd9Sstevel@tonic-gate mdb_free(kmw, sizeof (kmem_walk_t));
16127c478bd9Sstevel@tonic-gate }
16137c478bd9Sstevel@tonic-gate
16147c478bd9Sstevel@tonic-gate /*ARGSUSED*/
16157c478bd9Sstevel@tonic-gate static int
16167c478bd9Sstevel@tonic-gate kmem_walk_all(uintptr_t addr, const kmem_cache_t *c, mdb_walk_state_t *wsp)
16177c478bd9Sstevel@tonic-gate {
16187c478bd9Sstevel@tonic-gate /*
16197c478bd9Sstevel@tonic-gate * Buffers allocated from NOTOUCH caches can also show up as freed
16207c478bd9Sstevel@tonic-gate * memory in other caches. This can be a little confusing, so we
16217c478bd9Sstevel@tonic-gate 	 * don't walk NOTOUCH caches when walking all caches (thereby ensuring
16227c478bd9Sstevel@tonic-gate * that "::walk kmem" and "::walk freemem" yield disjoint output).
16237c478bd9Sstevel@tonic-gate */
16247c478bd9Sstevel@tonic-gate if (c->cache_cflags & KMC_NOTOUCH)
16257c478bd9Sstevel@tonic-gate return (WALK_NEXT);
16267c478bd9Sstevel@tonic-gate
16277c478bd9Sstevel@tonic-gate if (mdb_pwalk(wsp->walk_data, wsp->walk_callback,
16287c478bd9Sstevel@tonic-gate wsp->walk_cbdata, addr) == -1)
16297c478bd9Sstevel@tonic-gate return (WALK_DONE);
16307c478bd9Sstevel@tonic-gate
16317c478bd9Sstevel@tonic-gate return (WALK_NEXT);
16327c478bd9Sstevel@tonic-gate }
16337c478bd9Sstevel@tonic-gate
16347c478bd9Sstevel@tonic-gate #define KMEM_WALK_ALL(name, wsp) { \
16357c478bd9Sstevel@tonic-gate wsp->walk_data = (name); \
16367c478bd9Sstevel@tonic-gate if (mdb_walk("kmem_cache", (mdb_walk_cb_t)kmem_walk_all, wsp) == -1) \
16377c478bd9Sstevel@tonic-gate return (WALK_ERR); \
16387c478bd9Sstevel@tonic-gate return (WALK_DONE); \
16397c478bd9Sstevel@tonic-gate }
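/*
 * Added usage sketch (hypothetical mdb session): with no address the
 * walkers below fan out over every cache via KMEM_WALK_ALL(); given a
 * kmem_cache_t address they walk just that cache:
 *
 *	> addr::walk kmem		(allocated buffers in that cache)
 *	> addr::walk freemem		(free buffers in that cache)
 *	> ::walk kmem			(allocated buffers, all caches)
 *
 * The global "::walk kmem" and "::walk freemem" outputs are disjoint
 * because kmem_walk_all() skips KMC_NOTOUCH caches.
 */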
16407c478bd9Sstevel@tonic-gate
16417c478bd9Sstevel@tonic-gate int
16427c478bd9Sstevel@tonic-gate kmem_walk_init(mdb_walk_state_t *wsp)
16437c478bd9Sstevel@tonic-gate {
16447c478bd9Sstevel@tonic-gate if (wsp->walk_arg != NULL)
16457c478bd9Sstevel@tonic-gate wsp->walk_addr = (uintptr_t)wsp->walk_arg;
16467c478bd9Sstevel@tonic-gate
16477c478bd9Sstevel@tonic-gate if (wsp->walk_addr == NULL)
16487c478bd9Sstevel@tonic-gate KMEM_WALK_ALL("kmem", wsp);
16497c478bd9Sstevel@tonic-gate return (kmem_walk_init_common(wsp, KM_ALLOCATED));
16507c478bd9Sstevel@tonic-gate }
16517c478bd9Sstevel@tonic-gate
16527c478bd9Sstevel@tonic-gate int
16537c478bd9Sstevel@tonic-gate bufctl_walk_init(mdb_walk_state_t *wsp)
16547c478bd9Sstevel@tonic-gate {
16557c478bd9Sstevel@tonic-gate if (wsp->walk_addr == NULL)
16567c478bd9Sstevel@tonic-gate KMEM_WALK_ALL("bufctl", wsp);
16577c478bd9Sstevel@tonic-gate return (kmem_walk_init_common(wsp, KM_ALLOCATED | KM_BUFCTL));
16587c478bd9Sstevel@tonic-gate }
16597c478bd9Sstevel@tonic-gate
16607c478bd9Sstevel@tonic-gate int
16617c478bd9Sstevel@tonic-gate freemem_walk_init(mdb_walk_state_t *wsp)
16627c478bd9Sstevel@tonic-gate {
16637c478bd9Sstevel@tonic-gate if (wsp->walk_addr == NULL)
16647c478bd9Sstevel@tonic-gate KMEM_WALK_ALL("freemem", wsp);
16657c478bd9Sstevel@tonic-gate return (kmem_walk_init_common(wsp, KM_FREE));
16667c478bd9Sstevel@tonic-gate }
16677c478bd9Sstevel@tonic-gate
16687c478bd9Sstevel@tonic-gate int
16697c478bd9Sstevel@tonic-gate freemem_constructed_walk_init(mdb_walk_state_t *wsp)
16707c478bd9Sstevel@tonic-gate {
16717c478bd9Sstevel@tonic-gate if (wsp->walk_addr == NULL)
16727c478bd9Sstevel@tonic-gate KMEM_WALK_ALL("freemem_constructed", wsp);
16737c478bd9Sstevel@tonic-gate return (kmem_walk_init_common(wsp, KM_FREE | KM_CONSTRUCTED));
16747c478bd9Sstevel@tonic-gate }
16757c478bd9Sstevel@tonic-gate
16767c478bd9Sstevel@tonic-gate int
16777c478bd9Sstevel@tonic-gate freectl_walk_init(mdb_walk_state_t *wsp)
16787c478bd9Sstevel@tonic-gate {
16797c478bd9Sstevel@tonic-gate if (wsp->walk_addr == NULL)
16807c478bd9Sstevel@tonic-gate KMEM_WALK_ALL("freectl", wsp);
16817c478bd9Sstevel@tonic-gate return (kmem_walk_init_common(wsp, KM_FREE | KM_BUFCTL));
16827c478bd9Sstevel@tonic-gate }
16837c478bd9Sstevel@tonic-gate
16847c478bd9Sstevel@tonic-gate int
16857c478bd9Sstevel@tonic-gate freectl_constructed_walk_init(mdb_walk_state_t *wsp)
16867c478bd9Sstevel@tonic-gate {
16877c478bd9Sstevel@tonic-gate if (wsp->walk_addr == NULL)
16887c478bd9Sstevel@tonic-gate KMEM_WALK_ALL("freectl_constructed", wsp);
16897c478bd9Sstevel@tonic-gate return (kmem_walk_init_common(wsp,
16907c478bd9Sstevel@tonic-gate KM_FREE | KM_BUFCTL | KM_CONSTRUCTED));
16917c478bd9Sstevel@tonic-gate }
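/*
 * Added summary (derived from the initializers above): each walker name
 * maps to a combination of type flags --
 *
 *	kmem			KM_ALLOCATED
 *	bufctl			KM_ALLOCATED | KM_BUFCTL
 *	freemem			KM_FREE
 *	freemem_constructed	KM_FREE | KM_CONSTRUCTED
 *	freectl			KM_FREE | KM_BUFCTL
 *	freectl_constructed	KM_FREE | KM_BUFCTL | KM_CONSTRUCTED
 *
 * The *ctl variants report bufctl addresses; the others report buffers.
 */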
16927c478bd9Sstevel@tonic-gate
16937c478bd9Sstevel@tonic-gate typedef struct bufctl_history_walk {
16947c478bd9Sstevel@tonic-gate void *bhw_next;
16957c478bd9Sstevel@tonic-gate kmem_cache_t *bhw_cache;
16967c478bd9Sstevel@tonic-gate kmem_slab_t *bhw_slab;
16977c478bd9Sstevel@tonic-gate hrtime_t bhw_timestamp;
16987c478bd9Sstevel@tonic-gate } bufctl_history_walk_t;
16997c478bd9Sstevel@tonic-gate
17007c478bd9Sstevel@tonic-gate int
17017c478bd9Sstevel@tonic-gate bufctl_history_walk_init(mdb_walk_state_t *wsp)
17027c478bd9Sstevel@tonic-gate {
17037c478bd9Sstevel@tonic-gate bufctl_history_walk_t *bhw;
17047c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t bc;
17057c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t bcn;
17067c478bd9Sstevel@tonic-gate
17077c478bd9Sstevel@tonic-gate if (wsp->walk_addr == NULL) {
17087c478bd9Sstevel@tonic-gate mdb_warn("bufctl_history walk doesn't support global walks\n");
17097c478bd9Sstevel@tonic-gate return (WALK_ERR);
17107c478bd9Sstevel@tonic-gate }
17117c478bd9Sstevel@tonic-gate
17127c478bd9Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (bc), wsp->walk_addr) == -1) {
17137c478bd9Sstevel@tonic-gate mdb_warn("unable to read bufctl at %p", wsp->walk_addr);
17147c478bd9Sstevel@tonic-gate return (WALK_ERR);
17157c478bd9Sstevel@tonic-gate }
17167c478bd9Sstevel@tonic-gate
17177c478bd9Sstevel@tonic-gate bhw = mdb_zalloc(sizeof (*bhw), UM_SLEEP);
17187c478bd9Sstevel@tonic-gate bhw->bhw_timestamp = 0;
17197c478bd9Sstevel@tonic-gate bhw->bhw_cache = bc.bc_cache;
17207c478bd9Sstevel@tonic-gate bhw->bhw_slab = bc.bc_slab;
17217c478bd9Sstevel@tonic-gate
17227c478bd9Sstevel@tonic-gate /*
17237c478bd9Sstevel@tonic-gate * sometimes the first log entry matches the base bufctl; in that
17247c478bd9Sstevel@tonic-gate * case, skip the base bufctl.
17257c478bd9Sstevel@tonic-gate */
17267c478bd9Sstevel@tonic-gate if (bc.bc_lastlog != NULL &&
17277c478bd9Sstevel@tonic-gate mdb_vread(&bcn, sizeof (bcn), (uintptr_t)bc.bc_lastlog) != -1 &&
17287c478bd9Sstevel@tonic-gate bc.bc_addr == bcn.bc_addr &&
17297c478bd9Sstevel@tonic-gate bc.bc_cache == bcn.bc_cache &&
17307c478bd9Sstevel@tonic-gate bc.bc_slab == bcn.bc_slab &&
17317c478bd9Sstevel@tonic-gate bc.bc_timestamp == bcn.bc_timestamp &&
17327c478bd9Sstevel@tonic-gate bc.bc_thread == bcn.bc_thread)
17337c478bd9Sstevel@tonic-gate bhw->bhw_next = bc.bc_lastlog;
17347c478bd9Sstevel@tonic-gate else
17357c478bd9Sstevel@tonic-gate bhw->bhw_next = (void *)wsp->walk_addr;
17367c478bd9Sstevel@tonic-gate
17377c478bd9Sstevel@tonic-gate wsp->walk_addr = (uintptr_t)bc.bc_addr;
17387c478bd9Sstevel@tonic-gate wsp->walk_data = bhw;
17397c478bd9Sstevel@tonic-gate
17407c478bd9Sstevel@tonic-gate return (WALK_NEXT);
17417c478bd9Sstevel@tonic-gate }
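/*
 * Added note: the history chain is followed through bc_lastlog, most
 * recent entry first, and a typical (hypothetical) use is to pipe the
 * walk into ::bufctl:
 *
 *	> bufctl-addr::walk bufctl_history | ::bufctl -v
 *
 * The step function below stops as soon as an entry no longer matches
 * the original buffer, cache, and slab, or the timestamps stop
 * decreasing.
 */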
17427c478bd9Sstevel@tonic-gate
17437c478bd9Sstevel@tonic-gate int
17447c478bd9Sstevel@tonic-gate bufctl_history_walk_step(mdb_walk_state_t *wsp)
17457c478bd9Sstevel@tonic-gate {
17467c478bd9Sstevel@tonic-gate bufctl_history_walk_t *bhw = wsp->walk_data;
17477c478bd9Sstevel@tonic-gate uintptr_t addr = (uintptr_t)bhw->bhw_next;
17487c478bd9Sstevel@tonic-gate uintptr_t baseaddr = wsp->walk_addr;
17497c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t bc;
17507c478bd9Sstevel@tonic-gate
17517c478bd9Sstevel@tonic-gate if (addr == NULL)
17527c478bd9Sstevel@tonic-gate return (WALK_DONE);
17537c478bd9Sstevel@tonic-gate
17547c478bd9Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (bc), addr) == -1) {
17557c478bd9Sstevel@tonic-gate mdb_warn("unable to read bufctl at %p", bhw->bhw_next);
17567c478bd9Sstevel@tonic-gate return (WALK_ERR);
17577c478bd9Sstevel@tonic-gate }
17587c478bd9Sstevel@tonic-gate
17597c478bd9Sstevel@tonic-gate /*
17607c478bd9Sstevel@tonic-gate * The bufctl is only valid if the address, cache, and slab are
17617c478bd9Sstevel@tonic-gate * correct. We also check that the timestamp is decreasing, to
17627c478bd9Sstevel@tonic-gate * prevent infinite loops.
17637c478bd9Sstevel@tonic-gate */
17647c478bd9Sstevel@tonic-gate if ((uintptr_t)bc.bc_addr != baseaddr ||
17657c478bd9Sstevel@tonic-gate bc.bc_cache != bhw->bhw_cache ||
17667c478bd9Sstevel@tonic-gate bc.bc_slab != bhw->bhw_slab ||
17677c478bd9Sstevel@tonic-gate (bhw->bhw_timestamp != 0 && bc.bc_timestamp >= bhw->bhw_timestamp))
17687c478bd9Sstevel@tonic-gate return (WALK_DONE);
17697c478bd9Sstevel@tonic-gate
17707c478bd9Sstevel@tonic-gate bhw->bhw_next = bc.bc_lastlog;
17717c478bd9Sstevel@tonic-gate bhw->bhw_timestamp = bc.bc_timestamp;
17727c478bd9Sstevel@tonic-gate
17737c478bd9Sstevel@tonic-gate return (wsp->walk_callback(addr, &bc, wsp->walk_cbdata));
17747c478bd9Sstevel@tonic-gate }
17757c478bd9Sstevel@tonic-gate
17767c478bd9Sstevel@tonic-gate void
17777c478bd9Sstevel@tonic-gate bufctl_history_walk_fini(mdb_walk_state_t *wsp)
17787c478bd9Sstevel@tonic-gate {
17797c478bd9Sstevel@tonic-gate bufctl_history_walk_t *bhw = wsp->walk_data;
17807c478bd9Sstevel@tonic-gate
17817c478bd9Sstevel@tonic-gate mdb_free(bhw, sizeof (*bhw));
17827c478bd9Sstevel@tonic-gate }
17837c478bd9Sstevel@tonic-gate
17847c478bd9Sstevel@tonic-gate typedef struct kmem_log_walk {
17857c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t *klw_base;
17867c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t **klw_sorted;
17877c478bd9Sstevel@tonic-gate kmem_log_header_t klw_lh;
17887c478bd9Sstevel@tonic-gate size_t klw_size;
17897c478bd9Sstevel@tonic-gate size_t klw_maxndx;
17907c478bd9Sstevel@tonic-gate size_t klw_ndx;
17917c478bd9Sstevel@tonic-gate } kmem_log_walk_t;
17927c478bd9Sstevel@tonic-gate
17937c478bd9Sstevel@tonic-gate int
17947c478bd9Sstevel@tonic-gate kmem_log_walk_init(mdb_walk_state_t *wsp)
17957c478bd9Sstevel@tonic-gate {
17967c478bd9Sstevel@tonic-gate uintptr_t lp = wsp->walk_addr;
17977c478bd9Sstevel@tonic-gate kmem_log_walk_t *klw;
17987c478bd9Sstevel@tonic-gate kmem_log_header_t *lhp;
17997c478bd9Sstevel@tonic-gate int maxndx, i, j, k;
18007c478bd9Sstevel@tonic-gate
18017c478bd9Sstevel@tonic-gate /*
18027c478bd9Sstevel@tonic-gate * By default (global walk), walk the kmem_transaction_log. Otherwise
18037c478bd9Sstevel@tonic-gate * read the log whose kmem_log_header_t is stored at walk_addr.
18047c478bd9Sstevel@tonic-gate */
18057c478bd9Sstevel@tonic-gate if (lp == NULL && mdb_readvar(&lp, "kmem_transaction_log") == -1) {
18067c478bd9Sstevel@tonic-gate mdb_warn("failed to read 'kmem_transaction_log'");
18077c478bd9Sstevel@tonic-gate return (WALK_ERR);
18087c478bd9Sstevel@tonic-gate }
18097c478bd9Sstevel@tonic-gate
18107c478bd9Sstevel@tonic-gate if (lp == NULL) {
18117c478bd9Sstevel@tonic-gate mdb_warn("log is disabled\n");
18127c478bd9Sstevel@tonic-gate return (WALK_ERR);
18137c478bd9Sstevel@tonic-gate }
18147c478bd9Sstevel@tonic-gate
18157c478bd9Sstevel@tonic-gate klw = mdb_zalloc(sizeof (kmem_log_walk_t), UM_SLEEP);
18167c478bd9Sstevel@tonic-gate lhp = &klw->klw_lh;
18177c478bd9Sstevel@tonic-gate
18187c478bd9Sstevel@tonic-gate if (mdb_vread(lhp, sizeof (kmem_log_header_t), lp) == -1) {
18197c478bd9Sstevel@tonic-gate mdb_warn("failed to read log header at %p", lp);
18207c478bd9Sstevel@tonic-gate mdb_free(klw, sizeof (kmem_log_walk_t));
18217c478bd9Sstevel@tonic-gate return (WALK_ERR);
18227c478bd9Sstevel@tonic-gate }
18237c478bd9Sstevel@tonic-gate
18247c478bd9Sstevel@tonic-gate klw->klw_size = lhp->lh_chunksize * lhp->lh_nchunks;
18257c478bd9Sstevel@tonic-gate klw->klw_base = mdb_alloc(klw->klw_size, UM_SLEEP);
18267c478bd9Sstevel@tonic-gate maxndx = lhp->lh_chunksize / sizeof (kmem_bufctl_audit_t) - 1;
18277c478bd9Sstevel@tonic-gate
18287c478bd9Sstevel@tonic-gate if (mdb_vread(klw->klw_base, klw->klw_size,
18297c478bd9Sstevel@tonic-gate (uintptr_t)lhp->lh_base) == -1) {
18307c478bd9Sstevel@tonic-gate mdb_warn("failed to read log at base %p", lhp->lh_base);
18317c478bd9Sstevel@tonic-gate mdb_free(klw->klw_base, klw->klw_size);
18327c478bd9Sstevel@tonic-gate mdb_free(klw, sizeof (kmem_log_walk_t));
18337c478bd9Sstevel@tonic-gate return (WALK_ERR);
18347c478bd9Sstevel@tonic-gate }
18357c478bd9Sstevel@tonic-gate
18367c478bd9Sstevel@tonic-gate klw->klw_sorted = mdb_alloc(maxndx * lhp->lh_nchunks *
18377c478bd9Sstevel@tonic-gate sizeof (kmem_bufctl_audit_t *), UM_SLEEP);
18387c478bd9Sstevel@tonic-gate
18397c478bd9Sstevel@tonic-gate for (i = 0, k = 0; i < lhp->lh_nchunks; i++) {
18407c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t *chunk = (kmem_bufctl_audit_t *)
18417c478bd9Sstevel@tonic-gate ((uintptr_t)klw->klw_base + i * lhp->lh_chunksize);
18427c478bd9Sstevel@tonic-gate
18437c478bd9Sstevel@tonic-gate for (j = 0; j < maxndx; j++)
18447c478bd9Sstevel@tonic-gate klw->klw_sorted[k++] = &chunk[j];
18457c478bd9Sstevel@tonic-gate }
18467c478bd9Sstevel@tonic-gate
18477c478bd9Sstevel@tonic-gate qsort(klw->klw_sorted, k, sizeof (kmem_bufctl_audit_t *),
18487c478bd9Sstevel@tonic-gate (int(*)(const void *, const void *))bufctlcmp);
18497c478bd9Sstevel@tonic-gate
18507c478bd9Sstevel@tonic-gate klw->klw_maxndx = k;
18517c478bd9Sstevel@tonic-gate wsp->walk_data = klw;
18527c478bd9Sstevel@tonic-gate
18537c478bd9Sstevel@tonic-gate return (WALK_NEXT);
18547c478bd9Sstevel@tonic-gate }
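/*
 * Added note on the layout handled above: the transaction log is
 * lh_nchunks chunks of lh_chunksize bytes, each holding
 * (lh_chunksize / sizeof (kmem_bufctl_audit_t)) - 1 usable records.  The
 * whole log is therefore captured with a single mdb_vread() of
 * lh_chunksize * lh_nchunks bytes, and the records are sorted with
 * bufctlcmp() before the step function hands them back one at a time.
 */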
18557c478bd9Sstevel@tonic-gate
18567c478bd9Sstevel@tonic-gate int
18577c478bd9Sstevel@tonic-gate kmem_log_walk_step(mdb_walk_state_t *wsp)
18587c478bd9Sstevel@tonic-gate {
18597c478bd9Sstevel@tonic-gate kmem_log_walk_t *klw = wsp->walk_data;
18607c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t *bcp;
18617c478bd9Sstevel@tonic-gate
18627c478bd9Sstevel@tonic-gate if (klw->klw_ndx == klw->klw_maxndx)
18637c478bd9Sstevel@tonic-gate return (WALK_DONE);
18647c478bd9Sstevel@tonic-gate
18657c478bd9Sstevel@tonic-gate bcp = klw->klw_sorted[klw->klw_ndx++];
18667c478bd9Sstevel@tonic-gate
18677c478bd9Sstevel@tonic-gate return (wsp->walk_callback((uintptr_t)bcp - (uintptr_t)klw->klw_base +
18687c478bd9Sstevel@tonic-gate (uintptr_t)klw->klw_lh.lh_base, bcp, wsp->walk_cbdata));
18697c478bd9Sstevel@tonic-gate }
18707c478bd9Sstevel@tonic-gate
18717c478bd9Sstevel@tonic-gate void
18727c478bd9Sstevel@tonic-gate kmem_log_walk_fini(mdb_walk_state_t *wsp)
18737c478bd9Sstevel@tonic-gate {
18747c478bd9Sstevel@tonic-gate kmem_log_walk_t *klw = wsp->walk_data;
18757c478bd9Sstevel@tonic-gate
18767c478bd9Sstevel@tonic-gate mdb_free(klw->klw_base, klw->klw_size);
18777c478bd9Sstevel@tonic-gate mdb_free(klw->klw_sorted, klw->klw_maxndx *
18787c478bd9Sstevel@tonic-gate sizeof (kmem_bufctl_audit_t *));
18797c478bd9Sstevel@tonic-gate mdb_free(klw, sizeof (kmem_log_walk_t));
18807c478bd9Sstevel@tonic-gate }
18817c478bd9Sstevel@tonic-gate
18827c478bd9Sstevel@tonic-gate typedef struct allocdby_bufctl {
18837c478bd9Sstevel@tonic-gate uintptr_t abb_addr;
18847c478bd9Sstevel@tonic-gate hrtime_t abb_ts;
18857c478bd9Sstevel@tonic-gate } allocdby_bufctl_t;
18867c478bd9Sstevel@tonic-gate
18877c478bd9Sstevel@tonic-gate typedef struct allocdby_walk {
18887c478bd9Sstevel@tonic-gate const char *abw_walk;
18897c478bd9Sstevel@tonic-gate uintptr_t abw_thread;
18907c478bd9Sstevel@tonic-gate size_t abw_nbufs;
18917c478bd9Sstevel@tonic-gate size_t abw_size;
18927c478bd9Sstevel@tonic-gate allocdby_bufctl_t *abw_buf;
18937c478bd9Sstevel@tonic-gate size_t abw_ndx;
18947c478bd9Sstevel@tonic-gate } allocdby_walk_t;
18957c478bd9Sstevel@tonic-gate
18967c478bd9Sstevel@tonic-gate int
18977c478bd9Sstevel@tonic-gate allocdby_walk_bufctl(uintptr_t addr, const kmem_bufctl_audit_t *bcp,
18987c478bd9Sstevel@tonic-gate allocdby_walk_t *abw)
18997c478bd9Sstevel@tonic-gate {
19007c478bd9Sstevel@tonic-gate if ((uintptr_t)bcp->bc_thread != abw->abw_thread)
19017c478bd9Sstevel@tonic-gate return (WALK_NEXT);
19027c478bd9Sstevel@tonic-gate
19037c478bd9Sstevel@tonic-gate if (abw->abw_nbufs == abw->abw_size) {
19047c478bd9Sstevel@tonic-gate allocdby_bufctl_t *buf;
19057c478bd9Sstevel@tonic-gate size_t oldsize = sizeof (allocdby_bufctl_t) * abw->abw_size;
19067c478bd9Sstevel@tonic-gate
19077c478bd9Sstevel@tonic-gate buf = mdb_zalloc(oldsize << 1, UM_SLEEP);
19087c478bd9Sstevel@tonic-gate
19097c478bd9Sstevel@tonic-gate bcopy(abw->abw_buf, buf, oldsize);
19107c478bd9Sstevel@tonic-gate mdb_free(abw->abw_buf, oldsize);
19117c478bd9Sstevel@tonic-gate
19127c478bd9Sstevel@tonic-gate abw->abw_size <<= 1;
19137c478bd9Sstevel@tonic-gate abw->abw_buf = buf;
19147c478bd9Sstevel@tonic-gate }
19157c478bd9Sstevel@tonic-gate
19167c478bd9Sstevel@tonic-gate abw->abw_buf[abw->abw_nbufs].abb_addr = addr;
19177c478bd9Sstevel@tonic-gate abw->abw_buf[abw->abw_nbufs].abb_ts = bcp->bc_timestamp;
19187c478bd9Sstevel@tonic-gate abw->abw_nbufs++;
19197c478bd9Sstevel@tonic-gate
19207c478bd9Sstevel@tonic-gate return (WALK_NEXT);
19217c478bd9Sstevel@tonic-gate }
19227c478bd9Sstevel@tonic-gate
19237c478bd9Sstevel@tonic-gate /*ARGSUSED*/
19247c478bd9Sstevel@tonic-gate int
19257c478bd9Sstevel@tonic-gate allocdby_walk_cache(uintptr_t addr, const kmem_cache_t *c, allocdby_walk_t *abw)
19267c478bd9Sstevel@tonic-gate {
19277c478bd9Sstevel@tonic-gate if (mdb_pwalk(abw->abw_walk, (mdb_walk_cb_t)allocdby_walk_bufctl,
19287c478bd9Sstevel@tonic-gate abw, addr) == -1) {
19297c478bd9Sstevel@tonic-gate mdb_warn("couldn't walk bufctl for cache %p", addr);
19307c478bd9Sstevel@tonic-gate return (WALK_DONE);
19317c478bd9Sstevel@tonic-gate }
19327c478bd9Sstevel@tonic-gate
19337c478bd9Sstevel@tonic-gate return (WALK_NEXT);
19347c478bd9Sstevel@tonic-gate }
19357c478bd9Sstevel@tonic-gate
19367c478bd9Sstevel@tonic-gate static int
19377c478bd9Sstevel@tonic-gate allocdby_cmp(const allocdby_bufctl_t *lhs, const allocdby_bufctl_t *rhs)
19387c478bd9Sstevel@tonic-gate {
19397c478bd9Sstevel@tonic-gate if (lhs->abb_ts < rhs->abb_ts)
19407c478bd9Sstevel@tonic-gate return (1);
19417c478bd9Sstevel@tonic-gate if (lhs->abb_ts > rhs->abb_ts)
19427c478bd9Sstevel@tonic-gate return (-1);
19437c478bd9Sstevel@tonic-gate return (0);
19447c478bd9Sstevel@tonic-gate }
19457c478bd9Sstevel@tonic-gate
19467c478bd9Sstevel@tonic-gate static int
19477c478bd9Sstevel@tonic-gate allocdby_walk_init_common(mdb_walk_state_t *wsp, const char *walk)
19487c478bd9Sstevel@tonic-gate {
19497c478bd9Sstevel@tonic-gate allocdby_walk_t *abw;
19507c478bd9Sstevel@tonic-gate
19517c478bd9Sstevel@tonic-gate if (wsp->walk_addr == NULL) {
19527c478bd9Sstevel@tonic-gate mdb_warn("allocdby walk doesn't support global walks\n");
19537c478bd9Sstevel@tonic-gate return (WALK_ERR);
19547c478bd9Sstevel@tonic-gate }
19557c478bd9Sstevel@tonic-gate
19567c478bd9Sstevel@tonic-gate abw = mdb_zalloc(sizeof (allocdby_walk_t), UM_SLEEP);
19577c478bd9Sstevel@tonic-gate
19587c478bd9Sstevel@tonic-gate abw->abw_thread = wsp->walk_addr;
19597c478bd9Sstevel@tonic-gate abw->abw_walk = walk;
19607c478bd9Sstevel@tonic-gate abw->abw_size = 128; /* something reasonable */
19617c478bd9Sstevel@tonic-gate abw->abw_buf =
19627c478bd9Sstevel@tonic-gate mdb_zalloc(abw->abw_size * sizeof (allocdby_bufctl_t), UM_SLEEP);
19637c478bd9Sstevel@tonic-gate
19647c478bd9Sstevel@tonic-gate wsp->walk_data = abw;
19657c478bd9Sstevel@tonic-gate
19667c478bd9Sstevel@tonic-gate if (mdb_walk("kmem_cache",
19677c478bd9Sstevel@tonic-gate (mdb_walk_cb_t)allocdby_walk_cache, abw) == -1) {
19687c478bd9Sstevel@tonic-gate mdb_warn("couldn't walk kmem_cache");
19697c478bd9Sstevel@tonic-gate allocdby_walk_fini(wsp);
19707c478bd9Sstevel@tonic-gate return (WALK_ERR);
19717c478bd9Sstevel@tonic-gate }
19727c478bd9Sstevel@tonic-gate
19737c478bd9Sstevel@tonic-gate qsort(abw->abw_buf, abw->abw_nbufs, sizeof (allocdby_bufctl_t),
19747c478bd9Sstevel@tonic-gate (int(*)(const void *, const void *))allocdby_cmp);
19757c478bd9Sstevel@tonic-gate
19767c478bd9Sstevel@tonic-gate return (WALK_NEXT);
19777c478bd9Sstevel@tonic-gate }
19787c478bd9Sstevel@tonic-gate
19797c478bd9Sstevel@tonic-gate int
19807c478bd9Sstevel@tonic-gate allocdby_walk_init(mdb_walk_state_t *wsp)
19817c478bd9Sstevel@tonic-gate {
19827c478bd9Sstevel@tonic-gate return (allocdby_walk_init_common(wsp, "bufctl"));
19837c478bd9Sstevel@tonic-gate }
19847c478bd9Sstevel@tonic-gate
19857c478bd9Sstevel@tonic-gate int
19867c478bd9Sstevel@tonic-gate freedby_walk_init(mdb_walk_state_t *wsp)
19877c478bd9Sstevel@tonic-gate {
19887c478bd9Sstevel@tonic-gate return (allocdby_walk_init_common(wsp, "freectl"));
19897c478bd9Sstevel@tonic-gate }
19907c478bd9Sstevel@tonic-gate
19917c478bd9Sstevel@tonic-gate int
19927c478bd9Sstevel@tonic-gate allocdby_walk_step(mdb_walk_state_t *wsp)
19937c478bd9Sstevel@tonic-gate {
19947c478bd9Sstevel@tonic-gate allocdby_walk_t *abw = wsp->walk_data;
19957c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t bc;
19967c478bd9Sstevel@tonic-gate uintptr_t addr;
19977c478bd9Sstevel@tonic-gate
19987c478bd9Sstevel@tonic-gate if (abw->abw_ndx == abw->abw_nbufs)
19997c478bd9Sstevel@tonic-gate return (WALK_DONE);
20007c478bd9Sstevel@tonic-gate
20017c478bd9Sstevel@tonic-gate addr = abw->abw_buf[abw->abw_ndx++].abb_addr;
20027c478bd9Sstevel@tonic-gate
20037c478bd9Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (bc), addr) == -1) {
20047c478bd9Sstevel@tonic-gate mdb_warn("couldn't read bufctl at %p", addr);
20057c478bd9Sstevel@tonic-gate return (WALK_DONE);
20067c478bd9Sstevel@tonic-gate }
20077c478bd9Sstevel@tonic-gate
20087c478bd9Sstevel@tonic-gate return (wsp->walk_callback(addr, &bc, wsp->walk_cbdata));
20097c478bd9Sstevel@tonic-gate }
20107c478bd9Sstevel@tonic-gate
20117c478bd9Sstevel@tonic-gate void
20127c478bd9Sstevel@tonic-gate allocdby_walk_fini(mdb_walk_state_t *wsp)
20137c478bd9Sstevel@tonic-gate {
20147c478bd9Sstevel@tonic-gate allocdby_walk_t *abw = wsp->walk_data;
20157c478bd9Sstevel@tonic-gate
20167c478bd9Sstevel@tonic-gate mdb_free(abw->abw_buf, sizeof (allocdby_bufctl_t) * abw->abw_size);
20177c478bd9Sstevel@tonic-gate mdb_free(abw, sizeof (allocdby_walk_t));
20187c478bd9Sstevel@tonic-gate }
20197c478bd9Sstevel@tonic-gate
20207c478bd9Sstevel@tonic-gate /*ARGSUSED*/
20217c478bd9Sstevel@tonic-gate int
20227c478bd9Sstevel@tonic-gate allocdby_walk(uintptr_t addr, const kmem_bufctl_audit_t *bcp, void *ignored)
20237c478bd9Sstevel@tonic-gate {
20247c478bd9Sstevel@tonic-gate char c[MDB_SYM_NAMLEN];
20257c478bd9Sstevel@tonic-gate GElf_Sym sym;
20267c478bd9Sstevel@tonic-gate int i;
20277c478bd9Sstevel@tonic-gate
20287c478bd9Sstevel@tonic-gate mdb_printf("%0?p %12llx ", addr, bcp->bc_timestamp);
20297c478bd9Sstevel@tonic-gate for (i = 0; i < bcp->bc_depth; i++) {
20307c478bd9Sstevel@tonic-gate if (mdb_lookup_by_addr(bcp->bc_stack[i],
20317c478bd9Sstevel@tonic-gate MDB_SYM_FUZZY, c, sizeof (c), &sym) == -1)
20327c478bd9Sstevel@tonic-gate continue;
20337c478bd9Sstevel@tonic-gate if (strncmp(c, "kmem_", 5) == 0)
20347c478bd9Sstevel@tonic-gate continue;
20357c478bd9Sstevel@tonic-gate mdb_printf("%s+0x%lx",
20367c478bd9Sstevel@tonic-gate c, bcp->bc_stack[i] - (uintptr_t)sym.st_value);
20377c478bd9Sstevel@tonic-gate break;
20387c478bd9Sstevel@tonic-gate }
20397c478bd9Sstevel@tonic-gate mdb_printf("\n");
20407c478bd9Sstevel@tonic-gate
20417c478bd9Sstevel@tonic-gate return (WALK_NEXT);
20427c478bd9Sstevel@tonic-gate }
20437c478bd9Sstevel@tonic-gate
20447c478bd9Sstevel@tonic-gate static int
20457c478bd9Sstevel@tonic-gate allocdby_common(uintptr_t addr, uint_t flags, const char *w)
20467c478bd9Sstevel@tonic-gate {
20477c478bd9Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC))
20487c478bd9Sstevel@tonic-gate return (DCMD_USAGE);
20497c478bd9Sstevel@tonic-gate
20507c478bd9Sstevel@tonic-gate mdb_printf("%-?s %12s %s\n", "BUFCTL", "TIMESTAMP", "CALLER");
20517c478bd9Sstevel@tonic-gate
20527c478bd9Sstevel@tonic-gate if (mdb_pwalk(w, (mdb_walk_cb_t)allocdby_walk, NULL, addr) == -1) {
20537c478bd9Sstevel@tonic-gate mdb_warn("can't walk '%s' for %p", w, addr);
20547c478bd9Sstevel@tonic-gate return (DCMD_ERR);
20557c478bd9Sstevel@tonic-gate }
20567c478bd9Sstevel@tonic-gate
20577c478bd9Sstevel@tonic-gate return (DCMD_OK);
20587c478bd9Sstevel@tonic-gate }
20597c478bd9Sstevel@tonic-gate
20607c478bd9Sstevel@tonic-gate /*ARGSUSED*/
20617c478bd9Sstevel@tonic-gate int
20627c478bd9Sstevel@tonic-gate allocdby(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
20637c478bd9Sstevel@tonic-gate {
20647c478bd9Sstevel@tonic-gate return (allocdby_common(addr, flags, "allocdby"));
20657c478bd9Sstevel@tonic-gate }
20667c478bd9Sstevel@tonic-gate
20677c478bd9Sstevel@tonic-gate /*ARGSUSED*/
20687c478bd9Sstevel@tonic-gate int
20697c478bd9Sstevel@tonic-gate freedby(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
20707c478bd9Sstevel@tonic-gate {
20717c478bd9Sstevel@tonic-gate return (allocdby_common(addr, flags, "freedby"));
20727c478bd9Sstevel@tonic-gate }
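/*
 * Added usage sketch (hypothetical session): both dcmds take a kthread_t
 * address and list that thread's transactions, most recent first, in the
 * BUFCTL/TIMESTAMP/CALLER format printed by allocdby_common():
 *
 *	> ffffff01d22f3c20::allocdby
 *	> ffffff01d22f3c20::freedby
 */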
20737c478bd9Sstevel@tonic-gate
20747c478bd9Sstevel@tonic-gate /*
20757c478bd9Sstevel@tonic-gate * Return a string describing the address in relation to the given thread's
20767c478bd9Sstevel@tonic-gate * stack.
20777c478bd9Sstevel@tonic-gate *
20787c478bd9Sstevel@tonic-gate * - If the thread state is TS_FREE, return " (inactive interrupt thread)".
20797c478bd9Sstevel@tonic-gate *
20807c478bd9Sstevel@tonic-gate * - If the address is above the stack pointer, return an empty string
20817c478bd9Sstevel@tonic-gate * signifying that the address is active.
20827c478bd9Sstevel@tonic-gate *
20837c478bd9Sstevel@tonic-gate * - If the address is below the stack pointer, and the thread is not on proc,
20847c478bd9Sstevel@tonic-gate * return " (below sp)".
20857c478bd9Sstevel@tonic-gate *
20867c478bd9Sstevel@tonic-gate * - If the address is below the stack pointer, and the thread is on proc,
20877c478bd9Sstevel@tonic-gate * return " (possibly below sp)". Depending on context, we may or may not
20887c478bd9Sstevel@tonic-gate * have an accurate t_sp.
20897c478bd9Sstevel@tonic-gate */
20907c478bd9Sstevel@tonic-gate static const char *
20917c478bd9Sstevel@tonic-gate stack_active(const kthread_t *t, uintptr_t addr)
20927c478bd9Sstevel@tonic-gate {
20937c478bd9Sstevel@tonic-gate uintptr_t panicstk;
20947c478bd9Sstevel@tonic-gate GElf_Sym sym;
20957c478bd9Sstevel@tonic-gate
20967c478bd9Sstevel@tonic-gate if (t->t_state == TS_FREE)
20977c478bd9Sstevel@tonic-gate return (" (inactive interrupt thread)");
20987c478bd9Sstevel@tonic-gate
20997c478bd9Sstevel@tonic-gate /*
21007c478bd9Sstevel@tonic-gate * Check to see if we're on the panic stack. If so, ignore t_sp, as it
21017c478bd9Sstevel@tonic-gate * no longer relates to the thread's real stack.
21027c478bd9Sstevel@tonic-gate */
21037c478bd9Sstevel@tonic-gate if (mdb_lookup_by_name("panic_stack", &sym) == 0) {
21047c478bd9Sstevel@tonic-gate panicstk = (uintptr_t)sym.st_value;
21057c478bd9Sstevel@tonic-gate
21067c478bd9Sstevel@tonic-gate if (t->t_sp >= panicstk && t->t_sp < panicstk + PANICSTKSIZE)
21077c478bd9Sstevel@tonic-gate return ("");
21087c478bd9Sstevel@tonic-gate }
21097c478bd9Sstevel@tonic-gate
21107c478bd9Sstevel@tonic-gate if (addr >= t->t_sp + STACK_BIAS)
21117c478bd9Sstevel@tonic-gate return ("");
21127c478bd9Sstevel@tonic-gate
21137c478bd9Sstevel@tonic-gate if (t->t_state == TS_ONPROC)
21147c478bd9Sstevel@tonic-gate return (" (possibly below sp)");
21157c478bd9Sstevel@tonic-gate
21167c478bd9Sstevel@tonic-gate return (" (below sp)");
21177c478bd9Sstevel@tonic-gate }
21187c478bd9Sstevel@tonic-gate
21194a1c2431SJonathan Adams /*
21204a1c2431SJonathan Adams * Additional state for the kmem and vmem ::whatis handlers
21214a1c2431SJonathan Adams */
21224a1c2431SJonathan Adams typedef struct whatis_info {
21234a1c2431SJonathan Adams mdb_whatis_t *wi_w;
21244a1c2431SJonathan Adams const kmem_cache_t *wi_cache;
21254a1c2431SJonathan Adams const vmem_t *wi_vmem;
21264a1c2431SJonathan Adams vmem_t *wi_msb_arena;
21274a1c2431SJonathan Adams size_t wi_slab_size;
21284a1c2431SJonathan Adams uint_t wi_slab_found;
21294a1c2431SJonathan Adams uint_t wi_kmem_lite_count;
21304a1c2431SJonathan Adams uint_t wi_freemem;
21314a1c2431SJonathan Adams } whatis_info_t;
21320c3b83b1SJonathan Adams
21330c3b83b1SJonathan Adams /* call one of our dcmd functions with "-v" and the provided address */
21340c3b83b1SJonathan Adams static void
21350c3b83b1SJonathan Adams whatis_call_printer(mdb_dcmd_f *dcmd, uintptr_t addr)
21360c3b83b1SJonathan Adams {
21370c3b83b1SJonathan Adams mdb_arg_t a;
21380c3b83b1SJonathan Adams a.a_type = MDB_TYPE_STRING;
21390c3b83b1SJonathan Adams a.a_un.a_str = "-v";
21400c3b83b1SJonathan Adams
21414a1c2431SJonathan Adams mdb_printf(":\n");
21420c3b83b1SJonathan Adams (void) (*dcmd)(addr, DCMD_ADDRSPEC, 1, &a);
21430c3b83b1SJonathan Adams }
21440c3b83b1SJonathan Adams
21457c478bd9Sstevel@tonic-gate static void
21464a1c2431SJonathan Adams whatis_print_kmf_lite(uintptr_t btaddr, size_t count)
21477c478bd9Sstevel@tonic-gate {
21484a1c2431SJonathan Adams #define KMEM_LITE_MAX 16
21494a1c2431SJonathan Adams pc_t callers[KMEM_LITE_MAX];
21504a1c2431SJonathan Adams pc_t uninit = (pc_t)KMEM_UNINITIALIZED_PATTERN;
21517c478bd9Sstevel@tonic-gate
21527c478bd9Sstevel@tonic-gate kmem_buftag_t bt;
21534a1c2431SJonathan Adams intptr_t stat;
21544a1c2431SJonathan Adams const char *plural = "";
21554a1c2431SJonathan Adams int i;
21567c478bd9Sstevel@tonic-gate
21574a1c2431SJonathan Adams /* validate our arguments and read in the buftag */
21584a1c2431SJonathan Adams if (count == 0 || count > KMEM_LITE_MAX ||
21594a1c2431SJonathan Adams mdb_vread(&bt, sizeof (bt), btaddr) == -1)
21604a1c2431SJonathan Adams return;
21617c478bd9Sstevel@tonic-gate
21624a1c2431SJonathan Adams /* validate the buffer state and read in the callers */
21637c478bd9Sstevel@tonic-gate stat = (intptr_t)bt.bt_bufctl ^ bt.bt_bxstat;
21647c478bd9Sstevel@tonic-gate
216528e4da25SMatthew Ahrens if (stat != KMEM_BUFTAG_ALLOC && stat != KMEM_BUFTAG_FREE)
216628e4da25SMatthew Ahrens return;
216728e4da25SMatthew Ahrens
216828e4da25SMatthew Ahrens if (mdb_vread(callers, count * sizeof (pc_t),
21694a1c2431SJonathan Adams btaddr + offsetof(kmem_buftag_lite_t, bt_history)) == -1)
21704a1c2431SJonathan Adams return;
21717c478bd9Sstevel@tonic-gate
21724a1c2431SJonathan Adams /* If there aren't any filled in callers, bail */
21734a1c2431SJonathan Adams if (callers[0] == uninit)
21744a1c2431SJonathan Adams return;
21754a1c2431SJonathan Adams
21764a1c2431SJonathan Adams plural = (callers[1] == uninit) ? "" : "s";
21774a1c2431SJonathan Adams
21784a1c2431SJonathan Adams /* Everything's done and checked; print them out */
21794a1c2431SJonathan Adams mdb_printf(":\n");
21804a1c2431SJonathan Adams
21814a1c2431SJonathan Adams mdb_inc_indent(8);
21824a1c2431SJonathan Adams mdb_printf("recent caller%s: %a", plural, callers[0]);
21834a1c2431SJonathan Adams for (i = 1; i < count; i++) {
21844a1c2431SJonathan Adams if (callers[i] == uninit)
21854a1c2431SJonathan Adams break;
21864a1c2431SJonathan Adams mdb_printf(", %a", callers[i]);
21877c478bd9Sstevel@tonic-gate }
21884a1c2431SJonathan Adams mdb_dec_indent(8);
21897c478bd9Sstevel@tonic-gate }
21907c478bd9Sstevel@tonic-gate
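/*
 * Report a single matching kmem buffer: note whether it is allocated or
 * freed and which cache it belongs to.  For KMF_AUDIT caches we defer to the
 * bufctl printer for the full transaction details; for KMF_LITE caches we
 * print the lightweight caller history instead.
 */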
21914a1c2431SJonathan Adams static void
21924a1c2431SJonathan Adams whatis_print_kmem(whatis_info_t *wi, uintptr_t maddr, uintptr_t addr,
21934a1c2431SJonathan Adams uintptr_t baddr)
21944a1c2431SJonathan Adams {
21954a1c2431SJonathan Adams mdb_whatis_t *w = wi->wi_w;
21967c478bd9Sstevel@tonic-gate
21974a1c2431SJonathan Adams const kmem_cache_t *cp = wi->wi_cache;
21984a1c2431SJonathan Adams /* LINTED pointer cast may result in improper alignment */
21994a1c2431SJonathan Adams uintptr_t btaddr = (uintptr_t)KMEM_BUFTAG(cp, addr);
22004a1c2431SJonathan Adams int quiet = (mdb_whatis_flags(w) & WHATIS_QUIET);
22014a1c2431SJonathan Adams int call_printer = (!quiet && (cp->cache_flags & KMF_AUDIT));
22024a1c2431SJonathan Adams
22034a1c2431SJonathan Adams mdb_whatis_report_object(w, maddr, addr, "");
22040c3b83b1SJonathan Adams
22050c3b83b1SJonathan Adams if (baddr != 0 && !call_printer)
22060c3b83b1SJonathan Adams mdb_printf("bufctl %p ", baddr);
22070c3b83b1SJonathan Adams
22084a1c2431SJonathan Adams mdb_printf("%s from %s",
22094a1c2431SJonathan Adams (wi->wi_freemem == FALSE) ? "allocated" : "freed", cp->cache_name);
22100c3b83b1SJonathan Adams
22114a1c2431SJonathan Adams if (baddr != 0 && call_printer) {
22120c3b83b1SJonathan Adams whatis_call_printer(bufctl, baddr);
22134a1c2431SJonathan Adams return;
22147c478bd9Sstevel@tonic-gate }
22154a1c2431SJonathan Adams
22164a1c2431SJonathan Adams /* for KMF_LITE caches, try to print out the previous callers */
22174a1c2431SJonathan Adams if (!quiet && (cp->cache_flags & KMF_LITE))
22184a1c2431SJonathan Adams whatis_print_kmf_lite(btaddr, wi->wi_kmem_lite_count);
22194a1c2431SJonathan Adams
22204a1c2431SJonathan Adams mdb_printf("\n");
22217c478bd9Sstevel@tonic-gate }
22227c478bd9Sstevel@tonic-gate
22237c478bd9Sstevel@tonic-gate /*ARGSUSED*/
22247c478bd9Sstevel@tonic-gate static int
22254a1c2431SJonathan Adams whatis_walk_kmem(uintptr_t addr, void *ignored, whatis_info_t *wi)
22267c478bd9Sstevel@tonic-gate {
22274a1c2431SJonathan Adams mdb_whatis_t *w = wi->wi_w;
22287c478bd9Sstevel@tonic-gate
22294a1c2431SJonathan Adams uintptr_t cur;
22304a1c2431SJonathan Adams size_t size = wi->wi_cache->cache_bufsize;
22314a1c2431SJonathan Adams
22324a1c2431SJonathan Adams while (mdb_whatis_match(w, addr, size, &cur))
22334a1c2431SJonathan Adams whatis_print_kmem(wi, cur, addr, NULL);
22344a1c2431SJonathan Adams
22354a1c2431SJonathan Adams return (WHATIS_WALKRET(w));
22364a1c2431SJonathan Adams }
22374a1c2431SJonathan Adams
22384a1c2431SJonathan Adams /*ARGSUSED*/
22394a1c2431SJonathan Adams static int
22404a1c2431SJonathan Adams whatis_walk_bufctl(uintptr_t baddr, const kmem_bufctl_t *bcp, whatis_info_t *wi)
22414a1c2431SJonathan Adams {
22424a1c2431SJonathan Adams mdb_whatis_t *w = wi->wi_w;
22434a1c2431SJonathan Adams
22444a1c2431SJonathan Adams uintptr_t cur;
22454a1c2431SJonathan Adams uintptr_t addr = (uintptr_t)bcp->bc_addr;
22464a1c2431SJonathan Adams size_t size = wi->wi_cache->cache_bufsize;
22474a1c2431SJonathan Adams
22484a1c2431SJonathan Adams while (mdb_whatis_match(w, addr, size, &cur))
22494a1c2431SJonathan Adams whatis_print_kmem(wi, cur, addr, baddr);
22504a1c2431SJonathan Adams
22514a1c2431SJonathan Adams return (WHATIS_WALKRET(w));
22527c478bd9Sstevel@tonic-gate }
22537c478bd9Sstevel@tonic-gate
22547c478bd9Sstevel@tonic-gate static int
22554a1c2431SJonathan Adams whatis_walk_seg(uintptr_t addr, const vmem_seg_t *vs, whatis_info_t *wi)
22567c478bd9Sstevel@tonic-gate {
22574a1c2431SJonathan Adams mdb_whatis_t *w = wi->wi_w;
22584a1c2431SJonathan Adams
22594a1c2431SJonathan Adams size_t size = vs->vs_end - vs->vs_start;
22604a1c2431SJonathan Adams uintptr_t cur;
22614a1c2431SJonathan Adams
22624a1c2431SJonathan Adams /* We're not interested in anything but alloc and free segments */
22634a1c2431SJonathan Adams if (vs->vs_type != VMEM_ALLOC && vs->vs_type != VMEM_FREE)
22647c478bd9Sstevel@tonic-gate return (WALK_NEXT);
22657c478bd9Sstevel@tonic-gate
22664a1c2431SJonathan Adams while (mdb_whatis_match(w, vs->vs_start, size, &cur)) {
22674a1c2431SJonathan Adams mdb_whatis_report_object(w, cur, vs->vs_start, "");
22687c478bd9Sstevel@tonic-gate
22697c478bd9Sstevel@tonic-gate /*
22700c3b83b1SJonathan Adams * If we're not printing it separately, provide the vmem_seg
22710c3b83b1SJonathan Adams * pointer if it has a stack trace.
22727c478bd9Sstevel@tonic-gate */
22734a1c2431SJonathan Adams if ((mdb_whatis_flags(w) & WHATIS_QUIET) &&
22744a1c2431SJonathan Adams (!(mdb_whatis_flags(w) & WHATIS_BUFCTL) ||
22750c3b83b1SJonathan Adams (vs->vs_type == VMEM_ALLOC && vs->vs_depth != 0))) {
22760c3b83b1SJonathan Adams mdb_printf("vmem_seg %p ", addr);
22777c478bd9Sstevel@tonic-gate }
22787c478bd9Sstevel@tonic-gate
22794a1c2431SJonathan Adams mdb_printf("%s from the %s vmem arena",
22804a1c2431SJonathan Adams (vs->vs_type == VMEM_ALLOC) ? "allocated" : "freed",
22814a1c2431SJonathan Adams wi->wi_vmem->vm_name);
22820c3b83b1SJonathan Adams
22834a1c2431SJonathan Adams if (!(mdb_whatis_flags(w) & WHATIS_QUIET))
22840c3b83b1SJonathan Adams whatis_call_printer(vmem_seg, addr);
22854a1c2431SJonathan Adams else
22864a1c2431SJonathan Adams mdb_printf("\n");
22874a1c2431SJonathan Adams }
22887c478bd9Sstevel@tonic-gate
22894a1c2431SJonathan Adams return (WHATIS_WALKRET(w));
22907c478bd9Sstevel@tonic-gate }
22917c478bd9Sstevel@tonic-gate
22927c478bd9Sstevel@tonic-gate static int
22934a1c2431SJonathan Adams whatis_walk_vmem(uintptr_t addr, const vmem_t *vmem, whatis_info_t *wi)
22947c478bd9Sstevel@tonic-gate {
22954a1c2431SJonathan Adams mdb_whatis_t *w = wi->wi_w;
22967c478bd9Sstevel@tonic-gate const char *nm = vmem->vm_name;
22977c478bd9Sstevel@tonic-gate
22984a1c2431SJonathan Adams int identifier = ((vmem->vm_cflags & VMC_IDENTIFIER) != 0);
22994a1c2431SJonathan Adams int idspace = ((mdb_whatis_flags(w) & WHATIS_IDSPACE) != 0);
23004a1c2431SJonathan Adams
23014a1c2431SJonathan Adams if (identifier != idspace)
23027c478bd9Sstevel@tonic-gate return (WALK_NEXT);
23037c478bd9Sstevel@tonic-gate
23044a1c2431SJonathan Adams wi->wi_vmem = vmem;
23054a1c2431SJonathan Adams
23064a1c2431SJonathan Adams if (mdb_whatis_flags(w) & WHATIS_VERBOSE)
23077c478bd9Sstevel@tonic-gate mdb_printf("Searching vmem arena %s...\n", nm);
23087c478bd9Sstevel@tonic-gate
23094a1c2431SJonathan Adams if (mdb_pwalk("vmem_seg",
23104a1c2431SJonathan Adams (mdb_walk_cb_t)whatis_walk_seg, wi, addr) == -1) {
23114a1c2431SJonathan Adams mdb_warn("can't walk vmem_seg for %p", addr);
23127c478bd9Sstevel@tonic-gate return (WALK_NEXT);
23137c478bd9Sstevel@tonic-gate }
23147c478bd9Sstevel@tonic-gate
23154a1c2431SJonathan Adams return (WHATIS_WALKRET(w));
23167c478bd9Sstevel@tonic-gate }
23177c478bd9Sstevel@tonic-gate
23187c478bd9Sstevel@tonic-gate /*ARGSUSED*/
23197c478bd9Sstevel@tonic-gate static int
23204a1c2431SJonathan Adams whatis_walk_slab(uintptr_t saddr, const kmem_slab_t *sp, whatis_info_t *wi)
23217c478bd9Sstevel@tonic-gate {
23224a1c2431SJonathan Adams mdb_whatis_t *w = wi->wi_w;
23237c478bd9Sstevel@tonic-gate
23244a1c2431SJonathan Adams /* It must overlap with the slab data, or it's not interesting */
23254a1c2431SJonathan Adams if (mdb_whatis_overlaps(w,
23264a1c2431SJonathan Adams (uintptr_t)sp->slab_base, wi->wi_slab_size)) {
23274a1c2431SJonathan Adams wi->wi_slab_found++;
23287c478bd9Sstevel@tonic-gate return (WALK_DONE);
23297c478bd9Sstevel@tonic-gate }
23304a1c2431SJonathan Adams return (WALK_NEXT);
23314a1c2431SJonathan Adams }
23327c478bd9Sstevel@tonic-gate
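/*
 * Search a single kmem cache for the addresses of interest: walk its
 * allocated buffers, then its free buffers, using the bufctl-based walkers
 * when debugging information is available (or when -b was specified).  As a
 * shortcut, caches with more than two buffers per slab are first screened
 * with the cheaper kmem_slab walk.
 */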
23337c478bd9Sstevel@tonic-gate static int
23344a1c2431SJonathan Adams whatis_walk_cache(uintptr_t addr, const kmem_cache_t *c, whatis_info_t *wi)
23357c478bd9Sstevel@tonic-gate {
23364a1c2431SJonathan Adams mdb_whatis_t *w = wi->wi_w;
23374a1c2431SJonathan Adams
23387c478bd9Sstevel@tonic-gate char *walk, *freewalk;
23397c478bd9Sstevel@tonic-gate mdb_walk_cb_t func;
23404a1c2431SJonathan Adams int do_bufctl;
23417c478bd9Sstevel@tonic-gate
23424a1c2431SJonathan Adams int identifier = ((c->cache_flags & KMC_IDENTIFIER) != 0);
23434a1c2431SJonathan Adams int idspace = ((mdb_whatis_flags(w) & WHATIS_IDSPACE) != 0);
23444a1c2431SJonathan Adams
23454a1c2431SJonathan Adams if (identifier != idspace)
23467c478bd9Sstevel@tonic-gate return (WALK_NEXT);
23477c478bd9Sstevel@tonic-gate
23484a1c2431SJonathan Adams /* Override the '-b' flag as necessary */
23494a1c2431SJonathan Adams if (!(c->cache_flags & KMF_HASH))
23504a1c2431SJonathan Adams do_bufctl = FALSE; /* no bufctls to walk */
23514a1c2431SJonathan Adams else if (c->cache_flags & KMF_AUDIT)
23524a1c2431SJonathan Adams do_bufctl = TRUE; /* we always want debugging info */
23534a1c2431SJonathan Adams else
23544a1c2431SJonathan Adams do_bufctl = ((mdb_whatis_flags(w) & WHATIS_BUFCTL) != 0);
23554a1c2431SJonathan Adams
23564a1c2431SJonathan Adams if (do_bufctl) {
23577c478bd9Sstevel@tonic-gate walk = "bufctl";
23587c478bd9Sstevel@tonic-gate freewalk = "freectl";
23597c478bd9Sstevel@tonic-gate func = (mdb_walk_cb_t)whatis_walk_bufctl;
23600c3b83b1SJonathan Adams } else {
23610c3b83b1SJonathan Adams walk = "kmem";
23620c3b83b1SJonathan Adams freewalk = "freemem";
23630c3b83b1SJonathan Adams func = (mdb_walk_cb_t)whatis_walk_kmem;
23647c478bd9Sstevel@tonic-gate }
23657c478bd9Sstevel@tonic-gate
23664a1c2431SJonathan Adams wi->wi_cache = c;
23677c478bd9Sstevel@tonic-gate
23684a1c2431SJonathan Adams if (mdb_whatis_flags(w) & WHATIS_VERBOSE)
23694a1c2431SJonathan Adams mdb_printf("Searching %s...\n", c->cache_name);
23707c478bd9Sstevel@tonic-gate
23717c478bd9Sstevel@tonic-gate /*
23724a1c2431SJonathan Adams * If more than two buffers live on each slab, figure out if we're
23734a1c2431SJonathan Adams * interested in anything in any slab before doing the more expensive
23744a1c2431SJonathan Adams * kmem/freemem (bufctl/freectl) walkers.
23757c478bd9Sstevel@tonic-gate */
23764a1c2431SJonathan Adams wi->wi_slab_size = c->cache_slabsize - c->cache_maxcolor;
23774a1c2431SJonathan Adams if (!(c->cache_flags & KMF_HASH))
23784a1c2431SJonathan Adams wi->wi_slab_size -= sizeof (kmem_slab_t);
23797c478bd9Sstevel@tonic-gate
23804a1c2431SJonathan Adams if ((wi->wi_slab_size / c->cache_chunksize) > 2) {
23814a1c2431SJonathan Adams wi->wi_slab_found = 0;
23824a1c2431SJonathan Adams if (mdb_pwalk("kmem_slab", (mdb_walk_cb_t)whatis_walk_slab, wi,
23837c478bd9Sstevel@tonic-gate addr) == -1) {
23847c478bd9Sstevel@tonic-gate mdb_warn("can't find kmem_slab walker");
23857c478bd9Sstevel@tonic-gate return (WALK_DONE);
23867c478bd9Sstevel@tonic-gate }
23874a1c2431SJonathan Adams if (wi->wi_slab_found == 0)
23887c478bd9Sstevel@tonic-gate return (WALK_NEXT);
23897c478bd9Sstevel@tonic-gate }
23907c478bd9Sstevel@tonic-gate
23914a1c2431SJonathan Adams wi->wi_freemem = FALSE;
23924a1c2431SJonathan Adams if (mdb_pwalk(walk, func, wi, addr) == -1) {
23937c478bd9Sstevel@tonic-gate mdb_warn("can't find %s walker", walk);
23947c478bd9Sstevel@tonic-gate return (WALK_DONE);
23957c478bd9Sstevel@tonic-gate }
23967c478bd9Sstevel@tonic-gate
23974a1c2431SJonathan Adams if (mdb_whatis_done(w))
23987c478bd9Sstevel@tonic-gate return (WALK_DONE);
23997c478bd9Sstevel@tonic-gate
24007c478bd9Sstevel@tonic-gate /*
24017c478bd9Sstevel@tonic-gate * We have searched for allocated memory; now search for freed memory.
24027c478bd9Sstevel@tonic-gate */
24034a1c2431SJonathan Adams if (mdb_whatis_flags(w) & WHATIS_VERBOSE)
24047c478bd9Sstevel@tonic-gate mdb_printf("Searching %s for free memory...\n", c->cache_name);
24057c478bd9Sstevel@tonic-gate
24064a1c2431SJonathan Adams wi->wi_freemem = TRUE;
24074a1c2431SJonathan Adams if (mdb_pwalk(freewalk, func, wi, addr) == -1) {
24087c478bd9Sstevel@tonic-gate mdb_warn("can't find %s walker", freewalk);
24097c478bd9Sstevel@tonic-gate return (WALK_DONE);
24107c478bd9Sstevel@tonic-gate }
24117c478bd9Sstevel@tonic-gate
24124a1c2431SJonathan Adams return (WHATIS_WALKRET(w));
24137c478bd9Sstevel@tonic-gate }
24147c478bd9Sstevel@tonic-gate
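/*
 * The next three functions filter the kmem_cache walk for whatis_run_kmem():
 * ordinary caches first, then metadata caches (those backed by
 * kmem_msb_arena), and finally KMC_NOTOUCH caches.
 */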
24157c478bd9Sstevel@tonic-gate static int
24164a1c2431SJonathan Adams whatis_walk_touch(uintptr_t addr, const kmem_cache_t *c, whatis_info_t *wi)
24177c478bd9Sstevel@tonic-gate {
24184a1c2431SJonathan Adams if (c->cache_arena == wi->wi_msb_arena ||
24194a1c2431SJonathan Adams (c->cache_cflags & KMC_NOTOUCH))
24207c478bd9Sstevel@tonic-gate return (WALK_NEXT);
24217c478bd9Sstevel@tonic-gate
24224a1c2431SJonathan Adams return (whatis_walk_cache(addr, c, wi));
24237c478bd9Sstevel@tonic-gate }
24247c478bd9Sstevel@tonic-gate
24257c478bd9Sstevel@tonic-gate static int
24264a1c2431SJonathan Adams whatis_walk_metadata(uintptr_t addr, const kmem_cache_t *c, whatis_info_t *wi)
24277c478bd9Sstevel@tonic-gate {
24284a1c2431SJonathan Adams if (c->cache_arena != wi->wi_msb_arena)
24297c478bd9Sstevel@tonic-gate return (WALK_NEXT);
24307c478bd9Sstevel@tonic-gate
24314a1c2431SJonathan Adams return (whatis_walk_cache(addr, c, wi));
24327c478bd9Sstevel@tonic-gate }
24337c478bd9Sstevel@tonic-gate
24347c478bd9Sstevel@tonic-gate static int
24354a1c2431SJonathan Adams whatis_walk_notouch(uintptr_t addr, const kmem_cache_t *c, whatis_info_t *wi)
24367c478bd9Sstevel@tonic-gate {
24374a1c2431SJonathan Adams if (c->cache_arena == wi->wi_msb_arena ||
24384a1c2431SJonathan Adams !(c->cache_cflags & KMC_NOTOUCH))
24394a1c2431SJonathan Adams return (WALK_NEXT);
24404a1c2431SJonathan Adams
24414a1c2431SJonathan Adams return (whatis_walk_cache(addr, c, wi));
24424a1c2431SJonathan Adams }
24434a1c2431SJonathan Adams
24444a1c2431SJonathan Adams static int
24454a1c2431SJonathan Adams whatis_walk_thread(uintptr_t addr, const kthread_t *t, mdb_whatis_t *w)
24464a1c2431SJonathan Adams {
24474a1c2431SJonathan Adams uintptr_t cur;
24484a1c2431SJonathan Adams uintptr_t saddr;
24494a1c2431SJonathan Adams size_t size;
24504a1c2431SJonathan Adams
24517c478bd9Sstevel@tonic-gate /*
24527c478bd9Sstevel@tonic-gate * Often, one calls ::whatis on an address from a thread structure.
24537c478bd9Sstevel@tonic-gate * We use this opportunity to short circuit this case...
24547c478bd9Sstevel@tonic-gate */
24554a1c2431SJonathan Adams while (mdb_whatis_match(w, addr, sizeof (kthread_t), &cur))
24564a1c2431SJonathan Adams mdb_whatis_report_object(w, cur, addr,
24570c3b83b1SJonathan Adams "allocated as a thread structure\n");
24587c478bd9Sstevel@tonic-gate
24594a1c2431SJonathan Adams /*
24604a1c2431SJonathan Adams * Now check the stack
24614a1c2431SJonathan Adams */
24627c478bd9Sstevel@tonic-gate if (t->t_stkbase == NULL)
24637c478bd9Sstevel@tonic-gate return (WALK_NEXT);
24647c478bd9Sstevel@tonic-gate
24654a1c2431SJonathan Adams /*
24664a1c2431SJonathan Adams * This assumes that t_stk is the end of the stack, but it's really
24674a1c2431SJonathan Adams * only the initial stack pointer for the thread. Arguments to the
24684a1c2431SJonathan Adams * initial procedure, SA(MINFRAME), etc. are all after t_stk. So
24694a1c2431SJonathan Adams * that 't->t_stk::whatis' reports "part of t's stack", we include
24704a1c2431SJonathan Adams * t_stk in the range (the "+ 1", below), but the kernel should
24714a1c2431SJonathan Adams * really include the full stack bounds where we can find it.
24724a1c2431SJonathan Adams */
24734a1c2431SJonathan Adams saddr = (uintptr_t)t->t_stkbase;
24744a1c2431SJonathan Adams size = (uintptr_t)t->t_stk - saddr + 1;
24754a1c2431SJonathan Adams while (mdb_whatis_match(w, saddr, size, &cur))
24764a1c2431SJonathan Adams mdb_whatis_report_object(w, cur, cur,
24774a1c2431SJonathan Adams "in thread %p's stack%s\n", addr, stack_active(t, cur));
24787c478bd9Sstevel@tonic-gate
24794a1c2431SJonathan Adams return (WHATIS_WALKRET(w));
24804a1c2431SJonathan Adams }
24814a1c2431SJonathan Adams
24824a1c2431SJonathan Adams static void
24834a1c2431SJonathan Adams whatis_modctl_match(mdb_whatis_t *w, const char *name,
24844a1c2431SJonathan Adams uintptr_t base, size_t size, const char *where)
24854a1c2431SJonathan Adams {
24864a1c2431SJonathan Adams uintptr_t cur;
24874a1c2431SJonathan Adams
24884a1c2431SJonathan Adams /*
24894a1c2431SJonathan Adams * Since we're searching for addresses inside a module, we report
24904a1c2431SJonathan Adams * them as symbols.
24914a1c2431SJonathan Adams */
24924a1c2431SJonathan Adams while (mdb_whatis_match(w, base, size, &cur))
24934a1c2431SJonathan Adams mdb_whatis_report_address(w, cur, "in %s's %s\n", name, where);
24947c478bd9Sstevel@tonic-gate }
24957c478bd9Sstevel@tonic-gate
24967c478bd9Sstevel@tonic-gate static int
24974a1c2431SJonathan Adams whatis_walk_modctl(uintptr_t addr, const struct modctl *m, mdb_whatis_t *w)
24987c478bd9Sstevel@tonic-gate {
24994a1c2431SJonathan Adams char name[MODMAXNAMELEN];
25007c478bd9Sstevel@tonic-gate struct module mod;
25017c478bd9Sstevel@tonic-gate Shdr shdr;
25027c478bd9Sstevel@tonic-gate
25037c478bd9Sstevel@tonic-gate if (m->mod_mp == NULL)
25047c478bd9Sstevel@tonic-gate return (WALK_NEXT);
25057c478bd9Sstevel@tonic-gate
25067c478bd9Sstevel@tonic-gate if (mdb_vread(&mod, sizeof (mod), (uintptr_t)m->mod_mp) == -1) {
25077c478bd9Sstevel@tonic-gate mdb_warn("couldn't read modctl %p's module", addr);
25087c478bd9Sstevel@tonic-gate return (WALK_NEXT);
25097c478bd9Sstevel@tonic-gate }
25107c478bd9Sstevel@tonic-gate
25114a1c2431SJonathan Adams if (mdb_readstr(name, sizeof (name), (uintptr_t)m->mod_modname) == -1)
25124a1c2431SJonathan Adams (void) mdb_snprintf(name, sizeof (name), "0x%p", addr);
25137c478bd9Sstevel@tonic-gate
25144a1c2431SJonathan Adams whatis_modctl_match(w, name,
25154a1c2431SJonathan Adams (uintptr_t)mod.text, mod.text_size, "text segment");
25164a1c2431SJonathan Adams whatis_modctl_match(w, name,
25174a1c2431SJonathan Adams (uintptr_t)mod.data, mod.data_size, "data segment");
25184a1c2431SJonathan Adams whatis_modctl_match(w, name,
25194a1c2431SJonathan Adams (uintptr_t)mod.bss, mod.bss_size, "bss segment");
25207c478bd9Sstevel@tonic-gate
25217c478bd9Sstevel@tonic-gate if (mdb_vread(&shdr, sizeof (shdr), (uintptr_t)mod.symhdr) == -1) {
25227c478bd9Sstevel@tonic-gate mdb_warn("couldn't read symbol header for %p's module", addr);
25237c478bd9Sstevel@tonic-gate return (WALK_NEXT);
25247c478bd9Sstevel@tonic-gate }
25257c478bd9Sstevel@tonic-gate
25264a1c2431SJonathan Adams whatis_modctl_match(w, name,
25274a1c2431SJonathan Adams (uintptr_t)mod.symtbl, mod.nsyms * shdr.sh_entsize, "symtab");
25284a1c2431SJonathan Adams whatis_modctl_match(w, name,
25294a1c2431SJonathan Adams (uintptr_t)mod.symspace, mod.symsize, "symtab");
25307c478bd9Sstevel@tonic-gate
25314a1c2431SJonathan Adams return (WHATIS_WALKRET(w));
25327c478bd9Sstevel@tonic-gate }
25337c478bd9Sstevel@tonic-gate
25347c478bd9Sstevel@tonic-gate /*ARGSUSED*/
25357c478bd9Sstevel@tonic-gate static int
25364a1c2431SJonathan Adams whatis_walk_memseg(uintptr_t addr, const struct memseg *seg, mdb_whatis_t *w)
25377c478bd9Sstevel@tonic-gate {
25384a1c2431SJonathan Adams uintptr_t cur;
25397c478bd9Sstevel@tonic-gate
25404a1c2431SJonathan Adams uintptr_t base = (uintptr_t)seg->pages;
25414a1c2431SJonathan Adams size_t size = (uintptr_t)seg->epages - base;
25427c478bd9Sstevel@tonic-gate
25434a1c2431SJonathan Adams while (mdb_whatis_match(w, base, size, &cur)) {
25444a1c2431SJonathan Adams /* round our found pointer down to the page_t base. */
25454a1c2431SJonathan Adams size_t offset = (cur - base) % sizeof (page_t);
25467c478bd9Sstevel@tonic-gate
25474a1c2431SJonathan Adams mdb_whatis_report_object(w, cur, cur - offset,
25480c3b83b1SJonathan Adams "allocated as a page structure\n");
25497c478bd9Sstevel@tonic-gate }
25507c478bd9Sstevel@tonic-gate
25514a1c2431SJonathan Adams return (WHATIS_WALKRET(w));
25524a1c2431SJonathan Adams }
25534a1c2431SJonathan Adams
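/*
 * The whatis_run_* functions below are the ::whatis callbacks contributed by
 * this module (they are registered elsewhere in this file); they cover
 * module text/data/bss and symbol tables, thread structures and stacks,
 * page_t structures, kmem buffers, and vmem segments, respectively.
 */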
25544a1c2431SJonathan Adams /*ARGSUSED*/
25554a1c2431SJonathan Adams static int
25564a1c2431SJonathan Adams whatis_run_modules(mdb_whatis_t *w, void *arg)
25577c478bd9Sstevel@tonic-gate {
25584a1c2431SJonathan Adams if (mdb_walk("modctl", (mdb_walk_cb_t)whatis_walk_modctl, w) == -1) {
25597c478bd9Sstevel@tonic-gate mdb_warn("couldn't find modctl walker");
25604a1c2431SJonathan Adams return (1);
25614a1c2431SJonathan Adams }
25624a1c2431SJonathan Adams return (0);
25637c478bd9Sstevel@tonic-gate }
25647c478bd9Sstevel@tonic-gate
25654a1c2431SJonathan Adams /*ARGSUSED*/
25664a1c2431SJonathan Adams static int
25674a1c2431SJonathan Adams whatis_run_threads(mdb_whatis_t *w, void *ignored)
25684a1c2431SJonathan Adams {
25697c478bd9Sstevel@tonic-gate /*
25707c478bd9Sstevel@tonic-gate * Now search all thread stacks. Yes, this is a little weak; we
25717c478bd9Sstevel@tonic-gate * can save a lot of work by first checking to see if the
25727c478bd9Sstevel@tonic-gate * address is in segkp vs. segkmem. But hey, computers are
25737c478bd9Sstevel@tonic-gate * fast.
25747c478bd9Sstevel@tonic-gate */
25754a1c2431SJonathan Adams if (mdb_walk("thread", (mdb_walk_cb_t)whatis_walk_thread, w) == -1) {
25767c478bd9Sstevel@tonic-gate mdb_warn("couldn't find thread walker");
25774a1c2431SJonathan Adams return (1);
25784a1c2431SJonathan Adams }
25794a1c2431SJonathan Adams return (0);
25807c478bd9Sstevel@tonic-gate }
25817c478bd9Sstevel@tonic-gate
25824a1c2431SJonathan Adams /*ARGSUSED*/
25834a1c2431SJonathan Adams static int
25844a1c2431SJonathan Adams whatis_run_pages(mdb_whatis_t *w, void *ignored)
25854a1c2431SJonathan Adams {
25864a1c2431SJonathan Adams if (mdb_walk("memseg", (mdb_walk_cb_t)whatis_walk_memseg, w) == -1) {
25874a1c2431SJonathan Adams mdb_warn("couldn't find memseg walker");
25884a1c2431SJonathan Adams return (1);
25894a1c2431SJonathan Adams }
25904a1c2431SJonathan Adams return (0);
25917c478bd9Sstevel@tonic-gate }
25927c478bd9Sstevel@tonic-gate
25934a1c2431SJonathan Adams /*ARGSUSED*/
25944a1c2431SJonathan Adams static int
25954a1c2431SJonathan Adams whatis_run_kmem(mdb_whatis_t *w, void *ignored)
25964a1c2431SJonathan Adams {
25974a1c2431SJonathan Adams whatis_info_t wi;
25987c478bd9Sstevel@tonic-gate
25994a1c2431SJonathan Adams bzero(&wi, sizeof (wi));
26004a1c2431SJonathan Adams wi.wi_w = w;
26014a1c2431SJonathan Adams
26024a1c2431SJonathan Adams if (mdb_readvar(&wi.wi_msb_arena, "kmem_msb_arena") == -1)
26034a1c2431SJonathan Adams mdb_warn("unable to readvar \"kmem_msb_arena\"");
26044a1c2431SJonathan Adams
26054a1c2431SJonathan Adams if (mdb_readvar(&wi.wi_kmem_lite_count,
26064a1c2431SJonathan Adams "kmem_lite_count") == -1 || wi.wi_kmem_lite_count > 16)
26074a1c2431SJonathan Adams wi.wi_kmem_lite_count = 0;
26084a1c2431SJonathan Adams
26094a1c2431SJonathan Adams /*
26104a1c2431SJonathan Adams * We process kmem caches in the following order:
26114a1c2431SJonathan Adams *
26124a1c2431SJonathan Adams * non-KMC_NOTOUCH, non-metadata (typically the most interesting)
26134a1c2431SJonathan Adams * metadata (can be huge with KMF_AUDIT)
26144a1c2431SJonathan Adams * KMC_NOTOUCH, non-metadata (see kmem_walk_all())
26154a1c2431SJonathan Adams */
26164a1c2431SJonathan Adams if (mdb_walk("kmem_cache", (mdb_walk_cb_t)whatis_walk_touch,
26174a1c2431SJonathan Adams &wi) == -1 ||
26184a1c2431SJonathan Adams mdb_walk("kmem_cache", (mdb_walk_cb_t)whatis_walk_metadata,
26194a1c2431SJonathan Adams &wi) == -1 ||
26204a1c2431SJonathan Adams mdb_walk("kmem_cache", (mdb_walk_cb_t)whatis_walk_notouch,
26214a1c2431SJonathan Adams &wi) == -1) {
26227c478bd9Sstevel@tonic-gate mdb_warn("couldn't find kmem_cache walker");
26234a1c2431SJonathan Adams return (1);
26244a1c2431SJonathan Adams }
26254a1c2431SJonathan Adams return (0);
26267c478bd9Sstevel@tonic-gate }
26277c478bd9Sstevel@tonic-gate
26284a1c2431SJonathan Adams /*ARGSUSED*/
26294a1c2431SJonathan Adams static int
26304a1c2431SJonathan Adams whatis_run_vmem(mdb_whatis_t *w, void *ignored)
26314a1c2431SJonathan Adams {
26324a1c2431SJonathan Adams whatis_info_t wi;
26337c478bd9Sstevel@tonic-gate
26344a1c2431SJonathan Adams bzero(&wi, sizeof (wi));
26354a1c2431SJonathan Adams wi.wi_w = w;
26367c478bd9Sstevel@tonic-gate
26377c478bd9Sstevel@tonic-gate if (mdb_walk("vmem_postfix",
26384a1c2431SJonathan Adams (mdb_walk_cb_t)whatis_walk_vmem, &wi) == -1) {
26397c478bd9Sstevel@tonic-gate mdb_warn("couldn't find vmem_postfix walker");
26404a1c2431SJonathan Adams return (1);
26417c478bd9Sstevel@tonic-gate }
26424a1c2431SJonathan Adams return (0);
26437c478bd9Sstevel@tonic-gate }
26447c478bd9Sstevel@tonic-gate
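/*
 * State for ::kmem_log: the address range of each CPU's chunk of the kmem
 * transaction log (so that entries can be attributed to a CPU), plus an
 * optional buffer address to filter on.
 */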
26457c478bd9Sstevel@tonic-gate typedef struct kmem_log_cpu {
26467c478bd9Sstevel@tonic-gate uintptr_t kmc_low;
26477c478bd9Sstevel@tonic-gate uintptr_t kmc_high;
26487c478bd9Sstevel@tonic-gate } kmem_log_cpu_t;
26497c478bd9Sstevel@tonic-gate
26507c478bd9Sstevel@tonic-gate typedef struct kmem_log_data {
26517c478bd9Sstevel@tonic-gate uintptr_t kmd_addr;
26527c478bd9Sstevel@tonic-gate kmem_log_cpu_t *kmd_cpu;
26537c478bd9Sstevel@tonic-gate } kmem_log_data_t;
26547c478bd9Sstevel@tonic-gate
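/*
 * Print one transaction log entry, tagged with the CPU whose log chunk
 * contains it; if kmd_addr is set, skip entries whose buffer does not
 * contain that address.
 */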
26557c478bd9Sstevel@tonic-gate int
26567c478bd9Sstevel@tonic-gate kmem_log_walk(uintptr_t addr, const kmem_bufctl_audit_t *b,
26577c478bd9Sstevel@tonic-gate kmem_log_data_t *kmd)
26587c478bd9Sstevel@tonic-gate {
26597c478bd9Sstevel@tonic-gate int i;
26607c478bd9Sstevel@tonic-gate kmem_log_cpu_t *kmc = kmd->kmd_cpu;
26617c478bd9Sstevel@tonic-gate size_t bufsize;
26627c478bd9Sstevel@tonic-gate
26637c478bd9Sstevel@tonic-gate for (i = 0; i < NCPU; i++) {
26647c478bd9Sstevel@tonic-gate if (addr >= kmc[i].kmc_low && addr < kmc[i].kmc_high)
26657c478bd9Sstevel@tonic-gate break;
26667c478bd9Sstevel@tonic-gate }
26677c478bd9Sstevel@tonic-gate
26687c478bd9Sstevel@tonic-gate if (kmd->kmd_addr) {
26697c478bd9Sstevel@tonic-gate if (b->bc_cache == NULL)
26707c478bd9Sstevel@tonic-gate return (WALK_NEXT);
26717c478bd9Sstevel@tonic-gate
26727c478bd9Sstevel@tonic-gate if (mdb_vread(&bufsize, sizeof (bufsize),
26737c478bd9Sstevel@tonic-gate (uintptr_t)&b->bc_cache->cache_bufsize) == -1) {
26747c478bd9Sstevel@tonic-gate mdb_warn(
26757c478bd9Sstevel@tonic-gate "failed to read cache_bufsize for cache at %p",
26767c478bd9Sstevel@tonic-gate b->bc_cache);
26777c478bd9Sstevel@tonic-gate return (WALK_ERR);
26787c478bd9Sstevel@tonic-gate }
26797c478bd9Sstevel@tonic-gate
26807c478bd9Sstevel@tonic-gate if (kmd->kmd_addr < (uintptr_t)b->bc_addr ||
26817c478bd9Sstevel@tonic-gate kmd->kmd_addr >= (uintptr_t)b->bc_addr + bufsize)
26827c478bd9Sstevel@tonic-gate return (WALK_NEXT);
26837c478bd9Sstevel@tonic-gate }
26847c478bd9Sstevel@tonic-gate
26857c478bd9Sstevel@tonic-gate if (i == NCPU)
26867c478bd9Sstevel@tonic-gate mdb_printf(" ");
26877c478bd9Sstevel@tonic-gate else
26887c478bd9Sstevel@tonic-gate mdb_printf("%3d", i);
26897c478bd9Sstevel@tonic-gate
26907c478bd9Sstevel@tonic-gate mdb_printf(" %0?p %0?p %16llx %0?p\n", addr, b->bc_addr,
26917c478bd9Sstevel@tonic-gate b->bc_timestamp, b->bc_thread);
26927c478bd9Sstevel@tonic-gate
26937c478bd9Sstevel@tonic-gate return (WALK_NEXT);
26947c478bd9Sstevel@tonic-gate }
26957c478bd9Sstevel@tonic-gate
26967c478bd9Sstevel@tonic-gate /*ARGSUSED*/
26977c478bd9Sstevel@tonic-gate int
26987c478bd9Sstevel@tonic-gate kmem_log(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
26997c478bd9Sstevel@tonic-gate {
27007c478bd9Sstevel@tonic-gate kmem_log_header_t lh;
27017c478bd9Sstevel@tonic-gate kmem_cpu_log_header_t clh;
27027c478bd9Sstevel@tonic-gate uintptr_t lhp, clhp;
27037c478bd9Sstevel@tonic-gate int ncpus;
27047c478bd9Sstevel@tonic-gate uintptr_t *cpu;
27057c478bd9Sstevel@tonic-gate GElf_Sym sym;
27067c478bd9Sstevel@tonic-gate kmem_log_cpu_t *kmc;
27077c478bd9Sstevel@tonic-gate int i;
27087c478bd9Sstevel@tonic-gate kmem_log_data_t kmd;
27097c478bd9Sstevel@tonic-gate uint_t opt_b = FALSE;
27107c478bd9Sstevel@tonic-gate
27117c478bd9Sstevel@tonic-gate if (mdb_getopts(argc, argv,
27127c478bd9Sstevel@tonic-gate 'b', MDB_OPT_SETBITS, TRUE, &opt_b, NULL) != argc)
27137c478bd9Sstevel@tonic-gate return (DCMD_USAGE);
27147c478bd9Sstevel@tonic-gate
27157c478bd9Sstevel@tonic-gate if (mdb_readvar(&lhp, "kmem_transaction_log") == -1) {
27167c478bd9Sstevel@tonic-gate mdb_warn("failed to read 'kmem_transaction_log'");
27177c478bd9Sstevel@tonic-gate return (DCMD_ERR);
27187c478bd9Sstevel@tonic-gate }
27197c478bd9Sstevel@tonic-gate
27207c478bd9Sstevel@tonic-gate if (lhp == NULL) {
27217c478bd9Sstevel@tonic-gate mdb_warn("no kmem transaction log\n");
27227c478bd9Sstevel@tonic-gate return (DCMD_ERR);
27237c478bd9Sstevel@tonic-gate }
27247c478bd9Sstevel@tonic-gate
27257c478bd9Sstevel@tonic-gate mdb_readvar(&ncpus, "ncpus");
27267c478bd9Sstevel@tonic-gate
27277c478bd9Sstevel@tonic-gate if (mdb_vread(&lh, sizeof (kmem_log_header_t), lhp) == -1) {
27287c478bd9Sstevel@tonic-gate mdb_warn("failed to read log header at %p", lhp);
27297c478bd9Sstevel@tonic-gate return (DCMD_ERR);
27307c478bd9Sstevel@tonic-gate }
27317c478bd9Sstevel@tonic-gate
27327c478bd9Sstevel@tonic-gate clhp = lhp + ((uintptr_t)&lh.lh_cpu[0] - (uintptr_t)&lh);
27337c478bd9Sstevel@tonic-gate
27347c478bd9Sstevel@tonic-gate cpu = mdb_alloc(sizeof (uintptr_t) * NCPU, UM_SLEEP | UM_GC);
27357c478bd9Sstevel@tonic-gate
27367c478bd9Sstevel@tonic-gate if (mdb_lookup_by_name("cpu", &sym) == -1) {
27377c478bd9Sstevel@tonic-gate mdb_warn("couldn't find 'cpu' array");
27387c478bd9Sstevel@tonic-gate return (DCMD_ERR);
27397c478bd9Sstevel@tonic-gate }
27407c478bd9Sstevel@tonic-gate
27417c478bd9Sstevel@tonic-gate if (sym.st_size != NCPU * sizeof (uintptr_t)) {
27427c478bd9Sstevel@tonic-gate mdb_warn("expected 'cpu' to be of size %d; found %d\n",
27437c478bd9Sstevel@tonic-gate NCPU * sizeof (uintptr_t), sym.st_size);
27447c478bd9Sstevel@tonic-gate return (DCMD_ERR);
27457c478bd9Sstevel@tonic-gate }
27467c478bd9Sstevel@tonic-gate
27477c478bd9Sstevel@tonic-gate if (mdb_vread(cpu, sym.st_size, (uintptr_t)sym.st_value) == -1) {
27487c478bd9Sstevel@tonic-gate mdb_warn("failed to read cpu array at %p", sym.st_value);
27497c478bd9Sstevel@tonic-gate return (DCMD_ERR);
27507c478bd9Sstevel@tonic-gate }
27517c478bd9Sstevel@tonic-gate
27527c478bd9Sstevel@tonic-gate kmc = mdb_zalloc(sizeof (kmem_log_cpu_t) * NCPU, UM_SLEEP | UM_GC);
27537c478bd9Sstevel@tonic-gate kmd.kmd_addr = NULL;
27547c478bd9Sstevel@tonic-gate kmd.kmd_cpu = kmc;
27557c478bd9Sstevel@tonic-gate
27567c478bd9Sstevel@tonic-gate for (i = 0; i < NCPU; i++) {
27577c478bd9Sstevel@tonic-gate
27587c478bd9Sstevel@tonic-gate if (cpu[i] == NULL)
27597c478bd9Sstevel@tonic-gate continue;
27607c478bd9Sstevel@tonic-gate
27617c478bd9Sstevel@tonic-gate if (mdb_vread(&clh, sizeof (clh), clhp) == -1) {
27627c478bd9Sstevel@tonic-gate mdb_warn("cannot read cpu %d's log header at %p",
27637c478bd9Sstevel@tonic-gate i, clhp);
27647c478bd9Sstevel@tonic-gate return (DCMD_ERR);
27657c478bd9Sstevel@tonic-gate }
27667c478bd9Sstevel@tonic-gate
27677c478bd9Sstevel@tonic-gate kmc[i].kmc_low = clh.clh_chunk * lh.lh_chunksize +
27687c478bd9Sstevel@tonic-gate (uintptr_t)lh.lh_base;
27697c478bd9Sstevel@tonic-gate kmc[i].kmc_high = (uintptr_t)clh.clh_current;
27707c478bd9Sstevel@tonic-gate
27717c478bd9Sstevel@tonic-gate clhp += sizeof (kmem_cpu_log_header_t);
27727c478bd9Sstevel@tonic-gate }
27737c478bd9Sstevel@tonic-gate
27747c478bd9Sstevel@tonic-gate mdb_printf("%3s %-?s %-?s %16s %-?s\n", "CPU", "ADDR", "BUFADDR",
27757c478bd9Sstevel@tonic-gate "TIMESTAMP", "THREAD");
27767c478bd9Sstevel@tonic-gate
27777c478bd9Sstevel@tonic-gate /*
27787c478bd9Sstevel@tonic-gate * If we have been passed an address, print out only log entries
27797c478bd9Sstevel@tonic-gate * corresponding to that address: by default the address is taken to be
27807c478bd9Sstevel@tonic-gate * a bufctl; if opt_b is specified, it is treated as a buffer address.
27817c478bd9Sstevel@tonic-gate */
27827c478bd9Sstevel@tonic-gate if (flags & DCMD_ADDRSPEC) {
27837c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t b;
27847c478bd9Sstevel@tonic-gate
27857c478bd9Sstevel@tonic-gate if (opt_b) {
27867c478bd9Sstevel@tonic-gate kmd.kmd_addr = addr;
27877c478bd9Sstevel@tonic-gate } else {
27887c478bd9Sstevel@tonic-gate if (mdb_vread(&b,
27897c478bd9Sstevel@tonic-gate sizeof (kmem_bufctl_audit_t), addr) == -1) {
27907c478bd9Sstevel@tonic-gate mdb_warn("failed to read bufctl at %p", addr);
27917c478bd9Sstevel@tonic-gate return (DCMD_ERR);
27927c478bd9Sstevel@tonic-gate }
27937c478bd9Sstevel@tonic-gate
27947c478bd9Sstevel@tonic-gate (void) kmem_log_walk(addr, &b, &kmd);
27957c478bd9Sstevel@tonic-gate
27967c478bd9Sstevel@tonic-gate return (DCMD_OK);
27977c478bd9Sstevel@tonic-gate }
27987c478bd9Sstevel@tonic-gate }
27997c478bd9Sstevel@tonic-gate
28007c478bd9Sstevel@tonic-gate if (mdb_walk("kmem_log", (mdb_walk_cb_t)kmem_log_walk, &kmd) == -1) {
28017c478bd9Sstevel@tonic-gate mdb_warn("can't find kmem log walker");
28027c478bd9Sstevel@tonic-gate return (DCMD_ERR);
28037c478bd9Sstevel@tonic-gate }
28047c478bd9Sstevel@tonic-gate
28057c478bd9Sstevel@tonic-gate return (DCMD_OK);
28067c478bd9Sstevel@tonic-gate }
28077c478bd9Sstevel@tonic-gate
28087c478bd9Sstevel@tonic-gate typedef struct bufctl_history_cb {
28097c478bd9Sstevel@tonic-gate int bhc_flags;
28107c478bd9Sstevel@tonic-gate int bhc_argc;
28117c478bd9Sstevel@tonic-gate const mdb_arg_t *bhc_argv;
28127c478bd9Sstevel@tonic-gate int bhc_ret;
28137c478bd9Sstevel@tonic-gate } bufctl_history_cb_t;
28147c478bd9Sstevel@tonic-gate
28157c478bd9Sstevel@tonic-gate /*ARGSUSED*/
28167c478bd9Sstevel@tonic-gate static int
28177c478bd9Sstevel@tonic-gate bufctl_history_callback(uintptr_t addr, const void *ign, void *arg)
28187c478bd9Sstevel@tonic-gate {
28197c478bd9Sstevel@tonic-gate bufctl_history_cb_t *bhc = arg;
28207c478bd9Sstevel@tonic-gate
28217c478bd9Sstevel@tonic-gate bhc->bhc_ret =
28227c478bd9Sstevel@tonic-gate bufctl(addr, bhc->bhc_flags, bhc->bhc_argc, bhc->bhc_argv);
28237c478bd9Sstevel@tonic-gate
28247c478bd9Sstevel@tonic-gate bhc->bhc_flags &= ~DCMD_LOOPFIRST;
28257c478bd9Sstevel@tonic-gate
28267c478bd9Sstevel@tonic-gate return ((bhc->bhc_ret == DCMD_OK)? WALK_NEXT : WALK_DONE);
28277c478bd9Sstevel@tonic-gate }
28287c478bd9Sstevel@tonic-gate
28297c478bd9Sstevel@tonic-gate void
28307c478bd9Sstevel@tonic-gate bufctl_help(void)
28317c478bd9Sstevel@tonic-gate {
2832b5fca8f8Stomee mdb_printf("%s",
2833b5fca8f8Stomee "Display the contents of kmem_bufctl_audit_ts, with optional filtering.\n\n");
28347c478bd9Sstevel@tonic-gate mdb_dec_indent(2);
28357c478bd9Sstevel@tonic-gate mdb_printf("%<b>OPTIONS%</b>\n");
28367c478bd9Sstevel@tonic-gate mdb_inc_indent(2);
28377c478bd9Sstevel@tonic-gate mdb_printf("%s",
28387c478bd9Sstevel@tonic-gate " -v Display the full content of the bufctl, including its stack trace\n"
28397c478bd9Sstevel@tonic-gate " -h retrieve the bufctl's transaction history, if available\n"
28407c478bd9Sstevel@tonic-gate " -a addr\n"
28417c478bd9Sstevel@tonic-gate " filter out bufctls not involving the buffer at addr\n"
28427c478bd9Sstevel@tonic-gate " -c caller\n"
28437c478bd9Sstevel@tonic-gate " filter out bufctls without the function/PC in their stack trace\n"
28447c478bd9Sstevel@tonic-gate " -e earliest\n"
28457c478bd9Sstevel@tonic-gate " filter out bufctls timestamped before earliest\n"
28467c478bd9Sstevel@tonic-gate " -l latest\n"
28477c478bd9Sstevel@tonic-gate " filter out bufctls timestamped after latest\n"
28487c478bd9Sstevel@tonic-gate " -t thread\n"
28497c478bd9Sstevel@tonic-gate " filter out bufctls not involving thread\n");
28507c478bd9Sstevel@tonic-gate }
28517c478bd9Sstevel@tonic-gate
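/*
 * ::bufctl implementation.  The address is expected to be that of a
 * kmem_bufctl_audit_t (for example, one produced by the bufctl walker on an
 * audited cache); it is displayed subject to the filters described in
 * bufctl_help() above, and -h re-runs the dcmd over the bufctl's recorded
 * transaction history.
 */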
28527c478bd9Sstevel@tonic-gate int
28537c478bd9Sstevel@tonic-gate bufctl(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
28547c478bd9Sstevel@tonic-gate {
28557c478bd9Sstevel@tonic-gate kmem_bufctl_audit_t bc;
28567c478bd9Sstevel@tonic-gate uint_t verbose = FALSE;
28577c478bd9Sstevel@tonic-gate uint_t history = FALSE;
28587c478bd9Sstevel@tonic-gate uint_t in_history = FALSE;
28597c478bd9Sstevel@tonic-gate uintptr_t caller = NULL, thread = NULL;
28607c478bd9Sstevel@tonic-gate uintptr_t laddr, haddr, baddr = NULL;
28617c478bd9Sstevel@tonic-gate hrtime_t earliest = 0, latest = 0;
28627c478bd9Sstevel@tonic-gate int i, depth;
28637c478bd9Sstevel@tonic-gate char c[MDB_SYM_NAMLEN];
28647c478bd9Sstevel@tonic-gate GElf_Sym sym;
28657c478bd9Sstevel@tonic-gate
28667c478bd9Sstevel@tonic-gate if (mdb_getopts(argc, argv,
28677c478bd9Sstevel@tonic-gate 'v', MDB_OPT_SETBITS, TRUE, &verbose,
28687c478bd9Sstevel@tonic-gate 'h', MDB_OPT_SETBITS, TRUE, &history,
28697c478bd9Sstevel@tonic-gate 'H', MDB_OPT_SETBITS, TRUE, &in_history, /* internal */
28707c478bd9Sstevel@tonic-gate 'c', MDB_OPT_UINTPTR, &caller,
28717c478bd9Sstevel@tonic-gate 't', MDB_OPT_UINTPTR, &thread,
28727c478bd9Sstevel@tonic-gate 'e', MDB_OPT_UINT64, &earliest,
28737c478bd9Sstevel@tonic-gate 'l', MDB_OPT_UINT64, &latest,
28747c478bd9Sstevel@tonic-gate 'a', MDB_OPT_UINTPTR, &baddr, NULL) != argc)
28757c478bd9Sstevel@tonic-gate return (DCMD_USAGE);
28767c478bd9Sstevel@tonic-gate
28777c478bd9Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC))
28787c478bd9Sstevel@tonic-gate return (DCMD_USAGE);
28797c478bd9Sstevel@tonic-gate
28807c478bd9Sstevel@tonic-gate if (in_history && !history)
28817c478bd9Sstevel@tonic-gate return (DCMD_USAGE);
28827c478bd9Sstevel@tonic-gate
28837c478bd9Sstevel@tonic-gate if (history && !in_history) {
28847c478bd9Sstevel@tonic-gate mdb_arg_t *nargv = mdb_zalloc(sizeof (*nargv) * (argc + 1),
28857c478bd9Sstevel@tonic-gate UM_SLEEP | UM_GC);
28867c478bd9Sstevel@tonic-gate bufctl_history_cb_t bhc;
28877c478bd9Sstevel@tonic-gate
28887c478bd9Sstevel@tonic-gate nargv[0].a_type = MDB_TYPE_STRING;
28897c478bd9Sstevel@tonic-gate nargv[0].a_un.a_str = "-H"; /* prevent recursion */
28907c478bd9Sstevel@tonic-gate
28917c478bd9Sstevel@tonic-gate for (i = 0; i < argc; i++)
28927c478bd9Sstevel@tonic-gate nargv[i + 1] = argv[i];
28937c478bd9Sstevel@tonic-gate
28947c478bd9Sstevel@tonic-gate /*
28957c478bd9Sstevel@tonic-gate * When in history mode, we treat each element as if it
28967c478bd9Sstevel@tonic-gate * were in a separate loop, so that the headers group
28977c478bd9Sstevel@tonic-gate * bufctls with similar histories.
28987c478bd9Sstevel@tonic-gate */
28997c478bd9Sstevel@tonic-gate bhc.bhc_flags = flags | DCMD_LOOP | DCMD_LOOPFIRST;
29007c478bd9Sstevel@tonic-gate bhc.bhc_argc = argc + 1;
29017c478bd9Sstevel@tonic-gate bhc.bhc_argv = nargv;
29027c478bd9Sstevel@tonic-gate bhc.bhc_ret = DCMD_OK;
29037c478bd9Sstevel@tonic-gate
29047c478bd9Sstevel@tonic-gate if (mdb_pwalk("bufctl_history", bufctl_history_callback, &bhc,
29057c478bd9Sstevel@tonic-gate addr) == -1) {
29067c478bd9Sstevel@tonic-gate mdb_warn("unable to walk bufctl_history");
29077c478bd9Sstevel@tonic-gate return (DCMD_ERR);
29087c478bd9Sstevel@tonic-gate }
29097c478bd9Sstevel@tonic-gate
29107c478bd9Sstevel@tonic-gate if (bhc.bhc_ret == DCMD_OK && !(flags & DCMD_PIPE_OUT))
29117c478bd9Sstevel@tonic-gate mdb_printf("\n");
29127c478bd9Sstevel@tonic-gate
29137c478bd9Sstevel@tonic-gate return (bhc.bhc_ret);
29147c478bd9Sstevel@tonic-gate }
29157c478bd9Sstevel@tonic-gate
29167c478bd9Sstevel@tonic-gate if (DCMD_HDRSPEC(flags) && !(flags & DCMD_PIPE_OUT)) {
29177c478bd9Sstevel@tonic-gate if (verbose) {
29187c478bd9Sstevel@tonic-gate mdb_printf("%16s %16s %16s %16s\n"
29197c478bd9Sstevel@tonic-gate "%<u>%16s %16s %16s %16s%</u>\n",
29207c478bd9Sstevel@tonic-gate "ADDR", "BUFADDR", "TIMESTAMP", "THREAD",
29217c478bd9Sstevel@tonic-gate "", "CACHE", "LASTLOG", "CONTENTS");
29227c478bd9Sstevel@tonic-gate } else {
29237c478bd9Sstevel@tonic-gate mdb_printf("%<u>%-?s %-?s %-12s %-?s %s%</u>\n",
29247c478bd9Sstevel@tonic-gate "ADDR", "BUFADDR", "TIMESTAMP", "THREAD", "CALLER");
29257c478bd9Sstevel@tonic-gate }
29267c478bd9Sstevel@tonic-gate }
29277c478bd9Sstevel@tonic-gate
29287c478bd9Sstevel@tonic-gate if (mdb_vread(&bc, sizeof (bc), addr) == -1) {
29297c478bd9Sstevel@tonic-gate mdb_warn("couldn't read bufctl at %p", addr);
29307c478bd9Sstevel@tonic-gate return (DCMD_ERR);
29317c478bd9Sstevel@tonic-gate }
29327c478bd9Sstevel@tonic-gate
29337c478bd9Sstevel@tonic-gate /*
29347c478bd9Sstevel@tonic-gate * Guard against bogus bc_depth in case the bufctl is corrupt or
29357c478bd9Sstevel@tonic-gate * the address does not really refer to a bufctl.
29367c478bd9Sstevel@tonic-gate */
29377c478bd9Sstevel@tonic-gate depth = MIN(bc.bc_depth, KMEM_STACK_DEPTH);
29387c478bd9Sstevel@tonic-gate
29397c478bd9Sstevel@tonic-gate if (caller != NULL) {
29407c478bd9Sstevel@tonic-gate laddr = caller;
29417c478bd9Sstevel@tonic-gate haddr = caller + sizeof (caller);
29427c478bd9Sstevel@tonic-gate
29437c478bd9Sstevel@tonic-gate if (mdb_lookup_by_addr(caller, MDB_SYM_FUZZY, c, sizeof (c),
29447c478bd9Sstevel@tonic-gate &sym) != -1 && caller == (uintptr_t)sym.st_value) {
29457c478bd9Sstevel@tonic-gate /*
29467c478bd9Sstevel@tonic-gate * We were provided an exact symbol value; any
29477c478bd9Sstevel@tonic-gate * address in the function is valid.
29487c478bd9Sstevel@tonic-gate */
29497c478bd9Sstevel@tonic-gate laddr = (uintptr_t)sym.st_value;
29507c478bd9Sstevel@tonic-gate haddr = (uintptr_t)sym.st_value + sym.st_size;
29517c478bd9Sstevel@tonic-gate }
29527c478bd9Sstevel@tonic-gate
29537c478bd9Sstevel@tonic-gate for (i = 0; i < depth; i++)
29547c478bd9Sstevel@tonic-gate if (bc.bc_stack[i] >= laddr && bc.bc_stack[i] < haddr)
29557c478bd9Sstevel@tonic-gate break;
29567c478bd9Sstevel@tonic-gate
29577c478bd9Sstevel@tonic-gate if (i == depth)
29587c478bd9Sstevel@tonic-gate return (DCMD_OK);
29597c478bd9Sstevel@tonic-gate }
29607c478bd9Sstevel@tonic-gate
29617c478bd9Sstevel@tonic-gate if (thread != NULL && (uintptr_t)bc.bc_thread != thread)
29627c478bd9Sstevel@tonic-gate return (DCMD_OK);
29637c478bd9Sstevel@tonic-gate
29647c478bd9Sstevel@tonic-gate if (earliest != 0 && bc.bc_timestamp < earliest)
29657c478bd9Sstevel@tonic-gate return (DCMD_OK);
29667c478bd9Sstevel@tonic-gate
29677c478bd9Sstevel@tonic-gate if (latest != 0 && bc.bc_timestamp > latest)
29687c478bd9Sstevel@tonic-gate return (DCMD_OK);
29697c478bd9Sstevel@tonic-gate
29707c478bd9Sstevel@tonic-gate if (baddr != 0 && (uintptr_t)bc.bc_addr != baddr)
29717c478bd9Sstevel@tonic-gate return (DCMD_OK);
29727c478bd9Sstevel@tonic-gate
29737c478bd9Sstevel@tonic-gate if (flags & DCMD_PIPE_OUT) {
29747c478bd9Sstevel@tonic-gate mdb_printf("%#lr\n", addr);
29757c478bd9Sstevel@tonic-gate return (DCMD_OK);
29767c478bd9Sstevel@tonic-gate }
29777c478bd9Sstevel@tonic-gate
29787c478bd9Sstevel@tonic-gate if (verbose) {
29797c478bd9Sstevel@tonic-gate mdb_printf(
29807c478bd9Sstevel@tonic-gate "%<b>%16p%</b> %16p %16llx %16p\n"
29817c478bd9Sstevel@tonic-gate "%16s %16p %16p %16p\n",
29827c478bd9Sstevel@tonic-gate addr, bc.bc_addr, bc.bc_timestamp, bc.bc_thread,
29837c478bd9Sstevel@tonic-gate "", bc.bc_cache, bc.bc_lastlog, bc.bc_contents);
29847c478bd9Sstevel@tonic-gate
29857c478bd9Sstevel@tonic-gate mdb_inc_indent(17);
29867c478bd9Sstevel@tonic-gate for (i = 0; i < depth; i++)
29877c478bd9Sstevel@tonic-gate mdb_printf("%a\n", bc.bc_stack[i]);
29887c478bd9Sstevel@tonic-gate mdb_dec_indent(17);
29897c478bd9Sstevel@tonic-gate mdb_printf("\n");
29907c478bd9Sstevel@tonic-gate } else {
29917c478bd9Sstevel@tonic-gate mdb_printf("%0?p %0?p %12llx %0?p", addr, bc.bc_addr,
29927c478bd9Sstevel@tonic-gate bc.bc_timestamp, bc.bc_thread);
29937c478bd9Sstevel@tonic-gate
29947c478bd9Sstevel@tonic-gate for (i = 0; i < depth; i++) {
29957c478bd9Sstevel@tonic-gate if (mdb_lookup_by_addr(bc.bc_stack[i],
29967c478bd9Sstevel@tonic-gate MDB_SYM_FUZZY, c, sizeof (c), &sym) == -1)
29977c478bd9Sstevel@tonic-gate continue;
29987c478bd9Sstevel@tonic-gate if (strncmp(c, "kmem_", 5) == 0)
29997c478bd9Sstevel@tonic-gate continue;
30007c478bd9Sstevel@tonic-gate mdb_printf(" %a\n", bc.bc_stack[i]);
30017c478bd9Sstevel@tonic-gate break;
30027c478bd9Sstevel@tonic-gate }
30037c478bd9Sstevel@tonic-gate
30047c478bd9Sstevel@tonic-gate if (i >= depth)
30057c478bd9Sstevel@tonic-gate mdb_printf("\n");
30067c478bd9Sstevel@tonic-gate }
30077c478bd9Sstevel@tonic-gate
30087c478bd9Sstevel@tonic-gate return (DCMD_OK);
30097c478bd9Sstevel@tonic-gate }
30107c478bd9Sstevel@tonic-gate
30117c478bd9Sstevel@tonic-gate typedef struct kmem_verify {
30127c478bd9Sstevel@tonic-gate uint64_t *kmv_buf; /* buffer to read cache contents into */
30137c478bd9Sstevel@tonic-gate size_t kmv_size; /* number of bytes in kmv_buf */
30147c478bd9Sstevel@tonic-gate int kmv_corruption; /* > 0 if corruption found. */
3015*76d3b15cSJohn Levon uint_t kmv_flags; /* dcmd flags */
30167c478bd9Sstevel@tonic-gate struct kmem_cache kmv_cache; /* the cache we're operating on */
30177c478bd9Sstevel@tonic-gate } kmem_verify_t;
30187c478bd9Sstevel@tonic-gate
30197c478bd9Sstevel@tonic-gate /*
30207c478bd9Sstevel@tonic-gate * verify_pattern()
30217c478bd9Sstevel@tonic-gate * verify that buf is filled with the pattern pat.
30227c478bd9Sstevel@tonic-gate */
30237c478bd9Sstevel@tonic-gate static int64_t
30247c478bd9Sstevel@tonic-gate verify_pattern(uint64_t *buf_arg, size_t size, uint64_t pat)
30257c478bd9Sstevel@tonic-gate {
30267c478bd9Sstevel@tonic-gate /*LINTED*/
30277c478bd9Sstevel@tonic-gate uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
30287c478bd9Sstevel@tonic-gate uint64_t *buf;
30297c478bd9Sstevel@tonic-gate
30307c478bd9Sstevel@tonic-gate for (buf = buf_arg; buf < bufend; buf++)
30317c478bd9Sstevel@tonic-gate if (*buf != pat)
30327c478bd9Sstevel@tonic-gate return ((uintptr_t)buf - (uintptr_t)buf_arg);
30337c478bd9Sstevel@tonic-gate return (-1);
30347c478bd9Sstevel@tonic-gate }
30357c478bd9Sstevel@tonic-gate
30367c478bd9Sstevel@tonic-gate /*
30377c478bd9Sstevel@tonic-gate * verify_buftag()
30387c478bd9Sstevel@tonic-gate * verify that btp->bt_bxstat == (bcp ^ pat)
30397c478bd9Sstevel@tonic-gate */
30407c478bd9Sstevel@tonic-gate static int
30417c478bd9Sstevel@tonic-gate verify_buftag(kmem_buftag_t *btp, uintptr_t pat)
30427c478bd9Sstevel@tonic-gate {
30437c478bd9Sstevel@tonic-gate return (btp->bt_bxstat == ((intptr_t)btp->bt_bufctl ^ pat) ? 0 : -1);
30447c478bd9Sstevel@tonic-gate }
30457c478bd9Sstevel@tonic-gate
30467c478bd9Sstevel@tonic-gate /*
30477c478bd9Sstevel@tonic-gate * verify_free()
30487c478bd9Sstevel@tonic-gate * verify the integrity of a free block of memory by checking
30497c478bd9Sstevel@tonic-gate * that it is filled with 0xdeadbeef and that its buftag is sane.
30507c478bd9Sstevel@tonic-gate */
30517c478bd9Sstevel@tonic-gate /*ARGSUSED1*/
30527c478bd9Sstevel@tonic-gate static int
30537c478bd9Sstevel@tonic-gate verify_free(uintptr_t addr, const void *data, void *private)
30547c478bd9Sstevel@tonic-gate {
30557c478bd9Sstevel@tonic-gate kmem_verify_t *kmv = (kmem_verify_t *)private;
30567c478bd9Sstevel@tonic-gate uint64_t *buf = kmv->kmv_buf; /* buf to validate */
30577c478bd9Sstevel@tonic-gate int64_t corrupt; /* corruption offset */
30587c478bd9Sstevel@tonic-gate kmem_buftag_t *buftagp; /* ptr to buftag */
30597c478bd9Sstevel@tonic-gate kmem_cache_t *cp = &kmv->kmv_cache;
3060*76d3b15cSJohn Levon boolean_t besilent = !!(kmv->kmv_flags & (DCMD_LOOP | DCMD_PIPE_OUT));
30617c478bd9Sstevel@tonic-gate
30627c478bd9Sstevel@tonic-gate /*LINTED*/
30637c478bd9Sstevel@tonic-gate buftagp = KMEM_BUFTAG(cp, buf);
30647c478bd9Sstevel@tonic-gate
30657c478bd9Sstevel@tonic-gate /*
30667c478bd9Sstevel@tonic-gate * Read the buffer to check.
30677c478bd9Sstevel@tonic-gate */
30687c478bd9Sstevel@tonic-gate if (mdb_vread(buf, kmv->kmv_size, addr) == -1) {
30697c478bd9Sstevel@tonic-gate if (!besilent)
30707c478bd9Sstevel@tonic-gate mdb_warn("couldn't read %p", addr);
30717c478bd9Sstevel@tonic-gate return (WALK_NEXT);
30727c478bd9Sstevel@tonic-gate }
30737c478bd9Sstevel@tonic-gate
30747c478bd9Sstevel@tonic-gate if ((corrupt = verify_pattern(buf, cp->cache_verify,
30757c478bd9Sstevel@tonic-gate KMEM_FREE_PATTERN)) >= 0) {
30767c478bd9Sstevel@tonic-gate if (!besilent)
30777c478bd9Sstevel@tonic-gate mdb_printf("buffer %p (free) seems corrupted, at %p\n",
30787c478bd9Sstevel@tonic-gate addr, (uintptr_t)addr + corrupt);
30797c478bd9Sstevel@tonic-gate goto corrupt;
30807c478bd9Sstevel@tonic-gate }
30817c478bd9Sstevel@tonic-gate /*
30827c478bd9Sstevel@tonic-gate * When KMF_LITE is set, buftagp->bt_redzone is used to hold
30837c478bd9Sstevel@tonic-gate * the first bytes of the buffer, hence we cannot check for red
30847c478bd9Sstevel@tonic-gate * zone corruption.
30857c478bd9Sstevel@tonic-gate */
30867c478bd9Sstevel@tonic-gate if ((cp->cache_flags & (KMF_HASH | KMF_LITE)) == KMF_HASH &&
30877c478bd9Sstevel@tonic-gate buftagp->bt_redzone != KMEM_REDZONE_PATTERN) {
30887c478bd9Sstevel@tonic-gate if (!besilent)
30897c478bd9Sstevel@tonic-gate mdb_printf("buffer %p (free) seems to "
30907c478bd9Sstevel@tonic-gate "have a corrupt redzone pattern\n", addr);
30917c478bd9Sstevel@tonic-gate goto corrupt;
30927c478bd9Sstevel@tonic-gate }
30937c478bd9Sstevel@tonic-gate
30947c478bd9Sstevel@tonic-gate /*
30957c478bd9Sstevel@tonic-gate * confirm bufctl pointer integrity.
30967c478bd9Sstevel@tonic-gate */
30977c478bd9Sstevel@tonic-gate if (verify_buftag(buftagp, KMEM_BUFTAG_FREE) == -1) {
30987c478bd9Sstevel@tonic-gate if (!besilent)
30997c478bd9Sstevel@tonic-gate mdb_printf("buffer %p (free) has a corrupt "
31007c478bd9Sstevel@tonic-gate "buftag\n", addr);
31017c478bd9Sstevel@tonic-gate goto corrupt;
31027c478bd9Sstevel@tonic-gate }
31037c478bd9Sstevel@tonic-gate
31047c478bd9Sstevel@tonic-gate return (WALK_NEXT);
31057c478bd9Sstevel@tonic-gate corrupt:
3106*76d3b15cSJohn Levon if (kmv->kmv_flags & DCMD_PIPE_OUT)
3107*76d3b15cSJohn Levon mdb_printf("%p\n", addr);
31087c478bd9Sstevel@tonic-gate kmv->kmv_corruption++;
31097c478bd9Sstevel@tonic-gate return (WALK_NEXT);
31107c478bd9Sstevel@tonic-gate }
31117c478bd9Sstevel@tonic-gate
31127c478bd9Sstevel@tonic-gate /*
31137c478bd9Sstevel@tonic-gate * verify_alloc()
31147c478bd9Sstevel@tonic-gate * Verify that the buftag of an allocated buffer makes sense with respect
31157c478bd9Sstevel@tonic-gate * to the buffer.
31167c478bd9Sstevel@tonic-gate */
31177c478bd9Sstevel@tonic-gate /*ARGSUSED1*/
31187c478bd9Sstevel@tonic-gate static int
31197c478bd9Sstevel@tonic-gate verify_alloc(uintptr_t addr, const void *data, void *private)
31207c478bd9Sstevel@tonic-gate {
31217c478bd9Sstevel@tonic-gate kmem_verify_t *kmv = (kmem_verify_t *)private;
31227c478bd9Sstevel@tonic-gate kmem_cache_t *cp = &kmv->kmv_cache;
31237c478bd9Sstevel@tonic-gate uint64_t *buf = kmv->kmv_buf; /* buf to validate */
31247c478bd9Sstevel@tonic-gate /*LINTED*/
31257c478bd9Sstevel@tonic-gate kmem_buftag_t *buftagp = KMEM_BUFTAG(cp, buf);
31267c478bd9Sstevel@tonic-gate uint32_t *ip = (uint32_t *)buftagp;
31277c478bd9Sstevel@tonic-gate uint8_t *bp = (uint8_t *)buf;
31287c478bd9Sstevel@tonic-gate int looks_ok = 0, size_ok = 1; /* flags for finding corruption */
3129*76d3b15cSJohn Levon boolean_t besilent = !!(kmv->kmv_flags & (DCMD_LOOP | DCMD_PIPE_OUT));
31307c478bd9Sstevel@tonic-gate
31317c478bd9Sstevel@tonic-gate /*
31327c478bd9Sstevel@tonic-gate * Read the buffer to check.
31337c478bd9Sstevel@tonic-gate */
31347c478bd9Sstevel@tonic-gate if (mdb_vread(buf, kmv->kmv_size, addr) == -1) {
31357c478bd9Sstevel@tonic-gate if (!besilent)
31367c478bd9Sstevel@tonic-gate mdb_warn("couldn't read %p", addr);
31377c478bd9Sstevel@tonic-gate return (WALK_NEXT);
31387c478bd9Sstevel@tonic-gate }
31397c478bd9Sstevel@tonic-gate
31407c478bd9Sstevel@tonic-gate /*
31417c478bd9Sstevel@tonic-gate * There are two cases to handle:
31427c478bd9Sstevel@tonic-gate * 1. If the buf was alloc'd using kmem_cache_alloc, it will have
31437c478bd9Sstevel@tonic-gate * 0xfeedfacefeedface at the end of it
31447c478bd9Sstevel@tonic-gate * 2. If the buf was alloc'd using kmem_alloc, it will have
31457c478bd9Sstevel@tonic-gate * 0xbb just past the end of the region in use. At the buftag,
31467c478bd9Sstevel@tonic-gate * it will have 0xfeedface (or, if the whole buffer is in use,
31477c478bd9Sstevel@tonic-gate * 0xfeedface & bb000000 or 0xfeedfacf & 000000bb depending on
31487c478bd9Sstevel@tonic-gate * endianness), followed by 32 bits containing the offset of the
31497c478bd9Sstevel@tonic-gate * 0xbb byte in the buffer.
31507c478bd9Sstevel@tonic-gate *
31517c478bd9Sstevel@tonic-gate * Finally, the two 32-bit words that comprise the second half of the
31527c478bd9Sstevel@tonic-gate * buftag should xor to KMEM_BUFTAG_ALLOC
31537c478bd9Sstevel@tonic-gate */
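	/*
	 * A concrete illustration of case 2 (the size here is illustrative
	 * only): for a buffer obtained via kmem_alloc(10), ip[1] should
	 * satisfy KMEM_SIZE_VALID(), KMEM_SIZE_DECODE(ip[1]) should yield
	 * 10, and bp[10] should hold KMEM_REDZONE_BYTE (0xbb).
	 */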
31547c478bd9Sstevel@tonic-gate
31557c478bd9Sstevel@tonic-gate if (buftagp->bt_redzone == KMEM_REDZONE_PATTERN)
31567c478bd9Sstevel@tonic-gate looks_ok = 1;
31577c478bd9Sstevel@tonic-gate else if (!KMEM_SIZE_VALID(ip[1]))
31587c478bd9Sstevel@tonic-gate size_ok = 0;
31597c478bd9Sstevel@tonic-gate else if (bp[KMEM_SIZE_DECODE(ip[1])] == KMEM_REDZONE_BYTE)
31607c478bd9Sstevel@tonic-gate looks_ok = 1;
31617c478bd9Sstevel@tonic-gate else
31627c478bd9Sstevel@tonic-gate size_ok = 0;
31637c478bd9Sstevel@tonic-gate
31647c478bd9Sstevel@tonic-gate if (!size_ok) {
31657c478bd9Sstevel@tonic-gate if (!besilent)
31667c478bd9Sstevel@tonic-gate mdb_printf("buffer %p (allocated) has a corrupt "
31677c478bd9Sstevel@tonic-gate "redzone size encoding\n", addr);
31687c478bd9Sstevel@tonic-gate goto corrupt;
31697c478bd9Sstevel@tonic-gate }
31707c478bd9Sstevel@tonic-gate
31717c478bd9Sstevel@tonic-gate if (!looks_ok) {
31727c478bd9Sstevel@tonic-gate if (!besilent)
31737c478bd9Sstevel@tonic-gate mdb_printf("buffer %p (allocated) has a corrupt "
31747c478bd9Sstevel@tonic-gate "redzone signature\n", addr);
31757c478bd9Sstevel@tonic-gate goto corrupt;
31767c478bd9Sstevel@tonic-gate }
31777c478bd9Sstevel@tonic-gate
31787c478bd9Sstevel@tonic-gate if (verify_buftag(buftagp, KMEM_BUFTAG_ALLOC) == -1) {
31797c478bd9Sstevel@tonic-gate if (!besilent)
31807c478bd9Sstevel@tonic-gate mdb_printf("buffer %p (allocated) has a "
31817c478bd9Sstevel@tonic-gate "corrupt buftag\n", addr);
31827c478bd9Sstevel@tonic-gate goto corrupt;
31837c478bd9Sstevel@tonic-gate }
31847c478bd9Sstevel@tonic-gate
31857c478bd9Sstevel@tonic-gate return (WALK_NEXT);
31867c478bd9Sstevel@tonic-gate corrupt:
3187*76d3b15cSJohn Levon if (kmv->kmv_flags & DCMD_PIPE_OUT)
3188*76d3b15cSJohn Levon mdb_printf("%p\n", addr);
3189*76d3b15cSJohn Levon
31907c478bd9Sstevel@tonic-gate kmv->kmv_corruption++;
31917c478bd9Sstevel@tonic-gate return (WALK_NEXT);
31927c478bd9Sstevel@tonic-gate }
31937c478bd9Sstevel@tonic-gate
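/*
 * ::kmem_verify implementation.  When given a cache address, check every
 * allocated and free buffer in that cache for redzone and free-pattern
 * corruption.  When run as part of a loop or pipeline, the per-buffer
 * diagnostics are suppressed in favor of a one-line summary per cache (or,
 * for a pipeline, the addresses of the corrupt buffers).
 */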
31947c478bd9Sstevel@tonic-gate /*ARGSUSED2*/
31957c478bd9Sstevel@tonic-gate int
31967c478bd9Sstevel@tonic-gate kmem_verify(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
31977c478bd9Sstevel@tonic-gate {
31987c478bd9Sstevel@tonic-gate if (flags & DCMD_ADDRSPEC) {
31997c478bd9Sstevel@tonic-gate int check_alloc = 0, check_free = 0;
32007c478bd9Sstevel@tonic-gate kmem_verify_t kmv;
32017c478bd9Sstevel@tonic-gate
32027c478bd9Sstevel@tonic-gate if (mdb_vread(&kmv.kmv_cache, sizeof (kmv.kmv_cache),
32037c478bd9Sstevel@tonic-gate addr) == -1) {
32047c478bd9Sstevel@tonic-gate mdb_warn("couldn't read kmem_cache %p", addr);
32057c478bd9Sstevel@tonic-gate return (DCMD_ERR);
32067c478bd9Sstevel@tonic-gate }
32077c478bd9Sstevel@tonic-gate
3208*76d3b15cSJohn Levon if ((kmv.kmv_cache.cache_dump.kd_unsafe ||
3209*76d3b15cSJohn Levon kmv.kmv_cache.cache_dump.kd_alloc_fails) &&
3210*76d3b15cSJohn Levon !(flags & (DCMD_LOOP | DCMD_PIPE_OUT))) {
3211*76d3b15cSJohn Levon mdb_warn("WARNING: cache was used during dump: "
3212*76d3b15cSJohn Levon "corruption may be incorrectly reported\n");
3213*76d3b15cSJohn Levon }
3214*76d3b15cSJohn Levon
32157c478bd9Sstevel@tonic-gate kmv.kmv_size = kmv.kmv_cache.cache_buftag +
32167c478bd9Sstevel@tonic-gate sizeof (kmem_buftag_t);
32177c478bd9Sstevel@tonic-gate kmv.kmv_buf = mdb_alloc(kmv.kmv_size, UM_SLEEP | UM_GC);
32187c478bd9Sstevel@tonic-gate kmv.kmv_corruption = 0;
3219*76d3b15cSJohn Levon kmv.kmv_flags = flags;
32207c478bd9Sstevel@tonic-gate
32217c478bd9Sstevel@tonic-gate if ((kmv.kmv_cache.cache_flags & KMF_REDZONE)) {
32227c478bd9Sstevel@tonic-gate check_alloc = 1;
32237c478bd9Sstevel@tonic-gate if (kmv.kmv_cache.cache_flags & KMF_DEADBEEF)
32247c478bd9Sstevel@tonic-gate check_free = 1;
32257c478bd9Sstevel@tonic-gate } else {
32267c478bd9Sstevel@tonic-gate if (!(flags & DCMD_LOOP)) {
32277c478bd9Sstevel@tonic-gate mdb_warn("cache %p (%s) does not have "
32287c478bd9Sstevel@tonic-gate "redzone checking enabled\n", addr,
32297c478bd9Sstevel@tonic-gate kmv.kmv_cache.cache_name);
32307c478bd9Sstevel@tonic-gate }
32317c478bd9Sstevel@tonic-gate return (DCMD_ERR);
32327c478bd9Sstevel@tonic-gate }
32337c478bd9Sstevel@tonic-gate
3234*76d3b15cSJohn Levon if (!(flags & (DCMD_LOOP | DCMD_PIPE_OUT))) {
32357c478bd9Sstevel@tonic-gate mdb_printf("Summary for cache '%s'\n",
32367c478bd9Sstevel@tonic-gate kmv.kmv_cache.cache_name);
32377c478bd9Sstevel@tonic-gate mdb_inc_indent(2);
32387c478bd9Sstevel@tonic-gate }
32397c478bd9Sstevel@tonic-gate
32407c478bd9Sstevel@tonic-gate if (check_alloc)
32417c478bd9Sstevel@tonic-gate (void) mdb_pwalk("kmem", verify_alloc, &kmv, addr);
32427c478bd9Sstevel@tonic-gate if (check_free)
32437c478bd9Sstevel@tonic-gate (void) mdb_pwalk("freemem", verify_free, &kmv, addr);
32447c478bd9Sstevel@tonic-gate
3245*76d3b15cSJohn Levon if (!(flags & DCMD_PIPE_OUT)) {
32467c478bd9Sstevel@tonic-gate if (flags & DCMD_LOOP) {
32477c478bd9Sstevel@tonic-gate if (kmv.kmv_corruption == 0) {
32487c478bd9Sstevel@tonic-gate mdb_printf("%-*s %?p clean\n",
32497c478bd9Sstevel@tonic-gate KMEM_CACHE_NAMELEN,
32507c478bd9Sstevel@tonic-gate kmv.kmv_cache.cache_name, addr);
32517c478bd9Sstevel@tonic-gate } else {
3252*76d3b15cSJohn Levon mdb_printf("%-*s %?p %d corrupt "
3253*76d3b15cSJohn Levon "buffer%s\n", KMEM_CACHE_NAMELEN,
32547c478bd9Sstevel@tonic-gate kmv.kmv_cache.cache_name, addr,
3255*76d3b15cSJohn Levon kmv.kmv_corruption,
3256*76d3b15cSJohn Levon kmv.kmv_corruption > 1 ? "s" : "");
32577c478bd9Sstevel@tonic-gate }
32587c478bd9Sstevel@tonic-gate } else {
32597c478bd9Sstevel@tonic-gate /*
3260*76d3b15cSJohn Levon * This is the more verbose mode, when the user
3261*76d3b15cSJohn Levon * typed addr::kmem_verify. If the cache was
3262*76d3b15cSJohn Levon * clean, nothing will have yet been printed. So
3263*76d3b15cSJohn Levon * say something.
32647c478bd9Sstevel@tonic-gate */
32657c478bd9Sstevel@tonic-gate if (kmv.kmv_corruption == 0)
32667c478bd9Sstevel@tonic-gate mdb_printf("clean\n");
32677c478bd9Sstevel@tonic-gate
32687c478bd9Sstevel@tonic-gate mdb_dec_indent(2);
32697c478bd9Sstevel@tonic-gate }
3270*76d3b15cSJohn Levon }
32717c478bd9Sstevel@tonic-gate } else {
32727c478bd9Sstevel@tonic-gate /*
32737c478bd9Sstevel@tonic-gate * If the user didn't specify a cache to verify, we'll walk all
32747c478bd9Sstevel@tonic-gate * kmem_cache's, specifying ourself as a callback for each...
32757c478bd9Sstevel@tonic-gate * this is the equivalent of '::walk kmem_cache .::kmem_verify'
32767c478bd9Sstevel@tonic-gate */
3277*76d3b15cSJohn Levon
3278*76d3b15cSJohn Levon if (!(flags & DCMD_PIPE_OUT)) {
3279*76d3b15cSJohn Levon uintptr_t dump_curr;
3280*76d3b15cSJohn Levon uintptr_t dump_end;
3281*76d3b15cSJohn Levon
3282*76d3b15cSJohn Levon if (mdb_readvar(&dump_curr, "kmem_dump_curr") != -1 &&
3283*76d3b15cSJohn Levon mdb_readvar(&dump_end, "kmem_dump_end") != -1 &&
3284*76d3b15cSJohn Levon dump_curr == dump_end) {
3285*76d3b15cSJohn Levon mdb_warn("WARNING: exceeded kmem_dump_size; "
3286*76d3b15cSJohn Levon "corruption may be incorrectly reported\n");
3287*76d3b15cSJohn Levon }
3288*76d3b15cSJohn Levon
3289*76d3b15cSJohn Levon mdb_printf("%<u>%-*s %-?s %-20s%</u>\n",
3290*76d3b15cSJohn Levon KMEM_CACHE_NAMELEN, "Cache Name", "Addr",
3291*76d3b15cSJohn Levon "Cache Integrity");
3292*76d3b15cSJohn Levon }
3293*76d3b15cSJohn Levon
32947c478bd9Sstevel@tonic-gate (void) (mdb_walk_dcmd("kmem_cache", "kmem_verify", 0, NULL));
32957c478bd9Sstevel@tonic-gate }
32967c478bd9Sstevel@tonic-gate
32977c478bd9Sstevel@tonic-gate return (DCMD_OK);
32987c478bd9Sstevel@tonic-gate }
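/*
 * Illustrative invocations: "::kmem_verify" prints one summary line per
 * cache; "addr::kmem_verify" performs the verbose single-cache check;
 * and in a pipeline such as "addr::kmem_verify | ::whatis" (the
 * ::whatis stage is merely an example consumer), only the addresses of
 * suspected-corrupt buffers are emitted.
 */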
32997c478bd9Sstevel@tonic-gate
33007c478bd9Sstevel@tonic-gate typedef struct vmem_node {
33017c478bd9Sstevel@tonic-gate struct vmem_node *vn_next;
33027c478bd9Sstevel@tonic-gate struct vmem_node *vn_parent;
33037c478bd9Sstevel@tonic-gate struct vmem_node *vn_sibling;
33047c478bd9Sstevel@tonic-gate struct vmem_node *vn_children;
33057c478bd9Sstevel@tonic-gate uintptr_t vn_addr;
33067c478bd9Sstevel@tonic-gate int vn_marked;
33077c478bd9Sstevel@tonic-gate vmem_t vn_vmem;
33087c478bd9Sstevel@tonic-gate } vmem_node_t;
33097c478bd9Sstevel@tonic-gate
33107c478bd9Sstevel@tonic-gate typedef struct vmem_walk {
33117c478bd9Sstevel@tonic-gate vmem_node_t *vw_root;
33127c478bd9Sstevel@tonic-gate vmem_node_t *vw_current;
33137c478bd9Sstevel@tonic-gate } vmem_walk_t;
33147c478bd9Sstevel@tonic-gate
33157c478bd9Sstevel@tonic-gate int
vmem_walk_init(mdb_walk_state_t * wsp)33167c478bd9Sstevel@tonic-gate vmem_walk_init(mdb_walk_state_t *wsp)
33177c478bd9Sstevel@tonic-gate {
33187c478bd9Sstevel@tonic-gate uintptr_t vaddr, paddr;
33197c478bd9Sstevel@tonic-gate vmem_node_t *head = NULL, *root = NULL, *current = NULL, *parent, *vp;
33207c478bd9Sstevel@tonic-gate vmem_walk_t *vw;
33217c478bd9Sstevel@tonic-gate
33227c478bd9Sstevel@tonic-gate if (mdb_readvar(&vaddr, "vmem_list") == -1) {
33237c478bd9Sstevel@tonic-gate mdb_warn("couldn't read 'vmem_list'");
33247c478bd9Sstevel@tonic-gate return (WALK_ERR);
33257c478bd9Sstevel@tonic-gate }
33267c478bd9Sstevel@tonic-gate
33277c478bd9Sstevel@tonic-gate while (vaddr != NULL) {
33287c478bd9Sstevel@tonic-gate vp = mdb_zalloc(sizeof (vmem_node_t), UM_SLEEP);
33297c478bd9Sstevel@tonic-gate vp->vn_addr = vaddr;
33307c478bd9Sstevel@tonic-gate vp->vn_next = head;
33317c478bd9Sstevel@tonic-gate head = vp;
33327c478bd9Sstevel@tonic-gate
33337c478bd9Sstevel@tonic-gate if (vaddr == wsp->walk_addr)
33347c478bd9Sstevel@tonic-gate current = vp;
33357c478bd9Sstevel@tonic-gate
33367c478bd9Sstevel@tonic-gate if (mdb_vread(&vp->vn_vmem, sizeof (vmem_t), vaddr) == -1) {
33377c478bd9Sstevel@tonic-gate mdb_warn("couldn't read vmem_t at %p", vaddr);
33387c478bd9Sstevel@tonic-gate goto err;
33397c478bd9Sstevel@tonic-gate }
33407c478bd9Sstevel@tonic-gate
33417c478bd9Sstevel@tonic-gate vaddr = (uintptr_t)vp->vn_vmem.vm_next;
33427c478bd9Sstevel@tonic-gate }
33437c478bd9Sstevel@tonic-gate
33447c478bd9Sstevel@tonic-gate for (vp = head; vp != NULL; vp = vp->vn_next) {
33457c478bd9Sstevel@tonic-gate
33467c478bd9Sstevel@tonic-gate if ((paddr = (uintptr_t)vp->vn_vmem.vm_source) == NULL) {
33477c478bd9Sstevel@tonic-gate vp->vn_sibling = root;
33487c478bd9Sstevel@tonic-gate root = vp;
33497c478bd9Sstevel@tonic-gate continue;
33507c478bd9Sstevel@tonic-gate }
33517c478bd9Sstevel@tonic-gate
33527c478bd9Sstevel@tonic-gate for (parent = head; parent != NULL; parent = parent->vn_next) {
33537c478bd9Sstevel@tonic-gate if (parent->vn_addr != paddr)
33547c478bd9Sstevel@tonic-gate continue;
33557c478bd9Sstevel@tonic-gate vp->vn_sibling = parent->vn_children;
33567c478bd9Sstevel@tonic-gate parent->vn_children = vp;
33577c478bd9Sstevel@tonic-gate vp->vn_parent = parent;
33587c478bd9Sstevel@tonic-gate break;
33597c478bd9Sstevel@tonic-gate }
33607c478bd9Sstevel@tonic-gate
33617c478bd9Sstevel@tonic-gate if (parent == NULL) {
33627c478bd9Sstevel@tonic-gate mdb_warn("couldn't find %p's parent (%p)\n",
33637c478bd9Sstevel@tonic-gate vp->vn_addr, paddr);
33647c478bd9Sstevel@tonic-gate goto err;
33657c478bd9Sstevel@tonic-gate }
33667c478bd9Sstevel@tonic-gate }
33677c478bd9Sstevel@tonic-gate
33687c478bd9Sstevel@tonic-gate vw = mdb_zalloc(sizeof (vmem_walk_t), UM_SLEEP);
33697c478bd9Sstevel@tonic-gate vw->vw_root = root;
33707c478bd9Sstevel@tonic-gate
33717c478bd9Sstevel@tonic-gate if (current != NULL)
33727c478bd9Sstevel@tonic-gate vw->vw_current = current;
33737c478bd9Sstevel@tonic-gate else
33747c478bd9Sstevel@tonic-gate vw->vw_current = root;
33757c478bd9Sstevel@tonic-gate
33767c478bd9Sstevel@tonic-gate wsp->walk_data = vw;
33777c478bd9Sstevel@tonic-gate return (WALK_NEXT);
33787c478bd9Sstevel@tonic-gate err:
33797c478bd9Sstevel@tonic-gate for (vp = head; head != NULL; vp = head) {
33807c478bd9Sstevel@tonic-gate head = vp->vn_next;
33817c478bd9Sstevel@tonic-gate mdb_free(vp, sizeof (vmem_node_t));
33827c478bd9Sstevel@tonic-gate }
33837c478bd9Sstevel@tonic-gate
33847c478bd9Sstevel@tonic-gate return (WALK_ERR);
33857c478bd9Sstevel@tonic-gate }
33867c478bd9Sstevel@tonic-gate
33877c478bd9Sstevel@tonic-gate int
vmem_walk_step(mdb_walk_state_t * wsp)33887c478bd9Sstevel@tonic-gate vmem_walk_step(mdb_walk_state_t *wsp)
33897c478bd9Sstevel@tonic-gate {
33907c478bd9Sstevel@tonic-gate vmem_walk_t *vw = wsp->walk_data;
33917c478bd9Sstevel@tonic-gate vmem_node_t *vp;
33927c478bd9Sstevel@tonic-gate int rval;
33937c478bd9Sstevel@tonic-gate
33947c478bd9Sstevel@tonic-gate if ((vp = vw->vw_current) == NULL)
33957c478bd9Sstevel@tonic-gate return (WALK_DONE);
33967c478bd9Sstevel@tonic-gate
33977c478bd9Sstevel@tonic-gate rval = wsp->walk_callback(vp->vn_addr, &vp->vn_vmem, wsp->walk_cbdata);
33987c478bd9Sstevel@tonic-gate
33997c478bd9Sstevel@tonic-gate if (vp->vn_children != NULL) {
34007c478bd9Sstevel@tonic-gate vw->vw_current = vp->vn_children;
34017c478bd9Sstevel@tonic-gate return (rval);
34027c478bd9Sstevel@tonic-gate }
34037c478bd9Sstevel@tonic-gate
34047c478bd9Sstevel@tonic-gate do {
34057c478bd9Sstevel@tonic-gate vw->vw_current = vp->vn_sibling;
34067c478bd9Sstevel@tonic-gate vp = vp->vn_parent;
34077c478bd9Sstevel@tonic-gate } while (vw->vw_current == NULL && vp != NULL);
34087c478bd9Sstevel@tonic-gate
34097c478bd9Sstevel@tonic-gate return (rval);
34107c478bd9Sstevel@tonic-gate }
34117c478bd9Sstevel@tonic-gate
34127c478bd9Sstevel@tonic-gate /*
34137c478bd9Sstevel@tonic-gate * The "vmem_postfix" walk walks the vmem arenas in post-fix order; all
34147c478bd9Sstevel@tonic-gate * children are visited before their parent. We perform the postfix walk
34157c478bd9Sstevel@tonic-gate * iteratively (rather than recursively) to allow mdb to regain control
34167c478bd9Sstevel@tonic-gate * after each callback.
34177c478bd9Sstevel@tonic-gate */
34187c478bd9Sstevel@tonic-gate int
vmem_postfix_walk_step(mdb_walk_state_t * wsp)34197c478bd9Sstevel@tonic-gate vmem_postfix_walk_step(mdb_walk_state_t *wsp)
34207c478bd9Sstevel@tonic-gate {
34217c478bd9Sstevel@tonic-gate vmem_walk_t *vw = wsp->walk_data;
34227c478bd9Sstevel@tonic-gate vmem_node_t *vp = vw->vw_current;
34237c478bd9Sstevel@tonic-gate int rval;
34247c478bd9Sstevel@tonic-gate
34257c478bd9Sstevel@tonic-gate /*
34267c478bd9Sstevel@tonic-gate * If this node is marked, then we know that we have already visited
34277c478bd9Sstevel@tonic-gate * all of its children. If the node has any siblings, they need to
34287c478bd9Sstevel@tonic-gate * be visited next; otherwise, we need to visit the parent. Note
34297c478bd9Sstevel@tonic-gate * that vp->vn_marked will only be zero on the first invocation of
34307c478bd9Sstevel@tonic-gate * the step function.
34317c478bd9Sstevel@tonic-gate */
34327c478bd9Sstevel@tonic-gate if (vp->vn_marked) {
34337c478bd9Sstevel@tonic-gate if (vp->vn_sibling != NULL)
34347c478bd9Sstevel@tonic-gate vp = vp->vn_sibling;
34357c478bd9Sstevel@tonic-gate else if (vp->vn_parent != NULL)
34367c478bd9Sstevel@tonic-gate vp = vp->vn_parent;
34377c478bd9Sstevel@tonic-gate else {
34387c478bd9Sstevel@tonic-gate /*
34397c478bd9Sstevel@tonic-gate * We have neither a parent, nor a sibling, and we
34407c478bd9Sstevel@tonic-gate * have already been visited; we're done.
34417c478bd9Sstevel@tonic-gate */
34427c478bd9Sstevel@tonic-gate return (WALK_DONE);
34437c478bd9Sstevel@tonic-gate }
34447c478bd9Sstevel@tonic-gate }
34457c478bd9Sstevel@tonic-gate
34467c478bd9Sstevel@tonic-gate /*
34477c478bd9Sstevel@tonic-gate * Before we visit this node, visit its children.
34487c478bd9Sstevel@tonic-gate */
34497c478bd9Sstevel@tonic-gate while (vp->vn_children != NULL && !vp->vn_children->vn_marked)
34507c478bd9Sstevel@tonic-gate vp = vp->vn_children;
34517c478bd9Sstevel@tonic-gate
34527c478bd9Sstevel@tonic-gate vp->vn_marked = 1;
34537c478bd9Sstevel@tonic-gate vw->vw_current = vp;
34547c478bd9Sstevel@tonic-gate rval = wsp->walk_callback(vp->vn_addr, &vp->vn_vmem, wsp->walk_cbdata);
34557c478bd9Sstevel@tonic-gate
34567c478bd9Sstevel@tonic-gate return (rval);
34577c478bd9Sstevel@tonic-gate }
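/*
 * For example (illustrative): the plain "vmem" walk above visits a
 * parent arena before its children, whereas this postfix variant visits
 * every child before its parent, which is convenient when statistics
 * imported from child arenas must be accumulated first.
 */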
34587c478bd9Sstevel@tonic-gate
34597c478bd9Sstevel@tonic-gate void
vmem_walk_fini(mdb_walk_state_t * wsp)34607c478bd9Sstevel@tonic-gate vmem_walk_fini(mdb_walk_state_t *wsp)
34617c478bd9Sstevel@tonic-gate {
34627c478bd9Sstevel@tonic-gate vmem_walk_t *vw = wsp->walk_data;
34637c478bd9Sstevel@tonic-gate vmem_node_t *root = vw->vw_root;
34647c478bd9Sstevel@tonic-gate int done;
34657c478bd9Sstevel@tonic-gate
34667c478bd9Sstevel@tonic-gate if (root == NULL)
34677c478bd9Sstevel@tonic-gate return;
34687c478bd9Sstevel@tonic-gate
34697c478bd9Sstevel@tonic-gate if ((vw->vw_root = root->vn_children) != NULL)
34707c478bd9Sstevel@tonic-gate vmem_walk_fini(wsp);
34717c478bd9Sstevel@tonic-gate
34727c478bd9Sstevel@tonic-gate vw->vw_root = root->vn_sibling;
34737c478bd9Sstevel@tonic-gate done = (root->vn_sibling == NULL && root->vn_parent == NULL);
34747c478bd9Sstevel@tonic-gate mdb_free(root, sizeof (vmem_node_t));
34757c478bd9Sstevel@tonic-gate
34767c478bd9Sstevel@tonic-gate if (done) {
34777c478bd9Sstevel@tonic-gate mdb_free(vw, sizeof (vmem_walk_t));
34787c478bd9Sstevel@tonic-gate } else {
34797c478bd9Sstevel@tonic-gate vmem_walk_fini(wsp);
34807c478bd9Sstevel@tonic-gate }
34817c478bd9Sstevel@tonic-gate }
34827c478bd9Sstevel@tonic-gate
34837c478bd9Sstevel@tonic-gate typedef struct vmem_seg_walk {
34847c478bd9Sstevel@tonic-gate uint8_t vsw_type;
34857c478bd9Sstevel@tonic-gate uintptr_t vsw_start;
34867c478bd9Sstevel@tonic-gate uintptr_t vsw_current;
34877c478bd9Sstevel@tonic-gate } vmem_seg_walk_t;
34887c478bd9Sstevel@tonic-gate
34897c478bd9Sstevel@tonic-gate /*ARGSUSED*/
34907c478bd9Sstevel@tonic-gate int
vmem_seg_walk_common_init(mdb_walk_state_t * wsp,uint8_t type,char * name)34917c478bd9Sstevel@tonic-gate vmem_seg_walk_common_init(mdb_walk_state_t *wsp, uint8_t type, char *name)
34927c478bd9Sstevel@tonic-gate {
34937c478bd9Sstevel@tonic-gate vmem_seg_walk_t *vsw;
34947c478bd9Sstevel@tonic-gate
34957c478bd9Sstevel@tonic-gate if (wsp->walk_addr == NULL) {
34967c478bd9Sstevel@tonic-gate mdb_warn("vmem_%s does not support global walks\n", name);
34977c478bd9Sstevel@tonic-gate return (WALK_ERR);
34987c478bd9Sstevel@tonic-gate }
34997c478bd9Sstevel@tonic-gate
35007c478bd9Sstevel@tonic-gate wsp->walk_data = vsw = mdb_alloc(sizeof (vmem_seg_walk_t), UM_SLEEP);
35017c478bd9Sstevel@tonic-gate
35027c478bd9Sstevel@tonic-gate vsw->vsw_type = type;
35037c478bd9Sstevel@tonic-gate vsw->vsw_start = wsp->walk_addr + offsetof(vmem_t, vm_seg0);
35047c478bd9Sstevel@tonic-gate vsw->vsw_current = vsw->vsw_start;
35057c478bd9Sstevel@tonic-gate
35067c478bd9Sstevel@tonic-gate return (WALK_NEXT);
35077c478bd9Sstevel@tonic-gate }
35087c478bd9Sstevel@tonic-gate
35097c478bd9Sstevel@tonic-gate /*
35107c478bd9Sstevel@tonic-gate * vmem segments can't have type 0 (this should be added to vmem_impl.h).
35117c478bd9Sstevel@tonic-gate */
35127c478bd9Sstevel@tonic-gate #define VMEM_NONE 0
35137c478bd9Sstevel@tonic-gate
35147c478bd9Sstevel@tonic-gate int
vmem_alloc_walk_init(mdb_walk_state_t * wsp)35157c478bd9Sstevel@tonic-gate vmem_alloc_walk_init(mdb_walk_state_t *wsp)
35167c478bd9Sstevel@tonic-gate {
35177c478bd9Sstevel@tonic-gate return (vmem_seg_walk_common_init(wsp, VMEM_ALLOC, "alloc"));
35187c478bd9Sstevel@tonic-gate }
35197c478bd9Sstevel@tonic-gate
35207c478bd9Sstevel@tonic-gate int
vmem_free_walk_init(mdb_walk_state_t * wsp)35217c478bd9Sstevel@tonic-gate vmem_free_walk_init(mdb_walk_state_t *wsp)
35227c478bd9Sstevel@tonic-gate {
35237c478bd9Sstevel@tonic-gate return (vmem_seg_walk_common_init(wsp, VMEM_FREE, "free"));
35247c478bd9Sstevel@tonic-gate }
35257c478bd9Sstevel@tonic-gate
35267c478bd9Sstevel@tonic-gate int
vmem_span_walk_init(mdb_walk_state_t * wsp)35277c478bd9Sstevel@tonic-gate vmem_span_walk_init(mdb_walk_state_t *wsp)
35287c478bd9Sstevel@tonic-gate {
35297c478bd9Sstevel@tonic-gate return (vmem_seg_walk_common_init(wsp, VMEM_SPAN, "span"));
35307c478bd9Sstevel@tonic-gate }
35317c478bd9Sstevel@tonic-gate
35327c478bd9Sstevel@tonic-gate int
vmem_seg_walk_init(mdb_walk_state_t * wsp)35337c478bd9Sstevel@tonic-gate vmem_seg_walk_init(mdb_walk_state_t *wsp)
35347c478bd9Sstevel@tonic-gate {
35357c478bd9Sstevel@tonic-gate return (vmem_seg_walk_common_init(wsp, VMEM_NONE, "seg"));
35367c478bd9Sstevel@tonic-gate }
35377c478bd9Sstevel@tonic-gate
35387c478bd9Sstevel@tonic-gate int
vmem_seg_walk_step(mdb_walk_state_t * wsp)35397c478bd9Sstevel@tonic-gate vmem_seg_walk_step(mdb_walk_state_t *wsp)
35407c478bd9Sstevel@tonic-gate {
35417c478bd9Sstevel@tonic-gate vmem_seg_t seg;
35427c478bd9Sstevel@tonic-gate vmem_seg_walk_t *vsw = wsp->walk_data;
35437c478bd9Sstevel@tonic-gate uintptr_t addr = vsw->vsw_current;
35447c478bd9Sstevel@tonic-gate static size_t seg_size = 0;
35457c478bd9Sstevel@tonic-gate int rval;
35467c478bd9Sstevel@tonic-gate
35477c478bd9Sstevel@tonic-gate if (!seg_size) {
35487c478bd9Sstevel@tonic-gate if (mdb_readvar(&seg_size, "vmem_seg_size") == -1) {
35497c478bd9Sstevel@tonic-gate mdb_warn("failed to read 'vmem_seg_size'");
35507c478bd9Sstevel@tonic-gate seg_size = sizeof (vmem_seg_t);
35517c478bd9Sstevel@tonic-gate }
35527c478bd9Sstevel@tonic-gate }
35537c478bd9Sstevel@tonic-gate
35547c478bd9Sstevel@tonic-gate if (seg_size < sizeof (seg))
35557c478bd9Sstevel@tonic-gate bzero((caddr_t)&seg + seg_size, sizeof (seg) - seg_size);
35567c478bd9Sstevel@tonic-gate
35577c478bd9Sstevel@tonic-gate if (mdb_vread(&seg, seg_size, addr) == -1) {
35587c478bd9Sstevel@tonic-gate mdb_warn("couldn't read vmem_seg at %p", addr);
35597c478bd9Sstevel@tonic-gate return (WALK_ERR);
35607c478bd9Sstevel@tonic-gate }
35617c478bd9Sstevel@tonic-gate
35627c478bd9Sstevel@tonic-gate vsw->vsw_current = (uintptr_t)seg.vs_anext;
35637c478bd9Sstevel@tonic-gate if (vsw->vsw_type != VMEM_NONE && seg.vs_type != vsw->vsw_type) {
35647c478bd9Sstevel@tonic-gate rval = WALK_NEXT;
35657c478bd9Sstevel@tonic-gate } else {
35667c478bd9Sstevel@tonic-gate rval = wsp->walk_callback(addr, &seg, wsp->walk_cbdata);
35677c478bd9Sstevel@tonic-gate }
35687c478bd9Sstevel@tonic-gate
35697c478bd9Sstevel@tonic-gate if (vsw->vsw_current == vsw->vsw_start)
35707c478bd9Sstevel@tonic-gate return (WALK_DONE);
35717c478bd9Sstevel@tonic-gate
35727c478bd9Sstevel@tonic-gate return (rval);
35737c478bd9Sstevel@tonic-gate }
35747c478bd9Sstevel@tonic-gate
35757c478bd9Sstevel@tonic-gate void
vmem_seg_walk_fini(mdb_walk_state_t * wsp)35767c478bd9Sstevel@tonic-gate vmem_seg_walk_fini(mdb_walk_state_t *wsp)
35777c478bd9Sstevel@tonic-gate {
35787c478bd9Sstevel@tonic-gate vmem_seg_walk_t *vsw = wsp->walk_data;
35797c478bd9Sstevel@tonic-gate
35807c478bd9Sstevel@tonic-gate mdb_free(vsw, sizeof (vmem_seg_walk_t));
35817c478bd9Sstevel@tonic-gate }
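/*
 * Example (illustrative): these walkers require an arena address, so a
 * typical use is "addr::walk vmem_alloc | ::vmem_seg", where addr is a
 * vmem_t address obtained from ::vmem; addressless (global) walks are
 * rejected in vmem_seg_walk_common_init() above.
 */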
35827c478bd9Sstevel@tonic-gate
35837c478bd9Sstevel@tonic-gate #define VMEM_NAMEWIDTH 22
35847c478bd9Sstevel@tonic-gate
35857c478bd9Sstevel@tonic-gate int
vmem(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)35867c478bd9Sstevel@tonic-gate vmem(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
35877c478bd9Sstevel@tonic-gate {
35887c478bd9Sstevel@tonic-gate vmem_t v, parent;
35897c478bd9Sstevel@tonic-gate vmem_kstat_t *vkp = &v.vm_kstat;
35907c478bd9Sstevel@tonic-gate uintptr_t paddr;
35917c478bd9Sstevel@tonic-gate int ident = 0;
35927c478bd9Sstevel@tonic-gate char c[VMEM_NAMEWIDTH];
35937c478bd9Sstevel@tonic-gate
35947c478bd9Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC)) {
35957c478bd9Sstevel@tonic-gate if (mdb_walk_dcmd("vmem", "vmem", argc, argv) == -1) {
35967c478bd9Sstevel@tonic-gate mdb_warn("can't walk vmem");
35977c478bd9Sstevel@tonic-gate return (DCMD_ERR);
35987c478bd9Sstevel@tonic-gate }
35997c478bd9Sstevel@tonic-gate return (DCMD_OK);
36007c478bd9Sstevel@tonic-gate }
36017c478bd9Sstevel@tonic-gate
36027c478bd9Sstevel@tonic-gate if (DCMD_HDRSPEC(flags))
36037c478bd9Sstevel@tonic-gate mdb_printf("%-?s %-*s %10s %12s %9s %5s\n",
36047c478bd9Sstevel@tonic-gate "ADDR", VMEM_NAMEWIDTH, "NAME", "INUSE",
36057c478bd9Sstevel@tonic-gate "TOTAL", "SUCCEED", "FAIL");
36067c478bd9Sstevel@tonic-gate
36077c478bd9Sstevel@tonic-gate if (mdb_vread(&v, sizeof (v), addr) == -1) {
36087c478bd9Sstevel@tonic-gate mdb_warn("couldn't read vmem at %p", addr);
36097c478bd9Sstevel@tonic-gate return (DCMD_ERR);
36107c478bd9Sstevel@tonic-gate }
36117c478bd9Sstevel@tonic-gate
36127c478bd9Sstevel@tonic-gate for (paddr = (uintptr_t)v.vm_source; paddr != NULL; ident += 2) {
36137c478bd9Sstevel@tonic-gate if (mdb_vread(&parent, sizeof (parent), paddr) == -1) {
36147c478bd9Sstevel@tonic-gate mdb_warn("couldn't trace %p's ancestry", addr);
36157c478bd9Sstevel@tonic-gate ident = 0;
36167c478bd9Sstevel@tonic-gate break;
36177c478bd9Sstevel@tonic-gate }
36187c478bd9Sstevel@tonic-gate paddr = (uintptr_t)parent.vm_source;
36197c478bd9Sstevel@tonic-gate }
36207c478bd9Sstevel@tonic-gate
36217c478bd9Sstevel@tonic-gate (void) mdb_snprintf(c, VMEM_NAMEWIDTH, "%*s%s", ident, "", v.vm_name);
36227c478bd9Sstevel@tonic-gate
36237c478bd9Sstevel@tonic-gate mdb_printf("%0?p %-*s %10llu %12llu %9llu %5llu\n",
36247c478bd9Sstevel@tonic-gate addr, VMEM_NAMEWIDTH, c,
36257c478bd9Sstevel@tonic-gate vkp->vk_mem_inuse.value.ui64, vkp->vk_mem_total.value.ui64,
36267c478bd9Sstevel@tonic-gate vkp->vk_alloc.value.ui64, vkp->vk_fail.value.ui64);
36277c478bd9Sstevel@tonic-gate
36287c478bd9Sstevel@tonic-gate return (DCMD_OK);
36297c478bd9Sstevel@tonic-gate }
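/*
 * Sample use (illustrative): "::vmem" prints one line per arena, with
 * the name indented two spaces per level of vm_source parentage, so the
 * arena hierarchy can be read directly from the NAME column.
 */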
36307c478bd9Sstevel@tonic-gate
36317c478bd9Sstevel@tonic-gate void
vmem_seg_help(void)36327c478bd9Sstevel@tonic-gate vmem_seg_help(void)
36337c478bd9Sstevel@tonic-gate {
3634b5fca8f8Stomee mdb_printf("%s",
3635b5fca8f8Stomee "Display the contents of vmem_seg_ts, with optional filtering.\n"
36367c478bd9Sstevel@tonic-gate "\n"
36377c478bd9Sstevel@tonic-gate "A vmem_seg_t represents a range of addresses (or arbitrary numbers)\n"
36387c478bd9Sstevel@tonic-gate "comprising a single chunk of data. Only ALLOC segments have debugging\n"
36397c478bd9Sstevel@tonic-gate "information.\n");
36407c478bd9Sstevel@tonic-gate mdb_dec_indent(2);
36417c478bd9Sstevel@tonic-gate mdb_printf("%<b>OPTIONS%</b>\n");
36427c478bd9Sstevel@tonic-gate mdb_inc_indent(2);
36437c478bd9Sstevel@tonic-gate mdb_printf("%s",
36447c478bd9Sstevel@tonic-gate " -v Display the full content of the vmem_seg, including its stack trace\n"
36457c478bd9Sstevel@tonic-gate " -s report the size of the segment, instead of the end address\n"
36467c478bd9Sstevel@tonic-gate " -c caller\n"
36477c478bd9Sstevel@tonic-gate " filter out segments without the function/PC in their stack trace\n"
36487c478bd9Sstevel@tonic-gate " -e earliest\n"
36497c478bd9Sstevel@tonic-gate " filter out segments timestamped before earliest\n"
36507c478bd9Sstevel@tonic-gate " -l latest\n"
36517c478bd9Sstevel@tonic-gate " filter out segments timestamped after latest\n"
36527c478bd9Sstevel@tonic-gate " -m minsize\n"
36537c478bd9Sstevel@tonic-gate " filter out segments smaller than minsize\n"
36547c478bd9Sstevel@tonic-gate " -M maxsize\n"
36557c478bd9Sstevel@tonic-gate " filter out segments larger than maxsize\n"
36567c478bd9Sstevel@tonic-gate " -t thread\n"
36577c478bd9Sstevel@tonic-gate " filter out segments not involving thread\n"
36587c478bd9Sstevel@tonic-gate " -T type\n"
36597c478bd9Sstevel@tonic-gate " filter out segments not of type 'type'\n"
36607c478bd9Sstevel@tonic-gate " type is one of: ALLOC/FREE/SPAN/ROTOR/WALKER\n");
36617c478bd9Sstevel@tonic-gate }
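/*
 * For instance (illustrative): "addr::walk vmem_alloc | ::vmem_seg -m
 * 0t8192" shows only ALLOC segments of at least 8K, and adding -v also
 * prints the owning thread, timestamp, and saved stack for each one.
 */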
36627c478bd9Sstevel@tonic-gate
36637c478bd9Sstevel@tonic-gate /*ARGSUSED*/
36647c478bd9Sstevel@tonic-gate int
vmem_seg(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)36657c478bd9Sstevel@tonic-gate vmem_seg(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
36667c478bd9Sstevel@tonic-gate {
36677c478bd9Sstevel@tonic-gate vmem_seg_t vs;
36687c478bd9Sstevel@tonic-gate pc_t *stk = vs.vs_stack;
36697c478bd9Sstevel@tonic-gate uintptr_t sz;
36707c478bd9Sstevel@tonic-gate uint8_t t;
36717c478bd9Sstevel@tonic-gate const char *type = NULL;
36727c478bd9Sstevel@tonic-gate GElf_Sym sym;
36737c478bd9Sstevel@tonic-gate char c[MDB_SYM_NAMLEN];
36747c478bd9Sstevel@tonic-gate int no_debug;
36757c478bd9Sstevel@tonic-gate int i;
36767c478bd9Sstevel@tonic-gate int depth;
36777c478bd9Sstevel@tonic-gate uintptr_t laddr, haddr;
36787c478bd9Sstevel@tonic-gate
36797c478bd9Sstevel@tonic-gate uintptr_t caller = NULL, thread = NULL;
36807c478bd9Sstevel@tonic-gate uintptr_t minsize = 0, maxsize = 0;
36817c478bd9Sstevel@tonic-gate
36827c478bd9Sstevel@tonic-gate hrtime_t earliest = 0, latest = 0;
36837c478bd9Sstevel@tonic-gate
36847c478bd9Sstevel@tonic-gate uint_t size = 0;
36857c478bd9Sstevel@tonic-gate uint_t verbose = 0;
36867c478bd9Sstevel@tonic-gate
36877c478bd9Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC))
36887c478bd9Sstevel@tonic-gate return (DCMD_USAGE);
36897c478bd9Sstevel@tonic-gate
36907c478bd9Sstevel@tonic-gate if (mdb_getopts(argc, argv,
36917c478bd9Sstevel@tonic-gate 'c', MDB_OPT_UINTPTR, &caller,
36927c478bd9Sstevel@tonic-gate 'e', MDB_OPT_UINT64, &earliest,
36937c478bd9Sstevel@tonic-gate 'l', MDB_OPT_UINT64, &latest,
36947c478bd9Sstevel@tonic-gate 's', MDB_OPT_SETBITS, TRUE, &size,
36957c478bd9Sstevel@tonic-gate 'm', MDB_OPT_UINTPTR, &minsize,
36967c478bd9Sstevel@tonic-gate 'M', MDB_OPT_UINTPTR, &maxsize,
36977c478bd9Sstevel@tonic-gate 't', MDB_OPT_UINTPTR, &thread,
36987c478bd9Sstevel@tonic-gate 'T', MDB_OPT_STR, &type,
36997c478bd9Sstevel@tonic-gate 'v', MDB_OPT_SETBITS, TRUE, &verbose,
37007c478bd9Sstevel@tonic-gate NULL) != argc)
37017c478bd9Sstevel@tonic-gate return (DCMD_USAGE);
37027c478bd9Sstevel@tonic-gate
37037c478bd9Sstevel@tonic-gate if (DCMD_HDRSPEC(flags) && !(flags & DCMD_PIPE_OUT)) {
37047c478bd9Sstevel@tonic-gate if (verbose) {
37057c478bd9Sstevel@tonic-gate mdb_printf("%16s %4s %16s %16s %16s\n"
37067c478bd9Sstevel@tonic-gate "%<u>%16s %4s %16s %16s %16s%</u>\n",
37077c478bd9Sstevel@tonic-gate "ADDR", "TYPE", "START", "END", "SIZE",
37087c478bd9Sstevel@tonic-gate "", "", "THREAD", "TIMESTAMP", "");
37097c478bd9Sstevel@tonic-gate } else {
37107c478bd9Sstevel@tonic-gate mdb_printf("%?s %4s %?s %?s %s\n", "ADDR", "TYPE",
37117c478bd9Sstevel@tonic-gate "START", size ? "SIZE" : "END", "WHO");
37127c478bd9Sstevel@tonic-gate }
37137c478bd9Sstevel@tonic-gate }
37147c478bd9Sstevel@tonic-gate
37157c478bd9Sstevel@tonic-gate if (mdb_vread(&vs, sizeof (vs), addr) == -1) {
37167c478bd9Sstevel@tonic-gate mdb_warn("couldn't read vmem_seg at %p", addr);
37177c478bd9Sstevel@tonic-gate return (DCMD_ERR);
37187c478bd9Sstevel@tonic-gate }
37197c478bd9Sstevel@tonic-gate
37207c478bd9Sstevel@tonic-gate if (type != NULL) {
37217c478bd9Sstevel@tonic-gate if (strcmp(type, "ALLC") == 0 || strcmp(type, "ALLOC") == 0)
37227c478bd9Sstevel@tonic-gate t = VMEM_ALLOC;
37237c478bd9Sstevel@tonic-gate else if (strcmp(type, "FREE") == 0)
37247c478bd9Sstevel@tonic-gate t = VMEM_FREE;
37257c478bd9Sstevel@tonic-gate else if (strcmp(type, "SPAN") == 0)
37267c478bd9Sstevel@tonic-gate t = VMEM_SPAN;
37277c478bd9Sstevel@tonic-gate else if (strcmp(type, "ROTR") == 0 ||
37287c478bd9Sstevel@tonic-gate strcmp(type, "ROTOR") == 0)
37297c478bd9Sstevel@tonic-gate t = VMEM_ROTOR;
37307c478bd9Sstevel@tonic-gate else if (strcmp(type, "WLKR") == 0 ||
37317c478bd9Sstevel@tonic-gate strcmp(type, "WALKER") == 0)
37327c478bd9Sstevel@tonic-gate t = VMEM_WALKER;
37337c478bd9Sstevel@tonic-gate else {
37347c478bd9Sstevel@tonic-gate mdb_warn("\"%s\" is not a recognized vmem_seg type\n",
37357c478bd9Sstevel@tonic-gate type);
37367c478bd9Sstevel@tonic-gate return (DCMD_ERR);
37377c478bd9Sstevel@tonic-gate }
37387c478bd9Sstevel@tonic-gate
37397c478bd9Sstevel@tonic-gate if (vs.vs_type != t)
37407c478bd9Sstevel@tonic-gate return (DCMD_OK);
37417c478bd9Sstevel@tonic-gate }
37427c478bd9Sstevel@tonic-gate
37437c478bd9Sstevel@tonic-gate sz = vs.vs_end - vs.vs_start;
37447c478bd9Sstevel@tonic-gate
37457c478bd9Sstevel@tonic-gate if (minsize != 0 && sz < minsize)
37467c478bd9Sstevel@tonic-gate return (DCMD_OK);
37477c478bd9Sstevel@tonic-gate
37487c478bd9Sstevel@tonic-gate if (maxsize != 0 && sz > maxsize)
37497c478bd9Sstevel@tonic-gate return (DCMD_OK);
37507c478bd9Sstevel@tonic-gate
37517c478bd9Sstevel@tonic-gate t = vs.vs_type;
37527c478bd9Sstevel@tonic-gate depth = vs.vs_depth;
37537c478bd9Sstevel@tonic-gate
37547c478bd9Sstevel@tonic-gate /*
37557c478bd9Sstevel@tonic-gate * debug info, when present, is only accurate for VMEM_ALLOC segments
37567c478bd9Sstevel@tonic-gate */
37577c478bd9Sstevel@tonic-gate no_debug = (t != VMEM_ALLOC) ||
37587c478bd9Sstevel@tonic-gate (depth == 0 || depth > VMEM_STACK_DEPTH);
37597c478bd9Sstevel@tonic-gate
37607c478bd9Sstevel@tonic-gate if (no_debug) {
37617c478bd9Sstevel@tonic-gate if (caller != NULL || thread != NULL || earliest != 0 ||
37627c478bd9Sstevel@tonic-gate latest != 0)
37637c478bd9Sstevel@tonic-gate return (DCMD_OK); /* not enough info */
37647c478bd9Sstevel@tonic-gate } else {
37657c478bd9Sstevel@tonic-gate if (caller != NULL) {
37667c478bd9Sstevel@tonic-gate laddr = caller;
37677c478bd9Sstevel@tonic-gate haddr = caller + sizeof (caller);
37687c478bd9Sstevel@tonic-gate
37697c478bd9Sstevel@tonic-gate if (mdb_lookup_by_addr(caller, MDB_SYM_FUZZY, c,
37707c478bd9Sstevel@tonic-gate sizeof (c), &sym) != -1 &&
37717c478bd9Sstevel@tonic-gate caller == (uintptr_t)sym.st_value) {
37727c478bd9Sstevel@tonic-gate /*
37737c478bd9Sstevel@tonic-gate * We were provided an exact symbol value; any
37747c478bd9Sstevel@tonic-gate * address in the function is valid.
37757c478bd9Sstevel@tonic-gate */
37767c478bd9Sstevel@tonic-gate laddr = (uintptr_t)sym.st_value;
37777c478bd9Sstevel@tonic-gate haddr = (uintptr_t)sym.st_value + sym.st_size;
37787c478bd9Sstevel@tonic-gate }
37797c478bd9Sstevel@tonic-gate
37807c478bd9Sstevel@tonic-gate for (i = 0; i < depth; i++)
37817c478bd9Sstevel@tonic-gate if (vs.vs_stack[i] >= laddr &&
37827c478bd9Sstevel@tonic-gate vs.vs_stack[i] < haddr)
37837c478bd9Sstevel@tonic-gate break;
37847c478bd9Sstevel@tonic-gate
37857c478bd9Sstevel@tonic-gate if (i == depth)
37867c478bd9Sstevel@tonic-gate return (DCMD_OK);
37877c478bd9Sstevel@tonic-gate }
37887c478bd9Sstevel@tonic-gate
37897c478bd9Sstevel@tonic-gate if (thread != NULL && (uintptr_t)vs.vs_thread != thread)
37907c478bd9Sstevel@tonic-gate return (DCMD_OK);
37917c478bd9Sstevel@tonic-gate
37927c478bd9Sstevel@tonic-gate if (earliest != 0 && vs.vs_timestamp < earliest)
37937c478bd9Sstevel@tonic-gate return (DCMD_OK);
37947c478bd9Sstevel@tonic-gate
37957c478bd9Sstevel@tonic-gate if (latest != 0 && vs.vs_timestamp > latest)
37967c478bd9Sstevel@tonic-gate return (DCMD_OK);
37977c478bd9Sstevel@tonic-gate }
37987c478bd9Sstevel@tonic-gate
37997c478bd9Sstevel@tonic-gate type = (t == VMEM_ALLOC ? "ALLC" :
38007c478bd9Sstevel@tonic-gate t == VMEM_FREE ? "FREE" :
38017c478bd9Sstevel@tonic-gate t == VMEM_SPAN ? "SPAN" :
38027c478bd9Sstevel@tonic-gate t == VMEM_ROTOR ? "ROTR" :
38037c478bd9Sstevel@tonic-gate t == VMEM_WALKER ? "WLKR" :
38047c478bd9Sstevel@tonic-gate "????");
38057c478bd9Sstevel@tonic-gate
38067c478bd9Sstevel@tonic-gate if (flags & DCMD_PIPE_OUT) {
38077c478bd9Sstevel@tonic-gate mdb_printf("%#lr\n", addr);
38087c478bd9Sstevel@tonic-gate return (DCMD_OK);
38097c478bd9Sstevel@tonic-gate }
38107c478bd9Sstevel@tonic-gate
38117c478bd9Sstevel@tonic-gate if (verbose) {
38127c478bd9Sstevel@tonic-gate mdb_printf("%<b>%16p%</b> %4s %16p %16p %16d\n",
38137c478bd9Sstevel@tonic-gate addr, type, vs.vs_start, vs.vs_end, sz);
38147c478bd9Sstevel@tonic-gate
38157c478bd9Sstevel@tonic-gate if (no_debug)
38167c478bd9Sstevel@tonic-gate return (DCMD_OK);
38177c478bd9Sstevel@tonic-gate
38187c478bd9Sstevel@tonic-gate mdb_printf("%16s %4s %16p %16llx\n",
38197c478bd9Sstevel@tonic-gate "", "", vs.vs_thread, vs.vs_timestamp);
38207c478bd9Sstevel@tonic-gate
38217c478bd9Sstevel@tonic-gate mdb_inc_indent(17);
38227c478bd9Sstevel@tonic-gate for (i = 0; i < depth; i++) {
38237c478bd9Sstevel@tonic-gate mdb_printf("%a\n", stk[i]);
38247c478bd9Sstevel@tonic-gate }
38257c478bd9Sstevel@tonic-gate mdb_dec_indent(17);
38267c478bd9Sstevel@tonic-gate mdb_printf("\n");
38277c478bd9Sstevel@tonic-gate } else {
38287c478bd9Sstevel@tonic-gate mdb_printf("%0?p %4s %0?p %0?p", addr, type,
38297c478bd9Sstevel@tonic-gate vs.vs_start, size ? sz : vs.vs_end);
38307c478bd9Sstevel@tonic-gate
38317c478bd9Sstevel@tonic-gate if (no_debug) {
38327c478bd9Sstevel@tonic-gate mdb_printf("\n");
38337c478bd9Sstevel@tonic-gate return (DCMD_OK);
38347c478bd9Sstevel@tonic-gate }
38357c478bd9Sstevel@tonic-gate
38367c478bd9Sstevel@tonic-gate for (i = 0; i < depth; i++) {
38377c478bd9Sstevel@tonic-gate if (mdb_lookup_by_addr(stk[i], MDB_SYM_FUZZY,
38387c478bd9Sstevel@tonic-gate c, sizeof (c), &sym) == -1)
38397c478bd9Sstevel@tonic-gate continue;
38407c478bd9Sstevel@tonic-gate if (strncmp(c, "vmem_", 5) == 0)
38417c478bd9Sstevel@tonic-gate continue;
38427c478bd9Sstevel@tonic-gate break;
38437c478bd9Sstevel@tonic-gate }
38447c478bd9Sstevel@tonic-gate mdb_printf(" %a\n", stk[i]);
38457c478bd9Sstevel@tonic-gate }
38467c478bd9Sstevel@tonic-gate return (DCMD_OK);
38477c478bd9Sstevel@tonic-gate }
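/*
 * Since a piped ::vmem_seg prints only the bare address of each segment
 * that survives its filters, filter stages can be chained, for example
 * (illustrative): "... | ::vmem_seg -T ALLC | ::vmem_seg -m 0t4096 |
 * ::vmem_seg -v".
 */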
38487c478bd9Sstevel@tonic-gate
38497c478bd9Sstevel@tonic-gate typedef struct kmalog_data {
38507c478bd9Sstevel@tonic-gate uintptr_t kma_addr;
38517c478bd9Sstevel@tonic-gate hrtime_t kma_newest;
38527c478bd9Sstevel@tonic-gate } kmalog_data_t;
38537c478bd9Sstevel@tonic-gate
38547c478bd9Sstevel@tonic-gate /*ARGSUSED*/
38557c478bd9Sstevel@tonic-gate static int
showbc(uintptr_t addr,const kmem_bufctl_audit_t * bcp,kmalog_data_t * kma)38567c478bd9Sstevel@tonic-gate showbc(uintptr_t addr, const kmem_bufctl_audit_t *bcp, kmalog_data_t *kma)
38577c478bd9Sstevel@tonic-gate {
38587c478bd9Sstevel@tonic-gate char name[KMEM_CACHE_NAMELEN + 1];
38597c478bd9Sstevel@tonic-gate hrtime_t delta;
38607c478bd9Sstevel@tonic-gate int i, depth;
38617c478bd9Sstevel@tonic-gate size_t bufsize;
38627c478bd9Sstevel@tonic-gate
38637c478bd9Sstevel@tonic-gate if (bcp->bc_timestamp == 0)
38647c478bd9Sstevel@tonic-gate return (WALK_DONE);
38657c478bd9Sstevel@tonic-gate
38667c478bd9Sstevel@tonic-gate if (kma->kma_newest == 0)
38677c478bd9Sstevel@tonic-gate kma->kma_newest = bcp->bc_timestamp;
38687c478bd9Sstevel@tonic-gate
38697c478bd9Sstevel@tonic-gate if (kma->kma_addr) {
38707c478bd9Sstevel@tonic-gate if (mdb_vread(&bufsize, sizeof (bufsize),
38717c478bd9Sstevel@tonic-gate (uintptr_t)&bcp->bc_cache->cache_bufsize) == -1) {
38727c478bd9Sstevel@tonic-gate mdb_warn(
38737c478bd9Sstevel@tonic-gate "failed to read cache_bufsize for cache at %p",
38747c478bd9Sstevel@tonic-gate bcp->bc_cache);
38757c478bd9Sstevel@tonic-gate return (WALK_ERR);
38767c478bd9Sstevel@tonic-gate }
38777c478bd9Sstevel@tonic-gate
38787c478bd9Sstevel@tonic-gate if (kma->kma_addr < (uintptr_t)bcp->bc_addr ||
38797c478bd9Sstevel@tonic-gate kma->kma_addr >= (uintptr_t)bcp->bc_addr + bufsize)
38807c478bd9Sstevel@tonic-gate return (WALK_NEXT);
38817c478bd9Sstevel@tonic-gate }
38827c478bd9Sstevel@tonic-gate
38837c478bd9Sstevel@tonic-gate delta = kma->kma_newest - bcp->bc_timestamp;
38847c478bd9Sstevel@tonic-gate depth = MIN(bcp->bc_depth, KMEM_STACK_DEPTH);
38857c478bd9Sstevel@tonic-gate
38867c478bd9Sstevel@tonic-gate if (mdb_readstr(name, sizeof (name), (uintptr_t)
38877c478bd9Sstevel@tonic-gate &bcp->bc_cache->cache_name) <= 0)
38887c478bd9Sstevel@tonic-gate (void) mdb_snprintf(name, sizeof (name), "%a", bcp->bc_cache);
38897c478bd9Sstevel@tonic-gate
38907c478bd9Sstevel@tonic-gate mdb_printf("\nT-%lld.%09lld addr=%p %s\n",
38917c478bd9Sstevel@tonic-gate delta / NANOSEC, delta % NANOSEC, bcp->bc_addr, name);
38927c478bd9Sstevel@tonic-gate
38937c478bd9Sstevel@tonic-gate for (i = 0; i < depth; i++)
38947c478bd9Sstevel@tonic-gate mdb_printf("\t %a\n", bcp->bc_stack[i]);
38957c478bd9Sstevel@tonic-gate
38967c478bd9Sstevel@tonic-gate return (WALK_NEXT);
38977c478bd9Sstevel@tonic-gate }
38987c478bd9Sstevel@tonic-gate
38997c478bd9Sstevel@tonic-gate int
kmalog(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)39007c478bd9Sstevel@tonic-gate kmalog(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
39017c478bd9Sstevel@tonic-gate {
39027c478bd9Sstevel@tonic-gate const char *logname = "kmem_transaction_log";
39037c478bd9Sstevel@tonic-gate kmalog_data_t kma;
39047c478bd9Sstevel@tonic-gate
39057c478bd9Sstevel@tonic-gate if (argc > 1)
39067c478bd9Sstevel@tonic-gate return (DCMD_USAGE);
39077c478bd9Sstevel@tonic-gate
39087c478bd9Sstevel@tonic-gate kma.kma_newest = 0;
39097c478bd9Sstevel@tonic-gate if (flags & DCMD_ADDRSPEC)
39107c478bd9Sstevel@tonic-gate kma.kma_addr = addr;
39117c478bd9Sstevel@tonic-gate else
39127c478bd9Sstevel@tonic-gate kma.kma_addr = NULL;
39137c478bd9Sstevel@tonic-gate
39147c478bd9Sstevel@tonic-gate if (argc > 0) {
39157c478bd9Sstevel@tonic-gate if (argv->a_type != MDB_TYPE_STRING)
39167c478bd9Sstevel@tonic-gate return (DCMD_USAGE);
39177c478bd9Sstevel@tonic-gate if (strcmp(argv->a_un.a_str, "fail") == 0)
39187c478bd9Sstevel@tonic-gate logname = "kmem_failure_log";
39197c478bd9Sstevel@tonic-gate else if (strcmp(argv->a_un.a_str, "slab") == 0)
39207c478bd9Sstevel@tonic-gate logname = "kmem_slab_log";
39217c478bd9Sstevel@tonic-gate else
39227c478bd9Sstevel@tonic-gate return (DCMD_USAGE);
39237c478bd9Sstevel@tonic-gate }
39247c478bd9Sstevel@tonic-gate
39257c478bd9Sstevel@tonic-gate if (mdb_readvar(&addr, logname) == -1) {
39267c478bd9Sstevel@tonic-gate mdb_warn("failed to read %s log header pointer", logname);
39277c478bd9Sstevel@tonic-gate return (DCMD_ERR);
39287c478bd9Sstevel@tonic-gate }
39297c478bd9Sstevel@tonic-gate
39307c478bd9Sstevel@tonic-gate if (mdb_pwalk("kmem_log", (mdb_walk_cb_t)showbc, &kma, addr) == -1) {
39317c478bd9Sstevel@tonic-gate mdb_warn("failed to walk kmem log");
39327c478bd9Sstevel@tonic-gate return (DCMD_ERR);
39337c478bd9Sstevel@tonic-gate }
39347c478bd9Sstevel@tonic-gate
39357c478bd9Sstevel@tonic-gate return (DCMD_OK);
39367c478bd9Sstevel@tonic-gate }
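/*
 * Illustrative usage: "::kmalog" displays the transaction log with each
 * entry timestamped relative to the newest entry seen; "::kmalog fail"
 * and "::kmalog slab" select the failure and slab logs instead; and
 * "addr::kmalog" restricts output to bufctls whose buffer contains addr.
 */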
39377c478bd9Sstevel@tonic-gate
39387c478bd9Sstevel@tonic-gate /*
39397c478bd9Sstevel@tonic-gate * As the final lure for die-hard crash(1M) users, we provide ::kmausers here.
39407c478bd9Sstevel@tonic-gate * The first piece is a structure which we use to accumulate kmem_cache_t
39417c478bd9Sstevel@tonic-gate * addresses of interest. The kmc_add is used as a callback for the kmem_cache
39427c478bd9Sstevel@tonic-gate * walker; we either add all caches, or ones named explicitly as arguments.
39437c478bd9Sstevel@tonic-gate */
39447c478bd9Sstevel@tonic-gate
39457c478bd9Sstevel@tonic-gate typedef struct kmclist {
39467c478bd9Sstevel@tonic-gate const char *kmc_name; /* Name to match (or NULL) */
39477c478bd9Sstevel@tonic-gate uintptr_t *kmc_caches; /* List of kmem_cache_t addrs */
39487c478bd9Sstevel@tonic-gate int kmc_nelems; /* Num entries in kmc_caches */
39497c478bd9Sstevel@tonic-gate int kmc_size; /* Size of kmc_caches array */
39507c478bd9Sstevel@tonic-gate } kmclist_t;
39517c478bd9Sstevel@tonic-gate
39527c478bd9Sstevel@tonic-gate static int
kmc_add(uintptr_t addr,const kmem_cache_t * cp,kmclist_t * kmc)39537c478bd9Sstevel@tonic-gate kmc_add(uintptr_t addr, const kmem_cache_t *cp, kmclist_t *kmc)
39547c478bd9Sstevel@tonic-gate {
39557c478bd9Sstevel@tonic-gate void *p;
39567c478bd9Sstevel@tonic-gate int s;
39577c478bd9Sstevel@tonic-gate
39587c478bd9Sstevel@tonic-gate if (kmc->kmc_name == NULL ||
39597c478bd9Sstevel@tonic-gate strcmp(cp->cache_name, kmc->kmc_name) == 0) {
39607c478bd9Sstevel@tonic-gate /*
39617c478bd9Sstevel@tonic-gate * If we have a match, grow our array (if necessary), and then
39627c478bd9Sstevel@tonic-gate * add the virtual address of the matching cache to our list.
39637c478bd9Sstevel@tonic-gate */
39647c478bd9Sstevel@tonic-gate if (kmc->kmc_nelems >= kmc->kmc_size) {
39657c478bd9Sstevel@tonic-gate s = kmc->kmc_size ? kmc->kmc_size * 2 : 256;
39667c478bd9Sstevel@tonic-gate p = mdb_alloc(sizeof (uintptr_t) * s, UM_SLEEP | UM_GC);
39677c478bd9Sstevel@tonic-gate
39687c478bd9Sstevel@tonic-gate bcopy(kmc->kmc_caches, p,
39697c478bd9Sstevel@tonic-gate sizeof (uintptr_t) * kmc->kmc_size);
39707c478bd9Sstevel@tonic-gate
39717c478bd9Sstevel@tonic-gate kmc->kmc_caches = p;
39727c478bd9Sstevel@tonic-gate kmc->kmc_size = s;
39737c478bd9Sstevel@tonic-gate }
39747c478bd9Sstevel@tonic-gate
39757c478bd9Sstevel@tonic-gate kmc->kmc_caches[kmc->kmc_nelems++] = addr;
39767c478bd9Sstevel@tonic-gate return (kmc->kmc_name ? WALK_DONE : WALK_NEXT);
39777c478bd9Sstevel@tonic-gate }
39787c478bd9Sstevel@tonic-gate
39797c478bd9Sstevel@tonic-gate return (WALK_NEXT);
39807c478bd9Sstevel@tonic-gate }
39817c478bd9Sstevel@tonic-gate
39827c478bd9Sstevel@tonic-gate /*
39837c478bd9Sstevel@tonic-gate * The second piece of ::kmausers is a hash table of allocations. Each
39847c478bd9Sstevel@tonic-gate * allocation owner is identified by its stack trace and data_size. We then
39857c478bd9Sstevel@tonic-gate * track the total bytes of all such allocations, and the number of allocations
39867c478bd9Sstevel@tonic-gate * to report at the end. Once we have a list of caches, we walk through the
39877c478bd9Sstevel@tonic-gate * allocated bufctls of each, and update our hash table accordingly.
39887c478bd9Sstevel@tonic-gate */
39897c478bd9Sstevel@tonic-gate
39907c478bd9Sstevel@tonic-gate typedef struct kmowner {
39917c478bd9Sstevel@tonic-gate struct kmowner *kmo_head; /* First hash elt in bucket */
39927c478bd9Sstevel@tonic-gate struct kmowner *kmo_next; /* Next hash elt in chain */
39937c478bd9Sstevel@tonic-gate size_t kmo_signature; /* Hash table signature */
39947c478bd9Sstevel@tonic-gate uint_t kmo_num; /* Number of allocations */
39957c478bd9Sstevel@tonic-gate size_t kmo_data_size; /* Size of each allocation */
39967c478bd9Sstevel@tonic-gate size_t kmo_total_size; /* Total bytes of allocation */
39977c478bd9Sstevel@tonic-gate int kmo_depth; /* Depth of stack trace */
39987c478bd9Sstevel@tonic-gate uintptr_t kmo_stack[KMEM_STACK_DEPTH]; /* Stack trace */
39997c478bd9Sstevel@tonic-gate } kmowner_t;
40007c478bd9Sstevel@tonic-gate
40017c478bd9Sstevel@tonic-gate typedef struct kmusers {
40027c478bd9Sstevel@tonic-gate uintptr_t kmu_addr; /* address of interest */
40037c478bd9Sstevel@tonic-gate const kmem_cache_t *kmu_cache; /* Current kmem cache */
40047c478bd9Sstevel@tonic-gate kmowner_t *kmu_hash; /* Hash table of owners */
40057c478bd9Sstevel@tonic-gate int kmu_nelems; /* Number of entries in use */
40067c478bd9Sstevel@tonic-gate int kmu_size; /* Total number of entries */
40077c478bd9Sstevel@tonic-gate } kmusers_t;
40087c478bd9Sstevel@tonic-gate
40097c478bd9Sstevel@tonic-gate static void
kmu_add(kmusers_t * kmu,const kmem_bufctl_audit_t * bcp,size_t size,size_t data_size)40107c478bd9Sstevel@tonic-gate kmu_add(kmusers_t *kmu, const kmem_bufctl_audit_t *bcp,
40117c478bd9Sstevel@tonic-gate size_t size, size_t data_size)
40127c478bd9Sstevel@tonic-gate {
40137c478bd9Sstevel@tonic-gate int i, depth = MIN(bcp->bc_depth, KMEM_STACK_DEPTH);
40147c478bd9Sstevel@tonic-gate size_t bucket, signature = data_size;
40157c478bd9Sstevel@tonic-gate kmowner_t *kmo, *kmoend;
40167c478bd9Sstevel@tonic-gate
40177c478bd9Sstevel@tonic-gate /*
40187c478bd9Sstevel@tonic-gate * If the hash table is full, double its size and rehash everything.
40197c478bd9Sstevel@tonic-gate */
40207c478bd9Sstevel@tonic-gate if (kmu->kmu_nelems >= kmu->kmu_size) {
40217c478bd9Sstevel@tonic-gate int s = kmu->kmu_size ? kmu->kmu_size * 2 : 1024;
40227c478bd9Sstevel@tonic-gate
40237c478bd9Sstevel@tonic-gate kmo = mdb_alloc(sizeof (kmowner_t) * s, UM_SLEEP | UM_GC);
40247c478bd9Sstevel@tonic-gate bcopy(kmu->kmu_hash, kmo, sizeof (kmowner_t) * kmu->kmu_size);
40257c478bd9Sstevel@tonic-gate kmu->kmu_hash = kmo;
40267c478bd9Sstevel@tonic-gate kmu->kmu_size = s;
40277c478bd9Sstevel@tonic-gate
40287c478bd9Sstevel@tonic-gate kmoend = kmu->kmu_hash + kmu->kmu_size;
40297c478bd9Sstevel@tonic-gate for (kmo = kmu->kmu_hash; kmo < kmoend; kmo++)
40307c478bd9Sstevel@tonic-gate kmo->kmo_head = NULL;
40317c478bd9Sstevel@tonic-gate
40327c478bd9Sstevel@tonic-gate kmoend = kmu->kmu_hash + kmu->kmu_nelems;
40337c478bd9Sstevel@tonic-gate for (kmo = kmu->kmu_hash; kmo < kmoend; kmo++) {
40347c478bd9Sstevel@tonic-gate bucket = kmo->kmo_signature & (kmu->kmu_size - 1);
40357c478bd9Sstevel@tonic-gate kmo->kmo_next = kmu->kmu_hash[bucket].kmo_head;
40367c478bd9Sstevel@tonic-gate kmu->kmu_hash[bucket].kmo_head = kmo;
40377c478bd9Sstevel@tonic-gate }
40387c478bd9Sstevel@tonic-gate }
40397c478bd9Sstevel@tonic-gate
40407c478bd9Sstevel@tonic-gate /*
40417c478bd9Sstevel@tonic-gate * Finish computing the hash signature from the stack trace, and then
40427c478bd9Sstevel@tonic-gate * see if the owner is in the hash table. If so, update our stats.
40437c478bd9Sstevel@tonic-gate */
40447c478bd9Sstevel@tonic-gate for (i = 0; i < depth; i++)
40457c478bd9Sstevel@tonic-gate signature += bcp->bc_stack[i];
40467c478bd9Sstevel@tonic-gate
40477c478bd9Sstevel@tonic-gate bucket = signature & (kmu->kmu_size - 1);
40487c478bd9Sstevel@tonic-gate
40497c478bd9Sstevel@tonic-gate for (kmo = kmu->kmu_hash[bucket].kmo_head; kmo; kmo = kmo->kmo_next) {
40507c478bd9Sstevel@tonic-gate if (kmo->kmo_signature == signature) {
40517c478bd9Sstevel@tonic-gate size_t difference = 0;
40527c478bd9Sstevel@tonic-gate
40537c478bd9Sstevel@tonic-gate difference |= kmo->kmo_data_size - data_size;
40547c478bd9Sstevel@tonic-gate difference |= kmo->kmo_depth - depth;
40557c478bd9Sstevel@tonic-gate
40567c478bd9Sstevel@tonic-gate for (i = 0; i < depth; i++) {
40577c478bd9Sstevel@tonic-gate difference |= kmo->kmo_stack[i] -
40587c478bd9Sstevel@tonic-gate bcp->bc_stack[i];
40597c478bd9Sstevel@tonic-gate }
40607c478bd9Sstevel@tonic-gate
40617c478bd9Sstevel@tonic-gate if (difference == 0) {
40627c478bd9Sstevel@tonic-gate kmo->kmo_total_size += size;
40637c478bd9Sstevel@tonic-gate kmo->kmo_num++;
40647c478bd9Sstevel@tonic-gate return;
40657c478bd9Sstevel@tonic-gate }
40667c478bd9Sstevel@tonic-gate }
40677c478bd9Sstevel@tonic-gate }
40687c478bd9Sstevel@tonic-gate
40697c478bd9Sstevel@tonic-gate /*
40707c478bd9Sstevel@tonic-gate * If the owner is not yet hashed, grab the next element and fill it
40717c478bd9Sstevel@tonic-gate * in based on the allocation information.
40727c478bd9Sstevel@tonic-gate */
40737c478bd9Sstevel@tonic-gate kmo = &kmu->kmu_hash[kmu->kmu_nelems++];
40747c478bd9Sstevel@tonic-gate kmo->kmo_next = kmu->kmu_hash[bucket].kmo_head;
40757c478bd9Sstevel@tonic-gate kmu->kmu_hash[bucket].kmo_head = kmo;
40767c478bd9Sstevel@tonic-gate
40777c478bd9Sstevel@tonic-gate kmo->kmo_signature = signature;
40787c478bd9Sstevel@tonic-gate kmo->kmo_num = 1;
40797c478bd9Sstevel@tonic-gate kmo->kmo_data_size = data_size;
40807c478bd9Sstevel@tonic-gate kmo->kmo_total_size = size;
40817c478bd9Sstevel@tonic-gate kmo->kmo_depth = depth;
40827c478bd9Sstevel@tonic-gate
40837c478bd9Sstevel@tonic-gate for (i = 0; i < depth; i++)
40847c478bd9Sstevel@tonic-gate kmo->kmo_stack[i] = bcp->bc_stack[i];
40857c478bd9Sstevel@tonic-gate }
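/*
 * A detail worth noting about the scheme above: kmu_size is always a
 * power of two (1024, 2048, ...), so masking the signature with
 * (kmu_size - 1) is a cheap modulo when selecting a bucket.
 */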
40867c478bd9Sstevel@tonic-gate
40877c478bd9Sstevel@tonic-gate /*
40887c478bd9Sstevel@tonic-gate * When ::kmausers is invoked without the -f flag, we simply update our hash
40897c478bd9Sstevel@tonic-gate * table with the information from each allocated bufctl.
40907c478bd9Sstevel@tonic-gate */
40917c478bd9Sstevel@tonic-gate /*ARGSUSED*/
40927c478bd9Sstevel@tonic-gate static int
kmause1(uintptr_t addr,const kmem_bufctl_audit_t * bcp,kmusers_t * kmu)40937c478bd9Sstevel@tonic-gate kmause1(uintptr_t addr, const kmem_bufctl_audit_t *bcp, kmusers_t *kmu)
40947c478bd9Sstevel@tonic-gate {
40957c478bd9Sstevel@tonic-gate const kmem_cache_t *cp = kmu->kmu_cache;
40967c478bd9Sstevel@tonic-gate
40977c478bd9Sstevel@tonic-gate kmu_add(kmu, bcp, cp->cache_bufsize, cp->cache_bufsize);
40987c478bd9Sstevel@tonic-gate return (WALK_NEXT);
40997c478bd9Sstevel@tonic-gate }
41007c478bd9Sstevel@tonic-gate
41017c478bd9Sstevel@tonic-gate /*
41027c478bd9Sstevel@tonic-gate * When ::kmausers is invoked with the -f flag, we print out the information
41037c478bd9Sstevel@tonic-gate * for each bufctl as well as updating the hash table.
41047c478bd9Sstevel@tonic-gate */
41057c478bd9Sstevel@tonic-gate static int
kmause2(uintptr_t addr,const kmem_bufctl_audit_t * bcp,kmusers_t * kmu)41067c478bd9Sstevel@tonic-gate kmause2(uintptr_t addr, const kmem_bufctl_audit_t *bcp, kmusers_t *kmu)
41077c478bd9Sstevel@tonic-gate {
41087c478bd9Sstevel@tonic-gate int i, depth = MIN(bcp->bc_depth, KMEM_STACK_DEPTH);
41097c478bd9Sstevel@tonic-gate const kmem_cache_t *cp = kmu->kmu_cache;
41107c478bd9Sstevel@tonic-gate kmem_bufctl_t bufctl;
41117c478bd9Sstevel@tonic-gate
41127c478bd9Sstevel@tonic-gate if (kmu->kmu_addr) {
41137c478bd9Sstevel@tonic-gate if (mdb_vread(&bufctl, sizeof (bufctl), addr) == -1)
41147c478bd9Sstevel@tonic-gate mdb_warn("couldn't read bufctl at %p", addr);
41157c478bd9Sstevel@tonic-gate else if (kmu->kmu_addr < (uintptr_t)bufctl.bc_addr ||
41167c478bd9Sstevel@tonic-gate kmu->kmu_addr >= (uintptr_t)bufctl.bc_addr +
41177c478bd9Sstevel@tonic-gate cp->cache_bufsize)
41187c478bd9Sstevel@tonic-gate return (WALK_NEXT);
41197c478bd9Sstevel@tonic-gate }
41207c478bd9Sstevel@tonic-gate
41217c478bd9Sstevel@tonic-gate mdb_printf("size %d, addr %p, thread %p, cache %s\n",
41227c478bd9Sstevel@tonic-gate cp->cache_bufsize, addr, bcp->bc_thread, cp->cache_name);
41237c478bd9Sstevel@tonic-gate
41247c478bd9Sstevel@tonic-gate for (i = 0; i < depth; i++)
41257c478bd9Sstevel@tonic-gate mdb_printf("\t %a\n", bcp->bc_stack[i]);
41267c478bd9Sstevel@tonic-gate
41277c478bd9Sstevel@tonic-gate kmu_add(kmu, bcp, cp->cache_bufsize, cp->cache_bufsize);
41287c478bd9Sstevel@tonic-gate return (WALK_NEXT);
41297c478bd9Sstevel@tonic-gate }
41307c478bd9Sstevel@tonic-gate
41317c478bd9Sstevel@tonic-gate /*
41327c478bd9Sstevel@tonic-gate * We sort our results by allocation size before printing them.
41337c478bd9Sstevel@tonic-gate */
41347c478bd9Sstevel@tonic-gate static int
kmownercmp(const void * lp,const void * rp)41357c478bd9Sstevel@tonic-gate kmownercmp(const void *lp, const void *rp)
41367c478bd9Sstevel@tonic-gate {
41377c478bd9Sstevel@tonic-gate const kmowner_t *lhs = lp;
41387c478bd9Sstevel@tonic-gate const kmowner_t *rhs = rp;
41397c478bd9Sstevel@tonic-gate
41407c478bd9Sstevel@tonic-gate return (rhs->kmo_total_size - lhs->kmo_total_size);
41417c478bd9Sstevel@tonic-gate }
41427c478bd9Sstevel@tonic-gate
41437c478bd9Sstevel@tonic-gate /*
41447c478bd9Sstevel@tonic-gate * The main engine of ::kmausers is relatively straightforward: First we
41457c478bd9Sstevel@tonic-gate * accumulate our list of kmem_cache_t addresses into the kmclist_t. Next we
41467c478bd9Sstevel@tonic-gate * iterate over the allocated bufctls of each cache in the list. Finally,
41477c478bd9Sstevel@tonic-gate * we sort and print our results.
41487c478bd9Sstevel@tonic-gate */
41497c478bd9Sstevel@tonic-gate /*ARGSUSED*/
41507c478bd9Sstevel@tonic-gate int
kmausers(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)41517c478bd9Sstevel@tonic-gate kmausers(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
41527c478bd9Sstevel@tonic-gate {
41537c478bd9Sstevel@tonic-gate int mem_threshold = 8192; /* Minimum # bytes for printing */
41547c478bd9Sstevel@tonic-gate int cnt_threshold = 100; /* Minimum # blocks for printing */
41557c478bd9Sstevel@tonic-gate int audited_caches = 0; /* Number of KMF_AUDIT caches found */
41567c478bd9Sstevel@tonic-gate int do_all_caches = 1; /* Do all caches (no arguments) */
41577c478bd9Sstevel@tonic-gate int opt_e = FALSE; /* Include "small" users */
41587c478bd9Sstevel@tonic-gate int opt_f = FALSE; /* Print stack traces */
41597c478bd9Sstevel@tonic-gate
41607c478bd9Sstevel@tonic-gate mdb_walk_cb_t callback = (mdb_walk_cb_t)kmause1;
41617c478bd9Sstevel@tonic-gate kmowner_t *kmo, *kmoend;
41627c478bd9Sstevel@tonic-gate int i, oelems;
41637c478bd9Sstevel@tonic-gate
41647c478bd9Sstevel@tonic-gate kmclist_t kmc;
41657c478bd9Sstevel@tonic-gate kmusers_t kmu;
41667c478bd9Sstevel@tonic-gate
41677c478bd9Sstevel@tonic-gate bzero(&kmc, sizeof (kmc));
41687c478bd9Sstevel@tonic-gate bzero(&kmu, sizeof (kmu));
41697c478bd9Sstevel@tonic-gate
41707c478bd9Sstevel@tonic-gate while ((i = mdb_getopts(argc, argv,
41717c478bd9Sstevel@tonic-gate 'e', MDB_OPT_SETBITS, TRUE, &opt_e,
41727c478bd9Sstevel@tonic-gate 'f', MDB_OPT_SETBITS, TRUE, &opt_f, NULL)) != argc) {
41737c478bd9Sstevel@tonic-gate
41747c478bd9Sstevel@tonic-gate argv += i; /* skip past options we just processed */
41757c478bd9Sstevel@tonic-gate argc -= i; /* adjust argc */
41767c478bd9Sstevel@tonic-gate
41777c478bd9Sstevel@tonic-gate if (argv->a_type != MDB_TYPE_STRING || *argv->a_un.a_str == '-')
41787c478bd9Sstevel@tonic-gate return (DCMD_USAGE);
41797c478bd9Sstevel@tonic-gate
41807c478bd9Sstevel@tonic-gate oelems = kmc.kmc_nelems;
41817c478bd9Sstevel@tonic-gate kmc.kmc_name = argv->a_un.a_str;
41827c478bd9Sstevel@tonic-gate (void) mdb_walk("kmem_cache", (mdb_walk_cb_t)kmc_add, &kmc);
41837c478bd9Sstevel@tonic-gate
41847c478bd9Sstevel@tonic-gate if (kmc.kmc_nelems == oelems) {
41857c478bd9Sstevel@tonic-gate mdb_warn("unknown kmem cache: %s\n", kmc.kmc_name);
41867c478bd9Sstevel@tonic-gate return (DCMD_ERR);
41877c478bd9Sstevel@tonic-gate }
41887c478bd9Sstevel@tonic-gate
41897c478bd9Sstevel@tonic-gate do_all_caches = 0;
41907c478bd9Sstevel@tonic-gate argv++;
41917c478bd9Sstevel@tonic-gate argc--;
41927c478bd9Sstevel@tonic-gate }
41937c478bd9Sstevel@tonic-gate
41947c478bd9Sstevel@tonic-gate if (flags & DCMD_ADDRSPEC) {
41957c478bd9Sstevel@tonic-gate opt_f = TRUE;
41967c478bd9Sstevel@tonic-gate kmu.kmu_addr = addr;
41977c478bd9Sstevel@tonic-gate } else {
41987c478bd9Sstevel@tonic-gate kmu.kmu_addr = NULL;
41997c478bd9Sstevel@tonic-gate }
42007c478bd9Sstevel@tonic-gate
42017c478bd9Sstevel@tonic-gate if (opt_e)
42027c478bd9Sstevel@tonic-gate mem_threshold = cnt_threshold = 0;
42037c478bd9Sstevel@tonic-gate
42047c478bd9Sstevel@tonic-gate if (opt_f)
42057c478bd9Sstevel@tonic-gate callback = (mdb_walk_cb_t)kmause2;
42067c478bd9Sstevel@tonic-gate
42077c478bd9Sstevel@tonic-gate if (do_all_caches) {
42087c478bd9Sstevel@tonic-gate kmc.kmc_name = NULL; /* match all cache names */
42097c478bd9Sstevel@tonic-gate (void) mdb_walk("kmem_cache", (mdb_walk_cb_t)kmc_add, &kmc);
42107c478bd9Sstevel@tonic-gate }
42117c478bd9Sstevel@tonic-gate
42127c478bd9Sstevel@tonic-gate for (i = 0; i < kmc.kmc_nelems; i++) {
42137c478bd9Sstevel@tonic-gate uintptr_t cp = kmc.kmc_caches[i];
42147c478bd9Sstevel@tonic-gate kmem_cache_t c;
42157c478bd9Sstevel@tonic-gate
42167c478bd9Sstevel@tonic-gate if (mdb_vread(&c, sizeof (c), cp) == -1) {
42177c478bd9Sstevel@tonic-gate mdb_warn("failed to read cache at %p", cp);
42187c478bd9Sstevel@tonic-gate continue;
42197c478bd9Sstevel@tonic-gate }
42207c478bd9Sstevel@tonic-gate
42217c478bd9Sstevel@tonic-gate if (!(c.cache_flags & KMF_AUDIT)) {
42227c478bd9Sstevel@tonic-gate if (!do_all_caches) {
42237c478bd9Sstevel@tonic-gate mdb_warn("KMF_AUDIT is not enabled for %s\n",
42247c478bd9Sstevel@tonic-gate c.cache_name);
42257c478bd9Sstevel@tonic-gate }
42267c478bd9Sstevel@tonic-gate continue;
42277c478bd9Sstevel@tonic-gate }
42287c478bd9Sstevel@tonic-gate
42297c478bd9Sstevel@tonic-gate kmu.kmu_cache = &c;
42307c478bd9Sstevel@tonic-gate (void) mdb_pwalk("bufctl", callback, &kmu, cp);
42317c478bd9Sstevel@tonic-gate audited_caches++;
42327c478bd9Sstevel@tonic-gate }
42337c478bd9Sstevel@tonic-gate
42347c478bd9Sstevel@tonic-gate if (audited_caches == 0 && do_all_caches) {
42357c478bd9Sstevel@tonic-gate mdb_warn("KMF_AUDIT is not enabled for any caches\n");
42367c478bd9Sstevel@tonic-gate return (DCMD_ERR);
42377c478bd9Sstevel@tonic-gate }
42387c478bd9Sstevel@tonic-gate
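/*
 * Sort the accumulated per-stack-trace owners and print every entry
 * that meets either the size or the count threshold (both thresholds
 * are zero when -e was given, so everything is printed).
 */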
42397c478bd9Sstevel@tonic-gate qsort(kmu.kmu_hash, kmu.kmu_nelems, sizeof (kmowner_t), kmownercmp);
42407c478bd9Sstevel@tonic-gate kmoend = kmu.kmu_hash + kmu.kmu_nelems;
42417c478bd9Sstevel@tonic-gate
42427c478bd9Sstevel@tonic-gate for (kmo = kmu.kmu_hash; kmo < kmoend; kmo++) {
42437c478bd9Sstevel@tonic-gate if (kmo->kmo_total_size < mem_threshold &&
42447c478bd9Sstevel@tonic-gate kmo->kmo_num < cnt_threshold)
42457c478bd9Sstevel@tonic-gate continue;
42467c478bd9Sstevel@tonic-gate mdb_printf("%lu bytes for %u allocations with data size %lu:\n",
42477c478bd9Sstevel@tonic-gate kmo->kmo_total_size, kmo->kmo_num, kmo->kmo_data_size);
42487c478bd9Sstevel@tonic-gate for (i = 0; i < kmo->kmo_depth; i++)
42497c478bd9Sstevel@tonic-gate mdb_printf("\t %a\n", kmo->kmo_stack[i]);
42507c478bd9Sstevel@tonic-gate }
42517c478bd9Sstevel@tonic-gate
42527c478bd9Sstevel@tonic-gate return (DCMD_OK);
42537c478bd9Sstevel@tonic-gate }
42547c478bd9Sstevel@tonic-gate
42557c478bd9Sstevel@tonic-gate void
42567c478bd9Sstevel@tonic-gate kmausers_help(void)
42577c478bd9Sstevel@tonic-gate {
42587c478bd9Sstevel@tonic-gate mdb_printf(
42597c478bd9Sstevel@tonic-gate "Displays the largest users of the kmem allocator, grouped by\n"
42607c478bd9Sstevel@tonic-gate "allocation stack trace. If one or more caches are specified,\n"
42617c478bd9Sstevel@tonic-gate "only those caches are searched; by default, all caches are\n"
42627c478bd9Sstevel@tonic-gate "searched. If an address is specified, only those allocations\n"
42637c478bd9Sstevel@tonic-gate "which include the given address are displayed. Specifying an\n"
42647c478bd9Sstevel@tonic-gate "address implies -f.\n"
42657c478bd9Sstevel@tonic-gate "\n"
42667c478bd9Sstevel@tonic-gate "\t-e\tInclude all users, not just the largest\n"
42677c478bd9Sstevel@tonic-gate "\t-f\tDisplay individual allocations. By default, users are\n"
42687c478bd9Sstevel@tonic-gate "\t\tgrouped by stack\n");
42697c478bd9Sstevel@tonic-gate }
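/*
 * Illustrative invocations (the cache name and address are examples
 * only, and the caches of interest must have KMF_AUDIT enabled or the
 * dcmd will warn as above):
 *
 *	> ::kmausers			largest users across all caches
 *	> ::kmausers -e kmem_alloc_256	every user of one named cache
 *	> <bufaddr>::kmausers		allocations containing <bufaddr>
 */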
42707c478bd9Sstevel@tonic-gate
42717c478bd9Sstevel@tonic-gate static int
42727c478bd9Sstevel@tonic-gate kmem_ready_check(void)
42737c478bd9Sstevel@tonic-gate {
42747c478bd9Sstevel@tonic-gate int ready;
42757c478bd9Sstevel@tonic-gate
42767c478bd9Sstevel@tonic-gate if (mdb_readvar(&ready, "kmem_ready") < 0)
42777c478bd9Sstevel@tonic-gate return (-1); /* errno is set for us */
42787c478bd9Sstevel@tonic-gate
42797c478bd9Sstevel@tonic-gate return (ready);
42807c478bd9Sstevel@tonic-gate }
42817c478bd9Sstevel@tonic-gate
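/*
 * kmem_statechange() is called from kmem_init() below and is meant to
 * be re-invoked when the target's state changes (for example, once a
 * booting kernel's kmem becomes ready).  The first time kmem reports
 * ready we initialize the per-cache walkers; later calls are no-ops.
 */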
4282346799e8SJonathan W Adams void
4283346799e8SJonathan W Adams kmem_statechange(void)
42847c478bd9Sstevel@tonic-gate {
4285789d94c2Sjwadams static int been_ready = 0;
4286789d94c2Sjwadams
4287789d94c2Sjwadams if (been_ready)
4288789d94c2Sjwadams return;
4289789d94c2Sjwadams
42907c478bd9Sstevel@tonic-gate if (kmem_ready_check() <= 0)
42917c478bd9Sstevel@tonic-gate return;
42927c478bd9Sstevel@tonic-gate
4293789d94c2Sjwadams been_ready = 1;
42947c478bd9Sstevel@tonic-gate (void) mdb_walk("kmem_cache", (mdb_walk_cb_t)kmem_init_walkers, NULL);
42957c478bd9Sstevel@tonic-gate }
42967c478bd9Sstevel@tonic-gate
42977c478bd9Sstevel@tonic-gate void
42987c478bd9Sstevel@tonic-gate kmem_init(void)
42997c478bd9Sstevel@tonic-gate {
43007c478bd9Sstevel@tonic-gate mdb_walker_t w = {
43017c478bd9Sstevel@tonic-gate "kmem_cache", "walk list of kmem caches", kmem_cache_walk_init,
4302b5fca8f8Stomee list_walk_step, list_walk_fini
43037c478bd9Sstevel@tonic-gate };
43047c478bd9Sstevel@tonic-gate
43057c478bd9Sstevel@tonic-gate /*
43067c478bd9Sstevel@tonic-gate * We'll need to invoke the kmem_cache walker immediately, but walkers
43077c478bd9Sstevel@tonic-gate * in the linkage structure won't be ready until _mdb_init returns, so
43087c478bd9Sstevel@tonic-gate * we add this one manually. If kmem is already ready, we use the
43097c478bd9Sstevel@tonic-gate * walker right away to initialize the caches. If kmem isn't ready,
43107c478bd9Sstevel@tonic-gate * we register a callback that allows us to defer cache walking until
43117c478bd9Sstevel@tonic-gate * it is.
43127c478bd9Sstevel@tonic-gate */
43137c478bd9Sstevel@tonic-gate if (mdb_add_walker(&w) != 0) {
43147c478bd9Sstevel@tonic-gate mdb_warn("failed to add kmem_cache walker");
43157c478bd9Sstevel@tonic-gate return;
43167c478bd9Sstevel@tonic-gate }
43177c478bd9Sstevel@tonic-gate
4318346799e8SJonathan W Adams kmem_statechange();
43194a1c2431SJonathan Adams
43204a1c2431SJonathan Adams /* register our ::whatis handlers */
43214a1c2431SJonathan Adams mdb_whatis_register("modules", whatis_run_modules, NULL,
43224a1c2431SJonathan Adams WHATIS_PRIO_EARLY, WHATIS_REG_NO_ID);
43234a1c2431SJonathan Adams mdb_whatis_register("threads", whatis_run_threads, NULL,
43244a1c2431SJonathan Adams WHATIS_PRIO_EARLY, WHATIS_REG_NO_ID);
43254a1c2431SJonathan Adams mdb_whatis_register("pages", whatis_run_pages, NULL,
43264a1c2431SJonathan Adams WHATIS_PRIO_EARLY, WHATIS_REG_NO_ID);
43274a1c2431SJonathan Adams mdb_whatis_register("kmem", whatis_run_kmem, NULL,
43284a1c2431SJonathan Adams WHATIS_PRIO_ALLOCATOR, 0);
43294a1c2431SJonathan Adams mdb_whatis_register("vmem", whatis_run_vmem, NULL,
43304a1c2431SJonathan Adams WHATIS_PRIO_ALLOCATOR, 0);
43317c478bd9Sstevel@tonic-gate }
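/*
 * A minimal sketch of how a debugger module is expected to drive the two
 * entry points above (illustrative only: "modinfo" stands in for the
 * module's own dcmd/walker table, and the real linkage lives in the
 * module's main source file, not here).  kmem_init() adds the kmem_cache
 * walker and, if kmem is ready, initializes the caches:
 *
 *	const mdb_modinfo_t *
 *	_mdb_init(void)
 *	{
 *		kmem_init();
 *		return (&modinfo);
 *	}
 *
 * kmem_statechange() is then invoked again on target state changes so
 * that any deferred cache initialization can complete.
 */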
43327c478bd9Sstevel@tonic-gate
43337c478bd9Sstevel@tonic-gate typedef struct whatthread {
43347c478bd9Sstevel@tonic-gate uintptr_t wt_target;	/* pointer value to look for on thread stacks */
43357c478bd9Sstevel@tonic-gate int wt_verbose;		/* also print the stack location of each match */
43367c478bd9Sstevel@tonic-gate } whatthread_t;
43377c478bd9Sstevel@tonic-gate
43387c478bd9Sstevel@tonic-gate static int
43397c478bd9Sstevel@tonic-gate whatthread_walk_thread(uintptr_t addr, const kthread_t *t, whatthread_t *w)
43407c478bd9Sstevel@tonic-gate {
43417c478bd9Sstevel@tonic-gate uintptr_t current, data;
43427c478bd9Sstevel@tonic-gate
43437c478bd9Sstevel@tonic-gate if (t->t_stkbase == NULL)
43447c478bd9Sstevel@tonic-gate return (WALK_NEXT);
43457c478bd9Sstevel@tonic-gate
43467c478bd9Sstevel@tonic-gate /*
43477c478bd9Sstevel@tonic-gate * Warn about swapped-out threads, but drive on anyway.
43487c478bd9Sstevel@tonic-gate */
43497c478bd9Sstevel@tonic-gate if (!(t->t_schedflag & TS_LOAD)) {
43507c478bd9Sstevel@tonic-gate mdb_warn("thread %p's stack swapped out\n", addr);
43517c478bd9Sstevel@tonic-gate return (WALK_NEXT);
43527c478bd9Sstevel@tonic-gate }
43537c478bd9Sstevel@tonic-gate
43547c478bd9Sstevel@tonic-gate /*
43557c478bd9Sstevel@tonic-gate * Search the thread's stack for the given pointer. Note that it would
43567c478bd9Sstevel@tonic-gate * be more efficient to follow ::kgrep's lead and read in page-sized
43577c478bd9Sstevel@tonic-gate * chunks, but this routine is already fast and simple.
43587c478bd9Sstevel@tonic-gate */
43597c478bd9Sstevel@tonic-gate for (current = (uintptr_t)t->t_stkbase; current < (uintptr_t)t->t_stk;
43607c478bd9Sstevel@tonic-gate current += sizeof (uintptr_t)) {
43617c478bd9Sstevel@tonic-gate if (mdb_vread(&data, sizeof (data), current) == -1) {
43627c478bd9Sstevel@tonic-gate mdb_warn("couldn't read thread %p's stack at %p",
43637c478bd9Sstevel@tonic-gate addr, current);
43647c478bd9Sstevel@tonic-gate return (WALK_ERR);
43657c478bd9Sstevel@tonic-gate }
43667c478bd9Sstevel@tonic-gate
43677c478bd9Sstevel@tonic-gate if (data == w->wt_target) {
43687c478bd9Sstevel@tonic-gate if (w->wt_verbose) {
43697c478bd9Sstevel@tonic-gate mdb_printf("%p in thread %p's stack%s\n",
43707c478bd9Sstevel@tonic-gate current, addr, stack_active(t, current));
43717c478bd9Sstevel@tonic-gate } else {
43727c478bd9Sstevel@tonic-gate mdb_printf("%#lr\n", addr);
43737c478bd9Sstevel@tonic-gate return (WALK_NEXT);
43747c478bd9Sstevel@tonic-gate }
43757c478bd9Sstevel@tonic-gate }
43767c478bd9Sstevel@tonic-gate }
43777c478bd9Sstevel@tonic-gate
43787c478bd9Sstevel@tonic-gate return (WALK_NEXT);
43797c478bd9Sstevel@tonic-gate }
43807c478bd9Sstevel@tonic-gate
43817c478bd9Sstevel@tonic-gate int
43827c478bd9Sstevel@tonic-gate whatthread(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
43837c478bd9Sstevel@tonic-gate {
43847c478bd9Sstevel@tonic-gate whatthread_t w;
43857c478bd9Sstevel@tonic-gate
43867c478bd9Sstevel@tonic-gate if (!(flags & DCMD_ADDRSPEC))
43877c478bd9Sstevel@tonic-gate return (DCMD_USAGE);
43887c478bd9Sstevel@tonic-gate
43897c478bd9Sstevel@tonic-gate w.wt_verbose = FALSE;
43907c478bd9Sstevel@tonic-gate w.wt_target = addr;
43917c478bd9Sstevel@tonic-gate
43927c478bd9Sstevel@tonic-gate if (mdb_getopts(argc, argv,
43937c478bd9Sstevel@tonic-gate 'v', MDB_OPT_SETBITS, TRUE, &w.wt_verbose, NULL) != argc)
43947c478bd9Sstevel@tonic-gate return (DCMD_USAGE);
43957c478bd9Sstevel@tonic-gate
43967c478bd9Sstevel@tonic-gate if (mdb_walk("thread", (mdb_walk_cb_t)whatthread_walk_thread, &w)
43977c478bd9Sstevel@tonic-gate == -1) {
43987c478bd9Sstevel@tonic-gate mdb_warn("couldn't walk threads");
43997c478bd9Sstevel@tonic-gate return (DCMD_ERR);
44007c478bd9Sstevel@tonic-gate }
44017c478bd9Sstevel@tonic-gate
44027c478bd9Sstevel@tonic-gate return (DCMD_OK);
44037c478bd9Sstevel@tonic-gate }
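/*
 * Example (the address is illustrative):
 *
 *	> <addr>::whatthread -v
 *
 * prints, for each kernel thread whose stack contains a pointer-sized
 * word equal to <addr>, the stack location that holds it; without -v
 * only the matching thread addresses are printed.
 */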