/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <mdb/mdb_param.h>
#include <mdb/mdb_modapi.h>
#include <mdb/mdb_ks.h>
#include <sys/types.h>
#include <sys/memlist.h>
#include <sys/swap.h>
#include <sys/systm.h>
#include <sys/thread.h>
#include <vm/anon.h>
#include <vm/as.h>
#include <vm/page.h>
#include <sys/vnode.h>
#include <vm/seg_map.h>
#include <vm/seg_vn.h>
#if defined(__i386) || defined(__amd64)
#include <sys/balloon_impl.h>
#endif

#include "avl.h"

/*
 * Page walker.
 * By default, this will walk all pages in the system.  If given an
 * address, it will walk all pages belonging to the vnode at that
 * address.
 */

/*
 * page_walk_data
 *
 * pw_hashleft is set to -1 when walking a vnode's pages, and holds the
 * number of hash locations remaining in the page hash table when
 * walking all pages.
 *
 * The astute reader will notice that pw_hashloc is only used when
 * reading all pages (to hold a pointer to our location in the page
 * hash table), and that pw_first is only used when reading the pages
 * belonging to a particular vnode (to hold a pointer to the first
 * page).  While these could be combined into a single pointer, they
 * are left separate for clarity.
 */
typedef struct page_walk_data {
	long		pw_hashleft;
	void		**pw_hashloc;
	uintptr_t	pw_first;
} page_walk_data_t;

int
page_walk_init(mdb_walk_state_t *wsp)
{
	page_walk_data_t	*pwd;
	void	**ptr;
	size_t	hashsz;
	vnode_t	vn;

	if (wsp->walk_addr == NULL) {

		/*
		 * Walk all pages
		 */

		if ((mdb_readvar(&ptr, "page_hash") == -1) ||
		    (mdb_readvar(&hashsz, "page_hashsz") == -1) ||
		    (ptr == NULL) || (hashsz == 0)) {
			mdb_warn("page_hash, page_hashsz not found or invalid");
			return (WALK_ERR);
		}

		/*
		 * Since we are walking all pages, initialize hashleft
		 * to the remaining number of entries in the page hash
		 * table.  hashloc is set to the start of the page hash
		 * table.  Setting the walk address to 0 indicates that
		 * we aren't currently following a hash chain, and that
		 * we need to scan the page hash table for a page.
		 */
		pwd = mdb_alloc(sizeof (page_walk_data_t), UM_SLEEP);
		pwd->pw_hashleft = hashsz;
		pwd->pw_hashloc = ptr;
		wsp->walk_addr = 0;
	} else {

		/*
		 * Walk just this vnode
		 */

		if (mdb_vread(&vn, sizeof (vnode_t), wsp->walk_addr) == -1) {
			mdb_warn("unable to read vnode_t at %#lx",
			    wsp->walk_addr);
			return (WALK_ERR);
		}

		/*
		 * We set hashleft to -1 to indicate that we are
		 * walking a vnode, and initialize first to 0 (it is
		 * used to terminate the walk, so it must not be set
		 * until after we have walked the first page).  The
		 * walk address is set to the first page.
		 */
		pwd = mdb_alloc(sizeof (page_walk_data_t), UM_SLEEP);
		pwd->pw_hashleft = -1;
		pwd->pw_first = 0;

		wsp->walk_addr = (uintptr_t)vn.v_pages;
	}

	wsp->walk_data = pwd;

	return (WALK_NEXT);
}

int
page_walk_step(mdb_walk_state_t *wsp)
{
	page_walk_data_t	*pwd = wsp->walk_data;
	page_t		page;
	uintptr_t	pp;

	pp = wsp->walk_addr;

	if (pwd->pw_hashleft < 0) {

		/* We're walking a vnode's pages */

		/*
		 * If we don't have any pages to walk, if we have come
		 * back around to the first one (we finished), or if we
		 * can't read the page we're looking at, we are done.
		 */
		if (pp == NULL || pp == pwd->pw_first)
			return (WALK_DONE);
		if (mdb_vread(&page, sizeof (page_t), pp) == -1) {
			mdb_warn("unable to read page_t at %#lx", pp);
			return (WALK_ERR);
		}

		/*
		 * Set the walk address to the next page, and if the
		 * first page hasn't been set yet (i.e. we are on the
		 * first page), set it.
		 */
		wsp->walk_addr = (uintptr_t)page.p_vpnext;
		if (pwd->pw_first == NULL)
			pwd->pw_first = pp;

	} else if (pwd->pw_hashleft > 0) {

		/* We're walking all pages */

		/*
		 * If pp (the walk address) is NULL, we scan through
		 * the page hash table until we find a page.
		 */
		if (pp == NULL) {

			/*
			 * Iterate through the page hash table until we
			 * find a page or reach the end.
			 */
			do {
				if (mdb_vread(&pp, sizeof (uintptr_t),
				    (uintptr_t)pwd->pw_hashloc) == -1) {
					mdb_warn("unable to read from %#p",
					    pwd->pw_hashloc);
					return (WALK_ERR);
				}
				pwd->pw_hashleft--;
				pwd->pw_hashloc++;
			} while (pwd->pw_hashleft && (pp == NULL));

			/*
			 * We've reached the end; exit.
			 */
			if (pp == NULL)
				return (WALK_DONE);
		}

		if (mdb_vread(&page, sizeof (page_t), pp) == -1) {
			mdb_warn("unable to read page_t at %#lx", pp);
			return (WALK_ERR);
		}

		/*
		 * Set the walk address to the next page.
		 */
		wsp->walk_addr = (uintptr_t)page.p_hash;

	} else {
		/* We've finished walking all pages. */
		return (WALK_DONE);
	}

	return (wsp->walk_callback(pp, &page, wsp->walk_cbdata));
}

void
page_walk_fini(mdb_walk_state_t *wsp)
{
	mdb_free(wsp->walk_data, sizeof (page_walk_data_t));
}
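
/*
 * Illustrative usage of the walker above, assuming it is registered with
 * mdb under the name "page" (the name the ::page dcmd below walks):
 *
 *	> ::walk page			- every page_t on the page hash
 *	> <vnode addr>::walk page	- only that vnode's pages
 */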

/*
 * allpages walks all pages in the system, in the order in which they
 * appear in the memseg structures.
 */

#define	PAGE_BUFFER	128

int
allpages_walk_init(mdb_walk_state_t *wsp)
{
	if (wsp->walk_addr != 0) {
		mdb_warn("allpages only supports global walks.\n");
		return (WALK_ERR);
	}

	if (mdb_layered_walk("memseg", wsp) == -1) {
		mdb_warn("couldn't walk 'memseg'");
		return (WALK_ERR);
	}

	wsp->walk_data = mdb_alloc(sizeof (page_t) * PAGE_BUFFER, UM_SLEEP);
	return (WALK_NEXT);
}

int
allpages_walk_step(mdb_walk_state_t *wsp)
{
	const struct memseg *msp = wsp->walk_layer;
	page_t *buf = wsp->walk_data;
	size_t pg_read, i;
	size_t pg_num = msp->pages_end - msp->pages_base;
	const page_t *pg_addr = msp->pages;

	while (pg_num > 0) {
		pg_read = MIN(pg_num, PAGE_BUFFER);

		if (mdb_vread(buf, pg_read * sizeof (page_t),
		    (uintptr_t)pg_addr) == -1) {
			mdb_warn("can't read page_t's at %#lx", pg_addr);
			return (WALK_ERR);
		}
		for (i = 0; i < pg_read; i++) {
			int ret = wsp->walk_callback((uintptr_t)&pg_addr[i],
			    &buf[i], wsp->walk_cbdata);

			if (ret != WALK_NEXT)
				return (ret);
		}
		pg_num -= pg_read;
		pg_addr += pg_read;
	}

	return (WALK_NEXT);
}

void
allpages_walk_fini(mdb_walk_state_t *wsp)
{
	mdb_free(wsp->walk_data, sizeof (page_t) * PAGE_BUFFER);
}
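
/*
 * Note on the walker above: rather than issuing one mdb_vread() per page_t,
 * allpages_walk_step() reads up to PAGE_BUFFER page_t's from each memseg at
 * a time, keeping the number of target reads low.  Illustrative usage,
 * assuming the walker is registered under the name "allpages" (the name
 * ::memstat uses below):
 *
 *	> ::walk allpages | ::page
 */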

/*
 * Hash table + LRU queue.
 * This table is used to cache recently read vnodes for the memstat
 * command, to reduce the number of mdb_vread calls.  This greatly
 * speeds up the memstat command on live, large-CPU-count systems.
 */

#define	VN_SMALL	401
#define	VN_LARGE	10007
#define	VN_HTABLE_KEY(p, hp)	((p) % ((hp)->vn_htable_buckets))

struct vn_htable_list {
	uint_t vn_flag;				/* v_flag from vnode	*/
	uintptr_t vn_ptr;			/* pointer to vnode	*/
	struct vn_htable_list *vn_q_next;	/* queue next pointer	*/
	struct vn_htable_list *vn_q_prev;	/* queue prev pointer	*/
	struct vn_htable_list *vn_h_next;	/* hash table pointer	*/
};

/*
 * vn_q_first -> points to the head of the queue: the vnode that was most
 *		 recently used
 * vn_q_last  -> points to the oldest used vnode, and is freed once a new
 *		 vnode is read.
 * vn_htable  -> hash table
 * vn_htable_buf -> contains htable objects
 * vn_htable_size -> total number of items in the hash table
 * vn_htable_buckets -> number of buckets in the hash table
 */
typedef struct vn_htable {
	struct vn_htable_list  *vn_q_first;
	struct vn_htable_list  *vn_q_last;
	struct vn_htable_list **vn_htable;
	struct vn_htable_list  *vn_htable_buf;
	int vn_htable_size;
	int vn_htable_buckets;
} vn_htable_t;


/* Allocate memory, initialize the hash table and the LRU queue. */
static void
vn_htable_init(vn_htable_t *hp, size_t vn_size)
{
	int i;
	int htable_size = MAX(vn_size, VN_LARGE);

	if ((hp->vn_htable_buf = mdb_zalloc(sizeof (struct vn_htable_list)
	    * htable_size, UM_NOSLEEP|UM_GC)) == NULL) {
		htable_size = VN_SMALL;
		hp->vn_htable_buf = mdb_zalloc(sizeof (struct vn_htable_list)
		    * htable_size, UM_SLEEP|UM_GC);
	}

	hp->vn_htable = mdb_zalloc(sizeof (struct vn_htable_list *)
	    * htable_size, UM_SLEEP|UM_GC);

	hp->vn_q_first = &hp->vn_htable_buf[0];
	hp->vn_q_last  = &hp->vn_htable_buf[htable_size - 1];
	hp->vn_q_first->vn_q_next = &hp->vn_htable_buf[1];
	hp->vn_q_last->vn_q_prev = &hp->vn_htable_buf[htable_size - 2];

	for (i = 1; i < (htable_size - 1); i++) {
		hp->vn_htable_buf[i].vn_q_next = &hp->vn_htable_buf[i + 1];
		hp->vn_htable_buf[i].vn_q_prev = &hp->vn_htable_buf[i - 1];
	}

	hp->vn_htable_size = htable_size;
	hp->vn_htable_buckets = htable_size;
}
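
/*
 * Note: both allocations above are made with UM_GC, so mdb garbage-collects
 * them automatically when the current dcmd completes; that is why there is
 * no corresponding vn_htable teardown routine.
 */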

/*
 * Find the vnode whose address is ptr, and return its v_flag in vp->v_flag.
 * The function tries to find the needed information in the following order:
 *
 * 1. check if ptr is the first in the queue
 * 2. check if ptr is in the hash table (if so, move it to the head of the
 *    queue)
 * 3. do mdb_vread, remove the last queue item from the queue and the hash
 *    table, insert the new information into the freed object, and put that
 *    object at the head of the queue.
 */
static int
vn_get(vn_htable_t *hp, struct vnode *vp, uintptr_t ptr)
{
	int hkey;
	struct vn_htable_list *hent, **htmp, *q_next, *q_prev;
	struct vn_htable_list  *q_first = hp->vn_q_first;

	/* 1. vnode ptr is the first in the queue, just get v_flag and return */
	if (q_first->vn_ptr == ptr) {
		vp->v_flag = q_first->vn_flag;

		return (0);
	}

	/* 2. search the hash table for this ptr */
	hkey = VN_HTABLE_KEY(ptr, hp);
	hent = hp->vn_htable[hkey];
	while (hent && (hent->vn_ptr != ptr))
		hent = hent->vn_h_next;

	/* 3. hent == NULL means it is not in the hash table; do mdb_vread */
	if (hent == NULL) {
		struct vnode vn;

		if (mdb_vread(&vn, sizeof (vnode_t), ptr) == -1) {
			mdb_warn("unable to read vnode_t at %#lx", ptr);
			return (-1);
		}

		/* we will insert the read data into the last element in the queue */
		hent = hp->vn_q_last;

		/* remove the last (hp->vn_q_last) object from the hash table */
		if (hent->vn_ptr) {
			htmp = &hp->vn_htable[VN_HTABLE_KEY(hent->vn_ptr, hp)];
			while (*htmp != hent)
				htmp = &(*htmp)->vn_h_next;
			*htmp = hent->vn_h_next;
		}

		/* insert the data into the newly freed object */
		hent->vn_ptr = ptr;
		hent->vn_flag = vn.v_flag;

		/* insert the new object into the hash table */
		hent->vn_h_next = hp->vn_htable[hkey];
		hp->vn_htable[hkey] = hent;
	}

	/* Remove from the queue.  hent is not first, so vn_q_prev is not NULL. */
	q_next = hent->vn_q_next;
	q_prev = hent->vn_q_prev;
	if (q_next == NULL)
		hp->vn_q_last = q_prev;
	else
		q_next->vn_q_prev = q_prev;
	q_prev->vn_q_next = q_next;

	/* Add to the front of the queue */
	hent->vn_q_prev = NULL;
	hent->vn_q_next = q_first;
	q_first->vn_q_prev = hent;
	hp->vn_q_first = hent;

	/* Set v_flag in the vnode pointer from hent */
	vp->v_flag = hent->vn_flag;

	return (0);
}

/* Summary statistics of pages */
typedef struct memstat {
	struct vnode	*ms_kvp;	/* Cached address of kernel vnode */
	struct vnode	*ms_unused_vp;	/* Unused pages vnode pointer	  */
	struct vnode	*ms_zvp;	/* Cached address of zio vnode    */
	uint64_t	ms_kmem;	/* Pages of kernel memory	  */
	uint64_t	ms_zfs_data;	/* Pages of zfs data		  */
	uint64_t	ms_anon;	/* Pages of anonymous memory	  */
	uint64_t	ms_vnode;	/* Pages of named (vnode) memory  */
	uint64_t	ms_exec;	/* Pages of exec/library memory	  */
	uint64_t	ms_cachelist;	/* Pages on the cachelist (free)  */
	uint64_t	ms_total;	/* Pages on page hash		  */
	vn_htable_t	*ms_vn_htable;	/* Pointer to hash table	  */
	struct vnode	ms_vn;		/* vnode buffer			  */
} memstat_t;

#define	MS_PP_ISKAS(pp, stats)				\
	((pp)->p_vnode == (stats)->ms_kvp)

#define	MS_PP_ISZFS_DATA(pp, stats)			\
	(((stats)->ms_zvp != NULL) && ((pp)->p_vnode == (stats)->ms_zvp))

/*
 * Summarize pages by type and update the stat information.
 */

/* ARGSUSED */
static int
memstat_callback(page_t *page, page_t *pp, memstat_t *stats)
{
	struct vnode *vp = &stats->ms_vn;

	if (pp->p_vnode == NULL || pp->p_vnode == stats->ms_unused_vp)
		return (WALK_NEXT);
	else if (MS_PP_ISKAS(pp, stats))
		stats->ms_kmem++;
	else if (MS_PP_ISZFS_DATA(pp, stats))
		stats->ms_zfs_data++;
	else if (PP_ISFREE(pp))
		stats->ms_cachelist++;
	else if (vn_get(stats->ms_vn_htable, vp, (uintptr_t)pp->p_vnode))
		return (WALK_ERR);
	else if (IS_SWAPFSVP(vp))
		stats->ms_anon++;
	else if ((vp->v_flag & VVMEXEC) != 0)
		stats->ms_exec++;
	else
		stats->ms_vnode++;

	stats->ms_total++;

	return (WALK_NEXT);
}
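
/*
 * ::memstat drives memstat_callback() over the "allpages" walk and then
 * prints one row per category (Kernel, ZFS File Data, Anon, Exec and libs,
 * Page cache, Free (cachelist), Free (freelist)), each with a page count,
 * a size in MB, and a percentage of physmem.
 */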

/* ARGSUSED */
int
memstat(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	pgcnt_t total_pages, physmem;
	ulong_t freemem;
	memstat_t stats;
	GElf_Sym sym;
	vn_htable_t ht;
	struct vnode *kvps;
	uintptr_t vn_size = 0;
#if defined(__i386) || defined(__amd64)
	bln_stats_t bln_stats;
	ssize_t bln_size;
#endif

	bzero(&stats, sizeof (memstat_t));

	/*
	 * -s size is an internal option.  It specifies the size of vn_htable.
	 * The hash table size is chosen as follows: if the user specified a
	 * size larger than VN_LARGE, try it, but fall back to VN_SMALL if
	 * the allocation fails.  Otherwise try VN_LARGE, again falling back
	 * to VN_SMALL if the allocation fails.  For better hash table
	 * efficiency it is highly recommended to use a prime number for the
	 * size.
	 */
	if ((flags & DCMD_ADDRSPEC) || mdb_getopts(argc, argv,
	    's', MDB_OPT_UINTPTR, &vn_size, NULL) != argc)
		return (DCMD_USAGE);

	/* Initialize the vnode hash list and queue */
	vn_htable_init(&ht, vn_size);
	stats.ms_vn_htable = &ht;

	/* Total physical memory */
	if (mdb_readvar(&total_pages, "total_pages") == -1) {
		mdb_warn("unable to read total_pages");
		return (DCMD_ERR);
	}

	/* Artificially limited memory */
	if (mdb_readvar(&physmem, "physmem") == -1) {
		mdb_warn("unable to read physmem");
		return (DCMD_ERR);
	}

	/* read the kernel vnode array pointer */
	if (mdb_lookup_by_obj(MDB_OBJ_EXEC, "kvps",
	    (GElf_Sym *)&sym) == -1) {
		mdb_warn("unable to read kvps");
		return (DCMD_ERR);
	}
	kvps = (struct vnode *)(uintptr_t)sym.st_value;
	stats.ms_kvp = &kvps[KV_KVP];

	/*
	 * Read the zio vnode pointer.
	 */
	stats.ms_zvp = &kvps[KV_ZVP];

	/*
	 * If physmem != total_pages, then the administrator has limited the
	 * number of pages available in the system.  Excluded pages are
	 * associated with the unused pages vnode.  Read this vnode so the
	 * pages can be excluded in the page accounting.
	 */
	if (mdb_lookup_by_obj(MDB_OBJ_EXEC, "unused_pages_vp",
	    (GElf_Sym *)&sym) == -1) {
		mdb_warn("unable to read unused_pages_vp");
		return (DCMD_ERR);
	}
	stats.ms_unused_vp = (struct vnode *)(uintptr_t)sym.st_value;

	/* walk all pages, collect statistics */
	if (mdb_walk("allpages", (mdb_walk_cb_t)memstat_callback,
	    &stats) == -1) {
		mdb_warn("can't walk memseg");
		return (DCMD_ERR);
	}

#define	MS_PCT_TOTAL(x)	((ulong_t)((((5 * total_pages) + ((x) * 1000ull))) / \
		((physmem) * 10)))
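
/*
 * MS_PCT_TOTAL(x) expresses x pages as an integer percentage of physmem.
 * The (5 * total_pages) term biases the division so the result is rounded
 * to the nearest percent (exactly so when total_pages == physmem, the
 * common case), and the 1000ull multiplier forces the arithmetic into
 * 64-bit to avoid overflow on large-memory systems.
 */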

	mdb_printf("Page Summary                Pages                MB"
	    "  %%Tot\n");
	mdb_printf("------------     ----------------  ----------------"
	    "  ----\n");
	mdb_printf("Kernel           %16llu  %16llu  %3lu%%\n",
	    stats.ms_kmem,
	    (uint64_t)stats.ms_kmem * PAGESIZE / (1024 * 1024),
	    MS_PCT_TOTAL(stats.ms_kmem));

	if (stats.ms_zfs_data != 0)
		mdb_printf("ZFS File Data    %16llu  %16llu  %3lu%%\n",
		    stats.ms_zfs_data,
		    (uint64_t)stats.ms_zfs_data * PAGESIZE / (1024 * 1024),
		    MS_PCT_TOTAL(stats.ms_zfs_data));

	mdb_printf("Anon             %16llu  %16llu  %3lu%%\n",
	    stats.ms_anon,
	    (uint64_t)stats.ms_anon * PAGESIZE / (1024 * 1024),
	    MS_PCT_TOTAL(stats.ms_anon));
	mdb_printf("Exec and libs    %16llu  %16llu  %3lu%%\n",
	    stats.ms_exec,
	    (uint64_t)stats.ms_exec * PAGESIZE / (1024 * 1024),
	    MS_PCT_TOTAL(stats.ms_exec));
	mdb_printf("Page cache       %16llu  %16llu  %3lu%%\n",
	    stats.ms_vnode,
	    (uint64_t)stats.ms_vnode * PAGESIZE / (1024 * 1024),
	    MS_PCT_TOTAL(stats.ms_vnode));
	mdb_printf("Free (cachelist) %16llu  %16llu  %3lu%%\n",
	    stats.ms_cachelist,
	    (uint64_t)stats.ms_cachelist * PAGESIZE / (1024 * 1024),
	    MS_PCT_TOTAL(stats.ms_cachelist));

	/*
	 * Occasionally, we double count pages above.  To avoid printing
	 * absurdly large values for freemem, we clamp it at zero.
	 */
	if (physmem > stats.ms_total)
		freemem = physmem - stats.ms_total;
	else
		freemem = 0;

#if defined(__i386) || defined(__amd64)
	/* Are we running under Xen?  If so, get balloon memory usage. */
	if ((bln_size = mdb_readvar(&bln_stats, "bln_stats")) != -1) {
		if (freemem > bln_stats.bln_hv_pages)
			freemem -= bln_stats.bln_hv_pages;
		else
			freemem = 0;
	}
#endif

	mdb_printf("Free (freelist)  %16lu  %16llu  %3lu%%\n", freemem,
	    (uint64_t)freemem * PAGESIZE / (1024 * 1024),
	    MS_PCT_TOTAL(freemem));

#if defined(__i386) || defined(__amd64)
	if (bln_size != -1) {
		mdb_printf("Balloon          %16lu  %16llu  %3lu%%\n",
		    bln_stats.bln_hv_pages,
		    (uint64_t)bln_stats.bln_hv_pages * PAGESIZE /
		    (1024 * 1024),
		    MS_PCT_TOTAL(bln_stats.bln_hv_pages));
	}
#endif

	mdb_printf("\nTotal            %16lu  %16lu\n",
	    physmem,
	    (uint64_t)physmem * PAGESIZE / (1024 * 1024));

	if (physmem != total_pages) {
		mdb_printf("Physical         %16lu  %16lu\n",
		    total_pages,
		    (uint64_t)total_pages * PAGESIZE / (1024 * 1024));
	}

#undef MS_PCT_TOTAL

	return (DCMD_OK);
}

void
pagelookup_help(void)
{
	mdb_printf(
	    "Finds the page with name { %<b>vp%</b>, %<b>offset%</b> }.\n"
	    "\n"
	    "Can be invoked three different ways:\n\n"
	    "    ::pagelookup -v %<b>vp%</b> -o %<b>offset%</b>\n"
	    "    %<b>vp%</b>::pagelookup -o %<b>offset%</b>\n"
	    "    %<b>offset%</b>::pagelookup -v %<b>vp%</b>\n"
	    "\n"
	    "The latter two forms are useful in pipelines.\n");
}

int
pagelookup(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	uintptr_t vp = -(uintptr_t)1;
	uint64_t offset = -(uint64_t)1;

	uintptr_t pageaddr;
	int hasaddr = (flags & DCMD_ADDRSPEC);
	int usedaddr = 0;

	if (mdb_getopts(argc, argv,
	    'v', MDB_OPT_UINTPTR, &vp,
	    'o', MDB_OPT_UINT64, &offset,
	    0) != argc) {
		return (DCMD_USAGE);
	}

	if (vp == -(uintptr_t)1) {
		if (offset == -(uint64_t)1) {
			mdb_warn(
			    "pagelookup: at least one of -v vp or -o offset "
			    "required.\n");
			return (DCMD_USAGE);
		}
		vp = addr;
		usedaddr = 1;
	} else if (offset == -(uint64_t)1) {
		offset = mdb_get_dot();
		usedaddr = 1;
	}
	if (usedaddr && !hasaddr) {
		mdb_warn("pagelookup: address required\n");
		return (DCMD_USAGE);
	}
	if (!usedaddr && hasaddr) {
		mdb_warn(
		    "pagelookup: address specified when both -v and -o were "
		    "passed");
		return (DCMD_USAGE);
	}

	pageaddr = mdb_page_lookup(vp, offset);
	if (pageaddr == 0) {
		mdb_warn("pagelookup: no page for {vp = %p, offset = %llp}\n",
		    vp, offset);
		return (DCMD_OK);
	}
	mdb_printf("%#lr\n", pageaddr);	/* this is PIPE_OUT friendly */
	return (DCMD_OK);
}

/*ARGSUSED*/
int
page_num2pp(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	uintptr_t pp;

	if (argc != 0 || !(flags & DCMD_ADDRSPEC)) {
		return (DCMD_USAGE);
	}

	pp = mdb_pfn2page((pfn_t)addr);
	if (pp == 0) {
		return (DCMD_ERR);
	}

	if (flags & DCMD_PIPE_OUT) {
		mdb_printf("%#lr\n", pp);
	} else {
		mdb_printf("%lx has page_t at %#lx\n", (pfn_t)addr, pp);
	}

	return (DCMD_OK);
}

int
page(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	page_t	p;

	if (!(flags & DCMD_ADDRSPEC)) {
		if (mdb_walk_dcmd("page", "page", argc, argv) == -1) {
			mdb_warn("can't walk pages");
			return (DCMD_ERR);
		}
		return (DCMD_OK);
	}

	if (DCMD_HDRSPEC(flags)) {
		mdb_printf("%<u>%?s %?s %16s %8s %3s %3s %2s %2s %2s%</u>\n",
		    "PAGE", "VNODE", "OFFSET", "SELOCK",
		    "LCT", "COW", "IO", "FS", "ST");
	}

	if (mdb_vread(&p, sizeof (page_t), addr) == -1) {
		mdb_warn("can't read page_t at %#lx", addr);
		return (DCMD_ERR);
	}

	mdb_printf("%0?lx %?p %16llx %8x %3d %3d %2x %2x %2x\n",
	    addr, p.p_vnode, p.p_offset, p.p_selock, p.p_lckcnt, p.p_cowcnt,
	    p.p_iolock_state, p.p_fsdata, p.p_state);

	return (DCMD_OK);
}

int
swap_walk_init(mdb_walk_state_t *wsp)
{
	void	*ptr;

	if ((mdb_readvar(&ptr, "swapinfo") == -1) || ptr == NULL) {
		mdb_warn("swapinfo not found or invalid");
		return (WALK_ERR);
	}

	wsp->walk_addr = (uintptr_t)ptr;

	return (WALK_NEXT);
}

int
swap_walk_step(mdb_walk_state_t *wsp)
{
	uintptr_t	sip;
	struct swapinfo	si;

	sip = wsp->walk_addr;

	if (sip == NULL)
		return (WALK_DONE);

	if (mdb_vread(&si, sizeof (struct swapinfo), sip) == -1) {
		mdb_warn("unable to read swapinfo at %#lx", sip);
		return (WALK_ERR);
	}

	wsp->walk_addr = (uintptr_t)si.si_next;

	return (wsp->walk_callback(sip, &si, wsp->walk_cbdata));
}

int
swapinfof(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	struct swapinfo	si;
	char		*name;

	if (!(flags & DCMD_ADDRSPEC)) {
		if (mdb_walk_dcmd("swapinfo", "swapinfo", argc, argv) == -1) {
			mdb_warn("can't walk swapinfo");
			return (DCMD_ERR);
		}
		return (DCMD_OK);
	}

	if (DCMD_HDRSPEC(flags)) {
		mdb_printf("%<u>%?s %?s %9s %9s %s%</u>\n",
		    "ADDR", "VNODE", "PAGES", "FREE", "NAME");
	}

	if (mdb_vread(&si, sizeof (struct swapinfo), addr) == -1) {
		mdb_warn("can't read swapinfo at %#lx", addr);
		return (DCMD_ERR);
	}

	name = mdb_alloc(si.si_pnamelen, UM_SLEEP | UM_GC);
	if (mdb_vread(name, si.si_pnamelen, (uintptr_t)si.si_pname) == -1)
		name = "*error*";

	mdb_printf("%0?lx %?p %9d %9d %s\n",
	    addr, si.si_vp, si.si_npgs, si.si_nfpgs, name);

	return (DCMD_OK);
}

int
memlist_walk_step(mdb_walk_state_t *wsp)
{
	uintptr_t	mlp;
	struct memlist	ml;

	mlp = wsp->walk_addr;

	if (mlp == NULL)
		return (WALK_DONE);

	if (mdb_vread(&ml, sizeof (struct memlist), mlp) == -1) {
		mdb_warn("unable to read memlist at %#lx", mlp);
		return (WALK_ERR);
	}

	wsp->walk_addr = (uintptr_t)ml.ml_next;

	return (wsp->walk_callback(mlp, &ml, wsp->walk_cbdata));
}

int
memlist(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	struct memlist	ml;

	if (!(flags & DCMD_ADDRSPEC)) {
		uintptr_t ptr;
		uint_t list = 0;
		int	i;
		static const char *lists[] = {
			"phys_install",
			"phys_avail",
			"virt_avail"
		};

		if (mdb_getopts(argc, argv,
		    'i', MDB_OPT_SETBITS, (1 << 0), &list,
		    'a', MDB_OPT_SETBITS, (1 << 1), &list,
		    'v', MDB_OPT_SETBITS, (1 << 2), &list, NULL) != argc)
			return (DCMD_USAGE);

		if (!list)
			list = 1;

		for (i = 0; list; i++, list >>= 1) {
			if (!(list & 1))
				continue;
			if ((mdb_readvar(&ptr, lists[i]) == -1) ||
			    (ptr == NULL)) {
				mdb_warn("%s not found or invalid", lists[i]);
				return (DCMD_ERR);
			}

			mdb_printf("%s:\n", lists[i]);
			if (mdb_pwalk_dcmd("memlist", "memlist", 0, NULL,
			    ptr) == -1) {
				mdb_warn("can't walk memlist");
				return (DCMD_ERR);
			}
		}
		return (DCMD_OK);
	}

	if (DCMD_HDRSPEC(flags))
		mdb_printf("%<u>%?s %16s %16s%</u>\n", "ADDR", "BASE", "SIZE");

	if (mdb_vread(&ml, sizeof (struct memlist), addr) == -1) {
		mdb_warn("can't read memlist at %#lx", addr);
		return (DCMD_ERR);
	}

	mdb_printf("%0?lx %16llx %16llx\n", addr, ml.ml_address, ml.ml_size);

	return (DCMD_OK);
}

int
seg_walk_init(mdb_walk_state_t *wsp)
{
	if (wsp->walk_addr == NULL) {
		mdb_warn("seg walk must begin at struct as *\n");
		return (WALK_ERR);
	}

	/*
	 * this is really just a wrapper to the AVL tree walk
	 */
	wsp->walk_addr = (uintptr_t)&((struct as *)wsp->walk_addr)->a_segtree;
	return (avl_walk_init(wsp));
}

/*ARGSUSED*/
int
seg(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	struct seg s;

	if (argc != 0)
		return (DCMD_USAGE);

	if ((flags & DCMD_LOOPFIRST) || !(flags & DCMD_LOOP)) {
		mdb_printf("%<u>%?s %?s %?s %?s %s%</u>\n",
		    "SEG", "BASE", "SIZE", "DATA", "OPS");
	}

	if (mdb_vread(&s, sizeof (s), addr) == -1) {
		mdb_warn("failed to read seg at %p", addr);
		return (DCMD_ERR);
	}

	mdb_printf("%?p %?p %?lx %?p %a\n",
	    addr, s.s_base, s.s_size, s.s_data, s.s_ops);

	return (DCMD_OK);
}

/*ARGSUSED*/
static int
pmap_walk_anon(uintptr_t addr, const struct anon *anon, int *nres)
{
	uintptr_t pp =
	    mdb_page_lookup((uintptr_t)anon->an_vp, (u_offset_t)anon->an_off);

	if (pp != NULL)
		(*nres)++;

	return (WALK_NEXT);
}

static int
pmap_walk_seg(uintptr_t addr, const struct seg *seg, uintptr_t segvn)
{

	mdb_printf("%0?p %0?p %7dk", addr, seg->s_base, seg->s_size / 1024);

	if (segvn == (uintptr_t)seg->s_ops) {
		struct segvn_data svn;
		int nres = 0;

		(void) mdb_vread(&svn, sizeof (svn), (uintptr_t)seg->s_data);

		if (svn.amp == NULL) {
			mdb_printf(" %8s", "");
			goto drive_on;
		}

		/*
		 * We've got an amp for this segment; walk through
		 * the amp, and determine mappings.
		 */
		if (mdb_pwalk("anon", (mdb_walk_cb_t)pmap_walk_anon,
		    &nres, (uintptr_t)svn.amp) == -1)
			mdb_warn("failed to walk anon (amp=%p)", svn.amp);

		mdb_printf(" %7dk", (nres * PAGESIZE) / 1024);
drive_on:

		if (svn.vp != NULL) {
			char buf[29];

			mdb_vnode2path((uintptr_t)svn.vp, buf, sizeof (buf));
			mdb_printf(" %s", buf);
		} else
			mdb_printf(" [ anon ]");
	}

	mdb_printf("\n");
	return (WALK_NEXT);
}

static int
pmap_walk_seg_quick(uintptr_t addr, const struct seg *seg, uintptr_t segvn)
{
	mdb_printf("%0?p %0?p %7dk", addr, seg->s_base, seg->s_size / 1024);

	if (segvn == (uintptr_t)seg->s_ops) {
		struct segvn_data svn;

		(void) mdb_vread(&svn, sizeof (svn), (uintptr_t)seg->s_data);

		if (svn.vp != NULL) {
			mdb_printf(" %0?p", svn.vp);
		} else {
			mdb_printf(" [ anon ]");
		}
	}

	mdb_printf("\n");
	return (WALK_NEXT);
}

/*ARGSUSED*/
int
pmap(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	uintptr_t segvn;
	proc_t proc;
	uint_t quick = FALSE;
	mdb_walk_cb_t cb = (mdb_walk_cb_t)pmap_walk_seg;

	GElf_Sym sym;

	if (!(flags & DCMD_ADDRSPEC))
		return (DCMD_USAGE);

	if (mdb_getopts(argc, argv,
	    'q', MDB_OPT_SETBITS, TRUE, &quick, NULL) != argc)
		return (DCMD_USAGE);

	if (mdb_vread(&proc, sizeof (proc), addr) == -1) {
		mdb_warn("failed to read proc at %p", addr);
		return (DCMD_ERR);
	}

	if (mdb_lookup_by_name("segvn_ops", &sym) == 0)
		segvn = (uintptr_t)sym.st_value;
	else
		segvn = NULL;

	mdb_printf("%?s %?s %8s ", "SEG", "BASE", "SIZE");

	if (quick) {
		mdb_printf("VNODE\n");
		cb = (mdb_walk_cb_t)pmap_walk_seg_quick;
	} else {
		mdb_printf("%8s %s\n", "RES", "PATH");
	}

	if (mdb_pwalk("seg", cb, (void *)segvn, (uintptr_t)proc.p_as) == -1) {
		mdb_warn("failed to walk segments of as %p", proc.p_as);
		return (DCMD_ERR);
	}

	return (DCMD_OK);
}
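
/*
 * The anon walker below follows the kernel's anon_hdr layout: a small
 * array (or one created with ANON_ALLOC_FORCE) is a single flat level of
 * anon pointers, in which case aw_levtwo stays NULL; a large array is a
 * level-one array of chunk pointers, each chunk holding ANON_CHUNK_SIZE
 * level-two anon pointers, which the walker reads one chunk at a time.
 */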

typedef struct anon_walk_data {
	uintptr_t *aw_levone;
	uintptr_t *aw_levtwo;
	int aw_nlevone;
	int aw_levone_ndx;
	int aw_levtwo_ndx;
	struct anon_map	aw_amp;
	struct anon_hdr	aw_ahp;
} anon_walk_data_t;

int
anon_walk_init(mdb_walk_state_t *wsp)
{
	anon_walk_data_t *aw;

	if (wsp->walk_addr == NULL) {
		mdb_warn("anon walk doesn't support global walks\n");
		return (WALK_ERR);
	}

	aw = mdb_alloc(sizeof (anon_walk_data_t), UM_SLEEP);

	if (mdb_vread(&aw->aw_amp, sizeof (aw->aw_amp), wsp->walk_addr) == -1) {
		mdb_warn("failed to read anon map at %p", wsp->walk_addr);
		mdb_free(aw, sizeof (anon_walk_data_t));
		return (WALK_ERR);
	}

	if (mdb_vread(&aw->aw_ahp, sizeof (aw->aw_ahp),
	    (uintptr_t)(aw->aw_amp.ahp)) == -1) {
		mdb_warn("failed to read anon hdr ptr at %p", aw->aw_amp.ahp);
		mdb_free(aw, sizeof (anon_walk_data_t));
		return (WALK_ERR);
	}

	if (aw->aw_ahp.size <= ANON_CHUNK_SIZE ||
	    (aw->aw_ahp.flags & ANON_ALLOC_FORCE)) {
		aw->aw_nlevone = aw->aw_ahp.size;
		aw->aw_levtwo = NULL;
	} else {
		aw->aw_nlevone =
		    (aw->aw_ahp.size + ANON_CHUNK_OFF) >> ANON_CHUNK_SHIFT;
		aw->aw_levtwo =
		    mdb_zalloc(ANON_CHUNK_SIZE * sizeof (uintptr_t), UM_SLEEP);
	}

	aw->aw_levone =
	    mdb_alloc(aw->aw_nlevone * sizeof (uintptr_t), UM_SLEEP);

	aw->aw_levone_ndx = 0;
	aw->aw_levtwo_ndx = 0;

	mdb_vread(aw->aw_levone, aw->aw_nlevone * sizeof (uintptr_t),
	    (uintptr_t)aw->aw_ahp.array_chunk);

	if (aw->aw_levtwo != NULL) {
		while (aw->aw_levone[aw->aw_levone_ndx] == NULL) {
			aw->aw_levone_ndx++;
			if (aw->aw_levone_ndx == aw->aw_nlevone) {
				mdb_warn("corrupt anon; couldn't "
				    "find ptr to lev two map");
				goto out;
			}
		}

		mdb_vread(aw->aw_levtwo, ANON_CHUNK_SIZE * sizeof (uintptr_t),
		    aw->aw_levone[aw->aw_levone_ndx]);
	}

out:
	wsp->walk_data = aw;
	return (0);
}

int
anon_walk_step(mdb_walk_state_t *wsp)
{
	int status;
	anon_walk_data_t *aw = (anon_walk_data_t *)wsp->walk_data;
	struct anon anon;
	uintptr_t anonptr;

again:
	/*
	 * Once we've walked through level one, we're done.
	 */
	if (aw->aw_levone_ndx == aw->aw_nlevone)
		return (WALK_DONE);

	if (aw->aw_levtwo == NULL) {
		anonptr = aw->aw_levone[aw->aw_levone_ndx];
		aw->aw_levone_ndx++;
	} else {
		anonptr = aw->aw_levtwo[aw->aw_levtwo_ndx];
		aw->aw_levtwo_ndx++;

		if (aw->aw_levtwo_ndx == ANON_CHUNK_SIZE) {
			aw->aw_levtwo_ndx = 0;

			do {
				aw->aw_levone_ndx++;

				if (aw->aw_levone_ndx == aw->aw_nlevone)
					return (WALK_DONE);
			} while (aw->aw_levone[aw->aw_levone_ndx] == NULL);

			mdb_vread(aw->aw_levtwo, ANON_CHUNK_SIZE *
			    sizeof (uintptr_t),
			    aw->aw_levone[aw->aw_levone_ndx]);
		}
	}

	if (anonptr != NULL) {
		mdb_vread(&anon, sizeof (anon), anonptr);
		status = wsp->walk_callback(anonptr, &anon, wsp->walk_cbdata);
	} else
		goto again;

	return (status);
}

void
anon_walk_fini(mdb_walk_state_t *wsp)
{
	anon_walk_data_t *aw = (anon_walk_data_t *)wsp->walk_data;

	if (aw->aw_levtwo != NULL)
		mdb_free(aw->aw_levtwo, ANON_CHUNK_SIZE * sizeof (uintptr_t));

	mdb_free(aw->aw_levone, aw->aw_nlevone * sizeof (uintptr_t));
	mdb_free(aw, sizeof (anon_walk_data_t));
}

/*
 * Grumble, grumble.
 */
#define	SMAP_HASHFUNC(vp, off)	\
	((((uintptr_t)(vp) >> 6) + ((uintptr_t)(vp) >> 3) + \
	((off) >> MAXBSHIFT)) & smd_hashmsk)
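
/*
 * SMAP_HASHFUNC evidently duplicates the hash used by the kernel's segmap
 * driver (hence the grumbling above): ::vnode2smap recomputes the bucket
 * for a given (vnode, offset) pair, reads the chain head from smd_hash,
 * and follows sm_hash links until it finds a matching smap.
 */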

int
vnode2smap(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	long smd_hashmsk;
	int hash;
	uintptr_t offset = 0;
	struct smap smp;
	uintptr_t saddr, kaddr;
	uintptr_t smd_hash, smd_smap;
	struct seg seg;

	if (!(flags & DCMD_ADDRSPEC))
		return (DCMD_USAGE);

	if (mdb_readvar(&smd_hashmsk, "smd_hashmsk") == -1) {
		mdb_warn("failed to read smd_hashmsk");
		return (DCMD_ERR);
	}

	if (mdb_readvar(&smd_hash, "smd_hash") == -1) {
		mdb_warn("failed to read smd_hash");
		return (DCMD_ERR);
	}

	if (mdb_readvar(&smd_smap, "smd_smap") == -1) {
		mdb_warn("failed to read smd_smap");
		return (DCMD_ERR);
	}

	if (mdb_readvar(&kaddr, "segkmap") == -1) {
		mdb_warn("failed to read segkmap");
		return (DCMD_ERR);
	}

	if (mdb_vread(&seg, sizeof (seg), kaddr) == -1) {
		mdb_warn("failed to read segkmap at %p", kaddr);
		return (DCMD_ERR);
	}

	if (argc != 0) {
		const mdb_arg_t *arg = &argv[0];

		if (arg->a_type == MDB_TYPE_IMMEDIATE)
			offset = arg->a_un.a_val;
		else
			offset = (uintptr_t)mdb_strtoull(arg->a_un.a_str);
	}

	hash = SMAP_HASHFUNC(addr, offset);

	if (mdb_vread(&saddr, sizeof (saddr),
	    smd_hash + hash * sizeof (uintptr_t)) == -1) {
		mdb_warn("couldn't read smap at %p",
		    smd_hash + hash * sizeof (uintptr_t));
		return (DCMD_ERR);
	}

	do {
		if (mdb_vread(&smp, sizeof (smp), saddr) == -1) {
			mdb_warn("couldn't read smap at %p", saddr);
			return (DCMD_ERR);
		}

		if ((uintptr_t)smp.sm_vp == addr && smp.sm_off == offset) {
			mdb_printf("vnode %p, offs %p is smap %p, vaddr %p\n",
			    addr, offset, saddr, ((saddr - smd_smap) /
			    sizeof (smp)) * MAXBSIZE + seg.s_base);
			return (DCMD_OK);
		}

		saddr = (uintptr_t)smp.sm_hash;
	} while (saddr != NULL);

	mdb_printf("no smap for vnode %p, offs %p\n", addr, offset);
	return (DCMD_OK);
}

/*ARGSUSED*/
int
addr2smap(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	uintptr_t kaddr;
	struct seg seg;
	struct segmap_data sd;

	if (!(flags & DCMD_ADDRSPEC))
		return (DCMD_USAGE);

	if (mdb_readvar(&kaddr, "segkmap") == -1) {
		mdb_warn("failed to read segkmap");
		return (DCMD_ERR);
	}

	if (mdb_vread(&seg, sizeof (seg), kaddr) == -1) {
		mdb_warn("failed to read segkmap at %p", kaddr);
		return (DCMD_ERR);
	}

	if (mdb_vread(&sd, sizeof (sd), (uintptr_t)seg.s_data) == -1) {
		mdb_warn("failed to read segmap_data at %p", seg.s_data);
		return (DCMD_ERR);
	}

	mdb_printf("%p is smap %p\n", addr,
	    ((addr - (uintptr_t)seg.s_base) >> MAXBSHIFT) *
	    sizeof (struct smap) + (uintptr_t)sd.smd_sm);

	return (DCMD_OK);
}