/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <stdio.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <unistd.h>
#include <ctype.h>
#include <fcntl.h>
#include <string.h>
#include <dirent.h>
#include <limits.h>
#include <link.h>
#include <libelf.h>
#include <sys/types.h>
#include <signal.h>
#include <sys/stat.h>
#include <sys/mkdev.h>
#include <sys/mman.h>
#include <sys/lgrp_user.h>
#include <libproc.h>
#include <libzonecfg.h>

#define	KILOBYTE	1024
#define	MEGABYTE	(KILOBYTE * KILOBYTE)
#define	GIGABYTE	(KILOBYTE * KILOBYTE * KILOBYTE)

/*
 * Round up the value to the nearest kilobyte
 */
#define	ROUNDUP_KB(x)	(((x) + (KILOBYTE - 1)) / KILOBYTE)

/*
 * The alignment should be a power of 2.
 */
#define	P2ALIGN(x, align)	((x) & -(align))

#define	INVALID_ADDRESS		(uintptr_t)(-1)

struct totals {
	ulong_t total_size;
	ulong_t total_swap;
	ulong_t total_rss;
	ulong_t total_anon;
	ulong_t total_locked;
};

/*
 * The -L option requires per-page information. The information is presented
 * in an array of page_descr structures.
 */
typedef struct page_descr {
	uintptr_t	pd_start;	/* start address of a page */
	size_t		pd_pagesize;	/* page size in bytes */
	lgrp_id_t	pd_lgrp;	/* lgroup of memory backing the page */
	int		pd_valid;	/* valid page description if non-zero */
} page_descr_t;

/*
 * Per-page information for a memory chunk.
 * The meminfo(2) system call accepts up to MAX_MEMINFO_CNT pages at once.
 * When we need to scan larger ranges we divide them into MAX_MEMINFO_CNT-sized
 * chunks. The chunk information is stored in the memory_chunk structure.
 */
typedef struct memory_chunk {
	page_descr_t	page_info[MAX_MEMINFO_CNT];
	uintptr_t	end_addr;
	uintptr_t	chunk_start;	/* Starting address */
	uintptr_t	chunk_end;	/* chunk_end is always <= end_addr */
	size_t		page_size;
	int		page_index;	/* Current page */
	int		page_count;	/* Number of pages */
} memory_chunk_t;

static volatile int interrupt;

typedef int proc_xmap_f(void *, const prxmap_t *, const char *, int, int);

static int	xmapping_iter(struct ps_prochandle *, proc_xmap_f *, void *,
    int);
static int	rmapping_iter(struct ps_prochandle *, proc_map_f *, void *);

static int	look_map(void *, const prmap_t *, const char *);
static int	look_smap(void *, const prxmap_t *, const char *, int, int);
static int	look_xmap(void *, const prxmap_t *, const char *, int, int);
static int	look_xmap_nopgsz(void *, const prxmap_t *, const char *,
    int, int);

static int	gather_map(void *, const prmap_t *, const char *);
static int	gather_xmap(void *, const prxmap_t *, const char *, int, int);
static int	iter_map(proc_map_f *, void *);
static int	iter_xmap(proc_xmap_f *, void *);
static int	parse_addr_range(char *, uintptr_t *, uintptr_t *);
static void	mem_chunk_init(memory_chunk_t *, uintptr_t, size_t);

static int	perr(char *);
static void	printK(long, int);
static char	*mflags(uint_t);

static size_t	get_contiguous_region(memory_chunk_t *, uintptr_t,
    uintptr_t, size_t, lgrp_id_t *);
static void	mem_chunk_get(memory_chunk_t *, uintptr_t);
static lgrp_id_t addr_to_lgrp(memory_chunk_t *, uintptr_t, size_t *);
static char	*lgrp2str(lgrp_id_t);

static int	address_in_range(uintptr_t, uintptr_t, size_t);
static size_t	adjust_addr_range(uintptr_t, uintptr_t, size_t,
    uintptr_t *, uintptr_t *);

static int	lflag = 0;
static int	Lflag = 0;
static int	aflag = 0;

/*
 * The -A address range is represented as a pair of addresses
 * <start_addr, end_addr>. Either one of these may be unspecified (set to
 * INVALID_ADDRESS). If both are unspecified, no address range restrictions are
 * in place.
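 * For example, "-A 4000,5000" (addresses are parsed as hexadecimal by
 * parse_addr_range() below) limits reporting to mappings that overlap the
 * range [0x4000, 0x5000], rounded out to page boundaries.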
 */
static uintptr_t start_addr = INVALID_ADDRESS;
static uintptr_t end_addr = INVALID_ADDRESS;

static int addr_width, size_width;
static char *command;
static char *procname;
static struct ps_prochandle *Pr;

static void intr(int);

typedef struct lwpstack {
	lwpid_t	lwps_lwpid;
	stack_t	lwps_stack;
} lwpstack_t;

typedef struct {
	prxmap_t	md_xmap;
	prmap_t		md_map;
	char		*md_objname;
	boolean_t	md_last;
	int		md_doswap;
} mapdata_t;

static mapdata_t *maps;
static int map_count;
static int map_alloc;

static lwpstack_t *stacks = NULL;
static uint_t nstacks = 0;

#define	MAX_TRIES	5

static int
getstack(void *data, const lwpstatus_t *lsp)
{
	int *np = (int *)data;

	if (Plwp_alt_stack(Pr, lsp->pr_lwpid, &stacks[*np].lwps_stack) == 0) {
		stacks[*np].lwps_stack.ss_flags |= SS_ONSTACK;
		stacks[*np].lwps_lwpid = lsp->pr_lwpid;
		(*np)++;
	}

	if (Plwp_main_stack(Pr, lsp->pr_lwpid, &stacks[*np].lwps_stack) == 0) {
		stacks[*np].lwps_lwpid = lsp->pr_lwpid;
		(*np)++;
	}

	return (0);
}

/*
 * We compare the high memory addresses since stacks are faulted in from
 * high memory addresses to low memory addresses, and our prmap_t
 * structures identify only the range of addresses that have been faulted
 * in so far.
 */
static int
cmpstacks(const void *ap, const void *bp)
{
	const lwpstack_t *as = ap;
	const lwpstack_t *bs = bp;
	uintptr_t a = (uintptr_t)as->lwps_stack.ss_sp + as->lwps_stack.ss_size;
	uintptr_t b = (uintptr_t)bs->lwps_stack.ss_sp + bs->lwps_stack.ss_size;

	if (a < b)
		return (1);
	if (a > b)
		return (-1);
	return (0);
}


int
main(int argc, char **argv)
{
	int rflag = 0, sflag = 0, xflag = 0, Fflag = 0;
	int errflg = 0, Sflag = 0;
	int rc = 0;
	int opt;
	const char *bar8 = "-------";
	const char *bar16 = "----------";
	const char *bar;
	struct rlimit rlim;
	struct stat64 statbuf;
	char buf[128];
	int mapfd;
	int prg_gflags = PGRAB_RDONLY;
	int prr_flags = 0;
	boolean_t use_agent_lwp = B_FALSE;

	if ((command = strrchr(argv[0], '/')) != NULL)
		command++;
	else
		command = argv[0];

	while ((opt = getopt(argc, argv, "arsxSlLFA:")) != EOF) {
		switch (opt) {
		case 'a':		/* include shared mappings in -[xS] */
			aflag = 1;
			break;
		case 'r':		/* show reserved mappings */
			rflag = 1;
			break;
		case 's':		/* show hardware page sizes */
			sflag = 1;
			break;
		case 'S':		/* show swap reservations */
			Sflag = 1;
			break;
		case 'x':		/* show extended mappings */
			xflag = 1;
			break;
		case 'l':		/* show unresolved link map names */
			lflag = 1;
			break;
		case 'L':		/* show lgroup information */
			Lflag = 1;
			use_agent_lwp = B_TRUE;
			break;
		case 'F':		/* force grabbing (no O_EXCL) */
			Fflag = PGRAB_FORCE;
			break;
		case 'A':
			if (parse_addr_range(optarg, &start_addr, &end_addr)
			    != 0)
				errflg++;
			break;
		default:
			errflg = 1;
			break;
		}
	}

	argc -= optind;
	argv += optind;

	if ((Sflag && (xflag || rflag || sflag)) || (xflag && rflag) ||
	    (aflag && (!xflag && !Sflag)) ||
	    (Lflag && (xflag || Sflag))) {
		errflg = 1;
	}

	if (errflg || argc <= 0) {
		(void) fprintf(stderr,
		    "usage:\t%s [-rslF] [-A start[,end]] { pid | core } ...\n",
		    command);
		(void) fprintf(stderr,
		    "\t\t(report process address maps)\n");
		(void) fprintf(stderr,
		    "\t%s -L [-rslF] [-A start[,end]] pid ...\n", command);
		(void) fprintf(stderr,
		    "\t\t(report process address maps lgroups mappings)\n");
		(void) fprintf(stderr,
		    "\t%s -x [-aslF] [-A start[,end]] pid ...\n", command);
		(void) fprintf(stderr,
		    "\t\t(show resident/anon/locked mapping details)\n");
		(void) fprintf(stderr,
		    "\t%s -S [-alF] [-A start[,end]] { pid | core } ...\n",
		    command);
		(void) fprintf(stderr,
		    "\t\t(show swap reservations)\n\n");
		(void) fprintf(stderr,
		    "\t-a: include shared mappings in -[xS] summary\n");
		(void) fprintf(stderr,
		    "\t-r: show reserved address maps\n");
		(void) fprintf(stderr,
		    "\t-s: show hardware page sizes\n");
		(void) fprintf(stderr,
		    "\t-l: show unresolved dynamic linker map names\n");
		(void) fprintf(stderr,
		    "\t-F: force grabbing of the target process\n");
		(void) fprintf(stderr,
		    "\t-L: show lgroup mappings\n");
		(void) fprintf(stderr,
		    "\t-A start,end: limit output to the specified range\n");
		return (2);
	}

	/*
	 * Make sure we'll have enough file descriptors to handle a target
	 * that has many, many mappings.
	 */
	if (getrlimit(RLIMIT_NOFILE, &rlim) == 0) {
		rlim.rlim_cur = rlim.rlim_max;
		(void) setrlimit(RLIMIT_NOFILE, &rlim);
		(void) enable_extended_FILE_stdio(-1, -1);
	}

	/*
	 * The implementation of the -L option creates an agent LWP in the
	 * target process address space. The agent LWP issues meminfo(2) system
	 * calls on behalf of the target process. If we are interrupted
	 * prematurely, the target process remains in the stopped state with
	 * the agent still attached to it. To prevent such a situation we catch
	 * signals from the terminal and terminate gracefully.
	 */
	if (use_agent_lwp) {
		/*
		 * Buffer output to stdout, stderr while process is grabbed.
		 * Prevents infamous deadlocks due to pmap `pgrep xterm` and
		 * other variants.
		 */
		(void) proc_initstdio();

		prg_gflags = PGRAB_RETAIN | Fflag;
		prr_flags = PRELEASE_RETAIN;

		if (sigset(SIGHUP, SIG_IGN) == SIG_DFL)
			(void) sigset(SIGHUP, intr);
		if (sigset(SIGINT, SIG_IGN) == SIG_DFL)
			(void) sigset(SIGINT, intr);
		if (sigset(SIGQUIT, SIG_IGN) == SIG_DFL)
			(void) sigset(SIGQUIT, intr);
		(void) sigset(SIGPIPE, intr);
		(void) sigset(SIGTERM, intr);
	}

	while (argc-- > 0) {
		char *arg;
		int gcode;
		psinfo_t psinfo;
		int tries = 0;

		if (use_agent_lwp)
			(void) proc_flushstdio();

		if ((Pr = proc_arg_grab(arg = *argv++, PR_ARG_ANY,
		    prg_gflags, &gcode)) == NULL) {
			(void) fprintf(stderr, "%s: cannot examine %s: %s\n",
			    command, arg, Pgrab_error(gcode));
			rc++;
			continue;
		}

		procname = arg;		/* for perr() */

		addr_width = (Pstatus(Pr)->pr_dmodel == PR_MODEL_LP64) ? 16 : 8;
		size_width = (Pstatus(Pr)->pr_dmodel == PR_MODEL_LP64) ? 11 : 8;
		bar = addr_width == 8 ?
		    bar8 : bar16;
		(void) memcpy(&psinfo, Ppsinfo(Pr), sizeof (psinfo_t));
		proc_unctrl_psinfo(&psinfo);

		if (Pstate(Pr) != PS_DEAD) {
			(void) snprintf(buf, sizeof (buf),
			    "/proc/%d/map", (int)psinfo.pr_pid);
			if ((mapfd = open(buf, O_RDONLY)) < 0) {
				(void) fprintf(stderr, "%s: cannot "
				    "examine %s: lost control of "
				    "process\n", command, arg);
				rc++;
				Prelease(Pr, prr_flags);
				continue;
			}
		} else {
			mapfd = -1;
		}

again:
		map_count = 0;

		if (Pstate(Pr) == PS_DEAD) {
			(void) printf("core '%s' of %d:\t%.70s\n",
			    arg, (int)psinfo.pr_pid, psinfo.pr_psargs);

			if (rflag || sflag || xflag || Sflag || Lflag) {
				(void) printf(" -%c option is not compatible "
				    "with core files\n", xflag ? 'x' :
				    sflag ? 's' : rflag ? 'r' :
				    Lflag ? 'L' : 'S');
				Prelease(Pr, prr_flags);
				rc++;
				continue;
			}

		} else {
			(void) printf("%d:\t%.70s\n",
			    (int)psinfo.pr_pid, psinfo.pr_psargs);
		}

		if (!(Pstatus(Pr)->pr_flags & PR_ISSYS)) {
			struct totals t;

			/*
			 * Since we're grabbing the process readonly, we need
			 * to make sure the address space doesn't change during
			 * execution.
			 */
			if (Pstate(Pr) != PS_DEAD) {
				if (tries++ == MAX_TRIES) {
					Prelease(Pr, prr_flags);
					(void) close(mapfd);
					(void) fprintf(stderr, "%s: cannot "
					    "examine %s: address space is "
					    "changing\n", command, arg);
					continue;
				}

				if (fstat64(mapfd, &statbuf) != 0) {
					Prelease(Pr, prr_flags);
					(void) close(mapfd);
					(void) fprintf(stderr, "%s: cannot "
					    "examine %s: lost control of "
					    "process\n", command, arg);
					continue;
				}
			}

			nstacks = psinfo.pr_nlwp * 2;
			stacks = calloc(nstacks, sizeof (stacks[0]));
			if (stacks != NULL) {
				int n = 0;
				(void) Plwp_iter(Pr, getstack, &n);
				qsort(stacks, nstacks, sizeof (stacks[0]),
				    cmpstacks);
			}

			(void) memset(&t, 0, sizeof (t));

			if (Pgetauxval(Pr, AT_BASE) != -1L &&
			    Prd_agent(Pr) == NULL) {
				(void) fprintf(stderr, "%s: warning: "
				    "librtld_db failed to initialize; "
				    "shared library information will not be "
				    "available\n", command);
			}

			/*
			 * Gather data
			 */
			if (xflag)
				rc += xmapping_iter(Pr, gather_xmap, NULL, 0);
			else if (Sflag)
				rc += xmapping_iter(Pr, gather_xmap, NULL, 1);
			else {
				if (rflag)
					rc += rmapping_iter(Pr, gather_map,
					    NULL);
				else if (sflag)
					rc += xmapping_iter(Pr, gather_xmap,
					    NULL, 0);
				else
					rc += Pmapping_iter(Pr, gather_map,
					    NULL);
			}

			/*
			 * Ensure mappings are consistent.
			 */
			if (Pstate(Pr) != PS_DEAD) {
				struct stat64 newbuf;

				if (fstat64(mapfd, &newbuf) != 0 ||
				    memcmp(&newbuf.st_mtim, &statbuf.st_mtim,
				    sizeof (newbuf.st_mtim)) != 0) {
					if (stacks != NULL) {
						free(stacks);
						stacks = NULL;
					}
					goto again;
				}
			}

			/*
			 * Display data.
			 */
			if (xflag) {
				(void) printf("%*s%*s%*s%*s%*s "
				    "%sMode Mapped File\n",
				    addr_width, "Address",
				    size_width, "Kbytes",
				    size_width, "RSS",
				    size_width, "Anon",
				    size_width, "Locked",
				    sflag ? "Pgsz " : "");

				rc += iter_xmap(sflag ? look_xmap :
				    look_xmap_nopgsz, &t);

				(void) printf("%s%s %s %s %s %s\n",
				    addr_width == 8 ? "-" : "------",
				    bar, bar, bar, bar, bar);

				(void) printf("%stotal Kb", addr_width == 16 ?
				    " " : "");

				printK(t.total_size, size_width);
				printK(t.total_rss, size_width);
				printK(t.total_anon, size_width);
				printK(t.total_locked, size_width);

				(void) printf("\n");

			} else if (Sflag) {
				(void) printf("%*s%*s%*s Mode"
				    " Mapped File\n",
				    addr_width, "Address",
				    size_width, "Kbytes",
				    size_width, "Swap");

				rc += iter_xmap(look_xmap_nopgsz, &t);

				(void) printf("%s%s %s %s\n",
				    addr_width == 8 ? "-" : "------",
				    bar, bar, bar);

				(void) printf("%stotal Kb", addr_width == 16 ?
				    " " : "");

				printK(t.total_size, size_width);
				printK(t.total_swap, size_width);

				(void) printf("\n");

			} else {

				if (rflag) {
					rc += iter_map(look_map, &t);
				} else if (sflag) {
					if (Lflag) {
						(void) printf("%*s %*s %4s"
						    " %-6s %s %s\n",
						    addr_width, "Address",
						    size_width,
						    "Bytes", "Pgsz", "Mode ",
						    "Lgrp", "Mapped File");
						rc += iter_xmap(look_smap, &t);
					} else {
						(void) printf("%*s %*s %4s"
						    " %-6s %s\n",
						    addr_width, "Address",
						    size_width,
						    "Bytes", "Pgsz", "Mode ",
						    "Mapped File");
						rc += iter_xmap(look_smap, &t);
					}
				} else {
					rc += iter_map(look_map, &t);
				}

				(void) printf(" %stotal %*luK\n",
				    addr_width == 16 ?
				    " " : "",
				    size_width, t.total_size);
			}

			if (stacks != NULL) {
				free(stacks);
				stacks = NULL;
			}

		}

		Prelease(Pr, prr_flags);
		if (mapfd != -1)
			(void) close(mapfd);
	}

	if (use_agent_lwp)
		(void) proc_finistdio();

	return (rc);
}

static char *
make_name(struct ps_prochandle *Pr, uintptr_t addr, const char *mapname,
    char *buf, size_t bufsz)
{
	const pstatus_t *Psp = Pstatus(Pr);
	const psinfo_t *pi = Ppsinfo(Pr);
	char fname[100];
	struct stat statb;
	int len;
	char zname[ZONENAME_MAX];
	char zpath[PATH_MAX];
	char objname[PATH_MAX];

	if (!lflag && strcmp(mapname, "a.out") == 0 &&
	    Pexecname(Pr, buf, bufsz) != NULL)
		return (buf);

	if (Pobjname(Pr, addr, objname, sizeof (objname)) != NULL) {
		(void) strncpy(buf, objname, bufsz);

		if (lflag)
			return (buf);

		if ((len = resolvepath(buf, buf, bufsz)) > 0) {
			buf[len] = '\0';
			return (buf);
		}

		/*
		 * If the target is in a non-global zone, attempt to prepend
		 * the zone path in order to give the global-zone caller the
		 * real path to the file.
		 */
		if (getzonenamebyid(pi->pr_zoneid, zname,
		    sizeof (zname)) != -1 && strcmp(zname, "global") != 0) {
			typedef int (*fptr)(char *, char *, size_t);
			fptr zone_get_zonepath;
			void *dlhdl;

			if (((dlhdl =
			    dlopen(LIBZONECFG_PATH, RTLD_LAZY)) == NULL) ||
			    ((zone_get_zonepath =
			    (fptr) dlsym(dlhdl, "zone_get_zonepath")) == NULL))
				return (NULL);

			if ((*zone_get_zonepath)(zname, zpath, sizeof (zpath))
			    == Z_OK) {
				(void) strncat(zpath, "/root",
				    MAXPATHLEN - strlen(zpath));

				if (bufsz <= strlen(zpath)) {
					(void) dlclose(dlhdl);
					return (NULL);
				}

				(void) strncpy(buf, zpath, bufsz);
				(void) strncat(buf, objname,
				    bufsz - strlen(zpath));
			}
			(void) dlclose(dlhdl);
		}

		if ((len = resolvepath(buf, buf, bufsz)) > 0) {
			buf[len] = '\0';
			return (buf);
		}
	}

	if (Pstate(Pr) != PS_DEAD && *mapname != '\0') {
		(void) snprintf(fname, sizeof (fname), "/proc/%d/path/%s",
		    (int)Psp->pr_pid, mapname);
		len = readlink(fname, buf, bufsz - 1);
		if (len >= 0) {
			buf[len] = '\0';
			return (buf);
		} else {	/* there is no path and readlink() error */
			(void) snprintf(fname, sizeof (fname),
			    "/proc/%d/object/%s", (int)Psp->pr_pid, mapname);
			if (stat(fname, &statb) == 0) {
				dev_t dev = statb.st_dev;
				ino_t ino = statb.st_ino;
				(void) snprintf(buf, bufsz,
				    "dev:%lu,%lu ino:%lu",
				    (ulong_t)major(dev),
				    (ulong_t)minor(dev), ino);
				return (buf);
			}
		}
	}

	return (NULL);
}

static char *
anon_name(char *name, const pstatus_t *Psp,
    uintptr_t vaddr, size_t size, int mflags, int shmid)
{
	if (mflags & MA_ISM) {
		if (shmid == -1)
			(void) snprintf(name, PATH_MAX, " [ %s shmid=null ]",
			    (mflags & MA_NORESERVE) ? "ism" : "dism");
		else
			(void) snprintf(name, PATH_MAX, " [ %s shmid=0x%x ]",
			    (mflags & MA_NORESERVE) ? "ism" : "dism", shmid);
	} else if (mflags & MA_SHM) {
		if (shmid == -1)
			(void) sprintf(name, " [ shmid=null ]");
		else
			(void) sprintf(name, " [ shmid=0x%x ]", shmid);
	} else if (vaddr + size > Psp->pr_stkbase &&
	    vaddr < Psp->pr_stkbase + Psp->pr_stksize) {
		(void) strcpy(name, " [ stack ]");
	} else if ((mflags & MA_ANON) &&
	    vaddr + size > Psp->pr_brkbase &&
	    vaddr < Psp->pr_brkbase + Psp->pr_brksize) {
		(void) strcpy(name, " [ heap ]");
	} else {
		lwpstack_t key, *stk;

		key.lwps_stack.ss_sp = (void *)vaddr;
		key.lwps_stack.ss_size = size;
		if (nstacks > 0 &&
		    (stk = bsearch(&key, stacks, nstacks, sizeof (stacks[0]),
		    cmpstacks)) != NULL) {
			(void) snprintf(name, PATH_MAX, " [ %s tid=%d ]",
			    (stk->lwps_stack.ss_flags & SS_ONSTACK) ?
			    "altstack" : "stack",
			    stk->lwps_lwpid);
		} else if (Pstate(Pr) != PS_DEAD) {
			(void) strcpy(name, " [ anon ]");
		} else {
			return (NULL);
		}
	}

	return (name);
}

static int
rmapping_iter(struct ps_prochandle *Pr, proc_map_f *func, void *cd)
{
	char mapname[PATH_MAX];
	int mapfd, nmap, i, rc;
	struct stat st;
	prmap_t *prmapp, *pmp;
	ssize_t n;

	(void) snprintf(mapname, sizeof (mapname),
	    "/proc/%d/rmap", (int)Pstatus(Pr)->pr_pid);

	if ((mapfd = open(mapname, O_RDONLY)) < 0 || fstat(mapfd, &st) != 0) {
		if (mapfd >= 0)
			(void) close(mapfd);
		return (perr(mapname));
	}

	nmap = st.st_size / sizeof (prmap_t);
	prmapp = malloc((nmap + 1) * sizeof (prmap_t));

	if ((n = pread(mapfd, prmapp, (nmap + 1) * sizeof (prmap_t), 0L)) < 0) {
		(void) close(mapfd);
		free(prmapp);
		return (perr("read rmap"));
	}

	(void) close(mapfd);
	nmap = n / sizeof (prmap_t);

	for (i = 0, pmp = prmapp; i < nmap; i++, pmp++) {
		if ((rc = func(cd, pmp, NULL)) != 0) {
			free(prmapp);
			return (rc);
		}
	}

	free(prmapp);
	return (0);
}

static int
xmapping_iter(struct ps_prochandle *Pr, proc_xmap_f *func, void *cd, int doswap)
{
	char mapname[PATH_MAX];
	int mapfd, nmap, i, rc;
	struct stat st;
	prxmap_t *prmapp, *pmp;
	ssize_t n;

	(void) snprintf(mapname, sizeof (mapname),
	    "/proc/%d/xmap", (int)Pstatus(Pr)->pr_pid);

	if ((mapfd = open(mapname, O_RDONLY)) < 0 || fstat(mapfd, &st) != 0) {
		if (mapfd >= 0)
			(void) close(mapfd);
		return (perr(mapname));
	}

	nmap = st.st_size / sizeof (prxmap_t);
	nmap *= 2;
again:
	prmapp = malloc((nmap + 1) * sizeof (prxmap_t));

	if ((n = pread(mapfd, prmapp, (nmap + 1) * sizeof (prxmap_t), 0)) < 0) {
		(void) close(mapfd);
		free(prmapp);
		return (perr("read xmap"));
	}

	if (nmap < n / sizeof (prxmap_t)) {
		free(prmapp);
		nmap *= 2;
		goto again;
	}

	(void) close(mapfd);
	nmap = n / sizeof (prxmap_t);

	for (i = 0, pmp = prmapp; i < nmap; i++, pmp++) {
		if ((rc = func(cd, pmp, NULL, i == nmap - 1, doswap)) != 0) {
			free(prmapp);
			return (rc);
		}
	}

	/*
	 * Mark the last element.
	 */
	if (map_count > 0)
		maps[map_count - 1].md_last = B_TRUE;

	free(prmapp);
	return (0);
}

/*ARGSUSED*/
static int
look_map(void *data, const prmap_t *pmp, const char *object_name)
{
	struct totals *t = data;
	const pstatus_t *Psp = Pstatus(Pr);
	size_t size;
	char mname[PATH_MAX];
	char *lname = NULL;
	size_t psz = pmp->pr_pagesize;
	uintptr_t vaddr = pmp->pr_vaddr;
	uintptr_t segment_end = vaddr + pmp->pr_size;
	lgrp_id_t lgrp;
	memory_chunk_t mchunk;

	/*
	 * If the mapping is not anon or not part of the heap, make a name
	 * for it. We don't want to report the heap as a.out's data.
	 */
	if (!(pmp->pr_mflags & MA_ANON) ||
	    segment_end <= Psp->pr_brkbase ||
	    pmp->pr_vaddr >= Psp->pr_brkbase + Psp->pr_brksize) {
		lname = make_name(Pr, pmp->pr_vaddr, pmp->pr_mapname,
		    mname, sizeof (mname));
	}

	if (lname == NULL &&
	    ((pmp->pr_mflags & MA_ANON) || Pstate(Pr) == PS_DEAD)) {
		lname = anon_name(mname, Psp, pmp->pr_vaddr,
		    pmp->pr_size, pmp->pr_mflags, pmp->pr_shmid);
	}

	/*
	 * Adjust the address range if -A is specified.
	 */
	size = adjust_addr_range(pmp->pr_vaddr, segment_end, psz,
	    &vaddr, &segment_end);

	if (size == 0)
		return (0);

	if (!Lflag) {
		/*
		 * Display the whole mapping
		 */
		size = ROUNDUP_KB(size);

		(void) printf(lname ?
		    "%.*lX %*luK %-6s %s\n" :
		    "%.*lX %*luK %s\n",
		    addr_width, vaddr,
		    size_width - 1, size, mflags(pmp->pr_mflags), lname);

		t->total_size += size;
		return (0);
	}

	/*
	 * We need to display lgroups backing physical memory, so we break the
	 * segment into individual pages and coalesce pages with the same
	 * lgroup into one "segment".
	 */

	/*
	 * Initialize address descriptions for the mapping.
	 */
	mem_chunk_init(&mchunk, segment_end, psz);
	size = 0;

	/*
	 * Walk mapping (page by page) and display contiguous ranges of memory
	 * allocated to same lgroup.
	 */
	do {
		size_t size_contig;

		/*
		 * Get contiguous region of memory starting from vaddr
		 * allocated from the same lgroup.
		 */
		size_contig = get_contiguous_region(&mchunk, vaddr,
		    segment_end, pmp->pr_pagesize, &lgrp);

		(void) printf(lname ? "%.*lX %*luK %-6s%s %s\n" :
		    "%.*lX %*luK %s %s\n",
		    addr_width, vaddr,
		    size_width - 1, size_contig / KILOBYTE,
		    mflags(pmp->pr_mflags),
		    lgrp2str(lgrp), lname);

		vaddr += size_contig;
		size += size_contig;
	} while (vaddr < segment_end && !interrupt);

	/* Update the total size */
	t->total_size += ROUNDUP_KB(size);
	return (0);
}

static void
printK(long value, int width)
{
	if (value == 0)
		(void) printf(width == 8 ? " -" : " -");
	else
		(void) printf(" %*lu", width - 1, value);
}

static const char *
pagesize(const prxmap_t *pmp)
{
	int pagesize = pmp->pr_hatpagesize;
	static char buf[32];

	if (pagesize == 0) {
		return ("-");	/* no underlying HAT mapping */
	}

	if (pagesize >= KILOBYTE && (pagesize % KILOBYTE) == 0) {
		if ((pagesize % GIGABYTE) == 0)
			(void) snprintf(buf, sizeof (buf), "%dG",
			    pagesize / GIGABYTE);
		else if ((pagesize % MEGABYTE) == 0)
			(void) snprintf(buf, sizeof (buf), "%dM",
			    pagesize / MEGABYTE);
		else
			(void) snprintf(buf, sizeof (buf), "%dK",
			    pagesize / KILOBYTE);
	} else
		(void) snprintf(buf, sizeof (buf), "%db", pagesize);

	return (buf);
}

/*ARGSUSED*/
static int
look_smap(void *data,
	const prxmap_t *pmp,
	const char *object_name,
	int last, int doswap)
{
	struct totals *t = data;
	const pstatus_t *Psp = Pstatus(Pr);
	size_t size;
	char mname[PATH_MAX];
	char *lname = NULL;
	const char *format;
	size_t psz = pmp->pr_pagesize;
	uintptr_t vaddr = pmp->pr_vaddr;
	uintptr_t segment_end = vaddr + pmp->pr_size;
	lgrp_id_t lgrp;
	memory_chunk_t mchunk;

	/*
	 * If the mapping is not anon or not part of the heap, make a name
	 * for it. We don't want to report the heap as a.out's data.
	 */
	if (!(pmp->pr_mflags & MA_ANON) ||
	    pmp->pr_vaddr + pmp->pr_size <= Psp->pr_brkbase ||
	    pmp->pr_vaddr >= Psp->pr_brkbase + Psp->pr_brksize) {
		lname = make_name(Pr, pmp->pr_vaddr, pmp->pr_mapname,
		    mname, sizeof (mname));
	}

	if (lname == NULL &&
	    ((pmp->pr_mflags & MA_ANON) || Pstate(Pr) == PS_DEAD)) {
		lname = anon_name(mname, Psp, pmp->pr_vaddr,
		    pmp->pr_size, pmp->pr_mflags, pmp->pr_shmid);
	}

	/*
	 * Adjust the address range if -A is specified.
	 */
	size = adjust_addr_range(pmp->pr_vaddr, segment_end, psz,
	    &vaddr, &segment_end);

	if (size == 0)
		return (0);

	if (!Lflag) {
		/*
		 * Display the whole mapping
		 */
		if (lname != NULL)
			format = "%.*lX %*luK %4s %-6s %s\n";
		else
			format = "%.*lX %*luK %4s %s\n";

		size = ROUNDUP_KB(size);

		(void) printf(format, addr_width, vaddr, size_width - 1, size,
		    pagesize(pmp), mflags(pmp->pr_mflags), lname);

		t->total_size += size;
		return (0);
	}

	if (lname != NULL)
		format = "%.*lX %*luK %4s %-6s%s %s\n";
	else
		format = "%.*lX %*luK %4s%s %s\n";

	/*
	 * We need to display lgroups backing physical memory, so we break the
	 * segment into individual pages and coalesce pages with the same
	 * lgroup into one "segment".
	 */

	/*
	 * Initialize address descriptions for the mapping.
	 */
	mem_chunk_init(&mchunk, segment_end, psz);
	size = 0;

	/*
	 * Walk mapping (page by page) and display contiguous ranges of memory
	 * allocated to same lgroup.
	 */
	do {
		size_t size_contig;

		/*
		 * Get contiguous region of memory starting from vaddr
		 * allocated from the same lgroup.
		 */
		size_contig = get_contiguous_region(&mchunk, vaddr,
		    segment_end, pmp->pr_pagesize, &lgrp);

		(void) printf(format, addr_width, vaddr,
		    size_width - 1, size_contig / KILOBYTE,
		    pagesize(pmp), mflags(pmp->pr_mflags),
		    lgrp2str(lgrp), lname);

		vaddr += size_contig;
		size += size_contig;
	} while (vaddr < segment_end && !interrupt);

	t->total_size += ROUNDUP_KB(size);
	return (0);
}

#define	ANON(x)	((aflag || (((x)->pr_mflags & MA_SHARED) == 0)) ? \
	((x)->pr_anon) : 0)

/*ARGSUSED*/
static int
look_xmap(void *data,
	const prxmap_t *pmp,
	const char *object_name,
	int last, int doswap)
{
	struct totals *t = data;
	const pstatus_t *Psp = Pstatus(Pr);
	char mname[PATH_MAX];
	char *lname = NULL;
	char *ln;

	/*
	 * If the mapping is not anon or not part of the heap, make a name
	 * for it. We don't want to report the heap as a.out's data.
	 */
	if (!(pmp->pr_mflags & MA_ANON) ||
	    pmp->pr_vaddr + pmp->pr_size <= Psp->pr_brkbase ||
	    pmp->pr_vaddr >= Psp->pr_brkbase + Psp->pr_brksize) {
		lname = make_name(Pr, pmp->pr_vaddr, pmp->pr_mapname,
		    mname, sizeof (mname));
	}

	if (lname != NULL) {
		if ((ln = strrchr(lname, '/')) != NULL)
			lname = ln + 1;
	} else if ((pmp->pr_mflags & MA_ANON) || Pstate(Pr) == PS_DEAD) {
		lname = anon_name(mname, Psp, pmp->pr_vaddr,
		    pmp->pr_size, pmp->pr_mflags, pmp->pr_shmid);
	}

	(void) printf("%.*lX", addr_width, (ulong_t)pmp->pr_vaddr);

	printK(ROUNDUP_KB(pmp->pr_size), size_width);
	printK(pmp->pr_rss * (pmp->pr_pagesize / KILOBYTE), size_width);
	printK(ANON(pmp) * (pmp->pr_pagesize / KILOBYTE), size_width);
	printK(pmp->pr_locked * (pmp->pr_pagesize / KILOBYTE), size_width);
	(void) printf(lname ? " %4s %-6s %s\n" : " %4s %s\n",
	    pagesize(pmp), mflags(pmp->pr_mflags), lname);

	t->total_size += ROUNDUP_KB(pmp->pr_size);
	t->total_rss += pmp->pr_rss * (pmp->pr_pagesize / KILOBYTE);
	t->total_anon += ANON(pmp) * (pmp->pr_pagesize / KILOBYTE);
	t->total_locked += (pmp->pr_locked * (pmp->pr_pagesize / KILOBYTE));

	return (0);
}

/*ARGSUSED*/
static int
look_xmap_nopgsz(void *data,
	const prxmap_t *pmp,
	const char *object_name,
	int last, int doswap)
{
	struct totals *t = data;
	const pstatus_t *Psp = Pstatus(Pr);
	char mname[PATH_MAX];
	char *lname = NULL;
	char *ln;
	static uintptr_t prev_vaddr;
	static size_t prev_size;
	static offset_t prev_offset;
	static int prev_mflags;
	static char *prev_lname;
	static char prev_mname[PATH_MAX];
	static ulong_t prev_rss;
	static ulong_t prev_anon;
	static ulong_t prev_locked;
	static ulong_t prev_swap;
	int merged = 0;
	static int first = 1;
	ulong_t swap = 0;
	int kperpage;

	/*
	 * Calculate swap reservations
	 */
	if (pmp->pr_mflags & MA_SHARED) {
		if (aflag && (pmp->pr_mflags & MA_NORESERVE) == 0) {
			/* Swap reserved for entire non-ism SHM */
			swap = pmp->pr_size / pmp->pr_pagesize;
		}
	} else if (pmp->pr_mflags & MA_NORESERVE) {
		/* Swap reserved on fault for each anon page */
		swap = pmp->pr_anon;
	} else if (pmp->pr_mflags & MA_WRITE) {
		/* Swap reserved for entire writable segment */
		swap = pmp->pr_size / pmp->pr_pagesize;
	}

	/*
	 * If the mapping is not anon or not part of the heap, make a name
	 * for it. We don't want to report the heap as a.out's data.
	 */
	if (!(pmp->pr_mflags & MA_ANON) ||
	    pmp->pr_vaddr + pmp->pr_size <= Psp->pr_brkbase ||
	    pmp->pr_vaddr >= Psp->pr_brkbase + Psp->pr_brksize) {
		lname = make_name(Pr, pmp->pr_vaddr, pmp->pr_mapname,
		    mname, sizeof (mname));
	}

	if (lname != NULL) {
		if ((ln = strrchr(lname, '/')) != NULL)
			lname = ln + 1;
	} else if ((pmp->pr_mflags & MA_ANON) || Pstate(Pr) == PS_DEAD) {
		lname = anon_name(mname, Psp, pmp->pr_vaddr,
		    pmp->pr_size, pmp->pr_mflags, pmp->pr_shmid);
	}

	kperpage = pmp->pr_pagesize / KILOBYTE;

	t->total_size += ROUNDUP_KB(pmp->pr_size);
	t->total_rss += pmp->pr_rss * kperpage;
	t->total_anon += ANON(pmp) * kperpage;
	t->total_locked += pmp->pr_locked * kperpage;
	t->total_swap += swap * kperpage;

	if (first == 1) {
		first = 0;
		prev_vaddr = pmp->pr_vaddr;
		prev_size = pmp->pr_size;
		prev_offset = pmp->pr_offset;
		prev_mflags = pmp->pr_mflags;
		if (lname == NULL) {
			prev_lname = NULL;
		} else {
			(void) strcpy(prev_mname, lname);
			prev_lname = prev_mname;
		}
		prev_rss = pmp->pr_rss * kperpage;
		prev_anon = ANON(pmp) * kperpage;
		prev_locked = pmp->pr_locked * kperpage;
		prev_swap = swap * kperpage;
		if (last == 0) {
			return (0);
		}
		merged = 1;
	} else if (prev_vaddr + prev_size == pmp->pr_vaddr &&
	    prev_mflags == pmp->pr_mflags &&
	    ((prev_mflags & MA_ISM) ||
	    prev_offset + prev_size == pmp->pr_offset) &&
	    ((lname == NULL && prev_lname == NULL) ||
	    (lname != NULL && prev_lname != NULL &&
	    strcmp(lname, prev_lname) == 0))) {
		prev_size += pmp->pr_size;
		prev_rss += pmp->pr_rss * kperpage;
		prev_anon += ANON(pmp) * kperpage;
		prev_locked += pmp->pr_locked * kperpage;
		prev_swap += swap * kperpage;
		if (last == 0) {
			return (0);
		}
		merged = 1;
	}

	(void) printf("%.*lX", addr_width, (ulong_t)prev_vaddr);
	printK(ROUNDUP_KB(prev_size), size_width);

	if (doswap)
		printK(prev_swap, size_width);
	else {
		printK(prev_rss, size_width);
		printK(prev_anon, size_width);
		printK(prev_locked, size_width);
	}
	(void) printf(prev_lname ? " %-6s %s\n" : "%s\n",
	    mflags(prev_mflags), prev_lname);

	if (last == 0) {
		prev_vaddr = pmp->pr_vaddr;
		prev_size = pmp->pr_size;
		prev_offset = pmp->pr_offset;
		prev_mflags = pmp->pr_mflags;
		if (lname == NULL) {
			prev_lname = NULL;
		} else {
			(void) strcpy(prev_mname, lname);
			prev_lname = prev_mname;
		}
		prev_rss = pmp->pr_rss * kperpage;
		prev_anon = ANON(pmp) * kperpage;
		prev_locked = pmp->pr_locked * kperpage;
		prev_swap = swap * kperpage;
	} else if (merged == 0) {
		(void) printf("%.*lX", addr_width, (ulong_t)pmp->pr_vaddr);
		printK(ROUNDUP_KB(pmp->pr_size), size_width);
		if (doswap)
			printK(swap * kperpage, size_width);
		else {
			printK(pmp->pr_rss * kperpage, size_width);
			printK(ANON(pmp) * kperpage, size_width);
			printK(pmp->pr_locked * kperpage, size_width);
		}
		(void) printf(lname ?
		    " %-6s %s\n" : " %s\n",
		    mflags(pmp->pr_mflags), lname);
	}

	if (last != 0)
		first = 1;

	return (0);
}

static int
perr(char *s)
{
	if (s)
		(void) fprintf(stderr, "%s: ", procname);
	else
		s = procname;
	perror(s);
	return (1);
}

static char *
mflags(uint_t arg)
{
	static char code_buf[80];
	char *str = code_buf;

	/*
	 * rwxsR
	 *
	 * r - segment is readable
	 * w - segment is writable
	 * x - segment is executable
	 * s - segment is shared
	 * R - segment is mapped MAP_NORESERVE
	 *
	 */
	(void) sprintf(str, "%c%c%c%c%c%c",
	    arg & MA_READ ? 'r' : '-',
	    arg & MA_WRITE ? 'w' : '-',
	    arg & MA_EXEC ? 'x' : '-',
	    arg & MA_SHARED ? 's' : '-',
	    arg & MA_NORESERVE ? 'R' : '-',
	    arg & MA_RESERVED1 ? '*' : ' ');

	return (str);
}

static mapdata_t *
nextmap(void)
{
	mapdata_t *newmaps;
	int next;

	if (map_count == map_alloc) {
		if (map_alloc == 0)
			next = 16;
		else
			next = map_alloc * 2;

		newmaps = realloc(maps, next * sizeof (mapdata_t));
		if (newmaps == NULL) {
			(void) perr("failed to allocate maps");
			exit(1);
		}
		(void) memset(newmaps + map_alloc, '\0',
		    (next - map_alloc) * sizeof (mapdata_t));

		map_alloc = next;
		maps = newmaps;
	}

	return (&maps[map_count++]);
}

/*ARGSUSED*/
static int
gather_map(void *ignored, const prmap_t *map, const char *objname)
{
	mapdata_t *data;

	/* Skip mappings which are outside the range specified by -A */
	if (!address_in_range(map->pr_vaddr,
	    map->pr_vaddr + map->pr_size, map->pr_pagesize))
		return (0);

	data = nextmap();
	data->md_map = *map;
	if (data->md_objname != NULL)
		free(data->md_objname);
	data->md_objname = objname ? strdup(objname) : NULL;

	return (0);
}

/*ARGSUSED*/
static int
gather_xmap(void *ignored, const prxmap_t *xmap, const char *objname,
    int last, int doswap)
{
	mapdata_t *data;

	/* Skip mappings which are outside the range specified by -A */
	if (!address_in_range(xmap->pr_vaddr,
	    xmap->pr_vaddr + xmap->pr_size, xmap->pr_pagesize))
		return (0);

	data = nextmap();
	data->md_xmap = *xmap;
	if (data->md_objname != NULL)
		free(data->md_objname);
	data->md_objname = objname ? strdup(objname) : NULL;
	data->md_last = last;
	data->md_doswap = doswap;

	return (0);
}

static int
iter_map(proc_map_f *func, void *data)
{
	int i;
	int ret;

	for (i = 0; i < map_count; i++) {
		if (interrupt)
			break;
		if ((ret = func(data, &maps[i].md_map,
		    maps[i].md_objname)) != 0)
			return (ret);
	}

	return (0);
}

static int
iter_xmap(proc_xmap_f *func, void *data)
{
	int i;
	int ret;

	for (i = 0; i < map_count; i++) {
		if (interrupt)
			break;
		if ((ret = func(data, &maps[i].md_xmap, maps[i].md_objname,
		    maps[i].md_last, maps[i].md_doswap)) != 0)
			return (ret);
	}

	return (0);
}

/*
 * Convert lgroup ID to string.
 * Returns a dash when the lgroup ID is invalid.
 */
static char *
lgrp2str(lgrp_id_t lgrp)
{
	static char lgrp_buf[20];
	char *str = lgrp_buf;

	(void) sprintf(str, lgrp == LGRP_NONE ? " -" : "%4d", lgrp);
	return (str);
}

/*
 * Parse address range specification for -A option.
 * The address range may have the following forms:
 *
 *	address
 *		start and end are set to address
 *	address,
 *		start is set to address, end is set to INVALID_ADDRESS
 *	,address
 *		start is set to 0, end is set to address
 *	address1,address2
 *		start is set to address1, end is set to address2
 *
 */
static int
parse_addr_range(char *input_str, uintptr_t *start, uintptr_t *end)
{
	char *startp = input_str;
	char *endp = strchr(input_str, ',');
	ulong_t s = (ulong_t)INVALID_ADDRESS;
	ulong_t e = (ulong_t)INVALID_ADDRESS;

	if (endp != NULL) {
		/*
		 * Comma is present. If there is nothing after comma, the end
		 * remains set at INVALID_ADDRESS. Otherwise it is set to the
		 * value after comma.
		 */
		*endp = '\0';
		endp++;

		if ((*endp != '\0') && sscanf(endp, "%lx", &e) != 1)
			return (1);
	}

	if (startp != NULL) {
		/*
		 * Read the start address, if it is specified. If the address
		 * is missing, start will be set to INVALID_ADDRESS.
		 */
		if ((*startp != '\0') && sscanf(startp, "%lx", &s) != 1)
			return (1);
	}

	/* If there is no comma, end becomes equal to start */
	if (endp == NULL)
		e = s;

	/*
	 * ,end implies 0..end range
	 */
	if (e != INVALID_ADDRESS && s == INVALID_ADDRESS)
		s = 0;

	*start = (uintptr_t)s;
	*end = (uintptr_t)e;

	/* Return error if neither start nor end address were specified */
	return (! (s != INVALID_ADDRESS || e != INVALID_ADDRESS));
}

/*
 * Check whether any portion of [start, end] segment is within the
 * [start_addr, end_addr] range.
 *
 * Return values:
 *	0 - address is outside the range
 *	1 - address is within the range
 */
static int
address_in_range(uintptr_t start, uintptr_t end, size_t psz)
{
	int rc = 1;

	/*
	 * Nothing to do if there is no address range specified with -A
	 */
	if (start_addr != INVALID_ADDRESS || end_addr != INVALID_ADDRESS) {
		/* The segment end is below the range start */
		if ((start_addr != INVALID_ADDRESS) &&
		    (end < P2ALIGN(start_addr, psz)))
			rc = 0;

		/* The segment start is above the range end */
		if ((end_addr != INVALID_ADDRESS) &&
		    (start > P2ALIGN(end_addr + psz, psz)))
			rc = 0;
	}
	return (rc);
}

/*
 * Returns an intersection of the [start, end] interval and the range specified
 * by -A flag [start_addr, end_addr]. Unspecified parts of the address range
 * have value INVALID_ADDRESS.
 *
 * The start_addr address is rounded down to the beginning of page and end_addr
 * is rounded up to the end of page.
 *
 * Returns the size of the resulting interval or zero if the interval is empty
 * or invalid.
 */
static size_t
adjust_addr_range(uintptr_t start, uintptr_t end, size_t psz,
    uintptr_t *new_start, uintptr_t *new_end)
{
	uintptr_t from;		/* start_addr rounded down */
	uintptr_t to;		/* end_addr rounded up */

	/*
	 * Round down the lower address of the range to the beginning of page.
	 */
	if (start_addr == INVALID_ADDRESS) {
		/*
		 * No start_addr specified by -A, the lower part of the
		 * interval does not change.
		 */
		*new_start = start;
	} else {
		from = P2ALIGN(start_addr, psz);
		/*
		 * If end address is outside the range, return an empty
		 * interval
		 */
		if (end < from) {
			*new_start = *new_end = 0;
			return (0);
		}
		/*
		 * The adjusted start address is the maximum of requested start
		 * and the aligned start_addr of the -A range.
		 */
		*new_start = start < from ? from : start;
	}

	/*
	 * Round up the higher address of the range to the end of page.
	 */
	if (end_addr == INVALID_ADDRESS) {
		/*
		 * No end_addr specified by -A, the upper part of the interval
		 * does not change.
		 */
		*new_end = end;
	} else {
		/*
		 * If only one address is specified and it is the beginning of
		 * a segment, get information about the whole segment. This
		 * function is called once per segment and the 'end' argument
		 * is always the end of a segment, so just use the 'end' value.
		 */
		to = (end_addr == start_addr && start == start_addr) ?
		    end :
		    P2ALIGN(end_addr + psz, psz);
		/*
		 * If start address is outside the range, return an empty
		 * interval
		 */
		if (start > to) {
			*new_start = *new_end = 0;
			return (0);
		}
		/*
		 * The adjusted end address is the minimum of requested end
		 * and the aligned end_addr of the -A range.
		 */
		*new_end = end > to ? to : end;
	}

	/*
	 * Make sure that the resulting interval is legal.
	 */
	if (*new_end < *new_start)
		*new_start = *new_end = 0;

	/* Return the size of the interval */
	return (*new_end - *new_start);
}

/*
 * Initialize the memory_chunk data structure with information about a new
 * segment.
 */
static void
mem_chunk_init(memory_chunk_t *chunk, uintptr_t end, size_t psz)
{
	chunk->end_addr = end;
	chunk->page_size = psz;
	chunk->page_index = 0;
	chunk->chunk_start = chunk->chunk_end = 0;
}

/*
 * Create a new chunk of addresses starting from vaddr.
 * Pass the whole chunk to pr_meminfo to collect lgroup and page size
 * information for each page in the chunk.
 */
static void
mem_chunk_get(memory_chunk_t *chunk, uintptr_t vaddr)
{
	page_descr_t *pdp = chunk->page_info;
	size_t psz = chunk->page_size;
	uintptr_t addr = vaddr;
	uint64_t inaddr[MAX_MEMINFO_CNT];
	uint64_t outdata[2 * MAX_MEMINFO_CNT];
	uint_t info[2] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
	uint_t validity[MAX_MEMINFO_CNT];
	uint64_t *dataptr = inaddr;
	uint64_t *outptr = outdata;
	uint_t *valptr = validity;
	int i, j, rc;

	chunk->chunk_start = vaddr;
	chunk->page_index = 0;	/* reset index for the new chunk */

	/*
	 * Fill in MAX_MEMINFO_CNT worth of pages starting from vaddr. Also,
	 * copy starting address of each page to inaddr array for pr_meminfo.
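	 * The page_info entries and the inaddr entries are filled in lockstep,
	 * so the results returned by pr_meminfo() can be matched back to their
	 * pages by index.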
	 */
	for (i = 0, pdp = chunk->page_info;
	    (i < MAX_MEMINFO_CNT) && (addr <= chunk->end_addr);
	    i++, pdp++, dataptr++, addr += psz) {
		*dataptr = (uint64_t)addr;
		pdp->pd_start = addr;
		pdp->pd_lgrp = LGRP_NONE;
		pdp->pd_valid = 0;
		pdp->pd_pagesize = 0;
	}

	/* Mark the number of entries in the chunk and the last address */
	chunk->page_count = i;
	chunk->chunk_end = addr - psz;

	if (interrupt)
		return;

	/* Call meminfo for all collected addresses */
	rc = pr_meminfo(Pr, inaddr, i, info, 2, outdata, validity);
	if (rc < 0) {
		(void) perr("can not get memory information");
		return;
	}

	/* Verify validity of each result and fill in the addrs array */
	pdp = chunk->page_info;
	for (j = 0; j < i; j++, pdp++, valptr++, outptr += 2) {
		/* Skip invalid address pointers */
		if ((*valptr & 1) == 0) {
			continue;
		}

		/* Is lgroup information available? */
		if ((*valptr & 2) != 0) {
			pdp->pd_lgrp = (lgrp_id_t)*outptr;
			pdp->pd_valid = 1;
		}

		/* Is page size information available? */
		if ((*valptr & 4) != 0) {
			pdp->pd_pagesize = *(outptr + 1);
		}
	}
}

/*
 * Starting from address 'vaddr' find the region with pages allocated from the
 * same lgroup.
 *
 * Arguments:
 *	mchunk		Initialized memory chunk structure
 *	vaddr		Starting address of the region
 *	maxaddr		Upper bound of the region
 *	pagesize	Default page size to use
 *	ret_lgrp	On exit contains the lgroup ID of all pages in the
 *			region.
 *
 * Returns:
 *	Size of the contiguous region in bytes
 *	The lgroup ID of all pages in the region in ret_lgrp argument.
 */
static size_t
get_contiguous_region(memory_chunk_t *mchunk, uintptr_t vaddr,
    uintptr_t maxaddr, size_t pagesize, lgrp_id_t *ret_lgrp)
{
	size_t size_contig = 0;
	lgrp_id_t lgrp;			/* Lgroup of the region start */
	lgrp_id_t curr_lgrp;		/* Lgroup of the current page */
	size_t psz = pagesize;		/* Pagesize to use */

	/* Set both lgroup IDs to the lgroup of the first page */
	curr_lgrp = lgrp = addr_to_lgrp(mchunk, vaddr, &psz);

	/*
	 * Starting from vaddr, walk page by page until either the end
	 * of the segment is reached or a page is allocated from a different
	 * lgroup. Also stop if interrupted from keyboard.
	 */
	while ((vaddr < maxaddr) && (curr_lgrp == lgrp) && !interrupt) {
		/*
		 * Get lgroup ID and the page size of the current page.
		 */
		curr_lgrp = addr_to_lgrp(mchunk, vaddr, &psz);
		/* If there is no page size information, use the default */
		if (psz == 0)
			psz = pagesize;

		if (curr_lgrp == lgrp) {
			/*
			 * This page belongs to the contiguous region.
			 * Increase the region size and advance to the new page.
			 */
			size_contig += psz;
			vaddr += psz;
		}
	}

	/* Return the region lgroup ID and the size */
	*ret_lgrp = lgrp;
	return (size_contig);
}

/*
 * Given a virtual address, return its lgroup and page size. If there is meminfo
 * information for an address, use it, otherwise shift the chunk window to the
 * vaddr and create a new chunk with known meminfo information.
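 * The chunk thus acts as a sliding cache over the segment: walking a mapping
 * page by page costs one pr_meminfo() call per MAX_MEMINFO_CNT pages rather
 * than one call per page.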
 */
static lgrp_id_t
addr_to_lgrp(memory_chunk_t *chunk, uintptr_t vaddr, size_t *psz)
{
	page_descr_t *pdp;
	lgrp_id_t lgrp = LGRP_NONE;
	int i;

	*psz = chunk->page_size;

	if (interrupt)
		return (0);

	/*
	 * Is there information about this address? If not, create a new chunk
	 * starting from vaddr and apply pr_meminfo() to the whole chunk.
	 */
	if (vaddr < chunk->chunk_start || vaddr > chunk->chunk_end) {
		/*
		 * This address is outside the chunk, get the new chunk and
		 * collect meminfo information for it.
		 */
		mem_chunk_get(chunk, vaddr);
	}

	/*
	 * Find information about the address.
	 */
	pdp = &chunk->page_info[chunk->page_index];
	for (i = chunk->page_index; i < chunk->page_count; i++, pdp++) {
		if (pdp->pd_start == vaddr) {
			if (pdp->pd_valid) {
				lgrp = pdp->pd_lgrp;
				/*
				 * Override page size information if it is
				 * present.
				 */
				if (pdp->pd_pagesize > 0)
					*psz = pdp->pd_pagesize;
			}
			break;
		}
	}
	/*
	 * Remember where we ended - the next search will start here.
	 * We can query for the lgrp for the same address again, so do not
	 * advance index past the current value.
	 */
	chunk->page_index = i;

	return (lgrp);
}

/* ARGSUSED */
static void
intr(int sig)
{
	interrupt = 1;
}