/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <stdio.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <unistd.h>
#include <ctype.h>
#include <fcntl.h>
#include <string.h>
#include <dirent.h>
#include <limits.h>
#include <link.h>
#include <libelf.h>
#include <sys/types.h>
#include <signal.h>
#include <sys/stat.h>
#include <sys/mkdev.h>
#include <sys/mman.h>
#include <sys/lgrp_user.h>
#include <libproc.h>
#include <libzonecfg.h>

#define	KILOBYTE	1024
#define	MEGABYTE	(KILOBYTE * KILOBYTE)
#define	GIGABYTE	(KILOBYTE * KILOBYTE * KILOBYTE)

/*
 * Round up the value to the nearest kilobyte
 */
#define	ROUNDUP_KB(x)	(((x) + (KILOBYTE - 1)) / KILOBYTE)

/*
 * The alignment should be a power of 2.
 */
#define	P2ALIGN(x, align)	((x) & -(align))

#define	INVALID_ADDRESS		(uintptr_t)(-1)

struct totals {
	ulong_t total_size;
	ulong_t total_swap;
	ulong_t total_rss;
	ulong_t total_anon;
	ulong_t total_locked;
};

/*
 * The -L option requires per-page information. The information is presented
 * in an array of page_descr structures.
 */
typedef struct page_descr {
	uintptr_t	pd_start;	/* start address of a page */
	size_t		pd_pagesize;	/* page size in bytes */
	lgrp_id_t	pd_lgrp;	/* lgroup of memory backing the page */
	int		pd_valid;	/* valid page description if non-zero */
} page_descr_t;

/*
 * Per-page information for a memory chunk.
 * The meminfo(2) system call accepts up to MAX_MEMINFO_CNT pages at once.
 * When we need to scan larger ranges we divide them into MAX_MEMINFO_CNT-sized
 * chunks. The chunk information is stored in the memory_chunk structure.
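 *
 * A rough usage sketch (variable names here are illustrative only):
 *
 *	memory_chunk_t chunk;
 *	size_t psz = pagesize;
 *	lgrp_id_t lgrp;
 *
 *	mem_chunk_init(&chunk, segment_end, pagesize);
 *	for (addr = vaddr; addr < segment_end; addr += psz)
 *		lgrp = addr_to_lgrp(&chunk, addr, &psz);
 *
 * addr_to_lgrp() refills the window via mem_chunk_get() and pr_meminfo()
 * whenever the address falls outside the chunk that was queried last.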
 */
typedef struct memory_chunk {
	page_descr_t	page_info[MAX_MEMINFO_CNT];
	uintptr_t	end_addr;
	uintptr_t	chunk_start;	/* Starting address */
	uintptr_t	chunk_end;	/* chunk_end is always <= end_addr */
	size_t		page_size;
	int		page_index;	/* Current page */
	int		page_count;	/* Number of pages */
} memory_chunk_t;

static volatile int interrupt;

typedef int proc_xmap_f(void *, const prxmap_t *, const char *, int, int);

static int xmapping_iter(struct ps_prochandle *, proc_xmap_f *, void *,
    int);
static int rmapping_iter(struct ps_prochandle *, proc_map_f *, void *);

static int look_map(void *, const prmap_t *, const char *);
static int look_smap(void *, const prxmap_t *, const char *, int, int);
static int look_xmap(void *, const prxmap_t *, const char *, int, int);
static int look_xmap_nopgsz(void *, const prxmap_t *, const char *,
    int, int);

static int gather_map(void *, const prmap_t *, const char *);
static int gather_xmap(void *, const prxmap_t *, const char *, int, int);
static int iter_map(proc_map_f *, void *);
static int iter_xmap(proc_xmap_f *, void *);
static int parse_addr_range(char *, uintptr_t *, uintptr_t *);
static void mem_chunk_init(memory_chunk_t *, uintptr_t, size_t);

static int perr(char *);
static void printK(long, int);
static char *mflags(uint_t);

static size_t get_contiguous_region(memory_chunk_t *, uintptr_t,
    uintptr_t, size_t, lgrp_id_t *);
static void mem_chunk_get(memory_chunk_t *, uintptr_t);
static lgrp_id_t addr_to_lgrp(memory_chunk_t *, uintptr_t, size_t *);
static char *lgrp2str(lgrp_id_t);

static int address_in_range(uintptr_t, uintptr_t, size_t);
static size_t adjust_addr_range(uintptr_t, uintptr_t, size_t,
    uintptr_t *, uintptr_t *);

static int lflag = 0;
static int Lflag = 0;
static int aflag = 0;

/*
 * The -A address range is represented as a pair of addresses
 * <start_addr, end_addr>. Either one of these may be unspecified (set to
 * INVALID_ADDRESS). If both are unspecified, no address range restrictions are
 * in place.
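 *
 * For example (addresses are illustrative): "-A 0x10000,0x20000" limits
 * output to mappings overlapping that range, "-A 0x10000," leaves the end
 * open so everything from 0x10000 up is reported, and "-A ,0x20000" covers
 * the range from 0 to 0x20000. See parse_addr_range() for the accepted forms.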
 */
static uintptr_t start_addr = INVALID_ADDRESS;
static uintptr_t end_addr = INVALID_ADDRESS;

static int addr_width, size_width;
static char *command;
static char *procname;
static struct ps_prochandle *Pr;

static void intr(int);

typedef struct lwpstack {
	lwpid_t	lwps_lwpid;
	stack_t	lwps_stack;
} lwpstack_t;

typedef struct {
	prxmap_t	md_xmap;
	prmap_t		md_map;
	char		*md_objname;
	boolean_t	md_last;
	int		md_doswap;
} mapdata_t;

static mapdata_t *maps;
static int map_count;
static int map_alloc;

static lwpstack_t *stacks = NULL;
static uint_t nstacks = 0;

#define	MAX_TRIES	5

static int
getstack(void *data, const lwpstatus_t *lsp)
{
	int *np = (int *)data;

	if (Plwp_alt_stack(Pr, lsp->pr_lwpid, &stacks[*np].lwps_stack) == 0) {
		stacks[*np].lwps_stack.ss_flags |= SS_ONSTACK;
		stacks[*np].lwps_lwpid = lsp->pr_lwpid;
		(*np)++;
	}

	if (Plwp_main_stack(Pr, lsp->pr_lwpid, &stacks[*np].lwps_stack) == 0) {
		stacks[*np].lwps_lwpid = lsp->pr_lwpid;
		(*np)++;
	}

	return (0);
}

/*
 * We compare the high memory addresses since stacks are faulted in from
 * high memory addresses to low memory addresses, and our prmap_t
 * structures identify only the range of addresses that have been faulted
 * in so far.
 */
static int
cmpstacks(const void *ap, const void *bp)
{
	const lwpstack_t *as = ap;
	const lwpstack_t *bs = bp;
	uintptr_t a = (uintptr_t)as->lwps_stack.ss_sp + as->lwps_stack.ss_size;
	uintptr_t b = (uintptr_t)bs->lwps_stack.ss_sp + bs->lwps_stack.ss_size;

	if (a < b)
		return (1);
	if (a > b)
		return (-1);
	return (0);
}


int
main(int argc, char **argv)
{
	int rflag = 0, sflag = 0, xflag = 0, Fflag = 0;
	int errflg = 0, Sflag = 0;
	int rc = 0;
	int opt;
	const char *bar8 = "-------";
	const char *bar16 = "----------";
	const char *bar;
	struct rlimit rlim;
	struct stat64 statbuf;
	char buf[128];
	int mapfd;

	if ((command = strrchr(argv[0], '/')) != NULL)
		command++;
	else
		command = argv[0];

	while ((opt = getopt(argc, argv, "arsxSlLFA:")) != EOF) {
		switch (opt) {
		case 'a':		/* include shared mappings in -[xS] */
			aflag = 1;
			break;
		case 'r':		/* show reserved mappings */
			rflag = 1;
			break;
		case 's':		/* show hardware page sizes */
			sflag = 1;
			break;
		case 'S':		/* show swap reservations */
			Sflag = 1;
			break;
		case 'x':		/* show extended mappings */
			xflag = 1;
			break;
		case 'l':		/* show unresolved link map names */
			lflag = 1;
			break;
		case 'L':		/* show lgroup information */
			Lflag = 1;
			break;
		case 'F':		/* force grabbing (no O_EXCL) */
			Fflag = PGRAB_FORCE;
			break;
		case 'A':
			if (parse_addr_range(optarg, &start_addr, &end_addr)
			    != 0)
				errflg++;
			break;
		default:
			errflg = 1;
			break;
		}
	}

	argc -= optind;
	argv += optind;

	if ((Sflag && (xflag || rflag || sflag)) || (xflag && rflag) ||
	    (aflag && (!xflag && !Sflag)) ||
	    (Lflag && (xflag || Sflag))) {
		errflg = 1;
	}

	if (errflg || argc <= 0) {
		(void) fprintf(stderr,
		    "usage:\t%s [-rslF] [-A start[,end]] { pid | core } ...\n",
		    command);
		(void) fprintf(stderr,
		    "\t\t(report process address maps)\n");
		(void) fprintf(stderr,
		    "\t%s -L [-rslF] [-A start[,end]] pid ...\n", command);
		(void) fprintf(stderr,
		    "\t\t(report process address maps lgroups mappings)\n");
		(void) fprintf(stderr,
		    "\t%s -x [-aslF] [-A start[,end]] pid ...\n", command);
		(void) fprintf(stderr,
		    "\t\t(show resident/anon/locked mapping details)\n");
		(void) fprintf(stderr,
		    "\t%s -S [-alF] [-A start[,end]] { pid | core } ...\n",
		    command);
		(void) fprintf(stderr,
		    "\t\t(show swap reservations)\n\n");
		(void) fprintf(stderr,
		    "\t-a: include shared mappings in -[xS] summary\n");
		(void) fprintf(stderr,
		    "\t-r: show reserved address maps\n");
		(void) fprintf(stderr,
		    "\t-s: show hardware page sizes\n");
		(void) fprintf(stderr,
		    "\t-l: show unresolved dynamic linker map names\n");
		(void) fprintf(stderr,
		    "\t-F: force grabbing of the target process\n");
		(void) fprintf(stderr,
		    "\t-L: show lgroup mappings\n");
		(void) fprintf(stderr,
		    "\t-A start,end: limit output to the specified range\n");
		return (2);
	}

	/*
	 * Make sure we'll have enough file descriptors to handle a target
	 * that has many many mappings.
	 */
	if (getrlimit(RLIMIT_NOFILE, &rlim) == 0) {
		rlim.rlim_cur = rlim.rlim_max;
		(void) setrlimit(RLIMIT_NOFILE, &rlim);
		(void) enable_extended_FILE_stdio(-1, -1);
	}

	while (argc-- > 0) {
		char *arg;
		int gcode;
		psinfo_t psinfo;
		int tries = 0;
		int prg_gflags = PGRAB_RDONLY;
		int prr_flags = 0;

		if (Lflag) {
			prg_gflags = PGRAB_RETAIN | Fflag;
			prr_flags = PRELEASE_RETAIN;
		}

		if ((Pr = proc_arg_grab(arg = *argv++, PR_ARG_ANY,
		    prg_gflags, &gcode)) == NULL) {
			(void) fprintf(stderr, "%s: cannot examine %s: %s\n",
			    command, arg, Pgrab_error(gcode));
			rc++;
			continue;
		}

		procname = arg;		/* for perr() */

		addr_width = (Pstatus(Pr)->pr_dmodel == PR_MODEL_LP64) ? 16 : 8;
		size_width = (Pstatus(Pr)->pr_dmodel == PR_MODEL_LP64) ? 11 : 8;
		bar = addr_width == 8 ? bar8 : bar16;
		(void) memcpy(&psinfo, Ppsinfo(Pr), sizeof (psinfo_t));
		proc_unctrl_psinfo(&psinfo);

		if (Pstate(Pr) != PS_DEAD) {
			(void) snprintf(buf, sizeof (buf),
			    "/proc/%d/map", (int)psinfo.pr_pid);
			if ((mapfd = open(buf, O_RDONLY)) < 0) {
				(void) fprintf(stderr, "%s: cannot "
				    "examine %s: lost control of "
				    "process\n", command, arg);
				rc++;
				Prelease(Pr, prr_flags);
				continue;
			}
		} else {
			mapfd = -1;
		}

again:
		map_count = 0;

		if (Pstate(Pr) == PS_DEAD) {
			(void) printf("core '%s' of %d:\t%.70s\n",
			    arg, (int)psinfo.pr_pid, psinfo.pr_psargs);

			if (rflag || sflag || xflag || Sflag || Lflag) {
				(void) printf(" -%c option is not compatible "
				    "with core files\n", xflag ? 'x' :
				    sflag ? 's' : rflag ? 'r' :
				    Lflag ? 'L' : 'S');
				Prelease(Pr, prr_flags);
				rc++;
				continue;
			}

		} else {
			(void) printf("%d:\t%.70s\n",
			    (int)psinfo.pr_pid, psinfo.pr_psargs);
		}

		if (Lflag) {
			/*
			 * The implementation of the -L option creates an agent
			 * LWP in the target process address space. The agent
			 * LWP issues meminfo(2) system calls on behalf of the
			 * target process. If we are interrupted prematurely,
			 * the target process remains in the stopped state with
			 * the agent still attached to it. To prevent such a
			 * situation we catch signals from the terminal and
			 * terminate gracefully.
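			 *
			 * The handlers simply set the global 'interrupt' flag
			 * (see intr() at the bottom of this file); the
			 * gather/display loops and the meminfo chunk walker
			 * poll that flag so Prelease() is still reached on
			 * the way out. A signal that was already ignored
			 * (e.g. SIGHUP under nohup(1)) is left ignored, which
			 * is why the old disposition is checked against
			 * SIG_DFL before intr() is installed.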
			 */
			if (sigset(SIGHUP, SIG_IGN) == SIG_DFL)
				(void) sigset(SIGHUP, intr);
			if (sigset(SIGINT, SIG_IGN) == SIG_DFL)
				(void) sigset(SIGINT, intr);
			if (sigset(SIGQUIT, SIG_IGN) == SIG_DFL)
				(void) sigset(SIGQUIT, intr);
			(void) sigset(SIGPIPE, intr);
			(void) sigset(SIGTERM, intr);
		}

		if (!(Pstatus(Pr)->pr_flags & PR_ISSYS)) {
			struct totals t;

			/*
			 * Since we're grabbing the process readonly, we need
			 * to make sure the address space doesn't change during
			 * execution.
			 */
			if (Pstate(Pr) != PS_DEAD) {
				if (tries++ == MAX_TRIES) {
					Prelease(Pr, prr_flags);
					(void) close(mapfd);
					(void) fprintf(stderr, "%s: cannot "
					    "examine %s: address space is "
					    "changing\n", command, arg);
					continue;
				}

				if (fstat64(mapfd, &statbuf) != 0) {
					Prelease(Pr, prr_flags);
					(void) close(mapfd);
					(void) fprintf(stderr, "%s: cannot "
					    "examine %s: lost control of "
					    "process\n", command, arg);
					continue;
				}
			}

			nstacks = psinfo.pr_nlwp * 2;
			stacks = calloc(nstacks, sizeof (stacks[0]));
			if (stacks != NULL) {
				int n = 0;
				(void) Plwp_iter(Pr, getstack, &n);
				qsort(stacks, nstacks, sizeof (stacks[0]),
				    cmpstacks);
			}

			(void) memset(&t, 0, sizeof (t));

			if (Pgetauxval(Pr, AT_BASE) != -1L &&
			    Prd_agent(Pr) == NULL) {
				(void) fprintf(stderr, "%s: warning: "
				    "librtld_db failed to initialize; "
				    "shared library information will not be "
				    "available\n", command);
			}

			/*
			 * Gather data
			 */
			if (xflag)
				rc += xmapping_iter(Pr, gather_xmap, NULL, 0);
			else if (Sflag)
				rc += xmapping_iter(Pr, gather_xmap, NULL, 1);
			else {
				if (rflag)
					rc += rmapping_iter(Pr, gather_map,
					    NULL);
				else if (sflag)
					rc += xmapping_iter(Pr, gather_xmap,
					    NULL, 0);
				else
					rc += Pmapping_iter(Pr, gather_map,
					    NULL);
			}

			/*
			 * Ensure mappings are consistent.
			 */
			if (Pstate(Pr) != PS_DEAD) {
				struct stat64 newbuf;

				if (fstat64(mapfd, &newbuf) != 0 ||
				    memcmp(&newbuf.st_mtim, &statbuf.st_mtim,
				    sizeof (newbuf.st_mtim)) != 0) {
					if (stacks != NULL) {
						free(stacks);
						stacks = NULL;
					}
					goto again;
				}
			}

			/*
			 * Display data.
			 */
			if (xflag) {
				(void) printf("%*s%*s%*s%*s%*s "
				    "%sMode   Mapped File\n",
				    addr_width, "Address",
				    size_width, "Kbytes",
				    size_width, "RSS",
				    size_width, "Anon",
				    size_width, "Locked",
				    sflag ? "Pgsz " : "");

				rc += iter_xmap(sflag ? look_xmap :
				    look_xmap_nopgsz, &t);

				(void) printf("%s%s %s %s %s %s\n",
				    addr_width == 8 ? "-" : "------",
				    bar, bar, bar, bar, bar);

				(void) printf("%stotal Kb", addr_width == 16 ?
				    "        " : "");

				printK(t.total_size, size_width);
				printK(t.total_rss, size_width);
				printK(t.total_anon, size_width);
				printK(t.total_locked, size_width);

				(void) printf("\n");

			} else if (Sflag) {
				(void) printf("%*s%*s%*s Mode"
				    "   Mapped File\n",
				    addr_width, "Address",
				    size_width, "Kbytes",
				    size_width, "Swap");

				rc += iter_xmap(look_xmap_nopgsz, &t);

				(void) printf("%s%s %s %s\n",
				    addr_width == 8 ? "-" : "------",
				    bar, bar, bar);

				(void) printf("%stotal Kb", addr_width == 16 ?
				    "        " : "");

				printK(t.total_size, size_width);
				printK(t.total_swap, size_width);

				(void) printf("\n");

			} else {

				if (rflag) {
					rc += iter_map(look_map, &t);
				} else if (sflag) {
					if (Lflag) {
						(void) printf("%*s %*s %4s"
						    " %-6s %s %s\n",
						    addr_width, "Address",
						    size_width,
						    "Bytes", "Pgsz", "Mode ",
						    "Lgrp", "Mapped File");
						rc += iter_xmap(look_smap, &t);
					} else {
						(void) printf("%*s %*s %4s"
						    " %-6s %s\n",
						    addr_width, "Address",
						    size_width,
						    "Bytes", "Pgsz", "Mode ",
						    "Mapped File");
						rc += iter_xmap(look_smap, &t);
					}
				} else {
					rc += iter_map(look_map, &t);
				}

				(void) printf(" %stotal %*luK\n",
				    addr_width == 16 ?
				    "        " : "",
				    size_width, t.total_size);
			}

			if (stacks != NULL) {
				free(stacks);
				stacks = NULL;
			}

		}

		Prelease(Pr, prr_flags);
		if (mapfd != -1)
			(void) close(mapfd);
	}

	return (rc);
}

static char *
make_name(struct ps_prochandle *Pr, uintptr_t addr, const char *mapname,
    char *buf, size_t bufsz)
{
	const pstatus_t *Psp = Pstatus(Pr);
	const psinfo_t *pi = Ppsinfo(Pr);
	char fname[100];
	struct stat statb;
	int len;
	char zname[ZONENAME_MAX];
	char zpath[PATH_MAX];
	char objname[PATH_MAX];

	if (!lflag && strcmp(mapname, "a.out") == 0 &&
	    Pexecname(Pr, buf, bufsz) != NULL)
		return (buf);

	if (Pobjname(Pr, addr, objname, sizeof (objname)) != NULL) {
		(void) strncpy(buf, objname, bufsz);

		if (lflag)
			return (buf);

		if ((len = resolvepath(buf, buf, bufsz)) > 0) {
			buf[len] = '\0';
			return (buf);
		}

		/*
		 * If the target is in a non-global zone, attempt to prepend
		 * the zone path in order to give the global-zone caller the
		 * real path to the file.
		 */
		if (getzonenamebyid(pi->pr_zoneid, zname,
		    sizeof (zname)) != -1 && strcmp(zname, "global") != 0 &&
		    zone_get_zonepath(zname, zpath, sizeof (zpath)) == Z_OK) {
			(void) strncat(zpath, "/root",
			    MAXPATHLEN - strlen(zpath));

			if (bufsz <= strlen(zpath))
				return (NULL);

			(void) strncpy(buf, zpath, bufsz);
			(void) strncat(buf, objname, bufsz - strlen(zpath));
		}

		if ((len = resolvepath(buf, buf, bufsz)) > 0) {
			buf[len] = '\0';
			return (buf);
		}
	}

	if (Pstate(Pr) != PS_DEAD && *mapname != '\0') {
		(void) snprintf(fname, sizeof (fname), "/proc/%d/path/%s",
		    (int)Psp->pr_pid, mapname);
		len = readlink(fname, buf, bufsz - 1);
		if (len >= 0) {
			buf[len] = '\0';
			return (buf);
		} else {	/* there is no path and readlink() error */
			(void) snprintf(fname, sizeof (fname),
			    "/proc/%d/object/%s", (int)Psp->pr_pid, mapname);
			if (stat(fname, &statb) == 0) {
				dev_t dev = statb.st_dev;
				ino_t ino = statb.st_ino;
				(void) snprintf(buf, bufsz,
				    "dev:%lu,%lu ino:%lu",
				    (ulong_t)major(dev),
				    (ulong_t)minor(dev), ino);
				return (buf);
			}
		}
	}

	return (NULL);
}

static char *
anon_name(char *name, const pstatus_t *Psp,
    uintptr_t vaddr, size_t size, int mflags, int shmid)
{
	if (mflags & MA_ISM) {
		if (shmid == -1)
			(void) snprintf(name, PATH_MAX, " [ %s shmid=null ]",
			    (mflags & MA_NORESERVE) ? "ism" : "dism");
		else
			(void) snprintf(name, PATH_MAX, " [ %s shmid=0x%x ]",
			    (mflags & MA_NORESERVE) ? "ism" : "dism", shmid);
	} else if (mflags & MA_SHM) {
		if (shmid == -1)
			(void) sprintf(name, " [ shmid=null ]");
		else
			(void) sprintf(name, " [ shmid=0x%x ]", shmid);
	} else if (vaddr + size > Psp->pr_stkbase &&
	    vaddr < Psp->pr_stkbase + Psp->pr_stksize) {
		(void) strcpy(name, " [ stack ]");
	} else if ((mflags & MA_ANON) &&
	    vaddr + size > Psp->pr_brkbase &&
	    vaddr < Psp->pr_brkbase + Psp->pr_brksize) {
		(void) strcpy(name, " [ heap ]");
	} else {
		lwpstack_t key, *stk;

		key.lwps_stack.ss_sp = (void *)vaddr;
		key.lwps_stack.ss_size = size;
		if (nstacks > 0 &&
		    (stk = bsearch(&key, stacks, nstacks, sizeof (stacks[0]),
		    cmpstacks)) != NULL) {
			(void) snprintf(name, PATH_MAX, " [ %s tid=%d ]",
			    (stk->lwps_stack.ss_flags & SS_ONSTACK) ?
			    "altstack" : "stack",
			    stk->lwps_lwpid);
		} else if (Pstate(Pr) != PS_DEAD) {
			(void) strcpy(name, " [ anon ]");
		} else {
			return (NULL);
		}
	}

	return (name);
}

static int
rmapping_iter(struct ps_prochandle *Pr, proc_map_f *func, void *cd)
{
	char mapname[PATH_MAX];
	int mapfd, nmap, i, rc;
	struct stat st;
	prmap_t *prmapp, *pmp;
	ssize_t n;

	(void) snprintf(mapname, sizeof (mapname),
	    "/proc/%d/rmap", (int)Pstatus(Pr)->pr_pid);

	if ((mapfd = open(mapname, O_RDONLY)) < 0 || fstat(mapfd, &st) != 0) {
		if (mapfd >= 0)
			(void) close(mapfd);
		return (perr(mapname));
	}

	nmap = st.st_size / sizeof (prmap_t);
	prmapp = malloc((nmap + 1) * sizeof (prmap_t));

	if ((n = pread(mapfd, prmapp, (nmap + 1) * sizeof (prmap_t), 0L)) < 0) {
		(void) close(mapfd);
		free(prmapp);
		return (perr("read rmap"));
	}

	(void) close(mapfd);
	nmap = n / sizeof (prmap_t);

	for (i = 0, pmp = prmapp; i < nmap; i++, pmp++) {
		if ((rc = func(cd, pmp, NULL)) != 0) {
			free(prmapp);
			return (rc);
		}
	}

	free(prmapp);
	return (0);
}

static int
xmapping_iter(struct ps_prochandle *Pr, proc_xmap_f *func, void *cd, int doswap)
{
	char mapname[PATH_MAX];
	int mapfd, nmap, i, rc;
	struct stat st;
	prxmap_t *prmapp, *pmp;
	ssize_t n;

	(void) snprintf(mapname, sizeof (mapname),
	    "/proc/%d/xmap", (int)Pstatus(Pr)->pr_pid);

	if ((mapfd = open(mapname, O_RDONLY)) < 0 || fstat(mapfd, &st) != 0) {
		if (mapfd >= 0)
			(void) close(mapfd);
		return (perr(mapname));
	}

	nmap = st.st_size / sizeof (prxmap_t);
	nmap *= 2;
again:
	prmapp = malloc((nmap + 1) * sizeof (prxmap_t));

	if ((n = pread(mapfd, prmapp, (nmap + 1) * sizeof (prxmap_t), 0)) < 0) {
		(void) close(mapfd);
		free(prmapp);
		return (perr("read xmap"));
	}

	if (nmap < n / sizeof (prxmap_t)) {
		free(prmapp);
		nmap *= 2;
		goto again;
	}

	(void) close(mapfd);
	nmap = n / sizeof (prxmap_t);

	for (i = 0, pmp = prmapp; i < nmap; i++, pmp++) {
		if ((rc = func(cd, pmp, NULL, i == nmap - 1, doswap)) != 0) {
			free(prmapp);
			return (rc);
		}
	}

	/*
	 * Mark the last element.
	 */
	if (map_count > 0)
		maps[map_count - 1].md_last = B_TRUE;

	free(prmapp);
	return (0);
}

/*ARGSUSED*/
static int
look_map(void *data, const prmap_t *pmp, const char *object_name)
{
	struct totals *t = data;
	const pstatus_t *Psp = Pstatus(Pr);
	size_t size;
	char mname[PATH_MAX];
	char *lname = NULL;
	size_t psz = pmp->pr_pagesize;
	uintptr_t vaddr = pmp->pr_vaddr;
	uintptr_t segment_end = vaddr + pmp->pr_size;
	lgrp_id_t lgrp;
	memory_chunk_t mchunk;

	/*
	 * If the mapping is not anon or not part of the heap, make a name
	 * for it. We don't want to report the heap as a.out's data.
	 */
	if (!(pmp->pr_mflags & MA_ANON) ||
	    segment_end <= Psp->pr_brkbase ||
	    pmp->pr_vaddr >= Psp->pr_brkbase + Psp->pr_brksize) {
		lname = make_name(Pr, pmp->pr_vaddr, pmp->pr_mapname,
		    mname, sizeof (mname));
	}

	if (lname == NULL &&
	    ((pmp->pr_mflags & MA_ANON) || Pstate(Pr) == PS_DEAD)) {
		lname = anon_name(mname, Psp, pmp->pr_vaddr,
		    pmp->pr_size, pmp->pr_mflags, pmp->pr_shmid);
	}

	/*
	 * Adjust the address range if -A is specified.
	 */
	size = adjust_addr_range(pmp->pr_vaddr, segment_end, psz,
	    &vaddr, &segment_end);

	if (size == 0)
		return (0);

	if (!Lflag) {
		/*
		 * Display the whole mapping
		 */
		size = ROUNDUP_KB(size);

		(void) printf(lname ?
		    "%.*lX %*luK %-6s %s\n" :
		    "%.*lX %*luK %s\n",
		    addr_width, vaddr,
		    size_width - 1, size, mflags(pmp->pr_mflags), lname);

		t->total_size += size;
		return (0);
	}

	/*
	 * We need to display lgroups backing physical memory, so we break the
	 * segment into individual pages and coalesce pages with the same
	 * lgroup into one "segment".
	 */

	/*
	 * Initialize address descriptions for the mapping.
	 */
	mem_chunk_init(&mchunk, segment_end, psz);
	size = 0;

	/*
	 * Walk mapping (page by page) and display contiguous ranges of memory
	 * allocated to same lgroup.
	 */
	do {
		size_t size_contig;

		/*
		 * Get contiguous region of memory starting from vaddr
		 * allocated from the same lgroup.
		 */
		size_contig = get_contiguous_region(&mchunk, vaddr,
		    segment_end, pmp->pr_pagesize, &lgrp);

		(void) printf(lname ? "%.*lX %*luK %-6s%s %s\n" :
		    "%.*lX %*luK %s %s\n",
		    addr_width, vaddr,
		    size_width - 1, size_contig / KILOBYTE,
		    mflags(pmp->pr_mflags),
		    lgrp2str(lgrp), lname);

		vaddr += size_contig;
		size += size_contig;
	} while (vaddr < segment_end && !interrupt);

	/* Update the total size */
	t->total_size += ROUNDUP_KB(size);
	return (0);
}

static void
printK(long value, int width)
{
	if (value == 0)
		(void) printf(width == 8 ? "       -" : "          -");
	else
		(void) printf(" %*lu", width - 1, value);
}

static const char *
pagesize(const prxmap_t *pmp)
{
	int pagesize = pmp->pr_hatpagesize;
	static char buf[32];

	if (pagesize == 0) {
		return ("-");	/* no underlying HAT mapping */
	}

	if (pagesize >= KILOBYTE && (pagesize % KILOBYTE) == 0) {
		if ((pagesize % GIGABYTE) == 0)
			(void) snprintf(buf, sizeof (buf), "%dG",
			    pagesize / GIGABYTE);
		else if ((pagesize % MEGABYTE) == 0)
			(void) snprintf(buf, sizeof (buf), "%dM",
			    pagesize / MEGABYTE);
		else
			(void) snprintf(buf, sizeof (buf), "%dK",
			    pagesize / KILOBYTE);
	} else
		(void) snprintf(buf, sizeof (buf), "%db", pagesize);

	return (buf);
}

/*ARGSUSED*/
static int
look_smap(void *data,
    const prxmap_t *pmp,
    const char *object_name,
    int last, int doswap)
{
	struct totals *t = data;
	const pstatus_t *Psp = Pstatus(Pr);
	size_t size;
	char mname[PATH_MAX];
	char *lname = NULL;
	const char *format;
	size_t psz = pmp->pr_pagesize;
	uintptr_t vaddr = pmp->pr_vaddr;
	uintptr_t segment_end = vaddr + pmp->pr_size;
	lgrp_id_t lgrp;
	memory_chunk_t mchunk;

	/*
	 * If the mapping is not anon or not part of the heap, make a name
	 * for it. We don't want to report the heap as a.out's data.
	 */
	if (!(pmp->pr_mflags & MA_ANON) ||
	    pmp->pr_vaddr + pmp->pr_size <= Psp->pr_brkbase ||
	    pmp->pr_vaddr >= Psp->pr_brkbase + Psp->pr_brksize) {
		lname = make_name(Pr, pmp->pr_vaddr, pmp->pr_mapname,
		    mname, sizeof (mname));
	}

	if (lname == NULL &&
	    ((pmp->pr_mflags & MA_ANON) || Pstate(Pr) == PS_DEAD)) {
		lname = anon_name(mname, Psp, pmp->pr_vaddr,
		    pmp->pr_size, pmp->pr_mflags, pmp->pr_shmid);
	}

	/*
	 * Adjust the address range if -A is specified.
	 */
	size = adjust_addr_range(pmp->pr_vaddr, segment_end, psz,
	    &vaddr, &segment_end);

	if (size == 0)
		return (0);

	if (!Lflag) {
		/*
		 * Display the whole mapping
		 */
		if (lname != NULL)
			format = "%.*lX %*luK %4s %-6s %s\n";
		else
			format = "%.*lX %*luK %4s %s\n";

		size = ROUNDUP_KB(size);

		(void) printf(format, addr_width, vaddr, size_width - 1, size,
		    pagesize(pmp), mflags(pmp->pr_mflags), lname);

		t->total_size += size;
		return (0);
	}

	if (lname != NULL)
		format = "%.*lX %*luK %4s %-6s%s %s\n";
	else
		format = "%.*lX %*luK %4s%s %s\n";

	/*
	 * We need to display lgroups backing physical memory, so we break the
	 * segment into individual pages and coalesce pages with the same
	 * lgroup into one "segment".
	 */

	/*
	 * Initialize address descriptions for the mapping.
	 */
	mem_chunk_init(&mchunk, segment_end, psz);
	size = 0;

	/*
	 * Walk mapping (page by page) and display contiguous ranges of memory
	 * allocated to same lgroup.
	 */
	do {
		size_t size_contig;

		/*
		 * Get contiguous region of memory starting from vaddr
		 * allocated from the same lgroup.
		 */
		size_contig = get_contiguous_region(&mchunk, vaddr,
		    segment_end, pmp->pr_pagesize, &lgrp);

		(void) printf(format, addr_width, vaddr,
		    size_width - 1, size_contig / KILOBYTE,
		    pagesize(pmp), mflags(pmp->pr_mflags),
		    lgrp2str(lgrp), lname);

		vaddr += size_contig;
		size += size_contig;
	} while (vaddr < segment_end && !interrupt);

	t->total_size += ROUNDUP_KB(size);
	return (0);
}

#define	ANON(x)	((aflag || (((x)->pr_mflags & MA_SHARED) == 0)) ? \
	((x)->pr_anon) : 0)

/*ARGSUSED*/
static int
look_xmap(void *data,
    const prxmap_t *pmp,
    const char *object_name,
    int last, int doswap)
{
	struct totals *t = data;
	const pstatus_t *Psp = Pstatus(Pr);
	char mname[PATH_MAX];
	char *lname = NULL;
	char *ln;

	/*
	 * If the mapping is not anon or not part of the heap, make a name
	 * for it. We don't want to report the heap as a.out's data.
	 */
	if (!(pmp->pr_mflags & MA_ANON) ||
	    pmp->pr_vaddr + pmp->pr_size <= Psp->pr_brkbase ||
	    pmp->pr_vaddr >= Psp->pr_brkbase + Psp->pr_brksize) {
		lname = make_name(Pr, pmp->pr_vaddr, pmp->pr_mapname,
		    mname, sizeof (mname));
	}

	if (lname != NULL) {
		if ((ln = strrchr(lname, '/')) != NULL)
			lname = ln + 1;
	} else if ((pmp->pr_mflags & MA_ANON) || Pstate(Pr) == PS_DEAD) {
		lname = anon_name(mname, Psp, pmp->pr_vaddr,
		    pmp->pr_size, pmp->pr_mflags, pmp->pr_shmid);
	}

	(void) printf("%.*lX", addr_width, (ulong_t)pmp->pr_vaddr);

	printK(ROUNDUP_KB(pmp->pr_size), size_width);
	printK(pmp->pr_rss * (pmp->pr_pagesize / KILOBYTE), size_width);
	printK(ANON(pmp) * (pmp->pr_pagesize / KILOBYTE), size_width);
	printK(pmp->pr_locked * (pmp->pr_pagesize / KILOBYTE), size_width);
	(void) printf(lname ? " %4s %-6s %s\n" : " %4s %s\n",
	    pagesize(pmp), mflags(pmp->pr_mflags), lname);

	t->total_size += ROUNDUP_KB(pmp->pr_size);
	t->total_rss += pmp->pr_rss * (pmp->pr_pagesize / KILOBYTE);
	t->total_anon += ANON(pmp) * (pmp->pr_pagesize / KILOBYTE);
	t->total_locked += (pmp->pr_locked * (pmp->pr_pagesize / KILOBYTE));

	return (0);
}

/*ARGSUSED*/
static int
look_xmap_nopgsz(void *data,
    const prxmap_t *pmp,
    const char *object_name,
    int last, int doswap)
{
	struct totals *t = data;
	const pstatus_t *Psp = Pstatus(Pr);
	char mname[PATH_MAX];
	char *lname = NULL;
	char *ln;
	static uintptr_t prev_vaddr;
	static size_t prev_size;
	static offset_t prev_offset;
	static int prev_mflags;
	static char *prev_lname;
	static char prev_mname[PATH_MAX];
	static ulong_t prev_rss;
	static ulong_t prev_anon;
	static ulong_t prev_locked;
	static ulong_t prev_swap;
	int merged = 0;
	static int first = 1;
	ulong_t swap = 0;
	int kperpage;

	/*
	 * Calculate swap reservations
	 */
	if (pmp->pr_mflags & MA_SHARED) {
		if (aflag && (pmp->pr_mflags & MA_NORESERVE) == 0) {
			/* Swap reserved for entire non-ism SHM */
			swap = pmp->pr_size / pmp->pr_pagesize;
		}
	} else if (pmp->pr_mflags & MA_NORESERVE) {
		/* Swap reserved on fault for each anon page */
		swap = pmp->pr_anon;
	} else if (pmp->pr_mflags & MA_WRITE) {
		/* Swap reserved for entire writable segment */
		swap = pmp->pr_size / pmp->pr_pagesize;
	}

	/*
	 * If the mapping is not anon or not part of the heap, make a name
	 * for it. We don't want to report the heap as a.out's data.
	 */
	if (!(pmp->pr_mflags & MA_ANON) ||
	    pmp->pr_vaddr + pmp->pr_size <= Psp->pr_brkbase ||
	    pmp->pr_vaddr >= Psp->pr_brkbase + Psp->pr_brksize) {
		lname = make_name(Pr, pmp->pr_vaddr, pmp->pr_mapname,
		    mname, sizeof (mname));
	}

	if (lname != NULL) {
		if ((ln = strrchr(lname, '/')) != NULL)
			lname = ln + 1;
	} else if ((pmp->pr_mflags & MA_ANON) || Pstate(Pr) == PS_DEAD) {
		lname = anon_name(mname, Psp, pmp->pr_vaddr,
		    pmp->pr_size, pmp->pr_mflags, pmp->pr_shmid);
	}

	kperpage = pmp->pr_pagesize / KILOBYTE;

	t->total_size += ROUNDUP_KB(pmp->pr_size);
	t->total_rss += pmp->pr_rss * kperpage;
	t->total_anon += ANON(pmp) * kperpage;
	t->total_locked += pmp->pr_locked * kperpage;
	t->total_swap += swap * kperpage;

	if (first == 1) {
		first = 0;
		prev_vaddr = pmp->pr_vaddr;
		prev_size = pmp->pr_size;
		prev_offset = pmp->pr_offset;
		prev_mflags = pmp->pr_mflags;
		if (lname == NULL) {
			prev_lname = NULL;
		} else {
			(void) strcpy(prev_mname, lname);
			prev_lname = prev_mname;
		}
		prev_rss = pmp->pr_rss * kperpage;
		prev_anon = ANON(pmp) * kperpage;
		prev_locked = pmp->pr_locked * kperpage;
		prev_swap = swap * kperpage;
		if (last == 0) {
			return (0);
		}
		merged = 1;
	} else if (prev_vaddr + prev_size == pmp->pr_vaddr &&
	    prev_mflags == pmp->pr_mflags &&
	    ((prev_mflags & MA_ISM) ||
	    prev_offset + prev_size == pmp->pr_offset) &&
	    ((lname == NULL && prev_lname == NULL) ||
	    (lname != NULL && prev_lname != NULL &&
	    strcmp(lname, prev_lname) == 0))) {
		prev_size += pmp->pr_size;
		prev_rss += pmp->pr_rss * kperpage;
		prev_anon += ANON(pmp) * kperpage;
		prev_locked += pmp->pr_locked * kperpage;
		prev_swap += swap * kperpage;
		if (last == 0) {
			return (0);
		}
		merged = 1;
	}

	(void) printf("%.*lX", addr_width, (ulong_t)prev_vaddr);
	printK(ROUNDUP_KB(prev_size), size_width);

	if (doswap)
		printK(prev_swap, size_width);
	else {
		printK(prev_rss, size_width);
		printK(prev_anon, size_width);
		printK(prev_locked, size_width);
	}
	(void) printf(prev_lname ? " %-6s %s\n" : "%s\n",
	    mflags(prev_mflags), prev_lname);

	if (last == 0) {
		prev_vaddr = pmp->pr_vaddr;
		prev_size = pmp->pr_size;
		prev_offset = pmp->pr_offset;
		prev_mflags = pmp->pr_mflags;
		if (lname == NULL) {
			prev_lname = NULL;
		} else {
			(void) strcpy(prev_mname, lname);
			prev_lname = prev_mname;
		}
		prev_rss = pmp->pr_rss * kperpage;
		prev_anon = ANON(pmp) * kperpage;
		prev_locked = pmp->pr_locked * kperpage;
		prev_swap = swap * kperpage;
	} else if (merged == 0) {
		(void) printf("%.*lX", addr_width, (ulong_t)pmp->pr_vaddr);
		printK(ROUNDUP_KB(pmp->pr_size), size_width);
		if (doswap)
			printK(swap * kperpage, size_width);
		else {
			printK(pmp->pr_rss * kperpage, size_width);
			printK(ANON(pmp) * kperpage, size_width);
			printK(pmp->pr_locked * kperpage, size_width);
		}
		(void) printf(lname ? " %-6s %s\n" : " %s\n",
		    mflags(pmp->pr_mflags), lname);
	}

	if (last != 0)
		first = 1;

	return (0);
}

static int
perr(char *s)
{
	if (s)
		(void) fprintf(stderr, "%s: ", procname);
	else
		s = procname;
	perror(s);
	return (1);
}

static char *
mflags(uint_t arg)
{
	static char code_buf[80];
	char *str = code_buf;

	/*
	 * rwxsR
	 *
	 * r - segment is readable
	 * w - segment is writable
	 * x - segment is executable
	 * s - segment is shared
	 * R - segment is mapped MAP_NORESERVE
	 *
	 */
	(void) sprintf(str, "%c%c%c%c%c%c",
	    arg & MA_READ ? 'r' : '-',
	    arg & MA_WRITE ? 'w' : '-',
	    arg & MA_EXEC ? 'x' : '-',
	    arg & MA_SHARED ? 's' : '-',
	    arg & MA_NORESERVE ? 'R' : '-',
	    arg & MA_RESERVED1 ? '*' : ' ');

	return (str);
}

static mapdata_t *
nextmap(void)
{
	mapdata_t *newmaps;
	int next;

	if (map_count == map_alloc) {
		if (map_alloc == 0)
			next = 16;
		else
			next = map_alloc * 2;

		newmaps = realloc(maps, next * sizeof (mapdata_t));
		if (newmaps == NULL) {
			(void) perr("failed to allocate maps");
			exit(1);
		}
		(void) memset(newmaps + map_alloc, '\0',
		    (next - map_alloc) * sizeof (mapdata_t));

		map_alloc = next;
		maps = newmaps;
	}

	return (&maps[map_count++]);
}

/*ARGSUSED*/
static int
gather_map(void *ignored, const prmap_t *map, const char *objname)
{
	mapdata_t *data;

	/* Skip mappings which are outside the range specified by -A */
	if (!address_in_range(map->pr_vaddr,
	    map->pr_vaddr + map->pr_size, map->pr_pagesize))
		return (0);

	data = nextmap();
	data->md_map = *map;
	if (data->md_objname != NULL)
		free(data->md_objname);
	data->md_objname = objname ? strdup(objname) : NULL;

	return (0);
}

/*ARGSUSED*/
static int
gather_xmap(void *ignored, const prxmap_t *xmap, const char *objname,
    int last, int doswap)
{
	mapdata_t *data;

	/* Skip mappings which are outside the range specified by -A */
	if (!address_in_range(xmap->pr_vaddr,
	    xmap->pr_vaddr + xmap->pr_size, xmap->pr_pagesize))
		return (0);

	data = nextmap();
	data->md_xmap = *xmap;
	if (data->md_objname != NULL)
		free(data->md_objname);
	data->md_objname = objname ? strdup(objname) : NULL;
	data->md_last = last;
	data->md_doswap = doswap;

	return (0);
}

static int
iter_map(proc_map_f *func, void *data)
{
	int i;
	int ret;

	for (i = 0; i < map_count; i++) {
		if (interrupt)
			break;
		if ((ret = func(data, &maps[i].md_map,
		    maps[i].md_objname)) != 0)
			return (ret);
	}

	return (0);
}

static int
iter_xmap(proc_xmap_f *func, void *data)
{
	int i;
	int ret;

	for (i = 0; i < map_count; i++) {
		if (interrupt)
			break;
		if ((ret = func(data, &maps[i].md_xmap, maps[i].md_objname,
		    maps[i].md_last, maps[i].md_doswap)) != 0)
			return (ret);
	}

	return (0);
}

/*
 * Convert lgroup ID to string.
 * Returns a dash when the lgroup ID is invalid.
 */
static char *
lgrp2str(lgrp_id_t lgrp)
{
	static char lgrp_buf[20];
	char *str = lgrp_buf;

	(void) sprintf(str, lgrp == LGRP_NONE ? "   -" : "%4d", lgrp);
	return (str);
}

/*
 * Parse address range specification for the -A option.
 * The address range may have the following forms:
 *
 * address
 *	start and end are set to address
 * address,
 *	start is set to address, end is set to INVALID_ADDRESS
 * ,address
 *	start is set to 0, end is set to address
 * address1,address2
 *	start is set to address1, end is set to address2
 *
 */
static int
parse_addr_range(char *input_str, uintptr_t *start, uintptr_t *end)
{
	char *startp = input_str;
	char *endp = strchr(input_str, ',');
	ulong_t s = (ulong_t)INVALID_ADDRESS;
	ulong_t e = (ulong_t)INVALID_ADDRESS;

	if (endp != NULL) {
		/*
		 * Comma is present. If there is nothing after the comma, the
		 * end remains set at INVALID_ADDRESS. Otherwise it is set to
		 * the value after the comma.
		 */
		*endp = '\0';
		endp++;

		if ((*endp != '\0') && sscanf(endp, "%lx", &e) != 1)
			return (1);
	}

	if (startp != NULL) {
		/*
		 * Read the start address, if it is specified. If the address
		 * is missing, start will be set to INVALID_ADDRESS.
		 */
		if ((*startp != '\0') && sscanf(startp, "%lx", &s) != 1)
			return (1);
	}

	/* If there is no comma, end becomes equal to start */
	if (endp == NULL)
		e = s;

	/*
	 * ,end implies 0..end range
	 */
	if (e != INVALID_ADDRESS && s == INVALID_ADDRESS)
		s = 0;

	*start = (uintptr_t)s;
	*end = (uintptr_t)e;

	/* Return error if neither start nor end address were specified */
	return (! (s != INVALID_ADDRESS || e != INVALID_ADDRESS));
}

/*
 * Check whether any portion of the [start, end] segment is within the
 * [start_addr, end_addr] range.
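 *
 * A worked example (illustrative numbers, psz = 0x1000): with
 * "-A 0x10800,0x12800", start_addr aligns down to 0x10000 and
 * end_addr + psz aligns down to 0x13000, so a segment that ends below
 * 0x10000 or starts above 0x13000 is skipped, and anything overlapping
 * [0x10000, 0x13000] is kept.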
 *
 * Return values:
 *	0 - address is outside the range
 *	1 - address is within the range
 */
static int
address_in_range(uintptr_t start, uintptr_t end, size_t psz)
{
	int rc = 1;

	/*
	 * Nothing to do if there is no address range specified with -A
	 */
	if (start_addr != INVALID_ADDRESS || end_addr != INVALID_ADDRESS) {
		/* The segment end is below the range start */
		if ((start_addr != INVALID_ADDRESS) &&
		    (end < P2ALIGN(start_addr, psz)))
			rc = 0;

		/* The segment start is above the range end */
		if ((end_addr != INVALID_ADDRESS) &&
		    (start > P2ALIGN(end_addr + psz, psz)))
			rc = 0;
	}
	return (rc);
}

/*
 * Returns an intersection of the [start, end] interval and the range specified
 * by the -A flag [start_addr, end_addr]. Unspecified parts of the address
 * range have the value INVALID_ADDRESS.
 *
 * The start_addr address is rounded down to the beginning of a page and
 * end_addr is rounded up to the end of a page.
 *
 * Returns the size of the resulting interval or zero if the interval is empty
 * or invalid.
 */
static size_t
adjust_addr_range(uintptr_t start, uintptr_t end, size_t psz,
    uintptr_t *new_start, uintptr_t *new_end)
{
	uintptr_t from;		/* start_addr rounded down */
	uintptr_t to;		/* end_addr rounded up */

	/*
	 * Round down the lower address of the range to the beginning of a
	 * page.
	 */
	if (start_addr == INVALID_ADDRESS) {
		/*
		 * No start_addr specified by -A, the lower part of the
		 * interval does not change.
		 */
		*new_start = start;
	} else {
		from = P2ALIGN(start_addr, psz);
		/*
		 * If end address is outside the range, return an empty
		 * interval
		 */
		if (end < from) {
			*new_start = *new_end = 0;
			return (0);
		}
		/*
		 * The adjusted start address is the maximum of requested start
		 * and the aligned start_addr of the -A range.
		 */
		*new_start = start < from ? from : start;
	}

	/*
	 * Round up the higher address of the range to the end of a page.
	 */
	if (end_addr == INVALID_ADDRESS) {
		/*
		 * No end_addr specified by -A, the upper part of the interval
		 * does not change.
		 */
		*new_end = end;
	} else {
		/*
		 * If only one address is specified and it is the beginning of
		 * a segment, get information about the whole segment. This
		 * function is called once per segment and the 'end' argument
		 * is always the end of a segment, so just use the 'end' value.
		 */
		to = (end_addr == start_addr && start == start_addr) ?
		    end :
		    P2ALIGN(end_addr + psz, psz);
		/*
		 * If start address is outside the range, return an empty
		 * interval
		 */
		if (start > to) {
			*new_start = *new_end = 0;
			return (0);
		}
		/*
		 * The adjusted end address is the minimum of requested end
		 * and the aligned end_addr of the -A range.
		 */
		*new_end = end > to ? to : end;
	}

	/*
	 * Make sure that the resulting interval is legal.
	 */
	if (*new_end < *new_start)
		*new_start = *new_end = 0;

	/* Return the size of the interval */
	return (*new_end - *new_start);
}

/*
 * Initialize the memory chunk data structure with information about a new
 * segment.
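 * Only the segment end address and the default page size are recorded here;
 * chunk_start and chunk_end stay at zero, so the first addr_to_lgrp() lookup
 * normally falls outside the window and triggers mem_chunk_get() to populate
 * the page_info array.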
 */
static void
mem_chunk_init(memory_chunk_t *chunk, uintptr_t end, size_t psz)
{
	chunk->end_addr = end;
	chunk->page_size = psz;
	chunk->page_index = 0;
	chunk->chunk_start = chunk->chunk_end = 0;
}

/*
 * Create a new chunk of addresses starting from vaddr.
 * Pass the whole chunk to pr_meminfo to collect lgroup and page size
 * information for each page in the chunk.
 */
static void
mem_chunk_get(memory_chunk_t *chunk, uintptr_t vaddr)
{
	page_descr_t *pdp = chunk->page_info;
	size_t psz = chunk->page_size;
	uintptr_t addr = vaddr;
	uint64_t inaddr[MAX_MEMINFO_CNT];
	uint64_t outdata[2 * MAX_MEMINFO_CNT];
	uint_t info[2] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
	uint_t validity[MAX_MEMINFO_CNT];
	uint64_t *dataptr = inaddr;
	uint64_t *outptr = outdata;
	uint_t *valptr = validity;
	int i, j, rc;

	chunk->chunk_start = vaddr;
	chunk->page_index = 0;		/* reset index for the new chunk */

	/*
	 * Fill in MAX_MEMINFO_CNT worth of pages starting from vaddr. Also,
	 * copy starting address of each page to inaddr array for pr_meminfo.
	 */
	for (i = 0, pdp = chunk->page_info;
	    (i < MAX_MEMINFO_CNT) && (addr <= chunk->end_addr);
	    i++, pdp++, dataptr++, addr += psz) {
		*dataptr = (uint64_t)addr;
		pdp->pd_start = addr;
		pdp->pd_lgrp = LGRP_NONE;
		pdp->pd_valid = 0;
		pdp->pd_pagesize = 0;
	}

	/* Mark the number of entries in the chunk and the last address */
	chunk->page_count = i;
	chunk->chunk_end = addr - psz;

	if (interrupt)
		return;

	/* Call meminfo for all collected addresses */
	rc = pr_meminfo(Pr, inaddr, i, info, 2, outdata, validity);
	if (rc < 0) {
		(void) perr("can not get memory information");
		return;
	}

	/* Verify validity of each result and fill in the addrs array */
	pdp = chunk->page_info;
	for (j = 0; j < i; j++, pdp++, valptr++, outptr += 2) {
		/* Skip invalid address pointers */
		if ((*valptr & 1) == 0) {
			continue;
		}

		/* Is lgroup information available? */
		if ((*valptr & 2) != 0) {
			pdp->pd_lgrp = (lgrp_id_t)*outptr;
			pdp->pd_valid = 1;
		}

		/* Is page size information available? */
		if ((*valptr & 4) != 0) {
			pdp->pd_pagesize = *(outptr + 1);
		}
	}
}

/*
 * Starting from address 'vaddr' find the region with pages allocated from the
 * same lgroup.
 *
 * Arguments:
 *	mchunk		Initialized memory chunk structure
 *	vaddr		Starting address of the region
 *	maxaddr		Upper bound of the region
 *	pagesize	Default page size to use
 *	ret_lgrp	On exit contains the lgroup ID of all pages in the
 *			region.
 *
 * Returns:
 *	Size of the contiguous region in bytes
 *	The lgroup ID of all pages in the region in ret_lgrp argument.
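 *
 * Illustrative walk, with made-up numbers: for a 24K range whose first two
 * 8K pages live on lgroup 1 and whose third page lives on lgroup 2, the
 * first call returns 16384 with *ret_lgrp set to 1, and the caller's next
 * call (vaddr advanced by 16K) returns 8192 with *ret_lgrp set to 2.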
 */
static size_t
get_contiguous_region(memory_chunk_t *mchunk, uintptr_t vaddr,
    uintptr_t maxaddr, size_t pagesize, lgrp_id_t *ret_lgrp)
{
	size_t size_contig = 0;
	lgrp_id_t lgrp;			/* Lgroup of the region start */
	lgrp_id_t curr_lgrp;		/* Lgroup of the current page */
	size_t psz = pagesize;		/* Pagesize to use */

	/* Set both lgroup IDs to the lgroup of the first page */
	curr_lgrp = lgrp = addr_to_lgrp(mchunk, vaddr, &psz);

	/*
	 * Starting from vaddr, walk page by page until either the end
	 * of the segment is reached or a page is allocated from a different
	 * lgroup. Also stop if interrupted from keyboard.
	 */
	while ((vaddr < maxaddr) && (curr_lgrp == lgrp) && !interrupt) {
		/*
		 * Get lgroup ID and the page size of the current page.
		 */
		curr_lgrp = addr_to_lgrp(mchunk, vaddr, &psz);
		/* If there is no page size information, use the default */
		if (psz == 0)
			psz = pagesize;

		if (curr_lgrp == lgrp) {
			/*
			 * This page belongs to the contiguous region.
			 * Increase the region size and advance to the new
			 * page.
			 */
			size_contig += psz;
			vaddr += psz;
		}
	}

	/* Return the region lgroup ID and the size */
	*ret_lgrp = lgrp;
	return (size_contig);
}

/*
 * Given a virtual address, return its lgroup and page size. If there is
 * meminfo information for an address, use it, otherwise shift the chunk
 * window to the vaddr and create a new chunk with known meminfo information.
 */
static lgrp_id_t
addr_to_lgrp(memory_chunk_t *chunk, uintptr_t vaddr, size_t *psz)
{
	page_descr_t *pdp;
	lgrp_id_t lgrp = LGRP_NONE;
	int i;

	*psz = chunk->page_size;

	if (interrupt)
		return (0);

	/*
	 * Is there information about this address? If not, create a new chunk
	 * starting from vaddr and apply pr_meminfo() to the whole chunk.
	 */
	if (vaddr < chunk->chunk_start || vaddr > chunk->chunk_end) {
		/*
		 * This address is outside the chunk, get the new chunk and
		 * collect meminfo information for it.
		 */
		mem_chunk_get(chunk, vaddr);
	}

	/*
	 * Find information about the address.
	 */
	pdp = &chunk->page_info[chunk->page_index];
	for (i = chunk->page_index; i < chunk->page_count; i++, pdp++) {
		if (pdp->pd_start == vaddr) {
			if (pdp->pd_valid) {
				lgrp = pdp->pd_lgrp;
				/*
				 * Override page size information if it is
				 * present.
				 */
				if (pdp->pd_pagesize > 0)
					*psz = pdp->pd_pagesize;
			}
			break;
		}
	}
	/*
	 * Remember where we ended - the next search will start here.
	 * We can query for the lgrp for the same address again, so do not
	 * advance index past the current value.
	 */
	chunk->page_index = i;

	return (lgrp);
}

/* ARGSUSED */
static void
intr(int sig)
{
	interrupt = 1;
}
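
/*
 * Usage sketch for this command (output shape is illustrative, not verbatim):
 *
 *	<cmd> -x <pid>	per-mapping Kbytes/RSS/Anon/Locked columns printed by
 *			look_xmap()/look_xmap_nopgsz() plus a "total Kb" line
 *			accumulated in struct totals.
 *	<cmd> -S <pid>	Kbytes/Swap columns, with swap computed in
 *			look_xmap_nopgsz() from the MA_* mapping flags.
 *	<cmd> -L <pid>	per-lgroup breakdown; look_map()/look_smap() split
 *			each mapping with get_contiguous_region().
 */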