1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 #include <stdio.h> 30 #include <stdio_ext.h> 31 #include <stdlib.h> 32 #include <unistd.h> 33 #include <ctype.h> 34 #include <fcntl.h> 35 #include <string.h> 36 #include <dirent.h> 37 #include <limits.h> 38 #include <link.h> 39 #include <libelf.h> 40 #include <sys/types.h> 41 #include <signal.h> 42 #include <sys/stat.h> 43 #include <sys/mkdev.h> 44 #include <sys/mman.h> 45 #include <sys/lgrp_user.h> 46 #include <libproc.h> 47 48 #define KILOBYTE 1024 49 #define MEGABYTE (KILOBYTE * KILOBYTE) 50 #define GIGABYTE (KILOBYTE * KILOBYTE * KILOBYTE) 51 52 /* 53 * Round up the value to the nearest kilobyte 54 */ 55 #define ROUNDUP_KB(x) (((x) + (KILOBYTE - 1)) / KILOBYTE) 56 57 /* 58 * The alignment should be a power of 2. 59 */ 60 #define P2ALIGN(x, align) ((x) & -(align)) 61 62 #define INVALID_ADDRESS (uintptr_t)(-1) 63 64 struct totals { 65 ulong_t total_size; 66 ulong_t total_swap; 67 ulong_t total_rss; 68 ulong_t total_anon; 69 ulong_t total_locked; 70 }; 71 72 /* 73 * -L option requires per-page information. The information is presented in an 74 * array of page_descr structures. 75 */ 76 typedef struct page_descr { 77 uintptr_t pd_start; /* start address of a page */ 78 size_t pd_pagesize; /* page size in bytes */ 79 lgrp_id_t pd_lgrp; /* lgroup of memory backing the page */ 80 int pd_valid; /* valid page description if non-zero */ 81 } page_descr_t; 82 83 /* 84 * Per-page information for a memory chunk. 85 * The meminfo(2) system call accepts up to MAX_MEMINFO_CNT pages at once. 86 * When we need to scan larger ranges we divide them in MAX_MEMINFO_CNT sized 87 * chunks. The chunk information is stored in the memory_chunk structure. 88 */ 89 typedef struct memory_chunk { 90 page_descr_t page_info[MAX_MEMINFO_CNT]; 91 uintptr_t end_addr; 92 uintptr_t chunk_start; /* Starting address */ 93 uintptr_t chunk_end; /* chunk_end is always <= end_addr */ 94 size_t page_size; 95 int page_index; /* Current page */ 96 int page_count; /* Number of pages */ 97 } memory_chunk_t; 98 99 static volatile int interrupt; 100 101 typedef int proc_xmap_f(void *, const prxmap_t *, const char *, int, int); 102 103 static int xmapping_iter(struct ps_prochandle *, proc_xmap_f *, void *, 104 int); 105 static int rmapping_iter(struct ps_prochandle *, proc_map_f *, void *); 106 107 static int look_map(void *, const prmap_t *, const char *); 108 static int look_smap(void *, const prxmap_t *, const char *, int, int); 109 static int look_xmap(void *, const prxmap_t *, const char *, int, int); 110 static int look_xmap_nopgsz(void *, const prxmap_t *, const char *, 111 int, int); 112 113 static int gather_map(void *, const prmap_t *, const char *); 114 static int gather_xmap(void *, const prxmap_t *, const char *, int, int); 115 static int iter_map(proc_map_f *, void *); 116 static int iter_xmap(proc_xmap_f *, void *); 117 static int parse_addr_range(char *, uintptr_t *, uintptr_t *); 118 static void mem_chunk_init(memory_chunk_t *, uintptr_t, size_t); 119 120 static int perr(char *); 121 static void printK(long, int); 122 static char *mflags(uint_t); 123 124 static size_t get_contiguous_region(memory_chunk_t *, uintptr_t, 125 uintptr_t, size_t, lgrp_id_t *); 126 static void mem_chunk_get(memory_chunk_t *, uintptr_t); 127 static lgrp_id_t addr_to_lgrp(memory_chunk_t *, uintptr_t, size_t *); 128 static char *lgrp2str(lgrp_id_t); 129 130 static int address_in_range(uintptr_t, uintptr_t, size_t); 131 static size_t adjust_addr_range(uintptr_t, uintptr_t, size_t, 132 uintptr_t *, uintptr_t *); 133 134 static int lflag = 0; 135 static int Lflag = 0; 136 static int aflag = 0; 137 138 /* 139 * The -A address range is represented as a pair of addresses 140 * <start_addr, end_addr>. Either one of these may be unspecified (set to 141 * INVALID_ADDRESS). If both are unspecified, no address range restrictions are 142 * in place. 143 */ 144 static uintptr_t start_addr = INVALID_ADDRESS; 145 static uintptr_t end_addr = INVALID_ADDRESS; 146 147 static int addr_width, size_width; 148 static char *command; 149 static char *procname; 150 static struct ps_prochandle *Pr; 151 152 static void intr(int); 153 154 typedef struct lwpstack { 155 lwpid_t lwps_lwpid; 156 stack_t lwps_stack; 157 } lwpstack_t; 158 159 typedef struct { 160 prxmap_t md_xmap; 161 prmap_t md_map; 162 char *md_objname; 163 boolean_t md_last; 164 int md_doswap; 165 } mapdata_t; 166 167 static mapdata_t *maps; 168 static int map_count; 169 static int map_alloc; 170 171 static lwpstack_t *stacks = NULL; 172 static uint_t nstacks = 0; 173 174 #define MAX_TRIES 5 175 176 static int 177 getstack(void *data, const lwpstatus_t *lsp) 178 { 179 int *np = (int *)data; 180 181 if (Plwp_alt_stack(Pr, lsp->pr_lwpid, &stacks[*np].lwps_stack) == 0) { 182 stacks[*np].lwps_stack.ss_flags |= SS_ONSTACK; 183 stacks[*np].lwps_lwpid = lsp->pr_lwpid; 184 (*np)++; 185 } 186 187 if (Plwp_main_stack(Pr, lsp->pr_lwpid, &stacks[*np].lwps_stack) == 0) { 188 stacks[*np].lwps_lwpid = lsp->pr_lwpid; 189 (*np)++; 190 } 191 192 return (0); 193 } 194 195 /* 196 * We compare the high memory addresses since stacks are faulted in from 197 * high memory addresses to low memory addresses, and our prmap_t 198 * structures identify only the range of addresses that have been faulted 199 * in so far. 200 */ 201 static int 202 cmpstacks(const void *ap, const void *bp) 203 { 204 const lwpstack_t *as = ap; 205 const lwpstack_t *bs = bp; 206 uintptr_t a = (uintptr_t)as->lwps_stack.ss_sp + as->lwps_stack.ss_size; 207 uintptr_t b = (uintptr_t)bs->lwps_stack.ss_sp + bs->lwps_stack.ss_size; 208 209 if (a < b) 210 return (1); 211 if (a > b) 212 return (-1); 213 return (0); 214 } 215 216 217 int 218 main(int argc, char **argv) 219 { 220 int rflag = 0, sflag = 0, xflag = 0, Fflag = 0; 221 int errflg = 0, Sflag = 0; 222 int rc = 0; 223 int opt; 224 const char *bar8 = "-------"; 225 const char *bar16 = "----------"; 226 const char *bar; 227 struct rlimit rlim; 228 struct stat64 statbuf; 229 char buf[128]; 230 int mapfd; 231 232 if ((command = strrchr(argv[0], '/')) != NULL) 233 command++; 234 else 235 command = argv[0]; 236 237 while ((opt = getopt(argc, argv, "arsxSlLFA:")) != EOF) { 238 switch (opt) { 239 case 'a': /* include shared mappings in -[xS] */ 240 aflag = 1; 241 break; 242 case 'r': /* show reserved mappings */ 243 rflag = 1; 244 break; 245 case 's': /* show hardware page sizes */ 246 sflag = 1; 247 break; 248 case 'S': /* show swap reservations */ 249 Sflag = 1; 250 break; 251 case 'x': /* show extended mappings */ 252 xflag = 1; 253 break; 254 case 'l': /* show unresolved link map names */ 255 lflag = 1; 256 break; 257 case 'L': /* show lgroup information */ 258 Lflag = 1; 259 break; 260 case 'F': /* force grabbing (no O_EXCL) */ 261 Fflag = PGRAB_FORCE; 262 break; 263 case 'A': 264 if (parse_addr_range(optarg, &start_addr, &end_addr) 265 != 0) 266 errflg++; 267 break; 268 default: 269 errflg = 1; 270 break; 271 } 272 } 273 274 argc -= optind; 275 argv += optind; 276 277 if ((Sflag && (xflag || rflag || sflag)) || (xflag && rflag) || 278 (aflag && (!xflag && !Sflag)) || 279 (Lflag && (xflag || Sflag))) { 280 errflg = 1; 281 } 282 283 if (errflg || argc <= 0) { 284 (void) fprintf(stderr, 285 "usage:\t%s [-rslF] [-A start[,end]] { pid | core } ...\n", 286 command); 287 (void) fprintf(stderr, 288 "\t\t(report process address maps)\n"); 289 (void) fprintf(stderr, 290 "\t%s -L [-rslF] [-A start[,end]] pid ...\n", command); 291 (void) fprintf(stderr, 292 "\t\t(report process address maps lgroups mappings)\n"); 293 (void) fprintf(stderr, 294 "\t%s -x [-aslF] [-A start[,end]] pid ...\n", command); 295 (void) fprintf(stderr, 296 "\t\t(show resident/anon/locked mapping details)\n"); 297 (void) fprintf(stderr, 298 "\t%s -S [-alF] [-A start[,end]] { pid | core } ...\n", 299 command); 300 (void) fprintf(stderr, 301 "\t\t(show swap reservations)\n\n"); 302 (void) fprintf(stderr, 303 "\t-a: include shared mappings in -[xS] summary\n"); 304 (void) fprintf(stderr, 305 "\t-r: show reserved address maps\n"); 306 (void) fprintf(stderr, 307 "\t-s: show hardware page sizes\n"); 308 (void) fprintf(stderr, 309 "\t-l: show unresolved dynamic linker map names\n"); 310 (void) fprintf(stderr, 311 "\t-F: force grabbing of the target process\n"); 312 (void) fprintf(stderr, 313 "\t-L: show lgroup mappings\n"); 314 (void) fprintf(stderr, 315 "\t-A start,end: limit output to the specified range\n"); 316 return (2); 317 } 318 319 /* 320 * Make sure we'll have enough file descriptors to handle a target 321 * that has many many mappings. 322 */ 323 if (getrlimit(RLIMIT_NOFILE, &rlim) == 0) { 324 rlim.rlim_cur = rlim.rlim_max; 325 (void) setrlimit(RLIMIT_NOFILE, &rlim); 326 (void) enable_extended_FILE_stdio(-1, -1); 327 } 328 329 while (argc-- > 0) { 330 char *arg; 331 int gcode; 332 psinfo_t psinfo; 333 int tries = 0; 334 int prg_gflags = PGRAB_RDONLY; 335 int prr_flags = 0; 336 337 if (Lflag) { 338 prg_gflags = PGRAB_RETAIN | Fflag; 339 prr_flags = PRELEASE_RETAIN; 340 } 341 342 if ((Pr = proc_arg_grab(arg = *argv++, PR_ARG_ANY, 343 prg_gflags, &gcode)) == NULL) { 344 (void) fprintf(stderr, "%s: cannot examine %s: %s\n", 345 command, arg, Pgrab_error(gcode)); 346 rc++; 347 continue; 348 } 349 350 procname = arg; /* for perr() */ 351 352 addr_width = (Pstatus(Pr)->pr_dmodel == PR_MODEL_LP64) ? 16 : 8; 353 size_width = (Pstatus(Pr)->pr_dmodel == PR_MODEL_LP64) ? 11 : 8; 354 bar = addr_width == 8 ? bar8 : bar16; 355 (void) memcpy(&psinfo, Ppsinfo(Pr), sizeof (psinfo_t)); 356 proc_unctrl_psinfo(&psinfo); 357 358 if (Pstate(Pr) != PS_DEAD) { 359 (void) snprintf(buf, sizeof (buf), 360 "/proc/%d/map", (int)psinfo.pr_pid); 361 if ((mapfd = open(buf, O_RDONLY)) < 0) { 362 (void) fprintf(stderr, "%s: cannot " 363 "examine %s: lost control of " 364 "process\n", command, arg); 365 rc++; 366 Prelease(Pr, prr_flags); 367 continue; 368 } 369 } else { 370 mapfd = -1; 371 } 372 373 again: 374 map_count = 0; 375 376 if (Pstate(Pr) == PS_DEAD) { 377 (void) printf("core '%s' of %d:\t%.70s\n", 378 arg, (int)psinfo.pr_pid, psinfo.pr_psargs); 379 380 if (rflag || sflag || xflag || Sflag || Lflag) { 381 (void) printf(" -%c option is not compatible " 382 "with core files\n", xflag ? 'x' : 383 sflag ? 's' : rflag ? 'r' : 384 Lflag ? 'L' : 'S'); 385 Prelease(Pr, prr_flags); 386 rc++; 387 continue; 388 } 389 390 } else { 391 (void) printf("%d:\t%.70s\n", 392 (int)psinfo.pr_pid, psinfo.pr_psargs); 393 } 394 395 if (Lflag) { 396 /* 397 * The implementation of -L option creates an agent LWP 398 * in the target process address space. The agent LWP 399 * issues meminfo(2) system calls on behalf of the 400 * target process. If we are interrupted prematurely, 401 * the target process remains in the stopped state with 402 * the agent still attached to it. To prevent such 403 * situation we catch signals from terminal and 404 * terminate gracefully. 405 */ 406 if (sigset(SIGHUP, SIG_IGN) == SIG_DFL) 407 (void) sigset(SIGHUP, intr); 408 if (sigset(SIGINT, SIG_IGN) == SIG_DFL) 409 (void) sigset(SIGINT, intr); 410 if (sigset(SIGQUIT, SIG_IGN) == SIG_DFL) 411 (void) sigset(SIGQUIT, intr); 412 (void) sigset(SIGPIPE, intr); 413 (void) sigset(SIGTERM, intr); 414 } 415 416 if (!(Pstatus(Pr)->pr_flags & PR_ISSYS)) { 417 struct totals t; 418 419 /* 420 * Since we're grabbing the process readonly, we need 421 * to make sure the address space doesn't change during 422 * execution. 423 */ 424 if (Pstate(Pr) != PS_DEAD) { 425 if (tries++ == MAX_TRIES) { 426 Prelease(Pr, prr_flags); 427 (void) close(mapfd); 428 (void) fprintf(stderr, "%s: cannot " 429 "examine %s: address space is " 430 "changing\n", command, arg); 431 continue; 432 } 433 434 if (fstat64(mapfd, &statbuf) != 0) { 435 Prelease(Pr, prr_flags); 436 (void) close(mapfd); 437 (void) fprintf(stderr, "%s: cannot " 438 "examine %s: lost control of " 439 "process\n", command, arg); 440 continue; 441 } 442 } 443 444 nstacks = psinfo.pr_nlwp * 2; 445 stacks = calloc(nstacks, sizeof (stacks[0])); 446 if (stacks != NULL) { 447 int n = 0; 448 (void) Plwp_iter(Pr, getstack, &n); 449 qsort(stacks, nstacks, sizeof (stacks[0]), 450 cmpstacks); 451 } 452 453 (void) memset(&t, 0, sizeof (t)); 454 455 if (Pgetauxval(Pr, AT_BASE) != -1L && 456 Prd_agent(Pr) == NULL) { 457 (void) fprintf(stderr, "%s: warning: " 458 "librtld_db failed to initialize; " 459 "shared library information will not be " 460 "available\n", command); 461 } 462 463 /* 464 * Gather data 465 */ 466 if (xflag) 467 rc += xmapping_iter(Pr, gather_xmap, NULL, 0); 468 else if (Sflag) 469 rc += xmapping_iter(Pr, gather_xmap, NULL, 1); 470 else { 471 if (rflag) 472 rc += rmapping_iter(Pr, gather_map, 473 NULL); 474 else if (sflag) 475 rc += xmapping_iter(Pr, gather_xmap, 476 NULL, 0); 477 else 478 rc += Pmapping_iter(Pr, gather_map, 479 NULL); 480 } 481 482 /* 483 * Ensure mappings are consistent. 484 */ 485 if (Pstate(Pr) != PS_DEAD) { 486 struct stat64 newbuf; 487 488 if (fstat64(mapfd, &newbuf) != 0 || 489 memcmp(&newbuf.st_mtim, &statbuf.st_mtim, 490 sizeof (newbuf.st_mtim)) != 0) { 491 if (stacks != NULL) { 492 free(stacks); 493 stacks = NULL; 494 } 495 goto again; 496 } 497 } 498 499 /* 500 * Display data. 501 */ 502 if (xflag) { 503 (void) printf("%*s%*s%*s%*s%*s " 504 "%sMode Mapped File\n", 505 addr_width, "Address", 506 size_width, "Kbytes", 507 size_width, "RSS", 508 size_width, "Anon", 509 size_width, "Locked", 510 sflag ? "Pgsz " : ""); 511 512 rc += iter_xmap(sflag ? look_xmap : 513 look_xmap_nopgsz, &t); 514 515 (void) printf("%s%s %s %s %s %s\n", 516 addr_width == 8 ? "-" : "------", 517 bar, bar, bar, bar, bar); 518 519 (void) printf("%stotal Kb", addr_width == 16 ? 520 " " : ""); 521 522 printK(t.total_size, size_width); 523 printK(t.total_rss, size_width); 524 printK(t.total_anon, size_width); 525 printK(t.total_locked, size_width); 526 527 (void) printf("\n"); 528 529 } else if (Sflag) { 530 (void) printf("%*s%*s%*s Mode" 531 " Mapped File\n", 532 addr_width, "Address", 533 size_width, "Kbytes", 534 size_width, "Swap"); 535 536 rc += iter_xmap(look_xmap_nopgsz, &t); 537 538 (void) printf("%s%s %s %s\n", 539 addr_width == 8 ? "-" : "------", 540 bar, bar, bar); 541 542 (void) printf("%stotal Kb", addr_width == 16 ? 543 " " : ""); 544 545 printK(t.total_size, size_width); 546 printK(t.total_swap, size_width); 547 548 (void) printf("\n"); 549 550 } else { 551 552 if (rflag) { 553 rc += iter_map(look_map, &t); 554 } else if (sflag) { 555 if (Lflag) { 556 (void) printf("%*s %*s %4s" 557 " %-6s %s %s\n", 558 addr_width, "Address", 559 size_width, 560 "Bytes", "Pgsz", "Mode ", 561 "Lgrp", "Mapped File"); 562 rc += iter_xmap(look_smap, &t); 563 } else { 564 (void) printf("%*s %*s %4s" 565 " %-6s %s\n", 566 addr_width, "Address", 567 size_width, 568 "Bytes", "Pgsz", "Mode ", 569 "Mapped File"); 570 rc += iter_xmap(look_smap, &t); 571 } 572 } else { 573 rc += iter_map(look_map, &t); 574 } 575 576 (void) printf(" %stotal %*luK\n", 577 addr_width == 16 ? 578 " " : "", 579 size_width, t.total_size); 580 } 581 582 if (stacks != NULL) { 583 free(stacks); 584 stacks = NULL; 585 } 586 587 } 588 589 Prelease(Pr, prr_flags); 590 if (mapfd != -1) 591 (void) close(mapfd); 592 } 593 594 return (rc); 595 } 596 597 static char * 598 make_name(struct ps_prochandle *Pr, uintptr_t addr, const char *mapname, 599 char *buf, size_t bufsz) 600 { 601 const pstatus_t *Psp = Pstatus(Pr); 602 char fname[100]; 603 struct stat statb; 604 int len; 605 606 if (!lflag && strcmp(mapname, "a.out") == 0 && 607 Pexecname(Pr, buf, bufsz) != NULL) 608 return (buf); 609 610 if (Pobjname(Pr, addr, buf, bufsz) != NULL) { 611 if (lflag) 612 return (buf); 613 if ((len = resolvepath(buf, buf, bufsz)) > 0) { 614 buf[len] = '\0'; 615 return (buf); 616 } 617 } 618 619 if (Pstate(Pr) != PS_DEAD && *mapname != '\0') { 620 (void) snprintf(fname, sizeof (fname), "/proc/%d/object/%s", 621 (int)Psp->pr_pid, mapname); 622 if (stat(fname, &statb) == 0) { 623 dev_t dev = statb.st_dev; 624 ino_t ino = statb.st_ino; 625 (void) snprintf(buf, bufsz, "dev:%lu,%lu ino:%lu", 626 (ulong_t)major(dev), (ulong_t)minor(dev), ino); 627 return (buf); 628 } 629 } 630 631 return (NULL); 632 } 633 634 static char * 635 anon_name(char *name, const pstatus_t *Psp, 636 uintptr_t vaddr, size_t size, int mflags, int shmid) 637 { 638 if (mflags & MA_ISM) { 639 if (shmid == -1) 640 (void) snprintf(name, PATH_MAX, " [ %s shmid=null ]", 641 (mflags & MA_NORESERVE) ? "ism" : "dism"); 642 else 643 (void) snprintf(name, PATH_MAX, " [ %s shmid=0x%x ]", 644 (mflags & MA_NORESERVE) ? "ism" : "dism", shmid); 645 } else if (mflags & MA_SHM) { 646 if (shmid == -1) 647 (void) sprintf(name, " [ shmid=null ]"); 648 else 649 (void) sprintf(name, " [ shmid=0x%x ]", shmid); 650 } else if (vaddr + size > Psp->pr_stkbase && 651 vaddr < Psp->pr_stkbase + Psp->pr_stksize) { 652 (void) strcpy(name, " [ stack ]"); 653 } else if ((mflags & MA_ANON) && 654 vaddr + size > Psp->pr_brkbase && 655 vaddr < Psp->pr_brkbase + Psp->pr_brksize) { 656 (void) strcpy(name, " [ heap ]"); 657 } else { 658 lwpstack_t key, *stk; 659 660 key.lwps_stack.ss_sp = (void *)vaddr; 661 key.lwps_stack.ss_size = size; 662 if (nstacks > 0 && 663 (stk = bsearch(&key, stacks, nstacks, sizeof (stacks[0]), 664 cmpstacks)) != NULL) { 665 (void) snprintf(name, PATH_MAX, " [ %s tid=%d ]", 666 (stk->lwps_stack.ss_flags & SS_ONSTACK) ? 667 "altstack" : "stack", 668 stk->lwps_lwpid); 669 } else if (Pstate(Pr) != PS_DEAD) { 670 (void) strcpy(name, " [ anon ]"); 671 } else { 672 return (NULL); 673 } 674 } 675 676 return (name); 677 } 678 679 static int 680 rmapping_iter(struct ps_prochandle *Pr, proc_map_f *func, void *cd) 681 { 682 char mapname[PATH_MAX]; 683 int mapfd, nmap, i, rc; 684 struct stat st; 685 prmap_t *prmapp, *pmp; 686 ssize_t n; 687 688 (void) snprintf(mapname, sizeof (mapname), 689 "/proc/%d/rmap", (int)Pstatus(Pr)->pr_pid); 690 691 if ((mapfd = open(mapname, O_RDONLY)) < 0 || fstat(mapfd, &st) != 0) { 692 if (mapfd >= 0) 693 (void) close(mapfd); 694 return (perr(mapname)); 695 } 696 697 nmap = st.st_size / sizeof (prmap_t); 698 prmapp = malloc((nmap + 1) * sizeof (prmap_t)); 699 700 if ((n = pread(mapfd, prmapp, (nmap + 1) * sizeof (prmap_t), 0L)) < 0) { 701 (void) close(mapfd); 702 free(prmapp); 703 return (perr("read rmap")); 704 } 705 706 (void) close(mapfd); 707 nmap = n / sizeof (prmap_t); 708 709 for (i = 0, pmp = prmapp; i < nmap; i++, pmp++) { 710 if ((rc = func(cd, pmp, NULL)) != 0) { 711 free(prmapp); 712 return (rc); 713 } 714 } 715 716 free(prmapp); 717 return (0); 718 } 719 720 static int 721 xmapping_iter(struct ps_prochandle *Pr, proc_xmap_f *func, void *cd, int doswap) 722 { 723 char mapname[PATH_MAX]; 724 int mapfd, nmap, i, rc; 725 struct stat st; 726 prxmap_t *prmapp, *pmp; 727 ssize_t n; 728 729 (void) snprintf(mapname, sizeof (mapname), 730 "/proc/%d/xmap", (int)Pstatus(Pr)->pr_pid); 731 732 if ((mapfd = open(mapname, O_RDONLY)) < 0 || fstat(mapfd, &st) != 0) { 733 if (mapfd >= 0) 734 (void) close(mapfd); 735 return (perr(mapname)); 736 } 737 738 nmap = st.st_size / sizeof (prxmap_t); 739 nmap *= 2; 740 again: 741 prmapp = malloc((nmap + 1) * sizeof (prxmap_t)); 742 743 if ((n = pread(mapfd, prmapp, (nmap + 1) * sizeof (prxmap_t), 0)) < 0) { 744 (void) close(mapfd); 745 free(prmapp); 746 return (perr("read xmap")); 747 } 748 749 if (nmap < n / sizeof (prxmap_t)) { 750 free(prmapp); 751 nmap *= 2; 752 goto again; 753 } 754 755 (void) close(mapfd); 756 nmap = n / sizeof (prxmap_t); 757 758 for (i = 0, pmp = prmapp; i < nmap; i++, pmp++) { 759 if ((rc = func(cd, pmp, NULL, i == nmap - 1, doswap)) != 0) { 760 free(prmapp); 761 return (rc); 762 } 763 } 764 765 /* 766 * Mark the last element. 767 */ 768 if (map_count > 0) 769 maps[map_count - 1].md_last = B_TRUE; 770 771 free(prmapp); 772 return (0); 773 } 774 775 /*ARGSUSED*/ 776 static int 777 look_map(void *data, const prmap_t *pmp, const char *object_name) 778 { 779 struct totals *t = data; 780 const pstatus_t *Psp = Pstatus(Pr); 781 size_t size; 782 char mname[PATH_MAX]; 783 char *lname = NULL; 784 size_t psz = pmp->pr_pagesize; 785 uintptr_t vaddr = pmp->pr_vaddr; 786 uintptr_t segment_end = vaddr + pmp->pr_size; 787 lgrp_id_t lgrp; 788 memory_chunk_t mchunk; 789 790 /* 791 * If the mapping is not anon or not part of the heap, make a name 792 * for it. We don't want to report the heap as a.out's data. 793 */ 794 if (!(pmp->pr_mflags & MA_ANON) || 795 segment_end <= Psp->pr_brkbase || 796 pmp->pr_vaddr >= Psp->pr_brkbase + Psp->pr_brksize) { 797 lname = make_name(Pr, pmp->pr_vaddr, pmp->pr_mapname, 798 mname, sizeof (mname)); 799 } 800 801 if (lname == NULL && 802 ((pmp->pr_mflags & MA_ANON) || Pstate(Pr) == PS_DEAD)) { 803 lname = anon_name(mname, Psp, pmp->pr_vaddr, 804 pmp->pr_size, pmp->pr_mflags, pmp->pr_shmid); 805 } 806 807 /* 808 * Adjust the address range if -A is specified. 809 */ 810 size = adjust_addr_range(pmp->pr_vaddr, segment_end, psz, 811 &vaddr, &segment_end); 812 813 if (size == 0) 814 return (0); 815 816 if (!Lflag) { 817 /* 818 * Display the whole mapping 819 */ 820 size = ROUNDUP_KB(size); 821 822 (void) printf(lname ? 823 "%.*lX %*luK %-6s %s\n" : 824 "%.*lX %*luK %s\n", 825 addr_width, vaddr, 826 size_width - 1, size, mflags(pmp->pr_mflags), lname); 827 828 t->total_size += size; 829 return (0); 830 } 831 832 /* 833 * We need to display lgroups backing physical memory, so we break the 834 * segment into individual pages and coalesce pages with the same lgroup 835 * into one "segment". 836 */ 837 838 /* 839 * Initialize address descriptions for the mapping. 840 */ 841 mem_chunk_init(&mchunk, segment_end, psz); 842 size = 0; 843 844 /* 845 * Walk mapping (page by page) and display contiguous ranges of memory 846 * allocated to same lgroup. 847 */ 848 do { 849 size_t size_contig; 850 851 /* 852 * Get contiguous region of memory starting from vaddr allocated 853 * from the same lgroup. 854 */ 855 size_contig = get_contiguous_region(&mchunk, vaddr, 856 segment_end, pmp->pr_pagesize, &lgrp); 857 858 (void) printf(lname ? "%.*lX %*luK %-6s%s %s\n" : 859 "%.*lX %*luK %s %s\n", 860 addr_width, vaddr, 861 size_width - 1, size_contig / KILOBYTE, 862 mflags(pmp->pr_mflags), 863 lgrp2str(lgrp), lname); 864 865 vaddr += size_contig; 866 size += size_contig; 867 } while (vaddr < segment_end && !interrupt); 868 869 /* Update the total size */ 870 t->total_size += ROUNDUP_KB(size); 871 return (0); 872 } 873 874 static void 875 printK(long value, int width) 876 { 877 if (value == 0) 878 (void) printf(width == 8 ? " -" : " -"); 879 else 880 (void) printf(" %*lu", width - 1, value); 881 } 882 883 static const char * 884 pagesize(const prxmap_t *pmp) 885 { 886 int pagesize = pmp->pr_hatpagesize; 887 static char buf[32]; 888 889 if (pagesize == 0) { 890 return ("-"); /* no underlying HAT mapping */ 891 } 892 893 if (pagesize >= KILOBYTE && (pagesize % KILOBYTE) == 0) { 894 if ((pagesize % GIGABYTE) == 0) 895 (void) snprintf(buf, sizeof (buf), "%dG", 896 pagesize / GIGABYTE); 897 else if ((pagesize % MEGABYTE) == 0) 898 (void) snprintf(buf, sizeof (buf), "%dM", 899 pagesize / MEGABYTE); 900 else 901 (void) snprintf(buf, sizeof (buf), "%dK", 902 pagesize / KILOBYTE); 903 } else 904 (void) snprintf(buf, sizeof (buf), "%db", pagesize); 905 906 return (buf); 907 } 908 909 /*ARGSUSED*/ 910 static int 911 look_smap(void *data, 912 const prxmap_t *pmp, 913 const char *object_name, 914 int last, int doswap) 915 { 916 struct totals *t = data; 917 const pstatus_t *Psp = Pstatus(Pr); 918 size_t size; 919 char mname[PATH_MAX]; 920 char *lname = NULL; 921 const char *format; 922 size_t psz = pmp->pr_pagesize; 923 uintptr_t vaddr = pmp->pr_vaddr; 924 uintptr_t segment_end = vaddr + pmp->pr_size; 925 lgrp_id_t lgrp; 926 memory_chunk_t mchunk; 927 928 /* 929 * If the mapping is not anon or not part of the heap, make a name 930 * for it. We don't want to report the heap as a.out's data. 931 */ 932 if (!(pmp->pr_mflags & MA_ANON) || 933 pmp->pr_vaddr + pmp->pr_size <= Psp->pr_brkbase || 934 pmp->pr_vaddr >= Psp->pr_brkbase + Psp->pr_brksize) { 935 lname = make_name(Pr, pmp->pr_vaddr, pmp->pr_mapname, 936 mname, sizeof (mname)); 937 } 938 939 if (lname == NULL && 940 ((pmp->pr_mflags & MA_ANON) || Pstate(Pr) == PS_DEAD)) { 941 lname = anon_name(mname, Psp, pmp->pr_vaddr, 942 pmp->pr_size, pmp->pr_mflags, pmp->pr_shmid); 943 } 944 945 /* 946 * Adjust the address range if -A is specified. 947 */ 948 size = adjust_addr_range(pmp->pr_vaddr, segment_end, psz, 949 &vaddr, &segment_end); 950 951 if (size == 0) 952 return (0); 953 954 if (!Lflag) { 955 /* 956 * Display the whole mapping 957 */ 958 if (lname != NULL) 959 format = "%.*lX %*luK %4s %-6s %s\n"; 960 else 961 format = "%.*lX %*luK %4s %s\n"; 962 963 size = ROUNDUP_KB(size); 964 965 (void) printf(format, addr_width, vaddr, size_width - 1, size, 966 pagesize(pmp), mflags(pmp->pr_mflags), lname); 967 968 t->total_size += size; 969 return (0); 970 } 971 972 if (lname != NULL) 973 format = "%.*lX %*luK %4s %-6s%s %s\n"; 974 else 975 format = "%.*lX %*luK %4s%s %s\n"; 976 977 /* 978 * We need to display lgroups backing physical memory, so we break the 979 * segment into individual pages and coalesce pages with the same lgroup 980 * into one "segment". 981 */ 982 983 /* 984 * Initialize address descriptions for the mapping. 985 */ 986 mem_chunk_init(&mchunk, segment_end, psz); 987 size = 0; 988 989 /* 990 * Walk mapping (page by page) and display contiguous ranges of memory 991 * allocated to same lgroup. 992 */ 993 do { 994 size_t size_contig; 995 996 /* 997 * Get contiguous region of memory starting from vaddr allocated 998 * from the same lgroup. 999 */ 1000 size_contig = get_contiguous_region(&mchunk, vaddr, 1001 segment_end, pmp->pr_pagesize, &lgrp); 1002 1003 (void) printf(format, addr_width, vaddr, 1004 size_width - 1, size_contig / KILOBYTE, 1005 pagesize(pmp), mflags(pmp->pr_mflags), 1006 lgrp2str(lgrp), lname); 1007 1008 vaddr += size_contig; 1009 size += size_contig; 1010 } while (vaddr < segment_end && !interrupt); 1011 1012 t->total_size += ROUNDUP_KB(size); 1013 return (0); 1014 } 1015 1016 #define ANON(x) ((aflag || (((x)->pr_mflags & MA_SHARED) == 0)) ? \ 1017 ((x)->pr_anon) : 0) 1018 1019 /*ARGSUSED*/ 1020 static int 1021 look_xmap(void *data, 1022 const prxmap_t *pmp, 1023 const char *object_name, 1024 int last, int doswap) 1025 { 1026 struct totals *t = data; 1027 const pstatus_t *Psp = Pstatus(Pr); 1028 char mname[PATH_MAX]; 1029 char *lname = NULL; 1030 char *ln; 1031 1032 /* 1033 * If the mapping is not anon or not part of the heap, make a name 1034 * for it. We don't want to report the heap as a.out's data. 1035 */ 1036 if (!(pmp->pr_mflags & MA_ANON) || 1037 pmp->pr_vaddr + pmp->pr_size <= Psp->pr_brkbase || 1038 pmp->pr_vaddr >= Psp->pr_brkbase + Psp->pr_brksize) { 1039 lname = make_name(Pr, pmp->pr_vaddr, pmp->pr_mapname, 1040 mname, sizeof (mname)); 1041 } 1042 1043 if (lname != NULL) { 1044 if ((ln = strrchr(lname, '/')) != NULL) 1045 lname = ln + 1; 1046 } else if ((pmp->pr_mflags & MA_ANON) || Pstate(Pr) == PS_DEAD) { 1047 lname = anon_name(mname, Psp, pmp->pr_vaddr, 1048 pmp->pr_size, pmp->pr_mflags, pmp->pr_shmid); 1049 } 1050 1051 (void) printf("%.*lX", addr_width, (ulong_t)pmp->pr_vaddr); 1052 1053 printK(ROUNDUP_KB(pmp->pr_size), size_width); 1054 printK(pmp->pr_rss * (pmp->pr_pagesize / KILOBYTE), size_width); 1055 printK(ANON(pmp) * (pmp->pr_pagesize / KILOBYTE), size_width); 1056 printK(pmp->pr_locked * (pmp->pr_pagesize / KILOBYTE), size_width); 1057 (void) printf(lname ? " %4s %-6s %s\n" : " %4s %s\n", 1058 pagesize(pmp), mflags(pmp->pr_mflags), lname); 1059 1060 t->total_size += ROUNDUP_KB(pmp->pr_size); 1061 t->total_rss += pmp->pr_rss * (pmp->pr_pagesize / KILOBYTE); 1062 t->total_anon += ANON(pmp) * (pmp->pr_pagesize / KILOBYTE); 1063 t->total_locked += (pmp->pr_locked * (pmp->pr_pagesize / KILOBYTE)); 1064 1065 return (0); 1066 } 1067 1068 /*ARGSUSED*/ 1069 static int 1070 look_xmap_nopgsz(void *data, 1071 const prxmap_t *pmp, 1072 const char *object_name, 1073 int last, int doswap) 1074 { 1075 struct totals *t = data; 1076 const pstatus_t *Psp = Pstatus(Pr); 1077 char mname[PATH_MAX]; 1078 char *lname = NULL; 1079 char *ln; 1080 static uintptr_t prev_vaddr; 1081 static size_t prev_size; 1082 static offset_t prev_offset; 1083 static int prev_mflags; 1084 static char *prev_lname; 1085 static char prev_mname[PATH_MAX]; 1086 static ulong_t prev_rss; 1087 static ulong_t prev_anon; 1088 static ulong_t prev_locked; 1089 static ulong_t prev_swap; 1090 int merged = 0; 1091 static int first = 1; 1092 ulong_t swap = 0; 1093 int kperpage; 1094 1095 /* 1096 * Calculate swap reservations 1097 */ 1098 if (pmp->pr_mflags & MA_SHARED) { 1099 if (aflag && (pmp->pr_mflags & MA_NORESERVE) == 0) { 1100 /* Swap reserved for entire non-ism SHM */ 1101 swap = pmp->pr_size / pmp->pr_pagesize; 1102 } 1103 } else if (pmp->pr_mflags & MA_NORESERVE) { 1104 /* Swap reserved on fault for each anon page */ 1105 swap = pmp->pr_anon; 1106 } else if (pmp->pr_mflags & MA_WRITE) { 1107 /* Swap reserve for entire writable segment */ 1108 swap = pmp->pr_size / pmp->pr_pagesize; 1109 } 1110 1111 /* 1112 * If the mapping is not anon or not part of the heap, make a name 1113 * for it. We don't want to report the heap as a.out's data. 1114 */ 1115 if (!(pmp->pr_mflags & MA_ANON) || 1116 pmp->pr_vaddr + pmp->pr_size <= Psp->pr_brkbase || 1117 pmp->pr_vaddr >= Psp->pr_brkbase + Psp->pr_brksize) { 1118 lname = make_name(Pr, pmp->pr_vaddr, pmp->pr_mapname, 1119 mname, sizeof (mname)); 1120 } 1121 1122 if (lname != NULL) { 1123 if ((ln = strrchr(lname, '/')) != NULL) 1124 lname = ln + 1; 1125 } else if ((pmp->pr_mflags & MA_ANON) || Pstate(Pr) == PS_DEAD) { 1126 lname = anon_name(mname, Psp, pmp->pr_vaddr, 1127 pmp->pr_size, pmp->pr_mflags, pmp->pr_shmid); 1128 } 1129 1130 kperpage = pmp->pr_pagesize / KILOBYTE; 1131 1132 t->total_size += ROUNDUP_KB(pmp->pr_size); 1133 t->total_rss += pmp->pr_rss * kperpage; 1134 t->total_anon += ANON(pmp) * kperpage; 1135 t->total_locked += pmp->pr_locked * kperpage; 1136 t->total_swap += swap * kperpage; 1137 1138 if (first == 1) { 1139 first = 0; 1140 prev_vaddr = pmp->pr_vaddr; 1141 prev_size = pmp->pr_size; 1142 prev_offset = pmp->pr_offset; 1143 prev_mflags = pmp->pr_mflags; 1144 if (lname == NULL) { 1145 prev_lname = NULL; 1146 } else { 1147 (void) strcpy(prev_mname, lname); 1148 prev_lname = prev_mname; 1149 } 1150 prev_rss = pmp->pr_rss * kperpage; 1151 prev_anon = ANON(pmp) * kperpage; 1152 prev_locked = pmp->pr_locked * kperpage; 1153 prev_swap = swap * kperpage; 1154 if (last == 0) { 1155 return (0); 1156 } 1157 merged = 1; 1158 } else if (prev_vaddr + prev_size == pmp->pr_vaddr && 1159 prev_mflags == pmp->pr_mflags && 1160 ((prev_mflags & MA_ISM) || 1161 prev_offset + prev_size == pmp->pr_offset) && 1162 ((lname == NULL && prev_lname == NULL) || 1163 (lname != NULL && prev_lname != NULL && 1164 strcmp(lname, prev_lname) == 0))) { 1165 prev_size += pmp->pr_size; 1166 prev_rss += pmp->pr_rss * kperpage; 1167 prev_anon += ANON(pmp) * kperpage; 1168 prev_locked += pmp->pr_locked * kperpage; 1169 prev_swap += swap * kperpage; 1170 if (last == 0) { 1171 return (0); 1172 } 1173 merged = 1; 1174 } 1175 1176 (void) printf("%.*lX", addr_width, (ulong_t)prev_vaddr); 1177 printK(ROUNDUP_KB(prev_size), size_width); 1178 1179 if (doswap) 1180 printK(prev_swap, size_width); 1181 else { 1182 printK(prev_rss, size_width); 1183 printK(prev_anon, size_width); 1184 printK(prev_locked, size_width); 1185 } 1186 (void) printf(prev_lname ? " %-6s %s\n" : "%s\n", 1187 mflags(prev_mflags), prev_lname); 1188 1189 if (last == 0) { 1190 prev_vaddr = pmp->pr_vaddr; 1191 prev_size = pmp->pr_size; 1192 prev_offset = pmp->pr_offset; 1193 prev_mflags = pmp->pr_mflags; 1194 if (lname == NULL) { 1195 prev_lname = NULL; 1196 } else { 1197 (void) strcpy(prev_mname, lname); 1198 prev_lname = prev_mname; 1199 } 1200 prev_rss = pmp->pr_rss * kperpage; 1201 prev_anon = ANON(pmp) * kperpage; 1202 prev_locked = pmp->pr_locked * kperpage; 1203 prev_swap = swap * kperpage; 1204 } else if (merged == 0) { 1205 (void) printf("%.*lX", addr_width, (ulong_t)pmp->pr_vaddr); 1206 printK(ROUNDUP_KB(pmp->pr_size), size_width); 1207 if (doswap) 1208 printK(swap * kperpage, size_width); 1209 else { 1210 printK(pmp->pr_rss * kperpage, size_width); 1211 printK(ANON(pmp) * kperpage, size_width); 1212 printK(pmp->pr_locked * kperpage, size_width); 1213 } 1214 (void) printf(lname ? " %-6s %s\n" : " %s\n", 1215 mflags(pmp->pr_mflags), lname); 1216 } 1217 1218 if (last != 0) 1219 first = 1; 1220 1221 return (0); 1222 } 1223 1224 static int 1225 perr(char *s) 1226 { 1227 if (s) 1228 (void) fprintf(stderr, "%s: ", procname); 1229 else 1230 s = procname; 1231 perror(s); 1232 return (1); 1233 } 1234 1235 static char * 1236 mflags(uint_t arg) 1237 { 1238 static char code_buf[80]; 1239 char *str = code_buf; 1240 1241 /* 1242 * rwxsR 1243 * 1244 * r - segment is readable 1245 * w - segment is writable 1246 * x - segment is executable 1247 * s - segment is shared 1248 * R - segment is mapped MAP_NORESERVE 1249 * 1250 */ 1251 (void) sprintf(str, "%c%c%c%c%c%c", 1252 arg & MA_READ ? 'r' : '-', 1253 arg & MA_WRITE ? 'w' : '-', 1254 arg & MA_EXEC ? 'x' : '-', 1255 arg & MA_SHARED ? 's' : '-', 1256 arg & MA_NORESERVE ? 'R' : '-', 1257 arg & MA_RESERVED1 ? '*' : ' '); 1258 1259 return (str); 1260 } 1261 1262 static mapdata_t * 1263 nextmap(void) 1264 { 1265 mapdata_t *newmaps; 1266 int next; 1267 1268 if (map_count == map_alloc) { 1269 if (map_alloc == 0) 1270 next = 16; 1271 else 1272 next = map_alloc * 2; 1273 1274 newmaps = realloc(maps, next * sizeof (mapdata_t)); 1275 if (newmaps == NULL) { 1276 (void) perr("failed to allocate maps"); 1277 exit(1); 1278 } 1279 (void) memset(newmaps + map_alloc, '\0', 1280 (next - map_alloc) * sizeof (mapdata_t)); 1281 1282 map_alloc = next; 1283 maps = newmaps; 1284 } 1285 1286 return (&maps[map_count++]); 1287 } 1288 1289 /*ARGSUSED*/ 1290 static int 1291 gather_map(void *ignored, const prmap_t *map, const char *objname) 1292 { 1293 mapdata_t *data; 1294 1295 /* Skip mappings which are outside the range specified by -A */ 1296 if (!address_in_range(map->pr_vaddr, 1297 map->pr_vaddr + map->pr_size, map->pr_pagesize)) 1298 return (0); 1299 1300 data = nextmap(); 1301 data->md_map = *map; 1302 if (data->md_objname != NULL) 1303 free(data->md_objname); 1304 data->md_objname = objname ? strdup(objname) : NULL; 1305 1306 return (0); 1307 } 1308 1309 /*ARGSUSED*/ 1310 static int 1311 gather_xmap(void *ignored, const prxmap_t *xmap, const char *objname, 1312 int last, int doswap) 1313 { 1314 mapdata_t *data; 1315 1316 /* Skip mappings which are outside the range specified by -A */ 1317 if (!address_in_range(xmap->pr_vaddr, 1318 xmap->pr_vaddr + xmap->pr_size, xmap->pr_pagesize)) 1319 return (0); 1320 1321 data = nextmap(); 1322 data->md_xmap = *xmap; 1323 if (data->md_objname != NULL) 1324 free(data->md_objname); 1325 data->md_objname = objname ? strdup(objname) : NULL; 1326 data->md_last = last; 1327 data->md_doswap = doswap; 1328 1329 return (0); 1330 } 1331 1332 static int 1333 iter_map(proc_map_f *func, void *data) 1334 { 1335 int i; 1336 int ret; 1337 1338 for (i = 0; i < map_count; i++) { 1339 if (interrupt) 1340 break; 1341 if ((ret = func(data, &maps[i].md_map, 1342 maps[i].md_objname)) != 0) 1343 return (ret); 1344 } 1345 1346 return (0); 1347 } 1348 1349 static int 1350 iter_xmap(proc_xmap_f *func, void *data) 1351 { 1352 int i; 1353 int ret; 1354 1355 for (i = 0; i < map_count; i++) { 1356 if (interrupt) 1357 break; 1358 if ((ret = func(data, &maps[i].md_xmap, maps[i].md_objname, 1359 maps[i].md_last, maps[i].md_doswap)) != 0) 1360 return (ret); 1361 } 1362 1363 return (0); 1364 } 1365 1366 /* 1367 * Convert lgroup ID to string. 1368 * returns dash when lgroup ID is invalid. 1369 */ 1370 static char * 1371 lgrp2str(lgrp_id_t lgrp) 1372 { 1373 static char lgrp_buf[20]; 1374 char *str = lgrp_buf; 1375 1376 (void) sprintf(str, lgrp == LGRP_NONE ? " -" : "%4d", lgrp); 1377 return (str); 1378 } 1379 1380 /* 1381 * Parse address range specification for -A option. 1382 * The address range may have the following forms: 1383 * 1384 * address 1385 * start and end is set to address 1386 * address, 1387 * start is set to address, end is set to INVALID_ADDRESS 1388 * ,address 1389 * start is set to 0, end is set to address 1390 * address1,address2 1391 * start is set to address1, end is set to address2 1392 * 1393 */ 1394 static int 1395 parse_addr_range(char *input_str, uintptr_t *start, uintptr_t *end) 1396 { 1397 char *startp = input_str; 1398 char *endp = strchr(input_str, ','); 1399 ulong_t s = (ulong_t)INVALID_ADDRESS; 1400 ulong_t e = (ulong_t)INVALID_ADDRESS; 1401 1402 if (endp != NULL) { 1403 /* 1404 * Comma is present. If there is nothing after comma, the end 1405 * remains set at INVALID_ADDRESS. Otherwise it is set to the 1406 * value after comma. 1407 */ 1408 *endp = '\0'; 1409 endp++; 1410 1411 if ((*endp != '\0') && sscanf(endp, "%lx", &e) != 1) 1412 return (1); 1413 } 1414 1415 if (startp != NULL) { 1416 /* 1417 * Read the start address, if it is specified. If the address is 1418 * missing, start will be set to INVALID_ADDRESS. 1419 */ 1420 if ((*startp != '\0') && sscanf(startp, "%lx", &s) != 1) 1421 return (1); 1422 } 1423 1424 /* If there is no comma, end becomes equal to start */ 1425 if (endp == NULL) 1426 e = s; 1427 1428 /* 1429 * ,end implies 0..end range 1430 */ 1431 if (e != INVALID_ADDRESS && s == INVALID_ADDRESS) 1432 s = 0; 1433 1434 *start = (uintptr_t)s; 1435 *end = (uintptr_t)e; 1436 1437 /* Return error if neither start nor end address were specified */ 1438 return (! (s != INVALID_ADDRESS || e != INVALID_ADDRESS)); 1439 } 1440 1441 /* 1442 * Check whether any portion of [start, end] segment is within the 1443 * [start_addr, end_addr] range. 1444 * 1445 * Return values: 1446 * 0 - address is outside the range 1447 * 1 - address is within the range 1448 */ 1449 static int 1450 address_in_range(uintptr_t start, uintptr_t end, size_t psz) 1451 { 1452 int rc = 1; 1453 1454 /* 1455 * Nothing to do if there is no address range specified with -A 1456 */ 1457 if (start_addr != INVALID_ADDRESS || end_addr != INVALID_ADDRESS) { 1458 /* The segment end is below the range start */ 1459 if ((start_addr != INVALID_ADDRESS) && 1460 (end < P2ALIGN(start_addr, psz))) 1461 rc = 0; 1462 1463 /* The segment start is above the range end */ 1464 if ((end_addr != INVALID_ADDRESS) && 1465 (start > P2ALIGN(end_addr + psz, psz))) 1466 rc = 0; 1467 } 1468 return (rc); 1469 } 1470 1471 /* 1472 * Returns an intersection of the [start, end] interval and the range specified 1473 * by -A flag [start_addr, end_addr]. Unspecified parts of the address range 1474 * have value INVALID_ADDRESS. 1475 * 1476 * The start_addr address is rounded down to the beginning of page and end_addr 1477 * is rounded up to the end of page. 1478 * 1479 * Returns the size of the resulting interval or zero if the interval is empty 1480 * or invalid. 1481 */ 1482 static size_t 1483 adjust_addr_range(uintptr_t start, uintptr_t end, size_t psz, 1484 uintptr_t *new_start, uintptr_t *new_end) 1485 { 1486 uintptr_t from; /* start_addr rounded down */ 1487 uintptr_t to; /* end_addr rounded up */ 1488 1489 /* 1490 * Round down the lower address of the range to the beginning of page. 1491 */ 1492 if (start_addr == INVALID_ADDRESS) { 1493 /* 1494 * No start_addr specified by -A, the lower part of the interval 1495 * does not change. 1496 */ 1497 *new_start = start; 1498 } else { 1499 from = P2ALIGN(start_addr, psz); 1500 /* 1501 * If end address is outside the range, return an empty 1502 * interval 1503 */ 1504 if (end < from) { 1505 *new_start = *new_end = 0; 1506 return (0); 1507 } 1508 /* 1509 * The adjusted start address is the maximum of requested start 1510 * and the aligned start_addr of the -A range. 1511 */ 1512 *new_start = start < from ? from : start; 1513 } 1514 1515 /* 1516 * Round up the higher address of the range to the end of page. 1517 */ 1518 if (end_addr == INVALID_ADDRESS) { 1519 /* 1520 * No end_addr specified by -A, the upper part of the interval 1521 * does not change. 1522 */ 1523 *new_end = end; 1524 } else { 1525 /* 1526 * If only one address is specified and it is the beginning of a 1527 * segment, get information about the whole segment. This 1528 * function is called once per segment and the 'end' argument is 1529 * always the end of a segment, so just use the 'end' value. 1530 */ 1531 to = (end_addr == start_addr && start == start_addr) ? 1532 end : 1533 P2ALIGN(end_addr + psz, psz); 1534 /* 1535 * If start address is outside the range, return an empty 1536 * interval 1537 */ 1538 if (start > to) { 1539 *new_start = *new_end = 0; 1540 return (0); 1541 } 1542 /* 1543 * The adjusted end address is the minimum of requested end 1544 * and the aligned end_addr of the -A range. 1545 */ 1546 *new_end = end > to ? to : end; 1547 } 1548 1549 /* 1550 * Make sure that the resulting interval is legal. 1551 */ 1552 if (*new_end < *new_start) 1553 *new_start = *new_end = 0; 1554 1555 /* Return the size of the interval */ 1556 return (*new_end - *new_start); 1557 } 1558 1559 /* 1560 * Initialize memory_info data structure with information about a new segment. 1561 */ 1562 static void 1563 mem_chunk_init(memory_chunk_t *chunk, uintptr_t end, size_t psz) 1564 { 1565 chunk->end_addr = end; 1566 chunk->page_size = psz; 1567 chunk->page_index = 0; 1568 chunk->chunk_start = chunk->chunk_end = 0; 1569 } 1570 1571 /* 1572 * Create a new chunk of addresses starting from vaddr. 1573 * Pass the whole chunk to pr_meminfo to collect lgroup and page size 1574 * information for each page in the chunk. 1575 */ 1576 static void 1577 mem_chunk_get(memory_chunk_t *chunk, uintptr_t vaddr) 1578 { 1579 page_descr_t *pdp = chunk->page_info; 1580 size_t psz = chunk->page_size; 1581 uintptr_t addr = vaddr; 1582 uint64_t inaddr[MAX_MEMINFO_CNT]; 1583 uint64_t outdata[2 * MAX_MEMINFO_CNT]; 1584 uint_t info[2] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE }; 1585 uint_t validity[MAX_MEMINFO_CNT]; 1586 uint64_t *dataptr = inaddr; 1587 uint64_t *outptr = outdata; 1588 uint_t *valptr = validity; 1589 int i, j, rc; 1590 1591 chunk->chunk_start = vaddr; 1592 chunk->page_index = 0; /* reset index for the new chunk */ 1593 1594 /* 1595 * Fill in MAX_MEMINFO_CNT wotrh of pages starting from vaddr. Also, 1596 * copy starting address of each page to inaddr array for pr_meminfo. 1597 */ 1598 for (i = 0, pdp = chunk->page_info; 1599 (i < MAX_MEMINFO_CNT) && (addr <= chunk->end_addr); 1600 i++, pdp++, dataptr++, addr += psz) { 1601 *dataptr = (uint64_t)addr; 1602 pdp->pd_start = addr; 1603 pdp->pd_lgrp = LGRP_NONE; 1604 pdp->pd_valid = 0; 1605 pdp->pd_pagesize = 0; 1606 } 1607 1608 /* Mark the number of entries in the chunk and the last address */ 1609 chunk->page_count = i; 1610 chunk->chunk_end = addr - psz; 1611 1612 if (interrupt) 1613 return; 1614 1615 /* Call meminfo for all collected addresses */ 1616 rc = pr_meminfo(Pr, inaddr, i, info, 2, outdata, validity); 1617 if (rc < 0) { 1618 (void) perr("can not get memory information"); 1619 return; 1620 } 1621 1622 /* Verify validity of each result and fill in the addrs array */ 1623 pdp = chunk->page_info; 1624 for (j = 0; j < i; j++, pdp++, valptr++, outptr += 2) { 1625 /* Skip invalid address pointers */ 1626 if ((*valptr & 1) == 0) { 1627 continue; 1628 } 1629 1630 /* Is lgroup information available? */ 1631 if ((*valptr & 2) != 0) { 1632 pdp->pd_lgrp = (lgrp_id_t)*outptr; 1633 pdp->pd_valid = 1; 1634 } 1635 1636 /* Is page size informaion available? */ 1637 if ((*valptr & 4) != 0) { 1638 pdp->pd_pagesize = *(outptr + 1); 1639 } 1640 } 1641 } 1642 1643 /* 1644 * Starting from address 'vaddr' find the region with pages allocated from the 1645 * same lgroup. 1646 * 1647 * Arguments: 1648 * mchunk Initialized memory chunk structure 1649 * vaddr Starting address of the region 1650 * maxaddr Upper bound of the region 1651 * pagesize Default page size to use 1652 * ret_lgrp On exit contains the lgroup ID of all pages in the 1653 * region. 1654 * 1655 * Returns: 1656 * Size of the contiguous region in bytes 1657 * The lgroup ID of all pages in the region in ret_lgrp argument. 1658 */ 1659 static size_t 1660 get_contiguous_region(memory_chunk_t *mchunk, uintptr_t vaddr, 1661 uintptr_t maxaddr, size_t pagesize, lgrp_id_t *ret_lgrp) 1662 { 1663 size_t size_contig = 0; 1664 lgrp_id_t lgrp; /* Lgroup of the region start */ 1665 lgrp_id_t curr_lgrp; /* Lgroup of the current page */ 1666 size_t psz = pagesize; /* Pagesize to use */ 1667 1668 /* Set both lgroup IDs to the lgroup of the first page */ 1669 curr_lgrp = lgrp = addr_to_lgrp(mchunk, vaddr, &psz); 1670 1671 /* 1672 * Starting from vaddr, walk page by page until either the end 1673 * of the segment is reached or a page is allocated from a different 1674 * lgroup. Also stop if interrupted from keyboard. 1675 */ 1676 while ((vaddr < maxaddr) && (curr_lgrp == lgrp) && !interrupt) { 1677 /* 1678 * Get lgroup ID and the page size of the current page. 1679 */ 1680 curr_lgrp = addr_to_lgrp(mchunk, vaddr, &psz); 1681 /* If there is no page size information, use the default */ 1682 if (psz == 0) 1683 psz = pagesize; 1684 1685 if (curr_lgrp == lgrp) { 1686 /* 1687 * This page belongs to the contiguous region. 1688 * Increase the region size and advance to the new page. 1689 */ 1690 size_contig += psz; 1691 vaddr += psz; 1692 } 1693 } 1694 1695 /* Return the region lgroup ID and the size */ 1696 *ret_lgrp = lgrp; 1697 return (size_contig); 1698 } 1699 1700 /* 1701 * Given a virtual address, return its lgroup and page size. If there is meminfo 1702 * information for an address, use it, otherwise shift the chunk window to the 1703 * vaddr and create a new chunk with known meminfo information. 1704 */ 1705 static lgrp_id_t 1706 addr_to_lgrp(memory_chunk_t *chunk, uintptr_t vaddr, size_t *psz) 1707 { 1708 page_descr_t *pdp; 1709 lgrp_id_t lgrp = LGRP_NONE; 1710 int i; 1711 1712 *psz = chunk->page_size; 1713 1714 if (interrupt) 1715 return (0); 1716 1717 /* 1718 * Is there information about this address? If not, create a new chunk 1719 * starting from vaddr and apply pr_meminfo() to the whole chunk. 1720 */ 1721 if (vaddr < chunk->chunk_start || vaddr > chunk->chunk_end) { 1722 /* 1723 * This address is outside the chunk, get the new chunk and 1724 * collect meminfo information for it. 1725 */ 1726 mem_chunk_get(chunk, vaddr); 1727 } 1728 1729 /* 1730 * Find information about the address. 1731 */ 1732 pdp = &chunk->page_info[chunk->page_index]; 1733 for (i = chunk->page_index; i < chunk->page_count; i++, pdp++) { 1734 if (pdp->pd_start == vaddr) { 1735 if (pdp->pd_valid) { 1736 lgrp = pdp->pd_lgrp; 1737 /* 1738 * Override page size information if it is 1739 * present. 1740 */ 1741 if (pdp->pd_pagesize > 0) 1742 *psz = pdp->pd_pagesize; 1743 } 1744 break; 1745 } 1746 } 1747 /* 1748 * Remember where we ended - the next search will start here. 1749 * We can query for the lgrp for the same address again, so do not 1750 * advance index past the current value. 1751 */ 1752 chunk->page_index = i; 1753 1754 return (lgrp); 1755 } 1756 1757 /* ARGSUSED */ 1758 static void 1759 intr(int sig) 1760 { 1761 interrupt = 1; 1762 } 1763