/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <stdio.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <unistd.h>
#include <ctype.h>
#include <fcntl.h>
#include <string.h>
#include <dirent.h>
#include <limits.h>
#include <link.h>
#include <libelf.h>
#include <sys/types.h>
#include <signal.h>
#include <sys/stat.h>
#include <sys/mkdev.h>
#include <sys/mman.h>
#include <sys/lgrp_user.h>
#include <libproc.h>

#include "pmap_common.h"

#define	KILOBYTE	1024
#define	MEGABYTE	(KILOBYTE * KILOBYTE)
#define	GIGABYTE	(KILOBYTE * KILOBYTE * KILOBYTE)

/*
 * Round up the value to the nearest kilobyte
 */
#define	ROUNDUP_KB(x)	(((x) + (KILOBYTE - 1)) / KILOBYTE)

/*
 * The alignment should be a power of 2.
 */
#define	P2ALIGN(x, align)	((x) & -(align))

#define	INVALID_ADDRESS	(uintptr_t)(-1)
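
/*
 * Illustrative values for the macros above: ROUNDUP_KB(4097) is 5 and
 * P2ALIGN(0x12345, 0x1000) is 0x12000.
 */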

struct totals {
	ulong_t total_size;
	ulong_t total_swap;
	ulong_t total_rss;
	ulong_t total_anon;
	ulong_t total_locked;
};

/*
 * The -L option requires per-page information. The information is presented
 * in an array of page_descr structures.
 */
typedef struct page_descr {
	uintptr_t	pd_start;	/* start address of a page */
	size_t		pd_pagesize;	/* page size in bytes */
	lgrp_id_t	pd_lgrp;	/* lgroup of memory backing the page */
	int		pd_valid;	/* valid page description if non-zero */
} page_descr_t;

/*
 * Per-page information for a memory chunk.
 * The meminfo(2) system call accepts up to MAX_MEMINFO_CNT pages at once.
 * When we need to scan larger ranges we divide them into MAX_MEMINFO_CNT-sized
 * chunks. The chunk information is stored in the memory_chunk structure.
 */
typedef struct memory_chunk {
	page_descr_t	page_info[MAX_MEMINFO_CNT];
	uintptr_t	end_addr;
	uintptr_t	chunk_start;	/* Starting address */
	uintptr_t	chunk_end;	/* chunk_end is always <= end_addr */
	size_t		page_size;
	int		page_index;	/* Current page */
	int		page_count;	/* Number of pages */
} memory_chunk_t;

static volatile int interrupt;

typedef int proc_xmap_f(void *, const prxmap_t *, const char *, int, int);

static int xmapping_iter(struct ps_prochandle *, proc_xmap_f *, void *,
    int);
static int rmapping_iter(struct ps_prochandle *, proc_map_f *, void *);

static int look_map(void *, const prmap_t *, const char *);
static int look_smap(void *, const prxmap_t *, const char *, int, int);
static int look_xmap(void *, const prxmap_t *, const char *, int, int);
static int look_xmap_nopgsz(void *, const prxmap_t *, const char *,
    int, int);

static int gather_map(void *, const prmap_t *, const char *);
static int gather_xmap(void *, const prxmap_t *, const char *, int, int);
static int iter_map(proc_map_f *, void *);
static int iter_xmap(proc_xmap_f *, void *);
static int parse_addr_range(char *, uintptr_t *, uintptr_t *);
static void mem_chunk_init(memory_chunk_t *, uintptr_t, size_t);

static int perr(char *);
static void printK(long, int);
static char *mflags(uint_t);

static size_t get_contiguous_region(memory_chunk_t *, uintptr_t,
    uintptr_t, size_t, lgrp_id_t *);
static void mem_chunk_get(memory_chunk_t *, uintptr_t);
static lgrp_id_t addr_to_lgrp(memory_chunk_t *, uintptr_t, size_t *);
static char *lgrp2str(lgrp_id_t);

static int address_in_range(uintptr_t, uintptr_t, size_t);
static size_t adjust_addr_range(uintptr_t, uintptr_t, size_t,
    uintptr_t *, uintptr_t *);

static int lflag = 0;
static int Lflag = 0;
static int aflag = 0;

/*
 * The -A address range is represented as a pair of addresses
 * <start_addr, end_addr>. Either one of these may be unspecified (set to
 * INVALID_ADDRESS). If both are unspecified, no address range restrictions
 * are in place.
 */
static uintptr_t start_addr = INVALID_ADDRESS;
static uintptr_t end_addr = INVALID_ADDRESS;

static int addr_width, size_width;
static char *command;
static char *procname;
static struct ps_prochandle *Pr;

static void intr(int);

typedef struct {
	prxmap_t	md_xmap;
	prmap_t		md_map;
	char		*md_objname;
	boolean_t	md_last;
	int		md_doswap;
} mapdata_t;

static mapdata_t *maps;
static int map_count;
static int map_alloc;

static lwpstack_t *stacks = NULL;
static uint_t nstacks = 0;

#define	MAX_TRIES	5

/*
 * Record the alternate and main stack of each LWP in the stacks[] array;
 * the array is later passed to anon_name() to label stack mappings.
 */
static int
getstack(void *data, const lwpstatus_t *lsp)
{
	int *np = (int *)data;

	if (Plwp_alt_stack(Pr, lsp->pr_lwpid, &stacks[*np].lwps_stack) == 0) {
		stacks[*np].lwps_stack.ss_flags |= SS_ONSTACK;
		stacks[*np].lwps_lwpid = lsp->pr_lwpid;
		(*np)++;
	}

	if (Plwp_main_stack(Pr, lsp->pr_lwpid, &stacks[*np].lwps_stack) == 0) {
		stacks[*np].lwps_lwpid = lsp->pr_lwpid;
		(*np)++;
	}

	return (0);
}

int
main(int argc, char **argv)
{
	int rflag = 0, sflag = 0, xflag = 0, Fflag = 0;
	int errflg = 0, Sflag = 0;
	int rc = 0;
	int opt;
	const char *bar8 = "-------";
	const char *bar16 = "----------";
	const char *bar;
	struct rlimit rlim;
	struct stat64 statbuf;
	char buf[128];
	int mapfd;
	int prg_gflags = PGRAB_RDONLY;
	int prr_flags = 0;
	boolean_t use_agent_lwp = B_FALSE;

	if ((command = strrchr(argv[0], '/')) != NULL)
		command++;
	else
		command = argv[0];

	while ((opt = getopt(argc, argv, "arsxSlLFA:")) != EOF) {
		switch (opt) {
		case 'a':		/* include shared mappings in -[xS] */
			aflag = 1;
			break;
		case 'r':		/* show reserved mappings */
			rflag = 1;
			break;
		case 's':		/* show hardware page sizes */
			sflag = 1;
			break;
		case 'S':		/* show swap reservations */
			Sflag = 1;
			break;
		case 'x':		/* show extended mappings */
			xflag = 1;
			break;
		case 'l':		/* show unresolved link map names */
			lflag = 1;
			break;
		case 'L':		/* show lgroup information */
			Lflag = 1;
			use_agent_lwp = B_TRUE;
			break;
		case 'F':		/* force grabbing (no O_EXCL) */
			Fflag = PGRAB_FORCE;
			break;
		case 'A':
			if (parse_addr_range(optarg, &start_addr, &end_addr)
			    != 0)
				errflg++;
			break;
		default:
			errflg = 1;
			break;
		}
	}

	argc -= optind;
	argv += optind;

	if ((Sflag && (xflag || rflag || sflag)) || (xflag && rflag) ||
	    (aflag && (!xflag && !Sflag)) ||
	    (Lflag && (xflag || Sflag))) {
		errflg = 1;
	}

	if (errflg || argc <= 0) {
		(void) fprintf(stderr,
		    "usage:\t%s [-rslF] [-A start[,end]] { pid | core } ...\n",
		    command);
		(void) fprintf(stderr,
		    "\t\t(report process address maps)\n");
		(void) fprintf(stderr,
		    "\t%s -L [-rslF] [-A start[,end]] pid ...\n", command);
		(void) fprintf(stderr,
		    "\t\t(report process address maps lgroups mappings)\n");
		(void) fprintf(stderr,
		    "\t%s -x [-aslF] [-A start[,end]] pid ...\n", command);
		(void) fprintf(stderr,
		    "\t\t(show resident/anon/locked mapping details)\n");
		(void) fprintf(stderr,
		    "\t%s -S [-alF] [-A start[,end]] { pid | core } ...\n",
		    command);
		(void) fprintf(stderr,
		    "\t\t(show swap reservations)\n\n");
		(void) fprintf(stderr,
		    "\t-a: include shared mappings in -[xS] summary\n");
		(void) fprintf(stderr,
		    "\t-r: show reserved address maps\n");
		(void) fprintf(stderr,
		    "\t-s: show hardware page sizes\n");
		(void) fprintf(stderr,
		    "\t-l: show unresolved dynamic linker map names\n");
		(void) fprintf(stderr,
		    "\t-F: force grabbing of the target process\n");
		(void) fprintf(stderr,
		    "\t-L: show lgroup mappings\n");
		(void) fprintf(stderr,
		    "\t-A start,end: limit output to the specified range\n");
		return (2);
	}

	/*
	 * Make sure we'll have enough file descriptors to handle a target
	 * that has many, many mappings.
	 */
	if (getrlimit(RLIMIT_NOFILE, &rlim) == 0) {
		rlim.rlim_cur = rlim.rlim_max;
		(void) setrlimit(RLIMIT_NOFILE, &rlim);
		(void) enable_extended_FILE_stdio(-1, -1);
	}

	/*
	 * The implementation of the -L option creates an agent LWP in the
	 * target process address space. The agent LWP issues meminfo(2)
	 * system calls on behalf of the target process. If we are interrupted
	 * prematurely, the target process remains in the stopped state with
	 * the agent still attached to it. To prevent such a situation we
	 * catch signals from the terminal and terminate gracefully.
	 */
	if (use_agent_lwp) {
		/*
		 * Buffer output to stdout, stderr while process is grabbed.
		 * Prevents infamous deadlocks due to pmap `pgrep xterm` and
		 * other variants.
		 */
		(void) proc_initstdio();

		prg_gflags = PGRAB_RETAIN | Fflag;
		prr_flags = PRELEASE_RETAIN;

		if (sigset(SIGHUP, SIG_IGN) == SIG_DFL)
			(void) sigset(SIGHUP, intr);
		if (sigset(SIGINT, SIG_IGN) == SIG_DFL)
			(void) sigset(SIGINT, intr);
		if (sigset(SIGQUIT, SIG_IGN) == SIG_DFL)
			(void) sigset(SIGQUIT, intr);
		(void) sigset(SIGPIPE, intr);
		(void) sigset(SIGTERM, intr);
	}

	while (argc-- > 0) {
		char *arg;
		int gcode;
		psinfo_t psinfo;
		int tries = 0;

		if (use_agent_lwp)
			(void) proc_flushstdio();

		if ((Pr = proc_arg_grab(arg = *argv++, PR_ARG_ANY,
		    prg_gflags, &gcode)) == NULL) {
			(void) fprintf(stderr, "%s: cannot examine %s: %s\n",
			    command, arg, Pgrab_error(gcode));
			rc++;
			continue;
		}

		procname = arg;		/* for perr() */

		addr_width = (Pstatus(Pr)->pr_dmodel == PR_MODEL_LP64) ? 16 : 8;
		size_width = (Pstatus(Pr)->pr_dmodel == PR_MODEL_LP64) ? 11 : 8;
		bar = addr_width == 8 ? bar8 : bar16;
		(void) memcpy(&psinfo, Ppsinfo(Pr), sizeof (psinfo_t));
		proc_unctrl_psinfo(&psinfo);

		if (Pstate(Pr) != PS_DEAD) {
			(void) snprintf(buf, sizeof (buf),
			    "/proc/%d/map", (int)psinfo.pr_pid);
			if ((mapfd = open(buf, O_RDONLY)) < 0) {
				(void) fprintf(stderr, "%s: cannot "
				    "examine %s: lost control of "
				    "process\n", command, arg);
				rc++;
				Prelease(Pr, prr_flags);
				continue;
			}
		} else {
			mapfd = -1;
		}

again:
		map_count = 0;

		if (Pstate(Pr) == PS_DEAD) {
			(void) printf("core '%s' of %d:\t%.70s\n",
			    arg, (int)psinfo.pr_pid, psinfo.pr_psargs);

			if (rflag || sflag || xflag || Sflag || Lflag) {
				(void) printf(" -%c option is not compatible "
				    "with core files\n", xflag ? 'x' :
				    sflag ? 's' : rflag ? 'r' :
				    Lflag ? 'L' : 'S');
				Prelease(Pr, prr_flags);
				rc++;
				continue;
			}

		} else {
			(void) printf("%d:\t%.70s\n",
			    (int)psinfo.pr_pid, psinfo.pr_psargs);
		}

		if (!(Pstatus(Pr)->pr_flags & PR_ISSYS)) {
			struct totals t;

			/*
			 * Since we're grabbing the process readonly, we need
			 * to make sure the address space doesn't change during
			 * execution.
			 */
			if (Pstate(Pr) != PS_DEAD) {
				if (tries++ == MAX_TRIES) {
					Prelease(Pr, prr_flags);
					(void) close(mapfd);
					(void) fprintf(stderr, "%s: cannot "
					    "examine %s: address space is "
					    "changing\n", command, arg);
					continue;
				}

				if (fstat64(mapfd, &statbuf) != 0) {
					Prelease(Pr, prr_flags);
					(void) close(mapfd);
					(void) fprintf(stderr, "%s: cannot "
					    "examine %s: lost control of "
					    "process\n", command, arg);
					continue;
				}
			}

			nstacks = psinfo.pr_nlwp * 2;
			stacks = calloc(nstacks, sizeof (stacks[0]));
			if (stacks != NULL) {
				int n = 0;
				(void) Plwp_iter(Pr, getstack, &n);
				qsort(stacks, nstacks, sizeof (stacks[0]),
				    cmpstacks);
			}

			(void) memset(&t, 0, sizeof (t));

			if (Pgetauxval(Pr, AT_BASE) != -1L &&
			    Prd_agent(Pr) == NULL) {
				(void) fprintf(stderr, "%s: warning: "
				    "librtld_db failed to initialize; "
				    "shared library information will not be "
				    "available\n", command);
			}

			/*
			 * Gather data
			 */
			if (xflag)
				rc += xmapping_iter(Pr, gather_xmap, NULL, 0);
			else if (Sflag)
				rc += xmapping_iter(Pr, gather_xmap, NULL, 1);
			else {
				if (rflag)
					rc += rmapping_iter(Pr, gather_map,
					    NULL);
				else if (sflag)
					rc += xmapping_iter(Pr, gather_xmap,
					    NULL, 0);
				else if (lflag)
					rc += Pmapping_iter(Pr,
					    gather_map, NULL);
				else
					rc += Pmapping_iter_resolved(Pr,
					    gather_map, NULL);
			}

			/*
			 * Ensure mappings are consistent.
			 */
			if (Pstate(Pr) != PS_DEAD) {
				struct stat64 newbuf;

				if (fstat64(mapfd, &newbuf) != 0 ||
				    memcmp(&newbuf.st_mtim, &statbuf.st_mtim,
				    sizeof (newbuf.st_mtim)) != 0) {
					if (stacks != NULL) {
						free(stacks);
						stacks = NULL;
					}
					goto again;
				}
			}

			/*
			 * Display data.
			 */
			if (xflag) {
				(void) printf("%*s%*s%*s%*s%*s "
				    "%sMode Mapped File\n",
				    addr_width, "Address",
				    size_width, "Kbytes",
				    size_width, "RSS",
				    size_width, "Anon",
				    size_width, "Locked",
				    sflag ? "Pgsz " : "");

				rc += iter_xmap(sflag ? look_xmap :
				    look_xmap_nopgsz, &t);

				(void) printf("%s%s %s %s %s %s\n",
				    addr_width == 8 ? "-" : "------",
				    bar, bar, bar, bar, bar);

				(void) printf("%stotal Kb", addr_width == 16 ?
				    " " : "");

				printK(t.total_size, size_width);
				printK(t.total_rss, size_width);
				printK(t.total_anon, size_width);
				printK(t.total_locked, size_width);

				(void) printf("\n");

			} else if (Sflag) {
				(void) printf("%*s%*s%*s Mode"
				    " Mapped File\n",
				    addr_width, "Address",
				    size_width, "Kbytes",
				    size_width, "Swap");

				rc += iter_xmap(look_xmap_nopgsz, &t);

				(void) printf("%s%s %s %s\n",
				    addr_width == 8 ? "-" : "------",
				    bar, bar, bar);

				(void) printf("%stotal Kb", addr_width == 16 ?
				    " " : "");

				printK(t.total_size, size_width);
				printK(t.total_swap, size_width);

				(void) printf("\n");

			} else {

				if (rflag) {
					rc += iter_map(look_map, &t);
				} else if (sflag) {
					if (Lflag) {
						(void) printf("%*s %*s %4s"
						    " %-6s %s %s\n",
						    addr_width, "Address",
						    size_width,
						    "Bytes", "Pgsz", "Mode ",
						    "Lgrp", "Mapped File");
						rc += iter_xmap(look_smap, &t);
					} else {
						(void) printf("%*s %*s %4s"
						    " %-6s %s\n",
						    addr_width, "Address",
						    size_width,
						    "Bytes", "Pgsz", "Mode ",
						    "Mapped File");
						rc += iter_xmap(look_smap, &t);
					}
				} else {
					rc += iter_map(look_map, &t);
				}

				(void) printf(" %stotal %*luK\n",
				    addr_width == 16 ?
				    " " : "",
				    size_width, t.total_size);
			}

			if (stacks != NULL) {
				free(stacks);
				stacks = NULL;
			}

		}

		Prelease(Pr, prr_flags);
		if (mapfd != -1)
			(void) close(mapfd);
	}

	if (use_agent_lwp)
		(void) proc_finistdio();

	return (rc);
}

static int
rmapping_iter(struct ps_prochandle *Pr, proc_map_f *func, void *cd)
{
	char mapname[PATH_MAX];
	int mapfd, nmap, i, rc;
	struct stat st;
	prmap_t *prmapp, *pmp;
	ssize_t n;

	(void) snprintf(mapname, sizeof (mapname),
	    "/proc/%d/rmap", (int)Pstatus(Pr)->pr_pid);

	if ((mapfd = open(mapname, O_RDONLY)) < 0 || fstat(mapfd, &st) != 0) {
		if (mapfd >= 0)
			(void) close(mapfd);
		return (perr(mapname));
	}

	nmap = st.st_size / sizeof (prmap_t);
	prmapp = malloc((nmap + 1) * sizeof (prmap_t));

	if ((n = pread(mapfd, prmapp, (nmap + 1) * sizeof (prmap_t), 0L)) < 0) {
		(void) close(mapfd);
		free(prmapp);
		return (perr("read rmap"));
	}

	(void) close(mapfd);
	nmap = n / sizeof (prmap_t);

	for (i = 0, pmp = prmapp; i < nmap; i++, pmp++) {
		if ((rc = func(cd, pmp, NULL)) != 0) {
			free(prmapp);
			return (rc);
		}
	}

	free(prmapp);
	return (0);
}

static int
xmapping_iter(struct ps_prochandle *Pr, proc_xmap_f *func, void *cd,
    int doswap)
{
	char mapname[PATH_MAX];
	int mapfd, nmap, i, rc;
	struct stat st;
	prxmap_t *prmapp, *pmp;
	ssize_t n;

	(void) snprintf(mapname, sizeof (mapname),
	    "/proc/%d/xmap", (int)Pstatus(Pr)->pr_pid);

	if ((mapfd = open(mapname, O_RDONLY)) < 0 || fstat(mapfd, &st) != 0) {
		if (mapfd >= 0)
			(void) close(mapfd);
		return (perr(mapname));
	}

	nmap = st.st_size / sizeof (prxmap_t);
	nmap *= 2;
again:
	prmapp = malloc((nmap + 1) * sizeof (prxmap_t));

	if ((n = pread(mapfd, prmapp, (nmap + 1) * sizeof (prxmap_t), 0)) < 0) {
		(void) close(mapfd);
		free(prmapp);
		return (perr("read xmap"));
	}

	if (nmap < n / sizeof (prxmap_t)) {
		free(prmapp);
		nmap *= 2;
		goto again;
	}

	(void) close(mapfd);
	nmap = n / sizeof (prxmap_t);

	for (i = 0, pmp = prmapp; i < nmap; i++, pmp++) {
		if ((rc = func(cd, pmp, NULL, i == nmap - 1, doswap)) != 0) {
			free(prmapp);
			return (rc);
		}
	}

	/*
	 * Mark the last element.
	 */
	if (map_count > 0)
		maps[map_count - 1].md_last = B_TRUE;

	free(prmapp);
	return (0);
}

/*ARGSUSED*/
static int
look_map(void *data, const prmap_t *pmp, const char *object_name)
{
	struct totals *t = data;
	const pstatus_t *Psp = Pstatus(Pr);
	size_t size;
	char mname[PATH_MAX];
	char *lname = NULL;
	size_t psz = pmp->pr_pagesize;
	uintptr_t vaddr = pmp->pr_vaddr;
	uintptr_t segment_end = vaddr + pmp->pr_size;
	lgrp_id_t lgrp;
	memory_chunk_t mchunk;

	/*
	 * If the mapping is not anon or not part of the heap, make a name
	 * for it. We don't want to report the heap as a.out's data.
	 */
	if (!(pmp->pr_mflags & MA_ANON) ||
	    segment_end <= Psp->pr_brkbase ||
	    pmp->pr_vaddr >= Psp->pr_brkbase + Psp->pr_brksize) {
		lname = make_name(Pr, lflag, pmp->pr_vaddr, pmp->pr_mapname,
		    mname, sizeof (mname));
	}

	if (lname == NULL &&
	    ((pmp->pr_mflags & MA_ANON) || Pstate(Pr) == PS_DEAD)) {
		lname = anon_name(mname, Psp, stacks, nstacks, pmp->pr_vaddr,
		    pmp->pr_size, pmp->pr_mflags, pmp->pr_shmid, NULL);
	}

	/*
	 * Adjust the address range if -A is specified.
	 */
	size = adjust_addr_range(pmp->pr_vaddr, segment_end, psz,
	    &vaddr, &segment_end);

	if (size == 0)
		return (0);

	if (!Lflag) {
		/*
		 * Display the whole mapping
		 */
		size = ROUNDUP_KB(size);

		(void) printf(lname ?
		    "%.*lX %*luK %-6s %s\n" :
		    "%.*lX %*luK %s\n",
		    addr_width, vaddr,
		    size_width - 1, size, mflags(pmp->pr_mflags), lname);

		t->total_size += size;
		return (0);
	}

	/*
	 * We need to display lgroups backing physical memory, so we break the
	 * segment into individual pages and coalesce pages with the same
	 * lgroup into one "segment".
	 */

	/*
	 * Initialize address descriptions for the mapping.
	 */
	mem_chunk_init(&mchunk, segment_end, psz);
	size = 0;

	/*
	 * Walk the mapping (page by page) and display contiguous ranges of
	 * memory allocated to the same lgroup.
	 */
	do {
		size_t size_contig;

		/*
		 * Get the contiguous region of memory starting from vaddr
		 * that is allocated from the same lgroup.
		 */
		size_contig = get_contiguous_region(&mchunk, vaddr,
		    segment_end, pmp->pr_pagesize, &lgrp);

		(void) printf(lname ? "%.*lX %*luK %-6s%s %s\n" :
		    "%.*lX %*luK %s %s\n",
		    addr_width, vaddr,
		    size_width - 1, size_contig / KILOBYTE,
		    mflags(pmp->pr_mflags),
		    lgrp2str(lgrp), lname);

		vaddr += size_contig;
		size += size_contig;
	} while (vaddr < segment_end && !interrupt);

	/* Update the total size */
	t->total_size += ROUNDUP_KB(size);
	return (0);
}

static void
printK(long value, int width)
{
	if (value == 0)
		(void) printf(width == 8 ?
" -" : " -"); 784 else 785 (void) printf(" %*lu", width - 1, value); 786 } 787 788 static const char * 789 pagesize(const prxmap_t *pmp) 790 { 791 int pagesize = pmp->pr_hatpagesize; 792 static char buf[32]; 793 794 if (pagesize == 0) { 795 return ("-"); /* no underlying HAT mapping */ 796 } 797 798 if (pagesize >= KILOBYTE && (pagesize % KILOBYTE) == 0) { 799 if ((pagesize % GIGABYTE) == 0) 800 (void) snprintf(buf, sizeof (buf), "%dG", 801 pagesize / GIGABYTE); 802 else if ((pagesize % MEGABYTE) == 0) 803 (void) snprintf(buf, sizeof (buf), "%dM", 804 pagesize / MEGABYTE); 805 else 806 (void) snprintf(buf, sizeof (buf), "%dK", 807 pagesize / KILOBYTE); 808 } else 809 (void) snprintf(buf, sizeof (buf), "%db", pagesize); 810 811 return (buf); 812 } 813 814 /*ARGSUSED*/ 815 static int 816 look_smap(void *data, 817 const prxmap_t *pmp, 818 const char *object_name, 819 int last, int doswap) 820 { 821 struct totals *t = data; 822 const pstatus_t *Psp = Pstatus(Pr); 823 size_t size; 824 char mname[PATH_MAX]; 825 char *lname = NULL; 826 const char *format; 827 size_t psz = pmp->pr_pagesize; 828 uintptr_t vaddr = pmp->pr_vaddr; 829 uintptr_t segment_end = vaddr + pmp->pr_size; 830 lgrp_id_t lgrp; 831 memory_chunk_t mchunk; 832 833 /* 834 * If the mapping is not anon or not part of the heap, make a name 835 * for it. We don't want to report the heap as a.out's data. 836 */ 837 if (!(pmp->pr_mflags & MA_ANON) || 838 pmp->pr_vaddr + pmp->pr_size <= Psp->pr_brkbase || 839 pmp->pr_vaddr >= Psp->pr_brkbase + Psp->pr_brksize) { 840 lname = make_name(Pr, lflag, pmp->pr_vaddr, pmp->pr_mapname, 841 mname, sizeof (mname)); 842 } 843 844 if (lname == NULL && 845 ((pmp->pr_mflags & MA_ANON) || Pstate(Pr) == PS_DEAD)) { 846 lname = anon_name(mname, Psp, stacks, nstacks, pmp->pr_vaddr, 847 pmp->pr_size, pmp->pr_mflags, pmp->pr_shmid, NULL); 848 } 849 850 /* 851 * Adjust the address range if -A is specified. 852 */ 853 size = adjust_addr_range(pmp->pr_vaddr, segment_end, psz, 854 &vaddr, &segment_end); 855 856 if (size == 0) 857 return (0); 858 859 if (!Lflag) { 860 /* 861 * Display the whole mapping 862 */ 863 if (lname != NULL) 864 format = "%.*lX %*luK %4s %-6s %s\n"; 865 else 866 format = "%.*lX %*luK %4s %s\n"; 867 868 size = ROUNDUP_KB(size); 869 870 (void) printf(format, addr_width, vaddr, size_width - 1, size, 871 pagesize(pmp), mflags(pmp->pr_mflags), lname); 872 873 t->total_size += size; 874 return (0); 875 } 876 877 if (lname != NULL) 878 format = "%.*lX %*luK %4s %-6s%s %s\n"; 879 else 880 format = "%.*lX %*luK %4s%s %s\n"; 881 882 /* 883 * We need to display lgroups backing physical memory, so we break the 884 * segment into individual pages and coalesce pages with the same lgroup 885 * into one "segment". 886 */ 887 888 /* 889 * Initialize address descriptions for the mapping. 890 */ 891 mem_chunk_init(&mchunk, segment_end, psz); 892 size = 0; 893 894 /* 895 * Walk mapping (page by page) and display contiguous ranges of memory 896 * allocated to same lgroup. 897 */ 898 do { 899 size_t size_contig; 900 901 /* 902 * Get contiguous region of memory starting from vaddr allocated 903 * from the same lgroup. 
		 */
		size_contig = get_contiguous_region(&mchunk, vaddr,
		    segment_end, pmp->pr_pagesize, &lgrp);

		(void) printf(format, addr_width, vaddr,
		    size_width - 1, size_contig / KILOBYTE,
		    pagesize(pmp), mflags(pmp->pr_mflags),
		    lgrp2str(lgrp), lname);

		vaddr += size_contig;
		size += size_contig;
	} while (vaddr < segment_end && !interrupt);

	t->total_size += ROUNDUP_KB(size);
	return (0);
}

/*
 * Number of anon pages charged to a mapping; shared mappings are counted
 * only when -a is given.
 */
#define	ANON(x)	((aflag || (((x)->pr_mflags & MA_SHARED) == 0)) ? \
	((x)->pr_anon) : 0)

/*ARGSUSED*/
static int
look_xmap(void *data,
	const prxmap_t *pmp,
	const char *object_name,
	int last, int doswap)
{
	struct totals *t = data;
	const pstatus_t *Psp = Pstatus(Pr);
	char mname[PATH_MAX];
	char *lname = NULL;
	char *ln;

	/*
	 * If the mapping is not anon or not part of the heap, make a name
	 * for it. We don't want to report the heap as a.out's data.
	 */
	if (!(pmp->pr_mflags & MA_ANON) ||
	    pmp->pr_vaddr + pmp->pr_size <= Psp->pr_brkbase ||
	    pmp->pr_vaddr >= Psp->pr_brkbase + Psp->pr_brksize) {
		lname = make_name(Pr, lflag, pmp->pr_vaddr, pmp->pr_mapname,
		    mname, sizeof (mname));
	}

	if (lname != NULL) {
		if ((ln = strrchr(lname, '/')) != NULL)
			lname = ln + 1;
	} else if ((pmp->pr_mflags & MA_ANON) || Pstate(Pr) == PS_DEAD) {
		lname = anon_name(mname, Psp, stacks, nstacks, pmp->pr_vaddr,
		    pmp->pr_size, pmp->pr_mflags, pmp->pr_shmid, NULL);
	}

	(void) printf("%.*lX", addr_width, (ulong_t)pmp->pr_vaddr);

	printK(ROUNDUP_KB(pmp->pr_size), size_width);
	printK(pmp->pr_rss * (pmp->pr_pagesize / KILOBYTE), size_width);
	printK(ANON(pmp) * (pmp->pr_pagesize / KILOBYTE), size_width);
	printK(pmp->pr_locked * (pmp->pr_pagesize / KILOBYTE), size_width);
	(void) printf(lname ? " %4s %-6s %s\n" : " %4s %s\n",
	    pagesize(pmp), mflags(pmp->pr_mflags), lname);

	t->total_size += ROUNDUP_KB(pmp->pr_size);
	t->total_rss += pmp->pr_rss * (pmp->pr_pagesize / KILOBYTE);
	t->total_anon += ANON(pmp) * (pmp->pr_pagesize / KILOBYTE);
	t->total_locked += (pmp->pr_locked * (pmp->pr_pagesize / KILOBYTE));

	return (0);
}

/*ARGSUSED*/
static int
look_xmap_nopgsz(void *data,
	const prxmap_t *pmp,
	const char *object_name,
	int last, int doswap)
{
	struct totals *t = data;
	const pstatus_t *Psp = Pstatus(Pr);
	char mname[PATH_MAX];
	char *lname = NULL;
	char *ln;
	static uintptr_t prev_vaddr;
	static size_t prev_size;
	static offset_t prev_offset;
	static int prev_mflags;
	static char *prev_lname;
	static char prev_mname[PATH_MAX];
	static ulong_t prev_rss;
	static ulong_t prev_anon;
	static ulong_t prev_locked;
	static ulong_t prev_swap;
	int merged = 0;
	static int first = 1;
	ulong_t swap = 0;
	int kperpage;

	/*
	 * Calculate swap reservations
	 */
	if (pmp->pr_mflags & MA_SHARED) {
		if (aflag && (pmp->pr_mflags & MA_NORESERVE) == 0) {
			/* Swap reserved for entire non-ism SHM */
			swap = pmp->pr_size / pmp->pr_pagesize;
		}
	} else if (pmp->pr_mflags & MA_NORESERVE) {
		/* Swap reserved on fault for each anon page */
		swap = pmp->pr_anon;
	} else if (pmp->pr_mflags & MA_WRITE) {
		/* Swap reserved for the entire writable segment */
		swap = pmp->pr_size / pmp->pr_pagesize;
	}
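
	/*
	 * Illustrative example: a private writable 8MB mapping backed by
	 * 4K pages takes the MA_WRITE branch above, so swap is
	 * pr_size / pr_pagesize = 2048 pages, reported as 8192K once
	 * scaled by kperpage below.
	 */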

	/*
	 * If the mapping is not anon or not part of the heap, make a name
	 * for it. We don't want to report the heap as a.out's data.
	 */
	if (!(pmp->pr_mflags & MA_ANON) ||
	    pmp->pr_vaddr + pmp->pr_size <= Psp->pr_brkbase ||
	    pmp->pr_vaddr >= Psp->pr_brkbase + Psp->pr_brksize) {
		lname = make_name(Pr, lflag, pmp->pr_vaddr, pmp->pr_mapname,
		    mname, sizeof (mname));
	}

	if (lname != NULL) {
		if ((ln = strrchr(lname, '/')) != NULL)
			lname = ln + 1;
	} else if ((pmp->pr_mflags & MA_ANON) || Pstate(Pr) == PS_DEAD) {
		lname = anon_name(mname, Psp, stacks, nstacks, pmp->pr_vaddr,
		    pmp->pr_size, pmp->pr_mflags, pmp->pr_shmid, NULL);
	}

	kperpage = pmp->pr_pagesize / KILOBYTE;

	t->total_size += ROUNDUP_KB(pmp->pr_size);
	t->total_rss += pmp->pr_rss * kperpage;
	t->total_anon += ANON(pmp) * kperpage;
	t->total_locked += pmp->pr_locked * kperpage;
	t->total_swap += swap * kperpage;

	/*
	 * Accumulate adjacent entries that describe one logically contiguous
	 * mapping (contiguous addresses and file offsets, or ISM, with the
	 * same flags and name) into the prev_* values and report them as a
	 * single line.
	 */
	if (first == 1) {
		first = 0;
		prev_vaddr = pmp->pr_vaddr;
		prev_size = pmp->pr_size;
		prev_offset = pmp->pr_offset;
		prev_mflags = pmp->pr_mflags;
		if (lname == NULL) {
			prev_lname = NULL;
		} else {
			(void) strcpy(prev_mname, lname);
			prev_lname = prev_mname;
		}
		prev_rss = pmp->pr_rss * kperpage;
		prev_anon = ANON(pmp) * kperpage;
		prev_locked = pmp->pr_locked * kperpage;
		prev_swap = swap * kperpage;
		if (last == 0) {
			return (0);
		}
		merged = 1;
	} else if (prev_vaddr + prev_size == pmp->pr_vaddr &&
	    prev_mflags == pmp->pr_mflags &&
	    ((prev_mflags & MA_ISM) ||
	    prev_offset + prev_size == pmp->pr_offset) &&
	    ((lname == NULL && prev_lname == NULL) ||
	    (lname != NULL && prev_lname != NULL &&
	    strcmp(lname, prev_lname) == 0))) {
		prev_size += pmp->pr_size;
		prev_rss += pmp->pr_rss * kperpage;
		prev_anon += ANON(pmp) * kperpage;
		prev_locked += pmp->pr_locked * kperpage;
		prev_swap += swap * kperpage;
		if (last == 0) {
			return (0);
		}
		merged = 1;
	}

	(void) printf("%.*lX", addr_width, (ulong_t)prev_vaddr);
	printK(ROUNDUP_KB(prev_size), size_width);

	if (doswap)
		printK(prev_swap, size_width);
	else {
		printK(prev_rss, size_width);
		printK(prev_anon, size_width);
		printK(prev_locked, size_width);
	}
	(void) printf(prev_lname ? " %-6s %s\n" : "%s\n",
	    mflags(prev_mflags), prev_lname);

	if (last == 0) {
		prev_vaddr = pmp->pr_vaddr;
		prev_size = pmp->pr_size;
		prev_offset = pmp->pr_offset;
		prev_mflags = pmp->pr_mflags;
		if (lname == NULL) {
			prev_lname = NULL;
		} else {
			(void) strcpy(prev_mname, lname);
			prev_lname = prev_mname;
		}
		prev_rss = pmp->pr_rss * kperpage;
		prev_anon = ANON(pmp) * kperpage;
		prev_locked = pmp->pr_locked * kperpage;
		prev_swap = swap * kperpage;
	} else if (merged == 0) {
		(void) printf("%.*lX", addr_width, (ulong_t)pmp->pr_vaddr);
		printK(ROUNDUP_KB(pmp->pr_size), size_width);
		if (doswap)
			printK(swap * kperpage, size_width);
		else {
			printK(pmp->pr_rss * kperpage, size_width);
			printK(ANON(pmp) * kperpage, size_width);
			printK(pmp->pr_locked * kperpage, size_width);
		}
		(void) printf(lname ? " %-6s %s\n" : " %s\n",
		    mflags(pmp->pr_mflags), lname);
	}

	if (last != 0)
		first = 1;

	return (0);
}

static int
perr(char *s)
{
	if (s)
		(void) fprintf(stderr, "%s: ", procname);
	else
		s = procname;
	perror(s);
	return (1);
}

static char *
mflags(uint_t arg)
{
	static char code_buf[80];
	char *str = code_buf;

	/*
	 * rwxsR
	 *
	 * r - segment is readable
	 * w - segment is writable
	 * x - segment is executable
	 * s - segment is shared
	 * R - segment is mapped MAP_NORESERVE
	 *
	 */
	(void) sprintf(str, "%c%c%c%c%c%c",
	    arg & MA_READ ? 'r' : '-',
	    arg & MA_WRITE ? 'w' : '-',
	    arg & MA_EXEC ? 'x' : '-',
	    arg & MA_SHARED ? 's' : '-',
	    arg & MA_NORESERVE ? 'R' : '-',
	    arg & MA_RESERVED1 ? '*' : ' ');

	return (str);
}

/*
 * Return the next free mapdata_t slot, growing the maps array as needed.
 */
static mapdata_t *
nextmap(void)
{
	mapdata_t *newmaps;
	int next;

	if (map_count == map_alloc) {
		if (map_alloc == 0)
			next = 16;
		else
			next = map_alloc * 2;

		newmaps = realloc(maps, next * sizeof (mapdata_t));
		if (newmaps == NULL) {
			(void) perr("failed to allocate maps");
			exit(1);
		}
		(void) memset(newmaps + map_alloc, '\0',
		    (next - map_alloc) * sizeof (mapdata_t));

		map_alloc = next;
		maps = newmaps;
	}

	return (&maps[map_count++]);
}

/*ARGSUSED*/
static int
gather_map(void *ignored, const prmap_t *map, const char *objname)
{
	mapdata_t *data;

	/* Skip mappings which are outside the range specified by -A */
	if (!address_in_range(map->pr_vaddr,
	    map->pr_vaddr + map->pr_size, map->pr_pagesize))
		return (0);

	data = nextmap();
	data->md_map = *map;
	if (data->md_objname != NULL)
		free(data->md_objname);
	data->md_objname = objname ? strdup(objname) : NULL;

	return (0);
}

/*ARGSUSED*/
static int
gather_xmap(void *ignored, const prxmap_t *xmap, const char *objname,
    int last, int doswap)
{
	mapdata_t *data;

	/* Skip mappings which are outside the range specified by -A */
	if (!address_in_range(xmap->pr_vaddr,
	    xmap->pr_vaddr + xmap->pr_size, xmap->pr_pagesize))
		return (0);

	data = nextmap();
	data->md_xmap = *xmap;
	if (data->md_objname != NULL)
		free(data->md_objname);
	data->md_objname = objname ? strdup(objname) : NULL;
	data->md_last = last;
	data->md_doswap = doswap;

	return (0);
}

static int
iter_map(proc_map_f *func, void *data)
{
	int i;
	int ret;

	for (i = 0; i < map_count; i++) {
		if (interrupt)
			break;
		if ((ret = func(data, &maps[i].md_map,
		    maps[i].md_objname)) != 0)
			return (ret);
	}

	return (0);
}

static int
iter_xmap(proc_xmap_f *func, void *data)
{
	int i;
	int ret;

	for (i = 0; i < map_count; i++) {
		if (interrupt)
			break;
		if ((ret = func(data, &maps[i].md_xmap, maps[i].md_objname,
		    maps[i].md_last, maps[i].md_doswap)) != 0)
			return (ret);
	}

	return (0);
}

/*
 * Convert an lgroup ID to a string.
 * Returns a dash when the lgroup ID is invalid.
 */
static char *
lgrp2str(lgrp_id_t lgrp)
{
	static char lgrp_buf[20];
	char *str = lgrp_buf;

	(void) sprintf(str, lgrp == LGRP_NONE ? " -" : "%4d", lgrp);
	return (str);
}

/*
 * Parse the address range specification for the -A option.
 * The address range may have the following forms:
 *
 * address
 *	start and end are set to address
 * address,
 *	start is set to address, end is set to INVALID_ADDRESS
 * ,address
 *	start is set to 0, end is set to address
 * address1,address2
 *	start is set to address1, end is set to address2
 *
 */
static int
parse_addr_range(char *input_str, uintptr_t *start, uintptr_t *end)
{
	char *startp = input_str;
	char *endp = strchr(input_str, ',');
	ulong_t s = (ulong_t)INVALID_ADDRESS;
	ulong_t e = (ulong_t)INVALID_ADDRESS;

	if (endp != NULL) {
		/*
		 * A comma is present. If there is nothing after the comma,
		 * the end remains set at INVALID_ADDRESS. Otherwise it is set
		 * to the value after the comma.
		 */
		*endp = '\0';
		endp++;

		if ((*endp != '\0') && sscanf(endp, "%lx", &e) != 1)
			return (1);
	}

	if (startp != NULL) {
		/*
		 * Read the start address, if it is specified. If the address
		 * is missing, start will be set to INVALID_ADDRESS.
		 */
		if ((*startp != '\0') && sscanf(startp, "%lx", &s) != 1)
			return (1);
	}

	/* If there is no comma, end becomes equal to start */
	if (endp == NULL)
		e = s;

	/*
	 * ,end implies a 0..end range
	 */
	if (e != INVALID_ADDRESS && s == INVALID_ADDRESS)
		s = 0;

	*start = (uintptr_t)s;
	*end = (uintptr_t)e;

	/* Return an error if neither start nor end address was specified */
	return (! (s != INVALID_ADDRESS || e != INVALID_ADDRESS));
}
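
/*
 * Illustrative examples (addresses are parsed as hexadecimal): "-A 10000"
 * selects address 0x10000 (the whole segment when 0x10000 is a segment
 * start), "-A 10000," selects everything from 0x10000 up, "-A ,20000"
 * everything up to 0x20000, and "-A 10000,20000" the range in between.
 */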

/*
 * Check whether any portion of the [start, end] segment is within the
 * [start_addr, end_addr] range.
 *
 * Return values:
 *	0 - address is outside the range
 *	1 - address is within the range
 */
static int
address_in_range(uintptr_t start, uintptr_t end, size_t psz)
{
	int rc = 1;

	/*
	 * Nothing to do if there is no address range specified with -A
	 */
	if (start_addr != INVALID_ADDRESS || end_addr != INVALID_ADDRESS) {
		/* The segment end is below the range start */
		if ((start_addr != INVALID_ADDRESS) &&
		    (end < P2ALIGN(start_addr, psz)))
			rc = 0;

		/* The segment start is above the range end */
		if ((end_addr != INVALID_ADDRESS) &&
		    (start > P2ALIGN(end_addr + psz, psz)))
			rc = 0;
	}
	return (rc);
}

/*
 * Returns the intersection of the [start, end] interval and the range
 * specified by the -A flag [start_addr, end_addr]. Unspecified parts of the
 * address range have the value INVALID_ADDRESS.
 *
 * The start_addr address is rounded down to the beginning of a page and
 * end_addr is rounded up to the end of a page.
 *
 * Returns the size of the resulting interval or zero if the interval is empty
 * or invalid.
 */
static size_t
adjust_addr_range(uintptr_t start, uintptr_t end, size_t psz,
    uintptr_t *new_start, uintptr_t *new_end)
{
	uintptr_t from;		/* start_addr rounded down */
	uintptr_t to;		/* end_addr rounded up */

	/*
	 * Round down the lower address of the range to the beginning of page.
	 */
	if (start_addr == INVALID_ADDRESS) {
		/*
		 * No start_addr was specified by -A, the lower part of the
		 * interval does not change.
		 */
		*new_start = start;
	} else {
		from = P2ALIGN(start_addr, psz);
		/*
		 * If the end address is outside the range, return an empty
		 * interval.
		 */
		if (end < from) {
			*new_start = *new_end = 0;
			return (0);
		}
		/*
		 * The adjusted start address is the maximum of the requested
		 * start and the aligned start_addr of the -A range.
		 */
		*new_start = start < from ? from : start;
	}

	/*
	 * Round up the higher address of the range to the end of page.
	 */
	if (end_addr == INVALID_ADDRESS) {
		/*
		 * No end_addr was specified by -A, the upper part of the
		 * interval does not change.
		 */
		*new_end = end;
	} else {
		/*
		 * If only one address is specified and it is the beginning of
		 * a segment, get information about the whole segment. This
		 * function is called once per segment and the 'end' argument
		 * is always the end of a segment, so just use the 'end' value.
		 */
		to = (end_addr == start_addr && start == start_addr) ?
		    end :
		    P2ALIGN(end_addr + psz, psz);
		/*
		 * If the start address is outside the range, return an empty
		 * interval.
		 */
		if (start > to) {
			*new_start = *new_end = 0;
			return (0);
		}
		/*
		 * The adjusted end address is the minimum of the requested
		 * end and the aligned end_addr of the -A range.
		 */
		*new_end = end > to ? to : end;
	}

	/*
	 * Make sure that the resulting interval is legal.
	 */
	if (*new_end < *new_start)
		*new_start = *new_end = 0;

	/* Return the size of the interval */
	return (*new_end - *new_start);
}

/*
 * Initialize the memory_chunk data structure with information about a new
 * segment.
 */
static void
mem_chunk_init(memory_chunk_t *chunk, uintptr_t end, size_t psz)
{
	chunk->end_addr = end;
	chunk->page_size = psz;
	chunk->page_index = 0;
	chunk->chunk_start = chunk->chunk_end = 0;
}

/*
 * Create a new chunk of addresses starting from vaddr.
 * Pass the whole chunk to pr_meminfo() to collect lgroup and page size
 * information for each page in the chunk.
 */
static void
mem_chunk_get(memory_chunk_t *chunk, uintptr_t vaddr)
{
	page_descr_t *pdp = chunk->page_info;
	size_t psz = chunk->page_size;
	uintptr_t addr = vaddr;
	uint64_t inaddr[MAX_MEMINFO_CNT];
	uint64_t outdata[2 * MAX_MEMINFO_CNT];
	uint_t info[2] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
	uint_t validity[MAX_MEMINFO_CNT];
	uint64_t *dataptr = inaddr;
	uint64_t *outptr = outdata;
	uint_t *valptr = validity;
	int i, j, rc;

	chunk->chunk_start = vaddr;
	chunk->page_index = 0;	/* reset index for the new chunk */

	/*
	 * Fill in MAX_MEMINFO_CNT worth of pages starting from vaddr. Also,
	 * copy the starting address of each page to the inaddr array for
	 * pr_meminfo().
	 */
	for (i = 0, pdp = chunk->page_info;
	    (i < MAX_MEMINFO_CNT) && (addr <= chunk->end_addr);
	    i++, pdp++, dataptr++, addr += psz) {
		*dataptr = (uint64_t)addr;
		pdp->pd_start = addr;
		pdp->pd_lgrp = LGRP_NONE;
		pdp->pd_valid = 0;
		pdp->pd_pagesize = 0;
	}

	/* Mark the number of entries in the chunk and the last address */
	chunk->page_count = i;
	chunk->chunk_end = addr - psz;

	if (interrupt)
		return;

	/* Call meminfo for all collected addresses */
	rc = pr_meminfo(Pr, inaddr, i, info, 2, outdata, validity);
	if (rc < 0) {
		(void) perr("cannot get memory information");
		return;
	}

	/* Verify the validity of each result and fill in the addrs array */
	pdp = chunk->page_info;
	for (j = 0; j < i; j++, pdp++, valptr++, outptr += 2) {
		/* Skip invalid address pointers */
		if ((*valptr & 1) == 0) {
			continue;
		}

		/* Is lgroup information available? */
		if ((*valptr & 2) != 0) {
			pdp->pd_lgrp = (lgrp_id_t)*outptr;
			pdp->pd_valid = 1;
		}

		/* Is page size information available? */
		if ((*valptr & 4) != 0) {
			pdp->pd_pagesize = *(outptr + 1);
		}
	}
}

/*
 * Starting from address 'vaddr' find the region with pages allocated from the
 * same lgroup.
 *
 * Arguments:
 *	mchunk		Initialized memory chunk structure
 *	vaddr		Starting address of the region
 *	maxaddr		Upper bound of the region
 *	pagesize	Default page size to use
 *	ret_lgrp	On exit contains the lgroup ID of all pages in the
 *			region.
 *
 * Returns:
 *	Size of the contiguous region in bytes
 *	The lgroup ID of all pages in the region in the ret_lgrp argument.
 */
static size_t
get_contiguous_region(memory_chunk_t *mchunk, uintptr_t vaddr,
    uintptr_t maxaddr, size_t pagesize, lgrp_id_t *ret_lgrp)
{
	size_t size_contig = 0;
	lgrp_id_t lgrp;		/* Lgroup of the region start */
	lgrp_id_t curr_lgrp;	/* Lgroup of the current page */
	size_t psz = pagesize;	/* Pagesize to use */

	/* Set both lgroup IDs to the lgroup of the first page */
	curr_lgrp = lgrp = addr_to_lgrp(mchunk, vaddr, &psz);

	/*
	 * Starting from vaddr, walk page by page until either the end
	 * of the segment is reached or a page is allocated from a different
	 * lgroup. Also stop if interrupted from the keyboard.
	 */
	while ((vaddr < maxaddr) && (curr_lgrp == lgrp) && !interrupt) {
		/*
		 * Get the lgroup ID and the page size of the current page.
		 */
		curr_lgrp = addr_to_lgrp(mchunk, vaddr, &psz);
		/* If there is no page size information, use the default */
		if (psz == 0)
			psz = pagesize;

		if (curr_lgrp == lgrp) {
			/*
			 * This page belongs to the contiguous region.
			 * Increase the region size and advance to the new
			 * page.
			 */
			size_contig += psz;
			vaddr += psz;
		}
	}

	/* Return the region lgroup ID and the size */
	*ret_lgrp = lgrp;
	return (size_contig);
}
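
/*
 * Illustrative example: if the first three 8K pages of a mapping are backed
 * by lgroup 1 and the fourth by lgroup 2, get_contiguous_region() returns
 * 24K with *ret_lgrp set to 1; the caller advances vaddr by that amount, so
 * the next call starts at the fourth page.
 */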

/*
 * Given a virtual address, return its lgroup and page size. If there is
 * meminfo information for the address, use it; otherwise shift the chunk
 * window to vaddr and create a new chunk with known meminfo information.
 */
static lgrp_id_t
addr_to_lgrp(memory_chunk_t *chunk, uintptr_t vaddr, size_t *psz)
{
	page_descr_t *pdp;
	lgrp_id_t lgrp = LGRP_NONE;
	int i;

	*psz = chunk->page_size;

	if (interrupt)
		return (0);

	/*
	 * Is there information about this address? If not, create a new chunk
	 * starting from vaddr and apply pr_meminfo() to the whole chunk.
	 */
	if (vaddr < chunk->chunk_start || vaddr > chunk->chunk_end) {
		/*
		 * This address is outside the chunk, get the new chunk and
		 * collect meminfo information for it.
		 */
		mem_chunk_get(chunk, vaddr);
	}

	/*
	 * Find information about the address.
	 */
	pdp = &chunk->page_info[chunk->page_index];
	for (i = chunk->page_index; i < chunk->page_count; i++, pdp++) {
		if (pdp->pd_start == vaddr) {
			if (pdp->pd_valid) {
				lgrp = pdp->pd_lgrp;
				/*
				 * Override the page size information if it is
				 * present.
				 */
				if (pdp->pd_pagesize > 0)
					*psz = pdp->pd_pagesize;
			}
			break;
		}
	}
	/*
	 * Remember where we ended - the next search will start here.
	 * We can query for the lgrp of the same address again, so do not
	 * advance the index past the current value.
	 */
	chunk->page_index = i;

	return (lgrp);
}

/* ARGSUSED */
static void
intr(int sig)
{
	interrupt = 1;
}