1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 /*
28 * Copyright 2024 Oxide Computer Company
29 */
30
31 #include <stdio.h>
32 #include <stdio_ext.h>
33 #include <stdlib.h>
34 #include <unistd.h>
35 #include <ctype.h>
36 #include <fcntl.h>
37 #include <string.h>
38 #include <dirent.h>
39 #include <limits.h>
40 #include <link.h>
41 #include <libelf.h>
42 #include <sys/types.h>
43 #include <signal.h>
44 #include <sys/stat.h>
45 #include <sys/mkdev.h>
46 #include <sys/mman.h>
47 #include <sys/lgrp_user.h>
48 #include <sys/debug.h>
49 #include <libproc.h>
50
51 #include "pmap_common.h"
52
53 #define KILOBYTE 1024
54 #define MEGABYTE (KILOBYTE * KILOBYTE)
55 #define GIGABYTE (KILOBYTE * KILOBYTE * KILOBYTE)
56
57 /*
58 * Round up the value to the nearest kilobyte
59 */
60 #define ROUNDUP_KB(x) (((x) + (KILOBYTE - 1)) / KILOBYTE)
61
62 /*
63 * The alignment should be a power of 2.
64 */
65 #define P2ALIGN(x, align) ((x) & -(align))
66
67 #define INVALID_ADDRESS (uintptr_t)(-1)
68
69 struct totals {
70 ulong_t total_size;
71 ulong_t total_swap;
72 ulong_t total_rss;
73 ulong_t total_anon;
74 ulong_t total_locked;
75 };
76
77 /*
78 * -L option requires per-page information. The information is presented in an
79 * array of page_descr structures.
80 */
81 typedef struct page_descr {
82 uintptr_t pd_start; /* start address of a page */
83 size_t pd_pagesize; /* page size in bytes */
84 lgrp_id_t pd_lgrp; /* lgroup of memory backing the page */
85 int pd_valid; /* valid page description if non-zero */
86 } page_descr_t;
87
88 /*
89 * Per-page information for a memory chunk.
90 * The meminfo(2) system call accepts up to MAX_MEMINFO_CNT pages at once.
91 * When we need to scan larger ranges we divide them in MAX_MEMINFO_CNT sized
92 * chunks. The chunk information is stored in the memory_chunk structure.
93 */
94 typedef struct memory_chunk {
95 page_descr_t page_info[MAX_MEMINFO_CNT];
96 uintptr_t end_addr;
97 uintptr_t chunk_start; /* Starting address */
98 uintptr_t chunk_end; /* chunk_end is always <= end_addr */
99 size_t page_size;
100 int page_index; /* Current page */
101 int page_count; /* Number of pages */
102 } memory_chunk_t;
103
104 static volatile int interrupt;
105
106 typedef int proc_xmap_f(void *, const prxmap_t *, const char *, int, int);
107
108 static int xmapping_iter(struct ps_prochandle *, proc_xmap_f *, void *,
109 int);
110 static int rmapping_iter(struct ps_prochandle *, proc_map_f *, void *);
111
112 static int look_map(void *, const prmap_t *, const char *);
113 static int look_smap(void *, const prxmap_t *, const char *, int, int);
114 static int look_xmap(void *, const prxmap_t *, const char *, int, int);
115 static int look_xmap_nopgsz(void *, const prxmap_t *, const char *,
116 int, int);
117
118 static int gather_map(void *, const prmap_t *, const char *);
119 static int gather_xmap(void *, const prxmap_t *, const char *, int, int);
120 static int iter_map(proc_map_f *, void *);
121 static int iter_xmap(proc_xmap_f *, void *);
122 static int parse_addr_range(char *, uintptr_t *, uintptr_t *);
123 static void mem_chunk_init(memory_chunk_t *, uintptr_t, size_t);
124
125 static int perr(char *);
126 static void printK(long, int);
127 static char *mflags(uint_t);
128
129 static size_t get_contiguous_region(memory_chunk_t *, uintptr_t,
130 uintptr_t, size_t, lgrp_id_t *);
131 static void mem_chunk_get(memory_chunk_t *, uintptr_t);
132 static lgrp_id_t addr_to_lgrp(memory_chunk_t *, uintptr_t, size_t *);
133 static char *lgrp2str(lgrp_id_t);
134
135 static int address_in_range(uintptr_t, uintptr_t, size_t);
136 static size_t adjust_addr_range(uintptr_t, uintptr_t, size_t,
137 uintptr_t *, uintptr_t *);
138
139 static int lflag = 0;
140 static int Lflag = 0;
141 static int aflag = 0;
142
143 /*
144 * The -A address range is represented as a pair of addresses
145 * <start_addr, end_addr>. Either one of these may be unspecified (set to
146 * INVALID_ADDRESS). If both are unspecified, no address range restrictions are
147 * in place.
148 */
149 static uintptr_t start_addr = INVALID_ADDRESS;
150 static uintptr_t end_addr = INVALID_ADDRESS;
151
152 static int addr_width, size_width;
153 static char *command;
154 static char *procname;
155 static struct ps_prochandle *Pr;
156
157 static void intr(int);
158
159 typedef struct {
160 prxmap_t md_xmap;
161 prmap_t md_map;
162 char *md_objname;
163 boolean_t md_last;
164 int md_doswap;
165 } mapdata_t;
166
167 static mapdata_t *maps;
168 static int map_count;
169 static int map_alloc;
170
171 static lwpstack_t *stacks = NULL;
172 static uint_t nstacks = 0;
173
174 #define MAX_TRIES 5
175
176 static boolean_t
reallocstacks(uint_t newcount)177 reallocstacks(uint_t newcount)
178 {
179 lwpstack_t *newstacks;
180
181 newstacks = recallocarray(stacks, nstacks, newcount,
182 sizeof (lwpstack_t));
183 if (newstacks != NULL) {
184 stacks = newstacks;
185 nstacks = newcount;
186 return (B_TRUE);
187 }
188 return (B_FALSE);
189 }
190
/*
 * Plwp_iter() callback: record the alternate and main stacks of one LWP
 * into the global stacks[] array.  'data' points at the running count of
 * entries already stored; it is advanced by up to two (alt stack + main
 * stack).  Returns non-zero to terminate the walk early.
 */
static int
getstack(void *data, const lwpstatus_t *lsp)
{
	uint_t *np = (uint_t *)data;

	/*
	 * In the unlikely event that the number of LWPs has increased since we
	 * allocated the stacks array to hold them, expand the space for these
	 * next two entries.
	 */
	if (*np + 2 > nstacks && !reallocstacks(nstacks + 2)) {
		(void) fprintf(stderr, "%s: warning: "
		    "number of LWPs changed during execution, some details "
		    "have been omitted.\n", command);
		/* Terminate the walk */
		return (1);
	}

	/* Record the alternate signal stack, if this LWP has one. */
	if (Plwp_alt_stack(Pr, lsp->pr_lwpid, &stacks[*np].lwps_stack) == 0) {
		/*
		 * NOTE(review): SS_ONSTACK appears to tag the entry as an
		 * alt stack for later consumers (e.g. anon_name()) — confirm.
		 */
		stacks[*np].lwps_stack.ss_flags |= SS_ONSTACK;
		stacks[*np].lwps_lwpid = lsp->pr_lwpid;
		(*np)++;
	}

	/* Record the main stack. */
	if (Plwp_main_stack(Pr, lsp->pr_lwpid, &stacks[*np].lwps_stack) == 0) {
		stacks[*np].lwps_lwpid = lsp->pr_lwpid;
		(*np)++;
	}

	VERIFY3U(*np, <=, nstacks);

	return (0);
}
224
/*
 * pmap entry point.  Parse the command-line options, validate mutually
 * exclusive combinations, then for each pid/core operand: grab the target
 * process (read-only unless -L requires an agent LWP), gather its address
 * space mappings via the gather_* callbacks, verify the address space did
 * not change while reading (retrying via the 'again' label), and display
 * the result in the format selected by -x/-S/-r/-s/-L or the default.
 * Returns the count of operands that failed, or 2 on usage error.
 */
int
main(int argc, char **argv)
{
	int rflag = 0, sflag = 0, xflag = 0, Fflag = 0;
	int errflg = 0, Sflag = 0;
	int rc = 0;
	int opt;
	const char *bar8 = "-------";
	const char *bar16 = "----------";
	const char *bar;
	struct rlimit rlim;
	struct stat64 statbuf;
	char buf[128];
	int mapfd;
	int prg_gflags = PGRAB_RDONLY;
	int prr_flags = 0;
	boolean_t use_agent_lwp = B_FALSE;

	/* Basename of argv[0]; used as the prefix of every diagnostic. */
	if ((command = strrchr(argv[0], '/')) != NULL)
		command++;
	else
		command = argv[0];

	while ((opt = getopt(argc, argv, "arsxSlLFA:")) != EOF) {
		switch (opt) {
		case 'a':		/* include shared mappings in -[xS] */
			aflag = 1;
			break;
		case 'r':		/* show reserved mappings */
			rflag = 1;
			break;
		case 's':		/* show hardware page sizes */
			sflag = 1;
			break;
		case 'S':		/* show swap reservations */
			Sflag = 1;
			break;
		case 'x':		/* show extended mappings */
			xflag = 1;
			break;
		case 'l':		/* show unresolved link map names */
			lflag = 1;
			break;
		case 'L':		/* show lgroup information */
			Lflag = 1;
			use_agent_lwp = B_TRUE;
			break;
		case 'F':		/* force grabbing (no O_EXCL) */
			Fflag = PGRAB_FORCE;
			break;
		case 'A':
			if (parse_addr_range(optarg, &start_addr, &end_addr)
			    != 0)
				errflg++;
			break;
		default:
			errflg = 1;
			break;
		}
	}

	argc -= optind;
	argv += optind;

	/*
	 * Reject incompatible option combinations: -S excludes -x/-r/-s,
	 * -x excludes -r, -a requires -x or -S, and -L excludes -x/-S.
	 */
	if ((Sflag && (xflag || rflag || sflag)) || (xflag && rflag) ||
	    (aflag && (!xflag && !Sflag)) ||
	    (Lflag && (xflag || Sflag))) {
		errflg = 1;
	}

	if (errflg || argc <= 0) {
		(void) fprintf(stderr,
		    "usage:\t%s [-rslF] [-A start[,end]] { pid | core } ...\n",
		    command);
		(void) fprintf(stderr,
		    "\t\t(report process address maps)\n");
		(void) fprintf(stderr,
		    "\t%s -L [-rslF] [-A start[,end]] pid ...\n", command);
		(void) fprintf(stderr,
		    "\t\t(report process address maps lgroups mappings)\n");
		(void) fprintf(stderr,
		    "\t%s -x [-aslF] [-A start[,end]] pid ...\n", command);
		(void) fprintf(stderr,
		    "\t\t(show resident/anon/locked mapping details)\n");
		(void) fprintf(stderr,
		    "\t%s -S [-alF] [-A start[,end]] { pid | core } ...\n",
		    command);
		(void) fprintf(stderr,
		    "\t\t(show swap reservations)\n\n");
		(void) fprintf(stderr,
		    "\t-a: include shared mappings in -[xS] summary\n");
		(void) fprintf(stderr,
		    "\t-r: show reserved address maps\n");
		(void) fprintf(stderr,
		    "\t-s: show hardware page sizes\n");
		(void) fprintf(stderr,
		    "\t-l: show unresolved dynamic linker map names\n");
		(void) fprintf(stderr,
		    "\t-F: force grabbing of the target process\n");
		(void) fprintf(stderr,
		    "\t-L: show lgroup mappings\n");
		(void) fprintf(stderr,
		    "\t-A start,end: limit output to the specified range\n");
		return (2);
	}

	/*
	 * Make sure we'll have enough file descriptors to handle a target
	 * that has many many mappings.
	 */
	if (getrlimit(RLIMIT_NOFILE, &rlim) == 0) {
		rlim.rlim_cur = rlim.rlim_max;
		(void) setrlimit(RLIMIT_NOFILE, &rlim);
		(void) enable_extended_FILE_stdio(-1, -1);
	}

	/*
	 * The implementation of -L option creates an agent LWP in the target
	 * process address space. The agent LWP issues meminfo(2) system calls
	 * on behalf of the target process. If we are interrupted prematurely,
	 * the target process remains in the stopped state with the agent still
	 * attached to it. To prevent such situation we catch signals from
	 * terminal and terminate gracefully.
	 */
	if (use_agent_lwp) {
		/*
		 * Buffer output to stdout, stderr while process is grabbed.
		 * Prevents infamous deadlocks due to pmap `pgrep xterm` and
		 * other variants.
		 */
		(void) proc_initstdio();

		prg_gflags = PGRAB_RETAIN | Fflag;
		prr_flags = PRELEASE_RETAIN;

		/* Only install handlers where the disposition is default. */
		if (sigset(SIGHUP, SIG_IGN) == SIG_DFL)
			(void) sigset(SIGHUP, intr);
		if (sigset(SIGINT, SIG_IGN) == SIG_DFL)
			(void) sigset(SIGINT, intr);
		if (sigset(SIGQUIT, SIG_IGN) == SIG_DFL)
			(void) sigset(SIGQUIT, intr);
		(void) sigset(SIGPIPE, intr);
		(void) sigset(SIGTERM, intr);
	}

	/* Process each pid or core-file operand in turn. */
	while (argc-- > 0) {
		char *arg;
		int gcode;
		psinfo_t psinfo;
		int tries = 0;

		if (use_agent_lwp)
			(void) proc_flushstdio();

		if ((Pr = proc_arg_grab(arg = *argv++, PR_ARG_ANY,
		    prg_gflags, &gcode)) == NULL) {
			(void) fprintf(stderr, "%s: cannot examine %s: %s\n",
			    command, arg, Pgrab_error(gcode));
			rc++;
			continue;
		}

		procname = arg;		/* for perr() */

		/* Column widths depend on the target's data model. */
		addr_width = (Pstatus(Pr)->pr_dmodel == PR_MODEL_LP64) ? 16 : 8;
		size_width = (Pstatus(Pr)->pr_dmodel == PR_MODEL_LP64) ? 11 : 8;
		bar = addr_width == 8 ? bar8 : bar16;
		(void) memcpy(&psinfo, Ppsinfo(Pr), sizeof (psinfo_t));
		proc_unctrl_psinfo(&psinfo);

		/*
		 * For a live process, open /proc/<pid>/map so we can later
		 * detect (via its mtime) whether the address space changed
		 * while we were reading it.
		 */
		if (Pstate(Pr) != PS_DEAD) {
			(void) snprintf(buf, sizeof (buf),
			    "/proc/%d/map", (int)psinfo.pr_pid);
			if ((mapfd = open(buf, O_RDONLY)) < 0) {
				(void) fprintf(stderr, "%s: cannot "
				    "examine %s: lost control of "
				    "process\n", command, arg);
				rc++;
				Prelease(Pr, prr_flags);
				continue;
			}
		} else {
			mapfd = -1;
		}

again:
		map_count = 0;

		if (Pstate(Pr) == PS_DEAD) {
			(void) printf("core '%s' of %d:\t%.70s\n",
			    arg, (int)psinfo.pr_pid, psinfo.pr_psargs);

			/* These options rely on /proc files a core lacks. */
			if (rflag || sflag || xflag || Sflag || Lflag) {
				(void) printf(" -%c option is not compatible "
				    "with core files\n", xflag ? 'x' :
				    sflag ? 's' : rflag ? 'r' :
				    Lflag ? 'L' : 'S');
				Prelease(Pr, prr_flags);
				rc++;
				continue;
			}

		} else {
			(void) printf("%d:\t%.70s\n",
			    (int)psinfo.pr_pid, psinfo.pr_psargs);
		}

		if (!(Pstatus(Pr)->pr_flags & PR_ISSYS)) {
			struct totals t;

			/*
			 * Since we're grabbing the process readonly, we need
			 * to make sure the address space doesn't change during
			 * execution.
			 */
			if (Pstate(Pr) != PS_DEAD) {
				/*
				 * NOTE(review): these two failure paths do
				 * not increment rc, unlike the other error
				 * paths above — confirm this is intentional.
				 */
				if (tries++ == MAX_TRIES) {
					Prelease(Pr, prr_flags);
					(void) close(mapfd);
					(void) fprintf(stderr, "%s: cannot "
					    "examine %s: address space is "
					    "changing\n", command, arg);
					continue;
				}

				if (fstat64(mapfd, &statbuf) != 0) {
					Prelease(Pr, prr_flags);
					(void) close(mapfd);
					(void) fprintf(stderr, "%s: cannot "
					    "examine %s: lost control of "
					    "process\n", command, arg);
					continue;
				}
			}

			/*
			 * Multiplied by 2 to accomodate the main and alt
			 * stack for each LWP.
			 */
			if (reallocstacks(psinfo.pr_nlwp * 2)) {
				uint_t n = 0;
				(void) Plwp_iter(Pr, getstack, &n);
				qsort(stacks, nstacks, sizeof (stacks[0]),
				    cmpstacks);
			}

			(void) memset(&t, 0, sizeof (t));

			/*
			 * A dynamically linked target whose rtld_db agent
			 * fails to initialize still gets output, just without
			 * shared-library names.
			 */
			if (Pgetauxval(Pr, AT_BASE) != -1L &&
			    Prd_agent(Pr) == NULL) {
				(void) fprintf(stderr, "%s: warning: "
				    "librtld_db failed to initialize; "
				    "shared library information will not be "
				    "available\n", command);
			}

			/*
			 * Gather data
			 */
			if (xflag)
				rc += xmapping_iter(Pr, gather_xmap, NULL, 0);
			else if (Sflag)
				rc += xmapping_iter(Pr, gather_xmap, NULL, 1);
			else {
				if (rflag)
					rc += rmapping_iter(Pr, gather_map,
					    NULL);
				else if (sflag)
					rc += xmapping_iter(Pr, gather_xmap,
					    NULL, 0);
				else if (lflag)
					rc += Pmapping_iter(Pr,
					    gather_map, NULL);
				else
					rc += Pmapping_iter_resolved(Pr,
					    gather_map, NULL);
			}

			/*
			 * Ensure mappings are consistent: if the map file's
			 * mtime changed while we read, discard and retry.
			 */
			if (Pstate(Pr) != PS_DEAD) {
				struct stat64 newbuf;

				if (fstat64(mapfd, &newbuf) != 0 ||
				    memcmp(&newbuf.st_mtim, &statbuf.st_mtim,
				    sizeof (newbuf.st_mtim)) != 0) {
					if (stacks != NULL) {
						free(stacks);
						stacks = NULL;
						nstacks = 0;
					}
					goto again;
				}
			}

			/*
			 * Display data.
			 */
			if (xflag) {
				/* -x: extended mappings with a totals row. */
				(void) printf("%*s%*s%*s%*s%*s "
				    "%sMode Mapped File\n",
				    addr_width, "Address",
				    size_width, "Kbytes",
				    size_width, "RSS",
				    size_width, "Anon",
				    size_width, "Locked",
				    sflag ? "Pgsz " : "");

				rc += iter_xmap(sflag ? look_xmap :
				    look_xmap_nopgsz, &t);

				(void) printf("%s%s %s %s %s %s\n",
				    addr_width == 8 ? "-" : "------",
				    bar, bar, bar, bar, bar);

				(void) printf("%stotal Kb", addr_width == 16 ?
				    " " : "");

				printK(t.total_size, size_width);
				printK(t.total_rss, size_width);
				printK(t.total_anon, size_width);
				printK(t.total_locked, size_width);

				(void) printf("\n");

			} else if (Sflag) {
				/* -S: swap reservations with a totals row. */
				(void) printf("%*s%*s%*s Mode"
				    " Mapped File\n",
				    addr_width, "Address",
				    size_width, "Kbytes",
				    size_width, "Swap");

				rc += iter_xmap(look_xmap_nopgsz, &t);

				(void) printf("%s%s %s %s\n",
				    addr_width == 8 ? "-" : "------",
				    bar, bar, bar);

				(void) printf("%stotal Kb", addr_width == 16 ?
				    " " : "");

				printK(t.total_size, size_width);
				printK(t.total_swap, size_width);

				(void) printf("\n");

			} else {

				/* Default, -r and -s (optionally -L). */
				if (rflag) {
					rc += iter_map(look_map, &t);
				} else if (sflag) {
					if (Lflag) {
						(void) printf("%*s %*s %4s"
						    " %-6s %s %s\n",
						    addr_width, "Address",
						    size_width,
						    "Bytes", "Pgsz", "Mode ",
						    "Lgrp", "Mapped File");
						rc += iter_xmap(look_smap, &t);
					} else {
						(void) printf("%*s %*s %4s"
						    " %-6s %s\n",
						    addr_width, "Address",
						    size_width,
						    "Bytes", "Pgsz", "Mode ",
						    "Mapped File");
						rc += iter_xmap(look_smap, &t);
					}
				} else {
					rc += iter_map(look_map, &t);
				}

				(void) printf(" %stotal %*luK\n",
				    addr_width == 16 ?
				    " " : "",
				    size_width, t.total_size);
			}

			if (stacks != NULL) {
				free(stacks);
				stacks = NULL;
				nstacks = 0;
			}

		}

		Prelease(Pr, prr_flags);
		if (mapfd != -1)
			(void) close(mapfd);
	}

	if (use_agent_lwp)
		(void) proc_finistdio();

	return (rc);
}
622
623 static int
rmapping_iter(struct ps_prochandle * Pr,proc_map_f * func,void * cd)624 rmapping_iter(struct ps_prochandle *Pr, proc_map_f *func, void *cd)
625 {
626 char mapname[PATH_MAX];
627 int mapfd, nmap, i, rc;
628 struct stat st;
629 prmap_t *prmapp, *pmp;
630 ssize_t n;
631
632 (void) snprintf(mapname, sizeof (mapname),
633 "/proc/%d/rmap", (int)Pstatus(Pr)->pr_pid);
634
635 if ((mapfd = open(mapname, O_RDONLY)) < 0 || fstat(mapfd, &st) != 0) {
636 if (mapfd >= 0)
637 (void) close(mapfd);
638 return (perr(mapname));
639 }
640
641 nmap = st.st_size / sizeof (prmap_t);
642 prmapp = malloc((nmap + 1) * sizeof (prmap_t));
643
644 if ((n = pread(mapfd, prmapp, (nmap + 1) * sizeof (prmap_t), 0L)) < 0) {
645 (void) close(mapfd);
646 free(prmapp);
647 return (perr("read rmap"));
648 }
649
650 (void) close(mapfd);
651 nmap = n / sizeof (prmap_t);
652
653 for (i = 0, pmp = prmapp; i < nmap; i++, pmp++) {
654 if ((rc = func(cd, pmp, NULL)) != 0) {
655 free(prmapp);
656 return (rc);
657 }
658 }
659
660 free(prmapp);
661 return (0);
662 }
663
664 static int
xmapping_iter(struct ps_prochandle * Pr,proc_xmap_f * func,void * cd,int doswap)665 xmapping_iter(struct ps_prochandle *Pr, proc_xmap_f *func, void *cd, int doswap)
666 {
667 char mapname[PATH_MAX];
668 int mapfd, nmap, i, rc;
669 struct stat st;
670 prxmap_t *prmapp, *pmp;
671 ssize_t n;
672
673 (void) snprintf(mapname, sizeof (mapname),
674 "/proc/%d/xmap", (int)Pstatus(Pr)->pr_pid);
675
676 if ((mapfd = open(mapname, O_RDONLY)) < 0 || fstat(mapfd, &st) != 0) {
677 if (mapfd >= 0)
678 (void) close(mapfd);
679 return (perr(mapname));
680 }
681
682 nmap = st.st_size / sizeof (prxmap_t);
683 nmap *= 2;
684 again:
685 prmapp = malloc((nmap + 1) * sizeof (prxmap_t));
686
687 if ((n = pread(mapfd, prmapp, (nmap + 1) * sizeof (prxmap_t), 0)) < 0) {
688 (void) close(mapfd);
689 free(prmapp);
690 return (perr("read xmap"));
691 }
692
693 if (nmap < n / sizeof (prxmap_t)) {
694 free(prmapp);
695 nmap *= 2;
696 goto again;
697 }
698
699 (void) close(mapfd);
700 nmap = n / sizeof (prxmap_t);
701
702 for (i = 0, pmp = prmapp; i < nmap; i++, pmp++) {
703 if ((rc = func(cd, pmp, NULL, i == nmap - 1, doswap)) != 0) {
704 free(prmapp);
705 return (rc);
706 }
707 }
708
709 /*
710 * Mark the last element.
711 */
712 if (map_count > 0)
713 maps[map_count - 1].md_last = B_TRUE;
714
715 free(prmapp);
716 return (0);
717 }
718
/*
 * Display one prmap_t entry (default and -r output).  'data' is the
 * running struct totals.  Without -L the whole mapping is printed as a
 * single line; with -L the mapping is walked page by page and contiguous
 * runs backed by the same lgroup are printed as separate lines.
 * Always returns 0 so iteration continues.
 */
static int
look_map(void *data, const prmap_t *pmp, const char *object_name)
{
	struct totals *t = data;
	const pstatus_t *Psp = Pstatus(Pr);
	size_t size;
	char mname[PATH_MAX];
	char *lname = NULL;
	size_t psz = pmp->pr_pagesize;
	uintptr_t vaddr = pmp->pr_vaddr;
	uintptr_t segment_end = vaddr + pmp->pr_size;
	lgrp_id_t lgrp;
	memory_chunk_t mchunk;

	/*
	 * If the mapping is not anon or not part of the heap, make a name
	 * for it.  We don't want to report the heap as a.out's data.
	 */
	if (!(pmp->pr_mflags & MA_ANON) ||
	    segment_end <= Psp->pr_brkbase ||
	    pmp->pr_vaddr >= Psp->pr_brkbase + Psp->pr_brksize) {
		lname = make_name(Pr, lflag, pmp->pr_vaddr, pmp->pr_mapname,
		    mname, sizeof (mname));
	}

	/* Fall back to a synthetic [heap]/[stack]-style anon name. */
	if (lname == NULL &&
	    ((pmp->pr_mflags & MA_ANON) || Pstate(Pr) == PS_DEAD)) {
		lname = anon_name(mname, Psp, stacks, nstacks, pmp->pr_vaddr,
		    pmp->pr_size, pmp->pr_mflags, pmp->pr_shmid, NULL);
	}

	/*
	 * Adjust the address range if -A is specified.
	 */
	size = adjust_addr_range(pmp->pr_vaddr, segment_end, psz,
	    &vaddr, &segment_end);

	if (size == 0)
		return (0);

	if (!Lflag) {
		/*
		 * Display the whole mapping
		 */
		size = ROUNDUP_KB(size);

		/* lname may be NULL; the shorter format ignores that arg. */
		(void) printf(lname ?
		    "%.*lX %*luK %-6s %s\n" :
		    "%.*lX %*luK %s\n",
		    addr_width, vaddr,
		    size_width - 1, size, mflags(pmp->pr_mflags), lname);

		t->total_size += size;
		return (0);
	}

	/*
	 * We need to display lgroups backing physical memory, so we break the
	 * segment into individual pages and coalesce pages with the same lgroup
	 * into one "segment".
	 */

	/*
	 * Initialize address descriptions for the mapping.
	 */
	mem_chunk_init(&mchunk, segment_end, psz);
	size = 0;

	/*
	 * Walk mapping (page by page) and display contiguous ranges of memory
	 * allocated to same lgroup.
	 */
	do {
		size_t size_contig;

		/*
		 * Get contiguous region of memory starting from vaddr allocated
		 * from the same lgroup.
		 */
		size_contig = get_contiguous_region(&mchunk, vaddr,
		    segment_end, pmp->pr_pagesize, &lgrp);

		(void) printf(lname ? "%.*lX %*luK %-6s%s %s\n" :
		    "%.*lX %*luK %s %s\n",
		    addr_width, vaddr,
		    size_width - 1, size_contig / KILOBYTE,
		    mflags(pmp->pr_mflags),
		    lgrp2str(lgrp), lname);

		vaddr += size_contig;
		size += size_contig;
	} while (vaddr < segment_end && !interrupt);	/* intr() may stop us */

	/* Update the total size */
	t->total_size += ROUNDUP_KB(size);
	return (0);
}
816
/*
 * Print one right-aligned size column of 'width' characters: the value
 * itself, or a padded "-" placeholder when the value is zero.  The
 * non-zero branch emits 1 + (width - 1) = width characters, so the
 * placeholder strings are sized to match (8 and 11 columns).  As given,
 * both branches of the conditional printed the same collapsed string,
 * making the width test dead and misaligning the 11-wide columns.
 */
static void
printK(long value, int width)
{
	if (value == 0)
		(void) printf(width == 8 ? "       -" : "          -");
	else
		(void) printf(" %*lu", width - 1, value);
}
825
826 static const char *
pagesize(const prxmap_t * pmp)827 pagesize(const prxmap_t *pmp)
828 {
829 int pagesize = pmp->pr_hatpagesize;
830 static char buf[32];
831
832 if (pagesize == 0) {
833 return ("-"); /* no underlying HAT mapping */
834 }
835
836 if (pagesize >= KILOBYTE && (pagesize % KILOBYTE) == 0) {
837 if ((pagesize % GIGABYTE) == 0)
838 (void) snprintf(buf, sizeof (buf), "%dG",
839 pagesize / GIGABYTE);
840 else if ((pagesize % MEGABYTE) == 0)
841 (void) snprintf(buf, sizeof (buf), "%dM",
842 pagesize / MEGABYTE);
843 else
844 (void) snprintf(buf, sizeof (buf), "%dK",
845 pagesize / KILOBYTE);
846 } else
847 (void) snprintf(buf, sizeof (buf), "%db", pagesize);
848
849 return (buf);
850 }
851
/*
 * Display one prxmap_t entry for -s output (page-size column), and for
 * -sL the per-lgroup breakdown.  Mirrors look_map() but adds the
 * pagesize() column.  'data' is the running struct totals; 'last' and
 * 'doswap' are unused here.  Always returns 0 so iteration continues.
 */
static int
look_smap(void *data, const prxmap_t *pmp, const char *object_name, int last,
    int doswap)
{
	struct totals *t = data;
	const pstatus_t *Psp = Pstatus(Pr);
	size_t size;
	char mname[PATH_MAX];
	char *lname = NULL;
	const char *format;
	size_t psz = pmp->pr_pagesize;
	uintptr_t vaddr = pmp->pr_vaddr;
	uintptr_t segment_end = vaddr + pmp->pr_size;
	lgrp_id_t lgrp;
	memory_chunk_t mchunk;

	/*
	 * If the mapping is not anon or not part of the heap, make a name
	 * for it.  We don't want to report the heap as a.out's data.
	 */
	if (!(pmp->pr_mflags & MA_ANON) ||
	    pmp->pr_vaddr + pmp->pr_size <= Psp->pr_brkbase ||
	    pmp->pr_vaddr >= Psp->pr_brkbase + Psp->pr_brksize) {
		lname = make_name(Pr, lflag, pmp->pr_vaddr, pmp->pr_mapname,
		    mname, sizeof (mname));
	}

	/* Fall back to a synthetic anon-mapping name. */
	if (lname == NULL &&
	    ((pmp->pr_mflags & MA_ANON) || Pstate(Pr) == PS_DEAD)) {
		lname = anon_name(mname, Psp, stacks, nstacks, pmp->pr_vaddr,
		    pmp->pr_size, pmp->pr_mflags, pmp->pr_shmid, NULL);
	}

	/*
	 * Adjust the address range if -A is specified.
	 */
	size = adjust_addr_range(pmp->pr_vaddr, segment_end, psz,
	    &vaddr, &segment_end);

	if (size == 0)
		return (0);

	if (!Lflag) {
		/*
		 * Display the whole mapping
		 */
		if (lname != NULL)
			format = "%.*lX %*luK %4s %-6s %s\n";
		else
			format = "%.*lX %*luK %4s %s\n";

		size = ROUNDUP_KB(size);

		(void) printf(format, addr_width, vaddr, size_width - 1, size,
		    pagesize(pmp), mflags(pmp->pr_mflags), lname);

		t->total_size += size;
		return (0);
	}

	/* -L variant: an extra lgroup column follows the mode flags. */
	if (lname != NULL)
		format = "%.*lX %*luK %4s %-6s%s %s\n";
	else
		format = "%.*lX %*luK %4s%s %s\n";

	/*
	 * We need to display lgroups backing physical memory, so we break the
	 * segment into individual pages and coalesce pages with the same lgroup
	 * into one "segment".
	 */

	/*
	 * Initialize address descriptions for the mapping.
	 */
	mem_chunk_init(&mchunk, segment_end, psz);
	size = 0;

	/*
	 * Walk mapping (page by page) and display contiguous ranges of memory
	 * allocated to same lgroup.
	 */
	do {
		size_t size_contig;

		/*
		 * Get contiguous region of memory starting from vaddr allocated
		 * from the same lgroup.
		 */
		size_contig = get_contiguous_region(&mchunk, vaddr,
		    segment_end, pmp->pr_pagesize, &lgrp);

		(void) printf(format, addr_width, vaddr,
		    size_width - 1, size_contig / KILOBYTE,
		    pagesize(pmp), mflags(pmp->pr_mflags),
		    lgrp2str(lgrp), lname);

		vaddr += size_contig;
		size += size_contig;
	} while (vaddr < segment_end && !interrupt);	/* intr() may stop us */

	t->total_size += ROUNDUP_KB(size);
	return (0);
}
955
956 #define ANON(x) ((aflag || (((x)->pr_mflags & MA_SHARED) == 0)) ? \
957 ((x)->pr_anon) : 0)
958
/*
 * Display one prxmap_t entry for -xs output: size, RSS, anon and locked
 * columns (each converted from pages to Kbytes), plus the hardware page
 * size, mode flags and a shortened mapping name.  Accumulates the same
 * quantities into the struct totals pointed to by 'data'.  'last' and
 * 'doswap' are unused here.  Always returns 0.
 */
static int
look_xmap(void *data, const prxmap_t *pmp, const char *object_name, int last,
    int doswap)
{
	struct totals *t = data;
	const pstatus_t *Psp = Pstatus(Pr);
	char mname[PATH_MAX];
	char *lname = NULL;
	char *ln;

	/*
	 * If the mapping is not anon or not part of the heap, make a name
	 * for it.  We don't want to report the heap as a.out's data.
	 */
	if (!(pmp->pr_mflags & MA_ANON) ||
	    pmp->pr_vaddr + pmp->pr_size <= Psp->pr_brkbase ||
	    pmp->pr_vaddr >= Psp->pr_brkbase + Psp->pr_brksize) {
		lname = make_name(Pr, lflag, pmp->pr_vaddr, pmp->pr_mapname,
		    mname, sizeof (mname));
	}

	if (lname != NULL) {
		/* Show just the basename of the mapped file. */
		if ((ln = strrchr(lname, '/')) != NULL)
			lname = ln + 1;
	} else if ((pmp->pr_mflags & MA_ANON) || Pstate(Pr) == PS_DEAD) {
		lname = anon_name(mname, Psp, stacks, nstacks, pmp->pr_vaddr,
		    pmp->pr_size, pmp->pr_mflags, pmp->pr_shmid, NULL);
	}

	(void) printf("%.*lX", addr_width, (ulong_t)pmp->pr_vaddr);

	/* Page counts are scaled by pagesize/1024 to get Kbytes. */
	printK(ROUNDUP_KB(pmp->pr_size), size_width);
	printK(pmp->pr_rss * (pmp->pr_pagesize / KILOBYTE), size_width);
	printK(ANON(pmp) * (pmp->pr_pagesize / KILOBYTE), size_width);
	printK(pmp->pr_locked * (pmp->pr_pagesize / KILOBYTE), size_width);
	(void) printf(lname ? " %4s %-6s %s\n" : " %4s %s\n",
	    pagesize(pmp), mflags(pmp->pr_mflags), lname);

	t->total_size += ROUNDUP_KB(pmp->pr_size);
	t->total_rss += pmp->pr_rss * (pmp->pr_pagesize / KILOBYTE);
	t->total_anon += ANON(pmp) * (pmp->pr_pagesize / KILOBYTE);
	t->total_locked += (pmp->pr_locked * (pmp->pr_pagesize / KILOBYTE));

	return (0);
}
1004
/*
 * Display prxmap_t entries for -x (without -s) and -S output, merging
 * adjacent compatible mappings into a single output line.  Two entries
 * are merged when they are virtually contiguous, share mode flags, are
 * file-contiguous (or ISM), and have the same (basename) mapping name.
 * State is carried between calls in the prev_* statics; 'last' flags the
 * final entry of the iteration and flushes the pending line, 'doswap'
 * selects the -S column set (swap) over the -x set (RSS/anon/locked).
 * Totals are accumulated into 'data' per entry, before merging.
 * Always returns 0.  Not reentrant due to the static merge state.
 */
static int
look_xmap_nopgsz(void *data, const prxmap_t *pmp, const char *object_name,
    int last, int doswap)
{
	struct totals *t = data;
	const pstatus_t *Psp = Pstatus(Pr);
	char mname[PATH_MAX];
	char *lname = NULL;
	char *ln;
	/* Pending (possibly merged) mapping not yet printed. */
	static uintptr_t prev_vaddr;
	static size_t prev_size;
	static offset_t prev_offset;
	static int prev_mflags;
	static char *prev_lname;
	static char prev_mname[PATH_MAX];
	static ulong_t prev_rss;
	static ulong_t prev_anon;
	static ulong_t prev_locked;
	static ulong_t prev_swap;
	int merged = 0;
	static int first = 1;	/* reset when 'last' fires, for the next run */
	ulong_t swap = 0;
	int kperpage;

	/*
	 * Calculate swap reservations
	 */
	if (pmp->pr_mflags & MA_SHARED) {
		if (aflag && (pmp->pr_mflags & MA_NORESERVE) == 0) {
			/* Swap reserved for entire non-ism SHM */
			swap = pmp->pr_size / pmp->pr_pagesize;
		}
	} else if (pmp->pr_mflags & MA_NORESERVE) {
		/* Swap reserved on fault for each anon page */
		swap = pmp->pr_anon;
	} else if (pmp->pr_mflags & MA_WRITE) {
		/* Swap reserve for entire writable segment */
		swap = pmp->pr_size / pmp->pr_pagesize;
	}

	/*
	 * If the mapping is not anon or not part of the heap, make a name
	 * for it.  We don't want to report the heap as a.out's data.
	 */
	if (!(pmp->pr_mflags & MA_ANON) ||
	    pmp->pr_vaddr + pmp->pr_size <= Psp->pr_brkbase ||
	    pmp->pr_vaddr >= Psp->pr_brkbase + Psp->pr_brksize) {
		lname = make_name(Pr, lflag, pmp->pr_vaddr, pmp->pr_mapname,
		    mname, sizeof (mname));
	}

	if (lname != NULL) {
		/* Show just the basename of the mapped file. */
		if ((ln = strrchr(lname, '/')) != NULL)
			lname = ln + 1;
	} else if ((pmp->pr_mflags & MA_ANON) || Pstate(Pr) == PS_DEAD) {
		lname = anon_name(mname, Psp, stacks, nstacks, pmp->pr_vaddr,
		    pmp->pr_size, pmp->pr_mflags, pmp->pr_shmid, NULL);
	}

	/* Kbytes per page, used to scale all page counts below. */
	kperpage = pmp->pr_pagesize / KILOBYTE;

	/* Totals are per raw entry, independent of line merging. */
	t->total_size += ROUNDUP_KB(pmp->pr_size);
	t->total_rss += pmp->pr_rss * kperpage;
	t->total_anon += ANON(pmp) * kperpage;
	t->total_locked += pmp->pr_locked * kperpage;
	t->total_swap += swap * kperpage;

	if (first == 1) {
		/* First entry: start a pending line; print nothing yet. */
		first = 0;
		prev_vaddr = pmp->pr_vaddr;
		prev_size = pmp->pr_size;
		prev_offset = pmp->pr_offset;
		prev_mflags = pmp->pr_mflags;
		if (lname == NULL) {
			prev_lname = NULL;
		} else {
			(void) strcpy(prev_mname, lname);
			prev_lname = prev_mname;
		}
		prev_rss = pmp->pr_rss * kperpage;
		prev_anon = ANON(pmp) * kperpage;
		prev_locked = pmp->pr_locked * kperpage;
		prev_swap = swap * kperpage;
		if (last == 0) {
			return (0);
		}
		merged = 1;
	} else if (prev_vaddr + prev_size == pmp->pr_vaddr &&
	    prev_mflags == pmp->pr_mflags &&
	    ((prev_mflags & MA_ISM) ||
	    prev_offset + prev_size == pmp->pr_offset) &&
	    ((lname == NULL && prev_lname == NULL) ||
	    (lname != NULL && prev_lname != NULL &&
	    strcmp(lname, prev_lname) == 0))) {
		/* Mergeable with the pending line: absorb this entry. */
		prev_size += pmp->pr_size;
		prev_rss += pmp->pr_rss * kperpage;
		prev_anon += ANON(pmp) * kperpage;
		prev_locked += pmp->pr_locked * kperpage;
		prev_swap += swap * kperpage;
		if (last == 0) {
			return (0);
		}
		merged = 1;
	}

	/* Flush the pending (merged) line. */
	(void) printf("%.*lX", addr_width, (ulong_t)prev_vaddr);
	printK(ROUNDUP_KB(prev_size), size_width);

	if (doswap)
		printK(prev_swap, size_width);
	else {
		printK(prev_rss, size_width);
		printK(prev_anon, size_width);
		printK(prev_locked, size_width);
	}
	(void) printf(prev_lname ? " %-6s %s\n" : "%s\n",
	    mflags(prev_mflags), prev_lname);

	if (last == 0) {
		/* Current entry did not merge: it becomes the new pending. */
		prev_vaddr = pmp->pr_vaddr;
		prev_size = pmp->pr_size;
		prev_offset = pmp->pr_offset;
		prev_mflags = pmp->pr_mflags;
		if (lname == NULL) {
			prev_lname = NULL;
		} else {
			(void) strcpy(prev_mname, lname);
			prev_lname = prev_mname;
		}
		prev_rss = pmp->pr_rss * kperpage;
		prev_anon = ANON(pmp) * kperpage;
		prev_locked = pmp->pr_locked * kperpage;
		prev_swap = swap * kperpage;
	} else if (merged == 0) {
		/* Last entry stands alone: print it directly. */
		(void) printf("%.*lX", addr_width, (ulong_t)pmp->pr_vaddr);
		printK(ROUNDUP_KB(pmp->pr_size), size_width);
		if (doswap)
			printK(swap * kperpage, size_width);
		else {
			printK(pmp->pr_rss * kperpage, size_width);
			printK(ANON(pmp) * kperpage, size_width);
			printK(pmp->pr_locked * kperpage, size_width);
		}
		(void) printf(lname ? " %-6s %s\n" : " %s\n",
		    mflags(pmp->pr_mflags), lname);
	}

	/* Reset for the next target processed by main(). */
	if (last != 0)
		first = 1;

	return (0);
}
1157
1158 static int
perr(char * s)1159 perr(char *s)
1160 {
1161 if (s)
1162 (void) fprintf(stderr, "%s: ", procname);
1163 else
1164 s = procname;
1165 perror(s);
1166 return (1);
1167 }
1168
1169 static char *
mflags(uint_t arg)1170 mflags(uint_t arg)
1171 {
1172 static char code_buf[80];
1173 char *str = code_buf;
1174
1175 /*
1176 * rwxsR
1177 *
1178 * r - segment is readable
1179 * w - segment is writable
1180 * x - segment is executable
1181 * s - segment is shared
1182 * R - segment is mapped MAP_NORESERVE
1183 *
1184 */
1185 (void) sprintf(str, "%c%c%c%c%c%c",
1186 arg & MA_READ ? 'r' : '-',
1187 arg & MA_WRITE ? 'w' : '-',
1188 arg & MA_EXEC ? 'x' : '-',
1189 arg & MA_SHARED ? 's' : '-',
1190 arg & MA_NORESERVE ? 'R' : '-',
1191 arg & MA_RESERVED1 ? '*' : ' ');
1192
1193 return (str);
1194 }
1195
1196 static mapdata_t *
nextmap(void)1197 nextmap(void)
1198 {
1199 mapdata_t *newmaps;
1200 int next;
1201
1202 if (map_count == map_alloc) {
1203 if (map_alloc == 0)
1204 next = 16;
1205 else
1206 next = map_alloc * 2;
1207
1208 newmaps = realloc(maps, next * sizeof (mapdata_t));
1209 if (newmaps == NULL) {
1210 (void) perr("failed to allocate maps");
1211 exit(1);
1212 }
1213 (void) memset(newmaps + map_alloc, '\0',
1214 (next - map_alloc) * sizeof (mapdata_t));
1215
1216 map_alloc = next;
1217 maps = newmaps;
1218 }
1219
1220 return (&maps[map_count++]);
1221 }
1222
1223 static int
gather_map(void * ignored,const prmap_t * map,const char * objname)1224 gather_map(void *ignored, const prmap_t *map, const char *objname)
1225 {
1226 mapdata_t *data;
1227
1228 /* Skip mappings which are outside the range specified by -A */
1229 if (!address_in_range(map->pr_vaddr,
1230 map->pr_vaddr + map->pr_size, map->pr_pagesize))
1231 return (0);
1232
1233 data = nextmap();
1234 data->md_map = *map;
1235 if (data->md_objname != NULL)
1236 free(data->md_objname);
1237 data->md_objname = objname ? strdup(objname) : NULL;
1238
1239 return (0);
1240 }
1241
1242 static int
gather_xmap(void * ignored,const prxmap_t * xmap,const char * objname,int last,int doswap)1243 gather_xmap(void *ignored, const prxmap_t *xmap, const char *objname,
1244 int last, int doswap)
1245 {
1246 mapdata_t *data;
1247
1248 /* Skip mappings which are outside the range specified by -A */
1249 if (!address_in_range(xmap->pr_vaddr,
1250 xmap->pr_vaddr + xmap->pr_size, xmap->pr_pagesize))
1251 return (0);
1252
1253 data = nextmap();
1254 data->md_xmap = *xmap;
1255 if (data->md_objname != NULL)
1256 free(data->md_objname);
1257 data->md_objname = objname ? strdup(objname) : NULL;
1258 data->md_last = last;
1259 data->md_doswap = doswap;
1260
1261 return (0);
1262 }
1263
1264 static int
iter_map(proc_map_f * func,void * data)1265 iter_map(proc_map_f *func, void *data)
1266 {
1267 int i;
1268 int ret;
1269
1270 for (i = 0; i < map_count; i++) {
1271 if (interrupt)
1272 break;
1273 if ((ret = func(data, &maps[i].md_map,
1274 maps[i].md_objname)) != 0)
1275 return (ret);
1276 }
1277
1278 return (0);
1279 }
1280
1281 static int
iter_xmap(proc_xmap_f * func,void * data)1282 iter_xmap(proc_xmap_f *func, void *data)
1283 {
1284 int i;
1285 int ret;
1286
1287 for (i = 0; i < map_count; i++) {
1288 if (interrupt)
1289 break;
1290 if ((ret = func(data, &maps[i].md_xmap, maps[i].md_objname,
1291 maps[i].md_last, maps[i].md_doswap)) != 0)
1292 return (ret);
1293 }
1294
1295 return (0);
1296 }
1297
1298 /*
1299 * Convert lgroup ID to string.
1300 * returns dash when lgroup ID is invalid.
1301 */
1302 static char *
lgrp2str(lgrp_id_t lgrp)1303 lgrp2str(lgrp_id_t lgrp)
1304 {
1305 static char lgrp_buf[20];
1306 char *str = lgrp_buf;
1307
1308 (void) sprintf(str, lgrp == LGRP_NONE ? " -" : "%4d", lgrp);
1309 return (str);
1310 }
1311
1312 /*
1313 * Parse address range specification for -A option.
1314 * The address range may have the following forms:
1315 *
1316 * address
1317 * start and end is set to address
1318 * address,
1319 * start is set to address, end is set to INVALID_ADDRESS
1320 * ,address
1321 * start is set to 0, end is set to address
1322 * address1,address2
1323 * start is set to address1, end is set to address2
1324 *
1325 */
/*
 * Split the -A argument at an optional comma and parse each half as a
 * hexadecimal address; an omitted half stays INVALID_ADDRESS.  Note
 * that the input string is modified in place (the comma is replaced
 * with a NUL).  Returns 0 on success, 1 if no address could be parsed.
 */
static int
parse_addr_range(char *input_str, uintptr_t *start, uintptr_t *end)
{
	const uintptr_t inval = (uintptr_t)(-1);	/* INVALID_ADDRESS */
	unsigned long lo = (unsigned long)inval;
	unsigned long hi = (unsigned long)inval;
	char *comma = strchr(input_str, ',');

	if (comma != NULL) {
		/*
		 * Comma present: an empty tail leaves the end address
		 * unset, otherwise parse the value after the comma.
		 */
		*comma++ = '\0';
		if (*comma != '\0' && sscanf(comma, "%lx", &hi) != 1)
			return (1);
	}

	/* An empty head leaves the start address unset. */
	if (*input_str != '\0' && sscanf(input_str, "%lx", &lo) != 1)
		return (1);

	/* A bare "address" (no comma) denotes the range [addr, addr]. */
	if (comma == NULL)
		hi = lo;

	/* ",end" denotes the range [0, end]. */
	if (hi != (unsigned long)inval && lo == (unsigned long)inval)
		lo = 0;

	*start = (uintptr_t)lo;
	*end = (uintptr_t)hi;

	/* Fail if neither a start nor an end address was supplied. */
	return (lo == (unsigned long)inval && hi == (unsigned long)inval);
}
1372
1373 /*
1374 * Check whether any portion of [start, end] segment is within the
1375 * [start_addr, end_addr] range.
1376 *
1377 * Return values:
1378 * 0 - address is outside the range
1379 * 1 - address is within the range
1380 */
1381 static int
address_in_range(uintptr_t start,uintptr_t end,size_t psz)1382 address_in_range(uintptr_t start, uintptr_t end, size_t psz)
1383 {
1384 int rc = 1;
1385
1386 /*
1387 * Nothing to do if there is no address range specified with -A
1388 */
1389 if (start_addr != INVALID_ADDRESS || end_addr != INVALID_ADDRESS) {
1390 /* The segment end is below the range start */
1391 if ((start_addr != INVALID_ADDRESS) &&
1392 (end < P2ALIGN(start_addr, psz)))
1393 rc = 0;
1394
1395 /* The segment start is above the range end */
1396 if ((end_addr != INVALID_ADDRESS) &&
1397 (start > P2ALIGN(end_addr + psz, psz)))
1398 rc = 0;
1399 }
1400 return (rc);
1401 }
1402
1403 /*
1404 * Returns an intersection of the [start, end] interval and the range specified
1405 * by -A flag [start_addr, end_addr]. Unspecified parts of the address range
1406 * have value INVALID_ADDRESS.
1407 *
1408 * The start_addr address is rounded down to the beginning of page and end_addr
1409 * is rounded up to the end of page.
1410 *
1411 * Returns the size of the resulting interval or zero if the interval is empty
1412 * or invalid.
1413 */
1414 static size_t
adjust_addr_range(uintptr_t start,uintptr_t end,size_t psz,uintptr_t * new_start,uintptr_t * new_end)1415 adjust_addr_range(uintptr_t start, uintptr_t end, size_t psz,
1416 uintptr_t *new_start, uintptr_t *new_end)
1417 {
1418 uintptr_t from; /* start_addr rounded down */
1419 uintptr_t to; /* end_addr rounded up */
1420
1421 /*
1422 * Round down the lower address of the range to the beginning of page.
1423 */
1424 if (start_addr == INVALID_ADDRESS) {
1425 /*
1426 * No start_addr specified by -A, the lower part of the interval
1427 * does not change.
1428 */
1429 *new_start = start;
1430 } else {
1431 from = P2ALIGN(start_addr, psz);
1432 /*
1433 * If end address is outside the range, return an empty
1434 * interval
1435 */
1436 if (end < from) {
1437 *new_start = *new_end = 0;
1438 return (0);
1439 }
1440 /*
1441 * The adjusted start address is the maximum of requested start
1442 * and the aligned start_addr of the -A range.
1443 */
1444 *new_start = start < from ? from : start;
1445 }
1446
1447 /*
1448 * Round up the higher address of the range to the end of page.
1449 */
1450 if (end_addr == INVALID_ADDRESS) {
1451 /*
1452 * No end_addr specified by -A, the upper part of the interval
1453 * does not change.
1454 */
1455 *new_end = end;
1456 } else {
1457 /*
1458 * If only one address is specified and it is the beginning of a
1459 * segment, get information about the whole segment. This
1460 * function is called once per segment and the 'end' argument is
1461 * always the end of a segment, so just use the 'end' value.
1462 */
1463 to = (end_addr == start_addr && start == start_addr) ?
1464 end :
1465 P2ALIGN(end_addr + psz, psz);
1466 /*
1467 * If start address is outside the range, return an empty
1468 * interval
1469 */
1470 if (start > to) {
1471 *new_start = *new_end = 0;
1472 return (0);
1473 }
1474 /*
1475 * The adjusted end address is the minimum of requested end
1476 * and the aligned end_addr of the -A range.
1477 */
1478 *new_end = end > to ? to : end;
1479 }
1480
1481 /*
1482 * Make sure that the resulting interval is legal.
1483 */
1484 if (*new_end < *new_start)
1485 *new_start = *new_end = 0;
1486
1487 /* Return the size of the interval */
1488 return (*new_end - *new_start);
1489 }
1490
1491 /*
1492 * Initialize memory_info data structure with information about a new segment.
1493 */
1494 static void
mem_chunk_init(memory_chunk_t * chunk,uintptr_t end,size_t psz)1495 mem_chunk_init(memory_chunk_t *chunk, uintptr_t end, size_t psz)
1496 {
1497 chunk->end_addr = end;
1498 chunk->page_size = psz;
1499 chunk->page_index = 0;
1500 chunk->chunk_start = chunk->chunk_end = 0;
1501 }
1502
1503 /*
1504 * Create a new chunk of addresses starting from vaddr.
1505 * Pass the whole chunk to pr_meminfo to collect lgroup and page size
1506 * information for each page in the chunk.
1507 */
static void
mem_chunk_get(memory_chunk_t *chunk, uintptr_t vaddr)
{
	page_descr_t *pdp = chunk->page_info;
	size_t psz = chunk->page_size;
	uintptr_t addr = vaddr;
	uint64_t inaddr[MAX_MEMINFO_CNT];	/* page addrs for pr_meminfo */
	uint64_t outdata[2 * MAX_MEMINFO_CNT];	/* (lgrp, pagesize) pairs */
	uint_t info[2] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
	uint_t validity[MAX_MEMINFO_CNT];	/* per-address validity bits */
	uint64_t *dataptr = inaddr;
	uint64_t *outptr = outdata;
	uint_t *valptr = validity;
	int i, j, rc;

	chunk->chunk_start = vaddr;
	chunk->page_index = 0;		/* reset index for the new chunk */

	/*
	 * Fill in MAX_MEMINFO_CNT worth of pages starting from vaddr
	 * (stopping early at end_addr).  Also, copy the starting address
	 * of each page to the inaddr array for pr_meminfo.  Each page
	 * descriptor starts out invalid with no lgroup/pagesize data.
	 */
	for (i = 0, pdp = chunk->page_info;
	    (i < MAX_MEMINFO_CNT) && (addr <= chunk->end_addr);
	    i++, pdp++, dataptr++, addr += psz) {
		*dataptr = (uint64_t)addr;
		pdp->pd_start = addr;
		pdp->pd_lgrp = LGRP_NONE;
		pdp->pd_valid = 0;
		pdp->pd_pagesize = 0;
	}

	/* Mark the number of entries in the chunk and the last address */
	chunk->page_count = i;
	chunk->chunk_end = addr - psz;

	/* Skip the (potentially slow) meminfo call if the user gave up. */
	if (interrupt)
		return;

	/* Call meminfo for all collected addresses */
	rc = pr_meminfo(Pr, inaddr, i, info, 2, outdata, validity);
	if (rc < 0) {
		/* Leave the descriptors invalid; callers see LGRP_NONE. */
		(void) perr("can not get memory information");
		return;
	}

	/*
	 * Verify validity of each result and fill in the page_info array.
	 * outdata holds two values per address (in info[] order), so
	 * outptr advances by 2 per page.
	 */
	pdp = chunk->page_info;
	for (j = 0; j < i; j++, pdp++, valptr++, outptr += 2) {
		/* Skip invalid address pointers (bit 0 of validity) */
		if ((*valptr & 1) == 0) {
			continue;
		}

		/* Is lgroup information available? (bit 1) */
		if ((*valptr & 2) != 0) {
			pdp->pd_lgrp = (lgrp_id_t)*outptr;
			pdp->pd_valid = 1;
		}

		/* Is page size information available? (bit 2) */
		if ((*valptr & 4) != 0) {
			pdp->pd_pagesize = *(outptr + 1);
		}
	}
}
1574
1575 /*
1576 * Starting from address 'vaddr' find the region with pages allocated from the
1577 * same lgroup.
1578 *
1579 * Arguments:
1580 * mchunk Initialized memory chunk structure
1581 * vaddr Starting address of the region
1582 * maxaddr Upper bound of the region
1583 * pagesize Default page size to use
1584 * ret_lgrp On exit contains the lgroup ID of all pages in the
1585 * region.
1586 *
1587 * Returns:
1588 * Size of the contiguous region in bytes
1589 * The lgroup ID of all pages in the region in ret_lgrp argument.
1590 */
static size_t
get_contiguous_region(memory_chunk_t *mchunk, uintptr_t vaddr,
    uintptr_t maxaddr, size_t pagesize, lgrp_id_t *ret_lgrp)
{
	size_t size_contig = 0;
	lgrp_id_t lgrp;			/* Lgroup of the region start */
	lgrp_id_t curr_lgrp;		/* Lgroup of the current page */
	size_t psz = pagesize;		/* Pagesize to use */

	/* Set both lgroup IDs to the lgroup of the first page */
	curr_lgrp = lgrp = addr_to_lgrp(mchunk, vaddr, &psz);

	/*
	 * Starting from vaddr, walk page by page until either the end
	 * of the segment is reached or a page is allocated from a different
	 * lgroup. Also stop if interrupted from keyboard.
	 *
	 * NOTE(review): the first page is looked up again on the first
	 * iteration; addr_to_lgrp() caches results within the current
	 * chunk, so the repeat lookup is answered from page_info.
	 */
	while ((vaddr < maxaddr) && (curr_lgrp == lgrp) && !interrupt) {
		/*
		 * Get lgroup ID and the page size of the current page.
		 */
		curr_lgrp = addr_to_lgrp(mchunk, vaddr, &psz);
		/* If there is no page size information, use the default */
		if (psz == 0)
			psz = pagesize;

		if (curr_lgrp == lgrp) {
			/*
			 * This page belongs to the contiguous region.
			 * Increase the region size and advance to the new page.
			 */
			size_contig += psz;
			vaddr += psz;
		}
	}

	/* Return the region lgroup ID and the size */
	*ret_lgrp = lgrp;
	return (size_contig);
}
1631
1632 /*
1633 * Given a virtual address, return its lgroup and page size. If there is meminfo
1634 * information for an address, use it, otherwise shift the chunk window to the
1635 * vaddr and create a new chunk with known meminfo information.
1636 */
static lgrp_id_t
addr_to_lgrp(memory_chunk_t *chunk, uintptr_t vaddr, size_t *psz)
{
	page_descr_t *pdp;
	lgrp_id_t lgrp = LGRP_NONE;	/* returned if vaddr is not found */
	int i;

	/* Default the reported page size; overridden below if known. */
	*psz = chunk->page_size;

	/* Bail out quickly once the user has interrupted the walk. */
	if (interrupt)
		return (0);

	/*
	 * Is there information about this address? If not, create a new chunk
	 * starting from vaddr and apply pr_meminfo() to the whole chunk.
	 */
	if (vaddr < chunk->chunk_start || vaddr > chunk->chunk_end) {
		/*
		 * This address is outside the chunk, get the new chunk and
		 * collect meminfo information for it.
		 */
		mem_chunk_get(chunk, vaddr);
	}

	/*
	 * Find information about the address, scanning forward from the
	 * last position used (page_index) -- lookups arrive in ascending
	 * address order, so earlier entries never need revisiting.
	 */
	pdp = &chunk->page_info[chunk->page_index];
	for (i = chunk->page_index; i < chunk->page_count; i++, pdp++) {
		if (pdp->pd_start == vaddr) {
			if (pdp->pd_valid) {
				lgrp = pdp->pd_lgrp;
				/*
				 * Override page size information if it is
				 * present.
				 */
				if (pdp->pd_pagesize > 0)
					*psz = pdp->pd_pagesize;
			}
			break;
		}
	}
	/*
	 * Remember where we ended - the next search will start here.
	 * We can query for the lgrp for the same address again, so do not
	 * advance index past the current value.
	 */
	chunk->page_index = i;

	return (lgrp);
}
1688
/*
 * Signal handler (presumably installed for SIGINT -- confirm against the
 * setup code): record that the user interrupted us so the longer walks
 * (map iteration, meminfo chunk collection) can stop early.  Only sets a
 * flag, keeping the handler async-signal-safe.
 * NOTE(review): `interrupt' is declared elsewhere; ideally it would be
 * volatile sig_atomic_t -- verify at its definition.
 */
static void
intr(int sig)
{
	interrupt = 1;
}
1694