/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2003 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * rstat service: built with rstat.x
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <signal.h>
#include <utmpx.h>
#include <nlist.h>
#include <fcntl.h>
#include <syslog.h>
#include <kstat.h>

#include <rpc/rpc.h>

#include <sys/socket.h>
#include <sys/cpuvar.h>
#include <sys/sysinfo.h>
#include <sys/systm.h>
#include <errno.h>
#include <sys/stropts.h>
#include <sys/tihdr.h>
#include <sys/sysmacros.h>

#include <net/if.h>
#include <inet/mib2.h>

#include "rstat.h"
#include "rstat_v2.h"

typedef struct {
	kstat_t	sys;
	kstat_t	vm;
} _cpu_stats_t;

/*
 * system and cpu stats
 */
static kstat_ctl_t	*kc;		/* libkstat cookie */
static int		ncpus;
static _cpu_stats_t	*cpu_stats_list = NULL;
static kstat_t		*system_misc_ksp;
static kstat_named_t	*boot_time_knp;
static kstat_named_t	*avenrun_1min_knp, *avenrun_5min_knp,
			*avenrun_15min_knp;
static int		hz;
static struct timeval	btm;		/* boottime */

/*
 * network interface stats
 */

typedef struct mib_item_s {
	struct mib_item_s	*next_item;
	long			group;
	long			mib_id;
	long			length;
	char			*valp;
} mib_item_t;

mib_item_t	*netstat_item;

/*
 * disk stats
 */

struct diskinfo {
	struct diskinfo	*next;
	kstat_t		*ks;
	kstat_io_t	kios;
};

#define	NULLDISK	(struct diskinfo *)0
static struct diskinfo	zerodisk = { NULL, NULL };
static struct diskinfo	*firstdisk = NULLDISK;
static struct diskinfo	*lastdisk = NULLDISK;
static struct diskinfo	*snip = NULLDISK;
static int		ndisks;

/*
 * net stats
 */

struct netinfo {
	struct netinfo	*next;
	kstat_t		*ks;
	kstat_named_t	*ipackets;
	kstat_named_t	*opackets;
	kstat_named_t	*ierrors;
	kstat_named_t	*oerrors;
	kstat_named_t	*collisions;
};

#define	NULLNET		(struct netinfo *)0
static struct netinfo	zeronet = { NULL, NULL, NULL, NULL, NULL, NULL, NULL };
static struct netinfo	*firstnet = NULLNET;
static struct netinfo	*lastnet = NULLNET;
static struct netinfo	*netsnip = NULLNET;
static int		nnets;

/*
 * Define EXIT_WHEN_IDLE if you are able to have this program invoked
 * automatically on demand (as from inetd).  When defined, the service
 * will terminate after being idle for 120 seconds.
 */

#define	EXIT_WHEN_IDLE	1

int sincelastreq = 0;		/* number of alarms since last request */
#ifdef EXIT_WHEN_IDLE
#define	CLOSEDOWN 120		/* how long to wait before exiting */
#endif /* def EXIT_WHEN_IDLE */

statstime stats_s3;
statsvar stats_s4;
/* V2 support for backwards compatibility to pre-5.0 systems */
statsswtch stats_s2;

static int stat_is_init = 0;

static void fail(int, char *, ...);
static void safe_zalloc(void **, int, int);
static kid_t safe_kstat_read(kstat_ctl_t *, kstat_t *, void *);
static kstat_t *safe_kstat_lookup(kstat_ctl_t *, char *, int, char *);
static void *safe_kstat_data_lookup(kstat_t *, char *);
static void system_stat_init(void);
static int system_stat_load(void);
static void init_disks(void);
static int diskinfo_load(void);
static void init_net(void);
static int netinfo_load(void);

static void updatestat(int);

static mib_item_t *mibget(int sd);
static int mibopen(void);
static char *octetstr(char *buf, Octet_t *op, int code);

static void kstat_copy(kstat_t *, kstat_t *, int);

static char *cmdname = "rpc.rstatd";

#define	CPU_STAT(ksp, name)	(((kstat_named_t *)safe_kstat_data_lookup( \
	(ksp), (name)))->value.ui64)
static _cpu_stats_t cpu_stats_all = { 0 };
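/*
 * One-time initialization: open the libkstat chain, preallocate the
 * minimal dk_xfer array, locate the system, CPU, disk and network
 * kstats, record the boot time (from utmpx, falling back to the
 * boot_time kstat), and arm the one-second SIGALRM that drives
 * updatestat().
 */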
static void
stat_init(void)
{
	struct utmpx *utmpx, utmpx_id;

	stat_is_init = 1;

	if ((kc = kstat_open()) == NULL)
		fail(1, "kstat_open(): can't open /dev/kstat");

	/*
	 * Preallocate minimal set of drive entries.
	 */
	if (stats_s4.dk_xfer.dk_xfer_val == NULL) {
		stats_s4.dk_xfer.dk_xfer_len = RSTAT_DK_NDRIVE;
		stats_s4.dk_xfer.dk_xfer_val =
		    (int *)calloc(RSTAT_DK_NDRIVE, sizeof (int));
	}

	system_stat_init();
	init_disks();
	init_net();

	/*
	 * To get the boot time, use utmpx, which is per-zone, but fall back
	 * to the system-wide kstat if utmpx is hosed for any reason.
	 */
	utmpx_id.ut_type = BOOT_TIME;
	if ((utmpx = getutxid(&utmpx_id)) != NULL)
		btm = utmpx->ut_tv;
	else {
		btm.tv_sec = boot_time_knp->value.ul;
		btm.tv_usec = 0; /* don't bother with usecs for boot time */
	}
	endutxent();
	stats_s4.boottime.tv_sec =
	    stats_s2.boottime.tv_sec =
	    stats_s3.boottime.tv_sec = btm.tv_sec;
	stats_s4.boottime.tv_usec =
	    stats_s2.boottime.tv_usec =
	    stats_s3.boottime.tv_usec = btm.tv_usec;

	updatestat(0);
	alarm(1);
	signal(SIGALRM, updatestat);
	sleep(2);		/* allow for one wake-up */
}
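/*
 * RPC service procedures for rstat protocol versions 2, 3 and 4.  The
 * rstatproc_stats_* procedures return pointers to static structures
 * that updatestat() refreshes once a second; rstatproc_havedisk_*
 * report whether any disks were found.  The first request performs
 * lazy initialization via stat_init(), and each request resets the
 * idle counter used by EXIT_WHEN_IDLE.
 */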
statsvar *
rstatproc_stats_4_svc(argp, svcrq)
	void *argp;
	struct svc_req *svcrq;
{
	if (!stat_is_init)
		stat_init();
#ifdef EXIT_WHEN_IDLE
	sincelastreq = 0;
#endif
	return (&stats_s4);
}

statstime *
rstatproc_stats_3_svc(argp, svcrq)
	void *argp;
	struct svc_req *svcrq;
{
	if (!stat_is_init)
		stat_init();
#ifdef EXIT_WHEN_IDLE
	sincelastreq = 0;
#endif
	return (&stats_s3);
}

statsswtch *
rstatproc_stats_2_svc(argp, svcrq)
	void *argp;
	struct svc_req *svcrq;
{
	if (!stat_is_init)
		stat_init();
#ifdef EXIT_WHEN_IDLE
	sincelastreq = 0;
#endif
	return (&stats_s2);
}


uint_t *
rstatproc_havedisk_4_svc(argp, svcrq)
	void *argp;
	struct svc_req *svcrq;
{
	return (rstatproc_havedisk_3_svc(argp, svcrq));
}

uint_t *
rstatproc_havedisk_3_svc(argp, svcrq)
	void *argp;
	struct svc_req *svcrq;
{
	static uint_t have;

	if (!stat_is_init)
		stat_init();
#ifdef EXIT_WHEN_IDLE
	sincelastreq = 0;
#endif
	have = (ndisks != 0);
	return (&have);
}

uint_t *
rstatproc_havedisk_2_svc(argp, svcrq)
	void *argp;
	struct svc_req *svcrq;
{
	return (rstatproc_havedisk_3_svc(argp, svcrq));
}

void
updatestat(int ignored)
{
	extern int _rpcpmstart;		/* Started by a port monitor ? */
	extern int _rpcsvcdirty;	/* Still serving ? */

#ifdef DEBUG
	fprintf(stderr, "entering updatestat\n");
#endif
#ifdef EXIT_WHEN_IDLE
	if (_rpcpmstart && sincelastreq >= CLOSEDOWN && !_rpcsvcdirty) {
#ifdef DEBUG
		fprintf(stderr, "about to closedown\n");
#endif
		exit(0);
	}
	sincelastreq++;
#endif /* def EXIT_WHEN_IDLE */

	(void) alarm(0);
#ifdef DEBUG
	fprintf(stderr, "boottime: %d %d\n", stats_s3.boottime.tv_sec,
	    stats_s3.boottime.tv_usec);
#endif
	while (system_stat_load() || diskinfo_load() || netinfo_load()) {
		(void) kstat_chain_update(kc);
		system_stat_init();
		init_disks();
		init_net();
	}
	stats_s4.cp_time.cp_time_len = CPU_STATES;
	if (stats_s4.cp_time.cp_time_val == NULL)
		stats_s4.cp_time.cp_time_val =
		    malloc(stats_s4.cp_time.cp_time_len * sizeof (int));
	stats_s2.cp_time[RSTAT_CPU_USER] =
	    stats_s3.cp_time[RSTAT_CPU_USER] =
	    stats_s4.cp_time.cp_time_val[RSTAT_CPU_USER] =
	    CPU_STAT(&cpu_stats_all.sys, "cpu_ticks_user");
	stats_s2.cp_time[RSTAT_CPU_NICE] =
	    stats_s3.cp_time[RSTAT_CPU_NICE] =
	    stats_s4.cp_time.cp_time_val[RSTAT_CPU_NICE] =
	    CPU_STAT(&cpu_stats_all.sys, "cpu_ticks_wait");
	stats_s2.cp_time[RSTAT_CPU_SYS] =
	    stats_s3.cp_time[RSTAT_CPU_SYS] =
	    stats_s4.cp_time.cp_time_val[RSTAT_CPU_SYS] =
	    CPU_STAT(&cpu_stats_all.sys, "cpu_ticks_kernel");
	stats_s2.cp_time[RSTAT_CPU_IDLE] =
	    stats_s3.cp_time[RSTAT_CPU_IDLE] =
	    stats_s4.cp_time.cp_time_val[RSTAT_CPU_IDLE] =
	    CPU_STAT(&cpu_stats_all.sys, "cpu_ticks_idle");

#ifdef DEBUG
	fprintf(stderr, "cpu: %d %d %d %d\n",
	    CPU_STAT(&cpu_stats_all.sys, "cpu_ticks_user"),
	    CPU_STAT(&cpu_stats_all.sys, "cpu_ticks_wait"),
	    CPU_STAT(&cpu_stats_all.sys, "cpu_ticks_kernel"),
	    CPU_STAT(&cpu_stats_all.sys, "cpu_ticks_idle"));
	fprintf(stderr, "cp_time: %d %d %d %d\n",
	    stats_s3.cp_time[RSTAT_CPU_USER],
	    stats_s3.cp_time[RSTAT_CPU_NICE],
	    stats_s3.cp_time[RSTAT_CPU_SYS],
	    stats_s3.cp_time[RSTAT_CPU_IDLE]);
#endif

	/* current time */
	gettimeofday((struct timeval *)&stats_s3.curtime, NULL);
	stats_s4.curtime = stats_s3.curtime;

	stats_s2.v_pgpgin =
	    stats_s3.v_pgpgin =
	    stats_s4.v_pgpgin = CPU_STAT(&cpu_stats_all.vm, "pgpgin");
	stats_s2.v_pgpgout =
	    stats_s3.v_pgpgout =
	    stats_s4.v_pgpgout = CPU_STAT(&cpu_stats_all.vm, "pgpgout");
	stats_s2.v_pswpin =
	    stats_s3.v_pswpin =
	    stats_s4.v_pswpin = CPU_STAT(&cpu_stats_all.vm, "pgswapin");
	stats_s2.v_pswpout =
	    stats_s3.v_pswpout =
	    stats_s4.v_pswpout = CPU_STAT(&cpu_stats_all.vm, "pgswapout");
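	/*
	 * The raw "intr" counter includes clock interrupts; subtracting
	 * hz ticks for every second of uptime leaves (approximately) the
	 * number of non-clock interrupts since boot.
	 */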
	stats_s3.v_intr = CPU_STAT(&cpu_stats_all.sys, "intr");
	stats_s3.v_intr -= hz*(stats_s3.curtime.tv_sec - btm.tv_sec) +
	    hz*(stats_s3.curtime.tv_usec - btm.tv_usec)/1000000;
	stats_s2.v_intr =
	    stats_s4.v_intr = stats_s3.v_intr;
	/* swtch not in V1 */
	stats_s2.v_swtch =
	    stats_s3.v_swtch =
	    stats_s4.v_swtch = CPU_STAT(&cpu_stats_all.sys, "pswitch");

#ifdef DEBUG
	fprintf(stderr,
	    "pgin: %d pgout: %d swpin: %d swpout: %d intr: %d swtch: %d\n",
	    stats_s3.v_pgpgin,
	    stats_s3.v_pgpgout,
	    stats_s3.v_pswpin,
	    stats_s3.v_pswpout,
	    stats_s3.v_intr,
	    stats_s3.v_swtch);
#endif
	/*
	 * V2 and V3 of rstat are limited to RSTAT_DK_NDRIVE drives
	 */
	memcpy(stats_s3.dk_xfer, stats_s4.dk_xfer.dk_xfer_val,
	    RSTAT_DK_NDRIVE * sizeof (int));
	memcpy(stats_s2.dk_xfer, stats_s4.dk_xfer.dk_xfer_val,
	    RSTAT_DK_NDRIVE * sizeof (int));
#ifdef DEBUG
	fprintf(stderr, "dk_xfer: %d %d %d %d\n",
	    stats_s4.dk_xfer.dk_xfer_val[0],
	    stats_s4.dk_xfer.dk_xfer_val[1],
	    stats_s4.dk_xfer.dk_xfer_val[2],
	    stats_s4.dk_xfer.dk_xfer_val[3]);
#endif

	stats_s2.if_ipackets =
	    stats_s3.if_ipackets = stats_s4.if_ipackets;
	/* no s2 opackets */
	stats_s3.if_opackets = stats_s4.if_opackets;
	stats_s2.if_ierrors =
	    stats_s3.if_ierrors = stats_s4.if_ierrors;
	stats_s2.if_oerrors =
	    stats_s3.if_oerrors = stats_s4.if_oerrors;
	stats_s2.if_collisions =
	    stats_s3.if_collisions = stats_s4.if_collisions;

	stats_s2.avenrun[0] =
	    stats_s3.avenrun[0] =
	    stats_s4.avenrun[0] = avenrun_1min_knp->value.ul;
	stats_s2.avenrun[1] =
	    stats_s3.avenrun[1] =
	    stats_s4.avenrun[1] = avenrun_5min_knp->value.ul;
	stats_s2.avenrun[2] =
	    stats_s3.avenrun[2] =
	    stats_s4.avenrun[2] = avenrun_15min_knp->value.ul;
#ifdef DEBUG
	fprintf(stderr, "avenrun: %d %d %d\n", stats_s3.avenrun[0],
	    stats_s3.avenrun[1], stats_s3.avenrun[2]);
#endif
	signal(SIGALRM, updatestat);
	alarm(1);
}

/* --------------------------------- MIBGET -------------------------------- */

static mib_item_t *
mibget(int sd)
{
	int flags;
	int j, getcode;
	struct strbuf ctlbuf, databuf;
	char buf[512];
	struct T_optmgmt_req *tor = (struct T_optmgmt_req *)buf;
	struct T_optmgmt_ack *toa = (struct T_optmgmt_ack *)buf;
	struct T_error_ack *tea = (struct T_error_ack *)buf;
	struct opthdr *req;
	mib_item_t *first_item = NULL;
	mib_item_t *last_item = NULL;
	mib_item_t *temp;

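	/*
	 * Build a T_SVR4_OPTMGMT_REQ with T_CURRENT and a single
	 * zero-length opthdr; the stack replies with one T_OPTMGMT_ACK
	 * plus a data message per MIB2 table, ending with an empty ack
	 * (see the comment below).
	 */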
	tor->PRIM_type = T_SVR4_OPTMGMT_REQ;
	tor->OPT_offset = sizeof (struct T_optmgmt_req);
	tor->OPT_length = sizeof (struct opthdr);
	tor->MGMT_flags = T_CURRENT;
	req = (struct opthdr *)&tor[1];
	req->level = MIB2_IP;		/* any MIB2_xxx value ok here */
	req->name = 0;
	req->len = 0;

	ctlbuf.buf = buf;
	ctlbuf.len = tor->OPT_length + tor->OPT_offset;
	flags = 0;
	if (putmsg(sd, &ctlbuf, NULL, flags) == -1) {
		perror("mibget: putmsg(ctl) failed");
		goto error_exit;
	}
	/*
	 * each reply consists of a ctl part for one fixed structure
	 * or table, as defined in mib2.h.  The format is a T_OPTMGMT_ACK,
	 * containing an opthdr structure.  level/name identify the entry,
	 * len is the size of the data part of the message.
	 */
	req = (struct opthdr *)&toa[1];
	ctlbuf.maxlen = sizeof (buf);
	/*CSTYLED*/
	for (j = 1; ; j++) {
		flags = 0;
		getcode = getmsg(sd, &ctlbuf, NULL, &flags);
		if (getcode == -1) {
#ifdef DEBUG_MIB
			int i = 0;

			perror("mibget getmsg(ctl) failed");
			fprintf(stderr, "# level name len\n");
			for (last_item = first_item; last_item;
			    last_item = last_item->next_item)
				fprintf(stderr, "%d %4d %5d %d\n", ++i,
				    last_item->group,
				    last_item->mib_id,
				    last_item->length);
#endif /* DEBUG_MIB */
			goto error_exit;
		}
		if (getcode == 0 &&
		    (ctlbuf.len >= sizeof (struct T_optmgmt_ack)) &&
		    (toa->PRIM_type == T_OPTMGMT_ACK) &&
		    (toa->MGMT_flags == T_SUCCESS) &&
		    req->len == 0) {
#ifdef DEBUG_MIB
			fprintf(stderr,
			    "mibget getmsg() %d returned EOD (level %d, name %d)\n",
			    j, req->level, req->name);
#endif /* DEBUG_MIB */
			return (first_item);	/* this is EOD msg */
		}

		if (ctlbuf.len >= sizeof (struct T_error_ack) &&
		    (tea->PRIM_type == T_ERROR_ACK)) {
#ifdef DEBUG_MIB
			fprintf(stderr,
			    "mibget %d gives T_ERROR_ACK: TLI_error = 0x%x, UNIX_error = 0x%x\n",
			    j, tea->TLI_error, tea->UNIX_error);
#endif /* DEBUG_MIB */
			errno = (tea->TLI_error == TSYSERR)
			    ? tea->UNIX_error : EPROTO;
			goto error_exit;
		}

		if (getcode != MOREDATA ||
		    (ctlbuf.len < sizeof (struct T_optmgmt_ack)) ||
		    (toa->PRIM_type != T_OPTMGMT_ACK) ||
		    (toa->MGMT_flags != T_SUCCESS)) {
#ifdef DEBUG_MIB
			fprintf(stderr,
			    "mibget getmsg(ctl) %d returned %d, ctlbuf.len = %d, PRIM_type = %d\n",
			    j, getcode, ctlbuf.len, toa->PRIM_type);
			if (toa->PRIM_type == T_OPTMGMT_ACK)
				fprintf(stderr,
				    "T_OPTMGMT_ACK: MGMT_flags = 0x%x, req->len = %d\n",
				    toa->MGMT_flags, req->len);
#endif /* DEBUG_MIB */
			errno = ENOMSG;
			goto error_exit;
		}

		temp = malloc(sizeof (mib_item_t));
		if (!temp) {
			perror("mibget malloc failed");
			goto error_exit;
		}
		if (last_item)
			last_item->next_item = temp;
		else
			first_item = temp;
		last_item = temp;
		last_item->next_item = NULL;
		last_item->group = req->level;
		last_item->mib_id = req->name;
		last_item->length = req->len;
		last_item->valp = malloc(req->len);
#ifdef DEBUG_MIB
		fprintf(stderr,
		    "msg %d: group = %4d mib_id = %5d length = %d\n",
		    j, last_item->group, last_item->mib_id,
		    last_item->length);
#endif /* DEBUG_MIB */
		databuf.maxlen = last_item->length;
		databuf.buf = last_item->valp;
		databuf.len = 0;
		flags = 0;
		getcode = getmsg(sd, NULL, &databuf, &flags);
		if (getcode == -1) {
			perror("mibget getmsg(data) failed");
			goto error_exit;
		} else if (getcode != 0) {
			fprintf(stderr,
			    "mibget getmsg(data) returned %d, databuf.maxlen = %d, databuf.len = %d\n",
			    getcode, databuf.maxlen, databuf.len);
			goto error_exit;
		}
	}

error_exit:
	while (first_item) {
		last_item = first_item;
		first_item = first_item->next_item;
		if (last_item->valp) {
			free(last_item->valp);
		}
		free(last_item);
	}
	return (first_item);
}
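/*
 * Open a stream to /dev/arp (IP with ARP on top) and push the tcp and
 * udp modules onto it, so that the MIB request issued by mibget() also
 * returns the TCP and UDP groups.  Returns the descriptor, or -1 on
 * failure.
 */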
static int
mibopen(void)
{
	int sd;

	/* gives us ip w/ arp on top */
	sd = open("/dev/arp", O_RDWR);
	if (sd == -1) {
		perror("arp open");
		return (-1);
	}
	if (ioctl(sd, I_PUSH, "tcp") == -1) {
		perror("tcp I_PUSH");
		close(sd);
		return (-1);
	}
	if (ioctl(sd, I_PUSH, "udp") == -1) {
		perror("udp I_PUSH");
		close(sd);
		return (-1);
	}
	return (sd);
}
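/*
 * Render an Octet_t into buf: 'd' gives dotted decimal, 'a' copies the
 * bytes as ASCII (used here to recover interface names), and 'h' (the
 * default) gives colon-separated hex.
 */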
static char *
octetstr(char *buf, Octet_t *op, int code)
{
	int i;
	char *cp;

	cp = buf;
	if (op)
		for (i = 0; i < op->o_length; i++)
			switch (code) {
			case 'd':
				sprintf(cp, "%d.", 0xff & op->o_bytes[i]);
				cp = strchr(cp, '\0');
				break;
			case 'a':
				*cp++ = op->o_bytes[i];
				break;
			case 'h':
			default:
				sprintf(cp, "%02x:", 0xff & op->o_bytes[i]);
				cp += 3;
				break;
			}
	if (code != 'a' && cp != buf)
		cp--;
	*cp = '\0';
	return (buf);
}

static void
fail(int do_perror, char *message, ...)
{
	va_list args;

	va_start(args, message);
	fprintf(stderr, "%s: ", cmdname);
	vfprintf(stderr, message, args);
	va_end(args);
	if (do_perror)
		fprintf(stderr, ": %s", strerror(errno));
	fprintf(stderr, "\n");
	exit(2);
}

static void
safe_zalloc(void **ptr, int size, int free_first)
{
	if (free_first && *ptr != NULL)
		free(*ptr);
	if ((*ptr = malloc(size)) == NULL)
		fail(1, "malloc failed");
	memset(*ptr, 0, size);
}

kid_t
safe_kstat_read(kstat_ctl_t *kctl, kstat_t *ksp, void *data)
{
	kid_t kstat_chain_id = kstat_read(kctl, ksp, data);

	if (kstat_chain_id == -1)
		fail(1, "kstat_read(%x, '%s') failed", kctl, ksp->ks_name);
	return (kstat_chain_id);
}

kstat_t *
safe_kstat_lookup(kstat_ctl_t *kctl, char *ks_module, int ks_instance,
	char *ks_name)
{
	kstat_t *ksp = kstat_lookup(kctl, ks_module, ks_instance, ks_name);

	if (ksp == NULL)
		fail(0, "kstat_lookup('%s', %d, '%s') failed",
		    ks_module == NULL ? "" : ks_module,
		    ks_instance,
		    ks_name == NULL ? "" : ks_name);
	return (ksp);
}

void *
safe_kstat_data_lookup(kstat_t *ksp, char *name)
{
	void *fp = kstat_data_lookup(ksp, name);

	if (fp == NULL) {
		fail(0, "kstat_data_lookup('%s', '%s') failed",
		    ksp->ks_name, name);
	}
	return (fp);
}

/*
 * Get various KIDs for subsequent system_stat_load operations.
 */

static void
system_stat_init(void)
{
	kstat_t *ksp;
	int i, nvmks;

	/*
	 * Global statistics
	 */

	system_misc_ksp = safe_kstat_lookup(kc, "unix", 0, "system_misc");

	safe_kstat_read(kc, system_misc_ksp, NULL);
	boot_time_knp = safe_kstat_data_lookup(system_misc_ksp, "boot_time");
	avenrun_1min_knp = safe_kstat_data_lookup(system_misc_ksp,
	    "avenrun_1min");
	avenrun_5min_knp = safe_kstat_data_lookup(system_misc_ksp,
	    "avenrun_5min");
	avenrun_15min_knp = safe_kstat_data_lookup(system_misc_ksp,
	    "avenrun_15min");

	/*
	 * Per-CPU statistics
	 */

	ncpus = 0;
	for (ksp = kc->kc_chain; ksp; ksp = ksp->ks_next)
		if (strcmp(ksp->ks_module, "cpu") == 0 &&
		    strcmp(ksp->ks_name, "sys") == 0)
			ncpus++;

	safe_zalloc((void **)&cpu_stats_list, ncpus * sizeof (*cpu_stats_list),
	    1);

	ncpus = 0;
	for (ksp = kc->kc_chain; ksp; ksp = ksp->ks_next)
		if (strcmp(ksp->ks_module, "cpu") == 0 &&
		    strcmp(ksp->ks_name, "sys") == 0 &&
		    kstat_read(kc, ksp, NULL) != -1) {
			kstat_copy(ksp, &cpu_stats_list[ncpus].sys, 1);
			if ((ksp = kstat_lookup(kc, "cpu", ksp->ks_instance,
			    "vm")) != NULL && kstat_read(kc, ksp, NULL) != -1)
				kstat_copy(ksp, &cpu_stats_list[ncpus].vm, 1);
			else
				fail(0, "couldn't find per-CPU VM statistics");
			ncpus++;
		}

	if (ncpus == 0)
		fail(0, "couldn't find per-CPU statistics");
}

/*
 * load statistics, summing across CPUs where needed
 */

static int
system_stat_load(void)
{
	int i, j;
	_cpu_stats_t cs;
	ulong_t *np, *tp;

	/*
	 * Global statistics
	 */

	safe_kstat_read(kc, system_misc_ksp, NULL);

	/*
	 * Per-CPU statistics.
	 */

	for (i = 0; i < ncpus; i++) {
		if (kstat_read(kc, &cpu_stats_list[i].sys, NULL) == -1 ||
		    kstat_read(kc, &cpu_stats_list[i].vm, NULL) == -1)
			return (1);
		if (i == 0) {
			kstat_copy(&cpu_stats_list[0].sys, &cpu_stats_all.sys,
			    1);
			kstat_copy(&cpu_stats_list[0].vm, &cpu_stats_all.vm, 1);
		} else {
			kstat_named_t *nkp;
			kstat_named_t *tkp;

			/*
			 * Other CPUs' statistics are accumulated in
			 * cpu_stats_all, initialized at the first iteration of
			 * the loop.
			 */
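			/*
			 * This pairwise summation assumes every CPU's sys
			 * and vm kstats carry the same named entries in the
			 * same order.
			 */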
			nkp = (kstat_named_t *)cpu_stats_all.sys.ks_data;
			tkp = (kstat_named_t *)cpu_stats_list[i].sys.ks_data;
			for (j = 0; j < cpu_stats_list[i].sys.ks_ndata; j++)
				(nkp++)->value.ui64 += (tkp++)->value.ui64;
			nkp = (kstat_named_t *)cpu_stats_all.vm.ks_data;
			tkp = (kstat_named_t *)cpu_stats_list[i].vm.ks_data;
			for (j = 0; j < cpu_stats_list[i].vm.ks_ndata; j++)
				(nkp++)->value.ui64 += (tkp++)->value.ui64;
		}
	}
	return (0);
}
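/*
 * Order two kstats by (ks_module, ks_instance, ks_name); used by the
 * insertion sort in init_disks().
 */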
static int
kscmp(kstat_t *ks1, kstat_t *ks2)
{
	int cmp;

	cmp = strcmp(ks1->ks_module, ks2->ks_module);
	if (cmp != 0)
		return (cmp);
	cmp = ks1->ks_instance - ks2->ks_instance;
	if (cmp != 0)
		return (cmp);
	return (strcmp(ks1->ks_name, ks2->ks_name));
}

static void
init_disks(void)
{
	struct diskinfo *disk, *prevdisk, *comp;
	kstat_t *ksp;

	ndisks = 0;
	disk = &zerodisk;

	/*
	 * Patch the snip in the diskinfo list (see below)
	 */
	if (snip)
		lastdisk->next = snip;

	for (ksp = kc->kc_chain; ksp; ksp = ksp->ks_next) {

		if (ksp->ks_type != KSTAT_TYPE_IO ||
		    strcmp(ksp->ks_class, "disk") != 0)
			continue;
		prevdisk = disk;
		if (disk->next)
			disk = disk->next;
		else {
			safe_zalloc((void **)&disk->next,
			    sizeof (struct diskinfo), 0);
			disk = disk->next;
			disk->next = NULLDISK;
		}
		disk->ks = ksp;
		memset((void *)&disk->kios, 0, sizeof (kstat_io_t));
		disk->kios.wlastupdate = disk->ks->ks_crtime;
		disk->kios.rlastupdate = disk->ks->ks_crtime;

		/*
		 * Insertion sort on (ks_module, ks_instance, ks_name)
		 */
		comp = &zerodisk;
		while (kscmp(disk->ks, comp->next->ks) > 0)
			comp = comp->next;
		if (prevdisk != comp) {
			prevdisk->next = disk->next;
			disk->next = comp->next;
			comp->next = disk;
			disk = prevdisk;
		}
		ndisks++;
	}
	/*
	 * Put a snip in the linked list of diskinfos.  The idea:
	 * If there was a state change such that now there are fewer
	 * disks, we snip the list and retain the tail, rather than
	 * freeing it.  At the next state change, we clip the tail back on.
	 * This prevents a lot of malloc/free activity, and it's simpler.
	 */
	lastdisk = disk;
	snip = disk->next;
	disk->next = NULLDISK;

	firstdisk = zerodisk.next;

	if (ndisks > stats_s4.dk_xfer.dk_xfer_len) {
		stats_s4.dk_xfer.dk_xfer_len = ndisks;
		safe_zalloc((void **)&stats_s4.dk_xfer.dk_xfer_val,
		    ndisks * sizeof (int), 1);
	}
}

static int
diskinfo_load(void)
{
	struct diskinfo *disk;
	int i;

	for (disk = firstdisk, i = 0; disk; disk = disk->next, i++) {
		if (kstat_read(kc, disk->ks, (void *)&disk->kios) == -1)
			return (1);
		stats_s4.dk_xfer.dk_xfer_val[i] = disk->kios.reads +
		    disk->kios.writes;
	}
	return (0);
}
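/*
 * Rebuild the network interface list: fetch the MIB2 IP address table
 * to learn the interface names, then attach each name's named kstat
 * and cache pointers to its packet, error and collision counters.
 * As in init_disks(), unused tail entries are retained (via netsnip)
 * rather than freed.
 */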
static void
init_net(void)
{
	static int sd;
	mib_item_t *item;
	mib2_ipAddrEntry_t *ap;
	char namebuf[KSTAT_STRLEN];
	struct netinfo *net, *prevnet, *comp;
	kstat_t *ksp;

	if (sd) {
		close(sd);
	}
	while (netstat_item) {
		item = netstat_item;
		netstat_item = netstat_item->next_item;
		if (item->valp) {
			free(item->valp);
		}
		free(item);
	}
	sd = mibopen();
	if (sd == -1) {
#ifdef DEBUG
		fprintf(stderr, "mibopen() failed\n");
#endif
		sd = 0;
	} else {
		if ((netstat_item = mibget(sd)) == NULL) {
#ifdef DEBUG
			fprintf(stderr, "mibget() failed\n");
#endif
			close(sd);
			sd = 0;
		}
	}
#ifdef DEBUG
	fprintf(stderr, "mibget returned item: %x\n", netstat_item);
#endif

	nnets = 0;
	net = &zeronet;

	if (netsnip)
		lastnet->next = netsnip;

	for (item = netstat_item; item; item = item->next_item) {
#ifdef DEBUG_MIB
		fprintf(stderr, "\n--- Item %x ---\n", item);
		fprintf(stderr,
		    "Group = %d, mib_id = %d, length = %d, valp = 0x%x\n",
		    item->group, item->mib_id, item->length,
		    item->valp);
#endif
		if (item->group != MIB2_IP || item->mib_id != MIB2_IP_20)
			continue;
		ap = (mib2_ipAddrEntry_t *)item->valp;
		for (; (char *)ap < item->valp + item->length; ap++) {

			octetstr(namebuf, &ap->ipAdEntIfIndex, 'a');
#ifdef DEBUG
			fprintf(stderr, "%s ", namebuf);
#endif
			if (strlen(namebuf) == 0)
				continue;
			/*
			 * We found a device of interest.
			 * Now, let's see if there's a kstat for it.
			 */
			if ((ksp = kstat_lookup(kc, NULL, -1, namebuf)) == NULL)
				continue;
			if (ksp->ks_type != KSTAT_TYPE_NAMED)
				continue;
			if (kstat_read(kc, ksp, NULL) == -1)
				continue;
			prevnet = net;
			if (net->next)
				net = net->next;
			else {
				safe_zalloc((void **)&net->next,
				    sizeof (struct netinfo), 0);
				net = net->next;
				net->next = NULLNET;
			}
			net->ks = ksp;
			net->ipackets = kstat_data_lookup(net->ks,
			    "ipackets");
			net->opackets = kstat_data_lookup(net->ks,
			    "opackets");
			net->ierrors = kstat_data_lookup(net->ks,
			    "ierrors");
			net->oerrors = kstat_data_lookup(net->ks,
			    "oerrors");
			net->collisions = kstat_data_lookup(net->ks,
			    "collisions");
			/*
			 * Insertion sort on the name
			 */
			comp = &zeronet;
			while (strcmp(net->ks->ks_name,
			    comp->next->ks->ks_name) > 0)
				comp = comp->next;
			if (prevnet != comp) {
				prevnet->next = net->next;
				net->next = comp->next;
				comp->next = net;
				net = prevnet;
			}
			nnets++;
		}
#ifdef DEBUG
		fprintf(stderr, "\n");
#endif
	}
	/*
	 * Put a snip in the linked list of netinfos.  The idea:
	 * If there was a state change such that now there are fewer
	 * nets, we snip the list and retain the tail, rather than
	 * freeing it.  At the next state change, we clip the tail back on.
	 * This prevents a lot of malloc/free activity, and it's simpler.
	 */
	lastnet = net;
	netsnip = net->next;
	net->next = NULLNET;

	firstnet = zeronet.next;
}

static int
netinfo_load(void)
{
	struct netinfo *net;

	if (netstat_item == NULL) {
#ifdef DEBUG
		fprintf(stderr, "No net stats\n");
#endif
		return (0);
	}

	stats_s4.if_ipackets =
	    stats_s4.if_opackets =
	    stats_s4.if_ierrors =
	    stats_s4.if_oerrors =
	    stats_s4.if_collisions = 0;

	for (net = firstnet; net; net = net->next) {
		if (kstat_read(kc, net->ks, NULL) == -1)
			return (1);
		if (net->ipackets)
			stats_s4.if_ipackets += net->ipackets->value.ul;
		if (net->opackets)
			stats_s4.if_opackets += net->opackets->value.ul;
		if (net->ierrors)
			stats_s4.if_ierrors += net->ierrors->value.ul;
		if (net->oerrors)
			stats_s4.if_oerrors += net->oerrors->value.ul;
		if (net->collisions)
			stats_s4.if_collisions += net->collisions->value.ul;
	}
#ifdef DEBUG
	fprintf(stderr,
	    "ipackets: %d opackets: %d ierrors: %d oerrors: %d colls: %d\n",
	    stats_s4.if_ipackets,
	    stats_s4.if_opackets,
	    stats_s4.if_ierrors,
	    stats_s4.if_oerrors,
	    stats_s4.if_collisions);
#endif
	return (0);
}

static void
kstat_copy(kstat_t *src, kstat_t *dst, int fr)
{
	if (fr)
		free(dst->ks_data);
	*dst = *src;
	if (src->ks_data != NULL) {
		safe_zalloc(&dst->ks_data, src->ks_data_size, 0);
		(void) memcpy(dst->ks_data, src->ks_data, src->ks_data_size);
	} else {
		dst->ks_data = NULL;
		dst->ks_data_size = 0;
	}
}