/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "statcommon.h"
#include "dsr.h"

#include <stdlib.h>
#include <stdarg.h>
#include <unistd.h>
#include <strings.h>
#include <errno.h>
#include <poll.h>

#define	ARRAY_SIZE(a)	(sizeof (a) / sizeof (*a))

/*
 * The time we delay before retrying after an allocation
 * failure, in milliseconds.
 */
#define	RETRY_DELAY	200

static char *cpu_states[] = {
	"cpu_ticks_idle",
	"cpu_ticks_user",
	"cpu_ticks_kernel",
	"cpu_ticks_wait"
};

extern char cmdname[];

/*
 * Look up a kstat by module, instance and name, then read its data.
 * Returns NULL if either the lookup or the read fails.
 */
static kstat_t *
kstat_lookup_read(kstat_ctl_t *kc, char *module,
    int instance, char *name)
{
	kstat_t *ksp = kstat_lookup(kc, module, instance, name);
	if (ksp == NULL)
		return (NULL);
	if (kstat_read(kc, ksp, NULL) == -1)
		return (NULL);
	return (ksp);
}

/*
 * Note: the following helpers do not clean up on failure; that is
 * left to free_snapshot() in the acquire_snapshot() failure path.
 */

/* Snapshot the state and the "vm" and "sys" kstats of every possible CPU. */
static int
acquire_cpus(struct snapshot *ss, kstat_ctl_t *kc)
{
	size_t i;

	ss->s_nr_cpus = sysconf(_SC_CPUID_MAX) + 1;
	ss->s_cpus = calloc(ss->s_nr_cpus, sizeof (struct cpu_snapshot));
	if (ss->s_cpus == NULL)
		goto out;

	for (i = 0; i < ss->s_nr_cpus; i++) {
		kstat_t *ksp;

		ss->s_cpus[i].cs_id = ID_NO_CPU;
		ss->s_cpus[i].cs_state = p_online(i, P_STATUS);
		/* this CPU is not present; move on to the next one */
		if (ss->s_cpus[i].cs_state == -1)
			continue;
		ss->s_cpus[i].cs_id = i;

		if ((ksp = kstat_lookup_read(kc, "cpu_info", i, NULL)) == NULL)
			goto out;

		(void) pset_assign(PS_QUERY, i, &ss->s_cpus[i].cs_pset_id);
		if (ss->s_cpus[i].cs_pset_id == PS_NONE)
			ss->s_cpus[i].cs_pset_id = ID_NO_PSET;

		if (!CPU_ACTIVE(&ss->s_cpus[i]))
			continue;

		if ((ksp = kstat_lookup_read(kc, "cpu", i, "vm")) == NULL)
			goto out;

		if (kstat_copy(ksp, &ss->s_cpus[i].cs_vm))
			goto out;

		if ((ksp = kstat_lookup_read(kc, "cpu", i, "sys")) == NULL)
			goto out;

		if (kstat_copy(ksp, &ss->s_cpus[i].cs_sys))
			goto out;
	}

	errno = 0;
out:
	return (errno);
}

/* Group the active CPUs into their processor sets. */
static int
acquire_psets(struct snapshot *ss)
{
	psetid_t *pids = NULL;
	struct pset_snapshot *ps;
	size_t pids_nr;
	size_t i, j;

	/*
	 * Careful in this code. We have to use pset_list
	 * twice, but in between pids_nr can change at will.
	 * We delay the setting of s_nr_psets until we have
	 * the "final" value of pids_nr.
	 */

	if (pset_list(NULL, &pids_nr) < 0)
		return (errno);

	if ((pids = calloc(pids_nr, sizeof (psetid_t))) == NULL)
		goto out;

	if (pset_list(pids, &pids_nr) < 0)
		goto out;

	ss->s_psets = calloc(pids_nr + 1, sizeof (struct pset_snapshot));
	if (ss->s_psets == NULL)
		goto out;
	ss->s_nr_psets = pids_nr + 1;

	/* CPUs not in any actual pset */
	ps = &ss->s_psets[0];
	ps->ps_id = 0;
	ps->ps_cpus = calloc(ss->s_nr_cpus, sizeof (struct cpu_snapshot *));
	if (ps->ps_cpus == NULL)
		goto out;

	/* CPUs in a pset */
	for (i = 1; i < ss->s_nr_psets; i++) {
		ps = &ss->s_psets[i];

		ps->ps_id = pids[i - 1];
		ps->ps_cpus =
		    calloc(ss->s_nr_cpus, sizeof (struct cpu_snapshot *));
		if (ps->ps_cpus == NULL)
			goto out;
	}

	for (i = 0; i < ss->s_nr_psets; i++) {
		ps = &ss->s_psets[i];

		for (j = 0; j < ss->s_nr_cpus; j++) {
			if (!CPU_ACTIVE(&ss->s_cpus[j]))
				continue;
			if (ss->s_cpus[j].cs_pset_id != ps->ps_id)
				continue;

			ps->ps_cpus[ps->ps_nr_cpus++] = &ss->s_cpus[j];
		}
	}

	errno = 0;
out:
	free(pids);
	return (errno);
}

/* Snapshot the clock interrupt plus every KSTAT_TYPE_INTR kstat. */
static int
acquire_intrs(struct snapshot *ss, kstat_ctl_t *kc)
{
	kstat_t *ksp;
	size_t i = 0;
	kstat_t *sys_misc;
	kstat_named_t *clock;

	/* clock interrupt */
	ss->s_nr_intrs = 1;

	for (ksp = kc->kc_chain; ksp; ksp = ksp->ks_next) {
		if (ksp->ks_type == KSTAT_TYPE_INTR)
			ss->s_nr_intrs++;
	}

	ss->s_intrs = calloc(ss->s_nr_intrs, sizeof (struct intr_snapshot));
	if (ss->s_intrs == NULL)
		return (errno);

	sys_misc = kstat_lookup_read(kc, "unix", 0, "system_misc");
	if (sys_misc == NULL)
		goto out;

	clock = (kstat_named_t *)kstat_data_lookup(sys_misc, "clk_intr");
	if (clock == NULL)
		goto out;

	(void) strlcpy(ss->s_intrs[0].is_name, "clock", KSTAT_STRLEN);
	ss->s_intrs[0].is_total = clock->value.ui32;

	i = 1;

	for (ksp = kc->kc_chain; ksp; ksp = ksp->ks_next) {
		kstat_intr_t *ki;
		int j;

		if (ksp->ks_type != KSTAT_TYPE_INTR)
			continue;
		if (kstat_read(kc, ksp, NULL) == -1)
			goto out;

		ki = KSTAT_INTR_PTR(ksp);

		(void) strlcpy(ss->s_intrs[i].is_name, ksp->ks_name,
		    KSTAT_STRLEN);
		ss->s_intrs[i].is_total = 0;

		for (j = 0; j < KSTAT_NUM_INTRS; j++)
			ss->s_intrs[i].is_total += ki->intrs[j];

		i++;
	}

	errno = 0;
out:
	return (errno);
}

/* Snapshot system-wide statistics and aggregate the per-CPU data. */
int
acquire_sys(struct snapshot *ss, kstat_ctl_t *kc)
{
	size_t i;
	kstat_named_t *knp;
	kstat_t *ksp;

	if ((ksp = kstat_lookup(kc, "unix", 0, "sysinfo")) == NULL)
		return (errno);

	if (kstat_read(kc, ksp, &ss->s_sys.ss_sysinfo) == -1)
		return (errno);

	if ((ksp = kstat_lookup(kc, "unix", 0, "vminfo")) == NULL)
		return (errno);

	if (kstat_read(kc, ksp, &ss->s_sys.ss_vminfo) == -1)
		return (errno);

	if ((ksp = kstat_lookup(kc, "unix", 0, "dnlcstats")) == NULL)
		return (errno);

	if (kstat_read(kc, ksp, &ss->s_sys.ss_nc) == -1)
		return (errno);

	if ((ksp = kstat_lookup(kc, "unix", 0, "system_misc")) == NULL)
		return (errno);

	if (kstat_read(kc, ksp, NULL) == -1)
		return (errno);

	knp = (kstat_named_t *)kstat_data_lookup(ksp, "clk_intr");
	if (knp == NULL)
		return (errno);

	ss->s_sys.ss_ticks = knp->value.l;

	knp = (kstat_named_t *)kstat_data_lookup(ksp, "deficit");
	if (knp == NULL)
		return (errno);

	ss->s_sys.ss_deficit = knp->value.l;

	for (i = 0; i < ss->s_nr_cpus; i++) {
		if (!CPU_ACTIVE(&ss->s_cpus[i]))
			continue;

		if (kstat_add(&ss->s_cpus[i].cs_sys, &ss->s_sys.ss_agg_sys))
			return (errno);
		if (kstat_add(&ss->s_cpus[i].cs_vm, &ss->s_sys.ss_agg_vm))
			return (errno);
	}

	return (0);
}

struct snapshot *
acquire_snapshot(kstat_ctl_t *kc, int types, struct iodev_filter *iodev_filter)
{
	struct snapshot *ss = NULL;
	int err;

retry:
	err = 0;
	/* ensure any partial resources are freed on a retry */
	free_snapshot(ss);

	ss = safe_alloc(sizeof (struct snapshot));

	(void) memset(ss, 0, sizeof (struct snapshot));

	ss->s_types = types;

	/* wait for a possibly up-to-date chain */
	while (kstat_chain_update(kc) == -1) {
		if (errno == EAGAIN)
			(void) poll(NULL, 0, RETRY_DELAY);
		else
			fail(1, "kstat_chain_update failed");
	}

	if (types & SNAP_FLUSHES) {
		kstat_t *ksp;
		ksp = kstat_lookup(kc, "unix", 0, "flushmeter");
		if (ksp == NULL) {
			fail(0, "This machine does not have "
			    "a virtual address cache");
		}
		if (kstat_read(kc, ksp, &ss->s_flushes) == -1)
			err = errno;
	}

	if (!err && (types & SNAP_INTERRUPTS))
		err = acquire_intrs(ss, kc);

	if (!err && (types & (SNAP_CPUS | SNAP_SYSTEM | SNAP_PSETS)))
		err = acquire_cpus(ss, kc);

	if (!err && (types & SNAP_PSETS))
		err = acquire_psets(ss);

	if (!err && (types & (SNAP_IODEVS | SNAP_CONTROLLERS | SNAP_IOPATHS)))
		err = acquire_iodevs(ss, kc, iodev_filter);

	if (!err && (types & SNAP_SYSTEM))
		err = acquire_sys(ss, kc);

	switch (err) {
	case 0:
		break;
	case EAGAIN:
		(void) poll(NULL, 0, RETRY_DELAY);
	/* a kstat disappeared from under us */
	/*FALLTHRU*/
	case ENXIO:
	case ENOENT:
		goto retry;
	default:
		fail(1, "acquiring snapshot failed");
	}

	return (ss);
}

void
free_snapshot(struct snapshot *ss)
{
	size_t i;

	if (ss == NULL)
		return;

	while (ss->s_iodevs) {
		struct iodev_snapshot *tmp = ss->s_iodevs;
		ss->s_iodevs = ss->s_iodevs->is_next;
		free_iodev(tmp);
	}

	if (ss->s_cpus) {
		for (i = 0; i < ss->s_nr_cpus; i++) {
			free(ss->s_cpus[i].cs_vm.ks_data);
			free(ss->s_cpus[i].cs_sys.ks_data);
		}
		free(ss->s_cpus);
	}

	if (ss->s_psets) {
		for (i = 0; i < ss->s_nr_psets; i++)
			free(ss->s_psets[i].ps_cpus);
		free(ss->s_psets);
	}

	free(ss->s_sys.ss_agg_sys.ks_data);
	free(ss->s_sys.ss_agg_vm.ks_data);
	free(ss);
}

kstat_ctl_t *
open_kstat(void)
{
	kstat_ctl_t *kc;

	while ((kc = kstat_open()) == NULL) {
		if (errno == EAGAIN)
			(void) poll(NULL, 0, RETRY_DELAY);
		else
			fail(1, "kstat_open failed");
	}

	return (kc);
}

/*PRINTFLIKE2*/
void
fail(int do_perror, char *message, ...)
{
	va_list args;
	int save_errno = errno;

	va_start(args, message);
	(void) fprintf(stderr, "%s: ", cmdname);
	(void) vfprintf(stderr, message, args);
	va_end(args);
	if (do_perror)
		(void) fprintf(stderr, ": %s", strerror(save_errno));
	(void) fprintf(stderr, "\n");
	exit(2);
}

/* malloc() that retries on EAGAIN and never returns NULL. */
void *
safe_alloc(size_t size)
{
	void *ptr;

	while ((ptr = malloc(size)) == NULL) {
		if (errno == EAGAIN)
			(void) poll(NULL, 0, RETRY_DELAY);
		else
			fail(1, "malloc failed");
	}
	return (ptr);
}

/* strdup() that retries on EAGAIN and never returns NULL for non-NULL input. */
char *
safe_strdup(char *str)
{
	char *ret;

	if (str == NULL)
		return (NULL);

	while ((ret = strdup(str)) == NULL) {
		if (errno == EAGAIN)
			(void) poll(NULL, 0, RETRY_DELAY);
		else
			fail(1, "malloc failed");
	}
	return (ret);
}

/*
 * Return the change in the named ui64 statistic between two snapshots
 * of a kstat, or its current value if there is no previous snapshot.
 */
uint64_t
kstat_delta(kstat_t *old, kstat_t *new, char *name)
{
	kstat_named_t *knew = kstat_data_lookup(new, name);
	if (old && old->ks_data) {
		kstat_named_t *kold = kstat_data_lookup(old, name);
		return (knew->value.ui64 - kold->value.ui64);
	}
	return (knew->value.ui64);
}

/* Copy a kstat header and duplicate its data buffer. */
int
kstat_copy(const kstat_t *src, kstat_t *dst)
{
	*dst = *src;

	if (src->ks_data != NULL) {
		if ((dst->ks_data = malloc(src->ks_data_size)) == NULL)
			return (-1);
		bcopy(src->ks_data, dst->ks_data, src->ks_data_size);
	} else {
		dst->ks_data = NULL;
		dst->ks_data_size = 0;
	}
	return (0);
}

/* Add the named values of src into dst, copying src if dst is empty. */
int
kstat_add(const kstat_t *src, kstat_t *dst)
{
	size_t i;
	kstat_named_t *from;
	kstat_named_t *to;

	if (dst->ks_data == NULL)
		return (kstat_copy(src, dst));

	from = src->ks_data;
	to = dst->ks_data;

	for (i = 0; i < src->ks_ndata; i++) {
		/* "addition" makes little sense for strings */
		if (from->data_type != KSTAT_DATA_CHAR &&
		    from->data_type != KSTAT_DATA_STRING)
			to->value.ui64 += from->value.ui64;
		from++;
		to++;
	}

	return (0);
}

/* Sum the tick deltas over all CPU states. */
uint64_t
cpu_ticks_delta(kstat_t *old, kstat_t *new)
{
	uint64_t ticks = 0;
	size_t i;
	for (i = 0; i < ARRAY_SIZE(cpu_states); i++)
		ticks += kstat_delta(old, new, cpu_states[i]);
	return (ticks);
}

int
nr_active_cpus(struct snapshot *ss)
{
	size_t i;
	int count = 0;
	for (i = 0; i < ss->s_nr_cpus; i++) {
		if (CPU_ACTIVE(&ss->s_cpus[i]))
			count++;
	}

	return (count);
}