/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1989, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


/*
 * sadc.c writes system activity binary data to a file or to stdout.
 *
 * Usage: sadc [t n] [file]
 *
 * If t and n are not specified, sadc writes a single dummy record to the
 * data file; this form is typically invoked at system boot. If t and n
 * are specified, it samples system data n times, every t seconds, and
 * writes each record to file. In both cases, if file is not specified,
 * the data goes to stdout.
 */

#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/sysinfo.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/var.h>

#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <kstat.h>
#include <memory.h>
#include <nlist.h>
#include <signal.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <strings.h>

#include "sa.h"

#define	MAX(x1, x2)	((x1) >= (x2) ? (x1) : (x2))

static kstat_ctl_t *kc;		/* libkstat cookie */
static int ncpus;
static int oncpus;
static kstat_t **cpu_stat_list = NULL;
static kstat_t **ocpu_stat_list = NULL;
static int ncaches;
static kstat_t **kmem_cache_list = NULL;

static kstat_t *sysinfo_ksp, *vminfo_ksp, *var_ksp;
static kstat_t *system_misc_ksp, *ufs_inode_ksp, *kmem_oversize_ksp;
static kstat_t *file_cache_ksp;
static kstat_named_t *ufs_inode_size_knp, *nproc_knp;
static kstat_named_t *file_total_knp, *file_avail_knp;
static kstat_named_t *oversize_alloc_knp, *oversize_fail_knp;
static int slab_create_index, slab_destroy_index, slab_size_index;
static int buf_size_index, buf_avail_index, alloc_fail_index;

static struct iodevinfo zeroiodev = { NULL, NULL };
static struct iodevinfo *firstiodev = NULL;
static struct iodevinfo *lastiodev = NULL;
static struct iodevinfo *snip = NULL;
static ulong_t niodevs;

static void all_stat_init(void);
static int all_stat_load(void);
static void fail(int, char *, ...);
static void safe_zalloc(void **, int, int);
static kid_t safe_kstat_read(kstat_ctl_t *, kstat_t *, void *);
static kstat_t *safe_kstat_lookup(kstat_ctl_t *, char *, int, char *);
static void *safe_kstat_data_lookup(kstat_t *, char *);
static int safe_kstat_data_index(kstat_t *, char *);
static void init_iodevs(void);
static int iodevinfo_load(void);
static int kstat_copy(const kstat_t *, kstat_t *);
static void diff_two_arrays(kstat_t ** const [], size_t, size_t,
    kstat_t ** const []);
static void compute_cpu_stat_adj(void);

static char *cmdname = "sadc";

static struct var var;

static struct sa d;
static int64_t cpu_stat_adj[CPU_STATES] = {0};

static long ninode;

int caught_cont = 0;

/*
 * Sleep until *wakeup + interval, keeping cadence where desired
 *
 * *wakeup - The time we last wanted to wake up. Updated.
 * interval - We want to sleep until *wakeup + interval
 * *caught_cont - Global set by signal handler if we got a SIGCONT
 */
void
sleep_until(hrtime_t *wakeup, hrtime_t interval, int *caught_cont)
{
	hrtime_t now, pause, pause_left;
	struct timespec pause_tv;
	int status;

	now = gethrtime();
	pause = *wakeup + interval - now;

	if (pause <= 0 || pause < (interval / 4)) {
		if (*caught_cont) {
			/* Reset our cadence (see comment below) */
			*wakeup = now + interval;
			pause = interval;
		} else {
			/*
			 * If we got here, then the time between the
			 * output we just did and the scheduled time
			 * for the next output is less than 1/4 of our
			 * requested interval, and we have never caught
			 * a SIGCONT (so we have never been suspended).
			 * In this case, we'll try to stay to the
			 * desired cadence, and we will pause for 1/2
			 * the normal interval this time.
			 */
			pause = interval / 2;
			*wakeup += interval;
		}
	} else {
		*wakeup += interval;
	}
	if (pause < 1000)
		/* Near enough */
		return;

	/* Now do the actual sleep */
	pause_left = pause;
	do {
		pause_tv.tv_sec = pause_left / NANOSEC;
		pause_tv.tv_nsec = pause_left % NANOSEC;
		status = nanosleep(&pause_tv, (struct timespec *)NULL);
		if (status < 0) {
			if (errno == EINTR) {
				now = gethrtime();
				pause_left = *wakeup - now;
				if (pause_left < 1000)
					/* Near enough */
					return;
			} else {
				fail(1, "nanosleep failed");
			}
		}
	} while (status != 0);
}

/*
 * Signal handler - so we can be aware of SIGCONT
 */
void
cont_handler(int sig_number)
{
	/* Re-set the signal handler */
	(void) signal(sig_number, cont_handler);
	caught_cont = 1;
}

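/*
 * Parse the [t n] [file] arguments, write the boot-time dummy record
 * if no sampling count was given, then sample and append records.
 */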
int
main(int argc, char *argv[])
{
	int ct;
	unsigned ti;
	int fp;
	time_t min;
	struct stat buf;
	char *fname;
	struct iodevinfo *iodev;
	off_t flength = 0;	/* stays 0 when writing to stdout */
	hrtime_t start_n;
	hrtime_t period_n;

	ct = argc >= 3 ? atoi(argv[2]) : 0;
	min = time((time_t *)0);
	ti = argc >= 3 ? atoi(argv[1]) : 0;

	period_n = (hrtime_t)ti * NANOSEC;

	if ((kc = kstat_open()) == NULL)
		fail(1, "kstat_open(): can't open /dev/kstat");

	/* Set up handler for SIGCONT */
	if (signal(SIGCONT, cont_handler) == SIG_ERR)
		fail(1, "signal failed");

	all_stat_init();
	init_iodevs();

	if (argc == 3 || argc == 1) {
		/*
		 * No data file is specified; direct data to stdout.
		 */
		fp = 1;
	} else {
		struct flock lock;

		fname = (argc == 2) ? argv[1] : argv[3];
		/*
		 * Open or create the data file. If the file doesn't exist,
		 * it will be created.
		 */
		if ((fp = open(fname, O_WRONLY | O_APPEND | O_CREAT, 0644))
		    == -1)
			fail(1, "can't open data file");
		/*
		 * Lock the entire data file to prevent data corruption
		 */
		lock.l_type = F_WRLCK;
		lock.l_whence = SEEK_SET;
		lock.l_start = 0;
		lock.l_len = 0;
		if (fcntl(fp, F_SETLK, &lock) == -1)
			fail(1, "can't lock data file");
		/*
		 * Get data file statistics for use in determining whether
		 * truncation is required and where rollback recovery should
		 * be applied.
		 */
		if (fstat(fp, &buf) == -1)
			fail(1, "can't get data file information");
		/*
		 * If the data file is more than a day old, truncate it
		 */
		if (min - buf.st_mtime > 86400)
			if (ftruncate(fp, 0) == -1)
				fail(1, "can't truncate data file");
		/*
		 * Remember filesize for rollback on error (bug #1223549)
		 */
		flength = buf.st_size;
	}

	memset(&d, 0, sizeof (d));

	/*
	 * If n == 0, write the additional dummy record.
	 */
	if (ct == 0) {
		d.valid = 0;
		d.ts = min;
		d.niodevs = niodevs;

		if (write(fp, &d, sizeof (struct sa)) != sizeof (struct sa))
			ftruncate(fp, flength), fail(1, "write failed");

		for (iodev = firstiodev; iodev; iodev = iodev->next) {
			if (write(fp, iodev, sizeof (struct iodevinfo)) !=
			    sizeof (struct iodevinfo))
				ftruncate(fp, flength), fail(1, "write failed");
		}
	}

	start_n = gethrtime();

	for (;;) {
		do {
			(void) kstat_chain_update(kc);
			all_stat_init();
			init_iodevs();
		} while (all_stat_load() || iodevinfo_load());

		d.ts = time((time_t *)0);
		d.valid = 1;
		d.niodevs = niodevs;

		if (write(fp, &d, sizeof (struct sa)) != sizeof (struct sa))
			ftruncate(fp, flength), fail(1, "write failed");

		for (iodev = firstiodev; iodev; iodev = iodev->next) {
			if (write(fp, iodev, sizeof (struct iodevinfo)) !=
			    sizeof (struct iodevinfo))
				ftruncate(fp, flength), fail(1, "write failed");
		}
		if (--ct > 0) {
			sleep_until(&start_n, period_n, &caught_cont);
		} else {
			close(fp);
			return (0);
		}
	}

	/*NOTREACHED*/
}

/*
 * Get various KIDs for subsequent all_stat_load operations.
 */

static void
all_stat_init(void)
{
	kstat_t *ksp;

	/*
	 * Initialize global statistics
	 */

	sysinfo_ksp = safe_kstat_lookup(kc, "unix", 0, "sysinfo");
	vminfo_ksp = safe_kstat_lookup(kc, "unix", 0, "vminfo");
	kmem_oversize_ksp = safe_kstat_lookup(kc, "vmem", -1, "kmem_oversize");
	var_ksp = safe_kstat_lookup(kc, "unix", 0, "var");
	system_misc_ksp = safe_kstat_lookup(kc, "unix", 0, "system_misc");
	file_cache_ksp = safe_kstat_lookup(kc, "unix", 0, "file_cache");
	ufs_inode_ksp = kstat_lookup(kc, "ufs", 0, "inode_cache");

	safe_kstat_read(kc, system_misc_ksp, NULL);
	nproc_knp = safe_kstat_data_lookup(system_misc_ksp, "nproc");

	safe_kstat_read(kc, file_cache_ksp, NULL);
	file_avail_knp = safe_kstat_data_lookup(file_cache_ksp, "buf_avail");
	file_total_knp = safe_kstat_data_lookup(file_cache_ksp, "buf_total");

	safe_kstat_read(kc, kmem_oversize_ksp, NULL);
	oversize_alloc_knp = safe_kstat_data_lookup(kmem_oversize_ksp,
	    "mem_total");
	oversize_fail_knp = safe_kstat_data_lookup(kmem_oversize_ksp, "fail");

	if (ufs_inode_ksp != NULL) {
		safe_kstat_read(kc, ufs_inode_ksp, NULL);
		ufs_inode_size_knp = safe_kstat_data_lookup(ufs_inode_ksp,
		    "size");
		ninode = ((kstat_named_t *)
		    safe_kstat_data_lookup(ufs_inode_ksp,
		    "maxsize"))->value.l;
	}

	/*
	 * Load constant values now -- no need to reread each time
	 */

	safe_kstat_read(kc, var_ksp, (void *) &var);

	/*
	 * Initialize per-CPU and per-kmem-cache statistics
	 */

	ncpus = ncaches = 0;
	for (ksp = kc->kc_chain; ksp; ksp = ksp->ks_next) {
		if (strncmp(ksp->ks_name, "cpu_stat", 8) == 0)
			ncpus++;
		if (strcmp(ksp->ks_class, "kmem_cache") == 0)
			ncaches++;
	}

	safe_zalloc((void **)&cpu_stat_list, ncpus * sizeof (kstat_t *), 1);
	safe_zalloc((void **)&kmem_cache_list, ncaches * sizeof (kstat_t *), 1);

	ncpus = ncaches = 0;
	for (ksp = kc->kc_chain; ksp; ksp = ksp->ks_next) {
		if (strncmp(ksp->ks_name, "cpu_stat", 8) == 0 &&
		    kstat_read(kc, ksp, NULL) != -1)
			cpu_stat_list[ncpus++] = ksp;
		if (strcmp(ksp->ks_class, "kmem_cache") == 0 &&
		    kstat_read(kc, ksp, NULL) != -1)
			kmem_cache_list[ncaches++] = ksp;
	}

	if (ncpus == 0)
		fail(1, "can't find any cpu statistics");

	if (ncaches == 0)
		fail(1, "can't find any kmem_cache statistics");

	ksp = kmem_cache_list[0];
	safe_kstat_read(kc, ksp, NULL);
	buf_size_index = safe_kstat_data_index(ksp, "buf_size");
	slab_create_index = safe_kstat_data_index(ksp, "slab_create");
	slab_destroy_index = safe_kstat_data_index(ksp, "slab_destroy");
	slab_size_index = safe_kstat_data_index(ksp, "slab_size");
	buf_avail_index = safe_kstat_data_index(ksp, "buf_avail");
	alloc_fail_index = safe_kstat_data_index(ksp, "alloc_fail");
}

/*
 * load statistics, summing across CPUs where needed
 */

static int
all_stat_load(void)
{
	int i, j;
	cpu_stat_t cs;
	ulong_t *np, *tp;
	uint64_t cpu_tick[4] = {0, 0, 0, 0};

	memset(&d, 0, sizeof (d));

	/*
	 * Global statistics
	 */

	safe_kstat_read(kc, sysinfo_ksp, (void *) &d.si);
	safe_kstat_read(kc, vminfo_ksp, (void *) &d.vmi);
	safe_kstat_read(kc, system_misc_ksp, NULL);
	safe_kstat_read(kc, file_cache_ksp, NULL);

	if (ufs_inode_ksp != NULL) {
		safe_kstat_read(kc, ufs_inode_ksp, NULL);
		d.szinode = ufs_inode_size_knp->value.ul;
	}

	d.szfile = file_total_knp->value.ui64 - file_avail_knp->value.ui64;
	d.szproc = nproc_knp->value.ul;

	d.mszinode = (ninode > d.szinode) ? ninode : d.szinode;
	d.mszfile = d.szfile;
	d.mszproc = var.v_proc;

	/*
	 * Per-CPU statistics.
	 */

	for (i = 0; i < ncpus; i++) {
		if (kstat_read(kc, cpu_stat_list[i], (void *) &cs) == -1)
			return (1);

		np = (ulong_t *)&d.csi;
		tp = (ulong_t *)&cs.cpu_sysinfo;

		/*
		 * Accumulate cpu ticks for CPU_IDLE, CPU_USER, CPU_KERNEL and
		 * CPU_WAIT with respect to each of the cpus.
		 */
		for (j = 0; j < CPU_STATES; j++)
			cpu_tick[j] += tp[j];

		for (j = 0; j < sizeof (cpu_sysinfo_t); j += sizeof (ulong_t))
			*np++ += *tp++;
		np = (ulong_t *)&d.cvmi;
		tp = (ulong_t *)&cs.cpu_vminfo;
		for (j = 0; j < sizeof (cpu_vminfo_t); j += sizeof (ulong_t))
			*np++ += *tp++;
	}

	/*
	 * Per-cache kmem statistics.
	 */

	for (i = 0; i < ncaches; i++) {
		kstat_named_t *knp;
		u_longlong_t slab_create, slab_destroy, slab_size, mem_total;
		u_longlong_t buf_size, buf_avail, alloc_fail;
		int kmi_index;

		if (kstat_read(kc, kmem_cache_list[i], NULL) == -1)
			return (1);
		knp = kmem_cache_list[i]->ks_data;
		slab_create = knp[slab_create_index].value.ui64;
		slab_destroy = knp[slab_destroy_index].value.ui64;
		slab_size = knp[slab_size_index].value.ui64;
		buf_size = knp[buf_size_index].value.ui64;
		buf_avail = knp[buf_avail_index].value.ui64;
		alloc_fail = knp[alloc_fail_index].value.ui64;
		if (buf_size <= 256)
			kmi_index = KMEM_SMALL;
		else
			kmi_index = KMEM_LARGE;
		mem_total = (slab_create - slab_destroy) * slab_size;

		d.kmi.km_mem[kmi_index] += (ulong_t)mem_total;
		d.kmi.km_alloc[kmi_index] +=
		    (ulong_t)mem_total - buf_size * buf_avail;
		d.kmi.km_fail[kmi_index] += (ulong_t)alloc_fail;
	}

	safe_kstat_read(kc, kmem_oversize_ksp, NULL);

	d.kmi.km_alloc[KMEM_OSIZE] = d.kmi.km_mem[KMEM_OSIZE] =
	    oversize_alloc_knp->value.ui64;
	d.kmi.km_fail[KMEM_OSIZE] = oversize_fail_knp->value.ui64;

	/*
	 * Adjust CPU statistics so the delta calculations in sar will
	 * be correct when facing changes to the set of online CPUs.
	 */
	compute_cpu_stat_adj();
	for (i = 0; i < CPU_STATES; i++)
		d.csi.cpu[i] = (cpu_tick[i] + cpu_stat_adj[i]) / ncpus;

	return (0);
}

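/*
 * Print a formatted error message (with strerror(errno) appended when
 * do_perror is set) and exit with status 2.
 */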
static void
fail(int do_perror, char *message, ...)
{
	va_list args;

	va_start(args, message);
	fprintf(stderr, "%s: ", cmdname);
	vfprintf(stderr, message, args);
	va_end(args);
	if (do_perror)
		fprintf(stderr, ": %s", strerror(errno));
	fprintf(stderr, "\n");
	exit(2);
}

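/*
 * Allocate zero-filled memory, optionally freeing the old block first;
 * allocation failure is fatal.
 */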
static void
safe_zalloc(void **ptr, int size, int free_first)
{
	if (free_first && *ptr != NULL)
		free(*ptr);
	if ((*ptr = malloc(size)) == NULL)
		fail(1, "malloc failed");
	memset(*ptr, 0, size);
}

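/*
 * kstat_read() wrapper that exits on failure.
 */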
static kid_t
safe_kstat_read(kstat_ctl_t *kc, kstat_t *ksp, void *data)
{
	kid_t kstat_chain_id = kstat_read(kc, ksp, data);

	if (kstat_chain_id == -1)
		fail(1, "kstat_read(%x, '%s') failed", kc, ksp->ks_name);
	return (kstat_chain_id);
}

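/*
 * kstat_lookup() wrapper that exits if the kstat is not found.
 */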
static kstat_t *
safe_kstat_lookup(kstat_ctl_t *kc, char *ks_module, int ks_instance,
    char *ks_name)
{
	kstat_t *ksp = kstat_lookup(kc, ks_module, ks_instance, ks_name);

	if (ksp == NULL)
		fail(0, "kstat_lookup('%s', %d, '%s') failed",
		    ks_module == NULL ? "" : ks_module,
		    ks_instance,
		    ks_name == NULL ? "" : ks_name);
	return (ksp);
}

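/*
 * kstat_data_lookup() wrapper that exits if the named statistic is
 * not present.
 */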
static void *
safe_kstat_data_lookup(kstat_t *ksp, char *name)
{
	void *fp = kstat_data_lookup(ksp, name);

	if (fp == NULL)
		fail(0, "kstat_data_lookup('%s', '%s') failed",
		    ksp->ks_name, name);
	return (fp);
}

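/*
 * Return the index of the named statistic within the kstat's data
 * array, computed from its byte offset and the per-entry size.
 */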
static int
safe_kstat_data_index(kstat_t *ksp, char *name)
{
	return ((int)((char *)safe_kstat_data_lookup(ksp, name) -
	    (char *)ksp->ks_data) / (ksp->ks_data_size / ksp->ks_ndata));
}

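/*
 * Order two kstats by (ks_module, ks_instance, ks_name).
 */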
static int
kscmp(kstat_t *ks1, kstat_t *ks2)
{
	int cmp;

	cmp = strcmp(ks1->ks_module, ks2->ks_module);
	if (cmp != 0)
		return (cmp);
	cmp = ks1->ks_instance - ks2->ks_instance;
	if (cmp != 0)
		return (cmp);
	return (strcmp(ks1->ks_name, ks2->ks_name));
}

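/*
 * Rebuild the sorted linked list of I/O device kstats, reusing nodes
 * from previous scans where possible (see the "snip" comment below).
 */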
static void
init_iodevs(void)
{
	struct iodevinfo *iodev, *previodev, *comp;
	kstat_t *ksp;

	iodev = &zeroiodev;
	niodevs = 0;

	/*
	 * Patch the snip in the iodevinfo list (see below)
	 */
	if (snip)
		lastiodev->next = snip;

	for (ksp = kc->kc_chain; ksp; ksp = ksp->ks_next) {

		if (ksp->ks_type != KSTAT_TYPE_IO)
			continue;
		previodev = iodev;
		if (iodev->next)
			iodev = iodev->next;
		else {
			safe_zalloc((void **) &iodev->next,
			    sizeof (struct iodevinfo), 0);
			iodev = iodev->next;
			iodev->next = NULL;
		}
		iodev->ksp = ksp;
		iodev->ks = *ksp;
		memset((void *)&iodev->kios, 0, sizeof (kstat_io_t));
		iodev->kios.wlastupdate = iodev->ks.ks_crtime;
		iodev->kios.rlastupdate = iodev->ks.ks_crtime;

		/*
		 * Insertion sort on (ks_module, ks_instance, ks_name)
		 */
		comp = &zeroiodev;
		while (kscmp(&iodev->ks, &comp->next->ks) > 0)
			comp = comp->next;
		if (previodev != comp) {
			previodev->next = iodev->next;
			iodev->next = comp->next;
			comp->next = iodev;
			iodev = previodev;
		}
		niodevs++;
	}
	/*
	 * Put a snip in the linked list of iodevinfos. The idea:
	 * If there was a state change such that now there are fewer
	 * iodevs, we snip the list and retain the tail, rather than
	 * freeing it. At the next state change, we clip the tail back on.
	 * This prevents a lot of malloc/free activity, and it's simpler.
	 */
	lastiodev = iodev;
	snip = iodev->next;
	iodev->next = NULL;

	firstiodev = zeroiodev.next;
}

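/*
 * Read the current counters for every I/O device kstat. Returns
 * nonzero if any read fails, so the caller can rebuild the lists.
 */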
static int
iodevinfo_load(void)
{
	struct iodevinfo *iodev;

	for (iodev = firstiodev; iodev; iodev = iodev->next) {
		if (kstat_read(kc, iodev->ksp, (void *) &iodev->kios) == -1)
			return (1);
	}
	return (0);
}

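/*
 * Deep-copy a kstat, duplicating its data area. Returns -1 if the
 * allocation fails.
 */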
static int
kstat_copy(const kstat_t *src, kstat_t *dst)
{
	*dst = *src;

	if (src->ks_data != NULL) {
		if ((dst->ks_data = malloc(src->ks_data_size)) == NULL)
			return (-1);
		bcopy(src->ks_data, dst->ks_data, src->ks_data_size);
	} else {
		dst->ks_data = NULL;
		dst->ks_data_size = 0;
	}
	return (0);
}

/*
 * Determine what is different between two sets of kstats; s[0] and s[1]
 * are arrays of kstats of size ns0 and ns1, respectively, and sorted by
 * instance number. u[0] and u[1] are two arrays which must be
 * caller-zallocated; each must be of size MAX(ns0, ns1). When the
 * function terminates, u[0] contains all s[0]-unique items and u[1]
 * contains all s[1]-unique items. Any unused entries in u[0] and u[1]
 * are left NULL.
 */
static void
diff_two_arrays(kstat_t ** const s[], size_t ns0, size_t ns1,
    kstat_t ** const u[])
{
	kstat_t **s0p = s[0], **s1p = s[1];
	kstat_t **u0p = u[0], **u1p = u[1];
	int i = 0, j = 0;

	while (i < ns0 && j < ns1) {
		if ((*s0p)->ks_instance == (*s1p)->ks_instance) {
			if ((*s0p)->ks_kid != (*s1p)->ks_kid) {
				/*
				 * The instance is the same, but this
				 * CPU has been offline during the
				 * interval, so we consider *s0p to
				 * be s[0]-unique, and similarly for
				 * *s1p.
				 */
				*(u0p++) = *s0p;
				*(u1p++) = *s1p;
			}
			s0p++;
			i++;
			s1p++;
			j++;
		} else if ((*s0p)->ks_instance < (*s1p)->ks_instance) {
			*(u0p++) = *(s0p++);
			i++;
		} else {
			*(u1p++) = *(s1p++);
			j++;
		}
	}

	while (i < ns0) {
		*(u0p++) = *(s0p++);
		i++;
	}
	while (j < ns1) {
		*(u1p++) = *(s1p++);
		j++;
	}
}

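/*
 * qsort() comparator: order CPU kstats by instance number.
 */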
static int
cpuid_compare(const void *p1, const void *p2)
{
	return ((*(kstat_t **)p1)->ks_instance -
	    (*(kstat_t **)p2)->ks_instance);
}

/*
 * Identify those CPUs which were not present for the whole interval so
 * their statistics can be removed from the aggregate.
 */
static void
compute_cpu_stat_adj(void)
{
	int i, j;

	if (ocpu_stat_list) {
		kstat_t **s[2];
		kstat_t **inarray[2];
		int max_cpus = MAX(ncpus, oncpus);

		qsort(cpu_stat_list, ncpus, sizeof (*cpu_stat_list),
		    cpuid_compare);
		qsort(ocpu_stat_list, oncpus, sizeof (*ocpu_stat_list),
		    cpuid_compare);

		s[0] = ocpu_stat_list;
		s[1] = cpu_stat_list;

		safe_zalloc((void *)&inarray[0], sizeof (**inarray) * max_cpus,
		    0);
		safe_zalloc((void *)&inarray[1], sizeof (**inarray) * max_cpus,
		    0);
		diff_two_arrays(s, oncpus, ncpus, inarray);

		for (i = 0; i < max_cpus; i++) {
			if (inarray[0][i])
				for (j = 0; j < CPU_STATES; j++)
					cpu_stat_adj[j] +=
					    ((cpu_stat_t *)inarray[0][i]
					    ->ks_data)->cpu_sysinfo.cpu[j];
			if (inarray[1][i])
				for (j = 0; j < CPU_STATES; j++)
					cpu_stat_adj[j] -=
					    ((cpu_stat_t *)inarray[1][i]
					    ->ks_data)->cpu_sysinfo.cpu[j];
		}

		free(inarray[0]);
		free(inarray[1]);
	}

	/*
	 * Preserve the last interval's CPU stats.
	 */
	if (cpu_stat_list) {
		for (i = 0; i < oncpus; i++)
			free(ocpu_stat_list[i]->ks_data);

		oncpus = ncpus;
		safe_zalloc((void **)&ocpu_stat_list, oncpus *
		    sizeof (*ocpu_stat_list), 1);
		for (i = 0; i < ncpus; i++) {
			safe_zalloc((void *)&ocpu_stat_list[i],
			    sizeof (*ocpu_stat_list[0]), 0);
			if (kstat_copy(cpu_stat_list[i], ocpu_stat_list[i]))
				fail(1, "kstat_copy() failed");
		}
	}
}