xref: /titanic_41/usr/src/cmd/sa/sadc.c (revision 4ef27277d4e5e7e7a1883d95aebf0ae8710873d7)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
30 /*	  All Rights Reserved  	*/
31 
32 
33 /*
34  * sadc.c writes system activity binary data to a file or stdout.
35  *
36  * Usage: sadc [t n] [file]
37  *
38  * if t and n are not specified, it writes a dummy record to data file. This
39  * usage is particularly used at system booting.  If t and n are specified, it
40  * writes system data n times to file every t seconds.  In both cases, if file
41  * is not specified, it writes data to stdout.
42  */
43 
44 #include <sys/fcntl.h>
45 #include <sys/flock.h>
46 #include <sys/proc.h>
47 #include <sys/stat.h>
48 #include <sys/sysinfo.h>
49 #include <sys/time.h>
50 #include <sys/types.h>
51 #include <sys/var.h>
52 
53 #include <ctype.h>
54 #include <errno.h>
55 #include <fcntl.h>
56 #include <kstat.h>
57 #include <memory.h>
58 #include <nlist.h>
59 #include <signal.h>
60 #include <stdarg.h>
61 #include <stdio.h>
62 #include <stdlib.h>
63 #include <string.h>
64 #include <time.h>
65 #include <unistd.h>
66 #include <strings.h>
67 
68 #include "sa.h"
69 
70 #define	MAX(x1, x2)	((x1) >= (x2) ? (x1) : (x2))
71 
72 static	kstat_ctl_t	*kc;		/* libkstat cookie */
73 static	int	ncpus;
74 static	int	oncpus;
75 static	kstat_t	**cpu_stat_list = NULL;
76 static	kstat_t	**ocpu_stat_list = NULL;
77 static	int	ncaches;
78 static	kstat_t	**kmem_cache_list = NULL;
79 
80 static	kstat_t	*sysinfo_ksp, *vminfo_ksp, *var_ksp;
81 static	kstat_t *system_misc_ksp, *ufs_inode_ksp, *kmem_oversize_ksp;
82 static	kstat_t *file_cache_ksp;
83 static	kstat_named_t *ufs_inode_size_knp, *nproc_knp;
84 static	kstat_named_t *file_total_knp, *file_avail_knp;
85 static	kstat_named_t *oversize_alloc_knp, *oversize_fail_knp;
86 static	int slab_create_index, slab_destroy_index, slab_size_index;
87 static	int buf_size_index, buf_avail_index, alloc_fail_index;
88 
89 static	struct	iodevinfo zeroiodev = { NULL, NULL };
90 static	struct	iodevinfo *firstiodev = NULL;
91 static	struct	iodevinfo *lastiodev = NULL;
92 static	struct	iodevinfo *snip = NULL;
93 static	ulong_t	niodevs;
94 
95 static	void	all_stat_init(void);
96 static	int	all_stat_load(void);
97 static	void	fail(int, char *, ...);
98 static	void	safe_zalloc(void **, int, int);
99 static	kid_t	safe_kstat_read(kstat_ctl_t *, kstat_t *, void *);
100 static	kstat_t	*safe_kstat_lookup(kstat_ctl_t *, char *, int, char *);
101 static	void	*safe_kstat_data_lookup(kstat_t *, char *);
102 static	int	safe_kstat_data_index(kstat_t *, char *);
103 static	void	init_iodevs(void);
104 static	int	iodevinfo_load(void);
105 static	int	kstat_copy(const kstat_t *, kstat_t *);
106 static	void	diff_two_arrays(kstat_t ** const [], size_t, size_t,
107     kstat_t ** const []);
108 static	void	compute_cpu_stat_adj(void);
109 
110 static	char	*cmdname = "sadc";
111 
112 static	struct var var;
113 
114 static	struct sa d;
115 static	int64_t	cpu_stat_adj[CPU_STATES] = {0};
116 
117 static	long	ninode;
118 
119 int
120 main(int argc, char *argv[])
121 {
122 	int ct;
123 	unsigned ti;
124 	int fp;
125 	time_t min;
126 	struct stat buf;
127 	char *fname;
128 	struct iodevinfo *iodev;
129 	off_t flength;
130 
131 	ct = argc >= 3? atoi(argv[2]): 0;
132 	min = time((time_t *)0);
133 	ti = argc >= 3? atoi(argv[1]): 0;
134 
135 	if ((kc = kstat_open()) == NULL)
136 		fail(1, "kstat_open(): can't open /dev/kstat");
137 	all_stat_init();
138 	init_iodevs();
139 
140 	if (argc == 3 || argc == 1) {
141 		/*
142 		 * no data file is specified, direct data to stdout.
143 		 */
144 		fp = 1;
145 	} else {
146 		struct flock lock;
147 
148 		fname = (argc == 2) ? argv[1] : argv[3];
149 		/*
150 		 * Open or Create a data file. If the file doesn't exist, then
151 		 * it will be created.
152 		 */
153 		if ((fp = open(fname, O_WRONLY | O_APPEND | O_CREAT, 0644))
154 		    == -1)
155 			fail(1, "can't open data file");
156 		/*
157 		 * Lock the entire data file to prevent data corruption
158 		 */
159 		lock.l_type = F_WRLCK;
160 		lock.l_whence = SEEK_SET;
161 		lock.l_start = 0;
162 		lock.l_len = 0;
163 		if (fcntl(fp, F_SETLK, &lock) == -1)
164 			fail(1, "can't lock data file");
165 		/*
166 		 * Get data file statistics for use in determining whether
167 		 * truncation required and where rollback recovery should
168 		 * be applied.
169 		 */
170 		if (fstat(fp, &buf) == -1)
171 			fail(1, "can't get data file information");
172 		/*
173 		 * If the data file was opened and is too old, truncate it
174 		 */
175 		if (min - buf.st_mtime > 86400)
176 			if (ftruncate(fp, 0) == -1)
177 				fail(1, "can't truncate data file");
178 		/*
179 		 * Remember filesize for rollback on error (bug #1223549)
180 		 */
181 		flength = buf.st_size;
182 	}
183 
184 	memset(&d, 0, sizeof (d));
185 
186 	/*
187 	 * If n == 0, write the additional dummy record.
188 	 */
189 	if (ct == 0) {
190 		d.valid = 0;
191 		d.ts = min;
192 		d.niodevs = niodevs;
193 
194 		if (write(fp, &d, sizeof (struct sa)) != sizeof (struct sa))
195 			ftruncate(fp, flength), fail(1, "write failed");
196 
197 		for (iodev = firstiodev; iodev; iodev = iodev->next) {
198 			if (write(fp, iodev, sizeof (struct iodevinfo)) !=
199 			    sizeof (struct iodevinfo))
200 				ftruncate(fp, flength), fail(1, "write failed");
201 		}
202 	}
203 
204 	for (;;) {
205 		do {
206 			(void) kstat_chain_update(kc);
207 			all_stat_init();
208 			init_iodevs();
209 		} while (all_stat_load() || iodevinfo_load());
210 
211 		d.ts = time((time_t *)0);
212 		d.valid = 1;
213 		d.niodevs = niodevs;
214 
215 		if (write(fp, &d, sizeof (struct sa)) != sizeof (struct sa))
216 			ftruncate(fp, flength), fail(1, "write failed");
217 
218 		for (iodev = firstiodev; iodev; iodev = iodev->next) {
219 			if (write(fp, iodev, sizeof (struct iodevinfo)) !=
220 			    sizeof (struct iodevinfo))
221 				ftruncate(fp, flength), fail(1, "write failed");
222 		}
223 		if (--ct > 0) {
224 			sleep(ti);
225 		} else {
226 			close(fp);
227 			return (0);
228 		}
229 	}
230 
231 	/*NOTREACHED*/
232 }
233 
234 /*
235  * Get various KIDs for subsequent all_stat_load operations.
236  */
237 
238 static void
239 all_stat_init(void)
240 {
241 	kstat_t *ksp;
242 
243 	/*
244 	 * Initialize global statistics
245 	 */
246 
247 	sysinfo_ksp	= safe_kstat_lookup(kc, "unix", 0, "sysinfo");
248 	vminfo_ksp	= safe_kstat_lookup(kc, "unix", 0, "vminfo");
249 	kmem_oversize_ksp = safe_kstat_lookup(kc, "vmem", -1, "kmem_oversize");
250 	var_ksp		= safe_kstat_lookup(kc, "unix", 0, "var");
251 	system_misc_ksp	= safe_kstat_lookup(kc, "unix", 0, "system_misc");
252 	file_cache_ksp	= safe_kstat_lookup(kc, "unix", 0, "file_cache");
253 	ufs_inode_ksp	= kstat_lookup(kc, "ufs", 0, "inode_cache");
254 
255 	safe_kstat_read(kc, system_misc_ksp, NULL);
256 	nproc_knp	= safe_kstat_data_lookup(system_misc_ksp, "nproc");
257 
258 	safe_kstat_read(kc, file_cache_ksp, NULL);
259 	file_avail_knp = safe_kstat_data_lookup(file_cache_ksp, "buf_avail");
260 	file_total_knp = safe_kstat_data_lookup(file_cache_ksp, "buf_total");
261 
262 	safe_kstat_read(kc, kmem_oversize_ksp, NULL);
263 	oversize_alloc_knp = safe_kstat_data_lookup(kmem_oversize_ksp,
264 	    "mem_total");
265 	oversize_fail_knp = safe_kstat_data_lookup(kmem_oversize_ksp, "fail");
266 
267 	if (ufs_inode_ksp != NULL) {
268 		safe_kstat_read(kc, ufs_inode_ksp, NULL);
269 		ufs_inode_size_knp = safe_kstat_data_lookup(ufs_inode_ksp,
270 			"size");
271 		ninode = ((kstat_named_t *)
272 			safe_kstat_data_lookup(ufs_inode_ksp,
273 			"maxsize"))->value.l;
274 	}
275 
276 	/*
277 	 * Load constant values now -- no need to reread each time
278 	 */
279 
280 	safe_kstat_read(kc, var_ksp, (void *) &var);
281 
282 	/*
283 	 * Initialize per-CPU and per-kmem-cache statistics
284 	 */
285 
286 	ncpus = ncaches = 0;
287 	for (ksp = kc->kc_chain; ksp; ksp = ksp->ks_next) {
288 		if (strncmp(ksp->ks_name, "cpu_stat", 8) == 0)
289 			ncpus++;
290 		if (strcmp(ksp->ks_class, "kmem_cache") == 0)
291 			ncaches++;
292 	}
293 
294 	safe_zalloc((void **)&cpu_stat_list, ncpus * sizeof (kstat_t *), 1);
295 	safe_zalloc((void **)&kmem_cache_list, ncaches * sizeof (kstat_t *), 1);
296 
297 	ncpus = ncaches = 0;
298 	for (ksp = kc->kc_chain; ksp; ksp = ksp->ks_next) {
299 		if (strncmp(ksp->ks_name, "cpu_stat", 8) == 0 &&
300 		    kstat_read(kc, ksp, NULL) != -1)
301 			cpu_stat_list[ncpus++] = ksp;
302 		if (strcmp(ksp->ks_class, "kmem_cache") == 0 &&
303 		    kstat_read(kc, ksp, NULL) != -1)
304 			kmem_cache_list[ncaches++] = ksp;
305 	}
306 
307 	if (ncpus == 0)
308 		fail(1, "can't find any cpu statistics");
309 
310 	if (ncaches == 0)
311 		fail(1, "can't find any kmem_cache statistics");
312 
313 	ksp = kmem_cache_list[0];
314 	safe_kstat_read(kc, ksp, NULL);
315 	buf_size_index = safe_kstat_data_index(ksp, "buf_size");
316 	slab_create_index = safe_kstat_data_index(ksp, "slab_create");
317 	slab_destroy_index = safe_kstat_data_index(ksp, "slab_destroy");
318 	slab_size_index = safe_kstat_data_index(ksp, "slab_size");
319 	buf_avail_index = safe_kstat_data_index(ksp, "buf_avail");
320 	alloc_fail_index = safe_kstat_data_index(ksp, "alloc_fail");
321 }
322 
323 /*
324  * load statistics, summing across CPUs where needed
325  */
326 
327 static int
328 all_stat_load(void)
329 {
330 	int i, j;
331 	cpu_stat_t cs;
332 	ulong_t *np, *tp;
333 	uint64_t cpu_tick[4] = {0, 0, 0, 0};
334 
335 	memset(&d, 0, sizeof (d));
336 
337 	/*
338 	 * Global statistics
339 	 */
340 
341 	safe_kstat_read(kc, sysinfo_ksp, (void *) &d.si);
342 	safe_kstat_read(kc, vminfo_ksp, (void *) &d.vmi);
343 	safe_kstat_read(kc, system_misc_ksp, NULL);
344 	safe_kstat_read(kc, file_cache_ksp, NULL);
345 
346 	if (ufs_inode_ksp != NULL) {
347 		safe_kstat_read(kc, ufs_inode_ksp, NULL);
348 		d.szinode = ufs_inode_size_knp->value.ul;
349 	}
350 
351 	d.szfile = file_total_knp->value.ui64 - file_avail_knp->value.ui64;
352 	d.szproc = nproc_knp->value.ul;
353 
354 	d.mszinode = (ninode > d.szinode) ? ninode : d.szinode;
355 	d.mszfile = d.szfile;
356 	d.mszproc = var.v_proc;
357 
358 	/*
359 	 * Per-CPU statistics.
360 	 */
361 
362 	for (i = 0; i < ncpus; i++) {
363 		if (kstat_read(kc, cpu_stat_list[i], (void *) &cs) == -1)
364 			return (1);
365 
366 		np = (ulong_t *)&d.csi;
367 		tp = (ulong_t *)&cs.cpu_sysinfo;
368 
369 		/*
370 		 * Accumulate cpu ticks for CPU_IDLE, CPU_USER, CPU_KERNEL and
371 		 * CPU_WAIT with respect to each of the cpus.
372 		 */
373 		for (j = 0; j < CPU_STATES; j++)
374 			cpu_tick[j] += tp[j];
375 
376 		for (j = 0; j < sizeof (cpu_sysinfo_t); j += sizeof (ulong_t))
377 			*np++ += *tp++;
378 		np = (ulong_t *)&d.cvmi;
379 		tp = (ulong_t *)&cs.cpu_vminfo;
380 		for (j = 0; j < sizeof (cpu_vminfo_t); j += sizeof (ulong_t))
381 			*np++ += *tp++;
382 	}
383 
384 	/*
385 	 * Per-cache kmem statistics.
386 	 */
387 
388 	for (i = 0; i < ncaches; i++) {
389 		kstat_named_t *knp;
390 		u_longlong_t slab_create, slab_destroy, slab_size, mem_total;
391 		u_longlong_t buf_size, buf_avail, alloc_fail;
392 		int kmi_index;
393 
394 		if (kstat_read(kc, kmem_cache_list[i], NULL) == -1)
395 			return (1);
396 		knp = kmem_cache_list[i]->ks_data;
397 		slab_create	= knp[slab_create_index].value.ui64;
398 		slab_destroy	= knp[slab_destroy_index].value.ui64;
399 		slab_size	= knp[slab_size_index].value.ui64;
400 		buf_size	= knp[buf_size_index].value.ui64;
401 		buf_avail	= knp[buf_avail_index].value.ui64;
402 		alloc_fail	= knp[alloc_fail_index].value.ui64;
403 		if (buf_size <= 256)
404 			kmi_index = KMEM_SMALL;
405 		else
406 			kmi_index = KMEM_LARGE;
407 		mem_total = (slab_create - slab_destroy) * slab_size;
408 
409 		d.kmi.km_mem[kmi_index] += (ulong_t)mem_total;
410 		d.kmi.km_alloc[kmi_index] +=
411 			(ulong_t)mem_total - buf_size * buf_avail;
412 		d.kmi.km_fail[kmi_index] += (ulong_t)alloc_fail;
413 	}
414 
415 	safe_kstat_read(kc, kmem_oversize_ksp, NULL);
416 
417 	d.kmi.km_alloc[KMEM_OSIZE] = d.kmi.km_mem[KMEM_OSIZE] =
418 		oversize_alloc_knp->value.ui64;
419 	d.kmi.km_fail[KMEM_OSIZE] = oversize_fail_knp->value.ui64;
420 
421 	/*
422 	 * Adjust CPU statistics so the delta calculations in sar will
423 	 * be correct when facing changes to the set of online CPUs.
424 	 */
425 	compute_cpu_stat_adj();
426 	for (i = 0; i < CPU_STATES; i++)
427 		d.csi.cpu[i] = (cpu_tick[i] + cpu_stat_adj[i]) / ncpus;
428 
429 	return (0);
430 }
431 
432 static void
433 fail(int do_perror, char *message, ...)
434 {
435 	va_list args;
436 
437 	va_start(args, message);
438 	fprintf(stderr, "%s: ", cmdname);
439 	vfprintf(stderr, message, args);
440 	va_end(args);
441 	if (do_perror)
442 		fprintf(stderr, ": %s", strerror(errno));
443 	fprintf(stderr, "\n");
444 	exit(2);
445 }
446 
447 static void
448 safe_zalloc(void **ptr, int size, int free_first)
449 {
450 	if (free_first && *ptr != NULL)
451 		free(*ptr);
452 	if ((*ptr = malloc(size)) == NULL)
453 		fail(1, "malloc failed");
454 	memset(*ptr, 0, size);
455 }
456 
457 static kid_t
458 safe_kstat_read(kstat_ctl_t *kc, kstat_t *ksp, void *data)
459 {
460 	kid_t kstat_chain_id = kstat_read(kc, ksp, data);
461 
462 	if (kstat_chain_id == -1)
463 		fail(1, "kstat_read(%x, '%s') failed", kc, ksp->ks_name);
464 	return (kstat_chain_id);
465 }
466 
467 static kstat_t *
468 safe_kstat_lookup(kstat_ctl_t *kc, char *ks_module, int ks_instance,
469 	char *ks_name)
470 {
471 	kstat_t *ksp = kstat_lookup(kc, ks_module, ks_instance, ks_name);
472 
473 	if (ksp == NULL)
474 		fail(0, "kstat_lookup('%s', %d, '%s') failed",
475 			ks_module == NULL ? "" : ks_module,
476 			ks_instance,
477 			ks_name == NULL ? "" : ks_name);
478 	return (ksp);
479 }
480 
481 static void *
482 safe_kstat_data_lookup(kstat_t *ksp, char *name)
483 {
484 	void *fp = kstat_data_lookup(ksp, name);
485 
486 	if (fp == NULL)
487 		fail(0, "kstat_data_lookup('%s', '%s') failed",
488 			ksp->ks_name, name);
489 	return (fp);
490 }
491 
492 static int
493 safe_kstat_data_index(kstat_t *ksp, char *name)
494 {
495 	return ((int)((char *)safe_kstat_data_lookup(ksp, name) -
496 		(char *)ksp->ks_data) / (ksp->ks_data_size / ksp->ks_ndata));
497 }
498 
499 static int
500 kscmp(kstat_t *ks1, kstat_t *ks2)
501 {
502 	int cmp;
503 
504 	cmp = strcmp(ks1->ks_module, ks2->ks_module);
505 	if (cmp != 0)
506 		return (cmp);
507 	cmp = ks1->ks_instance - ks2->ks_instance;
508 	if (cmp != 0)
509 		return (cmp);
510 	return (strcmp(ks1->ks_name, ks2->ks_name));
511 }
512 
513 static void
514 init_iodevs(void)
515 {
516 	struct iodevinfo *iodev, *previodev, *comp;
517 	kstat_t *ksp;
518 
519 	iodev = &zeroiodev;
520 	niodevs = 0;
521 
522 	/*
523 	 * Patch the snip in the iodevinfo list (see below)
524 	 */
525 	if (snip)
526 		lastiodev->next = snip;
527 
528 	for (ksp = kc->kc_chain; ksp; ksp = ksp->ks_next) {
529 
530 		if (ksp->ks_type != KSTAT_TYPE_IO)
531 			continue;
532 		previodev = iodev;
533 		if (iodev->next)
534 			iodev = iodev->next;
535 		else {
536 			safe_zalloc((void **) &iodev->next,
537 				sizeof (struct iodevinfo), 0);
538 			iodev = iodev->next;
539 			iodev->next = NULL;
540 		}
541 		iodev->ksp = ksp;
542 		iodev->ks = *ksp;
543 		memset((void *)&iodev->kios, 0, sizeof (kstat_io_t));
544 		iodev->kios.wlastupdate = iodev->ks.ks_crtime;
545 		iodev->kios.rlastupdate = iodev->ks.ks_crtime;
546 
547 		/*
548 		 * Insertion sort on (ks_module, ks_instance, ks_name)
549 		 */
550 		comp = &zeroiodev;
551 		while (kscmp(&iodev->ks, &comp->next->ks) > 0)
552 			comp = comp->next;
553 		if (previodev != comp) {
554 			previodev->next = iodev->next;
555 			iodev->next = comp->next;
556 			comp->next = iodev;
557 			iodev = previodev;
558 		}
559 		niodevs++;
560 	}
561 	/*
562 	 * Put a snip in the linked list of iodevinfos.  The idea:
563 	 * If there was a state change such that now there are fewer
564 	 * iodevs, we snip the list and retain the tail, rather than
565 	 * freeing it.  At the next state change, we clip the tail back on.
566 	 * This prevents a lot of malloc/free activity, and it's simpler.
567 	 */
568 	lastiodev = iodev;
569 	snip = iodev->next;
570 	iodev->next = NULL;
571 
572 	firstiodev = zeroiodev.next;
573 }
574 
575 static int
576 iodevinfo_load(void)
577 {
578 	struct iodevinfo *iodev;
579 
580 	for (iodev = firstiodev; iodev; iodev = iodev->next) {
581 		if (kstat_read(kc, iodev->ksp, (void *) &iodev->kios) == -1)
582 			return (1);
583 	}
584 	return (0);
585 }
586 
587 static int
588 kstat_copy(const kstat_t *src, kstat_t *dst)
589 {
590 	*dst = *src;
591 
592 	if (src->ks_data != NULL) {
593 		if ((dst->ks_data = malloc(src->ks_data_size)) == NULL)
594 			return (-1);
595 		bcopy(src->ks_data, dst->ks_data, src->ks_data_size);
596 	} else {
597 		dst->ks_data = NULL;
598 		dst->ks_data_size = 0;
599 	}
600 	return (0);
601 }
602 
603 /*
604  * Determine what is different between two sets of kstats; s[0] and s[1]
605  * are arrays of kstats of size ns0 and ns1, respectively, and sorted by
606  * instance number.  u[0] and u[1] are two arrays which must be
607  * caller-zallocated; each must be of size MAX(ns0, ns1).  When the
608  * function terminates, u[0] contains all s[0]-unique items and u[1]
609  * contains all s[1]-unique items.  Any unused entries in u[0] and u[1]
610  * are left NULL.
611  */
612 static void
613 diff_two_arrays(kstat_t ** const s[], size_t ns0, size_t ns1,
614     kstat_t ** const u[])
615 {
616 	kstat_t **s0p = s[0], **s1p = s[1];
617 	kstat_t **u0p = u[0], **u1p = u[1];
618 	int i = 0, j = 0;
619 
620 	while (i < ns0 && j < ns1) {
621 		if ((*s0p)->ks_instance == (*s1p)->ks_instance) {
622 			if ((*s0p)->ks_kid != (*s1p)->ks_kid) {
623 				/*
624 				 * The instance is the same, but this
625 				 * CPU has been offline during the
626 				 * interval, so we consider *u0p to
627 				 * be s0p-unique, and similarly for
628 				 * *u1p.
629 				 */
630 				*(u0p++) = *s0p;
631 				*(u1p++) = *s1p;
632 			}
633 			s0p++;
634 			i++;
635 			s1p++;
636 			j++;
637 		} else if ((*s0p)->ks_instance < (*s1p)->ks_instance) {
638 			*(u0p++) = *(s0p++);
639 			i++;
640 		} else {
641 			*(u1p++) = *(s1p++);
642 			j++;
643 		}
644 	}
645 
646 	while (i < ns0) {
647 		*(u0p++) = *(s0p++);
648 		i++;
649 	}
650 	while (j < ns1) {
651 		*(u1p++) = *(s1p++);
652 		j++;
653 	}
654 }
655 
656 static int
657 cpuid_compare(const void *p1, const void *p2)
658 {
659 	return ((*(kstat_t **)p1)->ks_instance -
660 	    (*(kstat_t **)p2)->ks_instance);
661 }
662 
663 /*
664  * Identify those CPUs which were not present for the whole interval so
665  * their statistics can be removed from the aggregate.
666  */
667 static void
668 compute_cpu_stat_adj(void)
669 {
670 	int i, j;
671 
672 	if (ocpu_stat_list) {
673 		kstat_t **s[2];
674 		kstat_t **inarray[2];
675 		int max_cpus = MAX(ncpus, oncpus);
676 
677 		qsort(cpu_stat_list, ncpus, sizeof (*cpu_stat_list),
678 		    cpuid_compare);
679 		qsort(ocpu_stat_list, oncpus, sizeof (*ocpu_stat_list),
680 		    cpuid_compare);
681 
682 		s[0] = ocpu_stat_list;
683 		s[1] = cpu_stat_list;
684 
685 		safe_zalloc((void *)&inarray[0], sizeof (**inarray) * max_cpus,
686 		    0);
687 		safe_zalloc((void *)&inarray[1], sizeof (**inarray) * max_cpus,
688 		    0);
689 		diff_two_arrays(s, oncpus, ncpus, inarray);
690 
691 		for (i = 0; i < max_cpus; i++) {
692 			if (inarray[0][i])
693 				for (j = 0; j < CPU_STATES; j++)
694 					cpu_stat_adj[j] +=
695 					    ((cpu_stat_t *)inarray[0][i]
696 					    ->ks_data)->cpu_sysinfo.cpu[j];
697 			if (inarray[1][i])
698 				for (j = 0; j < CPU_STATES; j++)
699 					cpu_stat_adj[j] -=
700 					    ((cpu_stat_t *)inarray[1][i]
701 					    ->ks_data)->cpu_sysinfo.cpu[j];
702 		}
703 
704 		free(inarray[0]);
705 		free(inarray[1]);
706 	}
707 
708 	/*
709 	 * Preserve the last interval's CPU stats.
710 	 */
711 	if (cpu_stat_list) {
712 		for (i = 0; i < oncpus; i++)
713 			free(ocpu_stat_list[i]->ks_data);
714 
715 		oncpus = ncpus;
716 		safe_zalloc((void **)&ocpu_stat_list, oncpus *
717 		    sizeof (*ocpu_stat_list), 1);
718 		for (i = 0; i < ncpus; i++) {
719 			safe_zalloc((void *)&ocpu_stat_list[i],
720 			    sizeof (*ocpu_stat_list[0]), 0);
721 			if (kstat_copy(cpu_stat_list[i], ocpu_stat_list[i]))
722 				fail(1, "kstat_copy() failed");
723 		}
724 	}
725 }
726