/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "statcommon.h"
#include "dsr.h"

#include <stdlib.h>
#include <unistd.h>
#include <strings.h>
#include <errno.h>
#include <limits.h>
#include <poll.h>

#define	ARRAY_SIZE(a)	(sizeof (a) / sizeof (*a))

/*
 * The time we delay before retrying after a transient failure
 * (EAGAIN from libkstat or an allocation failure), in milliseconds.
 */
#define	RETRY_DELAY 200

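/*
 * The named tick counters in a CPU's "sys" kstat, summed by
 * cpu_ticks_delta() to give total CPU time.
 */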
static char *cpu_states[] = {
	"cpu_ticks_idle",
	"cpu_ticks_user",
	"cpu_ticks_kernel",
	"cpu_ticks_wait"
};

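/*
 * Look up a kstat and read its current data. Returns NULL, with
 * errno set by libkstat, if either step fails.
 */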
static kstat_t *
kstat_lookup_read(kstat_ctl_t *kc, char *module,
    int instance, char *name)
{
	kstat_t *ksp = kstat_lookup(kc, module, instance, name);
	if (ksp == NULL)
		return (NULL);
	if (kstat_read(kc, ksp, NULL) == -1)
		return (NULL);
	return (ksp);
}

/*
 * Note: the following helpers do not clean up on failure; that is
 * left to free_snapshot(), called on the acquire_snapshot() failure
 * path.
 */

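/*
 * Fill in the per-CPU part of the snapshot: one entry per possible
 * CPU id, with copies of the "vm" and "sys" kstats for each CPU
 * that is actually active.
 */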
static int
acquire_cpus(struct snapshot *ss, kstat_ctl_t *kc)
{
	size_t i;

	ss->s_nr_cpus = sysconf(_SC_CPUID_MAX) + 1;
	ss->s_cpus = calloc(ss->s_nr_cpus, sizeof (struct cpu_snapshot));
	if (ss->s_cpus == NULL)
		goto out;

	for (i = 0; i < ss->s_nr_cpus; i++) {
		kstat_t *ksp;

		ss->s_cpus[i].cs_id = ID_NO_CPU;
		ss->s_cpus[i].cs_state = p_online(i, P_STATUS);
		/* If no valid CPU is present, move on to the next one */
		if (ss->s_cpus[i].cs_state == -1)
			continue;
		ss->s_cpus[i].cs_id = i;

		if ((ksp = kstat_lookup_read(kc, "cpu_info", i, NULL)) == NULL)
			goto out;

		(void) pset_assign(PS_QUERY, i, &ss->s_cpus[i].cs_pset_id);
		if (ss->s_cpus[i].cs_pset_id == PS_NONE)
			ss->s_cpus[i].cs_pset_id = ID_NO_PSET;

		if (!CPU_ACTIVE(&ss->s_cpus[i]))
			continue;

		if ((ksp = kstat_lookup_read(kc, "cpu", i, "vm")) == NULL)
			goto out;

		if (kstat_copy(ksp, &ss->s_cpus[i].cs_vm))
			goto out;

		if ((ksp = kstat_lookup_read(kc, "cpu", i, "sys")) == NULL)
			goto out;

		if (kstat_copy(ksp, &ss->s_cpus[i].cs_sys))
			goto out;
	}

	errno = 0;
out:
	return (errno);
}

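/*
 * Fill in the pset part of the snapshot. Slot 0 collects CPUs that
 * belong to no pset; each following slot holds one pset and
 * pointers to its active CPUs.
 */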
static int
acquire_psets(struct snapshot *ss)
{
	psetid_t *pids = NULL;
	struct pset_snapshot *ps;
	size_t pids_nr;
	size_t i, j;

	/*
	 * Careful in this code. We have to use pset_list
	 * twice, but in between pids_nr can change at will.
	 * We delay the setting of s_nr_psets until we have
	 * the "final" value of pids_nr.
	 */

	if (pset_list(NULL, &pids_nr) < 0)
		return (errno);

	if ((pids = calloc(pids_nr, sizeof (psetid_t))) == NULL)
		goto out;

	if (pset_list(pids, &pids_nr) < 0)
		goto out;

	ss->s_psets = calloc(pids_nr + 1, sizeof (struct pset_snapshot));
	if (ss->s_psets == NULL)
		goto out;
	ss->s_nr_psets = pids_nr + 1;

	/* CPUs not in any actual pset */
	ps = &ss->s_psets[0];
	ps->ps_id = 0;
	ps->ps_cpus = calloc(ss->s_nr_cpus, sizeof (struct cpu_snapshot *));
	if (ps->ps_cpus == NULL)
		goto out;

	/* CPUs in a pset */
	for (i = 1; i < ss->s_nr_psets; i++) {
		ps = &ss->s_psets[i];

		ps->ps_id = pids[i - 1];
		ps->ps_cpus =
		    calloc(ss->s_nr_cpus, sizeof (struct cpu_snapshot *));
		if (ps->ps_cpus == NULL)
			goto out;
	}

	for (i = 0; i < ss->s_nr_psets; i++) {
		ps = &ss->s_psets[i];

		for (j = 0; j < ss->s_nr_cpus; j++) {
			if (!CPU_ACTIVE(&ss->s_cpus[j]))
				continue;
			if (ss->s_cpus[j].cs_pset_id != ps->ps_id)
				continue;

			ps->ps_cpus[ps->ps_nr_cpus++] = &ss->s_cpus[j];
		}
	}

	errno = 0;
out:
	free(pids);
	return (errno);
}

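/*
 * Gather interrupt counters: the clock interrupt count from
 * unix:0:system_misc, plus one entry per KSTAT_TYPE_INTR kstat
 * on the chain.
 */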
static int
acquire_intrs(struct snapshot *ss, kstat_ctl_t *kc)
{
	kstat_t *ksp;
	size_t i = 0;
	kstat_t *sys_misc;
	kstat_named_t *clock;

	/* clock interrupt */
	ss->s_nr_intrs = 1;

	for (ksp = kc->kc_chain; ksp; ksp = ksp->ks_next) {
		if (ksp->ks_type == KSTAT_TYPE_INTR)
			ss->s_nr_intrs++;
	}

	ss->s_intrs = calloc(ss->s_nr_intrs, sizeof (struct intr_snapshot));
	if (ss->s_intrs == NULL)
		return (errno);

	sys_misc = kstat_lookup_read(kc, "unix", 0, "system_misc");
	if (sys_misc == NULL)
		goto out;

	clock = (kstat_named_t *)kstat_data_lookup(sys_misc, "clk_intr");
	if (clock == NULL)
		goto out;

	(void) strlcpy(ss->s_intrs[0].is_name, "clock", KSTAT_STRLEN);
	ss->s_intrs[0].is_total = clock->value.ui32;

	i = 1;

	for (ksp = kc->kc_chain; ksp; ksp = ksp->ks_next) {
		kstat_intr_t *ki;
		int j;

		if (ksp->ks_type != KSTAT_TYPE_INTR)
			continue;
		if (kstat_read(kc, ksp, NULL) == -1)
			goto out;

		ki = KSTAT_INTR_PTR(ksp);

		(void) strlcpy(ss->s_intrs[i].is_name, ksp->ks_name,
		    KSTAT_STRLEN);
		ss->s_intrs[i].is_total = 0;

		for (j = 0; j < KSTAT_NUM_INTRS; j++)
			ss->s_intrs[i].is_total += ki->intrs[j];

		i++;
	}

	errno = 0;
out:
	return (errno);
}

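/*
 * Fill in the system-wide part of the snapshot: sysinfo, vminfo,
 * DNLC stats, clock ticks and memory deficit, plus the "sys" and
 * "vm" kstats aggregated over all active CPUs.
 */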
int
acquire_sys(struct snapshot *ss, kstat_ctl_t *kc)
{
	size_t i;
	kstat_named_t *knp;
	kstat_t *ksp;

	if ((ksp = kstat_lookup(kc, "unix", 0, "sysinfo")) == NULL)
		return (errno);

	if (kstat_read(kc, ksp, &ss->s_sys.ss_sysinfo) == -1)
		return (errno);

	if ((ksp = kstat_lookup(kc, "unix", 0, "vminfo")) == NULL)
		return (errno);

	if (kstat_read(kc, ksp, &ss->s_sys.ss_vminfo) == -1)
		return (errno);

	if ((ksp = kstat_lookup(kc, "unix", 0, "dnlcstats")) == NULL)
		return (errno);

	if (kstat_read(kc, ksp, &ss->s_sys.ss_nc) == -1)
		return (errno);

	if ((ksp = kstat_lookup(kc, "unix", 0, "system_misc")) == NULL)
		return (errno);

	if (kstat_read(kc, ksp, NULL) == -1)
		return (errno);

	knp = (kstat_named_t *)kstat_data_lookup(ksp, "clk_intr");
	if (knp == NULL)
		return (errno);

	ss->s_sys.ss_ticks = knp->value.l;

	knp = (kstat_named_t *)kstat_data_lookup(ksp, "deficit");
	if (knp == NULL)
		return (errno);

	ss->s_sys.ss_deficit = knp->value.l;

	for (i = 0; i < ss->s_nr_cpus; i++) {
		if (!CPU_ACTIVE(&ss->s_cpus[i]))
			continue;

		if (kstat_add(&ss->s_cpus[i].cs_sys, &ss->s_sys.ss_agg_sys))
			return (errno);
		if (kstat_add(&ss->s_cpus[i].cs_vm, &ss->s_sys.ss_agg_vm))
			return (errno);
	}

	return (0);
}

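/*
 * Take a snapshot of the requested statistics, restarting from
 * scratch if a kstat disappears while we are reading the chain.
 */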
struct snapshot *
acquire_snapshot(kstat_ctl_t *kc, int types, struct iodev_filter *iodev_filter)
{
	struct snapshot *ss = NULL;
	int err;

retry:
	err = 0;
	/* ensure any partial resources are freed on a retry */
	free_snapshot(ss);

	ss = safe_alloc(sizeof (struct snapshot));

	(void) memset(ss, 0, sizeof (struct snapshot));

	ss->s_types = types;

	/* wait for a possibly up-to-date chain */
	while (kstat_chain_update(kc) == -1) {
		if (errno == EAGAIN)
			(void) poll(NULL, 0, RETRY_DELAY);
		else
			fail(1, "kstat_chain_update failed");
	}

	if (types & SNAP_FLUSHES) {
		kstat_t *ksp;
		ksp = kstat_lookup(kc, "unix", 0, "flushmeter");
		if (ksp == NULL) {
			fail(0, "This machine does not have "
			    "a virtual address cache");
		}
		if (kstat_read(kc, ksp, &ss->s_flushes) == -1)
			err = errno;
	}

	if (!err && (types & SNAP_INTERRUPTS))
		err = acquire_intrs(ss, kc);

	if (!err && (types & (SNAP_CPUS | SNAP_SYSTEM | SNAP_PSETS)))
		err = acquire_cpus(ss, kc);

	if (!err && (types & SNAP_PSETS))
		err = acquire_psets(ss);

	if (!err && (types & (SNAP_IODEVS | SNAP_CONTROLLERS |
	    SNAP_IOPATHS_LI | SNAP_IOPATHS_LTI)))
		err = acquire_iodevs(ss, kc, iodev_filter);

	if (!err && (types & SNAP_SYSTEM))
		err = acquire_sys(ss, kc);

	switch (err) {
		case 0:
			break;
		case EAGAIN:
			(void) poll(NULL, 0, RETRY_DELAY);
		/* a kstat disappeared from under us */
		/*FALLTHRU*/
		case ENXIO:
		case ENOENT:
			goto retry;
		default:
			fail(1, "acquiring snapshot failed");
	}

	return (ss);
}

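/*
 * Free a snapshot and everything hanging off it. Safe to call on
 * NULL or on a partially constructed snapshot.
 */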
void
free_snapshot(struct snapshot *ss)
{
	size_t i;

	if (ss == NULL)
		return;

	while (ss->s_iodevs) {
		struct iodev_snapshot *tmp = ss->s_iodevs;
		ss->s_iodevs = ss->s_iodevs->is_next;
		free_iodev(tmp);
	}

	if (ss->s_cpus) {
		for (i = 0; i < ss->s_nr_cpus; i++) {
			free(ss->s_cpus[i].cs_vm.ks_data);
			free(ss->s_cpus[i].cs_sys.ks_data);
		}
		free(ss->s_cpus);
	}

	if (ss->s_psets) {
		for (i = 0; i < ss->s_nr_psets; i++)
			free(ss->s_psets[i].ps_cpus);
		free(ss->s_psets);
	}

	free(ss->s_sys.ss_agg_sys.ks_data);
	free(ss->s_sys.ss_agg_vm.ks_data);
	free(ss);
}

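/*
 * Open the kstat chain, retrying on EAGAIN; any other failure
 * is fatal.
 */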
kstat_ctl_t *
open_kstat(void)
{
	kstat_ctl_t *kc;

	while ((kc = kstat_open()) == NULL) {
		if (errno == EAGAIN)
			(void) poll(NULL, 0, RETRY_DELAY);
		else
			fail(1, "kstat_open failed");
	}

	return (kc);
}

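/*
 * malloc() that retries on EAGAIN and treats any other failure as
 * fatal; never returns NULL.
 */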
void *
safe_alloc(size_t size)
{
	void *ptr;

	while ((ptr = malloc(size)) == NULL) {
		if (errno == EAGAIN)
			(void) poll(NULL, 0, RETRY_DELAY);
		else
			fail(1, "malloc failed");
	}
	return (ptr);
}

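/*
 * strdup() with the same retry semantics as safe_alloc(). A NULL
 * argument yields a NULL result rather than an error.
 */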
char *
safe_strdup(char *str)
{
	char *ret;

	if (str == NULL)
		return (NULL);

	while ((ret = strdup(str)) == NULL) {
		if (errno == EAGAIN)
			(void) poll(NULL, 0, RETRY_DELAY);
		else
			fail(1, "strdup failed");
	}
	return (ret);
}

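/*
 * Return the change in a named 64-bit counter between two readings
 * of the same kstat. Callers must pass a name that exists in the
 * kstat; the lookups are not checked for NULL. If "old" carries no
 * data, the new reading is returned as an absolute value.
 */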
uint64_t
kstat_delta(kstat_t *old, kstat_t *new, char *name)
{
	kstat_named_t *knew = kstat_data_lookup(new, name);

	if (old && old->ks_data) {
		kstat_named_t *kold = kstat_data_lookup(old, name);
		return (knew->value.ui64 - kold->value.ui64);
	}
	return (knew->value.ui64);
}

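/*
 * Deep-copy a kstat, duplicating its data area. Returns -1 if the
 * data allocation fails.
 */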
int
kstat_copy(const kstat_t *src, kstat_t *dst)
{
	*dst = *src;

	if (src->ks_data != NULL) {
		if ((dst->ks_data = malloc(src->ks_data_size)) == NULL)
			return (-1);
		bcopy(src->ks_data, dst->ks_data, src->ks_data_size);
	} else {
		dst->ks_data = NULL;
		dst->ks_data_size = 0;
	}
	return (0);
}

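/*
 * Accumulate the named counters of "src" into "dst", which are
 * assumed to share the same layout. If "dst" is still empty, this
 * reduces to a copy.
 */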
int
kstat_add(const kstat_t *src, kstat_t *dst)
{
	size_t i;
	kstat_named_t *from;
	kstat_named_t *to;

	if (dst->ks_data == NULL)
		return (kstat_copy(src, dst));

	from = src->ks_data;
	to = dst->ks_data;

	for (i = 0; i < src->ks_ndata; i++) {
		/* "addition" makes little sense for strings */
		if (from->data_type != KSTAT_DATA_CHAR &&
		    from->data_type != KSTAT_DATA_STRING)
			to->value.ui64 += from->value.ui64;
		from++;
		to++;
	}

	return (0);
}

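/*
 * Total ticks (idle + user + kernel + wait) elapsed between two
 * readings of a CPU's "sys" kstat.
 */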
uint64_t
cpu_ticks_delta(kstat_t *old, kstat_t *new)
{
	uint64_t ticks = 0;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(cpu_states); i++)
		ticks += kstat_delta(old, new, cpu_states[i]);
	return (ticks);
}

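/*
 * Return the number of active CPUs in the snapshot.
 */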
int
nr_active_cpus(struct snapshot *ss)
{
	size_t i;
	int count = 0;

	for (i = 0; i < ss->s_nr_cpus; i++) {
		if (CPU_ACTIVE(&ss->s_cpus[i]))
			count++;
	}

	return (count);
}

/*
 * Return the number of ticks delta between two hrtime_t
 * values. Attempt to cater for various kinds of overflow
 * in hrtime_t - no matter how improbable.
 */
uint64_t
hrtime_delta(hrtime_t old, hrtime_t new)
{
	uint64_t del;

	if ((new >= old) && (old >= 0L))
		return (new - old);
	else {
		/*
		 * We've overflowed the positive portion of an
		 * hrtime_t.
		 */
		if (new < 0L) {
			/*
			 * The new value is negative. Handle the
			 * case where the old value is positive or
			 * negative.
			 */
			uint64_t n1;
			uint64_t o1;

			n1 = -new;
			if (old > 0L)
				return (n1 - old);
			else {
				o1 = -old;
				del = n1 - o1;
				return (del);
			}
		} else {
			/*
			 * Either we've just gone from being negative
			 * to positive *or* the last entry was positive
			 * and the new entry is also positive but *less*
			 * than the old entry. This implies we waited
			 * quite a few days on a very fast system between
			 * iostat displays.
			 */
			if (old < 0L) {
				uint64_t o2;

				o2 = -old;
				del = UINT64_MAX - o2;
			} else {
				del = UINT64_MAX - old;
			}
			del += new;
			return (del);
		}
	}
}
579