xref: /illumos-gate/usr/src/uts/common/os/pghw.c (revision e9af4bc0b1cc30cea75d6ad4aa2fde97d985e9be)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/systm.h>
27 #include <sys/types.h>
28 #include <sys/param.h>
29 #include <sys/thread.h>
30 #include <sys/cpuvar.h>
31 #include <sys/kmem.h>
32 #include <sys/cmn_err.h>
33 #include <sys/group.h>
34 #include <sys/pg.h>
35 #include <sys/pghw.h>
36 #include <sys/cpu_pm.h>
37 #include <sys/cap_util.h>
38 
39 /*
40  * Processor Groups: Hardware sharing relationship layer
41  *
42  * This file implements an extension to Processor Groups to capture
43  * hardware sharing relationships existing between logical CPUs. Examples of
44  * hardware sharing relationships include shared caches on some CMT
 * processor architectures, or shared local memory controllers on NUMA
46  * based system architectures.
47  *
48  * The pghw_t structure represents the extended PG. The first member
49  * of the structure is the generic pg_t with the pghw specific members
50  * following. The generic pg_t *must* remain the first member of the
51  * structure as the code uses casting of structure references to access
52  * the generic pg_t structure elements.
53  *
54  * In addition to the generic CPU grouping, physical PGs have a hardware
55  * sharing relationship enumerated "type", and an instance id. The enumerated
56  * type is defined by the pghw_type_t enumeration, while the instance id
57  * uniquely identifies the sharing instance from among others of the same
58  * hardware sharing type.
59  *
60  * The physical PGs are organized into an overall hierarchy, and are tracked
61  * in a number of different per CPU, and per pghw_type_t type groups.
62  * As an example:
63  *
64  * -------------
65  * | pg_hw     |
66  * | (group_t) |
67  * -------------
68  *  ||                          ============================
69  *  ||\\-----------------------//       \\                 \\
70  *  ||  | hwset (PGC_HW_CHIP) |        -------------      -------------
71  *  ||  | (group_t)           |        | pghw_t    |      | pghw_t    |
72  *  ||  -----------------------        | chip 0    |      | chip 1    |
73  *  ||                                 -------------      -------------
74  *  ||                                 \\  \\  \\  \\     \\  \\  \\  \\
75  *  ||                                  cpu cpu cpu cpu    cpu cpu cpu cpu
76  *  ||
77  *  ||                          ============================
78  *  ||\\-----------------------//       \\                 \\
79  *  ||  | hwset (PGC_HW_IPIPE)|        -------------      -------------
80  *  ||  | (group_t)           |        | pghw_t    |      | pghw_t    |
81  *  ||  -----------------------        | ipipe 0   |      | ipipe 1   |
82  *  ||                                 -------------      -------------
83  *  ||                                 \\  \\             \\  \\
84  *  ||                                  cpu cpu            cpu cpu
85  *  ...
86  *
87  *
 * The top level pg_hw is a group of "hwset" groups. Each hwset holds a group
 * of physical PGs of the same hardware sharing type. Within each hwset, the
 * PG's instance id uniquely identifies the grouping relationship among other
91  * groupings of the same sharing type. The instance id for a grouping is
92  * platform defined, and in some cases may be used by platform code as a handle
93  * to search for a particular relationship instance.
94  *
95  * Each physical PG (by virtue of the embedded pg_t) contains a group of CPUs
96  * that participate in the sharing relationship. Each CPU also has associated
97  * with it a grouping tracking the PGs in which the CPU belongs. This can be
98  * used to iterate over the various relationships in which the CPU participates
99  * (the CPU's chip, cache, lgroup, etc.).
100  *
101  * The hwsets are created dynamically as new hardware sharing relationship types
102  * are instantiated. They are never destroyed, as once a given relationship
103  * type appears in the system, it is quite likely that at least one instance of
104  * that relationship will always persist as long as the system is running.
105  */
106 
107 static group_t		*pg_hw;		/* top level pg hw group */
108 
109 /*
110  * Physical PG kstats
111  */
112 struct pghw_kstat {
113 	kstat_named_t	pg_id;
114 	kstat_named_t	pg_class;
115 	kstat_named_t	pg_ncpus;
116 	kstat_named_t	pg_instance_id;
117 	kstat_named_t	pg_hw;
118 	kstat_named_t	pg_policy;
119 } pghw_kstat = {
120 	{ "id",			KSTAT_DATA_UINT32 },
121 	{ "pg_class",		KSTAT_DATA_STRING },
122 	{ "ncpus",		KSTAT_DATA_UINT32 },
123 	{ "instance_id",	KSTAT_DATA_UINT32 },
124 	{ "hardware",		KSTAT_DATA_STRING },
125 	{ "policy",		KSTAT_DATA_STRING },
126 };
127 
128 kmutex_t		pghw_kstat_lock;
129 
130 /*
131  * Capacity and Utilization PG kstats
132  *
133  * These kstats are updated one at a time, so we can have a single scratch space
134  * to fill the data.
135  *
136  * kstat fields:
137  *
138  *   pgid		PG ID for PG described by this kstat
139  *
140  *   pg_ncpus		Number of CPUs within this PG
141  *
142  *   pg_cpus		String describing CPUs within this PG
143  *
144  *   pg_sharing		Name of sharing relationship for this PG
145  *
146  *   pg_generation	Generation value that increases whenever any CPU leaves
147  *			  or joins PG. Two kstat snapshots for the same
148  *			  CPU may only be compared if they have the same
149  *			  generation
150  *
151  *   pg_hw_util		Running value of PG utilization for the sharing
152  *			  relationship
153  *
154  *   pg_hw_util_time_running
155  *			Total time spent collecting CU data. The time may be
156  *			less than wall time if CU counters were stopped for
157  *			some time.
158  *
159  *   pg_hw_util_time_stopped Total time the CU counters were stopped.
160  *
161  *   pg_hw_util_rate	Utilization rate, expressed in operations per second.
162  *
163  *   pg_hw_util_rate_max Maximum observed value of utilization rate.
164  */
165 struct pghw_cu_kstat {
166 	kstat_named_t	pg_id;
167 	kstat_named_t	pg_ncpus;
168 	kstat_named_t	pg_generation;
169 	kstat_named_t	pg_hw_util;
170 	kstat_named_t	pg_hw_util_time_running;
171 	kstat_named_t	pg_hw_util_time_stopped;
172 	kstat_named_t	pg_hw_util_rate;
173 	kstat_named_t	pg_hw_util_rate_max;
174 	kstat_named_t	pg_cpus;
175 	kstat_named_t	pg_sharing;
176 } pghw_cu_kstat = {
177 	{ "id",			KSTAT_DATA_UINT32 },
178 	{ "ncpus",		KSTAT_DATA_UINT32 },
179 	{ "generation",		KSTAT_DATA_UINT32   },
180 	{ "hw_util",		KSTAT_DATA_UINT64   },
181 	{ "hw_util_time_running",	KSTAT_DATA_UINT64   },
182 	{ "hw_util_time_stopped",	KSTAT_DATA_UINT64   },
183 	{ "hw_util_rate",	KSTAT_DATA_UINT64   },
184 	{ "hw_util_rate_max",	KSTAT_DATA_UINT64   },
185 	{ "cpus",		KSTAT_DATA_STRING   },
186 	{ "sharing_relation",	KSTAT_DATA_STRING   },
187 };
188 
189 /*
190  * Calculate the string size to represent NCPUS. Allow 5 digits for each CPU ID
191  * plus one space per CPU plus NUL byte in the end. This is only an estimate,
192  * since we try to compress CPU ranges as x-y. In the worst case the string
193  * representation of CPUs may be truncated.
194  */
195 #define	CPUSTR_LEN(ncpus) ((ncpus) * 6)
196 
197 /*
198  * Maximum length of the string that represents list of CPUs
199  */
200 static int pg_cpulist_maxlen = 0;
201 
202 static void		pghw_kstat_create(pghw_t *);
203 static int		pghw_kstat_update(kstat_t *, int);
204 static int		pghw_cu_kstat_update(kstat_t *, int);
205 static int		cpu2id(void *);
206 
207 /*
208  * hwset operations
209  */
210 static group_t		*pghw_set_create(pghw_type_t);
211 static void		pghw_set_add(group_t *, pghw_t *);
212 static void		pghw_set_remove(group_t *, pghw_t *);
213 
214 static void		pghw_cpulist_alloc(pghw_t *);
215 static int		cpu2id(void *);
216 
217 /*
218  * Initialize the physical portion of a hardware PG
219  */
220 void
221 pghw_init(pghw_t *pg, cpu_t *cp, pghw_type_t hw)
222 {
223 	group_t		*hwset;
224 
225 	if ((hwset = pghw_set_lookup(hw)) == NULL) {
226 		/*
227 		 * Haven't seen this hardware type yet
228 		 */
229 		hwset = pghw_set_create(hw);
230 	}
231 
232 	pghw_set_add(hwset, pg);
233 	pg->pghw_hw = hw;
234 	pg->pghw_generation = 0;
235 	pg->pghw_instance =
236 	    pg_plat_hw_instance_id(cp, hw);
237 	pghw_kstat_create(pg);
238 
239 	/*
240 	 * Hardware sharing relationship specific initialization
241 	 */
242 	switch (pg->pghw_hw) {
243 	case PGHW_POW_ACTIVE:
244 		pg->pghw_handle =
245 		    (pghw_handle_t)cpupm_domain_init(cp, CPUPM_DTYPE_ACTIVE);
246 		break;
247 	case PGHW_POW_IDLE:
248 		pg->pghw_handle =
249 		    (pghw_handle_t)cpupm_domain_init(cp, CPUPM_DTYPE_IDLE);
250 		break;
251 	default:
252 		pg->pghw_handle = (pghw_handle_t)NULL;
253 	}
254 }
255 
256 /*
257  * Teardown the physical portion of a physical PG
258  */
259 void
260 pghw_fini(pghw_t *pg)
261 {
262 	group_t		*hwset;
263 
264 	hwset = pghw_set_lookup(pg->pghw_hw);
265 	ASSERT(hwset != NULL);
266 
267 	pghw_set_remove(hwset, pg);
268 	pg->pghw_instance = (id_t)PGHW_INSTANCE_ANON;
269 	pg->pghw_hw = (pghw_type_t)-1;
270 
271 	if (pg->pghw_kstat != NULL)
272 		kstat_delete(pg->pghw_kstat);
273 
274 	/*
275 	 * Destroy string representation of CPUs
276 	 */
277 	if (pg->pghw_cpulist != NULL) {
278 		kmem_free(pg->pghw_cpulist,
279 		    pg->pghw_cpulist_len);
280 		pg->pghw_cpulist = NULL;
281 	}
282 
283 	if (pg->pghw_cu_kstat != NULL)
284 		kstat_delete(pg->pghw_cu_kstat);
285 }
286 
287 /*
288  * Find an existing physical PG in which to place
289  * the given CPU for the specified hardware sharing
290  * relationship
291  */
292 pghw_t *
293 pghw_place_cpu(cpu_t *cp, pghw_type_t hw)
294 {
295 	group_t		*hwset;
296 
297 	if ((hwset = pghw_set_lookup(hw)) == NULL) {
298 		return (NULL);
299 	}
300 
301 	return ((pghw_t *)pg_cpu_find_pg(cp, hwset));
302 }
303 
304 /*
305  * Find the pg representing the hw sharing relationship in which
306  * cp belongs
307  */
308 pghw_t *
309 pghw_find_pg(cpu_t *cp, pghw_type_t hw)
310 {
311 	group_iter_t	i;
312 	pghw_t	*pg;
313 
314 	group_iter_init(&i);
315 	while ((pg = group_iterate(&cp->cpu_pg->pgs, &i)) != NULL) {
316 		if (pg->pghw_hw == hw)
317 			return (pg);
318 	}
319 	return (NULL);
320 }
321 
322 /*
323  * Find the PG of the given hardware sharing relationship
324  * type with the given instance id
325  */
326 pghw_t *
327 pghw_find_by_instance(id_t id, pghw_type_t hw)
328 {
329 	group_iter_t	i;
330 	group_t		*set;
331 	pghw_t		*pg;
332 
333 	set = pghw_set_lookup(hw);
334 	if (!set)
335 		return (NULL);
336 
337 	group_iter_init(&i);
338 	while ((pg = group_iterate(set, &i)) != NULL) {
339 		if (pg->pghw_instance == id)
340 			return (pg);
341 	}
342 	return (NULL);
343 }
344 
345 /*
346  * CPUs physical ID cache creation / destruction
347  * The cache's elements are initialized to the CPU's id
348  */
349 void
350 pghw_physid_create(cpu_t *cp)
351 {
352 	int	i;
353 
354 	cp->cpu_physid = kmem_alloc(sizeof (cpu_physid_t), KM_SLEEP);
355 
356 	for (i = 0; i < (sizeof (cpu_physid_t) / sizeof (id_t)); i++) {
357 		((id_t *)cp->cpu_physid)[i] = cp->cpu_id;
358 	}
359 }
360 
361 void
362 pghw_physid_destroy(cpu_t *cp)
363 {
364 	if (cp->cpu_physid) {
365 		kmem_free(cp->cpu_physid, sizeof (cpu_physid_t));
366 		cp->cpu_physid = NULL;
367 	}
368 }
369 
370 /*
371  * Create a new, empty hwset.
372  * This routine may block, and must not be called from any
373  * paused CPU context.
374  */
375 static group_t	*
376 pghw_set_create(pghw_type_t hw)
377 {
378 	group_t	*g;
379 	int	ret;
380 
381 	/*
382 	 * Create the top level PG hw group if it doesn't already exist
383 	 * This is a "set" of hardware sets, that is ordered (and indexed)
384 	 * by the pghw_type_t enum.
385 	 */
386 	if (pg_hw == NULL) {
387 		pg_hw = kmem_alloc(sizeof (group_t), KM_SLEEP);
388 		group_create(pg_hw);
389 		group_expand(pg_hw, (uint_t)PGHW_NUM_COMPONENTS);
390 	}
391 
392 	/*
393 	 * Create the new hwset
394 	 * Add it to the top level pg_hw group.
395 	 */
396 	g = kmem_alloc(sizeof (group_t), KM_SLEEP);
397 	group_create(g);
398 
399 	ret = group_add_at(pg_hw, g, (uint_t)hw);
400 	ASSERT(ret == 0);
401 
402 	return (g);
403 }
404 
405 /*
406  * Find the hwset associated with the given hardware sharing type
407  */
408 group_t *
409 pghw_set_lookup(pghw_type_t hw)
410 {
411 	group_t	*hwset;
412 
413 	if (pg_hw == NULL)
414 		return (NULL);
415 
416 	hwset = GROUP_ACCESS(pg_hw, (uint_t)hw);
417 	return (hwset);
418 }
419 
420 /*
421  * Add a PG to a hwset
422  */
423 static void
424 pghw_set_add(group_t *hwset, pghw_t *pg)
425 {
426 	(void) group_add(hwset, pg, GRP_RESIZE);
427 }
428 
429 /*
430  * Remove a PG from a hwset
431  */
432 static void
433 pghw_set_remove(group_t *hwset, pghw_t *pg)
434 {
435 	int result;
436 
437 	result = group_remove(hwset, pg, GRP_RESIZE);
438 	ASSERT(result == 0);
439 }
440 
441 /*
442  * Return a string name given a pg_hw sharing type
443  */
444 char *
445 pghw_type_string(pghw_type_t hw)
446 {
447 	switch (hw) {
448 	case PGHW_IPIPE:
449 		return ("Integer Pipeline");
450 	case PGHW_CACHE:
451 		return ("Cache");
452 	case PGHW_FPU:
453 		return ("Floating Point Unit");
454 	case PGHW_MPIPE:
455 		return ("Data Pipe to memory");
456 	case PGHW_CHIP:
457 		return ("Socket");
458 	case PGHW_MEMORY:
459 		return ("Memory");
460 	case PGHW_POW_ACTIVE:
461 		return ("CPU PM Active Power Domain");
462 	case PGHW_POW_IDLE:
463 		return ("CPU PM Idle Power Domain");
464 	default:
465 		return ("unknown");
466 	}
467 }
468 
469 /*
470  * Return a short string name given a pg_hw sharing type
471  */
472 char *
473 pghw_type_shortstring(pghw_type_t hw)
474 {
475 	switch (hw) {
476 	case PGHW_IPIPE:
477 		return ("instr_pipeline");
478 	case PGHW_CACHE:
479 		return ("Cache");
480 	case PGHW_FPU:
481 		return ("FPU");
482 	case PGHW_MPIPE:
483 		return ("memory_pipeline");
484 	case PGHW_CHIP:
485 		return ("Socket");
486 	case PGHW_MEMORY:
487 		return ("Memory");
488 	case PGHW_POW_ACTIVE:
489 		return ("CPU_PM_Active");
490 	case PGHW_POW_IDLE:
491 		return ("CPU_PM_Idle");
492 	default:
493 		return ("unknown");
494 	}
495 }
496 
497 /*
498  * Create / Update routines for PG hw kstats
499  *
500  * It is the intention of these kstats to provide some level
501  * of informational / debugging observability into the types
502  * and nature of the system's detected hardware sharing relationships
503  */
void
pghw_kstat_create(pghw_t *pg)
{
	char *class = pghw_type_string(pg->pghw_hw);

	/*
	 * Create the "pg" kstat, exporting this PG's identity, class,
	 * size, instance id, sharing type, and load balancing policy.
	 */
	if ((pg->pghw_kstat = kstat_create("pg", ((pg_t *)pg)->pg_id,
	    "pg", "pg",
	    KSTAT_TYPE_NAMED,
	    sizeof (pghw_kstat) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL)) != NULL) {
		/* Class string, hw string, and policy string */
		pg->pghw_kstat->ks_data_size += PG_CLASS_NAME_MAX;
		pg->pghw_kstat->ks_data_size += PGHW_KSTAT_STR_LEN_MAX;
		pg->pghw_kstat->ks_data_size += PGHW_KSTAT_STR_LEN_MAX;
		pg->pghw_kstat->ks_lock = &pghw_kstat_lock;
		pg->pghw_kstat->ks_data = &pghw_kstat;
		pg->pghw_kstat->ks_update = pghw_kstat_update;
		pg->pghw_kstat->ks_private = pg;
		kstat_install(pg->pghw_kstat);
	}

	/* Size the worst-case CPU list string once, from max_ncpus */
	if (pg_cpulist_maxlen == 0)
		pg_cpulist_maxlen = CPUSTR_LEN(max_ncpus);

	/*
	 * Create the "hardware" kstat, exporting capacity and
	 * utilization (CU) data for this PG.
	 */
	if ((pg->pghw_cu_kstat = kstat_create("pg", ((pg_t *)pg)->pg_id,
	    "hardware", class,
	    KSTAT_TYPE_NAMED,
	    sizeof (pghw_cu_kstat) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL)) != NULL) {
		pg->pghw_cu_kstat->ks_lock = &pghw_kstat_lock;
		pg->pghw_cu_kstat->ks_data = &pghw_cu_kstat;
		pg->pghw_cu_kstat->ks_update = pghw_cu_kstat_update;
		pg->pghw_cu_kstat->ks_private = pg;
		pg->pghw_cu_kstat->ks_data_size += strlen(class) + 1;
		/* Allow space for CPU strings */
		pg->pghw_cu_kstat->ks_data_size += PGHW_KSTAT_STR_LEN_MAX;
		pg->pghw_cu_kstat->ks_data_size += pg_cpulist_maxlen;
		kstat_install(pg->pghw_cu_kstat);
	}
}
550 
551 int
552 pghw_kstat_update(kstat_t *ksp, int rw)
553 {
554 	struct pghw_kstat	*pgsp = &pghw_kstat;
555 	pghw_t			*pg = ksp->ks_private;
556 
557 	if (rw == KSTAT_WRITE)
558 		return (EACCES);
559 
560 	pgsp->pg_id.value.ui32 = ((pg_t *)pg)->pg_id;
561 	pgsp->pg_ncpus.value.ui32 = GROUP_SIZE(&((pg_t *)pg)->pg_cpus);
562 	pgsp->pg_instance_id.value.ui32 = pg->pghw_instance;
563 	kstat_named_setstr(&pgsp->pg_class, ((pg_t *)pg)->pg_class->pgc_name);
564 	kstat_named_setstr(&pgsp->pg_hw, pghw_type_string(pg->pghw_hw));
565 	kstat_named_setstr(&pgsp->pg_policy, pg_policy_name((pg_t *)pg));
566 	return (0);
567 }
568 
int
pghw_cu_kstat_update(kstat_t *ksp, int rw)
{
	struct pghw_cu_kstat	*pgsp = &pghw_cu_kstat;
	pghw_t			*pg = ksp->ks_private;
	pghw_util_t		*hw_util = &pg->pghw_stats;

	/* These kstats are read-only */
	if (rw == KSTAT_WRITE)
		return (EACCES);

	pgsp->pg_id.value.ui32 = ((pg_t *)pg)->pg_id;
	pgsp->pg_ncpus.value.ui32 = GROUP_SIZE(&((pg_t *)pg)->pg_cpus);

	/*
	 * Allocate memory for the string representing the list of CPUs in PG.
	 * This memory should persist past the call to pghw_cu_kstat_update()
	 * since the kstat snapshot routine will reference this memory.
	 */
	pghw_cpulist_alloc(pg);

	if (pg->pghw_kstat_gen != pg->pghw_generation) {
		/*
		 * PG kstat generation number is out of sync with PG's
		 * generation number. It means that some CPUs could have joined
		 * or left PG and it is not possible to compare the numbers
		 * obtained before and after the generation change.
		 *
		 * Reset the maximum utilization rate and start computing it
		 * from scratch.
		 */
		hw_util->pghw_util = 0;
		hw_util->pghw_rate_max = 0;
		pg->pghw_kstat_gen = pg->pghw_generation;
	}

	/*
	 * We can't block on cpu_lock here: when a PG is destroyed (which
	 * happens under cpu_lock), the destroyer deletes this kstat and
	 * waits for any in-flight update to finish — which would never
	 * happen if we were waiting on cpu_lock. Use mutex_tryenter() and
	 * simply skip the refresh if the lock is contended.
	 */
	if (mutex_tryenter(&cpu_lock)) {
		if (pg->pghw_cpulist != NULL &&
		    *(pg->pghw_cpulist) == '\0') {
			/* Empty string signals the list needs regenerating */
			(void) group2intlist(&(((pg_t *)pg)->pg_cpus),
			    pg->pghw_cpulist, pg->pghw_cpulist_len, cpu2id);
		}
		cu_pg_update(pg);
		mutex_exit(&cpu_lock);
	}

	pgsp->pg_generation.value.ui32 = pg->pghw_kstat_gen;
	pgsp->pg_hw_util.value.ui64 = hw_util->pghw_util;
	pgsp->pg_hw_util_time_running.value.ui64 = hw_util->pghw_time_running;
	pgsp->pg_hw_util_time_stopped.value.ui64 = hw_util->pghw_time_stopped;
	pgsp->pg_hw_util_rate.value.ui64 = hw_util->pghw_rate;
	pgsp->pg_hw_util_rate_max.value.ui64 = hw_util->pghw_rate_max;
	/* cpulist may be NULL if its KM_NOSLEEP allocation failed */
	if (pg->pghw_cpulist != NULL)
		kstat_named_setstr(&pgsp->pg_cpus, pg->pghw_cpulist);
	else
		kstat_named_setstr(&pgsp->pg_cpus, "");

	kstat_named_setstr(&pgsp->pg_sharing, pghw_type_string(pg->pghw_hw));

	return (0);
}
635 
636 /*
637  * Update the string representation of CPUs in PG (pg->pghw_cpulist).
638  * The string representation is used for kstats.
639  *
 * The string is allocated if it has not already been, or if the PG now has
 * more CPUs than the existing string can hold. If the PG has an equal or
 * smaller number of CPUs, but the actual set of CPUs may have changed, the
 * string is reset to the empty string, which causes the string representation
 * to be recreated. The pghw_generation field is used to detect whether CPUs
 * within the pg may have changed.
645  */
static void
pghw_cpulist_alloc(pghw_t *pg)
{
	/* Worst-case string length for the current CPU membership */
	uint_t	ncpus = GROUP_SIZE(&((pg_t *)pg)->pg_cpus);
	size_t	len = CPUSTR_LEN(ncpus);

	/*
	 * If the pghw_cpulist string is already allocated we need to make sure
	 * that it has sufficient length. Also if the set of CPUs may have
	 * changed, we need to re-generate the string.
	 */
	if (pg->pghw_cpulist != NULL &&
	    pg->pghw_kstat_gen != pg->pghw_generation) {
		if (len <= pg->pghw_cpulist_len) {
			/*
			 * There is sufficient space in the pghw_cpulist for
			 * the new set of CPUs. Just clear the string to trigger
			 * re-generation of list of CPUs
			 */
			*(pg->pghw_cpulist) = '\0';
		} else {
			/*
			 * There is, potentially, insufficient space in
			 * pghw_cpulist, so reallocate the string.
			 */
			ASSERT(strlen(pg->pghw_cpulist) < pg->pghw_cpulist_len);
			kmem_free(pg->pghw_cpulist, pg->pghw_cpulist_len);
			pg->pghw_cpulist = NULL;
			pg->pghw_cpulist_len = 0;
		}
	}

	if (pg->pghw_cpulist == NULL) {
		/*
		 * Allocate space to hold cpulist.
		 *
		 * Length can not be bigger than the maximum space we have
		 * allowed for the kstat buffer
		 */
		if (len > pg_cpulist_maxlen)
			len = pg_cpulist_maxlen;
		if (len > 0) {
			/*
			 * KM_NOSLEEP since this runs on the kstat update
			 * path; on failure the CPU list simply stays empty
			 * until a later update succeeds.
			 */
			pg->pghw_cpulist = kmem_zalloc(len, KM_NOSLEEP);
			if (pg->pghw_cpulist != NULL)
				pg->pghw_cpulist_len = len;
		}
	}
}
694 
695 static int
696 cpu2id(void *v)
697 {
698 	cpu_t *cp = (cpu_t *)v;
699 
700 	ASSERT(v != NULL);
701 
702 	return (cp->cpu_id);
703 }
704