/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/systm.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <sys/cpupart.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/kstat.h>
#include <sys/processor.h>
#include <sys/disp.h>
#include <sys/group.h>
#include <sys/pg.h>

/*
 * Processor groups
 *
 * With the introduction of Chip Multi-Threaded (CMT) processor architectures,
 * it is no longer necessarily true that a given physical processor module
 * will present itself as a single schedulable entity (cpu_t). Rather, each
 * chip and/or processor core may present itself as one or more "logical" CPUs.
 *
 * The logical CPUs presented may share physical components such as caches,
 * data pipes, execution pipelines, FPUs, etc. It is advantageous to have the
 * kernel be aware of the relationships existing between logical CPUs so that
 * the appropriate optimizations may be employed.
 *
 * The processor group abstraction represents a set of logical CPUs that
 * generally share some sort of physical or characteristic relationship.
 *
 * In the case of a physical sharing relationship, the CPUs in the group may
 * share a pipeline, cache or floating point unit. In the case of a logical
 * relationship, a PG may represent the set of CPUs in a processor set, or the
 * set of CPUs running at a particular clock speed.
 *
 * The generic processor group structure, pg_t, contains the elements generic
 * to a group of CPUs. Depending on the nature of the CPU relationship
 * (LOGICAL or PHYSICAL), a pointer to a pg may be recast to a "view" of that
 * PG where more specific data is represented.
 *
 * As an example, a PG representing a PHYSICAL relationship may be recast to
 * a pghw_t, where data further describing the hardware sharing relationship
 * is maintained. See pghw.c and pghw.h for details on physical PGs.
 *
 * At this time a more specialized casting of a PG representing a LOGICAL
 * relationship has not been implemented, but the architecture allows for this
 * in the future.
 *
 * Processor Group Classes
 *
 * Processor group consumers may wish to maintain and associate specific
 * data with the PGs they create. For this reason, a mechanism for creating
 * class specific PGs exists. Classes may overload the default functions for
 * creating, destroying, and associating CPUs with PGs, and may also register
 * class specific callbacks to be invoked when the CPU related system
 * configuration changes. Class specific data is stored/associated with
 * PGs by incorporating the pg_t (or pghw_t, as appropriate), as the first
 * element of a class specific PG object. In memory, such a structure may look
 * like:
 *
 * ----------------------- - - -
 * | common              | | | |  <--(pg_t *)
 * ----------------------- | | -
 * | HW specific         | | | <-----(pghw_t *)
 * ----------------------- | -
 * | class specific      | | <-------(pg_cmt_t *)
 * ----------------------- -
 *
 * Access to the PG class specific data can be had by casting a pointer to
 * its class specific view.
 */
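
/*
 * For illustration, a minimal sketch of a class specific PG following the
 * layout above. The type and field names here are hypothetical; see the
 * CMT class for the real pg_cmt_t definition:
 *
 *	typedef struct pg_example {
 *		pghw_t	pgex_hw;	- common pg_t + HW data, must be first
 *		uint_t	pgex_data;	- class specific data follows
 *	} pg_example_t;
 *
 * Since the pg_t resides at offset zero, a (pg_example_t *) may be recast
 * to a (pghw_t *) or (pg_t *) to obtain the more generic views.
 */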

static pg_t		*pg_alloc_default(pg_class_t);
static void		pg_free_default(pg_t *);
static void		pg_null_op();

/*
 * Bootstrap CPU specific PG data
 * See pg_cpu_bootstrap()
 */
static cpu_pg_t		bootstrap_pg_data;

/*
 * Bitset of allocated PG ids (they are sequential)
 * and the next free id in the set.
 */
static bitset_t		pg_id_set;
static pgid_t		pg_id_next = 0;

/*
 * Default and externed PG ops vectors
 */
static struct pg_ops pg_ops_default = {
	pg_alloc_default,	/* alloc */
	pg_free_default,	/* free */
	NULL,			/* cpu_init */
	NULL,			/* cpu_fini */
	NULL,			/* cpu_active */
	NULL,			/* cpu_inactive */
	NULL,			/* cpupart_in */
	NULL,			/* cpupart_out */
	NULL,			/* cpupart_move */
	NULL,			/* cpu_belongs */
	NULL,			/* policy_name */
};

static struct pg_cb_ops pg_cb_ops_default = {
	pg_null_op,		/* thread_swtch */
	pg_null_op,		/* thread_remain */
};

/*
 * Class specific PG allocation callbacks
 */
#define	PG_ALLOC(class)							\
	(pg_classes[class].pgc_ops->alloc ?				\
	    pg_classes[class].pgc_ops->alloc() :			\
	    pg_classes[pg_default_cid].pgc_ops->alloc())

#define	PG_FREE(pg)							\
	((pg)->pg_class->pgc_ops->free ?				\
	    (pg)->pg_class->pgc_ops->free(pg) :				\
	    pg_classes[pg_default_cid].pgc_ops->free(pg))		\


/*
 * Class specific PG policy name
 */
#define	PG_POLICY_NAME(pg)						\
	((pg)->pg_class->pgc_ops->policy_name ?				\
	    (pg)->pg_class->pgc_ops->policy_name(pg) : NULL)		\

/*
 * Class specific membership test callback
 */
#define	PG_CPU_BELONGS(pg, cp)						\
	((pg)->pg_class->pgc_ops->cpu_belongs ?				\
	    (pg)->pg_class->pgc_ops->cpu_belongs(pg, cp) : 0)		\

/*
 * CPU configuration callbacks
 */
#define	PG_CPU_INIT(class, cp, cpu_pg)					\
{									\
	if (pg_classes[class].pgc_ops->cpu_init)			\
		pg_classes[class].pgc_ops->cpu_init(cp, cpu_pg);	\
}

#define	PG_CPU_FINI(class, cp, cpu_pg)					\
{									\
	if (pg_classes[class].pgc_ops->cpu_fini)			\
		pg_classes[class].pgc_ops->cpu_fini(cp, cpu_pg);	\
}

#define	PG_CPU_ACTIVE(class, cp)					\
{									\
	if (pg_classes[class].pgc_ops->cpu_active)			\
		pg_classes[class].pgc_ops->cpu_active(cp);		\
}

#define	PG_CPU_INACTIVE(class, cp)					\
{									\
	if (pg_classes[class].pgc_ops->cpu_inactive)			\
		pg_classes[class].pgc_ops->cpu_inactive(cp);		\
}

/*
 * CPU / cpupart configuration callbacks
 */
#define	PG_CPUPART_IN(class, cp, pp)					\
{									\
	if (pg_classes[class].pgc_ops->cpupart_in)			\
		pg_classes[class].pgc_ops->cpupart_in(cp, pp);		\
}

#define	PG_CPUPART_OUT(class, cp, pp)					\
{									\
	if (pg_classes[class].pgc_ops->cpupart_out)			\
		pg_classes[class].pgc_ops->cpupart_out(cp, pp);		\
}

#define	PG_CPUPART_MOVE(class, cp, old, new)				\
{									\
	if (pg_classes[class].pgc_ops->cpupart_move)			\
		pg_classes[class].pgc_ops->cpupart_move(cp, old, new);	\
}



static pg_class_t	*pg_classes;
static int		pg_nclasses;

static pg_cid_t		pg_default_cid;

/*
 * Initialize common PG subsystem.
 */
void
pg_init(void)
{
	extern void pg_cmt_class_init();
	extern void pg_cmt_cpu_startup();

	pg_default_cid =
	    pg_class_register("default", &pg_ops_default, PGR_LOGICAL);

	/*
	 * Initialize classes to allow them to register with the framework
	 */
	pg_cmt_class_init();

	pg_cpu0_init();
	pg_cmt_cpu_startup(CPU);
}

/*
 * Perform CPU 0 initialization
 */
void
pg_cpu0_init(void)
{
	extern void pghw_physid_create();

	/*
	 * Create the physical ID cache for the boot CPU
	 */
	pghw_physid_create(CPU);

	/*
	 * pg_cpu_* require that cpu_lock be held
	 */
	mutex_enter(&cpu_lock);

	(void) pg_cpu_init(CPU, B_FALSE);
	pg_cpupart_in(CPU, &cp_default);
	pg_cpu_active(CPU);

	mutex_exit(&cpu_lock);
}

/*
 * Invoked when topology for CPU0 changes
 * post pg_cpu0_init().
 *
 * Currently happens as a result of null_proc_lpa
 * on Starcat.
 */
void
pg_cpu0_reinit(void)
{
	mutex_enter(&cpu_lock);
	pg_cpu_inactive(CPU);
	pg_cpupart_out(CPU, &cp_default);
	pg_cpu_fini(CPU, NULL);

	(void) pg_cpu_init(CPU, B_FALSE);
	pg_cpupart_in(CPU, &cp_default);
	pg_cpu_active(CPU);
	mutex_exit(&cpu_lock);
}

/*
 * Register a new PG class
 */
pg_cid_t
pg_class_register(char *name, struct pg_ops *ops, pg_relation_t relation)
{
	pg_class_t	*newclass;
	pg_class_t	*classes_old;
	id_t		cid;

	mutex_enter(&cpu_lock);

	/*
	 * Allocate a new pg_class_t in the pg_classes array
	 */
	if (pg_nclasses == 0) {
		pg_classes = kmem_zalloc(sizeof (pg_class_t), KM_SLEEP);
	} else {
		classes_old = pg_classes;
		pg_classes =
		    kmem_zalloc(sizeof (pg_class_t) * (pg_nclasses + 1),
		    KM_SLEEP);
		(void) kcopy(classes_old, pg_classes,
		    sizeof (pg_class_t) * pg_nclasses);
		kmem_free(classes_old, sizeof (pg_class_t) * pg_nclasses);
	}

	cid = pg_nclasses++;
	newclass = &pg_classes[cid];

	(void) strncpy(newclass->pgc_name, name, PG_CLASS_NAME_MAX);
	newclass->pgc_id = cid;
	newclass->pgc_ops = ops;
	newclass->pgc_relation = relation;

	mutex_exit(&cpu_lock);

	return (cid);
}
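
/*
 * Usage sketch (names hypothetical): a class's init routine registers
 * itself with the framework and saves the returned class id for later
 * pg_create() calls:
 *
 *	static pg_cid_t pg_myclass_cid;
 *
 *	void
 *	pg_myclass_init(void)
 *	{
 *		pg_myclass_cid = pg_class_register("myclass",
 *		    &pg_ops_myclass, PGR_PHYSICAL);
 *	}
 */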

/*
 * Try to find an existing pg in "set" in which to place "cp".
 * Returns the pg if found, and NULL otherwise.
 * In the event that the CPU could belong to multiple
 * PGs in the set, the first matching PG will be returned.
 */
pg_t *
pg_cpu_find_pg(cpu_t *cp, group_t *set)
{
	pg_t		*pg;
	group_iter_t	i;

	group_iter_init(&i);
	while ((pg = group_iterate(set, &i)) != NULL) {
		/*
		 * Ask the class if the CPU belongs here
		 */
		if (PG_CPU_BELONGS(pg, cp))
			return (pg);
	}
	return (NULL);
}

/*
 * Iterate over the CPUs in a PG after initializing
 * the iterator with PG_CPU_ITR_INIT()
 */
cpu_t *
pg_cpu_next(pg_cpu_itr_t *itr)
{
	cpu_t		*cpu;
	pg_t		*pg = itr->pg;

	cpu = group_iterate(&pg->pg_cpus, &itr->position);
	return (cpu);
}
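
/*
 * Usage sketch: walk every CPU in a PG. PG_CPU_ITR_INIT() is defined in
 * pg.h, and membership changes occur under cpu_lock, so callers hold it
 * across the walk. The consumer routine here is hypothetical:
 *
 *	pg_cpu_itr_t	itr;
 *	cpu_t		*cp;
 *
 *	PG_CPU_ITR_INIT(pg, itr);
 *	while ((cp = pg_cpu_next(&itr)) != NULL)
 *		do_something_with(cp);
 */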

/*
 * Test if a given PG contains a given CPU
 */
boolean_t
pg_cpu_find(pg_t *pg, cpu_t *cp)
{
	if (group_find(&pg->pg_cpus, cp) == (uint_t)-1)
		return (B_FALSE);

	return (B_TRUE);
}

/*
 * Set the PG's callbacks to the defaults
 */
void
pg_callback_set_defaults(pg_t *pg)
{
	bcopy(&pg_cb_ops_default, &pg->pg_cb, sizeof (struct pg_cb_ops));
}
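
/*
 * Usage sketch (handler name hypothetical): a class wanting thread switch
 * notifications starts from the defaults and then overrides the hook of
 * interest in the PG's callback ops vector:
 *
 *	pg_callback_set_defaults(pg);
 *	pg->pg_cb.thread_swtch = pg_myclass_thread_swtch;
 */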

/*
 * Create a PG of a given class.
 * This routine may block.
 */
pg_t *
pg_create(pg_cid_t cid)
{
	pg_t	*pg;
	pgid_t	id;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Call the class specific PG allocation routine
	 */
	pg = PG_ALLOC(cid);
	pg->pg_class = &pg_classes[cid];
	pg->pg_relation = pg->pg_class->pgc_relation;

	/*
	 * Find the next free sequential pg id
	 */
	do {
		if (pg_id_next >= bitset_capacity(&pg_id_set))
			bitset_resize(&pg_id_set, pg_id_next + 1);
		id = pg_id_next++;
	} while (bitset_in_set(&pg_id_set, id));

	pg->pg_id = id;
	bitset_add(&pg_id_set, pg->pg_id);

	/*
	 * Create the PG's CPU group
	 */
	group_create(&pg->pg_cpus);

	/*
	 * Initialize the events ops vector
	 */
	pg_callback_set_defaults(pg);

	return (pg);
}

/*
 * Destroy a PG.
 * This routine may block.
 */
void
pg_destroy(pg_t *pg)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	group_destroy(&pg->pg_cpus);

	/*
	 * Unassign the pg_id
	 */
	if (pg_id_next > pg->pg_id)
		pg_id_next = pg->pg_id;
	bitset_del(&pg_id_set, pg->pg_id);

	/*
	 * Invoke the class specific de-allocation routine
	 */
	PG_FREE(pg);
}

/*
 * Add the CPU "cp" to processor group "pg"
 * This routine may block.
 */
void
pg_cpu_add(pg_t *pg, cpu_t *cp, cpu_pg_t *cpu_pg)
{
	int	err;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/* This adds the CPU to the PG's CPU group */
	err = group_add(&pg->pg_cpus, cp, GRP_RESIZE);
	ASSERT(err == 0);

	/*
	 * The CPU should still be referencing the bootstrap PG data
	 * at this point, since this routine may block causing us to
	 * enter the dispatcher.
	 */
	ASSERT(pg_cpu_is_bootstrapped(cp));

	/* This adds the PG to the CPU's PG group */
	err = group_add(&cpu_pg->pgs, pg, GRP_RESIZE);
	ASSERT(err == 0);
}
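
/*
 * Usage sketch (names hypothetical): a class's cpu_init callback commonly
 * looks for an existing PG that the new CPU belongs in, creates one if
 * none is found, and then adds the CPU to it:
 *
 *	pg = pg_cpu_find_pg(cp, &myclass_pgs);
 *	if (pg == NULL)
 *		pg = pg_create(pg_myclass_cid);
 *	pg_cpu_add(pg, cp, cpu_pg);
 */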

/*
 * Remove "cp" from "pg".
 * This routine may block.
 */
void
pg_cpu_delete(pg_t *pg, cpu_t *cp, cpu_pg_t *cpu_pg)
{
	int	err;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/* Remove the CPU from the PG */
	err = group_remove(&pg->pg_cpus, cp, GRP_RESIZE);
	ASSERT(err == 0);

	/*
	 * The CPU should still be referencing the bootstrap PG data
	 * at this point, since this routine may block causing us to
	 * enter the dispatcher.
	 */
	ASSERT(pg_cpu_is_bootstrapped(cp));

	/* Remove the PG from the CPU's PG group */
	err = group_remove(&cpu_pg->pgs, pg, GRP_RESIZE);
	ASSERT(err == 0);
}

/*
 * Allocate a CPU's PG data. This hangs off struct cpu at cpu_pg
 */
static cpu_pg_t *
pg_cpu_data_alloc(void)
{
	cpu_pg_t	*pgd;

	pgd = kmem_zalloc(sizeof (cpu_pg_t), KM_SLEEP);
	group_create(&pgd->pgs);
	group_create(&pgd->cmt_pgs);

	return (pgd);
}

/*
 * Free the CPU's PG data.
 */
static void
pg_cpu_data_free(cpu_pg_t *pgd)
{
	group_destroy(&pgd->pgs);
	group_destroy(&pgd->cmt_pgs);
	kmem_free(pgd, sizeof (cpu_pg_t));
}

/*
 * Called when either a new CPU is coming into the system (either
 * via booting or DR) or when the CPU's PG data is being recalculated.
 * Allocate its PG data, and notify all registered classes about
 * the new CPU.
 *
 * If "deferred_init" is B_TRUE, the CPU's PG data will be allocated
 * and returned, but the "bootstrap" structure will be left in place.
 * The deferred_init option is used when all CPUs in the system are
 * using the bootstrap structure as part of the process of recalculating
 * all PG data. The caller must replace the bootstrap structure with the
 * allocated PG data before pg_cpu_active is called.
 *
 * This routine may block.
 */
cpu_pg_t *
pg_cpu_init(cpu_t *cp, boolean_t deferred_init)
{
	pg_cid_t	i;
	cpu_pg_t	*cpu_pg;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Allocate and size the per CPU pg data
	 *
	 * The CPU's PG data will be populated by the various
	 * PG classes during the invocation of the PG_CPU_INIT()
	 * callback below.
	 *
	 * Since we could block and enter the dispatcher during
	 * this process, the CPU will continue to reference the bootstrap
	 * PG data until all the initialization completes.
	 */
	ASSERT(pg_cpu_is_bootstrapped(cp));

	cpu_pg = pg_cpu_data_alloc();

	/*
	 * Notify all registered classes about the new CPU
	 */
	for (i = 0; i < pg_nclasses; i++)
		PG_CPU_INIT(i, cp, cpu_pg);

	/*
	 * The CPU's PG data is now ready to use.
	 */
	if (deferred_init == B_FALSE)
		cp->cpu_pg = cpu_pg;

	return (cpu_pg);
}
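
/*
 * Sketch of the deferred initialization sequence described above, as a
 * caller recalculating a CPU's PG data might use it (error handling and
 * the surrounding pause/resume logic omitted):
 *
 *	cpu_pg = pg_cpu_init(cp, B_TRUE);	(CPU stays bootstrapped)
 *	cp->cpu_pg = cpu_pg;			(swap in the new PG data)
 *	pg_cpu_active(cp);
 */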

/*
 * Either this CPU is being deleted from the system or its PG data is
 * being recalculated. Notify the classes and free up the CPU's PG data.
 *
 * If "cpu_pg_deferred" is non-NULL, it points to the CPU's PG data and
 * serves to indicate that this CPU is already using the bootstrap
 * structure. Used as part of the process to recalculate the PG data for
 * all CPUs in the system.
 */
void
pg_cpu_fini(cpu_t *cp, cpu_pg_t *cpu_pg_deferred)
{
	pg_cid_t	i;
	cpu_pg_t	*cpu_pg;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (cpu_pg_deferred == NULL) {
		cpu_pg = cp->cpu_pg;

		/*
		 * This can happen if the CPU coming into the system
		 * failed to power on.
		 */
		if (cpu_pg == NULL || pg_cpu_is_bootstrapped(cp))
			return;

		/*
		 * Have the CPU reference the bootstrap PG data to survive
		 * the dispatcher should it block from here on out.
		 */
		pg_cpu_bootstrap(cp);
	} else {
		ASSERT(pg_cpu_is_bootstrapped(cp));
		cpu_pg = cpu_pg_deferred;
	}

	for (i = 0; i < pg_nclasses; i++)
		PG_CPU_FINI(i, cp, cpu_pg);

	pg_cpu_data_free(cpu_pg);
}

/*
 * This CPU is becoming active (online)
 * This routine may not block, as it is called from the context of
 * paused CPUs.
 */
void
pg_cpu_active(cpu_t *cp)
{
	pg_cid_t	i;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Notify all registered classes that the CPU is becoming active
	 */
	for (i = 0; i < pg_nclasses; i++)
		PG_CPU_ACTIVE(i, cp);
}

/*
 * This CPU is going inactive (offline)
 * This routine may not block, as it is called from the context
 * of paused CPUs.
 */
void
pg_cpu_inactive(cpu_t *cp)
{
	pg_cid_t	i;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Notify all registered classes that the CPU is going inactive
	 */
	for (i = 0; i < pg_nclasses; i++)
		PG_CPU_INACTIVE(i, cp);
}

/*
 * Invoked when the CPU is about to move into the partition
 * This routine may block.
 */
void
pg_cpupart_in(cpu_t *cp, cpupart_t *pp)
{
	int	i;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Notify all registered classes that the
	 * CPU is about to enter the CPU partition
	 */
	for (i = 0; i < pg_nclasses; i++)
		PG_CPUPART_IN(i, cp, pp);
}

/*
 * Invoked when the CPU is about to move out of the partition
 * This routine may block.
 */
/*ARGSUSED*/
void
pg_cpupart_out(cpu_t *cp, cpupart_t *pp)
{
	int	i;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Notify all registered classes that the
	 * CPU is about to leave the CPU partition
	 */
	for (i = 0; i < pg_nclasses; i++)
		PG_CPUPART_OUT(i, cp, pp);
}

/*
 * Invoked when the CPU is *moving* partitions.
 *
 * This routine may not block, as it is called from the context of
 * paused CPUs.
 */
void
pg_cpupart_move(cpu_t *cp, cpupart_t *oldpp, cpupart_t *newpp)
{
	int	i;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Notify all registered classes that the
	 * CPU is moving between CPU partitions
	 */
	for (i = 0; i < pg_nclasses; i++)
		PG_CPUPART_MOVE(i, cp, oldpp, newpp);
}

/*
 * Return a class specific string describing a policy implemented
 * across this PG
 */
char *
pg_policy_name(pg_t *pg)
{
	char *str;
	if ((str = PG_POLICY_NAME(pg)) != NULL)
		return (str);

	return ("N/A");
}

/*
 * Provide the specified CPU a bootstrap pg
 * This is needed to allow sane behaviour if any PG consuming
 * code needs to deal with a partially initialized CPU
 */
void
pg_cpu_bootstrap(cpu_t *cp)
{
	cp->cpu_pg = &bootstrap_pg_data;
}

/*
 * Return non-zero if the specified CPU is bootstrapped,
 * which means its CPU specific PG data has not yet been
 * fully constructed.
 */
int
pg_cpu_is_bootstrapped(cpu_t *cp)
{
	return (cp->cpu_pg == &bootstrap_pg_data);
}

/*ARGSUSED*/
static pg_t *
pg_alloc_default(pg_class_t class)
{
	return (kmem_zalloc(sizeof (pg_t), KM_SLEEP));
}

/*ARGSUSED*/
static void
pg_free_default(struct pg *pg)
{
	kmem_free(pg, sizeof (pg_t));
}

static void
pg_null_op()
{
}

/*
 * Invoke the "thread switch" callback for each of the CPU's PGs
 * This is invoked from the dispatcher swtch() routine, which is called
 * when a thread running on a CPU should switch to another thread.
 * "cp" is the CPU on which the thread switch is happening
 * "now" is an unscaled hrtime_t timestamp taken in swtch()
 * "old" and "new" are the outgoing and incoming threads, respectively.
 */
void
pg_ev_thread_swtch(struct cpu *cp, hrtime_t now, kthread_t *old, kthread_t *new)
{
	int	i, sz;
	group_t	*grp;
	pg_t	*pg;

	grp = &cp->cpu_pg->pgs;
	sz = GROUP_SIZE(grp);
	for (i = 0; i < sz; i++) {
		pg = GROUP_ACCESS(grp, i);
		pg->pg_cb.thread_swtch(pg, cp, now, old, new);
	}
}

/*
 * Invoke the "thread remain" callback for each of the CPU's PGs.
 * This is called from the dispatcher's swtch() routine when a thread
 * running on the CPU "cp" is switching to itself, which can happen as an
 * artifact of the thread's timeslice expiring.
 */
void
pg_ev_thread_remain(struct cpu *cp, kthread_t *t)
{
	int	i, sz;
	group_t	*grp;
	pg_t	*pg;

	grp = &cp->cpu_pg->pgs;
	sz = GROUP_SIZE(grp);
	for (i = 0; i < sz; i++) {
		pg = GROUP_ACCESS(grp, i);
		pg->pg_cb.thread_remain(pg, cp, t);
	}
}
825