xref: /titanic_41/usr/src/uts/common/sys/lgrp.h (revision d5d7cf4e084ada61ab475b433429da88487a6725)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #ifndef	_LGRP_H
28 #define	_LGRP_H
29 
30 /*
31  * locality group definitions for kernel
32  */
33 
34 #include <sys/types.h>
35 
36 #ifdef	__cplusplus
37 extern "C" {
38 #endif
39 
#define	LGRP_NONE	(-1)		/* non-existent lgroup ID */

#if (!defined(_KERNEL) && !defined(_KMEMUSER))
/*
 * Opaque stand-in for the kernel's lgrp_mem_policy_info_t (defined below
 * for _KERNEL/_KMEMUSER) so userland consumers get a correctly-sized type
 * without kernel headers.  Must remain the same size as the real struct.
 */
typedef struct lgrp_mem_policy_info { int opaque[2]; }	lgrp_mem_policy_info_t;
#endif	/* !_KERNEL && !_KMEMUSER */
45 
46 #if (defined(_KERNEL) || defined(_KMEMUSER))
47 #include <sys/cpuvar.h>
48 #include <sys/bitmap.h>
49 #include <sys/vnode.h>
50 #include <vm/anon.h>
51 #include <vm/seg.h>
52 #include <sys/lgrp_user.h>
53 #include <sys/param.h>
54 
typedef	uint32_t	lgrp_load_t;	/* lgrp_loadavg type */
typedef uintptr_t	lgrp_handle_t;	/* lgrp handle */

#define	LGRP_NONE_SUCH		LGRP_NONE	/* non-existent lgroup ID */
/* null platform handle */
#define	LGRP_NULL_HANDLE	((lgrp_handle_t)0xbadbad)
#define	LGRP_DEFAULT_HANDLE	((lgrp_handle_t)0xbabecafe) /* uma handle */
#define	LGRP_ROOTID		(0)		/* root lgroup ID */

/*
 * Maximum number of lgrps a platform may define.  Sizes lgrp_table[]
 * below and matches the 64 bits available in a klgrpset_t bitmap.
 */
#define	NLGRPS_MAX		64
#define	LGRP_LOADAVG_MAX	UINT32_MAX	/* lgrp_load_t is uint32_t */

/*
 * The load-average we expect for one cpu-bound thread's worth of load
 */
#define	LGRP_LOADAVG_THREAD_MAX		65516

/*
 * The input to the load-average generating function for one cpu-bound thread's
 * worth of load
 */

#define	LGRP_LOADAVG_IN_THREAD_MAX	128
81 
82 /*
83  * LPL actions
84  */
85 
86 typedef enum {
87 	LPL_INCREMENT,
88 	LPL_DECREMENT
89 } lpl_act_t;
90 
91 /*
92  * lgroup statistics.  Most of these are counters that are updated
93  * dynamically so they are hashed to CPU buckets to reduce cache
94  * interference.  The remaining statistics are snapshots of kernel
95  * data, so they aren't stored in the array of counter stats.
96  *
97  * For the hashed stats to make sense, you have to sum all the buckets for
98  * that stat, hence macros are provided to read the stats.
99  */
100 
101 #define	LGRP_NUM_CPU_BUCKETS	8	/* must be power of 2 */
102 #define	LGRP_CPU_BUCKET_MASK	(LGRP_NUM_CPU_BUCKETS - 1)
103 
104 /*
105  * Flags for what to do with lgroup memory policy
106  * Used for heap and stack where policy is extended to new segments added to
107  * the end
108  */
109 #define	LGRP_MP_FLAG_EXTEND_UP		0x1	/* policy should extend up */
110 #define	LGRP_MP_FLAG_EXTEND_DOWN	0x2	/* policy should extend down */
111 
112 #define	LGRP_STAT(stats, bucket, whichstat) \
113 	((stats)->ls_data[bucket][whichstat])
114 
115 /* Return a pointer suitable for an atomic 64-bit op on the bucket */
116 #define	LGRP_STAT_WRITE_PTR(stats, whichstat) \
117 	(&LGRP_STAT(stats, (CPU->cpu_id) & LGRP_CPU_BUCKET_MASK, \
118 	    whichstat))
119 
120 /* Sum up all the buckets and return the value in 'val' */
121 #define	LGRP_STAT_READ(stats, whichstat, val) {				\
122 	int bkt;							\
123 	for (val = 0, bkt = 0; bkt < LGRP_NUM_CPU_BUCKETS; bkt++)	\
124 		val += LGRP_STAT(stats, bkt, whichstat);		\
125 }
126 
127 /* Reset all buckets for the stat to 0 */
128 #define	LGRP_STAT_RESET(stats, stat) {					\
129 	int i;								\
130 	for (i = 0; i < LGRP_NUM_CPU_BUCKETS; i++)			\
131 		LGRP_STAT(stats, i, stat) = 0;				\
132 }
133 
134 /*
135  * Define all of the statistics that are kept for lgrp kstats,
136  * and their corresponding text names.
137  */
138 
139 typedef enum lgrp_stat_types {
140 	LGRP_NUM_MIGR,		/* # migrations away from this lgrp */
141 	LGRP_NUM_ALLOC_FAIL,	/* # times alloc fails for chosen lgrp */
142 	LGRP_PM_SRC_PGS,	/* # pages migrated from this lgrp */
143 	LGRP_PM_DEST_PGS,	/* # pages migrated to this lgrp */
144 	LGRP_PM_FAIL_ALLOC_PGS,	/* # pages failed to migrate to this lgrp */
145 	LGRP_PM_FAIL_LOCK_PGS,	/* # pages failed to migrate from this lgrp */
146 	LGRP_PMM_PGS,		/* # pages marked to migrate from this lgrp */
147 	LGRP_PMM_FAIL_PGS,	/* # pages marked to migrate from this lgrp */
148 	LGRP_NUM_DEFAULT,	/* # of times default policy applied */
149 	LGRP_NUM_NEXT,		/* # of times next touch policy applied */
150 	LGRP_NUM_RANDOM,	/* # of times random policy applied */
151 	LGRP_NUM_RANDOM_PROC,	/* # of times random proc policy applied */
152 	LGRP_NUM_RANDOM_PSET,	/* # of times random pset policy applied */
153 	LGRP_NUM_ROUNDROBIN,	/* # of times round robin policy applied */
154 	LGRP_NUM_NEXT_SEG,	/* # of times next to seg policy applied */
155 	LGRP_NUM_COUNTER_STATS,	/* always last */
156 	LGRP_CTR_STATS_ALLOC = 16	/* cache-align pad - multiple of 8 */
157 				/* always keep >= LGRP_NUM_COUNTER_STATS */
158 } lgrp_stat_t;
159 
/* Snapshot statistics: sampled kernel state, not hashed per-CPU counters */
typedef enum lgrp_snap_stat_types {
	LGRP_NUM_CPUS,		/* number of CPUs */
	LGRP_NUM_PG_FREE,	/* # of free pages */
	LGRP_NUM_PG_AVAIL,	/* # of allocatable physical pages */
	LGRP_NUM_PG_INSTALL,	/* # of installed physical pages */
	LGRP_LOADAVG,		/* unscaled load average of this lgrp */
	LGRP_LOADAVG_SCALE,	/* load unit of one CPU bound thread */
	LGRP_NUM_SNAPSHOT_STATS	/* always last */
} lgrp_snap_stat_t;
169 
/*
 * kstat name table.  Entry order must match lgrp_stat_t (counter stats)
 * followed by lgrp_snap_stat_t (snapshot stats) above.
 */
#define	LGRP_KSTAT_NAMES		\
static char *lgrp_kstat_names[] = {	\
					\
	/* Counter stats */		\
	"lwp migrations",		\
	"alloc fail",			\
	"pages migrated from",		\
	"pages migrated to",		\
	"pages failed to migrate to",	\
	"pages failed to migrate from",	\
	"pages marked for migration",	\
	"pages failed to mark",		\
	"default policy",		\
	"next-touch policy",		\
	"random policy",		\
	"span process policy",		\
	"span psrset policy",		\
	"round robin policy",		\
	"next-seg policy",		\
					\
	/* Snapshot stats */		\
	"cpus",				\
	"pages free",			\
	"pages avail",			\
	"pages installed",		\
	"load average",			\
	"loadscale"			\
}

/* Total number of kstat entries: counter stats plus snapshot stats */
#define	LGRP_NUM_STATS	((int)LGRP_NUM_COUNTER_STATS +			\
	(int)LGRP_NUM_SNAPSHOT_STATS)
201 
202 /*
203  * The contents of this structure are opaque and should only be
204  * accessed through the LGRP_STAT macro.
205  */
206 struct lgrp_stats {
207 	int64_t ls_data[LGRP_NUM_CPU_BUCKETS][LGRP_CTR_STATS_ALLOC];
208 };
209 
210 /* The kernel's version of a bitmap of lgroups */
211 typedef uint64_t klgrpset_t;
212 
213 /*
214  * This really belongs in memnode.h, but it must be defined here to avoid
215  * recursive inclusion problems. Note that memnode.h includes this header.
216  */
217 typedef	uint64_t	mnodeset_t;
218 
219 /*
220  * lgroup structure
221  *
222  * Visible to generic code and contains the lgroup ID, CPUs in this lgroup,
223  * and a platform handle used to identify this lgroup to the lgroup platform
224  * support code
225  */
226 typedef struct lgrp {
227 
228 	lgrp_id_t 	lgrp_id;	/* which lgroup	*/
229 	int		lgrp_latency;
230 	lgrp_handle_t  	lgrp_plathand;	/* handle for platform calls */
231 	struct lgrp	*lgrp_parent;	/* parent lgroup */
232 	uint_t		lgrp_reserved1;	/* filler */
233 	uint_t		lgrp_childcnt;	/* number of children lgroups */
234 	klgrpset_t	lgrp_children;	/* children lgroups */
235 	klgrpset_t	lgrp_leaves;	/* (direct decendant) leaf lgroups */
236 
237 	/*
238 	 * set of lgroups containing a given type of resource
239 	 * at this level of locality
240 	 */
241 	klgrpset_t	lgrp_set[LGRP_RSRC_COUNT];
242 
243 	mnodeset_t	lgrp_mnodes;	/* set of memory nodes in this lgroup */
244 	uint_t		lgrp_nmnodes;	/* number of memnodes */
245 	uint_t		lgrp_reserved2;	/* filler */
246 
247 	struct cpu	*lgrp_cpu;	/* pointer to a cpu may be null */
248 	uint_t		lgrp_cpucnt;	/* number of cpus in this lgrp	*/
249 	kstat_t		*lgrp_kstat;	/* per-lgrp kstats */
250 } lgrp_t;
251 
252 /*
253  * lgroup load average structure
254  */
255 
256 typedef struct lgrp_ld {
257 	lgrp_load_t	lpl_loadavg;	/* load average		*/
258 	uint_t		lpl_ncpu;	/* how many cpus	*/
259 	lgrp_id_t	lpl_lgrpid;	/* which group this lpl part of */
260 	lgrp_t		*lpl_lgrp;	/* ptr to lpl's lgrp */
261 	struct lgrp_ld	*lpl_parent;	/* lpl of parent lgrp */
262 	struct cpu	*lpl_cpus;	/* list of cpus in lpl */
263 					/* NULL for non-leaf lgrps */
264 	uint_t		lpl_nrset;	/* no. of leaf lpls for lgrp */
265 	hrtime_t	lpl_homed_time;	/* time of last homing to this lpl */
266 	uint_t		lpl_rset_sz;	/* Resource set capacity */
267 	struct lgrp_ld	**lpl_rset;	/* leaf lpls for lgrp */
268 					/* contains ptr to self for leaf lgrp */
269 	int		*lpl_id2rset;	/* mapping of lgrpid to rset index */
270 } lpl_t;
271 
272 /*
273  * 1 << LGRP_MAX_EFFECT_SHFT ==  lgrp_loadavg_max_effect
274  */
275 #define	LGRP_MAX_EFFECT_SHFT 16
276 
277 /*
278  * Operations handled by lgrp_config()
279  */
280 typedef enum lgrp_config_flag {
281 	LGRP_CONFIG_NOP,
282 	LGRP_CONFIG_CPU_ADD,
283 	LGRP_CONFIG_CPU_DEL,
284 	LGRP_CONFIG_CPU_ONLINE,
285 	LGRP_CONFIG_CPU_OFFLINE,
286 	LGRP_CONFIG_CPUPART_ADD,
287 	LGRP_CONFIG_CPUPART_DEL,
288 	LGRP_CONFIG_MEM_ADD,
289 	LGRP_CONFIG_MEM_DEL,
290 	LGRP_CONFIG_MEM_RENAME,
291 	LGRP_CONFIG_GEN_UPDATE,
292 	LGRP_CONFIG_FLATTEN,
293 	LGRP_CONFIG_LAT_CHANGE_ALL,
294 	LGRP_CONFIG_LAT_CHANGE
295 } lgrp_config_flag_t;
296 
297 /*
298  * Stages of lgroup framework initialization (done through lgrp_init()):
299  *
300  * 1) Initialize common and platform specific code (called in mlsetup())
301  *
302  * 2) Setup root lgroup and add CPU 0 to lgroup(s) (called near beginning of
303  *    main() before startup())
304  *
305  * 3) Probe from CPU 0 and copy and release any BOP_ALLOC-ed memory temporarily
306  *    allocated before kernel memory allocator is setup (called in main()
307  *    after startup(), gethrtime() is setup, and before interrupts enabled)
308  *
309  * 4) Check for null proc LPA on Starcat, collapse lgroup topology (if
310  *    necessary), setup lgroup kstats, etc. (called before start_other_cpus())
311  *
312  * 5) Finish any lgroup initialization needed including updating lgroup
313  *    topology after all CPUs started (called after start_other_cpus())
314  */
315 typedef enum lgrp_init_stages {
316 	LGRP_INIT_STAGE1,
317 	LGRP_INIT_STAGE2,
318 	LGRP_INIT_STAGE3,
319 	LGRP_INIT_STAGE4,
320 	LGRP_INIT_STAGE5
321 } lgrp_init_stages_t;
322 
323 /*
324  * Memory allocation policies
325  */
326 typedef enum lgrp_mem_policy {
327 	LGRP_MEM_POLICY_DEFAULT,
328 	LGRP_MEM_POLICY_NEXT,		/* near LWP to next touch */
329 	LGRP_MEM_POLICY_RANDOM_PROC,	/* randomly across process */
330 	LGRP_MEM_POLICY_RANDOM_PSET,	/* randomly across processor set */
331 	LGRP_MEM_POLICY_RANDOM,		/* randomly across all lgroups */
332 	LGRP_MEM_POLICY_ROUNDROBIN,	/* round robin across all lgroups */
333 	LGRP_MEM_POLICY_NEXT_CPU,	/* Near next CPU to touch memory */
334 	LGRP_MEM_POLICY_NEXT_SEG,	/* lgrp specified directly by seg */
335 	LGRP_NUM_MEM_POLICIES
336 } lgrp_mem_policy_t;
337 
338 /*
339  * Search scopes for finding resouces
340  */
341 typedef	enum lgrp_res_ss {
342 	LGRP_SRCH_LOCAL,		/* Search local lgroup only */
343 	LGRP_SRCH_HIER			/* Search entire hierarchy */
344 } lgrp_res_ss_t;
345 
346 /*
347  * Cookie used for lgrp mnode selection
348  */
349 typedef struct lgrp_mnode_cookie {
350 	lgrp_t		*lmc_lgrp;	/* lgrp under consideration */
351 	mnodeset_t	lmc_nodes;	/* nodes not yet tried in lgrp */
352 	int		lmc_cnt;	/* how many nodes in untried set */
353 	mnodeset_t	lmc_tried;	/* nodes already tried */
354 	int		lmc_ntried;	/* how many nodes in tried set */
355 	lgrp_res_ss_t	lmc_scope;	/* consider non-local nodes? */
356 	ushort_t	lmc_rand;	/* a "random" number */
357 } lgrp_mnode_cookie_t;
358 
359 /*
360  * Information needed to implement memory allocation policy
361  */
362 typedef struct lgrp_mem_policy_info {
363 	int		mem_policy;		/* memory allocation policy */
364 	lgrp_id_t	mem_lgrpid;		/* lgroup id */
365 } lgrp_mem_policy_info_t;
366 
367 /*
368  * Shared memory policy segment
369  */
370 typedef struct lgrp_shm_policy_seg {
371 	u_offset_t		shm_off;	/* offset into shared object */
372 	size_t			shm_size;	/* size of segment */
373 	lgrp_mem_policy_info_t	shm_policy;	/* memory allocation policy */
374 	avl_node_t		shm_tree;	/* AVL tree */
375 } lgrp_shm_policy_seg_t;
376 
377 /*
378  * Shared memory locality info
379  */
380 typedef struct lgrp_shm_locality {
381 	size_t		loc_count;		/* reference count */
382 	avl_tree_t	*loc_tree;		/* policy segment tree */
383 	krwlock_t	loc_lock;		/* protects tree */
384 } lgrp_shm_locality_t;
385 
386 /*
387  * Queries that may be made to determine lgroup memory size
388  */
389 typedef enum {
390 	LGRP_MEM_SIZE_FREE,		/* number of free pages */
391 	LGRP_MEM_SIZE_AVAIL,		/* number of pages in phys_avail */
392 	LGRP_MEM_SIZE_INSTALL		/* number of pages in phys_install */
393 } lgrp_mem_query_t;
394 
395 /*
396  * Argument for the memory copy-rename operation, contains the source and the
397  * destination platform handles.
398  */
399 typedef struct lgrp_config_mem_rename {
400 	lgrp_handle_t lmem_rename_from;
401 	lgrp_handle_t lmem_rename_to;
402 } lgrp_config_mem_rename_t;
403 
/*
 * klgrpset_t bitmap manipulation macros.  Each expansion is fully
 * parenthesized so the macros compose safely within larger expressions.
 */

/* Macro to clear an lgroup bitmap */
#define	klgrpset_clear(klgrpset) \
	((klgrpset) = (klgrpset_t)0)

/* Macro to fill an lgroup bitmap */
#define	klgrpset_fill(klgrpset) \
	((klgrpset) = (klgrpset_t)(-1))

/* Macro to add an lgroup to an lgroup bitmap */
#define	klgrpset_add(klgrpset, lgrpid) \
	((klgrpset) |= ((klgrpset_t)1 << (lgrpid)))

/* Macro to delete an lgroup from an lgroup bitmap */
#define	klgrpset_del(klgrpset, lgrpid) \
	((klgrpset) &= ~((klgrpset_t)1 << (lgrpid)))

/* Macro to copy a klgrpset into another klgrpset */
#define	klgrpset_copy(klgrpset_to, klgrpset_from) \
	((klgrpset_to) = (klgrpset_from))

/* Macro to perform an 'and' operation on a pair of lgroup bitmaps */
#define	klgrpset_and(klgrpset_rslt, klgrpset_arg) \
	((klgrpset_rslt) &= (klgrpset_arg))

/* Macro to perform an 'or' operation on a pair of lgroup bitmaps */
#define	klgrpset_or(klgrpset_rslt, klgrpset_arg) \
	((klgrpset_rslt) |= (klgrpset_arg))

/* Macro to perform a 'diff' operation on a pair of lgroup bitmaps */
#define	klgrpset_diff(klgrpset_rslt, klgrpset_arg) \
	((klgrpset_rslt) &= ~(klgrpset_arg))

/* Macro to check if an lgroup is a member of an lgrpset (nonzero if so) */
#define	klgrpset_ismember(klgrpset, lgrpid) \
	((klgrpset) & ((klgrpset_t)1 << (lgrpid)))

/* Macro to check if an lgroup bitmap is empty */
#define	klgrpset_isempty(klgrpset) \
	((klgrpset) == (klgrpset_t)0)

/* Macro to check if two lgrpsets intersect (nonzero if so) */
#define	klgrpset_intersects(klgrpset1, klgrpset2) \
	((klgrpset1) & (klgrpset2))
447 
/*
 * Macro to count the number of members in an lgrpset.  Iterates over all
 * allocated lgroup IDs (0 .. lgrp_alloc_max).  Wrapped in do/while (0) so
 * it expands to a single statement and is safe in unbraced if/else bodies.
 */
#define	klgrpset_nlgrps(klgrpset, count)				\
	do {								\
		lgrp_id_t	lgrpid;					\
		for (lgrpid = 0, count = 0; lgrpid <= lgrp_alloc_max;	\
		    lgrpid++) {						\
			if (klgrpset_ismember(klgrpset, lgrpid))	\
				count++;				\
		}							\
	} while (0)

/*
 * Macro to get total memory size (in bytes) of a given set of lgroups:
 * sums lgrp_plat_mem_size(LGRP_MEM_SIZE_AVAIL) over every member lgroup
 * with a non-NULL lgrp_table[] entry.  Single-statement macro, as above.
 */
#define	klgrpset_totalsize(klgrpset, size)				\
	do {								\
		lgrp_handle_t	hand;					\
		lgrp_id_t	lgrpid;					\
									\
		for (lgrpid = 0, size = 0; lgrpid <= lgrp_alloc_max;	\
		    lgrpid++) {						\
			if (klgrpset_ismember(klgrpset, lgrpid) &&	\
			    lgrp_table[lgrpid])	{			\
				hand = lgrp_table[lgrpid]->lgrp_plathand; \
				size += lgrp_plat_mem_size(hand,	\
				    LGRP_MEM_SIZE_AVAIL) * PAGESIZE;	\
			}						\
		}							\
	} while (0)
473 
474 /*
475  * Does this lgroup exist?
476  */
477 #define	LGRP_EXISTS(lgrp)	\
478 	(lgrp != NULL && lgrp->lgrp_id != LGRP_NONE)
479 
480 /*
481  * Macro for testing if a CPU is contained in an lgrp.
482  */
483 #define	LGRP_CONTAINS_CPU(lgrp, cpu)	\
484 	(klgrpset_ismember(lgrp->lgrp_set[LGRP_RSRC_CPU],	\
485 	    cpu->cpu_lpl->lpl_lgrpid))
486 
487 /*
488  * Initialize an lgrp_mnode_cookie
489  */
490 #define	LGRP_MNODE_COOKIE_INIT(c, lgrp, scope)	\
491 {							\
492 	bzero(&(c), sizeof (lgrp_mnode_cookie_t));	\
493 	(&(c))->lmc_lgrp = lgrp;			\
494 	(&(c))->lmc_nodes = lgrp->lgrp_mnodes;		\
495 	(&(c))->lmc_cnt = lgrp->lgrp_nmnodes;		\
496 	(&(c))->lmc_scope = scope;			\
497 	(&(c))->lmc_rand = (ushort_t)gethrtime_unscaled() >> 4;	\
498 }
499 
500 /*
501  * Upgrade cookie scope from LGRP_SRCH_LOCAL to LGRP_SRCH_HIER.
502  */
503 #define	LGRP_MNODE_COOKIE_UPGRADE(c)	\
504 {							\
505 	ASSERT((&(c))->lmc_scope == LGRP_SRCH_LOCAL);	\
506 	(&(c))->lmc_scope = LGRP_SRCH_HIER;		\
507 }
508 
509 /*
510  * Macro to see whether memory allocation policy can be reapplied
511  */
512 #define	LGRP_MEM_POLICY_REAPPLICABLE(p) \
513 	(p == LGRP_MEM_POLICY_NEXT)
514 
515 /*
516  * Return true if lgrp has CPU resources in the cpupart
517  */
518 #define	LGRP_CPUS_IN_PART(lgrpid, cpupart) \
519 	(cpupart->cp_lgrploads[lgrpid].lpl_ncpu > 0)
520 
extern int	lgrp_alloc_max;	/* highest allocated lgroup ID (loops above use <=) */
extern lgrp_t	*lgrp_table[NLGRPS_MAX];	/* indexed by lgrp_id */
extern int		nlgrps;		/* number of lgroups in machine */
extern int		nlgrpsmax;	/* max number of lgroups on platform */
extern lgrp_gen_t	lgrp_gen;	/* generation of lgroup hierarchy */
extern int		lgrp_initialized; /* single-CPU initialization done */
extern int		lgrp_topo_initialized; /* lgrp topology constructed */
extern lgrp_t		*lgrp_root;	/* root lgroup */
extern unsigned int	lgrp_topo_levels; /* levels in topology -- confirm */
extern lpl_t		*lpl_bootstrap;	/* bootstrap lpl for non-active CPUs */
531 
532 
/* generic interfaces */

/*
 * lgroup management (creation/destruction, staged init, reconfiguration)
 */
int	lgrp_optimizations(void);
void	lgrp_init(lgrp_init_stages_t);	/* see init stage comments above */
lgrp_t	*lgrp_create(void);
void	lgrp_destroy(lgrp_t *);
void	lgrp_config(lgrp_config_flag_t, uintptr_t, uintptr_t);
lgrp_t	*lgrp_hand_to_lgrp(lgrp_handle_t);

/*
 * lgroup stats (see lgrp_stat_t and the LGRP_STAT macros above)
 */
void	lgrp_kstat_create(struct cpu *);
void	lgrp_kstat_destroy(struct cpu *);
void	lgrp_stat_add(lgrp_id_t, lgrp_stat_t, int64_t);
int64_t lgrp_stat_read(lgrp_id_t, lgrp_stat_t);

/*
 * lgroup memory (policy selection/application and lgroup/mnode lookup)
 */
lgrp_mem_policy_t	lgrp_madv_to_policy(uchar_t, size_t, int);
pgcnt_t	lgrp_mem_size(lgrp_id_t, lgrp_mem_query_t);
lgrp_t	*lgrp_mem_choose(struct seg *, caddr_t, size_t);
int	lgrp_memnode_choose(lgrp_mnode_cookie_t *);
lgrp_mem_policy_t	lgrp_mem_policy_default(size_t, int);
int	lgrp_mnode_update(klgrpset_t, klgrpset_t *);
lgrp_t	*lgrp_pfn_to_lgrp(pfn_t);
lgrp_t	*lgrp_phys_to_lgrp(u_longlong_t);	/* used by numat driver */
int	lgrp_privm_policy_set(lgrp_mem_policy_t, lgrp_mem_policy_info_t *,
    size_t);
void	lgrp_shm_policy_init(struct anon_map *, vnode_t *);
void	lgrp_shm_policy_fini(struct anon_map *, vnode_t *);
lgrp_mem_policy_info_t	*lgrp_shm_policy_get(struct anon_map *, ulong_t,
    vnode_t *, u_offset_t);
int	lgrp_shm_policy_set(lgrp_mem_policy_t, struct anon_map *, ulong_t,
    vnode_t *, u_offset_t, size_t);

/*
 * Used by numat driver
 */
int	lgrp_query_cpu(processorid_t, lgrp_id_t *);
int	lgrp_query_load(processorid_t, lgrp_load_t *);
578 
579 /*
580  * lgroup thread placement
581  */
582 lpl_t	*lgrp_affinity_best(kthread_t *, struct cpupart *, lgrp_id_t,
583     boolean_t);
584 void	lgrp_affinity_init(lgrp_affinity_t **);
585 void	lgrp_affinity_free(lgrp_affinity_t **);
586 lpl_t	*lgrp_choose(kthread_t *t, struct cpupart *);
587 lgrp_t	*lgrp_home_lgrp(void);
588 lgrp_id_t	lgrp_home_id(kthread_t *);
589 void	lgrp_loadavg(lpl_t *, uint_t, int);
590 void	lgrp_move_thread(kthread_t *, lpl_t *, int);
591 uint64_t lgrp_get_trthr_migrations(void);
592 void 	lgrp_update_trthr_migrations(uint64_t);
593 
594 /*
595  * lgroup topology
596  */
597 int	lgrp_leaf_add(lgrp_t *, lgrp_t **, int, klgrpset_t *);
598 int	lgrp_leaf_delete(lgrp_t *, lgrp_t **, int, klgrpset_t *);
599 int	lgrp_rsets_empty(klgrpset_t *);
600 int	lgrp_rsets_member(klgrpset_t *, lgrp_id_t);
601 int	lgrp_topo_flatten(int, lgrp_t **, int, klgrpset_t *);
602 int	lgrp_topo_ht_limit(void);
603 int	lgrp_topo_ht_limit_default(void);
604 int	lgrp_topo_ht_limit_set(int);
605 int	lgrp_topo_update(lgrp_t **, int, klgrpset_t *);
606 
607 /*
608  * lpl topology
609  */
610 void	lpl_topo_bootstrap(lpl_t *, int);
611 int	lpl_topo_flatten(int);
612 int	lpl_topo_verify(struct cpupart *);
613 
614 
615 /* platform interfaces */
616 void	lgrp_plat_init(lgrp_init_stages_t);
617 lgrp_t	*lgrp_plat_alloc(lgrp_id_t lgrpid);
618 void	lgrp_plat_config(lgrp_config_flag_t, uintptr_t);
619 lgrp_handle_t	lgrp_plat_cpu_to_hand(processorid_t);
620 lgrp_handle_t	lgrp_plat_pfn_to_hand(pfn_t);
621 int	lgrp_plat_max_lgrps(void);
622 pgcnt_t	lgrp_plat_mem_size(lgrp_handle_t, lgrp_mem_query_t);
623 int	lgrp_plat_latency(lgrp_handle_t, lgrp_handle_t);
624 lgrp_handle_t	lgrp_plat_root_hand(void);
625 
626 extern uint32_t		lgrp_expand_proc_thresh;
627 extern uint32_t		lgrp_expand_proc_diff;
628 extern pgcnt_t		lgrp_mem_free_thresh;
629 extern uint32_t		lgrp_loadavg_tolerance;
630 extern uint32_t		lgrp_loadavg_max_effect;
631 extern uint32_t		lgrp_load_thresh;
632 extern lgrp_mem_policy_t lgrp_mem_policy_root;
633 
634 #endif	/* _KERNEL && _KMEMUSER */
635 
636 #ifdef	__cplusplus
637 }
638 #endif
639 
640 #endif /* _LGRP_H */
641