xref: /titanic_51/usr/src/uts/sun4/os/lgrpplat.c (revision 8eea8e29cc4374d1ee24c25a07f45af132db3499)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 
30 #include <sys/cpuvar.h>
31 #include <sys/lgrp.h>
32 #include <sys/memnode.h>
33 #include <sys/mman.h>
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/types.h>
37 #include <vm/seg_spt.h>
38 #include <vm/seg_vn.h>
39 
40 #include <sys/errno.h>
41 #include <sys/kstat.h>
42 #include <sys/cmn_err.h>
43 #include <sys/memlist.h>
44 #include <sys/sysmacros.h>
45 
46 /*
47  * Platform-specific support for lgroups common to sun4 based platforms.
48  *
49  * Those sun4 platforms wanting default lgroup behavior build with
50  * MAX_MEM_NODES = 1.  Those sun4 platforms wanting other than default
51  * lgroup behavior build with MAX_MEM_NODES > 1 and provide unique
52  * definitions to replace the #pragma weak interfaces.
53  */
54 
55 /*
56  * For now, there are 0 or 1 memnodes per lgroup on sun4 based platforms,
57  * plus the root lgroup.
58  */
59 #define	NLGRP	(MAX_MEM_NODES + 1)
60 
61 /*
62  * Allocate lgrp and lgrp stat arrays statically.
63  */
64 struct lgrp_stats lgrp_stats[NLGRP];
65 
66 static int nlgrps_alloc;
67 static lgrp_t lgrp_space[NLGRP];
68 
69 /*
70  * Arrays mapping lgroup handles to memnodes and vice versa.  This helps
71  * manage a copy-rename operation during DR, which moves memory from one
72  * board to another without changing addresses/pfns or memnodes.
73  */
74 int lgrphand_to_memnode[MAX_MEM_NODES];
75 int memnode_to_lgrphand[MAX_MEM_NODES];
76 
77 static pgcnt_t lgrp_plat_mem_size_default(lgrp_handle_t, lgrp_mem_query_t);
78 int plat_lgrphand_to_mem_node(lgrp_handle_t);
79 lgrp_handle_t plat_mem_node_to_lgrphand(int);
80 void plat_assign_lgrphand_to_mem_node(lgrp_handle_t, int);
81 
/*
 * Default sun4 lgroup interfaces which should be overridden
 * by the platform module.
 */
86 extern void plat_lgrp_init(void);
87 extern void plat_lgrp_config(lgrp_config_flag_t, uintptr_t);
88 extern lgrp_handle_t plat_lgrp_cpu_to_hand(processorid_t);
89 extern int plat_lgrp_latency(lgrp_handle_t, lgrp_handle_t);
90 extern lgrp_handle_t plat_lgrp_root_hand(void);
91 
92 #pragma weak plat_lgrp_init
93 #pragma weak plat_lgrp_config
94 #pragma weak plat_lgrp_cpu_to_hand
95 #pragma weak plat_lgrp_latency
96 #pragma weak plat_lgrp_root_hand
97 
98 int mpo_disabled = 0;
99 lgrp_handle_t lgrp_default_handle = LGRP_DEFAULT_HANDLE;
100 
101 void
102 lgrp_plat_init(void)
103 {
104 	int i;
105 
106 	/*
107 	 * Initialize lookup tables to invalid values so we catch
108 	 * any illegal use of them.
109 	 */
110 	for (i = 0; i < MAX_MEM_NODES; i++) {
111 		memnode_to_lgrphand[i] = -1;
112 		lgrphand_to_memnode[i] = -1;
113 	}
114 
115 	if (lgrp_topo_ht_limit() == 1) {
116 		max_mem_nodes = 1;
117 		return;
118 	}
119 
120 	if (&plat_lgrp_cpu_to_hand)
121 		max_mem_nodes = MAX_MEM_NODES;
122 
123 	if (&plat_lgrp_init)
124 		plat_lgrp_init();
125 }
126 
/*
 * Later-stage lgroup platform initialization hook invoked by the common
 * lgroup framework; intentionally empty in the common sun4 code.
 */
void
lgrp_plat_main_init(void)
{
}
131 
132 /* ARGSUSED */
133 void
134 lgrp_plat_config(lgrp_config_flag_t flag, uintptr_t arg)
135 {
136 	if (max_mem_nodes == 1)
137 		return;
138 
139 	if (&plat_lgrp_config) {
140 		plat_lgrp_config(flag, arg);
141 	}
142 }
143 
144 lgrp_handle_t
145 lgrp_plat_cpu_to_hand(processorid_t id)
146 {
147 	if (lgrp_topo_ht_limit() > 1 && &plat_lgrp_cpu_to_hand)
148 		return (plat_lgrp_cpu_to_hand(id));
149 	else
150 		return (LGRP_DEFAULT_HANDLE);
151 }
152 
153 /*
154  * Lgroup interfaces common to all sun4 platforms.
155  */
156 
157 /*
158  * Return the platform handle of the lgroup that contains the physical memory
159  * corresponding to the given page frame number
160  */
161 lgrp_handle_t
162 lgrp_plat_pfn_to_hand(pfn_t pfn)
163 {
164 	int	mnode;
165 
166 	if (lgrp_topo_ht_limit() == 1 || max_mem_nodes == 1)
167 		return (LGRP_DEFAULT_HANDLE);
168 
169 	if (pfn > physmax)
170 		return (LGRP_NULL_HANDLE);
171 
172 	mnode = PFN_2_MEM_NODE(pfn);
173 	return (MEM_NODE_2_LGRPHAND(mnode));
174 }
175 
/*
 * Return the maximum number of supported lgroups: one per memnode plus
 * the root lgroup (see the NLGRP definition above).
 */
int
lgrp_plat_max_lgrps(void)
{
	return (NLGRP);
}
184 
185 /*
186  * Return the number of free pages in an lgroup.
187  *
188  * For query of LGRP_MEM_SIZE_FREE, return the number of base pagesize
189  * pages on freelists.  For query of LGRP_MEM_SIZE_AVAIL, return the
190  * number of allocatable base pagesize pages corresponding to the
191  * lgroup (e.g. do not include page_t's, BOP_ALLOC()'ed memory, ..)
192  * For query of LGRP_MEM_SIZE_INSTALL, return the amount of physical
193  * memory installed, regardless of whether or not it's usable.
194  */
195 pgcnt_t
196 lgrp_plat_mem_size(lgrp_handle_t plathand, lgrp_mem_query_t query)
197 {
198 	int	mnode;
199 	pgcnt_t	npgs = (pgcnt_t)0;
200 	extern struct memlist *phys_avail;
201 	extern struct memlist *phys_install;
202 
203 
204 	if (lgrp_topo_ht_limit() == 1 || max_mem_nodes == 1 || mpo_disabled ||
205 	    plathand == LGRP_DEFAULT_HANDLE)
206 		return (lgrp_plat_mem_size_default(plathand, query));
207 
208 	if (plathand != LGRP_NULL_HANDLE) {
209 		mnode = plat_lgrphand_to_mem_node(plathand);
210 		if (mnode >= 0 && mem_node_config[mnode].exists) {
211 			switch (query) {
212 			case LGRP_MEM_SIZE_FREE:
213 				npgs = mem_node_config[mnode].cursize;
214 				break;
215 			case LGRP_MEM_SIZE_AVAIL:
216 				npgs = mem_node_memlist_pages(mnode,
217 				    phys_avail);
218 				break;
219 			case LGRP_MEM_SIZE_INSTALL:
220 				npgs = mem_node_memlist_pages(mnode,
221 				    phys_install);
222 				break;
223 			default:
224 				break;
225 			}
226 		}
227 	}
228 	return (npgs);
229 }
230 
231 /*
232  * Return latency between "from" and "to" lgroups
233  * If "from" or "to" is LGRP_NONE, then just return latency within other
234  * lgroup.  This latency number can only be used for relative comparison
235  * between lgroups on the running system, cannot be used across platforms,
236  * and may not reflect the actual latency.  It is platform and implementation
237  * specific, so platform gets to decide its value.
238  */
239 int
240 lgrp_plat_latency(lgrp_handle_t from, lgrp_handle_t to)
241 {
242 	if (lgrp_topo_ht_limit() > 1 && &plat_lgrp_latency)
243 		return (plat_lgrp_latency(from, to));
244 	else
245 		return (0);
246 }
247 
248 /*
249  * Return platform handle for root lgroup
250  */
251 lgrp_handle_t
252 lgrp_plat_root_hand(void)
253 {
254 	if (&plat_lgrp_root_hand)
255 		return (plat_lgrp_root_hand());
256 	else
257 		return (LGRP_DEFAULT_HANDLE);
258 }
259 
260 /* Internal interfaces */
261 /*
262  * Return the number of free, allocatable, or installed
263  * pages in an lgroup
264  * This is a copy of the MAX_MEM_NODES == 1 version of the routine
265  * used when MPO is disabled (i.e. single lgroup)
266  */
267 /* ARGSUSED */
268 static pgcnt_t
269 lgrp_plat_mem_size_default(lgrp_handle_t lgrphand, lgrp_mem_query_t query)
270 {
271 	extern struct memlist *phys_install;
272 	extern struct memlist *phys_avail;
273 	struct memlist *mlist;
274 	pgcnt_t npgs = 0;
275 
276 	switch (query) {
277 	case LGRP_MEM_SIZE_FREE:
278 		return ((pgcnt_t)freemem);
279 	case LGRP_MEM_SIZE_AVAIL:
280 		memlist_read_lock();
281 		for (mlist = phys_avail; mlist; mlist = mlist->next)
282 			npgs += btop(mlist->size);
283 		memlist_read_unlock();
284 		return (npgs);
285 	case LGRP_MEM_SIZE_INSTALL:
286 		memlist_read_lock();
287 		for (mlist = phys_install; mlist; mlist = mlist->next)
288 			npgs += btop(mlist->size);
289 		memlist_read_unlock();
290 		return (npgs);
291 	default:
292 		return ((pgcnt_t)0);
293 	}
294 }
295 
296 /*
297  * Return the memnode associated with the specified lgroup handle
298  */
299 int
300 plat_lgrphand_to_mem_node(lgrp_handle_t plathand)
301 {
302 	int mnode;
303 
304 	if (lgrp_topo_ht_limit() == 1 || mpo_disabled || max_mem_nodes == 1)
305 		return (-1);
306 
307 	/*
308 	 * We should always receive a valid pointer to a platform
309 	 * handle, as we can not choose the allocation policy in
310 	 * this layer.
311 	 */
312 	ASSERT((int)plathand >= 0 && (int)plathand < max_mem_nodes);
313 
314 	mnode = lgrphand_to_memnode[(int)plathand];
315 	return (mnode);
316 }
317 
318 lgrp_handle_t
319 plat_mem_node_to_lgrphand(int mnode)
320 {
321 	if (lgrp_topo_ht_limit() == 1 || mpo_disabled || max_mem_nodes == 1)
322 		return (lgrp_default_handle);
323 
324 	ASSERT(mnode >= 0 && mnode < max_mem_nodes);
325 	return (memnode_to_lgrphand[mnode]);
326 }
327 
328 void
329 plat_assign_lgrphand_to_mem_node(lgrp_handle_t plathand, int mnode)
330 {
331 	if (lgrp_topo_ht_limit() == 1 || mpo_disabled || max_mem_nodes == 1)
332 		return;
333 
334 	ASSERT(plathand < max_mem_nodes);
335 	ASSERT(mnode >= 0 && mnode < max_mem_nodes);
336 
337 	lgrphand_to_memnode[plathand] = mnode;
338 	memnode_to_lgrphand[mnode] = plathand;
339 }
340 
341 lgrp_t *
342 lgrp_plat_alloc(lgrp_id_t lgrpid)
343 {
344 	lgrp_t *lgrp;
345 
346 	lgrp = &lgrp_space[nlgrps_alloc++];
347 	if (lgrpid >= NLGRP || nlgrps_alloc > NLGRP)
348 		return (NULL);
349 	return (lgrp);
350 }
351 
/*
 * Do any platform specific preparation and/or building of full lgroup
 * topology; intentionally empty in the common sun4 code.
 */
void
lgrp_plat_build_topo(void)
{
}
359 
/*
 * Probe memory in each node from current CPU to determine latency
 * topology; intentionally empty in the common sun4 code.
 */
void
lgrp_plat_probe(void)
{
}
367