/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/cpuvar.h>
#include <sys/lgrp.h>
#include <sys/memnode.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <vm/seg_spt.h>
#include <vm/seg_vn.h>
#include <vm/vm_dep.h>

#include <sys/errno.h>
#include <sys/kstat.h>
#include <sys/cmn_err.h>
#include <sys/memlist.h>
#include <sys/sysmacros.h>

/*
 * Platform-specific support for lgroups common to sun4 based platforms.
 *
 * Those sun4 platforms wanting default lgroup behavior build with
 * MAX_MEM_NODES = 1.  Those sun4 platforms wanting other than default
 * lgroup behavior build with MAX_MEM_NODES > 1 and provide unique
 * definitions to replace the #pragma weak interfaces.
 */

/*
 * For now, there are 0 or 1 memnodes per lgroup on sun4 based platforms,
 * plus the root lgroup.
 */
#define	NLGRP	(MAX_MEM_NODES + 1)

/*
 * Allocate lgrp and lgrp stat arrays statically.
 */
struct lgrp_stats lgrp_stats[NLGRP];

static int nlgrps_alloc;
static lgrp_t lgrp_space[NLGRP];

/*
 * Arrays mapping lgroup handles to memnodes and vice versa.  This helps
 * manage a copy-rename operation during DR, which moves memory from one
 * board to another without changing addresses/pfns or memnodes.
 */
int lgrphand_to_memnode[MAX_MEM_NODES];
int memnode_to_lgrphand[MAX_MEM_NODES];
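
/*
 * Illustrative sketch (not from this file): when a DR copy-rename moves the
 * contents of one board's memory onto another, the platform keeps these two
 * tables consistent by re-pointing the affected handles at the memnodes that
 * still cover their pfn ranges, e.g. (handle and memnode names below are
 * hypothetical):
 *
 *	plat_assign_lgrphand_to_mem_node(src_hand, tgt_mnode);
 *	plat_assign_lgrphand_to_mem_node(tgt_hand, src_mnode);
 *
 * so lookups through MEM_NODE_2_LGRPHAND() and plat_lgrphand_to_mem_node()
 * keep resolving correctly even though the pfns and memnodes themselves do
 * not change.
 */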

static pgcnt_t lgrp_plat_mem_size_default(lgrp_handle_t, lgrp_mem_query_t);
int plat_lgrphand_to_mem_node(lgrp_handle_t);
lgrp_handle_t plat_mem_node_to_lgrphand(int);
void plat_assign_lgrphand_to_mem_node(lgrp_handle_t, int);

/*
 * Default sun4 lgroup interfaces, which should be overridden by the
 * platform module (an illustrative override sketch follows the
 * #pragma weak directives below).
 */
extern void plat_lgrp_init(void);
extern void plat_lgrp_config(lgrp_config_flag_t, uintptr_t);
extern lgrp_handle_t plat_lgrp_cpu_to_hand(processorid_t);
extern int plat_lgrp_latency(lgrp_handle_t, lgrp_handle_t);
extern lgrp_handle_t plat_lgrp_root_hand(void);

#pragma weak plat_lgrp_init
#pragma weak plat_lgrp_config
#pragma weak plat_lgrp_cpu_to_hand
#pragma weak plat_lgrp_latency
#pragma weak plat_lgrp_root_hand
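
/*
 * Illustrative sketch (not part of this file): a platform wanting more than
 * the default single-lgroup behavior builds with MAX_MEM_NODES > 1 and
 * supplies strong definitions for the weak interfaces above in its platform
 * module, which the linker then uses in place of the defaults here.  A
 * minimal, hypothetical override could look like the following, where
 * example_cpu_to_board() stands in for whatever CPU-to-board mapping the
 * platform really uses:
 *
 *	lgrp_handle_t
 *	plat_lgrp_cpu_to_hand(processorid_t id)
 *	{
 *		return ((lgrp_handle_t)example_cpu_to_board(id));
 *	}
 */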

int mpo_disabled = 0;
lgrp_handle_t lgrp_default_handle = LGRP_DEFAULT_HANDLE;

void
lgrp_plat_init(void)
{
	int i;

	/*
	 * Initialize lookup tables to invalid values so we catch
	 * any illegal use of them.
	 */
	for (i = 0; i < MAX_MEM_NODES; i++) {
		memnode_to_lgrphand[i] = -1;
		lgrphand_to_memnode[i] = -1;
	}

	if (lgrp_topo_ht_limit() == 1) {
		max_mem_nodes = 1;
		return;
	}

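	/*
	 * plat_lgrp_cpu_to_hand and plat_lgrp_init are weak symbols (see
	 * the #pragma weak directives above), so their addresses are
	 * non-NULL only when the platform module supplies definitions;
	 * otherwise this file keeps the single-lgroup defaults.
	 */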
	if (&plat_lgrp_cpu_to_hand)
		max_mem_nodes = MAX_MEM_NODES;

	if (&plat_lgrp_init)
		plat_lgrp_init();
}

void
lgrp_plat_main_init(void)
{
}

/* ARGSUSED */
void
lgrp_plat_config(lgrp_config_flag_t flag, uintptr_t arg)
{
	if (max_mem_nodes == 1)
		return;

	if (&plat_lgrp_config) {
		plat_lgrp_config(flag, arg);
	}
}

lgrp_handle_t
lgrp_plat_cpu_to_hand(processorid_t id)
{
	if (lgrp_topo_ht_limit() > 1 && &plat_lgrp_cpu_to_hand)
		return (plat_lgrp_cpu_to_hand(id));
	else
		return (LGRP_DEFAULT_HANDLE);
}

/*
 * Lgroup interfaces common to all sun4 platforms.
 */

/*
 * Return the platform handle of the lgroup that contains the physical memory
 * corresponding to the given page frame number
 */
lgrp_handle_t
lgrp_plat_pfn_to_hand(pfn_t pfn)
{
	int	mnode;

	if (lgrp_topo_ht_limit() == 1 || max_mem_nodes == 1)
		return (LGRP_DEFAULT_HANDLE);

	if (pfn > physmax)
		return (LGRP_NULL_HANDLE);

	mnode = PFN_2_MEM_NODE(pfn);
	if (mnode < 0)
		return (LGRP_NULL_HANDLE);

	return (MEM_NODE_2_LGRPHAND(mnode));
}

/*
 * Return the maximum number of supported lgroups
 */
int
lgrp_plat_max_lgrps(void)
{
	return (NLGRP);
}

/*
 * Return the number of pages in an lgroup for the given memory-size query.
 *
 * For a query of LGRP_MEM_SIZE_FREE, return the number of base pagesize
 * pages on freelists.  For a query of LGRP_MEM_SIZE_AVAIL, return the
 * number of allocatable base pagesize pages corresponding to the
 * lgroup (e.g. do not include page_t's, BOP_ALLOC()'ed memory, ..).
 * For a query of LGRP_MEM_SIZE_INSTALL, return the amount of physical
 * memory installed, regardless of whether or not it's usable.
 */
pgcnt_t
lgrp_plat_mem_size(lgrp_handle_t plathand, lgrp_mem_query_t query)
{
	int	mnode;
	pgcnt_t	npgs = (pgcnt_t)0;
	extern struct memlist *phys_avail;
	extern struct memlist *phys_install;

	if (lgrp_topo_ht_limit() == 1 || max_mem_nodes == 1 || mpo_disabled ||
	    plathand == LGRP_DEFAULT_HANDLE)
		return (lgrp_plat_mem_size_default(plathand, query));

	if (plathand != LGRP_NULL_HANDLE) {
		mnode = plat_lgrphand_to_mem_node(plathand);
		if (mnode >= 0 && mem_node_config[mnode].exists) {
			switch (query) {
			case LGRP_MEM_SIZE_FREE:
				npgs = MNODE_PGCNT(mnode);
				break;
			case LGRP_MEM_SIZE_AVAIL:
				npgs = mem_node_memlist_pages(mnode,
				    phys_avail);
				break;
			case LGRP_MEM_SIZE_INSTALL:
				npgs = mem_node_memlist_pages(mnode,
				    phys_install);
				break;
			default:
				break;
			}
		}
	}
	return (npgs);
}
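
/*
 * For example (illustrative only), a caller wanting the installed memory
 * behind a given platform handle would ask:
 *
 *	npgs = lgrp_plat_mem_size(hand, LGRP_MEM_SIZE_INSTALL);
 *
 * Passing LGRP_MEM_SIZE_FREE or LGRP_MEM_SIZE_AVAIL instead selects the
 * free or allocatable page counts described above.
 */
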
/*
 * Return the latency between the "from" and "to" lgroups.
 * If "from" or "to" is LGRP_NONE, return the latency within the other
 * lgroup instead.  This latency number can only be used for relative
 * comparison between lgroups on the running system; it cannot be used
 * across platforms and may not reflect the actual latency.  It is
 * platform and implementation specific, so the platform gets to decide
 * its value.
 */
int
lgrp_plat_latency(lgrp_handle_t from, lgrp_handle_t to)
{
	if (lgrp_topo_ht_limit() > 1 && &plat_lgrp_latency)
		return (plat_lgrp_latency(from, to));
	else
		return (0);
}

/*
 * Return platform handle for root lgroup
 */
lgrp_handle_t
lgrp_plat_root_hand(void)
{
	if (&plat_lgrp_root_hand)
		return (plat_lgrp_root_hand());
	else
		return (LGRP_DEFAULT_HANDLE);
}

/* Internal interfaces */
/*
 * Return the number of free, allocatable, or installed pages in an
 * lgroup.  This is a copy of the MAX_MEM_NODES == 1 version of the
 * routine, used when MPO is disabled (i.e. there is a single lgroup).
 */
/* ARGSUSED */
static pgcnt_t
lgrp_plat_mem_size_default(lgrp_handle_t lgrphand, lgrp_mem_query_t query)
{
	extern struct memlist *phys_install;
	extern struct memlist *phys_avail;
	struct memlist *mlist;
	pgcnt_t npgs = 0;

	switch (query) {
	case LGRP_MEM_SIZE_FREE:
		return ((pgcnt_t)freemem);
	case LGRP_MEM_SIZE_AVAIL:
		memlist_read_lock();
		for (mlist = phys_avail; mlist; mlist = mlist->next)
			npgs += btop(mlist->size);
		memlist_read_unlock();
		return (npgs);
	case LGRP_MEM_SIZE_INSTALL:
		memlist_read_lock();
		for (mlist = phys_install; mlist; mlist = mlist->next)
			npgs += btop(mlist->size);
		memlist_read_unlock();
		return (npgs);
	default:
		return ((pgcnt_t)0);
	}
}

/*
 * Return the memnode associated with the specified lgroup handle
 */
int
plat_lgrphand_to_mem_node(lgrp_handle_t plathand)
{
	int mnode;

	if (lgrp_topo_ht_limit() == 1 || mpo_disabled || max_mem_nodes == 1)
		return (-1);

	/*
	 * We should always receive a valid platform handle, since we
	 * cannot choose the allocation policy in this layer.
	 */
	ASSERT((int)plathand >= 0 && (int)plathand < max_mem_nodes);

	mnode = lgrphand_to_memnode[(int)plathand];
	return (mnode);
}

lgrp_handle_t
plat_mem_node_to_lgrphand(int mnode)
{
	if (lgrp_topo_ht_limit() == 1 || mpo_disabled || max_mem_nodes == 1)
		return (lgrp_default_handle);

	ASSERT(mnode >= 0 && mnode < max_mem_nodes);
	return (memnode_to_lgrphand[mnode]);
}

void
plat_assign_lgrphand_to_mem_node(lgrp_handle_t plathand, int mnode)
{
	if (lgrp_topo_ht_limit() == 1 || mpo_disabled || max_mem_nodes == 1)
		return;

	ASSERT(plathand < max_mem_nodes);
	ASSERT(mnode >= 0 && mnode < max_mem_nodes);

	lgrphand_to_memnode[plathand] = mnode;
	memnode_to_lgrphand[mnode] = plathand;
}

lgrp_t *
lgrp_plat_alloc(lgrp_id_t lgrpid)
{
	/*
	 * Check the bounds before indexing into lgrp_space[] so we never
	 * compute an out-of-range slot address.
	 */
	if (lgrpid >= NLGRP || nlgrps_alloc >= NLGRP)
		return (NULL);

	return (&lgrp_space[nlgrps_alloc++]);
}

/*
 * Probe memory in each node from current CPU to determine latency topology
 */
void
lgrp_plat_probe(void)
{
}