/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"


#include <sys/cpuvar.h>
#include <sys/lgrp.h>
#include <sys/memnode.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <vm/seg_spt.h>
#include <vm/seg_vn.h>
#include <vm/vm_dep.h>

#include <sys/errno.h>
#include <sys/kstat.h>
#include <sys/cmn_err.h>
#include <sys/memlist.h>
#include <sys/sysmacros.h>

/*
 * Platform-specific support for lgroups common to sun4 based platforms.
 *
 * Those sun4 platforms wanting default lgroup behavior build with
 * MAX_MEM_NODES = 1.  Those sun4 platforms wanting other than default
 * lgroup behavior build with MAX_MEM_NODES > 1 and provide unique
 * definitions to replace the #pragma weak interfaces.
 */

/*
 * For now, there are 0 or 1 memnodes per lgroup on sun4 based platforms,
 * plus the root lgroup.
 */
#define	NLGRP	(MAX_MEM_NODES + 1)

/*
 * Allocate lgrp and lgrp stat arrays statically.
 */
struct lgrp_stats lgrp_stats[NLGRP];

static int nlgrps_alloc;
static lgrp_t lgrp_space[NLGRP];

/*
 * Arrays mapping lgroup handles to memnodes and vice versa.  This helps
 * manage a copy-rename operation during DR, which moves memory from one
 * board to another without changing addresses/pfns or memnodes.
 * (An illustrative swap sequence appears at the end of this file.)
 */
int lgrphand_to_memnode[MAX_MEM_NODES];
int memnode_to_lgrphand[MAX_MEM_NODES];

static pgcnt_t lgrp_plat_mem_size_default(lgrp_handle_t, lgrp_mem_query_t);
int plat_lgrphand_to_mem_node(lgrp_handle_t);
lgrp_handle_t plat_mem_node_to_lgrphand(int);
void plat_assign_lgrphand_to_mem_node(lgrp_handle_t, int);

/*
 * Default sun4 lgroup interfaces which should be overridden
 * by platform module.
 */
extern void plat_lgrp_init(void);
extern void plat_lgrp_config(lgrp_config_flag_t, uintptr_t);
extern lgrp_handle_t plat_lgrp_cpu_to_hand(processorid_t);
extern int plat_lgrp_latency(lgrp_handle_t, lgrp_handle_t);
extern lgrp_handle_t plat_lgrp_root_hand(void);

#pragma weak plat_lgrp_init
#pragma weak plat_lgrp_config
#pragma weak plat_lgrp_cpu_to_hand
#pragma weak plat_lgrp_latency
#pragma weak plat_lgrp_root_hand

int mpo_disabled = 0;
lgrp_handle_t lgrp_default_handle = LGRP_DEFAULT_HANDLE;
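/*
 * Illustrative sketch (not part of the original source): a platform
 * module wanting non-default lgroup behavior replaces the weak symbols
 * above with strong definitions.  The interface name below is real; the
 * one-lgroup-per-board arithmetic and the CPUS_PER_BOARD constant are
 * hypothetical.
 *
 *	lgrp_handle_t
 *	plat_lgrp_cpu_to_hand(processorid_t id)
 *	{
 *		return ((lgrp_handle_t)(id / CPUS_PER_BOARD));
 *	}
 *
 * An unresolved weak symbol has address 0, which is why the code below
 * tests, e.g., &plat_lgrp_cpu_to_hand before calling through it.
 */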
void
lgrp_plat_init(void)
{
	int i;

	/*
	 * Initialize lookup tables to invalid values so we catch
	 * any illegal use of them.
	 */
	for (i = 0; i < MAX_MEM_NODES; i++) {
		memnode_to_lgrphand[i] = -1;
		lgrphand_to_memnode[i] = -1;
	}

	if (lgrp_topo_ht_limit() == 1) {
		max_mem_nodes = 1;
		return;
	}

	if (&plat_lgrp_cpu_to_hand)
		max_mem_nodes = MAX_MEM_NODES;

	if (&plat_lgrp_init)
		plat_lgrp_init();
}

void
lgrp_plat_main_init(void)
{
}

/* ARGSUSED */
void
lgrp_plat_config(lgrp_config_flag_t flag, uintptr_t arg)
{
	if (max_mem_nodes == 1)
		return;

	if (&plat_lgrp_config) {
		plat_lgrp_config(flag, arg);
	}
}

lgrp_handle_t
lgrp_plat_cpu_to_hand(processorid_t id)
{
	if (lgrp_topo_ht_limit() > 1 && &plat_lgrp_cpu_to_hand)
		return (plat_lgrp_cpu_to_hand(id));
	else
		return (LGRP_DEFAULT_HANDLE);
}

/*
 * Lgroup interfaces common to all sun4 platforms.
 */

/*
 * Return the platform handle of the lgroup that contains the physical memory
 * corresponding to the given page frame number.
 */
lgrp_handle_t
lgrp_plat_pfn_to_hand(pfn_t pfn)
{
	int	mnode;

	if (lgrp_topo_ht_limit() == 1 || max_mem_nodes == 1)
		return (LGRP_DEFAULT_HANDLE);

	if (pfn > physmax)
		return (LGRP_NULL_HANDLE);

	mnode = PFN_2_MEM_NODE(pfn);
	return (MEM_NODE_2_LGRPHAND(mnode));
}

/*
 * Return the maximum number of supported lgroups.
 */
int
lgrp_plat_max_lgrps(void)
{
	return (NLGRP);
}

/*
 * Return the number of free pages in an lgroup.
 *
 * For query of LGRP_MEM_SIZE_FREE, return the number of base pagesize
 * pages on freelists.  For query of LGRP_MEM_SIZE_AVAIL, return the
 * number of allocatable base pagesize pages corresponding to the
 * lgroup (e.g. do not include page_t's, BOP_ALLOC()'ed memory, ..)
 * For query of LGRP_MEM_SIZE_INSTALL, return the amount of physical
 * memory installed, regardless of whether or not it's usable.
 */
pgcnt_t
lgrp_plat_mem_size(lgrp_handle_t plathand, lgrp_mem_query_t query)
{
	int	mnode;
	pgcnt_t	npgs = (pgcnt_t)0;
	extern struct memlist *phys_avail;
	extern struct memlist *phys_install;


	if (lgrp_topo_ht_limit() == 1 || max_mem_nodes == 1 || mpo_disabled ||
	    plathand == LGRP_DEFAULT_HANDLE)
		return (lgrp_plat_mem_size_default(plathand, query));

	if (plathand != LGRP_NULL_HANDLE) {
		mnode = plat_lgrphand_to_mem_node(plathand);
		if (mnode >= 0 && mem_node_config[mnode].exists) {
			switch (query) {
			case LGRP_MEM_SIZE_FREE:
				npgs = MNODE_PGCNT(mnode);
				break;
			case LGRP_MEM_SIZE_AVAIL:
				npgs = mem_node_memlist_pages(mnode,
				    phys_avail);
				break;
			case LGRP_MEM_SIZE_INSTALL:
				npgs = mem_node_memlist_pages(mnode,
				    phys_install);
				break;
			default:
				break;
			}
		}
	}
	return (npgs);
}
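/*
 * Illustrative usage sketch (not part of the original source): comparing
 * installed to allocatable memory for an lgroup.  The handle "hand" is
 * assumed to have come from lgrp_plat_cpu_to_hand() or the like.
 *
 *	pgcnt_t avail = lgrp_plat_mem_size(hand, LGRP_MEM_SIZE_AVAIL);
 *	pgcnt_t inst = lgrp_plat_mem_size(hand, LGRP_MEM_SIZE_INSTALL);
 *
 * Here "inst - avail" approximates the pages consumed by page_t's,
 * BOP_ALLOC()'ed memory, and other unallocatable uses.
 */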
/*
 * Return latency between "from" and "to" lgroups.
 *
 * If "from" or "to" is LGRP_NONE, then just return latency within the other
 * lgroup.  This latency number can only be used for relative comparison
 * between lgroups on the running system; it cannot be used across platforms
 * and may not reflect the actual latency.  It is platform and implementation
 * specific, so the platform gets to decide its value.
 */
int
lgrp_plat_latency(lgrp_handle_t from, lgrp_handle_t to)
{
	if (lgrp_topo_ht_limit() > 1 && &plat_lgrp_latency)
		return (plat_lgrp_latency(from, to));
	else
		return (0);
}

/*
 * Return platform handle for root lgroup.
 */
lgrp_handle_t
lgrp_plat_root_hand(void)
{
	if (&plat_lgrp_root_hand)
		return (plat_lgrp_root_hand());
	else
		return (LGRP_DEFAULT_HANDLE);
}

/* Internal interfaces */

/*
 * Return the number of free, allocatable, or installed
 * pages in an lgroup.
 * This is a copy of the MAX_MEM_NODES == 1 version of the routine
 * used when MPO is disabled (i.e. single lgroup).
 */
/* ARGSUSED */
static pgcnt_t
lgrp_plat_mem_size_default(lgrp_handle_t lgrphand, lgrp_mem_query_t query)
{
	extern struct memlist *phys_install;
	extern struct memlist *phys_avail;
	struct memlist *mlist;
	pgcnt_t npgs = 0;

	switch (query) {
	case LGRP_MEM_SIZE_FREE:
		return ((pgcnt_t)freemem);
	case LGRP_MEM_SIZE_AVAIL:
		memlist_read_lock();
		for (mlist = phys_avail; mlist; mlist = mlist->next)
			npgs += btop(mlist->size);
		memlist_read_unlock();
		return (npgs);
	case LGRP_MEM_SIZE_INSTALL:
		memlist_read_lock();
		for (mlist = phys_install; mlist; mlist = mlist->next)
			npgs += btop(mlist->size);
		memlist_read_unlock();
		return (npgs);
	default:
		return ((pgcnt_t)0);
	}
}

/*
 * Return the memnode associated with the specified lgroup handle.
 */
int
plat_lgrphand_to_mem_node(lgrp_handle_t plathand)
{
	int mnode;

	if (lgrp_topo_ht_limit() == 1 || mpo_disabled || max_mem_nodes == 1)
		return (-1);

	/*
	 * We should always receive a valid platform handle here, as we
	 * cannot choose the allocation policy in this layer.
	 */
	ASSERT((int)plathand >= 0 && (int)plathand < max_mem_nodes);

	mnode = lgrphand_to_memnode[(int)plathand];
	return (mnode);
}

lgrp_handle_t
plat_mem_node_to_lgrphand(int mnode)
{
	if (lgrp_topo_ht_limit() == 1 || mpo_disabled || max_mem_nodes == 1)
		return (lgrp_default_handle);

	ASSERT(mnode >= 0 && mnode < max_mem_nodes);
	return (memnode_to_lgrphand[mnode]);
}

void
plat_assign_lgrphand_to_mem_node(lgrp_handle_t plathand, int mnode)
{
	if (lgrp_topo_ht_limit() == 1 || mpo_disabled || max_mem_nodes == 1)
		return;

	ASSERT(plathand < max_mem_nodes);
	ASSERT(mnode >= 0 && mnode < max_mem_nodes);

	lgrphand_to_memnode[plathand] = mnode;
	memnode_to_lgrphand[mnode] = plathand;
}

lgrp_t *
lgrp_plat_alloc(lgrp_id_t lgrpid)
{
	/*
	 * Validate before handing out a slot, so a failed request neither
	 * forms a pointer past the end of lgrp_space nor consumes a slot.
	 */
	if (lgrpid >= NLGRP || nlgrps_alloc >= NLGRP)
		return (NULL);

	return (&lgrp_space[nlgrps_alloc++]);
}

/*
 * Probe memory in each node from current CPU to determine latency topology.
 */
void
lgrp_plat_probe(void)
{
}
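/*
 * Illustrative sketch (not part of the original source): during a DR
 * copy-rename, memory contents move from one board to another while
 * pfns and memnodes stay put, so only the handle-to-memnode bindings
 * need to change.  A platform module could swap two bindings with the
 * interfaces above; "hand_src", "hand_tgt", and the mnode variables
 * are hypothetical names.
 *
 *	int mnode_src = plat_lgrphand_to_mem_node(hand_src);
 *	int mnode_tgt = plat_lgrphand_to_mem_node(hand_tgt);
 *
 *	plat_assign_lgrphand_to_mem_node(hand_src, mnode_tgt);
 *	plat_assign_lgrphand_to_mem_node(hand_tgt, mnode_src);
 */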