/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000, 05 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2000 by Silicon Graphics, Inc.
 * Copyright (C) 2004 by Christoph Hellwig
 *
 * On SGI IP27 the ARC memory configuration data is completely bogus but
 * alternate easier to use mechanisms are available.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/highmem.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>

#include <asm/sn/arch.h>
#include <asm/sn/hub.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/sn_private.h>


#define SLOT_PFNSHIFT		(SLOT_SHIFT - PAGE_SHIFT)
#define PFN_NASIDSHFT		(NASID_SHFT - PAGE_SHIFT)

struct node_data *__node_data[MAX_COMPACT_NODES];

EXPORT_SYMBOL(__node_data);

static int fine_mode;

static int is_fine_dirmode(void)
{
	return (((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_REGIONSIZE_MASK)
		>> NSRI_REGIONSIZE_SHFT) & REGIONSIZE_FINE);
}

static hubreg_t get_region(cnodeid_t cnode)
{
	if (fine_mode)
		return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_FINEREG_SHFT;
	else
		return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_COARSEREG_SHFT;
}

static hubreg_t region_mask;

static void gen_region_mask(hubreg_t *region_mask)
{
	cnodeid_t cnode;

	(*region_mask) = 0;
	for_each_online_node(cnode) {
		(*region_mask) |= 1ULL << get_region(cnode);
	}
}

#define rou_rflag	rou_flags

static int router_distance;

static void router_recurse(klrou_t *router_a, klrou_t *router_b, int depth)
{
	klrou_t *router;
	lboard_t *brd;
	int port;

	if (router_a->rou_rflag == 1)
		return;

	if (depth >= router_distance)
		return;

	router_a->rou_rflag = 1;

	for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
		if (router_a->rou_port[port].port_nasid == INVALID_NASID)
			continue;

		brd = (lboard_t *)NODE_OFFSET_TO_K0(
			router_a->rou_port[port].port_nasid,
			router_a->rou_port[port].port_offset);

		if (brd->brd_type == KLTYPE_ROUTER) {
			router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
			if (router == router_b) {
				if (depth < router_distance)
					router_distance = depth;
			}
			else
				router_recurse(router, router_b, depth + 1);
		}
	}

	router_a->rou_rflag = 0;
}

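/*
 * Node-to-node distance table, indexed by compact node id.  A distance
 * of 0 means the same node, 1 means both nodes hang off the same
 * router, larger values are the router depth found by router_recurse(),
 * and -1 (stored as 255) marks pairs for which no connecting router
 * was found.
 */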
unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];

static int __init compute_node_distance(nasid_t nasid_a, nasid_t nasid_b)
{
	klrou_t *router, *router_a = NULL, *router_b = NULL;
	lboard_t *brd, *dest_brd;
	cnodeid_t cnode;
	nasid_t nasid;
	int port;

	/* Figure out which routers the nodes in question are connected to */
	for_each_online_node(cnode) {
		nasid = COMPACT_TO_NASID_NODEID(cnode);

		if (nasid == -1)
			continue;

		brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
					KLTYPE_ROUTER);

		if (!brd)
			continue;

		do {
			if (brd->brd_flags & DUPLICATE_BOARD)
				continue;

			router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
			router->rou_rflag = 0;

			for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
				if (router->rou_port[port].port_nasid == INVALID_NASID)
					continue;

				dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
					router->rou_port[port].port_nasid,
					router->rou_port[port].port_offset);

				if (dest_brd->brd_type == KLTYPE_IP27) {
					if (dest_brd->brd_nasid == nasid_a)
						router_a = router;
					if (dest_brd->brd_nasid == nasid_b)
						router_b = router;
				}
			}

		} while ((brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)));
	}

	if (router_a == NULL) {
		printk("node_distance: router_a NULL\n");
		return -1;
	}
	if (router_b == NULL) {
		printk("node_distance: router_b NULL\n");
		return -1;
	}

	if (nasid_a == nasid_b)
		return 0;

	if (router_a == router_b)
		return 1;

	router_distance = 100;
	router_recurse(router_a, router_b, 2);

	return router_distance;
}

static void __init init_topology_matrix(void)
{
	nasid_t nasid, nasid2;
	cnodeid_t row, col;

	for (row = 0; row < MAX_COMPACT_NODES; row++)
		for (col = 0; col < MAX_COMPACT_NODES; col++)
			__node_distances[row][col] = -1;

	for_each_online_node(row) {
		nasid = COMPACT_TO_NASID_NODEID(row);
		for_each_online_node(col) {
			nasid2 = COMPACT_TO_NASID_NODEID(col);
			__node_distances[row][col] =
				compute_node_distance(nasid, nasid2);
		}
	}
}

static void __init dump_topology(void)
{
	nasid_t nasid;
	cnodeid_t cnode;
	lboard_t *brd, *dest_brd;
	int port;
	int router_num = 0;
	klrou_t *router;
	cnodeid_t row, col;

	printk("************** Topology ********************\n");

	printk("    ");
	for_each_online_node(col)
		printk("%02d ", col);
	printk("\n");
	for_each_online_node(row) {
		printk("%02d ", row);
		for_each_online_node(col)
			printk("%2d ", node_distance(row, col));
		printk("\n");
	}

	for_each_online_node(cnode) {
		nasid = COMPACT_TO_NASID_NODEID(cnode);

		if (nasid == -1)
			continue;

		brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
					KLTYPE_ROUTER);

		if (!brd)
			continue;

		do {
			if (brd->brd_flags & DUPLICATE_BOARD)
				continue;
			printk("Router %d:", router_num);
			router_num++;

			router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);

			for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
				if (router->rou_port[port].port_nasid == INVALID_NASID)
					continue;

				dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
					router->rou_port[port].port_nasid,
					router->rou_port[port].port_offset);

				if (dest_brd->brd_type == KLTYPE_IP27)
					printk(" %d", dest_brd->brd_nasid);
				if (dest_brd->brd_type == KLTYPE_ROUTER)
					printk(" r");
			}
			printk("\n");

		} while ((brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)));
	}
}

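/*
 * A node's physical memory is addressed by its NASID in the upper
 * address bits and is divided into fixed-size slots, so the base PFN of
 * a (node, slot) pair is formed by shifting the NASID and the slot
 * number into place (see PFN_NASIDSHFT and SLOT_PFNSHIFT above).
 */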
static pfn_t __init slot_getbasepfn(cnodeid_t cnode, int slot)
{
	nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);

	return ((pfn_t)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
}

static pfn_t __init slot_psize_compute(cnodeid_t node, int slot)
{
	nasid_t nasid;
	lboard_t *brd;
	klmembnk_t *banks;
	unsigned long size;

	nasid = COMPACT_TO_NASID_NODEID(node);
	/* Find the node board */
	brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27);
	if (!brd)
		return 0;

	/* Get the memory bank structure */
	banks = (klmembnk_t *) find_first_component(brd, KLSTRUCT_MEMBNK);
	if (!banks)
		return 0;

	/* Size in _Megabytes_ */
	size = (unsigned long)banks->membnk_bnksz[slot/4];

	/* hack for 128 dimm banks */
	if (size <= 128) {
		if (slot % 4 == 0) {
			size <<= 20;		/* size in bytes */
			return size >> PAGE_SHIFT;
		} else
			return 0;
	} else {
		size /= 4;
		size <<= 20;
		return size >> PAGE_SHIFT;
	}
}

static void __init mlreset(void)
{
	int i;

	master_nasid = get_nasid();
	fine_mode = is_fine_dirmode();

	/*
	 * Probe for all CPUs - this creates the cpumask and sets up the
	 * mapping tables.  We need to do this as early as possible.
	 */
#ifdef CONFIG_SMP
	cpu_node_probe();
#endif

	init_topology_matrix();
	dump_topology();

	gen_region_mask(&region_mask);

	setup_replication_mask();

	/*
	 * Set all nodes' calias sizes to 8k
	 */
	for_each_online_node(i) {
		nasid_t nasid;

		nasid = COMPACT_TO_NASID_NODEID(i);

		/*
		 * Always have node 0 in the region mask, otherwise
		 * CALIAS accesses get exceptions since the hub
		 * thinks it is a node 0 address.
		 */
		REMOTE_HUB_S(nasid, PI_REGION_PRESENT, (region_mask | 1));
#ifdef CONFIG_REPLICATE_EXHANDLERS
		REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_8K);
#else
		REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_0);
#endif

#ifdef LATER
		/*
		 * Set up all hubs to have a big window pointing at
		 * widget 0.  Memory mode, widget 0, offset 0.
		 */
		REMOTE_HUB_S(nasid, IIO_ITTE(SWIN0_BIGWIN),
			((HUB_PIO_MAP_TO_MEM << IIO_ITTE_IOSP_SHIFT) |
			(0 << IIO_ITTE_WIDGET_SHIFT)));
#endif
	}
}

static void __init szmem(void)
{
	pfn_t slot_psize, slot0sz = 0, nodebytes;	/* Hack to detect problem configs */
	int slot;
	cnodeid_t node;

	num_physpages = 0;

	for_each_online_node(node) {
		nodebytes = 0;
		for (slot = 0; slot < MAX_MEM_SLOTS; slot++) {
			slot_psize = slot_psize_compute(node, slot);
			if (slot == 0)
				slot0sz = slot_psize;
			/*
			 * We need to refine the hack when we have replicated
			 * kernel text.
			 */
			nodebytes += (1LL << SLOT_SHIFT);

			if (!slot_psize)
				continue;

			if ((nodebytes >> PAGE_SHIFT) * (sizeof(struct page)) >
					(slot0sz << PAGE_SHIFT)) {
				printk("Ignoring slot %d onwards on node %d\n",
								slot, node);
				slot = MAX_MEM_SLOTS;
				continue;
			}
			num_physpages += slot_psize;
			memblock_add_node(PFN_PHYS(slot_getbasepfn(node, slot)),
					  PFN_PHYS(slot_psize), node);
		}
	}
}

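/*
 * Per-node memory setup: the node's pglist_data and hub_data are placed
 * in the node's own memory at the first free page of slot 0, bootmem is
 * initialized for the node, and everything used so far (including the
 * bootmem bitmap) is reserved.
 */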
static void __init node_mem_init(cnodeid_t node)
{
	pfn_t slot_firstpfn = slot_getbasepfn(node, 0);
	pfn_t slot_freepfn = node_getfirstfree(node);
	unsigned long bootmap_size;
	pfn_t start_pfn, end_pfn;

	get_pfn_range_for_nid(node, &start_pfn, &end_pfn);

	/*
	 * Allocate the node data structures on the node first.
	 */
	__node_data[node] = __va(slot_freepfn << PAGE_SHIFT);
	memset(__node_data[node], 0, PAGE_SIZE);

	NODE_DATA(node)->bdata = &bootmem_node_data[node];
	NODE_DATA(node)->node_start_pfn = start_pfn;
	NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;

	cpus_clear(hub_data(node)->h_cpus);

	slot_freepfn += PFN_UP(sizeof(struct pglist_data) +
			       sizeof(struct hub_data));

	bootmap_size = init_bootmem_node(NODE_DATA(node), slot_freepfn,
					start_pfn, end_pfn);
	free_bootmem_with_active_regions(node, end_pfn);
	reserve_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT,
		((slot_freepfn - slot_firstpfn) << PAGE_SHIFT) + bootmap_size,
		BOOTMEM_DEFAULT);
	sparse_memory_present_with_active_regions(node);
}

/*
 * A node with nothing.  We use it to avoid any special casing in
 * cpumask_of_node.
 */
static struct node_data null_node = {
	.hub = {
		.h_cpus = CPU_MASK_NONE
	}
};

/*
 * Currently, the intranode memory hole support assumes that each slot
 * contains at least 32 MBytes of memory.  We assume all bootmem data
 * fits on the first slot.
 */
void __init prom_meminit(void)
{
	cnodeid_t node;

	mlreset();
	szmem();

	for (node = 0; node < MAX_COMPACT_NODES; node++) {
		if (node_online(node)) {
			node_mem_init(node);
			continue;
		}
		__node_data[node] = &null_node;
	}
}

void __init prom_free_prom_memory(void)
{
	/* We got nothing to free here ... */
}

extern unsigned long setup_zero_pages(void);

void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = {0, };
	unsigned node;

	pagetable_init();

	for_each_online_node(node) {
		pfn_t start_pfn, end_pfn;

		get_pfn_range_for_nid(node, &start_pfn, &end_pfn);

		if (end_pfn > max_low_pfn)
			max_low_pfn = end_pfn;
	}
	zones_size[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(zones_size);
}

void __init mem_init(void)
{
	unsigned long codesize, datasize, initsize, tmp;
	unsigned node;

	high_memory = (void *) __va(num_physpages << PAGE_SHIFT);

	for_each_online_node(node) {
		/*
		 * This will free up the bootmem, i.e., slot 0 memory.
		 */
		totalram_pages += free_all_bootmem_node(NODE_DATA(node));
	}

	totalram_pages -= setup_zero_pages();	/* This comes from node 0 */

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	tmp = nr_free_pages();
	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
	       "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
	       tmp << (PAGE_SHIFT-10),
	       num_physpages << (PAGE_SHIFT-10),
	       codesize >> 10,
	       (num_physpages - tmp) << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10,
	       totalhigh_pages << (PAGE_SHIFT-10));
}
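/*
 * Illustrative sketch, not part of the original file and kept out of
 * the build with #if 0: one way the distance table filled in by
 * init_topology_matrix() could be consumed.  dump_topology() above
 * already prints it via node_distance(), which on IP27 resolves to
 * __node_distances[][].  The helper name below is hypothetical.
 */
#if 0
static cnodeid_t example_nearest_other_node(cnodeid_t from)
{
	cnodeid_t node, best = from;
	unsigned char best_dist = (unsigned char) -1;	/* 255 == unreachable */

	for_each_online_node(node) {
		if (node == from)
			continue;
		/* Smaller values mean fewer router hops away. */
		if (node_distance(from, node) < best_dist) {
			best_dist = node_distance(from, node);
			best = node;
		}
	}
	return best;
}
#endif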