/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <asm/sparsemem.h>
#include <asm/lmb.h>
#include <asm/system.h>
#include <asm/smp.h>

static int numa_enabled = 1;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

static bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES];
static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;

static void __cpuinit map_cpu_to_node(int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;

	dbg("adding cpu %d to node %d\n", cpu, node);

	if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node])))
		cpu_set(cpu, numa_cpumask_lookup_table[node]);
}

#ifdef CONFIG_HOTPLUG_CPU
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
		cpu_clear(cpu, numa_cpumask_lookup_table[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */

static struct device_node * __cpuinit find_cpu_node(unsigned int cpu)
{
	unsigned int hw_cpuid = get_hard_smp_processor_id(cpu);
	struct device_node *cpu_node = NULL;
	const unsigned int *interrupt_server, *reg;
	int len;

	while ((cpu_node = of_find_node_by_type(cpu_node, "cpu")) != NULL) {
		/* Try interrupt server first */
		interrupt_server = get_property(cpu_node,
					"ibm,ppc-interrupt-server#s", &len);

		len = len / sizeof(u32);

		if (interrupt_server && (len > 0)) {
			while (len--) {
				if (interrupt_server[len] == hw_cpuid)
					return cpu_node;
			}
		} else {
			reg = get_property(cpu_node, "reg", &len);
			if (reg && (len > 0) && (reg[0] == hw_cpuid))
				return cpu_node;
		}
	}

	return NULL;
}

/* must hold reference to node during call */
static const int *of_get_associativity(struct device_node *dev)
{
	return get_property(dev, "ibm,associativity", NULL);
}
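
/*
 * Example, with hypothetical firmware values: if min_common_depth is 4 and
 * a node's ibm,associativity property reads <4 0 0 0 1> (the leading cell
 * is the number of levels that follow), of_node_to_nid_single() below sees
 * tmp[0] == 4 >= min_common_depth and resolves the node to nid 1.
 */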
/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = -1;
	const unsigned int *tmp;

	if (min_common_depth == -1)
		goto out;

	tmp = of_get_associativity(device);
	if (!tmp)
		goto out;

	if (tmp[0] >= min_common_depth)
		nid = tmp[min_common_depth];

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= MAX_NUMNODES)
		nid = -1;
out:
	return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	struct device_node *tmp;
	int nid = -1;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		tmp = device;
		device = of_get_parent(tmp);
		of_node_put(tmp);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);

/*
 * In theory, the "ibm,associativity" property may contain multiple
 * associativity lists because a resource may be multiply connected
 * into the machine. This resource then has different associativity
 * characteristics relative to its multiple connections. We ignore
 * this for now. We also assume that all cpu and memory sets have
 * their distances represented at a common level. This won't be
 * true for hierarchical NUMA.
 *
 * In any case the ibm,associativity-reference-points should give
 * the correct depth for a normal NUMA system.
 *
 * - Dave Hansen <haveblue@us.ibm.com>
 */
static int __init find_min_common_depth(void)
{
	int depth;
	const unsigned int *ref_points;
	struct device_node *rtas_root;
	unsigned int len;

	rtas_root = of_find_node_by_path("/rtas");

	if (!rtas_root)
		return -1;

	/*
	 * this property is 2 32-bit integers, each representing a level of
	 * depth in the associativity nodes. The first is for an SMP
	 * configuration (should be all 0's) and the second is for a normal
	 * NUMA configuration.
	 */
	ref_points = get_property(rtas_root,
			"ibm,associativity-reference-points", &len);

	if ((len >= 1) && ref_points) {
		depth = ref_points[1];
	} else {
		dbg("NUMA: ibm,associativity-reference-points not found.\n");
		depth = -1;
	}
	of_node_put(rtas_root);

	return depth;
}

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = prom_n_addr_cells(memory);
	*n_size_cells = prom_n_size_cells(memory);
	of_node_put(memory);
}

static unsigned long __devinit read_n_cells(int n, const unsigned int **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | **buf;
		(*buf)++;
	}
	return result;
}
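
/*
 * For example (hypothetical cell values): with n == 2 and the cells
 * <0x00000001 0x00000000>, read_n_cells() above returns 0x100000000 and
 * leaves *buf pointing at the cell that follows the pair.
 */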
/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int __cpuinit numa_setup_cpu(unsigned long lcpu)
{
	int nid = 0;
	struct device_node *cpu = find_cpu_node(lcpu);

	if (!cpu) {
		WARN_ON(1);
		goto out;
	}

	nid = of_node_to_nid_single(cpu);

	if (nid < 0 || !node_online(nid))
		nid = any_online_node(NODE_MASK_ALL);
out:
	map_cpu_to_node(lcpu, nid);

	of_node_put(cpu);

	return nid;
}

static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
			     unsigned long action,
			     void *hcpu)
{
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE;

	switch (action) {
	case CPU_UP_PREPARE:
		numa_setup_cpu(lcpu);
		ret = NOTIFY_OK;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		unmap_cpu_from_node(lcpu);
		ret = NOTIFY_OK;
		break;
#endif
	}
	return ret;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use lmb_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit.
	 */

	if (! memory_limit)
		return size;

	if (start + size <= lmb_end_of_DRAM())
		return size;

	if (start >= lmb_end_of_DRAM())
		return 0;

	return lmb_end_of_DRAM() - start;
}
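
/*
 * Worked example with hypothetical addresses: if a mem= limit leaves
 * lmb_end_of_DRAM() at 0x40000000, a region starting at 0x30000000 with
 * size 0x20000000 is truncated to 0x10000000 bytes, and a region starting
 * at or above 0x40000000 returns 0 and is skipped by the callers.
 */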
/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node. This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
	const unsigned int *lm, *dm, *aa;
	unsigned int ls, ld, la;
	unsigned int n, aam, aalen;
	unsigned long lmb_size, size;
	int nid, default_nid = 0;
	unsigned int start, ai, flags;

	lm = get_property(memory, "ibm,lmb-size", &ls);
	dm = get_property(memory, "ibm,dynamic-memory", &ld);
	aa = get_property(memory, "ibm,associativity-lookup-arrays", &la);
	if (!lm || !dm || !aa ||
	    ls < sizeof(unsigned int) || ld < sizeof(unsigned int) ||
	    la < 2 * sizeof(unsigned int))
		return;

	lmb_size = read_n_cells(n_mem_size_cells, &lm);
	n = *dm++;		/* number of LMBs */
	aam = *aa++;		/* number of associativity lists */
	aalen = *aa++;		/* length of each associativity list */
	if (ld < (n * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int) ||
	    la < (aam * aalen + 2) * sizeof(unsigned int))
		return;

	for (; n != 0; --n) {
		start = read_n_cells(n_mem_addr_cells, &dm);
		ai = dm[2];
		flags = dm[3];
		dm += 4;
		/* 0x80 == reserved, 0x8 = assigned to us */
		if ((flags & 0x80) || !(flags & 0x8))
			continue;
		nid = default_nid;
		/* flags & 0x40 means associativity index is invalid */
		if (min_common_depth > 0 && min_common_depth <= aalen &&
		    (flags & 0x40) == 0 && ai < aam) {
			/* this is like of_node_to_nid_single */
			nid = aa[ai * aalen + min_common_depth - 1];
			if (nid == 0xffff || nid >= MAX_NUMNODES)
				nid = default_nid;
		}
		node_set_online(nid);

		size = numa_enforce_memory_limit(start, lmb_size);
		if (!size)
			continue;

		add_active_range(nid, start >> PAGE_SHIFT,
				 (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));
	}
}

static int __init parse_numa_properties(void)
{
	struct device_node *cpu = NULL;
	struct device_node *memory = NULL;
	int default_nid = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0)
		return min_common_depth;

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		int nid;

		cpu = find_cpu_node(i);
		BUG_ON(!cpu);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * the topology.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
	memory = NULL;
	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties. If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;
		node_set_online(nid);

		if (!(size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		add_active_range(nid, start >> PAGE_SHIFT,
				 (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each LMB listed in the ibm,dynamic-memory
	 * property in the ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory)
		parse_drconf_memory(memory);

	return 0;
}

static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int i;

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	for (i = 0; i < lmb.memory.cnt; ++i) {
		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
		add_active_range(0, start_pfn, end_pfn);
	}
	node_set_online(0);
}
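
/*
 * For illustration, on a hypothetical machine with cpus 0-3 on node 0 and
 * cpus 4-7 on node 1, dump_numa_cpu_topology() below would print:
 *
 *   Node 0 CPUs: 0-3
 *   Node 1 CPUs: 4-7
 */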
void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		printk(KERN_DEBUG "Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
				if (count == 0)
					printk(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					printk("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			printk("-%u", NR_CPUS - 1);
		printk("\n");
	}
}

static void __init dump_numa_memory_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_DEBUG "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < lmb_end_of_DRAM();
		     i += (1 << SECTION_SIZE_BITS)) {
			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
}

/*
 * Allocate some memory, satisfying the lmb or bootmem allocator where
 * required. nid is the preferred node and end_pfn is the page frame
 * number of the highest address in the node.
 *
 * Returns the physical address of the memory.
 */
static void __init *careful_allocation(int nid, unsigned long size,
				       unsigned long align,
				       unsigned long end_pfn)
{
	int new_nid;
	unsigned long ret = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);

	/* retry over all memory */
	if (!ret)
		ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM());

	if (!ret)
		panic("numa.c: cannot allocate %lu bytes on node %d",
		      size, nid);

	/*
	 * If the memory came from a previously allocated node, we must
	 * retry with the bootmem allocator.
	 */
	new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT);
	if (new_nid < nid) {
		ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(new_nid),
				size, align, 0);

		if (!ret)
			panic("numa.c: cannot allocate %lu bytes on node %d",
			      size, new_nid);

		ret = __pa(ret);

		dbg("alloc_bootmem %lx %lx\n", ret, size);
	}

	return (void *)ret;
}
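
/*
 * Note that careful_allocation() hands back a physical address despite the
 * void * return type; callers such as do_init_bootmem() below convert it
 * with __va() before touching the memory.
 */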
static struct notifier_block __cpuinitdata ppc64_numa_nb = {
	.notifier_call = cpu_numa_callback,
	.priority = 1 /* Must run before sched domains notifier. */
};

void __init do_init_bootmem(void)
{
	int nid;
	unsigned int i;

	min_low_pfn = 0;
	max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_memory_topology();

	register_cpu_notifier(&ppc64_numa_nb);
	cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
			  (void *)(unsigned long)boot_cpuid);

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		unsigned long bootmem_paddr;
		unsigned long bootmap_pages;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

		/* Allocate the node structure node local if possible */
		NODE_DATA(nid) = careful_allocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_pfn);
		NODE_DATA(nid) = __va(NODE_DATA(nid));
		memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

		NODE_DATA(nid)->bdata = &plat_node_bdata[nid];
		NODE_DATA(nid)->node_start_pfn = start_pfn;
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

		if (NODE_DATA(nid)->node_spanned_pages == 0)
			continue;

		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
		bootmem_paddr = (unsigned long)careful_allocation(nid,
					bootmap_pages << PAGE_SHIFT,
					PAGE_SIZE, end_pfn);
		memset(__va(bootmem_paddr), 0, bootmap_pages << PAGE_SHIFT);

		dbg("bootmap_paddr = %lx\n", bootmem_paddr);

		init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
				  start_pfn, end_pfn);

		free_bootmem_with_active_regions(nid, end_pfn);

		/* Mark reserved regions on this node */
		for (i = 0; i < lmb.reserved.cnt; i++) {
			unsigned long physbase = lmb.reserved.region[i].base;
			unsigned long size = lmb.reserved.region[i].size;
			unsigned long start_paddr = start_pfn << PAGE_SHIFT;
			unsigned long end_paddr = end_pfn << PAGE_SHIFT;

			if (early_pfn_to_nid(physbase >> PAGE_SHIFT) != nid &&
			    early_pfn_to_nid((physbase+size-1) >> PAGE_SHIFT) != nid)
				continue;

			if (physbase < end_paddr &&
			    (physbase+size) > start_paddr) {
				/* overlaps */
				if (physbase < start_paddr) {
					size -= start_paddr - physbase;
					physbase = start_paddr;
				}

				if (size > end_paddr - physbase)
					size = end_paddr - physbase;

				dbg("reserve_bootmem %lx %lx\n", physbase,
				    size);
				reserve_bootmem_node(NODE_DATA(nid), physbase,
						     size);
			}
		}

		sparse_memory_present_with_active_regions(nid);
	}
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = lmb_end_of_DRAM() >> PAGE_SHIFT;
	free_area_init_nodes(max_zone_pfns);
}

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);
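
/*
 * For example, booting with "numa=off" forces the flat layout set up by
 * setup_nonnuma(), while "numa=debug" enables the dbg() messages above.
 * The options can be combined, e.g. "numa=off,debug".
 */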
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section. Section
 * corresponds to a SPARSEMEM section, not an LMB. It is assumed that
 * sections are fully contained within a single LMB.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	nodemask_t nodes;
	int default_nid = any_online_node(NODE_MASK_ALL);
	int nid;

	if (!numa_enabled || (min_common_depth < 0))
		return default_nid;

	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start, size;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
ha_new_range:
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);
		nid = of_node_to_nid_single(memory);

		/* Domains not present at boot default to 0 */
		if (nid < 0 || !node_online(nid))
			nid = default_nid;

		if ((scn_addr >= start) && (scn_addr < (start + size))) {
			of_node_put(memory);
			goto got_nid;
		}

		if (--ranges)		/* process all ranges in cell */
			goto ha_new_range;
	}
	BUG();	/* section address should be found above */
	return 0;

	/* Temporary code to ensure that returned node is not empty */
got_nid:
	nodes_setall(nodes);
	while (NODE_DATA(nid)->node_spanned_pages == 0) {
		node_clear(nid, nodes);
		nid = any_online_node(nodes);
	}
	return nid;
}
#endif /* CONFIG_MEMORY_HOTPLUG */