/* Common code for 32 and 64-bit NUMA */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/acpi.h>
#include <asm/amd_nb.h>

#include "numa_internal.h"

int __initdata numa_off;
nodemask_t numa_nodes_parsed __initdata;

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

static struct numa_meminfo numa_meminfo
#ifndef CONFIG_MEMORY_HOTPLUG
__initdata
#endif
;

static int numa_distance_cnt;
static u8 *numa_distance;

static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
#ifdef CONFIG_NUMA_EMU
	if (!strncmp(opt, "fake=", 5))
		numa_emu_cmdline(opt + 5);
#endif
#ifdef CONFIG_ACPI_NUMA
	if (!strncmp(opt, "noacpi", 6))
		acpi_numa = -1;
#endif
	return 0;
}
early_param("numa", numa_setup);
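
/*
 * Illustrative command lines handled by numa_setup() above:
 *
 *   numa=off      - set numa_off and fall back to a single fake node
 *   numa=fake=4   - hand "4" to numa_emu_cmdline() for NUMA emulation
 *                   (CONFIG_NUMA_EMU only)
 *   numa=noacpi   - set acpi_numa = -1 so SRAT parsing is skipped
 *                   (CONFIG_ACPI_NUMA only)
 */
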
/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

int numa_cpu_node(int cpu)
{
	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

	if (apicid != BAD_APICID)
		return __apicid_to_node[apicid];
	return NUMA_NO_NODE;
}

cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

void numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	set_cpu_numa_node(cpu, node);
}

void numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
}

static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
				     struct numa_meminfo *mi)
{
	/* ignore zero length blks */
	if (start == end)
		return 0;

	/* whine about and ignore invalid blks */
	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
		pr_warning("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
			   nid, start, end - 1);
		return 0;
	}

	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
		pr_err("NUMA: too many memblk ranges\n");
		return -EINVAL;
	}

	mi->blk[mi->nr_blks].start = start;
	mi->blk[mi->nr_blks].end = end;
	mi->blk[mi->nr_blks].nid = nid;
	mi->nr_blks++;
	return 0;
}

/**
 * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
 * @idx: Index of memblk to remove
 * @mi: numa_meminfo to remove memblk from
 *
 * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
 * decrementing @mi->nr_blks.
 */
void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
{
	mi->nr_blks--;
	memmove(&mi->blk[idx], &mi->blk[idx + 1],
		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
}

/**
 * numa_add_memblk - Add one numa_memblk to numa_meminfo
 * @nid: NUMA node ID of the new memblk
 * @start: Start address of the new memblk
 * @end: End address of the new memblk
 *
 * Add a new memblk to the default numa_meminfo.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
	return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}
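
/*
 * Typical callers (for orientation, not an exhaustive list): the ACPI SRAT
 * parser and the AMD topology probe add one block per detected memory range
 * via numa_add_memblk(); numa_cleanup_meminfo() later trims and merges
 * whatever was collected here.
 */
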
/* Allocate NODE_DATA for a node on the local memory */
static void __init alloc_node_data(int nid)
{
	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
	u64 nd_pa;
	void *nd;
	int tnid;

	/*
	 * Allocate node data. Try node-local memory and then any node.
	 * Never allocate in DMA zone.
	 */
	nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
	if (!nd_pa) {
		nd_pa = __memblock_alloc_base(nd_size, SMP_CACHE_BYTES,
					      MEMBLOCK_ALLOC_ACCESSIBLE);
		if (!nd_pa) {
			pr_err("Cannot find %zu bytes in node %d\n",
			       nd_size, nid);
			return;
		}
	}
	nd = __va(nd_pa);

	/* report and initialize */
	printk(KERN_INFO "NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
	       nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));

	node_set_online(nid);
}

/**
 * numa_cleanup_meminfo - Cleanup a numa_meminfo
 * @mi: numa_meminfo to clean up
 *
 * Sanitize @mi by merging and removing unnecessary memblks. Also check for
 * conflicts and clear unused memblks.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
{
	const u64 low = 0;
	const u64 high = PFN_PHYS(max_pfn);
	int i, j, k;

	/* first, trim all entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		/* make sure all blocks are inside the limits */
		bi->start = max(bi->start, low);
		bi->end = min(bi->end, high);

		/* and there's no empty or non-existent block */
		if (bi->start >= bi->end ||
		    !memblock_overlaps_region(&memblock.memory,
					      bi->start, bi->end - bi->start))
			numa_remove_memblk_from(i--, mi);
	}

	/* merge neighboring / overlapping entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		for (j = i + 1; j < mi->nr_blks; j++) {
			struct numa_memblk *bj = &mi->blk[j];
			u64 start, end;

			/*
			 * See whether there are overlapping blocks. Whine
			 * about but allow overlaps of the same nid. They
			 * will be merged below.
			 */
			if (bi->end > bj->start && bi->start < bj->end) {
				if (bi->nid != bj->nid) {
					pr_err("NUMA: node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
					       bi->nid, bi->start, bi->end - 1,
					       bj->nid, bj->start, bj->end - 1);
					return -EINVAL;
				}
				pr_warning("NUMA: Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
					   bi->nid, bi->start, bi->end - 1,
					   bj->start, bj->end - 1);
			}

			/*
			 * Join together blocks on the same node, holes
			 * between which don't overlap with memory on other
			 * nodes.
			 */
			if (bi->nid != bj->nid)
				continue;
			start = min(bi->start, bj->start);
			end = max(bi->end, bj->end);
			for (k = 0; k < mi->nr_blks; k++) {
				struct numa_memblk *bk = &mi->blk[k];

				if (bi->nid == bk->nid)
					continue;
				if (start < bk->end && end > bk->start)
					break;
			}
			if (k < mi->nr_blks)
				continue;
			printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
			       bi->nid, bi->start, bi->end - 1, bj->start,
			       bj->end - 1, start, end - 1);
			bi->start = start;
			bi->end = end;
			numa_remove_memblk_from(j--, mi);
		}
	}

	/* clear unused ones */
	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
		mi->blk[i].start = mi->blk[i].end = 0;
		mi->blk[i].nid = NUMA_NO_NODE;
	}

	return 0;
}

/*
 * Set nodes, which have memory in @mi, in *@nodemask.
 */
static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
					      const struct numa_meminfo *mi)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
		if (mi->blk[i].start != mi->blk[i].end &&
		    mi->blk[i].nid != NUMA_NO_NODE)
			node_set(mi->blk[i].nid, *nodemask);
}
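
/*
 * NUMA distance table (reader's note).
 *
 * numa_distance is a flat cnt * cnt array of u8 values; the distance from
 * node @from to node @to lives at numa_distance[from * cnt + to]. It is
 * filled with LOCAL_DISTANCE on the diagonal and REMOTE_DISTANCE everywhere
 * else until numa_set_distance() overrides individual entries, typically
 * while parsing the ACPI SLIT.
 */
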
/**
 * numa_reset_distance - Reset NUMA distance table
 *
 * The current table is freed. The next numa_set_distance() call will
 * create a new one.
 */
void __init numa_reset_distance(void)
{
	size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);

	/* numa_distance could be 1LU marking allocation failure, test cnt */
	if (numa_distance_cnt)
		memblock_free(__pa(numa_distance), size);
	numa_distance_cnt = 0;
	numa_distance = NULL;	/* enable table creation */
}

static int __init numa_alloc_distance(void)
{
	nodemask_t nodes_parsed;
	size_t size;
	int i, j, cnt = 0;
	u64 phys;

	/* size the new table and allocate it */
	nodes_parsed = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);

	for_each_node_mask(i, nodes_parsed)
		cnt = i;
	cnt++;
	size = cnt * cnt * sizeof(numa_distance[0]);

	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
				      size, PAGE_SIZE);
	if (!phys) {
		pr_warning("NUMA: Warning: can't allocate distance table!\n");
		/* don't retry until explicitly reset */
		numa_distance = (void *)1LU;
		return -ENOMEM;
	}
	memblock_reserve(phys, size);

	numa_distance = __va(phys);
	numa_distance_cnt = cnt;

	/* fill with the default distances */
	for (i = 0; i < cnt; i++)
		for (j = 0; j < cnt; j++)
			numa_distance[i * cnt + j] = i == j ?
				LOCAL_DISTANCE : REMOTE_DISTANCE;
	printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);

	return 0;
}

/**
 * numa_set_distance - Set NUMA distance from one node to another
 * @from: the 'from' node to set distance
 * @to: the 'to' node to set distance
 * @distance: NUMA distance
 *
 * Set the distance from node @from to @to to @distance. If the distance
 * table doesn't exist, one which is large enough to accommodate all the
 * currently known nodes will be created.
 *
 * If such a table cannot be allocated, a warning is printed and further
 * calls are ignored until the distance table is reset with
 * numa_reset_distance().
 *
 * If @from or @to is higher than the highest known node or lower than zero
 * at the time of table creation or @distance doesn't make sense, the call
 * is ignored.
 * This is to allow simplification of specific NUMA config implementations.
 */
void __init numa_set_distance(int from, int to, int distance)
{
	if (!numa_distance && numa_alloc_distance() < 0)
		return;

	if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
	    from < 0 || to < 0) {
		pr_warn_once("NUMA: Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	if ((u8)distance != distance ||
	    (from == to && distance != LOCAL_DISTANCE)) {
		pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	numa_distance[from * numa_distance_cnt + to] = distance;
}

int __node_distance(int from, int to)
{
	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	return numa_distance[from * numa_distance_cnt + to];
}
EXPORT_SYMBOL(__node_distance);
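
/*
 * Worked example (assumed two-node box, no SLIT): numa_alloc_distance()
 * leaves the table as { 10, 20, 20, 10 }, given LOCAL_DISTANCE == 10 and
 * REMOTE_DISTANCE == 20, so __node_distance(0, 0) == 10 and
 * __node_distance(0, 1) == 20 until firmware-provided distances override
 * them via numa_set_distance().
 */
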
/*
 * Sanity check to catch more bad NUMA configurations (they are amazingly
 * common). Make sure the nodes cover all memory.
 */
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
	u64 numaram, e820ram;
	int i;

	numaram = 0;
	for (i = 0; i < mi->nr_blks; i++) {
		u64 s = mi->blk[i].start >> PAGE_SHIFT;
		u64 e = mi->blk[i].end >> PAGE_SHIFT;
		numaram += e - s;
		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
		if ((s64)numaram < 0)
			numaram = 0;
	}

	e820ram = max_pfn - absent_pages_in_range(0, max_pfn);

	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
	if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
		printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
		       (numaram << PAGE_SHIFT) >> 20,
		       (e820ram << PAGE_SHIFT) >> 20);
		return false;
	}
	return true;
}

static void __init numa_clear_kernel_node_hotplug(void)
{
	int i, nid;
	nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
	unsigned long start, end;
	struct memblock_region *r;

	/*
	 * At this time, all memory regions reserved by memblock are
	 * used by the kernel. Setting the nid in memblock.reserved
	 * will mark out all the nodes the kernel resides in.
	 */
	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		struct numa_memblk *mb = &numa_meminfo.blk[i];

		memblock_set_node(mb->start, mb->end - mb->start,
				  &memblock.reserved, mb->nid);
	}

	/*
	 * Mark all kernel nodes.
	 *
	 * When booting with mem=nn[kMG] or in a kdump kernel, numa_meminfo
	 * may not include all the memblock.reserved memory ranges because
	 * trim_snb_memory() reserves specific pages for Sandy Bridge graphics.
	 */
	for_each_memblock(reserved, r)
		if (r->nid != MAX_NUMNODES)
			node_set(r->nid, numa_kernel_nodes);

	/* Clear MEMBLOCK_HOTPLUG flag for memory in kernel nodes. */
	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		nid = numa_meminfo.blk[i].nid;
		if (!node_isset(nid, numa_kernel_nodes))
			continue;

		start = numa_meminfo.blk[i].start;
		end = numa_meminfo.blk[i].end;

		memblock_clear_hotplug(start, end - start);
	}
}
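
/*
 * Hand the sanitized meminfo over to the core VM: tag the memblock ranges
 * with node ids, clear the MEMBLOCK_HOTPLUG flag on nodes the kernel
 * resides in, make sure the parsed nodes cover (nearly) all of the e820
 * memory, and allocate NODE_DATA() for every node that ends up with at
 * least NODE_MIN_SIZE of memory.
 */
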
static int __init numa_register_memblks(struct numa_meminfo *mi)
{
	unsigned long uninitialized_var(pfn_align);
	int i, nid;

	/* Account for nodes with cpus and no memory */
	node_possible_map = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&node_possible_map, mi);
	if (WARN_ON(nodes_empty(node_possible_map)))
		return -EINVAL;

	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *mb = &mi->blk[i];
		memblock_set_node(mb->start, mb->end - mb->start,
				  &memblock.memory, mb->nid);
	}

	/*
	 * During very early boot the kernel has to use some memory, e.g.
	 * for loading the kernel image. We cannot prevent this anyway. So
	 * any node the kernel resides in should be un-hotpluggable.
	 *
	 * And when we come here, allocating node data won't fail.
	 */
	numa_clear_kernel_node_hotplug();

	/*
	 * If the sections array is going to be used for pfn -> nid mapping,
	 * check whether its granularity is fine enough.
	 */
#ifdef NODE_NOT_IN_PAGE_FLAGS
	pfn_align = node_map_pfn_alignment();
	if (pfn_align && pfn_align < PAGES_PER_SECTION) {
		printk(KERN_WARNING "Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
		       PFN_PHYS(pfn_align) >> 20,
		       PFN_PHYS(PAGES_PER_SECTION) >> 20);
		return -EINVAL;
	}
#endif
	if (!numa_meminfo_cover_memory(mi))
		return -EINVAL;

	/* Finally register nodes. */
	for_each_node_mask(nid, node_possible_map) {
		u64 start = PFN_PHYS(max_pfn);
		u64 end = 0;

		for (i = 0; i < mi->nr_blks; i++) {
			if (nid != mi->blk[i].nid)
				continue;
			start = min(mi->blk[i].start, start);
			end = max(mi->blk[i].end, end);
		}

		if (start >= end)
			continue;

		/*
		 * Don't confuse VM with a node that doesn't have the
		 * minimum amount of memory:
		 */
		if (end && (end - start) < NODE_MIN_SIZE)
			continue;

		alloc_node_data(nid);
	}

	/* Dump memblock with node info and return. */
	memblock_dump_all();
	return 0;
}

/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 * mapping. To avoid this, fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet. We round-robin the existing
 * nodes.
 */
static void __init numa_init_array(void)
{
	int rr, i;

	rr = first_node(node_online_map);
	for (i = 0; i < nr_cpu_ids; i++) {
		if (early_cpu_to_node(i) != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node(rr, node_online_map);
		if (rr == MAX_NUMNODES)
			rr = first_node(node_online_map);
	}
}

static int __init numa_init(int (*init_func)(void))
{
	int i;
	int ret;

	for (i = 0; i < MAX_LOCAL_APIC; i++)
		set_apicid_to_node(i, NUMA_NO_NODE);

	nodes_clear(numa_nodes_parsed);
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
				  MAX_NUMNODES));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
				  MAX_NUMNODES));
	/* In case SRAT parsing failed. */
	WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
	numa_reset_distance();

	ret = init_func();
	if (ret < 0)
		return ret;

	/*
	 * We reset memblock back to the top-down direction here because if
	 * we configured ACPI_NUMA, we have parsed SRAT in init_func(). It
	 * is OK to have the reset here even if we didn't configure ACPI_NUMA
	 * or the ACPI NUMA init fails and falls back to dummy NUMA init.
	 */
	memblock_set_bottom_up(false);

	ret = numa_cleanup_meminfo(&numa_meminfo);
	if (ret < 0)
		return ret;

	numa_emulation(&numa_meminfo, numa_distance_cnt);

	ret = numa_register_memblks(&numa_meminfo);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_cpu_ids; i++) {
		int nid = early_cpu_to_node(i);

		if (nid == NUMA_NO_NODE)
			continue;
		if (!node_online(nid))
			numa_clear_node(i);
	}
	numa_init_array();

	return 0;
}

/**
 * dummy_numa_init - Fallback dummy NUMA init
 *
 * Used if there's no underlying NUMA architecture, NUMA initialization
 * fails, or NUMA is disabled on the command line.
 *
 * Must online at least one node and add memory blocks that cover all
 * allowed memory. This function must not fail.
 */
static int __init dummy_numa_init(void)
{
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
	printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
	       0LLU, PFN_PHYS(max_pfn) - 1);

	node_set(0, numa_nodes_parsed);
	numa_add_memblk(0, 0, PFN_PHYS(max_pfn));

	return 0;
}

/**
 * x86_numa_init - Initialize NUMA
 *
 * Try each configured NUMA initialization method until one succeeds. The
 * last fallback is a dummy single-node config encompassing the whole
 * memory, which never fails.
 */
void __init x86_numa_init(void)
{
	if (!numa_off) {
#ifdef CONFIG_ACPI_NUMA
		if (!numa_init(x86_acpi_numa_init))
			return;
#endif
#ifdef CONFIG_AMD_NUMA
		if (!numa_init(amd_numa_init))
			return;
#endif
	}

	numa_init(dummy_numa_init);
}

static __init int find_near_online_node(int node)
{
	int n, val;
	int min_val = INT_MAX;
	int best_node = -1;

	for_each_online_node(n) {
		val = node_distance(node, n);

		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	return best_node;
}

/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and the fake-node case (when running a kernel compiled
 * for NUMA on a non-NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round-robin manner at numa_init_array(),
 * prior to this call, and this initialization is good enough
 * for the fake NUMA cases.
 *
 * Called before the per_cpu areas are setup.
 */
void __init init_cpu_to_node(void)
{
	int cpu;
	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

	BUG_ON(cpu_to_apicid == NULL);

	for_each_possible_cpu(cpu) {
		int node = numa_cpu_node(cpu);

		if (node == NUMA_NO_NODE)
			continue;
		if (!node_online(node))
			node = find_near_online_node(node);
		numa_set_node(cpu, node);
	}
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
void numa_add_cpu(int cpu)
{
	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void numa_remove_cpu(int cpu)
{
	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif	/* !CONFIG_NUMA_EMU */

#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */

int __cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
		       "cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!cpu_possible(cpu)) {
		printk(KERN_WARNING
		       "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
	struct cpumask *mask;

	if (node == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return;
	}
	mask = node_to_cpumask_map[node];
	if (!mask) {
		pr_err("node_to_cpumask_map[%i] NULL\n", node);
		dump_stack();
		return;
	}

	if (enable)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_clear_cpu(cpu, mask);

	printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n",
	       enable ? "numa_add_cpu" : "numa_remove_cpu",
	       cpu, node, cpumask_pr_args(mask));
	return;
}

# ifndef CONFIG_NUMA_EMU
static void numa_set_cpumask(int cpu, bool enable)
{
	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}

void numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, true);
}

void numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, false);
}
# endif	/* !CONFIG_NUMA_EMU */

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
		       "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
		       node, nr_node_ids);
		dump_stack();
		return cpu_none_mask;
	}
	if (node_to_cpumask_map[node] == NULL) {
		printk(KERN_WARNING
		       "cpumask_of_node(%d): no node_to_cpumask_map!\n",
		       node);
		dump_stack();
		return cpu_online_mask;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */

#ifdef CONFIG_MEMORY_HOTPLUG
int memory_add_physaddr_to_nid(u64 start)
{
	struct numa_meminfo *mi = &numa_meminfo;
	int nid = mi->blk[0].nid;
	int i;

	for (i = 0; i < mi->nr_blks; i++)
		if (mi->blk[i].start <= start && mi->blk[i].end > start)
			nid = mi->blk[i].nid;
	return nid;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif