// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 */
#define pr_fmt(fmt) "numa: " fmt

#include <linux/threads.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/smp.h>
#include <asm/topology.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
#include <asm/vdso.h>
#include <asm/drmem.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int primary_domain_index;
static int n_mem_addr_cells, n_mem_size_cells;

#define FORM0_AFFINITY 0
#define FORM1_AFFINITY 1
#define FORM2_AFFINITY 2
static int affinity_form;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const __be32 *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
static int numa_distance_table[MAX_NUMNODES][MAX_NUMNODES] = {
	[0 ... MAX_NUMNODES - 1] = { [0 ... MAX_NUMNODES - 1] = -1 }
};
static int numa_id_index_table[MAX_NUMNODES] = { [0 ... MAX_NUMNODES - 1] = NUMA_NO_NODE };

/*
 * Allocate node_to_cpumask_map based on the number of available nodes.
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for_each_node(node)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
}

static int __init fake_numa_create_new_node(unsigned long end_pfn,
					    unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;

	/*
	 * Modify node id, iff we started creating NUMA nodes.
	 * We want to continue from where we left off the last time.
	 */
	if (fake_nid)
		*nid = fake_nid;
	/*
	 * In case there are no more arguments to parse, the
	 * node_id should be the same as the last fake node id
	 * (we've handled this above).
	 */
	if (!p)
		return 0;

	mem = memparse(p, &p);
	if (!mem)
		return 0;

	if (mem < curr_boundary)
		return 0;

	curr_boundary = mem;

	if ((end_pfn << PAGE_SHIFT) > mem) {
		/*
		 * Skip commas and spaces
		 */
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;

		cmdline = p;
		fake_nid++;
		*nid = fake_nid;
		pr_debug("created new fake_node with id %d\n", fake_nid);
		return 1;
	}
	return 0;
}
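/*
 * Worked example (hypothetical boundaries): booting with
 * "numa=fake=1G,4G" makes fake_numa_create_new_node() split memory at
 * the 1GB and 4GB marks, so ranges ending at or below 1GB stay in fake
 * node 0, ranges ending between 1GB and 4GB land in fake node 1, and
 * anything beyond 4GB lands in fake node 2.
 */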
static void __init reset_numa_cpu_lookup_table(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		numa_cpu_lookup_table[cpu] = -1;
}

void map_cpu_to_node(int cpu, int node)
{
	update_numa_cpu_lookup_table(cpu, node);

	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node]))) {
		pr_debug("adding cpu %d to node %d\n", cpu, node);
		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
	}
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
		pr_debug("removing cpu %lu from node %d\n", cpu, node);
	} else {
		pr_warn("Warning: cpu %lu not found in node %d\n", cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */

static int __associativity_to_nid(const __be32 *associativity,
				  int max_array_sz)
{
	int nid;
	/*
	 * primary_domain_index is a 1-based array index.
	 */
	int index = primary_domain_index - 1;

	if (!numa_enabled || index >= max_array_sz)
		return NUMA_NO_NODE;

	nid = of_read_number(&associativity[index], 1);

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= nr_node_ids)
		nid = NUMA_NO_NODE;
	return nid;
}
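/*
 * Hypothetical example for these conversion helpers: a property of
 * "ibm,associativity" = <4 0 0 1 5> describes an array of four domain
 * ids {0, 0, 1, 5}. With primary_domain_index = 4, the helpers skip
 * the length cell and read entry 4 (1-based) of the array body,
 * yielding nid 5.
 */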
/*
 * Returns nid in the range [0..nr_node_ids - 1], or NUMA_NO_NODE (-1)
 * if no useful NUMA information is found.
 */
static int associativity_to_nid(const __be32 *associativity)
{
	int array_sz = of_read_number(associativity, 1);

	/* Skip the first element in the associativity array */
	return __associativity_to_nid((associativity + 1), array_sz);
}

static int __cpu_form2_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
{
	int dist;
	int node1, node2;

	node1 = associativity_to_nid(cpu1_assoc);
	node2 = associativity_to_nid(cpu2_assoc);

	dist = numa_distance_table[node1][node2];
	if (dist <= LOCAL_DISTANCE)
		return 0;
	else if (dist <= REMOTE_DISTANCE)
		return 1;
	else
		return 2;
}

static int __cpu_form1_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
{
	int dist = 0;
	int i, index;

	for (i = 0; i < distance_ref_points_depth; i++) {
		index = be32_to_cpu(distance_ref_points[i]);
		if (cpu1_assoc[index] == cpu2_assoc[index])
			break;
		dist++;
	}

	return dist;
}

int cpu_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
{
	/* We should not get called with FORM0 */
	VM_WARN_ON(affinity_form == FORM0_AFFINITY);
	if (affinity_form == FORM1_AFFINITY)
		return __cpu_form1_relative_distance(cpu1_assoc, cpu2_assoc);
	return __cpu_form2_relative_distance(cpu1_assoc, cpu2_assoc);
}

/* must hold reference to node during call */
static const __be32 *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}

int __node_distance(int a, int b)
{
	int i;
	int distance = LOCAL_DISTANCE;

	if (affinity_form == FORM2_AFFINITY)
		return numa_distance_table[a][b];
	else if (affinity_form == FORM0_AFFINITY)
		return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

	for (i = 0; i < distance_ref_points_depth; i++) {
		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
			break;

		/* Double the distance for each NUMA level */
		distance *= 2;
	}

	return distance;
}
EXPORT_SYMBOL(__node_distance);
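/*
 * Form1 worked example for __node_distance() (hypothetical table
 * contents): with distance_ref_points_depth = 2, the result is
 * LOCAL_DISTANCE (10) when the lookup table entries of nodes a and b
 * match at the first level, 20 when they differ at the first (most
 * significant) level but match at the second, and 40 when they differ
 * at both levels.
 */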
/*
 * Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = NUMA_NO_NODE;
	const __be32 *tmp;

	tmp = of_get_associativity(device);
	if (tmp)
		nid = associativity_to_nid(tmp);
	return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	int nid = NUMA_NO_NODE;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		device = of_get_next_parent(device);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL(of_node_to_nid);

static void __initialize_form1_numa_distance(const __be32 *associativity,
					     int max_array_sz)
{
	int i, nid;

	if (affinity_form != FORM1_AFFINITY)
		return;

	nid = __associativity_to_nid(associativity, max_array_sz);
	if (nid != NUMA_NO_NODE) {
		for (i = 0; i < distance_ref_points_depth; i++) {
			const __be32 *entry;
			int index = be32_to_cpu(distance_ref_points[i]) - 1;

			/*
			 * Broken hierarchy, return with a broken distance table.
			 */
			if (WARN(index >= max_array_sz, "Broken ibm,associativity property"))
				return;

			entry = &associativity[index];
			distance_lookup_table[nid][i] = of_read_number(entry, 1);
		}
	}
}

static void initialize_form1_numa_distance(const __be32 *associativity)
{
	int array_sz;

	array_sz = of_read_number(associativity, 1);
	/* Skip the first element in the associativity array */
	__initialize_form1_numa_distance(associativity + 1, array_sz);
}

/*
 * Used to update distance information w.r.t. a newly added node.
 */
void update_numa_distance(struct device_node *node)
{
	int nid;

	if (affinity_form == FORM0_AFFINITY)
		return;
	else if (affinity_form == FORM1_AFFINITY) {
		const __be32 *associativity;

		associativity = of_get_associativity(node);
		if (!associativity)
			return;

		initialize_form1_numa_distance(associativity);
		return;
	}

	/* FORM2 affinity */
	nid = of_node_to_nid_single(node);
	if (nid == NUMA_NO_NODE)
		return;

	/*
	 * With FORM2 we expect NUMA distance of all possible NUMA
	 * nodes to be provided during boot.
	 */
	WARN(numa_distance_table[nid][nid] == -1,
	     "NUMA distance details for node %d not provided\n", nid);
}
EXPORT_SYMBOL_GPL(update_numa_distance);
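/*
 * Form1 worked example (hypothetical values): with
 * "ibm,associativity-reference-points" = <4 2> (so
 * primary_domain_index = 4) and an "ibm,associativity" array body of
 * {0, 2, 1, 5}, the reference points select the 4th and 2nd entries
 * (1-based), so node 5's row in distance_lookup_table becomes {5, 2}.
 */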
/*
 * ibm,numa-lookup-index-table = {N, domainid1, domainid2, ..., domainidN}
 * ibm,numa-distance-table = {N, 1, 2, 4, 5, 1, 6, ..., N elements},
 * where N is expected to be max_numa_index * max_numa_index.
 */
static void __init initialize_form2_numa_distance_lookup_table(void)
{
	int i, j;
	struct device_node *root;
	const __u8 *form2_distances;
	const __be32 *numa_lookup_index;
	int form2_distances_length;
	int max_numa_index, distance_index;

	if (firmware_has_feature(FW_FEATURE_OPAL))
		root = of_find_node_by_path("/ibm,opal");
	else
		root = of_find_node_by_path("/rtas");
	if (!root)
		root = of_find_node_by_path("/");

	numa_lookup_index = of_get_property(root, "ibm,numa-lookup-index-table", NULL);
	max_numa_index = of_read_number(&numa_lookup_index[0], 1);

	/* first element of the array is the size and is encode-int */
	form2_distances = of_get_property(root, "ibm,numa-distance-table", NULL);
	form2_distances_length = of_read_number((const __be32 *)&form2_distances[0], 1);
	/* Skip the size which is encoded int */
	form2_distances += sizeof(__be32);

	pr_debug("form2_distances_len = %d, numa_dist_indexes_len = %d\n",
		 form2_distances_length, max_numa_index);

	for (i = 0; i < max_numa_index; i++)
		/* +1 skip the max_numa_index in the property */
		numa_id_index_table[i] = of_read_number(&numa_lookup_index[i + 1], 1);

	if (form2_distances_length != max_numa_index * max_numa_index) {
		WARN(1, "Wrong NUMA distance information\n");
		form2_distances = NULL; /* don't use it */
	}
	distance_index = 0;
	for (i = 0; i < max_numa_index; i++) {
		for (j = 0; j < max_numa_index; j++) {
			int nodeA = numa_id_index_table[i];
			int nodeB = numa_id_index_table[j];
			int dist;

			if (form2_distances)
				dist = form2_distances[distance_index++];
			else if (nodeA == nodeB)
				dist = LOCAL_DISTANCE;
			else
				dist = REMOTE_DISTANCE;
			numa_distance_table[nodeA][nodeB] = dist;
			pr_debug("dist[%d][%d]=%d ", nodeA, nodeB, dist);
		}
	}

	of_node_put(root);
}
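/*
 * Form2 worked example (hypothetical properties): with
 * ibm,numa-lookup-index-table = <2 0 8>, and an
 * ibm,numa-distance-table whose encode-int length is 4 followed by the
 * byte values {10, 40, 40, 10}, the two domain ids are 0 and 8 and the
 * 2x2 distance matrix is read row by row:
 * dist[0][0] = 10, dist[0][8] = 40, dist[8][0] = 40, dist[8][8] = 10.
 */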
static int __init find_primary_domain_index(void)
{
	int index;
	struct device_node *root;

	/*
	 * Check for which form of affinity.
	 */
	if (firmware_has_feature(FW_FEATURE_OPAL)) {
		affinity_form = FORM1_AFFINITY;
	} else if (firmware_has_feature(FW_FEATURE_FORM2_AFFINITY)) {
		pr_debug("Using form 2 affinity\n");
		affinity_form = FORM2_AFFINITY;
	} else if (firmware_has_feature(FW_FEATURE_FORM1_AFFINITY)) {
		pr_debug("Using form 1 affinity\n");
		affinity_form = FORM1_AFFINITY;
	} else
		affinity_form = FORM0_AFFINITY;

	if (firmware_has_feature(FW_FEATURE_OPAL))
		root = of_find_node_by_path("/ibm,opal");
	else
		root = of_find_node_by_path("/rtas");
	if (!root)
		root = of_find_node_by_path("/");

	/*
	 * This property is a set of 32-bit integers, each representing
	 * an index into the ibm,associativity nodes.
	 *
	 * With form 0 affinity the first integer is for an SMP configuration
	 * (should be all 0's) and the second is for a normal NUMA
	 * configuration. We have only one level of NUMA.
	 *
	 * With form 1 affinity the first integer is the most significant
	 * NUMA boundary and the following are progressively less significant
	 * boundaries. There can be more than one level of NUMA.
	 */
	distance_ref_points = of_get_property(root,
					      "ibm,associativity-reference-points",
					      &distance_ref_points_depth);

	if (!distance_ref_points) {
		pr_debug("ibm,associativity-reference-points not found.\n");
		goto err;
	}

	distance_ref_points_depth /= sizeof(int);
	if (affinity_form == FORM0_AFFINITY) {
		if (distance_ref_points_depth < 2) {
			pr_warn("short ibm,associativity-reference-points\n");
			goto err;
		}

		index = of_read_number(&distance_ref_points[1], 1);
	} else {
		/*
		 * Both FORM1 and FORM2 affinity find the primary domain details
		 * at the same offset.
		 */
		index = of_read_number(distance_ref_points, 1);
	}

	/*
	 * Warn and cap if the hardware supports more than
	 * MAX_DISTANCE_REF_POINTS domains.
	 */
	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
		pr_warn("distance array capped at %d entries\n",
			MAX_DISTANCE_REF_POINTS);
		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
	}

	of_node_put(root);
	return index;

err:
	of_node_put(root);
	return -1;
}

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}

static unsigned long read_n_cells(int n, const __be32 **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | of_read_number(*buf, 1);
		(*buf)++;
	}
	return result;
}
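/*
 * Worked example for read_n_cells(): with n = 2 and a buffer holding
 * the cells <0x1 0x80000000>, the two 32-bit cells are combined into
 * the 64-bit value 0x180000000 (6GB) and *buf is advanced past both
 * cells.
 */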
struct assoc_arrays {
	u32 n_arrays;
	u32 array_sz;
	const __be32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct assoc_arrays *aa)
{
	struct device_node *memory;
	const __be32 *prop;
	u32 len;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!memory)
		return -1;

	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int)) {
		of_node_put(memory);
		return -1;
	}

	aa->n_arrays = of_read_number(prop++, 1);
	aa->array_sz = of_read_number(prop++, 1);

	of_node_put(memory);

	/*
	 * Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	 */
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
		return -1;

	aa->arrays = prop;
	return 0;
}

static int __init get_nid_and_numa_distance(struct drmem_lmb *lmb)
{
	struct assoc_arrays aa = { .arrays = NULL };
	int default_nid = NUMA_NO_NODE;
	int nid = default_nid;
	int rc, index;

	if ((primary_domain_index < 0) || !numa_enabled)
		return default_nid;

	rc = of_get_assoc_arrays(&aa);
	if (rc)
		return default_nid;

	if (primary_domain_index <= aa.array_sz &&
	    !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) {
		const __be32 *associativity;

		index = lmb->aa_index * aa.array_sz;
		associativity = &aa.arrays[index];
		nid = __associativity_to_nid(associativity, aa.array_sz);
		if (nid > 0 && affinity_form == FORM1_AFFINITY) {
			/*
			 * lookup array associativity entries have
			 * no length of the array as the first element.
			 */
			__initialize_form1_numa_distance(associativity, aa.array_sz);
		}
	}
	return nid;
}

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
int of_drconf_to_nid_single(struct drmem_lmb *lmb)
{
	struct assoc_arrays aa = { .arrays = NULL };
	int default_nid = NUMA_NO_NODE;
	int nid = default_nid;
	int rc, index;

	if ((primary_domain_index < 0) || !numa_enabled)
		return default_nid;

	rc = of_get_assoc_arrays(&aa);
	if (rc)
		return default_nid;

	if (primary_domain_index <= aa.array_sz &&
	    !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) {
		const __be32 *associativity;

		index = lmb->aa_index * aa.array_sz;
		associativity = &aa.arrays[index];
		nid = __associativity_to_nid(associativity, aa.array_sz);
	}
	return nid;
}
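/*
 * Hypothetical example: ibm,associativity-lookup-arrays =
 * <2 4  0 0 0 0  0 0 0 8> describes N = 2 arrays of M = 4 entries
 * each. An LMB with aa_index = 1 selects the second array
 * {0, 0, 0, 8}, and with primary_domain_index = 4 that LMB maps to
 * nid 8.
 */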
#ifdef CONFIG_PPC_SPLPAR

static int __vphn_get_associativity(long lcpu, __be32 *associativity)
{
	long rc, hwid;

	/*
	 * On a shared lpar, the device tree will not have node
	 * associativity. At this time lppaca, or its __old_status field,
	 * may not be updated, hence the kernel cannot detect whether it is
	 * on a shared lpar. So request an explicit associativity
	 * irrespective of whether the lpar is shared or dedicated. Use the
	 * device tree property as a fallback. cpu_to_phys_id is only valid
	 * between smp_setup_cpu_maps() and smp_setup_pacas().
	 */
	if (firmware_has_feature(FW_FEATURE_VPHN)) {
		if (cpu_to_phys_id)
			hwid = cpu_to_phys_id[lcpu];
		else
			hwid = get_hard_smp_processor_id(lcpu);

		rc = hcall_vphn(hwid, VPHN_FLAG_VCPU, associativity);
		if (rc == H_SUCCESS)
			return 0;
	}

	return -1;
}

static int vphn_get_nid(long lcpu)
{
	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};

	if (!__vphn_get_associativity(lcpu, associativity))
		return associativity_to_nid(associativity);

	return NUMA_NO_NODE;
}
#else

static int __vphn_get_associativity(long lcpu, __be32 *associativity)
{
	return -1;
}

static int vphn_get_nid(long unused)
{
	return NUMA_NO_NODE;
}
#endif /* CONFIG_PPC_SPLPAR */

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
	struct device_node *cpu;
	int fcpu = cpu_first_thread_sibling(lcpu);
	int nid = NUMA_NO_NODE;

	if (!cpu_present(lcpu)) {
		set_cpu_numa_node(lcpu, first_online_node);
		return first_online_node;
	}

	/*
	 * If a valid cpu-to-node mapping is already available, use it
	 * directly instead of querying the firmware, since it represents
	 * the most recent mapping notified to us by the platform (eg: VPHN).
	 * Since the cpu-to-node binding remains the same for all threads in
	 * a core, checking the mapping of the first thread in the core is
	 * sufficient.
	 */
	nid = numa_cpu_lookup_table[fcpu];
	if (nid >= 0) {
		map_cpu_to_node(lcpu, nid);
		return nid;
	}

	nid = vphn_get_nid(lcpu);
	if (nid != NUMA_NO_NODE)
		goto out_present;

	cpu = of_get_cpu_node(lcpu, NULL);

	if (!cpu) {
		WARN_ON(1);
		if (cpu_present(lcpu))
			goto out_present;
		else
			goto out;
	}

	nid = of_node_to_nid_single(cpu);
	of_node_put(cpu);

out_present:
	if (nid < 0 || !node_possible(nid))
		nid = first_online_node;

	/*
	 * Update the mapping for the first thread of the core as well. All
	 * threads of a core have to be part of the same node. This not only
	 * avoids querying the firmware for every other thread in the core,
	 * but also avoids a case where a virtual node associativity change
	 * causes subsequent threads of a core to be associated with a
	 * different nid. However, if the first thread is already online,
	 * expect it to have a valid mapping.
	 */
	if (fcpu != lcpu) {
		WARN_ON(cpu_online(fcpu));
		map_cpu_to_node(fcpu, nid);
	}

	map_cpu_to_node(lcpu, nid);
out:
	return nid;
}

static void verify_cpu_node_mapping(int cpu, int node)
{
	int base, sibling, i;

	/* Verify that all the threads in the core belong to the same node */
	base = cpu_first_thread_sibling(cpu);

	for (i = 0; i < threads_per_core; i++) {
		sibling = base + i;

		if (sibling == cpu || cpu_is_offline(sibling))
			continue;

		if (cpu_to_node(sibling) != node) {
			WARN(1, "CPU thread siblings %d and %d don't belong to the same node!\n",
			     cpu, sibling);
			break;
		}
	}
}

/* Must run before sched domains notifier. */
static int ppc_numa_cpu_prepare(unsigned int cpu)
{
	int nid;

	nid = numa_setup_cpu(cpu);
	verify_cpu_node_mapping(cpu, nid);
	return 0;
}

static int ppc_numa_cpu_dead(unsigned int cpu)
{
	return 0;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit. Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	 */
	if (start + size <= memblock_end_of_DRAM())
		return size;

	if (start >= memblock_end_of_DRAM())
		return 0;

	return memblock_end_of_DRAM() - start;
}
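/*
 * Worked example (hypothetical addresses): if memblock_end_of_DRAM()
 * is 4GB, a region starting at 3GB with size 2GB is truncated to 1GB,
 * and a region starting at 5GB is discarded entirely (size 0).
 */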
/*
 * Reads the counter for a given entry in the
 * linux,drconf-usable-memory property.
 */
static inline int __init read_usm_ranges(const __be32 **usm)
{
	/*
	 * For each lmb in ibm,dynamic-memory, a corresponding
	 * entry in the linux,drconf-usable-memory property contains
	 * a counter followed by that many (base, size) pairs.
	 * Read the counter from linux,drconf-usable-memory.
	 */
	return read_n_cells(n_mem_size_cells, usm);
}

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node. This assumes n_mem_{addr,size}_cells have been set.
 */
static int __init numa_setup_drmem_lmb(struct drmem_lmb *lmb,
				       const __be32 **usm,
				       void *data)
{
	unsigned int ranges, is_kexec_kdump = 0;
	unsigned long base, size, sz;
	int nid;

	/*
	 * Skip this block if the reserved bit is set in flags (0x80)
	 * or if the block is not assigned to this partition (0x8)
	 */
	if ((lmb->flags & DRCONF_MEM_RESERVED)
	    || !(lmb->flags & DRCONF_MEM_ASSIGNED))
		return 0;

	if (*usm)
		is_kexec_kdump = 1;

	base = lmb->base_addr;
	size = drmem_lmb_size();
	ranges = 1;

	if (is_kexec_kdump) {
		ranges = read_usm_ranges(usm);
		if (!ranges) /* there are no (base, size) pairs */
			return 0;
	}

	do {
		if (is_kexec_kdump) {
			base = read_n_cells(n_mem_addr_cells, usm);
			size = read_n_cells(n_mem_size_cells, usm);
		}

		nid = get_nid_and_numa_distance(lmb);
		fake_numa_create_new_node(((base + size) >> PAGE_SHIFT),
					  &nid);
		node_set_online(nid);
		sz = numa_enforce_memory_limit(base, size);
		if (sz)
			memblock_set_node(base, sz, &memblock.memory, nid);
	} while (--ranges);

	return 0;
}
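/*
 * Hypothetical example: for an LMB whose linux,drconf-usable-memory
 * entry is <2  base1 size1  base2 size2>, read_usm_ranges() returns 2
 * and the loop above registers the two (base, size) sub-ranges with
 * memblock instead of the full LMB.
 */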
static int __init parse_numa_properties(void)
{
	struct device_node *memory;
	int default_nid = 0;
	unsigned long i;
	const __be32 *associativity;

	if (numa_enabled == 0) {
		pr_warn("disabled by user\n");
		return -1;
	}

	primary_domain_index = find_primary_domain_index();

	if (primary_domain_index < 0) {
		/*
		 * If we fail to parse primary_domain_index from the device
		 * tree, mark NUMA disabled and boot with NUMA disabled.
		 */
		numa_enabled = false;
		return primary_domain_index;
	}

	pr_debug("associativity depth for CPU/Memory: %d\n", primary_domain_index);

	/*
	 * If it is FORM2 initialize the distance table here.
	 */
	if (affinity_form == FORM2_AFFINITY)
		initialize_form2_numa_distance_lookup_table();

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		__be32 vphn_assoc[VPHN_ASSOC_BUFSIZE];
		struct device_node *cpu;
		int nid = NUMA_NO_NODE;

		memset(vphn_assoc, 0, VPHN_ASSOC_BUFSIZE * sizeof(__be32));

		if (__vphn_get_associativity(i, vphn_assoc) == 0) {
			nid = associativity_to_nid(vphn_assoc);
			initialize_form1_numa_distance(vphn_assoc);
		} else {
			/*
			 * Don't fall back to default_nid yet -- we will plug
			 * cpus into nodes once the memory scan has discovered
			 * the topology.
			 */
			cpu = of_get_cpu_node(i, NULL);
			BUG_ON(!cpu);

			associativity = of_get_associativity(cpu);
			if (associativity) {
				nid = associativity_to_nid(associativity);
				initialize_form1_numa_distance(associativity);
			}
			of_node_put(cpu);
		}

		/* node_set_online() is UB if 'nid' is negative */
		if (likely(nid >= 0))
			node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

	for_each_node_by_type(memory, "memory") {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const __be32 *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
					      "linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cells */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties. If none, then
		 * everything goes to default_nid.
		 */
		associativity = of_get_associativity(memory);
		if (associativity) {
			nid = associativity_to_nid(associativity);
			initialize_form1_numa_distance(associativity);
		} else
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		size = numa_enforce_memory_limit(start, size);
		if (size)
			memblock_set_node(start, size, &memblock.memory, nid);

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each MEMBLOCK listed in the
	 * ibm,dynamic-memory property in the
	 * ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		walk_drmem_lmbs(memory, NULL, numa_setup_drmem_lmb);
		of_node_put(memory);
	}

	return 0;
}
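/*
 * Worked example for the "reg" walk above (hypothetical cell counts):
 * with n_mem_addr_cells = 2 and n_mem_size_cells = 2, a 32-byte "reg"
 * property holds len >> 2 = 8 cells, i.e. 8 / (2 + 2) = 2
 * (start, size) ranges.
 */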
static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int nid = 0;
	int i;

	pr_debug("Top of RAM: 0x%lx, Total RAM: 0x%lx\n", top_of_ram, total_ram);
	pr_debug("Memory hole size: %ldMB\n", (top_of_ram - total_ram) >> 20);

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		fake_numa_create_new_node(end_pfn, &nid);
		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn),
				  &memblock.memory, nid);
		node_set_online(nid);
	}
}

void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (!numa_enabled)
		return;

	for_each_online_node(node) {
		pr_info("Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (cpumask_test_cpu(cpu,
					     node_to_cpumask_map[node])) {
				if (count == 0)
					pr_cont(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					pr_cont("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			pr_cont("-%u", nr_cpu_ids - 1);
		pr_cont("\n");
	}
}

/* Initialize NODE_DATA for a node on the local memory */
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
	u64 spanned_pages = end_pfn - start_pfn;
	const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
	u64 nd_pa;
	void *nd;
	int tnid;

	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
	if (!nd_pa)
		panic("Cannot allocate %zu bytes for node %d data\n",
		      nd_size, nid);

	nd = __va(nd_pa);

	/* report and initialize */
	pr_info("  NODE_DATA [mem %#010Lx-%#010Lx]\n",
		nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		pr_info("  NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
	NODE_DATA(nid)->node_id = nid;
	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = spanned_pages;
}
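/*
 * Example output of dump_numa_cpu_topology() (hypothetical 32-CPU
 * topology): contiguous CPU ids are coalesced into ranges, so the log
 * shows something like "Node 0 CPUs: 0-15" and "Node 1 CPUs: 16-31".
 */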
static void __init find_possible_nodes(void)
{
	struct device_node *rtas;
	const __be32 *domains = NULL;
	int prop_length, max_nodes;
	u32 i;

	if (!numa_enabled)
		return;

	rtas = of_find_node_by_path("/rtas");
	if (!rtas)
		return;

	/*
	 * ibm,current-associativity-domains is a fairly recent property. If
	 * it doesn't exist, then fall back on ibm,max-associativity-domains.
	 * Current denotes what the platform can support compared to max
	 * which denotes what the Hypervisor can support.
	 *
	 * If the LPAR is migratable, new nodes might be activated after an
	 * LPM, so we should consider the max number in that case.
	 */
	if (!of_get_property(of_root, "ibm,migratable-partition", NULL))
		domains = of_get_property(rtas,
					  "ibm,current-associativity-domains",
					  &prop_length);
	if (!domains) {
		domains = of_get_property(rtas, "ibm,max-associativity-domains",
					  &prop_length);
		if (!domains)
			goto out;
	}

	max_nodes = of_read_number(&domains[primary_domain_index], 1);
	pr_info("Partition configured for %d NUMA nodes.\n", max_nodes);

	for (i = 0; i < max_nodes; i++) {
		if (!node_possible(i))
			node_set(i, node_possible_map);
	}

	prop_length /= sizeof(int);
	if (prop_length > primary_domain_index + 2)
		coregroup_enabled = 1;

out:
	of_node_put(rtas);
}

void __init mem_topology_setup(void)
{
	int cpu;

	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;

	/*
	 * Linux/mm assumes node 0 to be online at boot. However this is not
	 * true on PowerPC, where node 0 is similar to any other node: it
	 * could be a cpuless, memoryless node. So force node 0 to be offline
	 * for now. This will prevent a cpuless, memoryless node 0 showing up
	 * unnecessarily as online. If a node has cpus or memory that need
	 * to be online, then the node will anyway be marked online.
	 */
	node_set_offline(0);

	if (parse_numa_properties())
		setup_nonnuma();

	/*
	 * Modify the set of possible NUMA nodes to reflect information
	 * available about the set of online nodes, and the set of nodes
	 * that we expect to make use of for this platform's affinity
	 * calculations.
	 */
	nodes_and(node_possible_map, node_possible_map, node_online_map);

	find_possible_nodes();

	setup_node_to_cpumask_map();

	reset_numa_cpu_lookup_table();

	for_each_possible_cpu(cpu) {
		/*
		 * Powerpc with CONFIG_NUMA always used to have a node 0,
		 * even if it was memoryless or cpuless. For all cpus that
		 * are possible but not present, cpu_to_node() would point
		 * to node 0. To remove a cpuless, memoryless dummy node,
		 * powerpc needs to make sure all possible but not present
		 * cpu_to_node are set to a proper node.
		 */
		numa_setup_cpu(cpu);
	}
}
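/*
 * Hypothetical example for find_possible_nodes(): with
 * primary_domain_index = 4 and ibm,current-associativity-domains =
 * <6 1 2 4 8 16 32>, the cell at index 4 caps the partition at 8 NUMA
 * nodes, and since the property holds 7 cells
 * (> primary_domain_index + 2), coregroup support is enabled.
 */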
void __init initmem_init(void)
{
	int nid;

	memblock_dump_all();

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
		setup_node_data(nid, start_pfn, end_pfn);
	}

	sparse_init();

	/*
	 * We need the numa_cpu_lookup_table to be accurate for all CPUs,
	 * even before we online them, so that we can use cpu_to_{node,mem}
	 * early in boot, cf. smp_prepare_cpus().
	 * _nocalls() + manual invocation is used because cpuhp is not yet
	 * initialized for the boot CPU.
	 */
	cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "powerpc/numa:prepare",
				  ppc_numa_cpu_prepare, ppc_numa_cpu_dead);
}

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(unsigned long scn_addr)
{
	struct drmem_lmb *lmb;
	unsigned long lmb_size;
	int nid = NUMA_NO_NODE;

	lmb_size = drmem_lmb_size();

	for_each_drmem_lmb(lmb) {
		/*
		 * Skip this block if it is reserved or not assigned to
		 * this partition.
		 */
		if ((lmb->flags & DRCONF_MEM_RESERVED)
		    || !(lmb->flags & DRCONF_MEM_ASSIGNED))
			continue;

		if ((scn_addr < lmb->base_addr)
		    || (scn_addr >= (lmb->base_addr + lmb_size)))
			continue;

		nid = of_drconf_to_nid_single(lmb);
		break;
	}

	return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
static int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory;
	int nid = NUMA_NO_NODE;

	for_each_node_by_type(memory, "memory") {
		int i = 0;

		while (1) {
			struct resource res;

			if (of_address_to_resource(memory, i++, &res))
				break;

			if ((scn_addr < res.start) || (scn_addr > res.end))
				continue;

			nid = of_node_to_nid_single(memory);
			break;
		}

		if (nid >= 0)
			break;
	}

	of_node_put(memory);

	return nid;
}

/*
 * Find the node associated with a hot added memory section. Section
 * corresponds to a SPARSEMEM section, not a memblock. It is assumed
 * that sections are fully contained within a single memblock.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid;

	if (!numa_enabled)
		return first_online_node;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(scn_addr);
		of_node_put(memory);
	} else {
		nid = hot_add_node_scn_to_nid(scn_addr);
	}

	if (nid < 0 || !node_possible(nid))
		nid = first_online_node;

	return nid;
}
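/*
 * Upper bound for hot-pluggable memory: prefer the first 64-bit value
 * of ibm,lrdr-capacity when /rtas provides it (per PAPR, that value
 * encodes the maximum memory the partition may grow to); otherwise
 * fall back to the extent of the drconf memory described in the
 * device tree.
 */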
static u64 hot_add_drconf_memory_max(void)
{
	struct device_node *memory = NULL;
	struct device_node *dn = NULL;
	const __be64 *lrdr = NULL;

	dn = of_find_node_by_path("/rtas");
	if (dn) {
		lrdr = of_get_property(dn, "ibm,lrdr-capacity", NULL);
		of_node_put(dn);
		if (lrdr)
			return be64_to_cpup(lrdr);
	}

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		of_node_put(memory);
		return drmem_lmb_memory_max();
	}
	return 0;
}

/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
static int topology_inited;

/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long vphn_get_associativity(unsigned long cpu,
				   __be32 *associativity)
{
	long rc;

	rc = hcall_vphn(get_hard_smp_processor_id(cpu),
			VPHN_FLAG_VCPU, associativity);

	switch (rc) {
	case H_SUCCESS:
		pr_debug("VPHN hcall succeeded. Reset polling...\n");
		goto out;
	case H_FUNCTION:
		pr_err_ratelimited("VPHN unsupported. Disabling polling...\n");
		break;
	case H_HARDWARE:
		pr_err_ratelimited("hcall_vphn() experienced a hardware fault preventing VPHN. Disabling polling...\n");
		break;
	case H_PARAMETER:
		pr_err_ratelimited("hcall_vphn() was passed an invalid parameter. Disabling polling...\n");
		break;
	default:
		pr_err_ratelimited("hcall_vphn() returned %ld. Disabling polling...\n",
				   rc);
		break;
	}
out:
	return rc;
}

void find_and_update_cpu_nid(int cpu)
{
	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
	int new_nid;

	/* Use associativity from first thread for all siblings */
	if (vphn_get_associativity(cpu, associativity))
		return;

	/* Do not have previous associativity, so find it now. */
	new_nid = associativity_to_nid(associativity);

	if (new_nid < 0 || !node_possible(new_nid))
		new_nid = first_online_node;
	else
		/*
		 * Associate node <-> cpu, so cpu_up() calls
		 * try_online_node() on the right node.
		 */
		set_cpu_numa_node(cpu, new_nid);

	pr_debug("%s:%d cpu %d nid %d\n", __func__, __LINE__, cpu, new_nid);
}

int cpu_to_coregroup_id(int cpu)
{
	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
	int index;

	/* Valid cpu ids are 0..nr_cpu_ids - 1 */
	if (cpu < 0 || cpu >= nr_cpu_ids)
		return -1;

	if (!coregroup_enabled)
		goto out;

	if (!firmware_has_feature(FW_FEATURE_VPHN))
		goto out;

	if (vphn_get_associativity(cpu, associativity))
		goto out;

	index = of_read_number(associativity, 1);
	if (index > primary_domain_index + 1)
		return of_read_number(&associativity[index - 1], 1);

out:
	return cpu_to_core_id(cpu);
}

static int topology_update_init(void)
{
	topology_inited = 1;
	return 0;
}
device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */