// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * acpi_numa.c - ACPI NUMA support
 *
 * Copyright (C) 2002 Takayoshi Kochi <t-kochi@bq.jp.nec.com>
 */

#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/memblock.h>
#include <linux/numa.h>
#include <linux/nodemask.h>
#include <linux/topology.h>

static nodemask_t nodes_found_map = NODE_MASK_NONE;

/* maps to convert between proximity domain and logical node ID */
static int pxm_to_node_map[MAX_PXM_DOMAINS]
			= { [0 ... MAX_PXM_DOMAINS - 1] = NUMA_NO_NODE };
static int node_to_pxm_map[MAX_NUMNODES]
			= { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };

unsigned char acpi_srat_revision __initdata;
static int acpi_numa __initdata;

static int last_real_pxm;

void __init disable_srat(void)
{
	acpi_numa = -1;
}

int pxm_to_node(int pxm)
{
	if (pxm < 0 || pxm >= MAX_PXM_DOMAINS || numa_off)
		return NUMA_NO_NODE;
	return pxm_to_node_map[pxm];
}
EXPORT_SYMBOL(pxm_to_node);

int node_to_pxm(int node)
{
	if (node < 0)
		return PXM_INVAL;
	return node_to_pxm_map[node];
}

static void __acpi_map_pxm_to_node(int pxm, int node)
{
	if (pxm_to_node_map[pxm] == NUMA_NO_NODE || node < pxm_to_node_map[pxm])
		pxm_to_node_map[pxm] = node;
	if (node_to_pxm_map[node] == PXM_INVAL || pxm < node_to_pxm_map[node])
		node_to_pxm_map[node] = pxm;
}

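/*
 * Map a proximity domain to a logical node ID.  Returns the node already
 * associated with @pxm, or allocates the first unused logical node and
 * records the mapping on first use.  Returns NUMA_NO_NODE if @pxm is out
 * of range, NUMA is disabled, or all MAX_NUMNODES nodes are in use.
 */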
int acpi_map_pxm_to_node(int pxm)
{
	int node;

	if (pxm < 0 || pxm >= MAX_PXM_DOMAINS || numa_off)
		return NUMA_NO_NODE;

	node = pxm_to_node_map[pxm];

	if (node == NUMA_NO_NODE) {
		node = first_unset_node(nodes_found_map);
		if (node >= MAX_NUMNODES)
			return NUMA_NO_NODE;
		__acpi_map_pxm_to_node(pxm, node);
		node_set(node, nodes_found_map);
	}

	return node;
}
EXPORT_SYMBOL(acpi_map_pxm_to_node);

static void __init
acpi_table_print_srat_entry(struct acpi_subtable_header *header)
{
	switch (header->type) {
	case ACPI_SRAT_TYPE_CPU_AFFINITY:
	{
		struct acpi_srat_cpu_affinity *p =
			(struct acpi_srat_cpu_affinity *)header;
		pr_debug("SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n",
			 p->apic_id, p->local_sapic_eid,
			 p->proximity_domain_lo,
			 (p->flags & ACPI_SRAT_CPU_ENABLED) ?
			 "enabled" : "disabled");
	}
	break;

	case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
	{
		struct acpi_srat_mem_affinity *p =
			(struct acpi_srat_mem_affinity *)header;
		pr_debug("SRAT Memory (0x%llx length 0x%llx) in proximity domain %d %s%s%s\n",
			 (unsigned long long)p->base_address,
			 (unsigned long long)p->length,
			 p->proximity_domain,
			 (p->flags & ACPI_SRAT_MEM_ENABLED) ?
			 "enabled" : "disabled",
			 (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ?
			 " hot-pluggable" : "",
			 (p->flags & ACPI_SRAT_MEM_NON_VOLATILE) ?
			 " non-volatile" : "");
	}
	break;

	case ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY:
	{
		struct acpi_srat_x2apic_cpu_affinity *p =
			(struct acpi_srat_x2apic_cpu_affinity *)header;
		pr_debug("SRAT Processor (x2apicid[0x%08x]) in proximity domain %d %s\n",
			 p->apic_id,
			 p->proximity_domain,
			 (p->flags & ACPI_SRAT_CPU_ENABLED) ?
			 "enabled" : "disabled");
	}
	break;

	case ACPI_SRAT_TYPE_GICC_AFFINITY:
	{
		struct acpi_srat_gicc_affinity *p =
			(struct acpi_srat_gicc_affinity *)header;
		pr_debug("SRAT Processor (acpi id[0x%04x]) in proximity domain %d %s\n",
			 p->acpi_processor_uid,
			 p->proximity_domain,
			 (p->flags & ACPI_SRAT_GICC_ENABLED) ?
			 "enabled" : "disabled");
	}
	break;

	case ACPI_SRAT_TYPE_GENERIC_AFFINITY:
	{
		struct acpi_srat_generic_affinity *p =
			(struct acpi_srat_generic_affinity *)header;

		if (p->device_handle_type == 0) {
			/*
			 * For PCI devices this may be the only place they
			 * are assigned a proximity domain.
			 */
			pr_debug("SRAT Generic Initiator(Seg:%u BDF:%u) in proximity domain %d %s\n",
				 *(u16 *)(&p->device_handle[0]),
				 *(u16 *)(&p->device_handle[2]),
				 p->proximity_domain,
				 (p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED) ?
				 "enabled" : "disabled");
		} else {
			/*
			 * In this case we can rely on the device having a
			 * proximity domain reference.
			 */
			pr_debug("SRAT Generic Initiator(HID=%.8s UID=%.4s) in proximity domain %d %s\n",
				 (char *)(&p->device_handle[0]),
				 (char *)(&p->device_handle[8]),
				 p->proximity_domain,
				 (p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED) ?
				 "enabled" : "disabled");
		}
	}
	break;

	case ACPI_SRAT_TYPE_RINTC_AFFINITY:
	{
		struct acpi_srat_rintc_affinity *p =
			(struct acpi_srat_rintc_affinity *)header;
		pr_debug("SRAT Processor (acpi id[0x%04x]) in proximity domain %d %s\n",
			 p->acpi_processor_uid,
			 p->proximity_domain,
			 (p->flags & ACPI_SRAT_RINTC_ENABLED) ?
			 "enabled" : "disabled");
	}
	break;

	default:
		pr_warn("Found unsupported SRAT entry (type = 0x%x)\n",
			header->type);
		break;
	}
}

/*
 * A lot of BIOSes fill in 10 (= no distance) everywhere.  This messes
 * up the NUMA heuristics, which want the local node to have a smaller
 * distance than the others.
 * Do some quick checks here and only use the SLIT if it passes.
 */
static int __init slit_valid(struct acpi_table_slit *slit)
{
	int i, j;
	int d = slit->locality_count;
	for (i = 0; i < d; i++) {
		for (j = 0; j < d; j++) {
			u8 val = slit->entry[d*i + j];
			if (i == j) {
				if (val != LOCAL_DISTANCE)
					return 0;
			} else if (val <= LOCAL_DISTANCE)
				return 0;
		}
	}
	return 1;
}

void __init bad_srat(void)
{
	pr_err("SRAT: SRAT not used.\n");
	disable_srat();
}

int __init srat_disabled(void)
{
	return acpi_numa < 0;
}

__weak int __init numa_fill_memblks(u64 start, u64 end)
{
	return NUMA_NO_MEMBLK;
}

/*
 * Callback for SLIT parsing.  pxm_to_node() returns NUMA_NO_NODE for
 * I/O localities since SRAT does not list them.  I/O localities are
 * not supported at this point.
 */
static int __init acpi_parse_slit(struct acpi_table_header *table)
{
	struct acpi_table_slit *slit = (struct acpi_table_slit *)table;
	int i, j;

	if (!slit_valid(slit)) {
		pr_info("SLIT table looks invalid. Not used.\n");
		return -EINVAL;
	}

	for (i = 0; i < slit->locality_count; i++) {
		const int from_node = pxm_to_node(i);

		if (from_node == NUMA_NO_NODE)
			continue;

		for (j = 0; j < slit->locality_count; j++) {
			const int to_node = pxm_to_node(j);

			if (to_node == NUMA_NO_NODE)
				continue;

			numa_set_distance(from_node, to_node,
				slit->entry[slit->locality_count * i + j]);
		}
	}

	return 0;
}

static int parsed_numa_memblks __initdata;

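/*
 * Callback for SRAT memory affinity entries: map the proximity domain to a
 * logical node, add the [start, end) range as a memblk on that node, and
 * mark hot-pluggable ranges in memblock.  Malformed entries disable the
 * SRAT via bad_srat(), but 0 is still returned so table parsing continues.
 */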
static int __init
acpi_parse_memory_affinity(union acpi_subtable_headers *header,
			   const unsigned long table_end)
{
	struct acpi_srat_mem_affinity *ma;
	u64 start, end;
	u32 hotpluggable;
	int node, pxm;

	ma = (struct acpi_srat_mem_affinity *)header;

	acpi_table_print_srat_entry(&header->common);

	if (srat_disabled())
		return 0;
	if (ma->header.length < sizeof(struct acpi_srat_mem_affinity)) {
		pr_err("SRAT: Unexpected header length: %d\n",
		       ma->header.length);
		goto out_err_bad_srat;
	}
	if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
		return 0;
	hotpluggable = IS_ENABLED(CONFIG_MEMORY_HOTPLUG) &&
		(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE);

	start = ma->base_address;
	end = start + ma->length;
	pxm = ma->proximity_domain;
	if (acpi_srat_revision <= 1)
		pxm &= 0xff;

	node = acpi_map_pxm_to_node(pxm);
	if (node == NUMA_NO_NODE) {
		pr_err("SRAT: Too many proximity domains.\n");
		goto out_err_bad_srat;
	}

	if (numa_add_memblk(node, start, end) < 0) {
		pr_err("SRAT: Failed to add memblk to node %u [mem %#010Lx-%#010Lx]\n",
		       node, (unsigned long long) start,
		       (unsigned long long) end - 1);
		goto out_err_bad_srat;
	}

	node_set(node, numa_nodes_parsed);

	pr_info("SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]%s%s\n",
		node, pxm,
		(unsigned long long) start, (unsigned long long) end - 1,
		hotpluggable ? " hotplug" : "",
		ma->flags & ACPI_SRAT_MEM_NON_VOLATILE ? " non-volatile" : "");

	/* Mark hotplug range in memblock. */
	if (hotpluggable && memblock_mark_hotplug(start, ma->length))
		pr_warn("SRAT: Failed to mark hotplug range [mem %#010Lx-%#010Lx] in memblock\n",
			(unsigned long long)start, (unsigned long long)end - 1);

	max_possible_pfn = max(max_possible_pfn, PFN_UP(end - 1));

	parsed_numa_memblks++;

	return 0;

out_err_bad_srat:
	/* Just disable SRAT, but do not fail and ignore errors. */
	bad_srat();

	return 0;
}

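/*
 * Callback for CEDT CFMWS entries: make sure every CXL fixed memory window
 * ends up on a NUMA node, either by extending memblks the SRAT already
 * described for part of the window or, failing that, by creating a new
 * node from the next unused (fake) proximity domain.
 */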
static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
				   void *arg, const unsigned long table_end)
{
	struct acpi_cedt_cfmws *cfmws;
	int *fake_pxm = arg;
	u64 start, end;
	int node;

	cfmws = (struct acpi_cedt_cfmws *)header;
	start = cfmws->base_hpa;
	end = cfmws->base_hpa + cfmws->window_size;

	/*
	 * The SRAT may have already described NUMA details for all,
	 * or a portion of, this CFMWS HPA range.  Extend the memblks
	 * found for any portion of the window to cover the entire
	 * window.
	 */
	if (!numa_fill_memblks(start, end))
		return 0;

	/* No SRAT description.  Create a new node. */
	node = acpi_map_pxm_to_node(*fake_pxm);

	if (node == NUMA_NO_NODE) {
		pr_err("ACPI NUMA: Too many proximity domains while processing CFMWS.\n");
		return -EINVAL;
	}

	if (numa_add_memblk(node, start, end) < 0) {
		/* CXL driver must handle the NUMA_NO_NODE case */
		pr_warn("ACPI NUMA: Failed to add memblk for CFMWS node %d [mem %#llx-%#llx]\n",
			node, start, end);
	}
	node_set(node, numa_nodes_parsed);

	/* Set the next available fake_pxm value */
	(*fake_pxm)++;
	return 0;
}

void __init __weak
acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
{
	pr_warn("Found unsupported x2apic [0x%08x] SRAT entry\n", pa->apic_id);
}

static int __init
acpi_parse_x2apic_affinity(union acpi_subtable_headers *header,
			   const unsigned long end)
{
	struct acpi_srat_x2apic_cpu_affinity *processor_affinity;

	processor_affinity = (struct acpi_srat_x2apic_cpu_affinity *)header;

	acpi_table_print_srat_entry(&header->common);

	/* let the architecture-dependent part do it */
	acpi_numa_x2apic_affinity_init(processor_affinity);

	return 0;
}

static int __init
acpi_parse_processor_affinity(union acpi_subtable_headers *header,
			      const unsigned long end)
{
	struct acpi_srat_cpu_affinity *processor_affinity;

	processor_affinity = (struct acpi_srat_cpu_affinity *)header;

	acpi_table_print_srat_entry(&header->common);

	/* let the architecture-dependent part do it */
	acpi_numa_processor_affinity_init(processor_affinity);

	return 0;
}

static int __init
acpi_parse_gicc_affinity(union acpi_subtable_headers *header,
			 const unsigned long end)
{
	struct acpi_srat_gicc_affinity *processor_affinity;

	processor_affinity = (struct acpi_srat_gicc_affinity *)header;

	acpi_table_print_srat_entry(&header->common);

	/* let the architecture-dependent part do it */
	acpi_numa_gicc_affinity_init(processor_affinity);

	return 0;
}

#if defined(CONFIG_X86) || defined(CONFIG_ARM64)
static int __init
acpi_parse_gi_affinity(union acpi_subtable_headers *header,
		       const unsigned long end)
{
	struct acpi_srat_generic_affinity *gi_affinity;
	int node;

	gi_affinity = (struct acpi_srat_generic_affinity *)header;
	if (!gi_affinity)
		return -EINVAL;
	acpi_table_print_srat_entry(&header->common);

	if (!(gi_affinity->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED))
		return -EINVAL;

	node = acpi_map_pxm_to_node(gi_affinity->proximity_domain);
	if (node == NUMA_NO_NODE) {
		pr_err("SRAT: Too many proximity domains.\n");
		return -EINVAL;
	}
	node_set(node, numa_nodes_parsed);
	node_set_state(node, N_GENERIC_INITIATOR);

	return 0;
}
#else
static int __init
acpi_parse_gi_affinity(union acpi_subtable_headers *header,
		       const unsigned long end)
{
	return 0;
}
#endif /* defined(CONFIG_X86) || defined(CONFIG_ARM64) */

static int __init
acpi_parse_rintc_affinity(union acpi_subtable_headers *header,
			  const unsigned long end)
{
	struct acpi_srat_rintc_affinity *rintc_affinity;

	rintc_affinity = (struct acpi_srat_rintc_affinity *)header;
	acpi_table_print_srat_entry(&header->common);

	/* let the architecture-dependent part do it */
	acpi_numa_rintc_affinity_init(rintc_affinity);

	return 0;
}

static int __init acpi_parse_srat(struct acpi_table_header *table)
{
	struct acpi_table_srat *srat = (struct acpi_table_srat *)table;

	acpi_srat_revision = srat->header.revision;

	/* Real work done in acpi_table_parse_srat below. */

	return 0;
}

static int __init
acpi_table_parse_srat(enum acpi_srat_type id,
		      acpi_tbl_entry_handler handler, unsigned int max_entries)
{
	return acpi_table_parse_entries(ACPI_SIG_SRAT,
					sizeof(struct acpi_table_srat), id,
					handler, max_entries);
}

int __init acpi_numa_init(void)
{
	int i, fake_pxm, cnt = 0;

	if (acpi_disabled)
		return -EINVAL;

	/*
	 * Do not limit the number of entries by the CPU count from NR_CPUS
	 * or nr_cpus=, since SRAT CPU entries may be ordered differently
	 * from those in the MADT.  Walk all CPU entries in the SRAT to get
	 * the full apicid-to-node mapping.
	 */

	/* SRAT: System Resource Affinity Table */
	if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
		struct acpi_subtable_proc srat_proc[5];

		memset(srat_proc, 0, sizeof(srat_proc));
		srat_proc[0].id = ACPI_SRAT_TYPE_CPU_AFFINITY;
		srat_proc[0].handler = acpi_parse_processor_affinity;
		srat_proc[1].id = ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY;
		srat_proc[1].handler = acpi_parse_x2apic_affinity;
		srat_proc[2].id = ACPI_SRAT_TYPE_GICC_AFFINITY;
		srat_proc[2].handler = acpi_parse_gicc_affinity;
		srat_proc[3].id = ACPI_SRAT_TYPE_GENERIC_AFFINITY;
		srat_proc[3].handler = acpi_parse_gi_affinity;
		srat_proc[4].id = ACPI_SRAT_TYPE_RINTC_AFFINITY;
		srat_proc[4].handler = acpi_parse_rintc_affinity;

		acpi_table_parse_entries_array(ACPI_SIG_SRAT,
					sizeof(struct acpi_table_srat),
					srat_proc, ARRAY_SIZE(srat_proc), 0);

		cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
					    acpi_parse_memory_affinity, 0);
	}

	/* SLIT: System Locality Information Table */
	acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);

	/*
	 * CXL Fixed Memory Window Structures (CFMWS) must be parsed
	 * after the SRAT.  Create NUMA nodes for CXL memory ranges that
	 * are defined in the CFMWS and not already defined in the SRAT.
	 * Initialize fake_pxm as the first available PXM to emulate.
	 */

	/* fake_pxm is the next unused PXM value after SRAT parsing */
	for (i = 0, fake_pxm = -1; i < MAX_NUMNODES; i++) {
		if (node_to_pxm_map[i] > fake_pxm)
			fake_pxm = node_to_pxm_map[i];
	}
	last_real_pxm = fake_pxm;
	fake_pxm++;
	acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, acpi_parse_cfmws,
			      &fake_pxm);

	if (cnt < 0)
		return cnt;
	else if (!parsed_numa_memblks)
		return -ENOENT;
	return 0;
}

bool acpi_node_backed_by_real_pxm(int nid)
{
	int pxm = node_to_pxm(nid);

	return pxm <= last_real_pxm;
}
EXPORT_SYMBOL_GPL(acpi_node_backed_by_real_pxm);

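/*
 * Evaluate _PXM for @h, walking up through the parent handles until a
 * device with a _PXM method is found.  Returns the proximity domain, or
 * -1 if no ancestor provides one.
 */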
static int acpi_get_pxm(acpi_handle h)
{
	unsigned long long pxm;
	acpi_status status;
	acpi_handle handle;
	acpi_handle phandle = h;

	do {
		handle = phandle;
		status = acpi_evaluate_integer(handle, "_PXM", NULL, &pxm);
		if (ACPI_SUCCESS(status))
			return pxm;
		status = acpi_get_parent(handle, &phandle);
	} while (ACPI_SUCCESS(status));
	return -1;
}

int acpi_get_node(acpi_handle handle)
{
	int pxm;

	pxm = acpi_get_pxm(handle);

	return pxm_to_node(pxm);
}
EXPORT_SYMBOL(acpi_get_node);