1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * acpi_numa.c - ACPI NUMA support 4 * 5 * Copyright (C) 2002 Takayoshi Kochi <t-kochi@bq.jp.nec.com> 6 */ 7 8 #define pr_fmt(fmt) "ACPI: " fmt 9 10 #include <linux/module.h> 11 #include <linux/init.h> 12 #include <linux/kernel.h> 13 #include <linux/types.h> 14 #include <linux/errno.h> 15 #include <linux/acpi.h> 16 #include <linux/memblock.h> 17 #include <linux/numa.h> 18 #include <linux/nodemask.h> 19 #include <linux/topology.h> 20 21 static nodemask_t nodes_found_map = NODE_MASK_NONE; 22 23 /* maps to convert between proximity domain and logical node ID */ 24 static int pxm_to_node_map[MAX_PXM_DOMAINS] 25 = { [0 ... MAX_PXM_DOMAINS - 1] = NUMA_NO_NODE }; 26 static int node_to_pxm_map[MAX_NUMNODES] 27 = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL }; 28 29 unsigned char acpi_srat_revision __initdata; 30 int acpi_numa __initdata; 31 32 int pxm_to_node(int pxm) 33 { 34 if (pxm < 0) 35 return NUMA_NO_NODE; 36 return pxm_to_node_map[pxm]; 37 } 38 39 int node_to_pxm(int node) 40 { 41 if (node < 0) 42 return PXM_INVAL; 43 return node_to_pxm_map[node]; 44 } 45 46 static void __acpi_map_pxm_to_node(int pxm, int node) 47 { 48 if (pxm_to_node_map[pxm] == NUMA_NO_NODE || node < pxm_to_node_map[pxm]) 49 pxm_to_node_map[pxm] = node; 50 if (node_to_pxm_map[node] == PXM_INVAL || pxm < node_to_pxm_map[node]) 51 node_to_pxm_map[node] = pxm; 52 } 53 54 int acpi_map_pxm_to_node(int pxm) 55 { 56 int node; 57 58 if (pxm < 0 || pxm >= MAX_PXM_DOMAINS || numa_off) 59 return NUMA_NO_NODE; 60 61 node = pxm_to_node_map[pxm]; 62 63 if (node == NUMA_NO_NODE) { 64 if (nodes_weight(nodes_found_map) >= MAX_NUMNODES) 65 return NUMA_NO_NODE; 66 node = first_unset_node(nodes_found_map); 67 __acpi_map_pxm_to_node(pxm, node); 68 node_set(node, nodes_found_map); 69 } 70 71 return node; 72 } 73 EXPORT_SYMBOL(acpi_map_pxm_to_node); 74 75 /** 76 * acpi_map_pxm_to_online_node - Map proximity ID to online node 77 * @pxm: ACPI proximity ID 78 * 79 * This is similar to acpi_map_pxm_to_node(), but always returns an online 80 * node. When the mapped node from a given proximity ID is offline, it 81 * looks up the node distance table and returns the nearest online node. 82 * 83 * ACPI device drivers, which are called after the NUMA initialization has 84 * completed in the kernel, can call this interface to obtain their device 85 * NUMA topology from ACPI tables. Such drivers do not have to deal with 86 * offline nodes. A node may be offline when a device proximity ID is 87 * unique, SRAT memory entry does not exist, or NUMA is disabled, ex. 88 * "numa=off" on x86. 89 */ 90 int acpi_map_pxm_to_online_node(int pxm) 91 { 92 int node, min_node; 93 94 node = acpi_map_pxm_to_node(pxm); 95 96 if (node == NUMA_NO_NODE) 97 node = 0; 98 99 min_node = node; 100 if (!node_online(node)) { 101 int min_dist = INT_MAX, dist, n; 102 103 for_each_online_node(n) { 104 dist = node_distance(node, n); 105 if (dist < min_dist) { 106 min_dist = dist; 107 min_node = n; 108 } 109 } 110 } 111 112 return min_node; 113 } 114 EXPORT_SYMBOL(acpi_map_pxm_to_online_node); 115 116 static void __init 117 acpi_table_print_srat_entry(struct acpi_subtable_header *header) 118 { 119 switch (header->type) { 120 case ACPI_SRAT_TYPE_CPU_AFFINITY: 121 { 122 struct acpi_srat_cpu_affinity *p = 123 (struct acpi_srat_cpu_affinity *)header; 124 pr_debug("SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n", 125 p->apic_id, p->local_sapic_eid, 126 p->proximity_domain_lo, 127 (p->flags & ACPI_SRAT_CPU_ENABLED) ? 128 "enabled" : "disabled"); 129 } 130 break; 131 132 case ACPI_SRAT_TYPE_MEMORY_AFFINITY: 133 { 134 struct acpi_srat_mem_affinity *p = 135 (struct acpi_srat_mem_affinity *)header; 136 pr_debug("SRAT Memory (0x%llx length 0x%llx) in proximity domain %d %s%s%s\n", 137 (unsigned long long)p->base_address, 138 (unsigned long long)p->length, 139 p->proximity_domain, 140 (p->flags & ACPI_SRAT_MEM_ENABLED) ? 141 "enabled" : "disabled", 142 (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ? 143 " hot-pluggable" : "", 144 (p->flags & ACPI_SRAT_MEM_NON_VOLATILE) ? 145 " non-volatile" : ""); 146 } 147 break; 148 149 case ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY: 150 { 151 struct acpi_srat_x2apic_cpu_affinity *p = 152 (struct acpi_srat_x2apic_cpu_affinity *)header; 153 pr_debug("SRAT Processor (x2apicid[0x%08x]) in proximity domain %d %s\n", 154 p->apic_id, 155 p->proximity_domain, 156 (p->flags & ACPI_SRAT_CPU_ENABLED) ? 157 "enabled" : "disabled"); 158 } 159 break; 160 161 case ACPI_SRAT_TYPE_GICC_AFFINITY: 162 { 163 struct acpi_srat_gicc_affinity *p = 164 (struct acpi_srat_gicc_affinity *)header; 165 pr_debug("SRAT Processor (acpi id[0x%04x]) in proximity domain %d %s\n", 166 p->acpi_processor_uid, 167 p->proximity_domain, 168 (p->flags & ACPI_SRAT_GICC_ENABLED) ? 169 "enabled" : "disabled"); 170 } 171 break; 172 173 default: 174 pr_warn("Found unsupported SRAT entry (type = 0x%x)\n", 175 header->type); 176 break; 177 } 178 } 179 180 /* 181 * A lot of BIOS fill in 10 (= no distance) everywhere. This messes 182 * up the NUMA heuristics which wants the local node to have a smaller 183 * distance than the others. 184 * Do some quick checks here and only use the SLIT if it passes. 185 */ 186 static int __init slit_valid(struct acpi_table_slit *slit) 187 { 188 int i, j; 189 int d = slit->locality_count; 190 for (i = 0; i < d; i++) { 191 for (j = 0; j < d; j++) { 192 u8 val = slit->entry[d*i + j]; 193 if (i == j) { 194 if (val != LOCAL_DISTANCE) 195 return 0; 196 } else if (val <= LOCAL_DISTANCE) 197 return 0; 198 } 199 } 200 return 1; 201 } 202 203 void __init bad_srat(void) 204 { 205 pr_err("SRAT: SRAT not used.\n"); 206 acpi_numa = -1; 207 } 208 209 int __init srat_disabled(void) 210 { 211 return acpi_numa < 0; 212 } 213 214 #if defined(CONFIG_X86) || defined(CONFIG_ARM64) 215 /* 216 * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for 217 * I/O localities since SRAT does not list them. I/O localities are 218 * not supported at this point. 219 */ 220 void __init acpi_numa_slit_init(struct acpi_table_slit *slit) 221 { 222 int i, j; 223 224 for (i = 0; i < slit->locality_count; i++) { 225 const int from_node = pxm_to_node(i); 226 227 if (from_node == NUMA_NO_NODE) 228 continue; 229 230 for (j = 0; j < slit->locality_count; j++) { 231 const int to_node = pxm_to_node(j); 232 233 if (to_node == NUMA_NO_NODE) 234 continue; 235 236 numa_set_distance(from_node, to_node, 237 slit->entry[slit->locality_count * i + j]); 238 } 239 } 240 } 241 242 /* 243 * Default callback for parsing of the Proximity Domain <-> Memory 244 * Area mappings 245 */ 246 int __init 247 acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) 248 { 249 u64 start, end; 250 u32 hotpluggable; 251 int node, pxm; 252 253 if (srat_disabled()) 254 goto out_err; 255 if (ma->header.length < sizeof(struct acpi_srat_mem_affinity)) { 256 pr_err("SRAT: Unexpected header length: %d\n", 257 ma->header.length); 258 goto out_err_bad_srat; 259 } 260 if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0) 261 goto out_err; 262 hotpluggable = ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE; 263 if (hotpluggable && !IS_ENABLED(CONFIG_MEMORY_HOTPLUG)) 264 goto out_err; 265 266 start = ma->base_address; 267 end = start + ma->length; 268 pxm = ma->proximity_domain; 269 if (acpi_srat_revision <= 1) 270 pxm &= 0xff; 271 272 node = acpi_map_pxm_to_node(pxm); 273 if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) { 274 pr_err("SRAT: Too many proximity domains.\n"); 275 goto out_err_bad_srat; 276 } 277 278 if (numa_add_memblk(node, start, end) < 0) { 279 pr_err("SRAT: Failed to add memblk to node %u [mem %#010Lx-%#010Lx]\n", 280 node, (unsigned long long) start, 281 (unsigned long long) end - 1); 282 goto out_err_bad_srat; 283 } 284 285 node_set(node, numa_nodes_parsed); 286 287 pr_info("SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]%s%s\n", 288 node, pxm, 289 (unsigned long long) start, (unsigned long long) end - 1, 290 hotpluggable ? " hotplug" : "", 291 ma->flags & ACPI_SRAT_MEM_NON_VOLATILE ? " non-volatile" : ""); 292 293 /* Mark hotplug range in memblock. */ 294 if (hotpluggable && memblock_mark_hotplug(start, ma->length)) 295 pr_warn("SRAT: Failed to mark hotplug range [mem %#010Lx-%#010Lx] in memblock\n", 296 (unsigned long long)start, (unsigned long long)end - 1); 297 298 max_possible_pfn = max(max_possible_pfn, PFN_UP(end - 1)); 299 300 return 0; 301 out_err_bad_srat: 302 bad_srat(); 303 out_err: 304 return -EINVAL; 305 } 306 #endif /* defined(CONFIG_X86) || defined (CONFIG_ARM64) */ 307 308 static int __init acpi_parse_slit(struct acpi_table_header *table) 309 { 310 struct acpi_table_slit *slit = (struct acpi_table_slit *)table; 311 312 if (!slit_valid(slit)) { 313 pr_info("SLIT table looks invalid. Not used.\n"); 314 return -EINVAL; 315 } 316 acpi_numa_slit_init(slit); 317 318 return 0; 319 } 320 321 void __init __weak 322 acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa) 323 { 324 pr_warn("Found unsupported x2apic [0x%08x] SRAT entry\n", pa->apic_id); 325 } 326 327 static int __init 328 acpi_parse_x2apic_affinity(union acpi_subtable_headers *header, 329 const unsigned long end) 330 { 331 struct acpi_srat_x2apic_cpu_affinity *processor_affinity; 332 333 processor_affinity = (struct acpi_srat_x2apic_cpu_affinity *)header; 334 if (!processor_affinity) 335 return -EINVAL; 336 337 acpi_table_print_srat_entry(&header->common); 338 339 /* let architecture-dependent part to do it */ 340 acpi_numa_x2apic_affinity_init(processor_affinity); 341 342 return 0; 343 } 344 345 static int __init 346 acpi_parse_processor_affinity(union acpi_subtable_headers *header, 347 const unsigned long end) 348 { 349 struct acpi_srat_cpu_affinity *processor_affinity; 350 351 processor_affinity = (struct acpi_srat_cpu_affinity *)header; 352 if (!processor_affinity) 353 return -EINVAL; 354 355 acpi_table_print_srat_entry(&header->common); 356 357 /* let architecture-dependent part to do it */ 358 acpi_numa_processor_affinity_init(processor_affinity); 359 360 return 0; 361 } 362 363 static int __init 364 acpi_parse_gicc_affinity(union acpi_subtable_headers *header, 365 const unsigned long end) 366 { 367 struct acpi_srat_gicc_affinity *processor_affinity; 368 369 processor_affinity = (struct acpi_srat_gicc_affinity *)header; 370 if (!processor_affinity) 371 return -EINVAL; 372 373 acpi_table_print_srat_entry(&header->common); 374 375 /* let architecture-dependent part to do it */ 376 acpi_numa_gicc_affinity_init(processor_affinity); 377 378 return 0; 379 } 380 381 static int __initdata parsed_numa_memblks; 382 383 static int __init 384 acpi_parse_memory_affinity(union acpi_subtable_headers * header, 385 const unsigned long end) 386 { 387 struct acpi_srat_mem_affinity *memory_affinity; 388 389 memory_affinity = (struct acpi_srat_mem_affinity *)header; 390 if (!memory_affinity) 391 return -EINVAL; 392 393 acpi_table_print_srat_entry(&header->common); 394 395 /* let architecture-dependent part to do it */ 396 if (!acpi_numa_memory_affinity_init(memory_affinity)) 397 parsed_numa_memblks++; 398 return 0; 399 } 400 401 static int __init acpi_parse_srat(struct acpi_table_header *table) 402 { 403 struct acpi_table_srat *srat = (struct acpi_table_srat *)table; 404 405 acpi_srat_revision = srat->header.revision; 406 407 /* Real work done in acpi_table_parse_srat below. */ 408 409 return 0; 410 } 411 412 static int __init 413 acpi_table_parse_srat(enum acpi_srat_type id, 414 acpi_tbl_entry_handler handler, unsigned int max_entries) 415 { 416 return acpi_table_parse_entries(ACPI_SIG_SRAT, 417 sizeof(struct acpi_table_srat), id, 418 handler, max_entries); 419 } 420 421 int __init acpi_numa_init(void) 422 { 423 int cnt = 0; 424 425 if (acpi_disabled) 426 return -EINVAL; 427 428 /* 429 * Should not limit number with cpu num that is from NR_CPUS or nr_cpus= 430 * SRAT cpu entries could have different order with that in MADT. 431 * So go over all cpu entries in SRAT to get apicid to node mapping. 432 */ 433 434 /* SRAT: System Resource Affinity Table */ 435 if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) { 436 struct acpi_subtable_proc srat_proc[3]; 437 438 memset(srat_proc, 0, sizeof(srat_proc)); 439 srat_proc[0].id = ACPI_SRAT_TYPE_CPU_AFFINITY; 440 srat_proc[0].handler = acpi_parse_processor_affinity; 441 srat_proc[1].id = ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY; 442 srat_proc[1].handler = acpi_parse_x2apic_affinity; 443 srat_proc[2].id = ACPI_SRAT_TYPE_GICC_AFFINITY; 444 srat_proc[2].handler = acpi_parse_gicc_affinity; 445 446 acpi_table_parse_entries_array(ACPI_SIG_SRAT, 447 sizeof(struct acpi_table_srat), 448 srat_proc, ARRAY_SIZE(srat_proc), 0); 449 450 cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY, 451 acpi_parse_memory_affinity, 0); 452 } 453 454 /* SLIT: System Locality Information Table */ 455 acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit); 456 457 if (cnt < 0) 458 return cnt; 459 else if (!parsed_numa_memblks) 460 return -ENOENT; 461 return 0; 462 } 463 464 static int acpi_get_pxm(acpi_handle h) 465 { 466 unsigned long long pxm; 467 acpi_status status; 468 acpi_handle handle; 469 acpi_handle phandle = h; 470 471 do { 472 handle = phandle; 473 status = acpi_evaluate_integer(handle, "_PXM", NULL, &pxm); 474 if (ACPI_SUCCESS(status)) 475 return pxm; 476 status = acpi_get_parent(handle, &phandle); 477 } while (ACPI_SUCCESS(status)); 478 return -1; 479 } 480 481 int acpi_get_node(acpi_handle handle) 482 { 483 int pxm; 484 485 pxm = acpi_get_pxm(handle); 486 487 return acpi_map_pxm_to_node(pxm); 488 } 489 EXPORT_SYMBOL(acpi_get_node); 490