// SPDX-License-Identifier: GPL-2.0-only
/* Common code for 32 and 64-bit NUMA */
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>
#include <linux/sort.h>
#include <linux/numa_memblks.h>

#include <asm/e820/api.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>

#include "numa_internal.h"

int numa_off;

static __init int numa_setup(char *opt)
{
        if (!opt)
                return -EINVAL;
        if (!strncmp(opt, "off", 3))
                numa_off = 1;
        if (!strncmp(opt, "fake=", 5))
                return numa_emu_cmdline(opt + 5);
        if (!strncmp(opt, "noacpi", 6))
                disable_srat();
        if (!strncmp(opt, "nohmat", 6))
                disable_hmat();

        return 0;
}
early_param("numa", numa_setup);

/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] = {
        [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

int numa_cpu_node(int cpu)
{
        u32 apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

        if (apicid != BAD_APICID)
                return __apicid_to_node[apicid];
        return NUMA_NO_NODE;
}

cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

void numa_set_node(int cpu, int node)
{
        int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

        /* early setting, no percpu area yet */
        if (cpu_to_node_map) {
                cpu_to_node_map[cpu] = node;
                return;
        }

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
        if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
                printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
                dump_stack();
                return;
        }
#endif
        per_cpu(x86_cpu_to_node_map, cpu) = node;

        set_cpu_numa_node(cpu, node);
}

void numa_clear_node(int cpu)
{
        numa_set_node(cpu, NUMA_NO_NODE);
}

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
        unsigned int node;

        /* setup nr_node_ids if not done yet */
        if (nr_node_ids == MAX_NUMNODES)
                setup_nr_node_ids();

        /* allocate the map */
        for (node = 0; node < nr_node_ids; node++)
                alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

        /* cpumask_of_node() will now work */
        pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
}

static int __init numa_register_nodes(void)
{
        int nid;

        if (!memblock_validate_numa_coverage(SZ_1M))
                return -EINVAL;

        /* Finally register nodes. */
        for_each_node_mask(nid, node_possible_map) {
                unsigned long start_pfn, end_pfn;

                /*
                 * Note, get_pfn_range_for_nid() depends on
                 * memblock_set_node() having already happened
                 */
                get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
                if (start_pfn >= end_pfn)
                        continue;

                alloc_node_data(nid);
                node_set_online(nid);
        }

        /* Dump memblock with node info and return. */
        memblock_dump_all();
        return 0;
}
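
/*
 * Illustrative summary (not part of the original file) of the kernel
 * command line forms accepted by numa_setup() above, as derived from its
 * strncmp() checks:
 *
 *      numa=off        set numa_off and disable NUMA handling
 *      numa=fake=8     hand "8" to numa_emu_cmdline() for NUMA emulation
 *      numa=noacpi     call disable_srat() and ignore the ACPI SRAT table
 *      numa=nohmat     call disable_hmat() and ignore the ACPI HMAT table
 */
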
/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 * mapping. To avoid this fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet. We round robin the existing
 * nodes.
 */
static void __init numa_init_array(void)
{
        int rr, i;

        rr = first_node(node_online_map);
        for (i = 0; i < nr_cpu_ids; i++) {
                if (early_cpu_to_node(i) != NUMA_NO_NODE)
                        continue;
                numa_set_node(i, rr);
                rr = next_node_in(rr, node_online_map);
        }
}

static int __init numa_init(int (*init_func)(void))
{
        int i;
        int ret;

        for (i = 0; i < MAX_LOCAL_APIC; i++)
                set_apicid_to_node(i, NUMA_NO_NODE);

        ret = numa_memblks_init(init_func, /* memblock_force_top_down */ true);
        if (ret < 0)
                return ret;

        ret = numa_register_nodes();
        if (ret < 0)
                return ret;

        for (i = 0; i < nr_cpu_ids; i++) {
                int nid = early_cpu_to_node(i);

                if (nid == NUMA_NO_NODE)
                        continue;
                if (!node_online(nid))
                        numa_clear_node(i);
        }
        numa_init_array();

        return 0;
}

/**
 * dummy_numa_init - Fallback dummy NUMA init
 *
 * Used if there's no underlying NUMA architecture, NUMA initialization
 * fails, or NUMA is disabled on the command line.
 *
 * Must online at least one node and add memory blocks that cover all
 * allowed memory. This function must not fail.
 */
static int __init dummy_numa_init(void)
{
        printk(KERN_INFO "%s\n",
               numa_off ? "NUMA turned off" : "No NUMA configuration found");
        printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
               0LLU, PFN_PHYS(max_pfn) - 1);

        node_set(0, numa_nodes_parsed);
        numa_add_memblk(0, 0, PFN_PHYS(max_pfn));

        return 0;
}

/**
 * x86_numa_init - Initialize NUMA
 *
 * Try each configured NUMA initialization method until one succeeds. The
 * last fallback is dummy single node config encompassing whole memory and
 * never fails.
 */
void __init x86_numa_init(void)
{
        if (!numa_off) {
#ifdef CONFIG_ACPI_NUMA
                if (!numa_init(x86_acpi_numa_init))
                        return;
#endif
#ifdef CONFIG_AMD_NUMA
                if (!numa_init(amd_numa_init))
                        return;
#endif
                if (acpi_disabled && !numa_init(of_numa_init))
                        return;
        }

        numa_init(dummy_numa_init);
}


/*
 * A node may exist which has one or more Generic Initiators but no CPUs and no
 * memory.
 *
 * This function must be called after init_cpu_to_node(), to ensure that any
 * memoryless CPU nodes have already been brought online, and before the
 * node_data[nid] is needed for zone list setup in build_all_zonelists().
 *
 * When this function is called, any nodes containing either memory and/or CPUs
 * will already be online and there is no need to do anything extra, even if
 * they also contain one or more Generic Initiators.
 */
void __init init_gi_nodes(void)
{
        int nid;

        /*
         * Exclude this node from
         *   bringup_nonboot_cpus
         *    cpu_up
         *     __try_online_node
         *      register_one_node
         * because node_subsys is not initialized yet.
         * TODO remove dependency on node_online
         */
        for_each_node_state(nid, N_GENERIC_INITIATOR)
                if (!node_online(nid))
                        node_set_online(nid);
}
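
/*
 * Worked example (illustrative only): with nodes {0, 1} online and four
 * possible CPUs that received no affinity from the platform, the
 * numa_init_array() round-robin above assigns:
 *
 *      cpu 0 -> node 0
 *      cpu 1 -> node 1
 *      cpu 2 -> node 0
 *      cpu 3 -> node 1
 *
 * starting from first_node(node_online_map) and wrapping via next_node_in().
 */
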
/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for the NUMA
 * emulation and fake-node cases (when running a kernel compiled
 * for NUMA on a non-NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round-robin manner by numa_init_array(),
 * prior to this call, and that initialization is good enough
 * for the fake NUMA cases.
 *
 * Called before the per_cpu areas are setup.
 */
void __init init_cpu_to_node(void)
{
        int cpu;
        u32 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

        BUG_ON(cpu_to_apicid == NULL);

        for_each_possible_cpu(cpu) {
                int node = numa_cpu_node(cpu);

                if (node == NUMA_NO_NODE)
                        continue;

                /*
                 * Exclude this node from
                 *   bringup_nonboot_cpus
                 *    cpu_up
                 *     __try_online_node
                 *      register_one_node
                 * because node_subsys is not initialized yet.
                 * TODO remove dependency on node_online
                 */
                if (!node_online(node))
                        node_set_online(node);

                numa_set_node(cpu, node);
        }
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
void numa_add_cpu(unsigned int cpu)
{
        cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void numa_remove_cpu(unsigned int cpu)
{
        cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif /* !CONFIG_NUMA_EMU */

#else /* !CONFIG_DEBUG_PER_CPU_MAPS */

int __cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
                printk(KERN_WARNING
                        "cpu_to_node(%d): usage too early!\n", cpu);
                dump_stack();
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map))
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

        if (!cpu_possible(cpu)) {
                printk(KERN_WARNING
                        "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
                dump_stack();
                return NUMA_NO_NODE;
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}

void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable)
{
        struct cpumask *mask;

        if (node == NUMA_NO_NODE) {
                /* early_cpu_to_node() already emits a warning and trace */
                return;
        }
        mask = node_to_cpumask_map[node];
        if (!cpumask_available(mask)) {
                pr_err("node_to_cpumask_map[%i] NULL\n", node);
                dump_stack();
                return;
        }

        if (enable)
                cpumask_set_cpu(cpu, mask);
        else
                cpumask_clear_cpu(cpu, mask);

        printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n",
                enable ? "numa_add_cpu" : "numa_remove_cpu",
                cpu, node, cpumask_pr_args(mask));
        return;
}

# ifndef CONFIG_NUMA_EMU
static void numa_set_cpumask(int cpu, bool enable)
{
        debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}

void numa_add_cpu(unsigned int cpu)
{
        numa_set_cpumask(cpu, true);
}

void numa_remove_cpu(unsigned int cpu)
{
        numa_set_cpumask(cpu, false);
}
# endif /* !CONFIG_NUMA_EMU */
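
/*
 * Usage sketch (illustrative only): cpu_to_node() is valid only once the
 * per-CPU areas exist; before that point, the early map must be used.  With
 * CONFIG_DEBUG_PER_CPU_MAPS the helpers above warn when a caller gets the
 * ordering wrong:
 *
 *      nid = early_cpu_to_node(cpu);   before setup_per_cpu_areas()
 *      nid = cpu_to_node(cpu);         only after setup_per_cpu_areas()
 */
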
/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
        if ((unsigned)node >= nr_node_ids) {
                printk(KERN_WARNING
                        "cpumask_of_node(%d): (unsigned)node >= nr_node_ids(%u)\n",
                        node, nr_node_ids);
                dump_stack();
                return cpu_none_mask;
        }
        if (!cpumask_available(node_to_cpumask_map[node])) {
                printk(KERN_WARNING
                        "cpumask_of_node(%d): no node_to_cpumask_map!\n",
                        node);
                dump_stack();
                return cpu_online_mask;
        }
        return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */

#ifdef CONFIG_NUMA_EMU
void __init numa_emu_update_cpu_to_node(int *emu_nid_to_phys,
                                        unsigned int nr_emu_nids)
{
        int i, j;

        /*
         * Transform __apicid_to_node table to use emulated nids by
         * reverse-mapping phys_nid.  The maps should always exist but fall
         * back to zero just in case.
         */
        for (i = 0; i < ARRAY_SIZE(__apicid_to_node); i++) {
                if (__apicid_to_node[i] == NUMA_NO_NODE)
                        continue;
                for (j = 0; j < nr_emu_nids; j++)
                        if (__apicid_to_node[i] == emu_nid_to_phys[j])
                                break;
                __apicid_to_node[i] = j < nr_emu_nids ? j : 0;
        }
}

u64 __init numa_emu_dma_end(void)
{
        return PFN_PHYS(MAX_DMA32_PFN);
}
#endif /* CONFIG_NUMA_EMU */
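
/*
 * Usage sketch (illustrative only, not compiled here): a typical consumer
 * walks the CPUs of a node via the map maintained above, e.g.:
 *
 *      int cpu;
 *
 *      for_each_cpu(cpu, cpumask_of_node(nid))
 *              do_per_cpu_work(cpu);
 *
 * for_each_cpu() and cpumask_of_node() are real interfaces;
 * do_per_cpu_work() is a hypothetical placeholder for the caller's action.
 */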