// SPDX-License-Identifier: GPL-2.0
/*
 * ACPI 6.6 based NUMA setup for RISCV
 * Lots of code was borrowed from arch/arm64/kernel/acpi_numa.c
 *
 * Copyright 2004 Andi Kleen, SuSE Labs.
 * Copyright (C) 2013-2016, Linaro Ltd.
 *	Author: Hanjun Guo <hanjun.guo@linaro.org>
 * Copyright (C) 2024 Intel Corporation.
 *
 * Reads the ACPI SRAT table to figure out what memory belongs to which CPUs.
 *
 * Called from acpi_numa_init while reading the SRAT and SLIT tables.
 * Assumes all memory regions belonging to a single proximity domain
 * are in one chunk. Holes between them will be included in the node.
 */

#define pr_fmt(fmt) "ACPI: NUMA: " fmt

#include <linux/acpi.h>
#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/topology.h>

#include <asm/numa.h>

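/* Node ID for each possible CPU, populated from SRAT RINTC affinity entries. */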
static int acpi_early_node_map[NR_CPUS] __initdata = { [0 ... NR_CPUS - 1] = NUMA_NO_NODE };

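/* Return the node previously recorded for @cpu, or NUMA_NO_NODE if none. */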
static int __init acpi_numa_get_nid(unsigned int cpu)
{
	return acpi_early_node_map[cpu];
}

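/*
 * Map an ACPI processor UID to its logical CPU number, or return -EINVAL
 * if the UID does not belong to any possible CPU.
 */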
static inline int get_cpu_for_acpi_id(u32 uid)
{
	int cpu;

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		if (uid == get_acpi_id_for_cpu(cpu))
			return cpu;

	return -EINVAL;
}

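/*
 * Parse one SRAT RINTC affinity entry and record the proximity domain's
 * node for the matching logical CPU in acpi_early_node_map.
 */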
static int __init acpi_parse_rintc_pxm(union acpi_subtable_headers *header,
				       const unsigned long end)
{
	struct acpi_srat_rintc_affinity *pa;
	int cpu, pxm, node;

	if (srat_disabled())
		return -EINVAL;

	pa = (struct acpi_srat_rintc_affinity *)header;
	if (!pa)
		return -EINVAL;

	if (!(pa->flags & ACPI_SRAT_RINTC_ENABLED))
		return 0;

	pxm = pa->proximity_domain;
	node = pxm_to_node(pxm);

	/*
	 * If we can't map the UID to a logical cpu this
	 * means that the UID is not part of possible cpus
	 * so we do not need a NUMA mapping for it, skip
	 * the SRAT entry and keep parsing.
	 */
	cpu = get_cpu_for_acpi_id(pa->acpi_processor_uid);
	if (cpu < 0)
		return 0;

	acpi_early_node_map[cpu] = node;
	pr_info("SRAT: PXM %d -> HARTID 0x%lx -> Node %d\n", pxm,
		cpuid_to_hartid_map(cpu), node);

	return 0;
}

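/*
 * Walk the SRAT RINTC affinity entries and hand each CPU's node ID to the
 * early cpu-to-node map.
 */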
void __init acpi_map_cpus_to_nodes(void)
{
	int i;

	/*
	 * In ACPI, SMP and CPU NUMA information is provided in separate
	 * static tables, namely the MADT and the SRAT.
	 *
	 * Thus, it is simpler to first create the cpu logical map through
	 * an MADT walk and then map the logical cpus to their node ids
	 * as separate steps.
	 */
	acpi_table_parse_entries(ACPI_SIG_SRAT, sizeof(struct acpi_table_srat),
				 ACPI_SRAT_TYPE_RINTC_AFFINITY, acpi_parse_rintc_pxm, 0);

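	/* Propagate the SRAT-derived node IDs into the early cpu-to-node map. */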
	for (i = 0; i < nr_cpu_ids; i++)
		early_map_cpu_to_node(i, acpi_numa_get_nid(i));
}

/* Callback for Proximity Domain -> logical node ID mapping */
void __init acpi_numa_rintc_affinity_init(struct acpi_srat_rintc_affinity *pa)
{
	int pxm, node;

	if (srat_disabled())
		return;

	if (pa->header.length < sizeof(struct acpi_srat_rintc_affinity)) {
		pr_err("SRAT: Invalid SRAT header length: %d\n", pa->header.length);
		bad_srat();
		return;
	}

	if (!(pa->flags & ACPI_SRAT_RINTC_ENABLED))
		return;

	pxm = pa->proximity_domain;
	node = acpi_map_pxm_to_node(pxm);

	if (node == NUMA_NO_NODE) {
		pr_err("SRAT: Too many proximity domains %d\n", pxm);
		bad_srat();
		return;
	}

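	/* Record this node as described by firmware so NUMA init considers it. */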
	node_set(node, numa_nodes_parsed);
}