xref: /linux/arch/loongarch/kernel/acpi.c (revision 497e6b37b0099dc415578488287fd84fb74433eb)
// SPDX-License-Identifier: GPL-2.0
/*
 * acpi.c - Architecture-Specific Low-Level ACPI Boot Support
 *
 * Author: Jianmin Lv <lvjianmin@loongson.cn>
 *         Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/serial_core.h>
#include <asm/io.h>
#include <asm/numa.h>
#include <asm/loongson.h>

int acpi_disabled;
EXPORT_SYMBOL(acpi_disabled);
int acpi_noirq;
int acpi_pci_disabled;
EXPORT_SYMBOL(acpi_pci_disabled);
int acpi_strict = 1; /* We have no workarounds on LoongArch */
int num_processors;
int disabled_cpus;

u64 acpi_saved_sp;

#define MAX_CORE_PIC 256

#define PREFIX			"ACPI: "

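/*
 * Early mapping helpers for the ACPI core: before the normal ioremap()
 * machinery is up, tables are mapped through temporary early_memremap()
 * fixmap slots, so every __acpi_map_table() must be paired with an
 * __acpi_unmap_table().
 */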
void __init __iomem * __acpi_map_table(unsigned long phys, unsigned long size)
{
	if (!phys || !size)
		return NULL;

	return early_memremap(phys, size);
}

void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
{
	if (!map || !size)
		return;

	early_memunmap(map, size);
}

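/*
 * ACPI data that lives in RAM (memblock-managed memory, e.g. reclaimable
 * table space) is mapped cacheable; anything else is treated as MMIO and
 * mapped uncached.
 */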
void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
	if (!memblock_is_memory(phys))
		return ioremap(phys, size);
	else
		return ioremap_cache(phys, size);
}

#ifdef CONFIG_SMP
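/*
 * Assign a logical CPU number to a MADT-enumerated core. The boot CPU is
 * always logical CPU 0; other cores take the first free slot in
 * cpu_present_mask. Cores without ACPI_MADT_ENABLED are only counted as
 * disabled.
 */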
static int set_processor_mask(u32 id, u32 flags)
{
	int cpu, cpuid = id;

	if (num_processors >= nr_cpu_ids) {
		pr_warn(PREFIX "nr_cpus/possible_cpus limit of %i reached. processor 0x%x ignored.\n",
			nr_cpu_ids, cpuid);
		return -ENODEV;
	}

	if (cpuid == loongson_sysconf.boot_cpu_id)
		cpu = 0;
	else
		cpu = cpumask_next_zero(-1, cpu_present_mask);

	if (flags & ACPI_MADT_ENABLED) {
		num_processors++;
		set_cpu_possible(cpu, true);
		set_cpu_present(cpu, true);
		__cpu_number_map[cpuid] = cpu;
		__cpu_logical_map[cpu] = cpuid;
	} else {
		disabled_cpus++;
	}

	return cpu;
}
#endif

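/* MADT CORE_PIC handler: the firmware emits one entry per CPU core */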
static int __init
acpi_parse_processor(union acpi_subtable_headers *header, const unsigned long end)
{
	struct acpi_madt_core_pic *processor = NULL;

	processor = (struct acpi_madt_core_pic *)header;
	if (BAD_MADT_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(&header->common);
#ifdef CONFIG_SMP
	set_processor_mask(processor->core_id, processor->flags);
#endif

	return 0;
}

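/*
 * MADT EIO_PIC handler: the first core of each node that carries an
 * extended I/O interrupt controller is recorded as an I/O master in
 * loongson_sysconf.cores_io_master.
 */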
static int __init
acpi_parse_eio_master(union acpi_subtable_headers *header, const unsigned long end)
{
	int core;
	struct acpi_madt_eio_pic *eiointc = NULL;

	eiointc = (struct acpi_madt_eio_pic *)header;
	if (BAD_MADT_ENTRY(eiointc, end))
		return -EINVAL;

	core = eiointc->node * CORES_PER_EIO_NODE;
	set_bit(core, &(loongson_sysconf.cores_io_master));

	return 0;
}

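/*
 * Walk the MADT: reset the cpuid<->logical-CPU maps to -1, enumerate all
 * CORE_PIC (CPU) entries, then pick up the EIO_PIC I/O masters.
 */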
static void __init acpi_process_madt(void)
{
#ifdef CONFIG_SMP
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		__cpu_number_map[i] = -1;
		__cpu_logical_map[i] = -1;
	}
#endif
	acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
			acpi_parse_processor, MAX_CORE_PIC);

	acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC,
			acpi_parse_eio_master, MAX_IO_PICS);

	loongson_sysconf.nr_cpus = num_processors;
}

#ifndef CONFIG_SUSPEND
int (*acpi_suspend_lowlevel)(void);
#else
int (*acpi_suspend_lowlevel)(void) = loongarch_acpi_suspend;
#endif

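/*
 * Parse the ACPI tables at boot. If ACPI is disabled or the tables are
 * unusable, fall back to scanning the FDT "chosen" node for an early
 * console.
 */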
void __init acpi_boot_table_init(void)
{
	/*
	 * If acpi_disabled, bail out
	 */
	if (acpi_disabled)
		goto fdt_earlycon;

	/*
	 * Initialize the ACPI boot-time table parser.
	 */
	if (acpi_table_init()) {
		disable_acpi();
		goto fdt_earlycon;
	}

	loongson_sysconf.boot_cpu_id = read_csr_cpuid();

	/*
	 * Process the Multiple APIC Description Table (MADT), if present
	 */
	acpi_process_madt();

	/* Do not enable ACPI SPCR console by default */
	acpi_parse_spcr(earlycon_acpi_spcr_enable, false);

	return;

fdt_earlycon:
	if (earlycon_acpi_spcr_enable)
		early_init_dt_scan_chosen_stdout();
}

#ifdef CONFIG_ACPI_NUMA

static __init int setup_node(int pxm)
{
	return acpi_map_pxm_to_node(pxm);
}

/*
 * Callback for SLIT parsing.  pxm_to_node() returns NUMA_NO_NODE for
 * I/O localities since SRAT does not list them.  I/O localities are
 * not supported at this point.
 */
unsigned int numa_distance_cnt;

static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit)
{
	return slit->locality_count;
}

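/*
 * Record one entry of the SLIT distance matrix. A distance must fit in
 * a u8, and the diagonal must be LOCAL_DISTANCE since a node is always
 * at local distance from itself; e.g. a two-node board would typically
 * report { {10, 20}, {20, 10} }.
 */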
void __init numa_set_distance(int from, int to, int distance)
{
	if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) {
		pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
				from, to, distance);
		return;
	}

	node_distances[from][to] = distance;
}

/* Callback for Proximity Domain -> CPUID mapping */
void __init
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
{
	int pxm, node;

	if (srat_disabled())
		return;
	if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
		bad_srat();
		return;
	}
	if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
		return;
	pxm = pa->proximity_domain_lo;
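	/* SRAT revision 2+ widens the proximity domain from 8 to 32 bits */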
	if (acpi_srat_revision >= 2) {
		pxm |= (pa->proximity_domain_hi[0] << 8);
		pxm |= (pa->proximity_domain_hi[1] << 16);
		pxm |= (pa->proximity_domain_hi[2] << 24);
	}
	node = setup_node(pxm);
	if (node < 0) {
		pr_err("SRAT: Too many proximity domains %x\n", pxm);
		bad_srat();
		return;
	}

	if (pa->apic_id >= CONFIG_NR_CPUS) {
		pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u skipped apicid that is too big\n",
				pxm, pa->apic_id, node);
		return;
	}

	early_numa_add_cpu(pa->apic_id, node);

	set_cpuid_to_node(pa->apic_id, node);
	node_set(node, numa_nodes_parsed);
	pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node);
}

void __init acpi_numa_arch_fixup(void) {}
#endif

void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
{
	memblock_reserve(addr, size);
}

#ifdef CONFIG_ACPI_HOTPLUG_CPU

#include <acpi/processor.h>

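/*
 * Bind a hot-added CPU to its NUMA node, using the _PXM information
 * reachable from its ACPI handle.
 */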
static int __ref acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
{
#ifdef CONFIG_ACPI_NUMA
	int nid;

	nid = acpi_get_node(handle);
	if (nid != NUMA_NO_NODE) {
		set_cpuid_to_node(physid, nid);
		node_set(nid, numa_nodes_parsed);
		set_cpu_numa_node(cpu, nid);
		cpumask_set_cpu(cpu, cpumask_of_node(nid));
	}
#endif
	return 0;
}

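/*
 * Hot-add path: allocate a logical CPU number for the new core and wire
 * up its NUMA binding before the caller brings it online.
 */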
int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu)
{
	int cpu;

	cpu = set_processor_mask(physid, ACPI_MADT_ENABLED);
	if (cpu < 0) {
		pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
		return cpu;
	}

	acpi_map_cpu2node(handle, cpu, physid);

	*pcpu = cpu;

	return 0;
}
EXPORT_SYMBOL(acpi_map_cpu);

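/*
 * Hot-remove path: drop the NUMA binding and clear the CPU from
 * cpu_present_mask, so its logical slot can be handed out again by a
 * later hot-add.
 */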
int acpi_unmap_cpu(int cpu)
{
#ifdef CONFIG_ACPI_NUMA
	set_cpuid_to_node(cpu_logical_map(cpu), NUMA_NO_NODE);
#endif
	set_cpu_present(cpu, false);
	num_processors--;

	pr_info("cpu%d hot remove!\n", cpu);

	return 0;
}
EXPORT_SYMBOL(acpi_unmap_cpu);

#endif /* CONFIG_ACPI_HOTPLUG_CPU */