// SPDX-License-Identifier: GPL-2.0
/*
 * acpi.c - Architecture-Specific Low-Level ACPI Boot Support
 *
 * Author: Jianmin Lv <lvjianmin@loongson.cn>
 *         Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/efi-bgrt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/serial_core.h>
#include <asm/io.h>
#include <asm/numa.h>
#include <asm/loongson.h>

int acpi_disabled;
EXPORT_SYMBOL(acpi_disabled);
int acpi_noirq;
int acpi_pci_disabled;
EXPORT_SYMBOL(acpi_pci_disabled);
int acpi_strict = 1; /* We have no workarounds on LoongArch */
int num_processors;
int disabled_cpus;

u64 acpi_saved_sp;

#define PREFIX "ACPI: "

struct acpi_madt_core_pic acpi_core_pic[MAX_CORE_PIC];

void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size)
{
	if (!phys || !size)
		return NULL;

	return early_memremap(phys, size);
}

void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
{
	if (!map || !size)
		return;

	early_memunmap(map, size);
}

/* Map RAM regions cacheable, everything else (e.g. MMIO) uncacheable */
void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
	if (!memblock_is_memory(phys))
		return ioremap(phys, size);
	else
		return ioremap_cache(phys, size);
}

#ifdef CONFIG_SMP
static int set_processor_mask(u32 id, u32 pass)
{
	int cpu = -1, cpuid = id;

	if (num_processors >= NR_CPUS) {
		pr_warn(PREFIX "nr_cpus limit of %i reached. processor 0x%x ignored.\n",
			NR_CPUS, cpuid);
		return -ENODEV;
	}

	/* The boot CPU is always logical CPU 0 */
	if (cpuid == loongson_sysconf.boot_cpu_id)
		cpu = 0;

	switch (pass) {
	case 1: /* Pass 1: handle enabled processors */
		if (cpu < 0)
			cpu = find_first_zero_bit(cpumask_bits(cpu_present_mask), NR_CPUS);
		num_processors++;
		set_cpu_present(cpu, true);
		break;
	case 2: /* Pass 2: handle disabled processors */
		if (cpu < 0)
			cpu = find_first_zero_bit(cpumask_bits(cpu_possible_mask), NR_CPUS);
		disabled_cpus++;
		break;
	default:
		return cpu;
	}

	set_cpu_possible(cpu, true);
	__cpu_number_map[cpuid] = cpu;
	__cpu_logical_map[cpu] = cpuid;

	return cpu;
}
#endif
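
/*
 * MADT CORE_PIC entries are walked twice (see acpi_process_madt() below):
 * pass 1 maps every enabled core to a logical CPU and marks it present,
 * pass 2 maps the remaining (disabled, potentially hot-pluggable) cores
 * as possible only. For instance, a hypothetical 5-core system whose
 * MADT marks core 4 as disabled would end up as:
 *
 *   pass 1: cores 0-3 -> logical CPUs 0-3, present and possible
 *   pass 2: core  4   -> logical CPU  4,  possible only (hotplug target)
 */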

static int __init
acpi_parse_p1_processor(union acpi_subtable_headers *header, const unsigned long end)
{
	struct acpi_madt_core_pic *processor = NULL;

	processor = (struct acpi_madt_core_pic *)header;
	if (BAD_MADT_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(&header->common);
#ifdef CONFIG_SMP
	acpi_core_pic[processor->core_id] = *processor;
	if (processor->flags & ACPI_MADT_ENABLED)
		set_processor_mask(processor->core_id, 1);
#endif

	return 0;
}

static int __init
acpi_parse_p2_processor(union acpi_subtable_headers *header, const unsigned long end)
{
	struct acpi_madt_core_pic *processor = NULL;

	processor = (struct acpi_madt_core_pic *)header;
	if (BAD_MADT_ENTRY(processor, end))
		return -EINVAL;

#ifdef CONFIG_SMP
	if (!(processor->flags & ACPI_MADT_ENABLED))
		set_processor_mask(processor->core_id, 2);
#endif

	return 0;
}

static int __init
acpi_parse_eio_master(union acpi_subtable_headers *header, const unsigned long end)
{
	int core;
	struct acpi_madt_eio_pic *eiointc = NULL;

	eiointc = (struct acpi_madt_eio_pic *)header;
	if (BAD_MADT_ENTRY(eiointc, end))
		return -EINVAL;

	/* The first core of each EIO node acts as that node's I/O master */
	core = eiointc->node * CORES_PER_EIO_NODE;
	set_bit(core, loongson_sysconf.cores_io_master);

	return 0;
}

static void __init acpi_process_madt(void)
{
#ifdef CONFIG_SMP
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		__cpu_number_map[i] = -1;
		__cpu_logical_map[i] = -1;
	}
#endif
	acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
			acpi_parse_p1_processor, MAX_CORE_PIC);

	acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
			acpi_parse_p2_processor, MAX_CORE_PIC);

	acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC,
			acpi_parse_eio_master, MAX_IO_PICS);

	loongson_sysconf.nr_cpus = num_processors;
}

int pptt_enabled;

int __init parse_acpi_topology(void)
{
	int cpu, topology_id;

	for_each_possible_cpu(cpu) {
		topology_id = find_acpi_cpu_topology(cpu, 0);
		if (topology_id < 0) {
			pr_warn("Invalid BIOS PPTT\n");
			return -ENOENT;
		}

		if (acpi_pptt_cpu_is_thread(cpu) <= 0) {
			cpu_data[cpu].core = topology_id;
		} else {
			topology_id = find_acpi_cpu_topology(cpu, 1);
			if (topology_id < 0)
				return -ENOENT;

			cpu_data[cpu].core = topology_id;
		}
	}

	pptt_enabled = 1;

	return 0;
}

#ifndef CONFIG_SUSPEND
int (*acpi_suspend_lowlevel)(void);
#else
int (*acpi_suspend_lowlevel)(void) = loongarch_acpi_suspend;
#endif

void __init acpi_boot_table_init(void)
{
	/*
	 * If acpi_disabled, bail out
	 */
	if (acpi_disabled)
		goto fdt_earlycon;

	/*
	 * Initialize the ACPI boot-time table parser.
	 */
	if (acpi_table_init()) {
		disable_acpi();
		goto fdt_earlycon;
	}

	loongson_sysconf.boot_cpu_id = read_csr_cpuid();

	/*
	 * Process the Multiple APIC Description Table (MADT), if present
	 */
	acpi_process_madt();

	/* Do not enable ACPI SPCR console by default */
	acpi_parse_spcr(earlycon_acpi_spcr_enable, false);

	if (IS_ENABLED(CONFIG_ACPI_BGRT))
		acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt);

	return;

fdt_earlycon:
	if (earlycon_acpi_spcr_enable)
		early_init_dt_scan_chosen_stdout();
}

#ifdef CONFIG_ACPI_NUMA

static __init int setup_node(int pxm)
{
	return acpi_map_pxm_to_node(pxm);
}
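
/*
 * numa_set_distance() below is invoked during SLIT parsing (via the
 * generic acpi_numa_slit_init() in drivers/acpi/numa/srat.c), once per
 * (from, to) locality pair. As an illustration, a two-node SLIT with
 * local distance 10 and remote distance 20 would fill in:
 *
 *   node_distances[0][0] = 10   node_distances[0][1] = 20
 *   node_distances[1][0] = 20   node_distances[1][1] = 10
 *
 * Distances that do not fit in a u8, or self-distances that are not
 * LOCAL_DISTANCE, are rejected.
 */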

/*
 * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
 * I/O localities since SRAT does not list them. I/O localities are
 * not supported at this point.
 */
unsigned int numa_distance_cnt;

static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit)
{
	return slit->locality_count;
}

void __init numa_set_distance(int from, int to, int distance)
{
	if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) {
		pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
				from, to, distance);
		return;
	}

	node_distances[from][to] = distance;
}

/* Callback for Proximity Domain -> CPUID mapping */
void __init
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
{
	int pxm, node;

	if (srat_disabled())
		return;
	if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
		bad_srat();
		return;
	}
	if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
		return;
	pxm = pa->proximity_domain_lo;
	if (acpi_srat_revision >= 2) {
		pxm |= (pa->proximity_domain_hi[0] << 8);
		pxm |= (pa->proximity_domain_hi[1] << 16);
		pxm |= (pa->proximity_domain_hi[2] << 24);
	}
	node = setup_node(pxm);
	if (node < 0) {
		pr_err("SRAT: Too many proximity domains %x\n", pxm);
		bad_srat();
		return;
	}

	if (pa->apic_id >= CONFIG_NR_CPUS) {
		pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u skipped apicid that is too big\n",
				pxm, pa->apic_id, node);
		return;
	}

	early_numa_add_cpu(pa->apic_id, node);

	set_cpuid_to_node(pa->apic_id, node);
	node_set(node, numa_nodes_parsed);
	pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node);
}

#endif

void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
{
	memblock_reserve(addr, size);
}

#ifdef CONFIG_ACPI_HOTPLUG_CPU

#include <acpi/processor.h>

static int __ref acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
{
#ifdef CONFIG_ACPI_NUMA
	int nid;

	nid = acpi_get_node(handle);

	/* Fall back to the early CPU-to-node mapping if ACPI has no node info */
	if (nid == NUMA_NO_NODE)
		nid = early_cpu_to_node(cpu);

	if (nid != NUMA_NO_NODE) {
		set_cpuid_to_node(physid, nid);
		node_set(nid, numa_nodes_parsed);
		set_cpu_numa_node(cpu, nid);
		cpumask_set_cpu(cpu, cpumask_of_node(nid));
	}
#endif
	return 0;
}

int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu)
{
	int cpu;

	cpu = cpu_number_map(physid);
	if (cpu < 0 || cpu >= nr_cpu_ids) {
		pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
		return -ERANGE;
	}

	num_processors++;
	set_cpu_present(cpu, true);
	acpi_map_cpu2node(handle, cpu, physid);

	*pcpu = cpu;

	return 0;
}
EXPORT_SYMBOL(acpi_map_cpu);

int acpi_unmap_cpu(int cpu)
{
#ifdef CONFIG_ACPI_NUMA
	set_cpuid_to_node(cpu_logical_map(cpu), NUMA_NO_NODE);
#endif
	set_cpu_present(cpu, false);
	num_processors--;

	pr_info("cpu%d hot remove!\n", cpu);

	return 0;
}
EXPORT_SYMBOL(acpi_unmap_cpu);

#endif /* CONFIG_ACPI_HOTPLUG_CPU */