// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARM64 Specific Low-Level ACPI Boot Support
 *
 * Copyright (C) 2013-2014, Linaro Ltd.
 *	Author: Al Stone <al.stone@linaro.org>
 *	Author: Graeme Gregory <graeme.gregory@linaro.org>
 *	Author: Hanjun Guo <hanjun.guo@linaro.org>
 *	Author: Tomasz Nowicki <tomasz.nowicki@linaro.org>
 *	Author: Naresh Bhat <naresh.bhat@linaro.org>
 */

#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/acpi.h>
#include <linux/arm-smccc.h>
#include <linux/cpumask.h>
#include <linux/efi.h>
#include <linux/efi-bgrt.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irq_work.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/smp.h>
#include <linux/serial_core.h>
#include <linux/suspend.h>
#include <linux/pgtable.h>

#include <acpi/ghes.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/daifflags.h>
#include <asm/smp_plat.h>

int acpi_noirq = 1;		/* skip ACPI IRQ initialization */
int acpi_disabled = 1;
EXPORT_SYMBOL(acpi_disabled);

int acpi_pci_disabled = 1;	/* skip ACPI PCI scan and IRQ initialization */
EXPORT_SYMBOL(acpi_pci_disabled);

static bool param_acpi_off __initdata;
static bool param_acpi_on __initdata;
static bool param_acpi_force __initdata;

static int __init parse_acpi(char *arg)
{
	if (!arg)
		return -EINVAL;

	/* "acpi=off" disables both ACPI table parsing and interpreter */
	if (strcmp(arg, "off") == 0)
		param_acpi_off = true;
	else if (strcmp(arg, "on") == 0)	/* prefer ACPI over DT */
		param_acpi_on = true;
	else if (strcmp(arg, "force") == 0)	/* force ACPI to be enabled */
		param_acpi_force = true;
	else
		return -EINVAL;	/* Core will print when we return error */

	return 0;
}
early_param("acpi", parse_acpi);
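
/*
 * Example: booting with "acpi=off" disables ACPI table parsing and the
 * interpreter entirely; "acpi=on" prefers ACPI over a non-stub device tree
 * but still honours the FADT sanity checks carried out below, while
 * "acpi=force" keeps ACPI enabled even when those checks fail (see
 * acpi_boot_table_init()).
 */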

static bool __init dt_is_stub(void)
{
	int node;

	fdt_for_each_subnode(node, initial_boot_params, 0) {
		const char *name = fdt_get_name(initial_boot_params, node, NULL);

		if (strcmp(name, "chosen") == 0)
			continue;
		if (strcmp(name, "hypervisor") == 0 &&
		    of_flat_dt_is_compatible(node, "xen,xen"))
			continue;

		return false;
	}

	return true;
}

/*
 * __acpi_map_table() will be called before paging_init(), so early_ioremap()
 * or early_memremap() should be used here for ACPI table mapping.
 */
void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size)
{
	if (!size)
		return NULL;

	return early_memremap(phys, size);
}

void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
{
	if (!map || !size)
		return;

	early_memunmap(map, size);
}

bool __init acpi_psci_present(void)
{
	return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_COMPLIANT;
}

/* Whether HVC must be used instead of SMC as the PSCI conduit */
bool acpi_psci_use_hvc(void)
{
	return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_USE_HVC;
}
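
/*
 * The two helpers above are consumed by the PSCI firmware driver
 * (drivers/firmware/psci) when it initialises from ACPI rather than DT:
 * acpi_psci_present() gates PSCI support as a whole, and
 * acpi_psci_use_hvc() selects HVC as the conduit, e.g. when the kernel
 * was entered at EL1 under a hypervisor.
 */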

/*
 * acpi_fadt_sanity_check() - Check FADT presence and carry out sanity
 *			      checks on it
 *
 * Return 0 on success, <0 on failure
 */
static int __init acpi_fadt_sanity_check(void)
{
	struct acpi_table_header *table;
	struct acpi_table_fadt *fadt;
	acpi_status status;
	int ret = 0;

	/*
	 * FADT is required on arm64; retrieve it to check its presence
	 * and carry out revision and ACPI HW reduced compliance checks
	 */
	status = acpi_get_table(ACPI_SIG_FADT, 0, &table);
	if (ACPI_FAILURE(status)) {
		const char *msg = acpi_format_exception(status);

		pr_err("Failed to get FADT table, %s\n", msg);
		return -ENODEV;
	}

	fadt = (struct acpi_table_fadt *)table;

	/*
	 * The revision in the table header is the FADT major revision; a
	 * minor revision of the FADT was introduced by ACPI 5.1. We only
	 * deal with ACPI 5.1 or newer revisions, in order to get the GIC
	 * and SMP boot protocol configuration data.
	 */
	if (table->revision < 5 ||
	   (table->revision == 5 && fadt->minor_revision < 1)) {
		pr_err(FW_BUG "Unsupported FADT revision %d.%d, should be 5.1+\n",
		       table->revision, fadt->minor_revision);

		if (!fadt->arm_boot_flags) {
			ret = -EINVAL;
			goto out;
		}
		pr_err("FADT has ARM boot flags set, assuming 5.1\n");
	}

	if (!(fadt->flags & ACPI_FADT_HW_REDUCED)) {
		pr_err("FADT not ACPI hardware reduced compliant\n");
		ret = -EINVAL;
	}

out:
	/*
	 * acpi_get_table() creates a FADT table mapping that
	 * should be released after parsing and before resuming boot
	 */
	acpi_put_table(table);
	return ret;
}

/*
 * acpi_boot_table_init() called from setup_arch(), always.
 *	1. find RSDP and get its address, and then find XSDT
 *	2. extract all tables and checksum them all
 *	3. check ACPI FADT revision
 *	4. check ACPI FADT HW reduced flag
 *
 * We can parse ACPI boot-time tables such as MADT after
 * this function is called.
 *
 * On return ACPI is enabled if either:
 *
 * - ACPI tables are initialized and sanity checks passed
 * - acpi=force was passed in the command line and ACPI was not disabled
 *   explicitly through acpi=off command line parameter
 *
 * ACPI is disabled on function return otherwise
 */
void __init acpi_boot_table_init(void)
{
	/*
	 * Enable ACPI instead of device tree unless
	 * - ACPI has been disabled explicitly (acpi=off), or
	 * - the device tree is not empty (it has more than just a /chosen node,
	 *   and a /hypervisor node when running on Xen)
	 *   and ACPI has not been [force] enabled (acpi=on|force)
	 */
	if (param_acpi_off ||
	    (!param_acpi_on && !param_acpi_force && !dt_is_stub()))
		goto done;

	/*
	 * ACPI is disabled at this point. Enable it in order to parse
	 * the ACPI tables and carry out sanity checks
	 */
	enable_acpi();

	/*
	 * If ACPI tables are initialized and FADT sanity checks passed,
	 * leave ACPI enabled and carry on booting; otherwise disable ACPI
	 * on initialization error.
	 * If acpi=force was passed on the command line it forces ACPI
	 * to be enabled even if its initialization failed.
	 */
	if (acpi_table_init() || acpi_fadt_sanity_check()) {
		pr_err("Failed to init ACPI tables\n");
		if (!param_acpi_force)
			disable_acpi();
	}

done:
	if (acpi_disabled) {
		if (earlycon_acpi_spcr_enable)
			early_init_dt_scan_chosen_stdout();
	} else {
#ifdef CONFIG_HIBERNATION
		struct acpi_table_header *facs = NULL;

		acpi_get_table(ACPI_SIG_FACS, 1, &facs);
		if (facs) {
			swsusp_hardware_signature =
				((struct acpi_table_facs *)facs)->hardware_signature;
			acpi_put_table(facs);
		}
#endif
		acpi_parse_spcr(earlycon_acpi_spcr_enable, true);
		if (IS_ENABLED(CONFIG_ACPI_BGRT))
			acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt);
	}
}

static pgprot_t __acpi_get_writethrough_mem_attribute(void)
{
	/*
	 * Although UEFI specifies the use of Normal Write-through for
	 * EFI_MEMORY_WT, it is seldom used in practice and not implemented
	 * by most (all?) CPUs. Rather than allocate a MAIR just for this
	 * purpose, emit a warning and use Normal Non-cacheable instead.
	 */
	pr_warn_once("No MAIR allocation for EFI_MEMORY_WT; treating as Normal Non-cacheable\n");
	return __pgprot(PROT_NORMAL_NC);
}

pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
{
	/*
	 * According to "Table 8 Map: EFI memory types to AArch64 memory
	 * types" of UEFI 2.5 section 2.3.6.1, each EFI memory type is
	 * mapped to a corresponding MAIR attribute encoding.
	 * The EFI memory attribute advises all possible capabilities
	 * of a memory region.
	 */

	u64 attr;

	attr = efi_mem_attributes(addr);
	if (attr & EFI_MEMORY_WB)
		return PAGE_KERNEL;
	if (attr & EFI_MEMORY_WC)
		return __pgprot(PROT_NORMAL_NC);
	if (attr & EFI_MEMORY_WT)
		return __acpi_get_writethrough_mem_attribute();
	return __pgprot(PROT_DEVICE_nGnRnE);
}
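
/*
 * For example, a region whose descriptor advertises EFI_MEMORY_WB
 * alongside EFI_MEMORY_WT or EFI_MEMORY_WC capabilities is mapped with
 * PAGE_KERNEL, as write-back is tested first; a region advertising no
 * cacheability attribute at all falls back to Device-nGnRnE.
 */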

void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
	efi_memory_desc_t *md, *region = NULL;
	pgprot_t prot;

	if (WARN_ON_ONCE(!efi_enabled(EFI_MEMMAP)))
		return NULL;

	for_each_efi_memory_desc(md) {
		u64 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);

		if (phys < md->phys_addr || phys >= end)
			continue;

		if (phys + size > end) {
			pr_warn(FW_BUG "requested region covers multiple EFI memory regions\n");
			return NULL;
		}
		region = md;
		break;
	}

	/*
	 * It is fine for AML to remap regions that are not represented in the
	 * EFI memory map at all, as it only describes normal memory, and MMIO
	 * regions that require a virtual mapping to make them accessible to
	 * the EFI runtime services.
	 */
	prot = __pgprot(PROT_DEVICE_nGnRnE);
	if (region) {
		switch (region->type) {
		case EFI_LOADER_CODE:
		case EFI_LOADER_DATA:
		case EFI_BOOT_SERVICES_CODE:
		case EFI_BOOT_SERVICES_DATA:
		case EFI_CONVENTIONAL_MEMORY:
		case EFI_PERSISTENT_MEMORY:
			if (memblock_is_map_memory(phys) ||
			    !memblock_is_region_memory(phys, size)) {
				pr_warn(FW_BUG "requested region covers kernel memory @ %pa\n", &phys);
				return NULL;
			}
			/*
			 * Mapping kernel memory is permitted if the region in
			 * question is covered by a single memblock with the
			 * NOMAP attribute set: this enables the use of ACPI
			 * table overrides passed via initramfs, which are
			 * reserved in memory using arch_reserve_mem_area()
			 * below. As this particular use case only requires
			 * read access, fall through to the R/O mapping case.
			 */
			fallthrough;

		case EFI_RUNTIME_SERVICES_CODE:
			/*
			 * This would be unusual, but not problematic per se,
			 * as long as we take care not to create a writable
			 * mapping for executable code.
			 */
			prot = PAGE_KERNEL_RO;
			break;

		case EFI_ACPI_RECLAIM_MEMORY:
			/*
			 * ACPI reclaim memory is used to pass firmware tables
			 * and other data that is intended for consumption by
			 * the OS only, which may decide it wants to reclaim
			 * that memory and use it for something else. We never
			 * do that, but we usually add it to the linear map
			 * anyway, in which case we should use the existing
			 * mapping.
			 */
			if (memblock_is_map_memory(phys))
				return (void __iomem *)__phys_to_virt(phys);
			fallthrough;

		default:
			if (region->attribute & EFI_MEMORY_WB)
				prot = PAGE_KERNEL;
			else if (region->attribute & EFI_MEMORY_WC)
				prot = __pgprot(PROT_NORMAL_NC);
			else if (region->attribute & EFI_MEMORY_WT)
				prot = __acpi_get_writethrough_mem_attribute();
		}
	}
	return ioremap_prot(phys, size, pgprot_val(prot));
}

/*
 * Claim Synchronous External Aborts as a firmware first notification.
 *
 * Used by KVM and the arch do_sea handler.
 * @regs may be NULL when called from process context.
 */
int apei_claim_sea(struct pt_regs *regs)
{
	int err = -ENOENT;
	bool return_to_irqs_enabled;
	unsigned long current_flags;

	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
		return err;

	current_flags = local_daif_save_flags();

	/* current_flags isn't useful here as daif doesn't tell us about pNMI */
	return_to_irqs_enabled = !irqs_disabled_flags(arch_local_save_flags());

	if (regs)
		return_to_irqs_enabled = interrupts_enabled(regs);

	/*
	 * SEA can interrupt SError, mask it and describe this as an NMI so
	 * that APEI defers the handling.
	 */
	local_daif_restore(DAIF_ERRCTX);
	nmi_enter();
	err = ghes_notify_sea();
	nmi_exit();

	/*
	 * APEI NMI-like notifications are deferred to irq_work. Unless
	 * we interrupted irqs-masked code, we can do that now.
	 */
	if (!err) {
		if (return_to_irqs_enabled) {
			local_daif_restore(DAIF_PROCCTX_NOIRQ);
			__irq_enter();
			irq_work_run();
			__irq_exit();
		} else {
			pr_warn_ratelimited("APEI work queued but not completed\n");
			err = -EINPROGRESS;
		}
	}

	local_daif_restore(current_flags);

	return err;
}

void arch_reserve_mem_area(acpi_physical_address addr, size_t size)
{
	memblock_mark_nomap(addr, size);
}

#ifdef CONFIG_ACPI_FFH
/*
 * Implements ARM64 specific callbacks to support ACPI FFH Operation Region as
 * specified in https://developer.arm.com/docs/den0048/latest
 */
struct acpi_ffh_data {
	struct acpi_ffh_info info;
	void (*invoke_ffh_fn)(unsigned long a0, unsigned long a1,
			      unsigned long a2, unsigned long a3,
			      unsigned long a4, unsigned long a5,
			      unsigned long a6, unsigned long a7,
			      struct arm_smccc_res *res,
			      struct arm_smccc_quirk *quirk);
	void (*invoke_ffh64_fn)(const struct arm_smccc_1_2_regs *args,
				struct arm_smccc_1_2_regs *res);
};

int acpi_ffh_address_space_arch_setup(void *handler_ctxt, void **region_ctxt)
{
	enum arm_smccc_conduit conduit;
	struct acpi_ffh_data *ffh_ctxt;

	if (arm_smccc_get_version() < ARM_SMCCC_VERSION_1_2)
		return -EOPNOTSUPP;

	conduit = arm_smccc_1_1_get_conduit();
	if (conduit == SMCCC_CONDUIT_NONE) {
		pr_err("%s: invalid SMCCC conduit\n", __func__);
		return -EOPNOTSUPP;
	}

	ffh_ctxt = kzalloc(sizeof(*ffh_ctxt), GFP_KERNEL);
	if (!ffh_ctxt)
		return -ENOMEM;

	if (conduit == SMCCC_CONDUIT_SMC) {
		ffh_ctxt->invoke_ffh_fn = __arm_smccc_smc;
		ffh_ctxt->invoke_ffh64_fn = arm_smccc_1_2_smc;
	} else {
		ffh_ctxt->invoke_ffh_fn = __arm_smccc_hvc;
		ffh_ctxt->invoke_ffh64_fn = arm_smccc_1_2_hvc;
	}

	memcpy(ffh_ctxt, handler_ctxt, sizeof(ffh_ctxt->info));

	*region_ctxt = ffh_ctxt;
	return AE_OK;
}

static bool acpi_ffh_smccc_owner_allowed(u32 fid)
{
	int owner = ARM_SMCCC_OWNER_NUM(fid);

	if (owner == ARM_SMCCC_OWNER_STANDARD ||
	    owner == ARM_SMCCC_OWNER_SIP || owner == ARM_SMCCC_OWNER_OEM)
		return true;

	return false;
}
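
/*
 * For illustration only (a hypothetical ASL sketch, not taken from real
 * firmware): per the FFH spec referenced above, firmware selects the
 * calling convention through the region offset that the handler below
 * dispatches on, with offset 0 denoting a 32-bit SMC/HVC call and
 * offset 1 a 64-bit one, e.g.
 *
 *	OperationRegion (FFH0, FFixedHW, 0, 32)		// 32-bit convention
 *	OperationRegion (FFH1, FFixedHW, 1, 144)	// 64-bit convention
 *
 * The buffer exchanged through such a region starts with the SMCCC
 * function ID, which must describe a fast call owned by the standard,
 * SiP or OEM service ranges (see acpi_ffh_smccc_owner_allowed() above).
 */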

int acpi_ffh_address_space_arch_handler(acpi_integer *value, void *region_context)
{
	int ret = 0;
	struct acpi_ffh_data *ffh_ctxt = region_context;

	if (ffh_ctxt->info.offset == 0) {
		/* SMC/HVC 32bit call */
		struct arm_smccc_res res;
		u32 a[8] = { 0 }, *ptr = (u32 *)value;

		if (!ARM_SMCCC_IS_FAST_CALL(*ptr) || ARM_SMCCC_IS_64(*ptr) ||
		    !acpi_ffh_smccc_owner_allowed(*ptr) ||
		    ffh_ctxt->info.length > 32) {
			ret = AE_ERROR;
		} else {
			int idx, len = ffh_ctxt->info.length >> 2;

			for (idx = 0; idx < len; idx++)
				a[idx] = *(ptr + idx);

			ffh_ctxt->invoke_ffh_fn(a[0], a[1], a[2], a[3], a[4],
						a[5], a[6], a[7], &res, NULL);
			memcpy(value, &res, sizeof(res));
		}

	} else if (ffh_ctxt->info.offset == 1) {
		/* SMC/HVC 64bit call */
		struct arm_smccc_1_2_regs *r = (struct arm_smccc_1_2_regs *)value;

		if (!ARM_SMCCC_IS_FAST_CALL(r->a0) || !ARM_SMCCC_IS_64(r->a0) ||
		    !acpi_ffh_smccc_owner_allowed(r->a0) ||
		    ffh_ctxt->info.length > sizeof(*r)) {
			ret = AE_ERROR;
		} else {
			ffh_ctxt->invoke_ffh64_fn(r, r);
			memcpy(value, r, ffh_ctxt->info.length);
		}
	} else {
		ret = AE_ERROR;
	}

	return ret;
}
#endif /* CONFIG_ACPI_FFH */