// SPDX-License-Identifier: GPL-2.0-only
/*
 * efi.c - EFI subsystem
 *
 * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
 * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
 * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
 *
 * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
 * allowing the efivarfs to be mounted or the efivars module to be loaded.
 * The existence of /sys/firmware/efi may also be used by userspace to
 * determine that the system supports EFI.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/efi.h>
#include <linux/of.h>
#include <linux/initrd.h>
#include <linux/io.h>
#include <linux/kexec.h>
#include <linux/platform_device.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/ucs2_string.h>
#include <linux/memblock.h>
#include <linux/security.h>
#include <linux/notifier.h>

#include <asm/early_ioremap.h>

struct efi __read_mostly efi = {
	.runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
	.acpi = EFI_INVALID_TABLE_ADDR,
	.acpi20 = EFI_INVALID_TABLE_ADDR,
	.smbios = EFI_INVALID_TABLE_ADDR,
	.smbios3 = EFI_INVALID_TABLE_ADDR,
	.esrt = EFI_INVALID_TABLE_ADDR,
	.tpm_log = EFI_INVALID_TABLE_ADDR,
	.tpm_final_log = EFI_INVALID_TABLE_ADDR,
#ifdef CONFIG_LOAD_UEFI_KEYS
	.mokvar_table = EFI_INVALID_TABLE_ADDR,
#endif
#ifdef CONFIG_EFI_COCO_SECRET
	.coco_secret = EFI_INVALID_TABLE_ADDR,
#endif
#ifdef CONFIG_UNACCEPTED_MEMORY
	.unaccepted = EFI_INVALID_TABLE_ADDR,
#endif
};
EXPORT_SYMBOL(efi);

unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata initrd = EFI_INVALID_TABLE_ADDR;

extern unsigned long screen_info_table;

struct mm_struct efi_mm = {
	.mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
	.mm_users = ATOMIC_INIT(2),
	.mm_count = ATOMIC_INIT(1),
	.write_protect_seq = SEQCNT_ZERO(efi_mm.write_protect_seq),
	MMAP_LOCK_INITIALIZER(efi_mm)
	.page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
	.mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
	.cpu_bitmap = { [BITS_TO_LONGS(NR_CPUS)] = 0},
};

struct workqueue_struct *efi_rts_wq;

static bool disable_runtime = IS_ENABLED(CONFIG_EFI_DISABLE_RUNTIME);
static int __init setup_noefi(char *arg)
{
	disable_runtime = true;
	return 0;
}
early_param("noefi", setup_noefi);

bool efi_runtime_disabled(void)
{
	return disable_runtime;
}

bool __pure __efi_soft_reserve_enabled(void)
{
	return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
}

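/*
 * Parse the "efi=" option on the kernel command line. The recognised
 * sub-options are "debug", "noruntime", "runtime" and "nosoftreserve".
 */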
static int __init parse_efi_cmdline(char *str)
{
	if (!str) {
		pr_warn("need at least one option\n");
		return -EINVAL;
	}

	if (parse_option_str(str, "debug"))
		set_bit(EFI_DBG, &efi.flags);

	if (parse_option_str(str, "noruntime"))
		disable_runtime = true;

	if (parse_option_str(str, "runtime"))
		disable_runtime = false;

	if (parse_option_str(str, "nosoftreserve"))
		set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);

	return 0;
}
early_param("efi", parse_efi_cmdline);

struct kobject *efi_kobj;

/*
 * Let's not leave out systab information that snuck into
 * the efivars driver.
 * Note: do not add more fields to the systab sysfs file, as that breaks
 * the sysfs one-value-per-file rule!
 */
static ssize_t systab_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	char *str = buf;

	if (!kobj || !buf)
		return -EINVAL;

	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
	/*
	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
	 * SMBIOS3 entry point shall be preferred, so we list it first to
	 * let applications stop parsing after the first match.
	 */
	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);

	return str - buf;
}

static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);

static ssize_t fw_platform_size_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
}

extern __weak struct kobj_attribute efi_attr_fw_vendor;
extern __weak struct kobj_attribute efi_attr_runtime;
extern __weak struct kobj_attribute efi_attr_config_table;
static struct kobj_attribute efi_attr_fw_platform_size =
	__ATTR_RO(fw_platform_size);

static struct attribute *efi_subsys_attrs[] = {
	&efi_attr_systab.attr,
	&efi_attr_fw_platform_size.attr,
	&efi_attr_fw_vendor.attr,
	&efi_attr_runtime.attr,
	&efi_attr_config_table.attr,
	NULL,
};

umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
				   int n)
{
	return attr->mode;
}

static const struct attribute_group efi_subsys_attr_group = {
	.attrs = efi_subsys_attrs,
	.is_visible = efi_attr_is_visible,
};

struct blocking_notifier_head efivar_ops_nh;
EXPORT_SYMBOL_GPL(efivar_ops_nh);

static struct efivars generic_efivars;
static struct efivar_operations generic_ops;

static bool generic_ops_supported(void)
{
	unsigned long name_size;
	efi_status_t status;
	efi_char16_t name;
	efi_guid_t guid;

	name_size = sizeof(name);

	if (!efi.get_next_variable)
		return false;
	status = efi.get_next_variable(&name_size, &name, &guid);
	if (status == EFI_UNSUPPORTED)
		return false;

	return true;
}

static int generic_ops_register(void)
{
	if (!generic_ops_supported())
		return 0;

	generic_ops.get_variable = efi.get_variable;
	generic_ops.get_next_variable = efi.get_next_variable;
	generic_ops.query_variable_store = efi_query_variable_store;
	generic_ops.query_variable_info = efi.query_variable_info;

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
		generic_ops.set_variable = efi.set_variable;
		generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
	}
	return efivars_register(&generic_efivars, &generic_ops);
}

static void generic_ops_unregister(void)
{
	if (!generic_ops.get_variable)
		return;

	efivars_unregister(&generic_efivars);
}

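/*
 * Exported wrappers around generic_ops_register()/unregister(), so that
 * other efivars backends can switch the generic, runtime-services based
 * operations in and out at run time.
 */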
void efivars_generic_ops_register(void)
{
	generic_ops_register();
}
EXPORT_SYMBOL_GPL(efivars_generic_ops_register);

void efivars_generic_ops_unregister(void)
{
	generic_ops_unregister();
}
EXPORT_SYMBOL_GPL(efivars_generic_ops_unregister);

#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
#define EFIVAR_SSDT_NAME_MAX	16UL
static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
static int __init efivar_ssdt_setup(char *str)
{
	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);

	if (ret)
		return ret;

	if (strlen(str) < sizeof(efivar_ssdt))
		memcpy(efivar_ssdt, str, strlen(str));
	else
		pr_warn("efivar_ssdt: name too long: %s\n", str);
	return 1;
}
__setup("efivar_ssdt=", efivar_ssdt_setup);

static __init int efivar_ssdt_load(void)
{
	unsigned long name_size = 256;
	efi_char16_t *name = NULL;
	efi_status_t status;
	efi_guid_t guid;
	int ret = 0;

	if (!efivar_ssdt[0])
		return 0;

	name = kzalloc(name_size, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	for (;;) {
		char utf8_name[EFIVAR_SSDT_NAME_MAX];
		unsigned long data_size = 0;
		void *data;
		int limit;

		status = efi.get_next_variable(&name_size, name, &guid);
		if (status == EFI_NOT_FOUND) {
			break;
		} else if (status == EFI_BUFFER_TOO_SMALL) {
			efi_char16_t *name_tmp =
				krealloc(name, name_size, GFP_KERNEL);
			if (!name_tmp) {
				ret = -ENOMEM;
				goto out;
			}
			name = name_tmp;
			continue;
		}

		limit = min(EFIVAR_SSDT_NAME_MAX, name_size);
		ucs2_as_utf8(utf8_name, name, limit - 1);
		if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
			continue;

		pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid);

		status = efi.get_variable(name, &guid, NULL, &data_size, NULL);
		if (status != EFI_BUFFER_TOO_SMALL || !data_size) {
			ret = -EIO;
			goto out;
		}

		data = kmalloc(data_size, GFP_KERNEL);
		if (!data) {
			ret = -ENOMEM;
			goto out;
		}

		status = efi.get_variable(name, &guid, NULL, &data_size, data);
		if (status == EFI_SUCCESS) {
			acpi_status acpi_ret = acpi_load_table(data, NULL);
			if (ACPI_FAILURE(acpi_ret)) {
				pr_err("efivar_ssdt: failed to load table: %u\n",
				       acpi_ret);
			} else {
				/*
				 * The @data will be in use by ACPI engine,
				 * do not free it!
				 */
				continue;
			}
		} else {
			pr_err("efivar_ssdt: failed to get var data: 0x%lx\n", status);
		}
		kfree(data);
	}
out:
	kfree(name);
	return ret;
}
#else
static inline int efivar_ssdt_load(void) { return 0; }
#endif

#ifdef CONFIG_DEBUG_FS

#define EFI_DEBUGFS_MAX_BLOBS 32

static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];

static void __init efi_debugfs_init(void)
{
	struct dentry *efi_debugfs;
	efi_memory_desc_t *md;
	char name[32];
	int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
	int i = 0;

	efi_debugfs = debugfs_create_dir("efi", NULL);
	if (IS_ERR(efi_debugfs))
		return;

	for_each_efi_memory_desc(md) {
		switch (md->type) {
		case EFI_BOOT_SERVICES_CODE:
			snprintf(name, sizeof(name), "boot_services_code%d",
				 type_count[md->type]++);
			break;
		case EFI_BOOT_SERVICES_DATA:
			snprintf(name, sizeof(name), "boot_services_data%d",
				 type_count[md->type]++);
			break;
		default:
			continue;
		}

		if (i >= EFI_DEBUGFS_MAX_BLOBS) {
			pr_warn("More than %d EFI boot service segments, only showing first %d in debugfs\n",
				EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
			break;
		}

		debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
		debugfs_blob[i].data = memremap(md->phys_addr,
						debugfs_blob[i].size,
						MEMREMAP_WB);
		if (!debugfs_blob[i].data)
			continue;

		debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
		i++;
	}
}
#else
static inline void efi_debugfs_init(void) {}
#endif

/*
 * We register the efi subsystem with the firmware subsystem and the
 * efivars subsystem with the efi subsystem, if the system was booted with
 * EFI.
 */
static int __init efisubsys_init(void)
{
	int error;

	if (!efi_enabled(EFI_RUNTIME_SERVICES))
		efi.runtime_supported_mask = 0;

	if (!efi_enabled(EFI_BOOT))
		return 0;

	if (efi.runtime_supported_mask) {
		/*
		 * Since we process only one efi_runtime_service() at a time, an
		 * ordered workqueue (which creates only one execution context)
		 * should suffice for all our needs.
		 */
		efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
		if (!efi_rts_wq) {
			pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
			clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
			efi.runtime_supported_mask = 0;
			return 0;
		}
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
		platform_device_register_simple("rtc-efi", 0, NULL, 0);

	/* We register the efi directory at /sys/firmware/efi */
	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
	if (!efi_kobj) {
		pr_err("efi: Firmware registration failed.\n");
		error = -ENOMEM;
		goto err_destroy_wq;
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
		error = generic_ops_register();
		if (error)
			goto err_put;
		error = efivar_ssdt_load();
		if (error)
			pr_err("efi: failed to load SSDT, error %d.\n", error);
		platform_device_register_simple("efivars", 0, NULL, 0);
	}

	BLOCKING_INIT_NOTIFIER_HEAD(&efivar_ops_nh);

	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
	if (error) {
		pr_err("efi: Sysfs attribute export failed with error %d.\n",
		       error);
		goto err_unregister;
	}

	/* and the standard mountpoint for efivarfs */
	error = sysfs_create_mount_point(efi_kobj, "efivars");
	if (error) {
		pr_err("efivars: Subsystem registration failed.\n");
		goto err_remove_group;
	}

	if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
		efi_debugfs_init();

#ifdef CONFIG_EFI_COCO_SECRET
	if (efi.coco_secret != EFI_INVALID_TABLE_ADDR)
		platform_device_register_simple("efi_secret", 0, NULL, 0);
#endif

	return 0;

err_remove_group:
	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
err_unregister:
	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
		generic_ops_unregister();
err_put:
	kobject_put(efi_kobj);
	efi_kobj = NULL;
err_destroy_wq:
	if (efi_rts_wq)
		destroy_workqueue(efi_rts_wq);

	return error;
}

subsys_initcall(efisubsys_init);

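/*
 * Scan the EFI memory map for EFI_MEMORY_MORE_RELIABLE regions and mark
 * them as mirrored in memblock, so that the kernel can prefer mirrored
 * memory for its own critical allocations.
 */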
void __init efi_find_mirror(void)
{
	efi_memory_desc_t *md;
	u64 mirror_size = 0, total_size = 0;

	if (!efi_enabled(EFI_MEMMAP))
		return;

	for_each_efi_memory_desc(md) {
		unsigned long long start = md->phys_addr;
		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;

		total_size += size;
		if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
			memblock_mark_mirror(start, size);
			mirror_size += size;
		}
	}
	if (mirror_size)
		pr_info("Memory: %lldM/%lldM mirrored memory\n",
			mirror_size>>20, total_size>>20);
}

/*
 * Find the efi memory descriptor for a given physical address. Given a
 * physical address, determine if it exists within an EFI Memory Map entry,
 * and if so, populate the supplied memory descriptor with the appropriate
 * data.
 */
int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP)) {
		pr_err_once("EFI_MEMMAP is not enabled.\n");
		return -EINVAL;
	}

	if (!out_md) {
		pr_err_once("out_md is null.\n");
		return -EINVAL;
	}

	for_each_efi_memory_desc(md) {
		u64 size;
		u64 end;

		/* skip bogus entries (including empty ones) */
		if ((md->phys_addr & (EFI_PAGE_SIZE - 1)) ||
		    (md->num_pages <= 0) ||
		    (md->num_pages > (U64_MAX - md->phys_addr) >> EFI_PAGE_SHIFT))
			continue;

		size = md->num_pages << EFI_PAGE_SHIFT;
		end = md->phys_addr + size;
		if (phys_addr >= md->phys_addr && phys_addr < end) {
			memcpy(out_md, md, sizeof(*out_md));
			return 0;
		}
	}
	return -ENOENT;
}

extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
	__weak __alias(__efi_mem_desc_lookup);
EXPORT_SYMBOL_GPL(efi_mem_desc_lookup);

/*
 * Calculate the highest address of an efi memory descriptor.
 */
u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
{
	u64 size = md->num_pages << EFI_PAGE_SHIFT;
	u64 end = md->phys_addr + size;
	return end;
}

void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}

/**
 * efi_mem_reserve - Reserve an EFI memory region
 * @addr: Physical address to reserve
 * @size: Size of reservation
 *
 * Mark a region as reserved from general kernel allocation and
 * prevent it being released by efi_free_boot_services().
 *
 * This function should be called by drivers once they've parsed EFI
 * configuration tables to figure out where their data lives, e.g.
 * efi_esrt_init().
 */
void __init efi_mem_reserve(phys_addr_t addr, u64 size)
{
	/* efi_mem_reserve() does not work under Xen */
	if (WARN_ON_ONCE(efi_enabled(EFI_PARAVIRT)))
		return;

	if (!memblock_is_region_reserved(addr, size))
		memblock_reserve(addr, size);

	/*
	 * Some architectures (x86) reserve all boot services ranges
	 * until efi_free_boot_services() because of buggy firmware
	 * implementations. This means the above memblock_reserve() is
	 * superfluous on x86 and instead what it needs to do is
	 * ensure the @start, @size is not freed.
	 */
	efi_arch_mem_reserve(addr, size);
}

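/*
 * Configuration tables the generic EFI code knows how to consume: each
 * entry maps a table GUID onto the variable that receives the table's
 * physical address when the GUID is found in the firmware's table array.
 */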
static const efi_config_table_type_t common_tables[] __initconst = {
	{ACPI_20_TABLE_GUID, &efi.acpi20, "ACPI 2.0" },
	{ACPI_TABLE_GUID, &efi.acpi, "ACPI" },
	{SMBIOS_TABLE_GUID, &efi.smbios, "SMBIOS" },
	{SMBIOS3_TABLE_GUID, &efi.smbios3, "SMBIOS 3.0" },
	{EFI_SYSTEM_RESOURCE_TABLE_GUID, &efi.esrt, "ESRT" },
	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID, &efi_mem_attr_table, "MEMATTR" },
	{LINUX_EFI_RANDOM_SEED_TABLE_GUID, &efi_rng_seed, "RNG" },
	{LINUX_EFI_TPM_EVENT_LOG_GUID, &efi.tpm_log, "TPMEventLog" },
	{EFI_TCG2_FINAL_EVENTS_TABLE_GUID, &efi.tpm_final_log, "TPMFinalLog" },
	{EFI_CC_FINAL_EVENTS_TABLE_GUID, &efi.tpm_final_log, "CCFinalLog" },
	{LINUX_EFI_MEMRESERVE_TABLE_GUID, &mem_reserve, "MEMRESERVE" },
	{LINUX_EFI_INITRD_MEDIA_GUID, &initrd, "INITRD" },
	{EFI_RT_PROPERTIES_TABLE_GUID, &rt_prop, "RTPROP" },
#ifdef CONFIG_EFI_RCI2_TABLE
	{DELLEMC_EFI_RCI2_TABLE_GUID, &rci2_table_phys },
#endif
#ifdef CONFIG_LOAD_UEFI_KEYS
	{LINUX_EFI_MOK_VARIABLE_TABLE_GUID, &efi.mokvar_table, "MOKvar" },
#endif
#ifdef CONFIG_EFI_COCO_SECRET
	{LINUX_EFI_COCO_SECRET_AREA_GUID, &efi.coco_secret, "CocoSecret" },
#endif
#ifdef CONFIG_UNACCEPTED_MEMORY
	{LINUX_EFI_UNACCEPTED_MEM_TABLE_GUID, &efi.unaccepted, "Unaccepted" },
#endif
#ifdef CONFIG_EFI_GENERIC_STUB
	{LINUX_EFI_SCREEN_INFO_TABLE_GUID, &screen_info_table },
#endif
	{},
};

static __init int match_config_table(const efi_guid_t *guid,
				     unsigned long table,
				     const efi_config_table_type_t *table_types)
{
	int i;

	for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
		if (efi_guidcmp(*guid, table_types[i].guid))
			continue;

		if (!efi_config_table_is_usable(guid, table)) {
			if (table_types[i].name[0])
				pr_cont("(%s=0x%lx unusable) ",
					table_types[i].name, table);
			return 1;
		}

		*(table_types[i].ptr) = table;
		if (table_types[i].name[0])
			pr_cont("%s=0x%lx ", table_types[i].name, table);
		return 1;
	}

	return 0;
}

/**
 * reserve_unaccepted - Map and reserve unaccepted configuration table
 * @unaccepted: Pointer to unaccepted memory table
 *
 * memblock_add() makes sure that the table is mapped in direct mapping. During
 * normal boot it happens automatically because the table is allocated from
 * usable memory. But during crashkernel boot only memory specifically reserved
 * for crash scenario is mapped. memblock_add() forces the table to be mapped
 * in crashkernel case.
 *
 * Align the range to the nearest page borders. Ranges smaller than page size
 * are not going to be mapped.
 *
 * memblock_reserve() makes sure that future allocations will not touch the
 * table.
 */
static __init void reserve_unaccepted(struct efi_unaccepted_memory *unaccepted)
{
	phys_addr_t start, size;

	start = PAGE_ALIGN_DOWN(efi.unaccepted);
	size = PAGE_ALIGN(sizeof(*unaccepted) + unaccepted->size);

	memblock_add(start, size);
	memblock_reserve(start, size);
}

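/*
 * Walk the firmware's configuration table array, match each entry against
 * common_tables[] and any architecture-specific tables, and consume the
 * Linux-specific entries (RNG seed, MEMRESERVE list, RT properties table,
 * initrd location and unaccepted memory table).
 */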
int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
				   int count,
				   const efi_config_table_type_t *arch_tables)
{
	const efi_config_table_64_t *tbl64 = (void *)config_tables;
	const efi_config_table_32_t *tbl32 = (void *)config_tables;
	const efi_guid_t *guid;
	unsigned long table;
	int i;

	pr_info("");
	for (i = 0; i < count; i++) {
		if (!IS_ENABLED(CONFIG_X86)) {
			guid = &config_tables[i].guid;
			table = (unsigned long)config_tables[i].table;
		} else if (efi_enabled(EFI_64BIT)) {
			guid = &tbl64[i].guid;
			table = tbl64[i].table;

			if (IS_ENABLED(CONFIG_X86_32) &&
			    tbl64[i].table > U32_MAX) {
				pr_cont("\n");
				pr_err("Table located above 4GB, disabling EFI.\n");
				return -EINVAL;
			}
		} else {
			guid = &tbl32[i].guid;
			table = tbl32[i].table;
		}

		if (!match_config_table(guid, table, common_tables) && arch_tables)
			match_config_table(guid, table, arch_tables);
	}
	pr_cont("\n");
	set_bit(EFI_CONFIG_TABLES, &efi.flags);

	if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
		struct linux_efi_random_seed *seed;
		u32 size = 0;

		seed = early_memremap(efi_rng_seed, sizeof(*seed));
		if (seed != NULL) {
			size = min_t(u32, seed->size, SZ_1K); // sanity check
			early_memunmap(seed, sizeof(*seed));
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
		if (size > 0) {
			seed = early_memremap(efi_rng_seed,
					      sizeof(*seed) + size);
			if (seed != NULL) {
				add_bootloader_randomness(seed->bits, size);
				memzero_explicit(seed->bits, size);
				early_memunmap(seed, sizeof(*seed) + size);
			} else {
				pr_err("Could not map UEFI random seed!\n");
			}
		}
	}

	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
		efi_memattr_init();

	efi_tpm_eventlog_init();

	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
		unsigned long prsv = mem_reserve;

		while (prsv) {
			struct linux_efi_memreserve *rsv;
			u8 *p;

			/*
			 * Just map a full page: that is what we will get
			 * anyway, and it permits us to map the entire entry
			 * before knowing its size.
			 */
			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
					   PAGE_SIZE);
			if (p == NULL) {
				pr_err("Could not map UEFI memreserve entry!\n");
				return -ENOMEM;
			}

			rsv = (void *)(p + prsv % PAGE_SIZE);

			/* reserve the entry itself */
			memblock_reserve(prsv,
					 struct_size(rsv, entry, rsv->size));

			for (i = 0; i < atomic_read(&rsv->count); i++) {
				memblock_reserve(rsv->entry[i].base,
						 rsv->entry[i].size);
			}

			prsv = rsv->next;
			early_memunmap(p, PAGE_SIZE);
		}
	}

	if (rt_prop != EFI_INVALID_TABLE_ADDR) {
		efi_rt_properties_table_t *tbl;

		tbl = early_memremap(rt_prop, sizeof(*tbl));
		if (tbl) {
			efi.runtime_supported_mask &= tbl->runtime_services_supported;
			early_memunmap(tbl, sizeof(*tbl));
		}
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
	    initrd != EFI_INVALID_TABLE_ADDR && phys_initrd_size == 0) {
		struct linux_efi_initrd *tbl;

		tbl = early_memremap(initrd, sizeof(*tbl));
		if (tbl) {
			phys_initrd_start = tbl->base;
			phys_initrd_size = tbl->size;
			early_memunmap(tbl, sizeof(*tbl));
		}
	}

	if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY) &&
	    efi.unaccepted != EFI_INVALID_TABLE_ADDR) {
		struct efi_unaccepted_memory *unaccepted;

		unaccepted = early_memremap(efi.unaccepted, sizeof(*unaccepted));
		if (unaccepted) {

			if (unaccepted->version == 1) {
				reserve_unaccepted(unaccepted);
			} else {
				efi.unaccepted = EFI_INVALID_TABLE_ADDR;
			}

			early_memunmap(unaccepted, sizeof(*unaccepted));
		}
	}

	return 0;
}

int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr)
{
	if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
		pr_err("System table signature incorrect!\n");
		return -EINVAL;
	}

	return 0;
}

static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
						size_t size)
{
	const efi_char16_t *ret;

	ret = early_memremap_ro(fw_vendor, size);
	if (!ret)
		pr_err("Could not map the firmware vendor!\n");
	return ret;
}

static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
{
	early_memunmap((void *)fw_vendor, size);
}

void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
				     unsigned long fw_vendor)
{
	char vendor[100] = "unknown";
	const efi_char16_t *c16;
	size_t i;
	u16 rev;

	c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
	if (c16) {
		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
			vendor[i] = c16[i];
		vendor[i] = '\0';

		unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
	}

	rev = (u16)systab_hdr->revision;
	pr_info("EFI v%u.%u", systab_hdr->revision >> 16, rev / 10);

	rev %= 10;
	if (rev)
		pr_cont(".%u", rev);

	pr_cont(" by %s\n", vendor);

	if (IS_ENABLED(CONFIG_X86_64) &&
	    systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
	    !strcmp(vendor, "Apple")) {
		pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
		efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
	}
}

static __initdata char memory_type_name[][13] = {
	"Reserved",
	"Loader Code",
	"Loader Data",
	"Boot Code",
	"Boot Data",
	"Runtime Code",
	"Runtime Data",
	"Conventional",
	"Unusable",
	"ACPI Reclaim",
	"ACPI Mem NVS",
	"MMIO",
	"MMIO Port",
	"PAL Code",
	"Persistent",
	"Unaccepted",
};

char * __init efi_md_typeattr_format(char *buf, size_t size,
				     const efi_memory_desc_t *md)
{
	char *pos;
	int type_len;
	u64 attr;

	pos = buf;
	if (md->type >= ARRAY_SIZE(memory_type_name))
		type_len = snprintf(pos, size, "[type=%u", md->type);
	else
		type_len = snprintf(pos, size, "[%-*s",
				    (int)(sizeof(memory_type_name[0]) - 1),
				    memory_type_name[md->type]);
	if (type_len >= size)
		return buf;

	pos += type_len;
	size -= type_len;

	attr = md->attribute;
	if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
		     EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
		     EFI_MEMORY_MORE_RELIABLE | EFI_MEMORY_HOT_PLUGGABLE |
		     EFI_MEMORY_RUNTIME))
		snprintf(pos, size, "|attr=0x%016llx]",
			 (unsigned long long)attr);
	else
		snprintf(pos, size,
			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
			 attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
			 attr & EFI_MEMORY_HOT_PLUGGABLE ? "HP" : "",
			 attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
			 attr & EFI_MEMORY_CPU_CRYPTO ? "CC" : "",
			 attr & EFI_MEMORY_SP ? "SP" : "",
			 attr & EFI_MEMORY_NV ? "NV" : "",
			 attr & EFI_MEMORY_XP ? "XP" : "",
			 attr & EFI_MEMORY_RP ? "RP" : "",
			 attr & EFI_MEMORY_WP ? "WP" : "",
			 attr & EFI_MEMORY_RO ? "RO" : "",
			 attr & EFI_MEMORY_UCE ? "UCE" : "",
			 attr & EFI_MEMORY_WB ? "WB" : "",
			 attr & EFI_MEMORY_WT ? "WT" : "",
			 attr & EFI_MEMORY_WC ? "WC" : "",
			 attr & EFI_MEMORY_UC ? "UC" : "");
	return buf;
}

/*
 * efi_mem_attributes - lookup memmap attributes for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering
 * @phys_addr. Returns the EFI memory attributes if the region
 * was found in the memory map, 0 otherwise.
 */
u64 efi_mem_attributes(unsigned long phys_addr)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return 0;

	for_each_efi_memory_desc(md) {
		if ((md->phys_addr <= phys_addr) &&
		    (phys_addr < (md->phys_addr +
				  (md->num_pages << EFI_PAGE_SHIFT))))
			return md->attribute;
	}
	return 0;
}

/*
 * efi_mem_type - lookup memmap type for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering @phys_addr.
 * Returns the EFI memory type if the region was found in the memory
 * map, -EINVAL otherwise.
 */
int efi_mem_type(unsigned long phys_addr)
{
	const efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return -ENOTSUPP;

	for_each_efi_memory_desc(md) {
		if ((md->phys_addr <= phys_addr) &&
		    (phys_addr < (md->phys_addr +
				  (md->num_pages << EFI_PAGE_SHIFT))))
			return md->type;
	}
	return -EINVAL;
}

int efi_status_to_err(efi_status_t status)
{
	int err;

	switch (status) {
	case EFI_SUCCESS:
		err = 0;
		break;
	case EFI_INVALID_PARAMETER:
		err = -EINVAL;
		break;
	case EFI_OUT_OF_RESOURCES:
		err = -ENOSPC;
		break;
	case EFI_DEVICE_ERROR:
		err = -EIO;
		break;
	case EFI_WRITE_PROTECTED:
		err = -EROFS;
		break;
	case EFI_SECURITY_VIOLATION:
		err = -EACCES;
		break;
	case EFI_NOT_FOUND:
		err = -ENOENT;
		break;
	case EFI_ABORTED:
		err = -EINTR;
		break;
	default:
		err = -EINVAL;
	}

	return err;
}
EXPORT_SYMBOL_GPL(efi_status_to_err);

static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;

static int __init efi_memreserve_map_root(void)
{
	if (mem_reserve == EFI_INVALID_TABLE_ADDR)
		return -ENODEV;

	efi_memreserve_root = memremap(mem_reserve,
				       sizeof(*efi_memreserve_root),
				       MEMREMAP_WB);
	if (WARN_ON_ONCE(!efi_memreserve_root))
		return -ENOMEM;
	return 0;
}

static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
{
	struct resource *res, *parent;
	int ret;

	res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
	if (!res)
		return -ENOMEM;

	res->name = "reserved";
	res->flags = IORESOURCE_MEM;
	res->start = addr;
	res->end = addr + size - 1;

	/* we expect a conflict with a 'System RAM' region */
	parent = request_resource_conflict(&iomem_resource, res);
	ret = parent ? request_resource(parent, res) : 0;

	/*
	 * Given that efi_mem_reserve_iomem() can be called at any
	 * time, only call memblock_reserve() if the architecture
	 * keeps the infrastructure around.
	 */
	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
		memblock_reserve(addr, size);

	return ret;
}

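/*
 * Record a reservation in the LINUX_EFI_MEMRESERVE linked list so that it
 * survives kexec: the region is added to an existing list node if one has
 * a free slot, otherwise a new page-sized node is allocated and linked in.
 */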
int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
{
	struct linux_efi_memreserve *rsv;
	unsigned long prsv;
	int rc, index;

	if (efi_memreserve_root == (void *)ULONG_MAX)
		return -ENODEV;

	if (!efi_memreserve_root) {
		rc = efi_memreserve_map_root();
		if (rc)
			return rc;
	}

	/* first try to find a slot in an existing linked list entry */
	for (prsv = efi_memreserve_root->next; prsv; ) {
		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
		if (!rsv)
			return -ENOMEM;
		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
		if (index < rsv->size) {
			rsv->entry[index].base = addr;
			rsv->entry[index].size = size;

			memunmap(rsv);
			return efi_mem_reserve_iomem(addr, size);
		}
		prsv = rsv->next;
		memunmap(rsv);
	}

	/* no slot found - allocate a new linked list entry */
	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
	if (!rsv)
		return -ENOMEM;

	rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
	if (rc) {
		free_page((unsigned long)rsv);
		return rc;
	}

	/*
	 * The memremap() call above assumes that a linux_efi_memreserve entry
	 * never crosses a page boundary, so let's ensure that this remains true
	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
	 * using SZ_4K explicitly in the size calculation below.
	 */
	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
	atomic_set(&rsv->count, 1);
	rsv->entry[0].base = addr;
	rsv->entry[0].size = size;

	spin_lock(&efi_mem_reserve_persistent_lock);
	rsv->next = efi_memreserve_root->next;
	efi_memreserve_root->next = __pa(rsv);
	spin_unlock(&efi_mem_reserve_persistent_lock);

	return efi_mem_reserve_iomem(addr, size);
}

static int __init efi_memreserve_root_init(void)
{
	if (efi_memreserve_root)
		return 0;
	if (efi_memreserve_map_root())
		efi_memreserve_root = (void *)ULONG_MAX;
	return 0;
}
early_initcall(efi_memreserve_root_init);

#ifdef CONFIG_KEXEC
static int update_efi_random_seed(struct notifier_block *nb,
				  unsigned long code, void *unused)
{
	struct linux_efi_random_seed *seed;
	u32 size = 0;

	if (!kexec_in_progress)
		return NOTIFY_DONE;

	seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
	if (seed != NULL) {
		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
		memunmap(seed);
	} else {
		pr_err("Could not map UEFI random seed!\n");
	}
	if (size > 0) {
		seed = memremap(efi_rng_seed, sizeof(*seed) + size,
				MEMREMAP_WB);
		if (seed != NULL) {
			seed->size = size;
			get_random_bytes(seed->bits, seed->size);
			memunmap(seed);
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block efi_random_seed_nb = {
	.notifier_call = update_efi_random_seed,
};

static int __init register_update_efi_random_seed(void)
{
	if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
		return 0;
	return register_reboot_notifier(&efi_random_seed_nb);
}
late_initcall(register_update_efi_random_seed);
#endif