// SPDX-License-Identifier: GPL-2.0-only
/*
 * efi.c - EFI subsystem
 *
 * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
 * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
 * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
 *
 * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
 * allowing the efivarfs to be mounted or the efivars module to be loaded.
 * The existence of /sys/firmware/efi may also be used by userspace to
 * determine that the system supports EFI.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/efi.h>
#include <linux/of.h>
#include <linux/initrd.h>
#include <linux/io.h>
#include <linux/kexec.h>
#include <linux/platform_device.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/ucs2_string.h>
#include <linux/memblock.h>
#include <linux/security.h>
#include <linux/notifier.h>

#include <asm/early_ioremap.h>

struct efi __read_mostly efi = {
	.runtime_supported_mask	= EFI_RT_SUPPORTED_ALL,
	.acpi			= EFI_INVALID_TABLE_ADDR,
	.acpi20			= EFI_INVALID_TABLE_ADDR,
	.smbios			= EFI_INVALID_TABLE_ADDR,
	.smbios3		= EFI_INVALID_TABLE_ADDR,
	.esrt			= EFI_INVALID_TABLE_ADDR,
	.tpm_log		= EFI_INVALID_TABLE_ADDR,
	.tpm_final_log		= EFI_INVALID_TABLE_ADDR,
#ifdef CONFIG_LOAD_UEFI_KEYS
	.mokvar_table		= EFI_INVALID_TABLE_ADDR,
#endif
#ifdef CONFIG_EFI_COCO_SECRET
	.coco_secret		= EFI_INVALID_TABLE_ADDR,
#endif
#ifdef CONFIG_UNACCEPTED_MEMORY
	.unaccepted		= EFI_INVALID_TABLE_ADDR,
#endif
};
EXPORT_SYMBOL(efi);

unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata initrd = EFI_INVALID_TABLE_ADDR;

extern unsigned long screen_info_table;

struct mm_struct efi_mm = {
	.mm_mt			= MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
	.mm_users		= ATOMIC_INIT(2),
	.mm_count		= ATOMIC_INIT(1),
	.write_protect_seq	= SEQCNT_ZERO(efi_mm.write_protect_seq),
	MMAP_LOCK_INITIALIZER(efi_mm)
	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
	.cpu_bitmap		= { [BITS_TO_LONGS(NR_CPUS)] = 0},
};

struct workqueue_struct *efi_rts_wq;

static bool disable_runtime = IS_ENABLED(CONFIG_EFI_DISABLE_RUNTIME);
static int __init setup_noefi(char *arg)
{
	disable_runtime = true;
	return 0;
}
early_param("noefi", setup_noefi);

bool efi_runtime_disabled(void)
{
	return disable_runtime;
}

bool __pure __efi_soft_reserve_enabled(void)
{
	return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
}

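/*
 * Illustrative summary of the options accepted on the "efi=" kernel command
 * line and handled by parse_efi_cmdline() below:
 *
 *   efi=debug          enable verbose EFI reporting (sets EFI_DBG)
 *   efi=noruntime      disable EFI runtime services
 *   efi=runtime        force EFI runtime services back on
 *   efi=nosoftreserve  ignore the EFI_MEMORY_SP "soft reserve" attribute
 *
 * e.g. booting with "efi=debug,nosoftreserve" sets both EFI_DBG and
 * EFI_MEM_NO_SOFT_RESERVE in efi.flags.
 */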
static int __init parse_efi_cmdline(char *str)
{
	if (!str) {
		pr_warn("need at least one option\n");
		return -EINVAL;
	}

	if (parse_option_str(str, "debug"))
		set_bit(EFI_DBG, &efi.flags);

	if (parse_option_str(str, "noruntime"))
		disable_runtime = true;

	if (parse_option_str(str, "runtime"))
		disable_runtime = false;

	if (parse_option_str(str, "nosoftreserve"))
		set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);

	return 0;
}
early_param("efi", parse_efi_cmdline);

struct kobject *efi_kobj;

/*
 * Let's not leave out the systab information that snuck into
 * the efivars driver.
 * Note: do not add more fields to the systab sysfs file, as that breaks
 * the sysfs one-value-per-file rule!
 */
static ssize_t systab_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	char *str = buf;

	if (!kobj || !buf)
		return -EINVAL;

	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
	/*
	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
	 * SMBIOS3 entry point shall be preferred, so we list it first to
	 * let applications stop parsing after the first match.
	 */
	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);

	return str - buf;
}

static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);

static ssize_t fw_platform_size_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
}

extern __weak struct kobj_attribute efi_attr_fw_vendor;
extern __weak struct kobj_attribute efi_attr_runtime;
extern __weak struct kobj_attribute efi_attr_config_table;
static struct kobj_attribute efi_attr_fw_platform_size =
	__ATTR_RO(fw_platform_size);

static struct attribute *efi_subsys_attrs[] = {
	&efi_attr_systab.attr,
	&efi_attr_fw_platform_size.attr,
	&efi_attr_fw_vendor.attr,
	&efi_attr_runtime.attr,
	&efi_attr_config_table.attr,
	NULL,
};

umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
				   int n)
{
	return attr->mode;
}

static const struct attribute_group efi_subsys_attr_group = {
	.attrs = efi_subsys_attrs,
	.is_visible = efi_attr_is_visible,
};

struct blocking_notifier_head efivar_ops_nh;
EXPORT_SYMBOL_GPL(efivar_ops_nh);

static struct efivars generic_efivars;
static struct efivar_operations generic_ops;

static bool generic_ops_supported(void)
{
	unsigned long name_size;
	efi_status_t status;
	efi_char16_t name;
	efi_guid_t guid;

	name_size = sizeof(name);

	if (!efi.get_next_variable)
		return false;
	status = efi.get_next_variable(&name_size, &name, &guid);
	if (status == EFI_UNSUPPORTED)
		return false;

	return true;
}

static int generic_ops_register(void)
{
	if (!generic_ops_supported())
		return 0;

	generic_ops.get_variable = efi.get_variable;
	generic_ops.get_next_variable = efi.get_next_variable;
	generic_ops.query_variable_store = efi_query_variable_store;
	generic_ops.query_variable_info = efi.query_variable_info;

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
		generic_ops.set_variable = efi.set_variable;
		generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
	}
	return efivars_register(&generic_efivars, &generic_ops);
}

static void generic_ops_unregister(void)
{
	if (!generic_ops.get_variable)
		return;

	efivars_unregister(&generic_efivars);
}

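/*
 * Exported wrappers around the static registration helpers above, so that an
 * alternative efivar backend can hand variable handling back to the generic,
 * UEFI runtime-services based implementation registered here.
 */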
void efivars_generic_ops_register(void)
{
	generic_ops_register();
}
EXPORT_SYMBOL_GPL(efivars_generic_ops_register);

void efivars_generic_ops_unregister(void)
{
	generic_ops_unregister();
}
EXPORT_SYMBOL_GPL(efivars_generic_ops_unregister);

#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
#define EFIVAR_SSDT_NAME_MAX	16UL
static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
static int __init efivar_ssdt_setup(char *str)
{
	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);

	if (ret)
		return ret;

	if (strlen(str) < sizeof(efivar_ssdt))
		memcpy(efivar_ssdt, str, strlen(str));
	else
		pr_warn("efivar_ssdt: name too long: %s\n", str);
	return 1;
}
__setup("efivar_ssdt=", efivar_ssdt_setup);

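/*
 * Illustrative note on the loader below: efivar_ssdt_load() iterates over all
 * EFI variables and loads the contents of every variable whose name matches
 * the one given on the command line (any vendor GUID) as an ACPI SSDT.
 * For example, booting with "efivar_ssdt=ssdtoverlay" would pick up a
 * variable named "ssdtoverlay" regardless of its GUID ("ssdtoverlay" is just
 * an example name, not a fixed convention).
 */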
static __init int efivar_ssdt_load(void)
{
	unsigned long name_size = 256;
	efi_char16_t *name = NULL;
	efi_status_t status;
	efi_guid_t guid;
	int ret = 0;

	if (!efivar_ssdt[0])
		return 0;

	name = kzalloc(name_size, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	for (;;) {
		char utf8_name[EFIVAR_SSDT_NAME_MAX];
		unsigned long data_size = 0;
		void *data;
		int limit;

		status = efi.get_next_variable(&name_size, name, &guid);
		if (status == EFI_NOT_FOUND) {
			break;
		} else if (status == EFI_BUFFER_TOO_SMALL) {
			efi_char16_t *name_tmp =
				krealloc(name, name_size, GFP_KERNEL);
			if (!name_tmp) {
				ret = -ENOMEM;
				goto out;
			}
			name = name_tmp;
			continue;
		}

		limit = min(EFIVAR_SSDT_NAME_MAX, name_size);
		ucs2_as_utf8(utf8_name, name, limit - 1);
		if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
			continue;

		pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid);

		status = efi.get_variable(name, &guid, NULL, &data_size, NULL);
		if (status != EFI_BUFFER_TOO_SMALL || !data_size) {
			ret = -EIO;
			goto out;
		}

		data = kmalloc(data_size, GFP_KERNEL);
		if (!data) {
			ret = -ENOMEM;
			goto out;
		}

		status = efi.get_variable(name, &guid, NULL, &data_size, data);
		if (status == EFI_SUCCESS) {
			acpi_status acpi_ret = acpi_load_table(data, NULL);

			if (ACPI_FAILURE(acpi_ret)) {
				pr_err("efivar_ssdt: failed to load table: %u\n",
				       acpi_ret);
			} else {
				/*
				 * The @data is now in use by the ACPI engine;
				 * do not free it!
				 */
				continue;
			}
		} else {
			pr_err("efivar_ssdt: failed to get var data: 0x%lx\n", status);
		}
		kfree(data);
	}
out:
	kfree(name);
	return ret;
}
#else
static inline int efivar_ssdt_load(void) { return 0; }
#endif

#ifdef CONFIG_DEBUG_FS

#define EFI_DEBUGFS_MAX_BLOBS 32

static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];

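/*
 * Expose the preserved EFI boot services regions via debugfs. When enabled
 * (see the efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS)
 * check in efisubsys_init() below), each EFI_BOOT_SERVICES_CODE/DATA range
 * in the memory map is published as a read-only blob, e.g.
 * /sys/kernel/debug/efi/boot_services_code0, boot_services_data0, ...
 * (assuming debugfs is mounted at /sys/kernel/debug).
 */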
static void __init efi_debugfs_init(void)
{
	struct dentry *efi_debugfs;
	efi_memory_desc_t *md;
	char name[32];
	int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
	int i = 0;

	efi_debugfs = debugfs_create_dir("efi", NULL);
	if (IS_ERR(efi_debugfs))
		return;

	for_each_efi_memory_desc(md) {
		switch (md->type) {
		case EFI_BOOT_SERVICES_CODE:
			snprintf(name, sizeof(name), "boot_services_code%d",
				 type_count[md->type]++);
			break;
		case EFI_BOOT_SERVICES_DATA:
			snprintf(name, sizeof(name), "boot_services_data%d",
				 type_count[md->type]++);
			break;
		default:
			continue;
		}

		if (i >= EFI_DEBUGFS_MAX_BLOBS) {
			pr_warn("More than %d EFI boot service segments, only showing first %d in debugfs\n",
				EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
			break;
		}

		debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
		debugfs_blob[i].data = memremap(md->phys_addr,
						debugfs_blob[i].size,
						MEMREMAP_WB);
		if (!debugfs_blob[i].data)
			continue;

		debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
		i++;
	}
}
#else
static inline void efi_debugfs_init(void) {}
#endif

/*
 * We register the efi subsystem with the firmware subsystem and the
 * efivars subsystem with the efi subsystem, if the system was booted with
 * EFI.
 */
static int __init efisubsys_init(void)
{
	int error;

	if (!efi_enabled(EFI_RUNTIME_SERVICES))
		efi.runtime_supported_mask = 0;

	if (!efi_enabled(EFI_BOOT))
		return 0;

	if (efi.runtime_supported_mask) {
		/*
		 * Since we process only one efi_runtime_service() at a time, an
		 * ordered workqueue (which creates only one execution context)
		 * should suffice for all our needs.
		 */
		efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
		if (!efi_rts_wq) {
			pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
			clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
			efi.runtime_supported_mask = 0;
			return 0;
		}
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
		platform_device_register_simple("rtc-efi", 0, NULL, 0);

	/* We register the efi directory at /sys/firmware/efi */
	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
	if (!efi_kobj) {
		pr_err("efi: Firmware registration failed.\n");
		error = -ENOMEM;
		goto err_destroy_wq;
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
		error = generic_ops_register();
		if (error)
			goto err_put;
		error = efivar_ssdt_load();
		if (error)
			pr_err("efi: failed to load SSDT, error %d.\n", error);
		platform_device_register_simple("efivars", 0, NULL, 0);
	}

	BLOCKING_INIT_NOTIFIER_HEAD(&efivar_ops_nh);

	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
	if (error) {
		pr_err("efi: Sysfs attribute export failed with error %d.\n",
		       error);
		goto err_unregister;
	}

	/* and the standard mountpoint for efivarfs */
	error = sysfs_create_mount_point(efi_kobj, "efivars");
	if (error) {
		pr_err("efivars: Subsystem registration failed.\n");
		goto err_remove_group;
	}

	if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
		efi_debugfs_init();

#ifdef CONFIG_EFI_COCO_SECRET
	if (efi.coco_secret != EFI_INVALID_TABLE_ADDR)
		platform_device_register_simple("efi_secret", 0, NULL, 0);
#endif

	return 0;

err_remove_group:
	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
err_unregister:
	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
		generic_ops_unregister();
err_put:
	kobject_put(efi_kobj);
	efi_kobj = NULL;
err_destroy_wq:
	if (efi_rts_wq)
		destroy_workqueue(efi_rts_wq);

	return error;
}

subsys_initcall(efisubsys_init);

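/*
 * efi_find_mirror - mark mirrored memory in memblock
 *
 * Walk the EFI memory map and mark every range that carries the
 * EFI_MEMORY_MORE_RELIABLE attribute as mirrored in memblock, then report
 * how much of the total memory is mirrored.
 */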
void __init efi_find_mirror(void)
{
	efi_memory_desc_t *md;
	u64 mirror_size = 0, total_size = 0;

	if (!efi_enabled(EFI_MEMMAP))
		return;

	for_each_efi_memory_desc(md) {
		unsigned long long start = md->phys_addr;
		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;

		total_size += size;
		if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
			memblock_mark_mirror(start, size);
			mirror_size += size;
		}
	}
	if (mirror_size)
		pr_info("Memory: %lldM/%lldM mirrored memory\n",
			mirror_size >> 20, total_size >> 20);
}

/*
 * Find the efi memory descriptor for a given physical address. Given a
 * physical address, determine if it exists within an EFI Memory Map entry,
 * and if so, populate the supplied memory descriptor with the appropriate
 * data.
 */
int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP)) {
		pr_err_once("EFI_MEMMAP is not enabled.\n");
		return -EINVAL;
	}

	if (!out_md) {
		pr_err_once("out_md is null.\n");
		return -EINVAL;
	}

	for_each_efi_memory_desc(md) {
		u64 size;
		u64 end;

		/* skip bogus entries (including empty ones) */
		if ((md->phys_addr & (EFI_PAGE_SIZE - 1)) ||
		    (md->num_pages <= 0) ||
		    (md->num_pages > (U64_MAX - md->phys_addr) >> EFI_PAGE_SHIFT))
			continue;

		size = md->num_pages << EFI_PAGE_SHIFT;
		end = md->phys_addr + size;
		if (phys_addr >= md->phys_addr && phys_addr < end) {
			memcpy(out_md, md, sizeof(*out_md));
			return 0;
		}
	}
	return -ENOENT;
}

extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
	__weak __alias(__efi_mem_desc_lookup);

/*
 * Calculate the highest address of an efi memory descriptor.
 */
u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
{
	u64 size = md->num_pages << EFI_PAGE_SHIFT;
	u64 end = md->phys_addr + size;

	return end;
}

void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}

/**
 * efi_mem_reserve - Reserve an EFI memory region
 * @addr: Physical address to reserve
 * @size: Size of reservation
 *
 * Mark a region as reserved from general kernel allocation and
 * prevent it from being released by efi_free_boot_services().
 *
 * This function should be called by drivers once they've parsed EFI
 * configuration tables to figure out where their data lives, e.g.
 * efi_esrt_init().
 */
void __init efi_mem_reserve(phys_addr_t addr, u64 size)
{
	/* efi_mem_reserve() does not work under Xen */
	if (WARN_ON_ONCE(efi_enabled(EFI_PARAVIRT)))
		return;

	if (!memblock_is_region_reserved(addr, size))
		memblock_reserve(addr, size);

	/*
	 * Some architectures (x86) reserve all boot services ranges
	 * until efi_free_boot_services() because of buggy firmware
	 * implementations. This means the above memblock_reserve() is
	 * superfluous on x86 and instead what it needs to do is
	 * ensure the @start, @size is not freed.
	 */
	efi_arch_mem_reserve(addr, size);
}

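/*
 * Configuration tables that are recognised generically. When the firmware's
 * configuration table array is parsed in efi_config_parse_tables() below,
 * match_config_table() stores the physical address of each matching table in
 * the corresponding destination pointer; entries with an empty name are
 * matched silently (nothing is printed for them).
 */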
static const efi_config_table_type_t common_tables[] __initconst = {
	{ACPI_20_TABLE_GUID, &efi.acpi20, "ACPI 2.0" },
	{ACPI_TABLE_GUID, &efi.acpi, "ACPI" },
	{SMBIOS_TABLE_GUID, &efi.smbios, "SMBIOS" },
	{SMBIOS3_TABLE_GUID, &efi.smbios3, "SMBIOS 3.0" },
	{EFI_SYSTEM_RESOURCE_TABLE_GUID, &efi.esrt, "ESRT" },
	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID, &efi_mem_attr_table, "MEMATTR" },
	{LINUX_EFI_RANDOM_SEED_TABLE_GUID, &efi_rng_seed, "RNG" },
	{LINUX_EFI_TPM_EVENT_LOG_GUID, &efi.tpm_log, "TPMEventLog" },
	{EFI_TCG2_FINAL_EVENTS_TABLE_GUID, &efi.tpm_final_log, "TPMFinalLog" },
	{EFI_CC_FINAL_EVENTS_TABLE_GUID, &efi.tpm_final_log, "CCFinalLog" },
	{LINUX_EFI_MEMRESERVE_TABLE_GUID, &mem_reserve, "MEMRESERVE" },
	{LINUX_EFI_INITRD_MEDIA_GUID, &initrd, "INITRD" },
	{EFI_RT_PROPERTIES_TABLE_GUID, &rt_prop, "RTPROP" },
#ifdef CONFIG_EFI_RCI2_TABLE
	{DELLEMC_EFI_RCI2_TABLE_GUID, &rci2_table_phys },
#endif
#ifdef CONFIG_LOAD_UEFI_KEYS
	{LINUX_EFI_MOK_VARIABLE_TABLE_GUID, &efi.mokvar_table, "MOKvar" },
#endif
#ifdef CONFIG_EFI_COCO_SECRET
	{LINUX_EFI_COCO_SECRET_AREA_GUID, &efi.coco_secret, "CocoSecret" },
#endif
#ifdef CONFIG_UNACCEPTED_MEMORY
	{LINUX_EFI_UNACCEPTED_MEM_TABLE_GUID, &efi.unaccepted, "Unaccepted" },
#endif
#ifdef CONFIG_EFI_GENERIC_STUB
	{LINUX_EFI_SCREEN_INFO_TABLE_GUID, &screen_info_table },
#endif
	{},
};

static __init int match_config_table(const efi_guid_t *guid,
				     unsigned long table,
				     const efi_config_table_type_t *table_types)
{
	int i;

	for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
		if (efi_guidcmp(*guid, table_types[i].guid))
			continue;

		if (!efi_config_table_is_usable(guid, table)) {
			if (table_types[i].name[0])
				pr_cont("(%s=0x%lx unusable) ",
					table_types[i].name, table);
			return 1;
		}

		*(table_types[i].ptr) = table;
		if (table_types[i].name[0])
			pr_cont("%s=0x%lx ", table_types[i].name, table);
		return 1;
	}

	return 0;
}

/**
 * reserve_unaccepted - Map and reserve unaccepted configuration table
 * @unaccepted: Pointer to unaccepted memory table
 *
 * memblock_add() makes sure that the table is mapped in direct mapping. During
 * normal boot it happens automatically because the table is allocated from
 * usable memory. But during crashkernel boot only memory specifically reserved
 * for the crash scenario is mapped. memblock_add() forces the table to be
 * mapped in the crashkernel case.
 *
 * Align the range to the nearest page borders. Ranges smaller than page size
 * are not going to be mapped.
 *
 * memblock_reserve() makes sure that future allocations will not touch the
 * table.
 */
static __init void reserve_unaccepted(struct efi_unaccepted_memory *unaccepted)
{
	phys_addr_t start, size;

	start = PAGE_ALIGN_DOWN(efi.unaccepted);
	size = PAGE_ALIGN(sizeof(*unaccepted) + unaccepted->size);

	memblock_add(start, size);
	memblock_reserve(start, size);
}

int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
				   int count,
				   const efi_config_table_type_t *arch_tables)
{
	const efi_config_table_64_t *tbl64 = (void *)config_tables;
	const efi_config_table_32_t *tbl32 = (void *)config_tables;
	const efi_guid_t *guid;
	unsigned long table;
	int i;

	pr_info("");
	for (i = 0; i < count; i++) {
		if (!IS_ENABLED(CONFIG_X86)) {
			guid = &config_tables[i].guid;
			table = (unsigned long)config_tables[i].table;
		} else if (efi_enabled(EFI_64BIT)) {
			guid = &tbl64[i].guid;
			table = tbl64[i].table;

			if (IS_ENABLED(CONFIG_X86_32) &&
			    tbl64[i].table > U32_MAX) {
				pr_cont("\n");
				pr_err("Table located above 4GB, disabling EFI.\n");
				return -EINVAL;
			}
		} else {
			guid = &tbl32[i].guid;
			table = tbl32[i].table;
		}

		if (!match_config_table(guid, table, common_tables) && arch_tables)
			match_config_table(guid, table, arch_tables);
	}
	pr_cont("\n");
	set_bit(EFI_CONFIG_TABLES, &efi.flags);

	if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
		struct linux_efi_random_seed *seed;
		u32 size = 0;

		seed = early_memremap(efi_rng_seed, sizeof(*seed));
		if (seed != NULL) {
			size = min_t(u32, seed->size, SZ_1K); // sanity check
			early_memunmap(seed, sizeof(*seed));
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
		if (size > 0) {
			seed = early_memremap(efi_rng_seed,
					      sizeof(*seed) + size);
			if (seed != NULL) {
				add_bootloader_randomness(seed->bits, size);
				memzero_explicit(seed->bits, size);
				early_memunmap(seed, sizeof(*seed) + size);
			} else {
				pr_err("Could not map UEFI random seed!\n");
			}
		}
	}

	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
		efi_memattr_init();

	efi_tpm_eventlog_init();

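	/*
	 * Layout sketch of the LINUX_EFI_MEMRESERVE table walked below: each
	 * linux_efi_memreserve node starts with a small header (the number
	 * of entry[] slots, an atomic count of slots in use, and the
	 * physical address of the next node), followed by the {base, size}
	 * entries themselves. The loop reserves both the node and every
	 * range it describes, then follows rsv->next until it reads 0.
	 */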
	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
		unsigned long prsv = mem_reserve;

		while (prsv) {
			struct linux_efi_memreserve *rsv;
			u8 *p;

			/*
			 * Just map a full page: that is what we will get
			 * anyway, and it permits us to map the entire entry
			 * before knowing its size.
			 */
			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
					   PAGE_SIZE);
			if (p == NULL) {
				pr_err("Could not map UEFI memreserve entry!\n");
				return -ENOMEM;
			}

			rsv = (void *)(p + prsv % PAGE_SIZE);

			/* reserve the entry itself */
			memblock_reserve(prsv,
					 struct_size(rsv, entry, rsv->size));

			for (i = 0; i < atomic_read(&rsv->count); i++) {
				memblock_reserve(rsv->entry[i].base,
						 rsv->entry[i].size);
			}

			prsv = rsv->next;
			early_memunmap(p, PAGE_SIZE);
		}
	}

	if (rt_prop != EFI_INVALID_TABLE_ADDR) {
		efi_rt_properties_table_t *tbl;

		tbl = early_memremap(rt_prop, sizeof(*tbl));
		if (tbl) {
			efi.runtime_supported_mask &= tbl->runtime_services_supported;
			early_memunmap(tbl, sizeof(*tbl));
		}
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
	    initrd != EFI_INVALID_TABLE_ADDR && phys_initrd_size == 0) {
		struct linux_efi_initrd *tbl;

		tbl = early_memremap(initrd, sizeof(*tbl));
		if (tbl) {
			phys_initrd_start = tbl->base;
			phys_initrd_size = tbl->size;
			early_memunmap(tbl, sizeof(*tbl));
		}
	}

	if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY) &&
	    efi.unaccepted != EFI_INVALID_TABLE_ADDR) {
		struct efi_unaccepted_memory *unaccepted;

		unaccepted = early_memremap(efi.unaccepted, sizeof(*unaccepted));
		if (unaccepted) {
			if (unaccepted->version == 1) {
				reserve_unaccepted(unaccepted);
			} else {
				efi.unaccepted = EFI_INVALID_TABLE_ADDR;
			}

			early_memunmap(unaccepted, sizeof(*unaccepted));
		}
	}

	return 0;
}

int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr)
{
	if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
		pr_err("System table signature incorrect!\n");
		return -EINVAL;
	}

	return 0;
}

static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
						size_t size)
{
	const efi_char16_t *ret;

	ret = early_memremap_ro(fw_vendor, size);
	if (!ret)
		pr_err("Could not map the firmware vendor!\n");
	return ret;
}

static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
{
	early_memunmap((void *)fw_vendor, size);
}

void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
				     unsigned long fw_vendor)
{
	char vendor[100] = "unknown";
	const efi_char16_t *c16;
	size_t i;
	u16 rev;

	c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
	if (c16) {
		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
			vendor[i] = c16[i];
		vendor[i] = '\0';

		unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
	}

	rev = (u16)systab_hdr->revision;
	pr_info("EFI v%u.%u", systab_hdr->revision >> 16, rev / 10);

	rev %= 10;
	if (rev)
		pr_cont(".%u", rev);

	pr_cont(" by %s\n", vendor);

	if (IS_ENABLED(CONFIG_X86_64) &&
	    systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
	    !strcmp(vendor, "Apple")) {
		pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
		efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
	}
}

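/*
 * Human-readable names for the EFI memory types, indexed by
 * efi_memory_desc_t::type; consumed by efi_md_typeattr_format() below when
 * the memory map is printed.
 */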
"Unaccepted", 910 }; 911 912 char * __init efi_md_typeattr_format(char *buf, size_t size, 913 const efi_memory_desc_t *md) 914 { 915 char *pos; 916 int type_len; 917 u64 attr; 918 919 pos = buf; 920 if (md->type >= ARRAY_SIZE(memory_type_name)) 921 type_len = snprintf(pos, size, "[type=%u", md->type); 922 else 923 type_len = snprintf(pos, size, "[%-*s", 924 (int)(sizeof(memory_type_name[0]) - 1), 925 memory_type_name[md->type]); 926 if (type_len >= size) 927 return buf; 928 929 pos += type_len; 930 size -= type_len; 931 932 attr = md->attribute; 933 if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT | 934 EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO | 935 EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP | 936 EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO | 937 EFI_MEMORY_MORE_RELIABLE | EFI_MEMORY_HOT_PLUGGABLE | 938 EFI_MEMORY_RUNTIME)) 939 snprintf(pos, size, "|attr=0x%016llx]", 940 (unsigned long long)attr); 941 else 942 snprintf(pos, size, 943 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]", 944 attr & EFI_MEMORY_RUNTIME ? "RUN" : "", 945 attr & EFI_MEMORY_HOT_PLUGGABLE ? "HP" : "", 946 attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "", 947 attr & EFI_MEMORY_CPU_CRYPTO ? "CC" : "", 948 attr & EFI_MEMORY_SP ? "SP" : "", 949 attr & EFI_MEMORY_NV ? "NV" : "", 950 attr & EFI_MEMORY_XP ? "XP" : "", 951 attr & EFI_MEMORY_RP ? "RP" : "", 952 attr & EFI_MEMORY_WP ? "WP" : "", 953 attr & EFI_MEMORY_RO ? "RO" : "", 954 attr & EFI_MEMORY_UCE ? "UCE" : "", 955 attr & EFI_MEMORY_WB ? "WB" : "", 956 attr & EFI_MEMORY_WT ? "WT" : "", 957 attr & EFI_MEMORY_WC ? "WC" : "", 958 attr & EFI_MEMORY_UC ? "UC" : ""); 959 return buf; 960 } 961 962 /* 963 * efi_mem_attributes - lookup memmap attributes for physical address 964 * @phys_addr: the physical address to lookup 965 * 966 * Search in the EFI memory map for the region covering 967 * @phys_addr. Returns the EFI memory attributes if the region 968 * was found in the memory map, 0 otherwise. 969 */ 970 u64 efi_mem_attributes(unsigned long phys_addr) 971 { 972 efi_memory_desc_t *md; 973 974 if (!efi_enabled(EFI_MEMMAP)) 975 return 0; 976 977 for_each_efi_memory_desc(md) { 978 if ((md->phys_addr <= phys_addr) && 979 (phys_addr < (md->phys_addr + 980 (md->num_pages << EFI_PAGE_SHIFT)))) 981 return md->attribute; 982 } 983 return 0; 984 } 985 986 /* 987 * efi_mem_type - lookup memmap type for physical address 988 * @phys_addr: the physical address to lookup 989 * 990 * Search in the EFI memory map for the region covering @phys_addr. 991 * Returns the EFI memory type if the region was found in the memory 992 * map, -EINVAL otherwise. 
/*
 * efi_mem_attributes - lookup memmap attributes for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering
 * @phys_addr. Returns the EFI memory attributes if the region
 * was found in the memory map, 0 otherwise.
 */
u64 efi_mem_attributes(unsigned long phys_addr)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return 0;

	for_each_efi_memory_desc(md) {
		if ((md->phys_addr <= phys_addr) &&
		    (phys_addr < (md->phys_addr +
				  (md->num_pages << EFI_PAGE_SHIFT))))
			return md->attribute;
	}
	return 0;
}

/*
 * efi_mem_type - lookup memmap type for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering @phys_addr.
 * Returns the EFI memory type if the region was found in the memory
 * map, -EINVAL if it was not, or -ENOTSUPP if no EFI memory map is
 * available.
 */
int efi_mem_type(unsigned long phys_addr)
{
	const efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return -ENOTSUPP;

	for_each_efi_memory_desc(md) {
		if ((md->phys_addr <= phys_addr) &&
		    (phys_addr < (md->phys_addr +
				  (md->num_pages << EFI_PAGE_SHIFT))))
			return md->type;
	}
	return -EINVAL;
}

int efi_status_to_err(efi_status_t status)
{
	int err;

	switch (status) {
	case EFI_SUCCESS:
		err = 0;
		break;
	case EFI_INVALID_PARAMETER:
		err = -EINVAL;
		break;
	case EFI_OUT_OF_RESOURCES:
		err = -ENOSPC;
		break;
	case EFI_DEVICE_ERROR:
		err = -EIO;
		break;
	case EFI_WRITE_PROTECTED:
		err = -EROFS;
		break;
	case EFI_SECURITY_VIOLATION:
		err = -EACCES;
		break;
	case EFI_NOT_FOUND:
		err = -ENOENT;
		break;
	case EFI_ABORTED:
		err = -EINTR;
		break;
	default:
		err = -EINVAL;
	}

	return err;
}
EXPORT_SYMBOL_GPL(efi_status_to_err);

static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;

static int __init efi_memreserve_map_root(void)
{
	if (mem_reserve == EFI_INVALID_TABLE_ADDR)
		return -ENODEV;

	efi_memreserve_root = memremap(mem_reserve,
				       sizeof(*efi_memreserve_root),
				       MEMREMAP_WB);
	if (WARN_ON_ONCE(!efi_memreserve_root))
		return -ENOMEM;
	return 0;
}

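/*
 * Reflect a persistent reservation in the iomem resource tree: the range is
 * inserted as a "reserved" child of the conflicting resource (typically a
 * "System RAM" region, as noted below) so it shows up in /proc/iomem, and it
 * is also reserved in memblock where the architecture keeps the memblock
 * data around after boot.
 */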
static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
{
	struct resource *res, *parent;
	int ret;

	res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
	if (!res)
		return -ENOMEM;

	res->name = "reserved";
	res->flags = IORESOURCE_MEM;
	res->start = addr;
	res->end = addr + size - 1;

	/* we expect a conflict with a 'System RAM' region */
	parent = request_resource_conflict(&iomem_resource, res);
	ret = parent ? request_resource(parent, res) : 0;

	/*
	 * Given that efi_mem_reserve_iomem() can be called at any
	 * time, only call memblock_reserve() if the architecture
	 * keeps the infrastructure around.
	 */
	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
		memblock_reserve(addr, size);

	return ret;
}

int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
{
	struct linux_efi_memreserve *rsv;
	unsigned long prsv;
	int rc, index;

	if (efi_memreserve_root == (void *)ULONG_MAX)
		return -ENODEV;

	if (!efi_memreserve_root) {
		rc = efi_memreserve_map_root();
		if (rc)
			return rc;
	}

	/* first try to find a slot in an existing linked list entry */
	for (prsv = efi_memreserve_root->next; prsv; ) {
		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
		if (!rsv)
			return -ENOMEM;
		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
		if (index < rsv->size) {
			rsv->entry[index].base = addr;
			rsv->entry[index].size = size;

			memunmap(rsv);
			return efi_mem_reserve_iomem(addr, size);
		}
		prsv = rsv->next;
		memunmap(rsv);
	}

	/* no slot found - allocate a new linked list entry */
	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
	if (!rsv)
		return -ENOMEM;

	rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
	if (rc) {
		free_page((unsigned long)rsv);
		return rc;
	}

	/*
	 * The memremap() call above assumes that a linux_efi_memreserve entry
	 * never crosses a page boundary, so let's ensure that this remains true
	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
	 * using SZ_4K explicitly in the size calculation below.
	 */
	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
	atomic_set(&rsv->count, 1);
	rsv->entry[0].base = addr;
	rsv->entry[0].size = size;

	spin_lock(&efi_mem_reserve_persistent_lock);
	rsv->next = efi_memreserve_root->next;
	efi_memreserve_root->next = __pa(rsv);
	spin_unlock(&efi_mem_reserve_persistent_lock);

	return efi_mem_reserve_iomem(addr, size);
}

static int __init efi_memreserve_root_init(void)
{
	if (efi_memreserve_root)
		return 0;
	if (efi_memreserve_map_root())
		efi_memreserve_root = (void *)ULONG_MAX;
	return 0;
}
early_initcall(efi_memreserve_root_init);

#ifdef CONFIG_KEXEC
static int update_efi_random_seed(struct notifier_block *nb,
				  unsigned long code, void *unused)
{
	struct linux_efi_random_seed *seed;
	u32 size = 0;

	if (!kexec_in_progress)
		return NOTIFY_DONE;

	seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
	if (seed != NULL) {
		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
		memunmap(seed);
	} else {
		pr_err("Could not map UEFI random seed!\n");
	}
	if (size > 0) {
		seed = memremap(efi_rng_seed, sizeof(*seed) + size,
				MEMREMAP_WB);
		if (seed != NULL) {
			seed->size = size;
			get_random_bytes(seed->bits, seed->size);
			memunmap(seed);
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block efi_random_seed_nb = {
	.notifier_call = update_efi_random_seed,
};

static int __init register_update_efi_random_seed(void)
{
	if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
		return 0;
	return register_reboot_notifier(&efi_random_seed_nb);
}
late_initcall(register_update_efi_random_seed);
#endif