// SPDX-License-Identifier: GPL-2.0-only
/*
 * efi.c - EFI subsystem
 *
 * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
 * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
 * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
 *
 * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
 * allowing the efivarfs to be mounted or the efivars module to be loaded.
 * The existence of /sys/firmware/efi may also be used by userspace to
 * determine that the system supports EFI.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/efi.h>
#include <linux/of.h>
#include <linux/initrd.h>
#include <linux/io.h>
#include <linux/kexec.h>
#include <linux/platform_device.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/ucs2_string.h>
#include <linux/memblock.h>
#include <linux/security.h>
#include <linux/notifier.h>

#include <asm/early_ioremap.h>

struct efi __read_mostly efi = {
	.runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
	.acpi = EFI_INVALID_TABLE_ADDR,
	.acpi20 = EFI_INVALID_TABLE_ADDR,
	.smbios = EFI_INVALID_TABLE_ADDR,
	.smbios3 = EFI_INVALID_TABLE_ADDR,
	.esrt = EFI_INVALID_TABLE_ADDR,
	.tpm_log = EFI_INVALID_TABLE_ADDR,
	.tpm_final_log = EFI_INVALID_TABLE_ADDR,
#ifdef CONFIG_LOAD_UEFI_KEYS
	.mokvar_table = EFI_INVALID_TABLE_ADDR,
#endif
#ifdef CONFIG_EFI_COCO_SECRET
	.coco_secret = EFI_INVALID_TABLE_ADDR,
#endif
#ifdef CONFIG_UNACCEPTED_MEMORY
	.unaccepted = EFI_INVALID_TABLE_ADDR,
#endif
};
EXPORT_SYMBOL(efi);

unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata initrd = EFI_INVALID_TABLE_ADDR;

extern unsigned long screen_info_table;

struct mm_struct efi_mm = {
	.mm_mt			= MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
	.mm_users		= ATOMIC_INIT(2),
	.mm_count		= ATOMIC_INIT(1),
	.write_protect_seq	= SEQCNT_ZERO(efi_mm.write_protect_seq),
	MMAP_LOCK_INITIALIZER(efi_mm)
	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
	.cpu_bitmap		= { [BITS_TO_LONGS(NR_CPUS)] = 0},
};

struct workqueue_struct *efi_rts_wq;

static bool disable_runtime = IS_ENABLED(CONFIG_EFI_DISABLE_RUNTIME);
static int __init setup_noefi(char *arg)
{
	disable_runtime = true;
	return 0;
}
early_param("noefi", setup_noefi);

bool efi_runtime_disabled(void)
{
	return disable_runtime;
}

bool __pure __efi_soft_reserve_enabled(void)
{
	return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
}

static int __init parse_efi_cmdline(char *str)
{
	if (!str) {
		pr_warn("need at least one option\n");
		return -EINVAL;
	}

	if (parse_option_str(str, "debug"))
		set_bit(EFI_DBG, &efi.flags);

	if (parse_option_str(str, "noruntime"))
		disable_runtime = true;

	if (parse_option_str(str, "runtime"))
		disable_runtime = false;

	if (parse_option_str(str, "nosoftreserve"))
		set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);

	return 0;
}
early_param("efi", parse_efi_cmdline);
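
/*
 * Illustrative note (not part of the original source): the options parsed
 * above come from the kernel command line, e.g.
 *
 *	efi=debug		enable EFI_DBG diagnostics
 *	efi=nosoftreserve	treat EFI_MEMORY_SP soft-reserved ranges as
 *				ordinary memory
 *	efi=noruntime / noefi	keep EFI runtime services disabled
 *	efi=runtime		re-enable runtime services where the build
 *				default disables them
 *
 * "noefi" and "efi=noruntime" only affect runtime services; boot-time use of
 * the EFI memory map and configuration tables is unchanged.
 */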

struct kobject *efi_kobj;

/*
 * Let's not leave out systab information that snuck into
 * the efivars driver.
 * Note: do not add more fields to the systab sysfs file, as that breaks
 * the sysfs one-value-per-file rule!
 */
static ssize_t systab_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	char *str = buf;

	if (!kobj || !buf)
		return -EINVAL;

	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
	/*
	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
	 * SMBIOS3 entry point shall be preferred, so we list it first to
	 * let applications stop parsing after the first match.
	 */
	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);

	return str - buf;
}

static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);

static ssize_t fw_platform_size_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
}

extern __weak struct kobj_attribute efi_attr_fw_vendor;
extern __weak struct kobj_attribute efi_attr_runtime;
extern __weak struct kobj_attribute efi_attr_config_table;
static struct kobj_attribute efi_attr_fw_platform_size =
	__ATTR_RO(fw_platform_size);

static struct attribute *efi_subsys_attrs[] = {
	&efi_attr_systab.attr,
	&efi_attr_fw_platform_size.attr,
	&efi_attr_fw_vendor.attr,
	&efi_attr_runtime.attr,
	&efi_attr_config_table.attr,
	NULL,
};

umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
				   int n)
{
	return attr->mode;
}

static const struct attribute_group efi_subsys_attr_group = {
	.attrs = efi_subsys_attrs,
	.is_visible = efi_attr_is_visible,
};

struct blocking_notifier_head efivar_ops_nh;
EXPORT_SYMBOL_GPL(efivar_ops_nh);

static struct efivars generic_efivars;
static struct efivar_operations generic_ops;

static bool generic_ops_supported(void)
{
	unsigned long name_size;
	efi_status_t status;
	efi_char16_t name;
	efi_guid_t guid;

	name_size = sizeof(name);

	if (!efi.get_next_variable)
		return false;
	status = efi.get_next_variable(&name_size, &name, &guid);
	if (status == EFI_UNSUPPORTED)
		return false;

	return true;
}

static int generic_ops_register(void)
{
	if (!generic_ops_supported())
		return 0;

	generic_ops.get_variable = efi.get_variable;
	generic_ops.get_next_variable = efi.get_next_variable;
	generic_ops.query_variable_store = efi_query_variable_store;
	generic_ops.query_variable_info = efi.query_variable_info;

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
		generic_ops.set_variable = efi.set_variable;
		generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
	}
	return efivars_register(&generic_efivars, &generic_ops);
}

static void generic_ops_unregister(void)
{
	if (!generic_ops.get_variable)
		return;

	efivars_unregister(&generic_efivars);
}

void efivars_generic_ops_register(void)
{
	generic_ops_register();
}
EXPORT_SYMBOL_GPL(efivars_generic_ops_register);

void efivars_generic_ops_unregister(void)
{
	generic_ops_unregister();
}
EXPORT_SYMBOL_GPL(efivars_generic_ops_unregister);
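
/*
 * Illustrative note (not part of the original source): generic_ops_register()
 * above exposes the firmware's GetVariable/GetNextVariableName (and, where
 * supported, SetVariable) services through the efivars layer, which is what
 * an efivarfs mount on /sys/firmware/efi/efivars ultimately talks to.
 * Alternative variable backends provide their own struct efivar_operations
 * via efivars_register(); the efivar_ops_nh notifier chain is presumably how
 * interested code is told that the active operations have changed.
 */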

#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
#define EFIVAR_SSDT_NAME_MAX	16UL
static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
static int __init efivar_ssdt_setup(char *str)
{
	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);

	if (ret)
		return ret;

	if (strlen(str) < sizeof(efivar_ssdt))
		memcpy(efivar_ssdt, str, strlen(str));
	else
		pr_warn("efivar_ssdt: name too long: %s\n", str);
	return 1;
}
__setup("efivar_ssdt=", efivar_ssdt_setup);

static __init int efivar_ssdt_load(void)
{
	unsigned long name_size = 256;
	efi_char16_t *name = NULL;
	efi_status_t status;
	efi_guid_t guid;
	int ret = 0;

	if (!efivar_ssdt[0])
		return 0;

	name = kzalloc(name_size, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	for (;;) {
		char utf8_name[EFIVAR_SSDT_NAME_MAX];
		unsigned long data_size = 0;
		void *data;
		int limit;

		status = efi.get_next_variable(&name_size, name, &guid);
		if (status == EFI_NOT_FOUND) {
			break;
		} else if (status == EFI_BUFFER_TOO_SMALL) {
			efi_char16_t *name_tmp =
				krealloc(name, name_size, GFP_KERNEL);
			if (!name_tmp) {
				ret = -ENOMEM;
				goto out;
			}
			name = name_tmp;
			continue;
		}

		limit = min(EFIVAR_SSDT_NAME_MAX, name_size);
		ucs2_as_utf8(utf8_name, name, limit - 1);
		if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
			continue;

		pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid);

		status = efi.get_variable(name, &guid, NULL, &data_size, NULL);
		if (status != EFI_BUFFER_TOO_SMALL || !data_size) {
			ret = -EIO;
			goto out;
		}

		data = kmalloc(data_size, GFP_KERNEL);
		if (!data) {
			ret = -ENOMEM;
			goto out;
		}

		status = efi.get_variable(name, &guid, NULL, &data_size, data);
		if (status == EFI_SUCCESS) {
			acpi_status acpi_ret = acpi_load_table(data, NULL);
			if (ACPI_FAILURE(acpi_ret)) {
				pr_err("efivar_ssdt: failed to load table: %u\n",
				       acpi_ret);
			} else {
				/*
				 * The @data will be in use by ACPI engine,
				 * do not free it!
				 */
				continue;
			}
		} else {
			pr_err("efivar_ssdt: failed to get var data: 0x%lx\n", status);
		}
		kfree(data);
	}
out:
	kfree(name);
	return ret;
}
#else
static inline int efivar_ssdt_load(void) { return 0; }
#endif
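
/*
 * Illustrative note (not part of the original source): with
 * CONFIG_EFI_CUSTOM_SSDT_OVERLAYS enabled, an SSDT overlay stored in an EFI
 * variable can be loaded by naming that variable on the kernel command line,
 * e.g.
 *
 *	efivar_ssdt=dsdt_fix
 *
 * The name must be shorter than EFIVAR_SSDT_NAME_MAX (16) characters, and
 * loading is refused when the kernel is locked down (LOCKDOWN_ACPI_TABLES).
 */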

#ifdef CONFIG_DEBUG_FS

#define EFI_DEBUGFS_MAX_BLOBS 32

static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];

static void __init efi_debugfs_init(void)
{
	struct dentry *efi_debugfs;
	efi_memory_desc_t *md;
	char name[32];
	int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
	int i = 0;

	efi_debugfs = debugfs_create_dir("efi", NULL);
	if (IS_ERR(efi_debugfs))
		return;

	for_each_efi_memory_desc(md) {
		switch (md->type) {
		case EFI_BOOT_SERVICES_CODE:
			snprintf(name, sizeof(name), "boot_services_code%d",
				 type_count[md->type]++);
			break;
		case EFI_BOOT_SERVICES_DATA:
			snprintf(name, sizeof(name), "boot_services_data%d",
				 type_count[md->type]++);
			break;
		default:
			continue;
		}

		if (i >= EFI_DEBUGFS_MAX_BLOBS) {
			pr_warn("More than %d EFI boot service segments, only showing first %d in debugfs\n",
				EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
			break;
		}

		debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
		debugfs_blob[i].data = memremap(md->phys_addr,
						debugfs_blob[i].size,
						MEMREMAP_WB);
		if (!debugfs_blob[i].data)
			continue;

		debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
		i++;
	}
}
#else
static inline void efi_debugfs_init(void) {}
#endif

/*
 * We register the efi subsystem with the firmware subsystem and the
 * efivars subsystem with the efi subsystem, if the system was booted with
 * EFI.
 */
static int __init efisubsys_init(void)
{
	int error;

	if (!efi_enabled(EFI_RUNTIME_SERVICES))
		efi.runtime_supported_mask = 0;

	if (!efi_enabled(EFI_BOOT))
		return 0;

	if (efi.runtime_supported_mask) {
		/*
		 * Since we process only one efi_runtime_service() at a time, an
		 * ordered workqueue (which creates only one execution context)
		 * should suffice for all our needs.
		 */
		efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
		if (!efi_rts_wq) {
			pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
			clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
			efi.runtime_supported_mask = 0;
			return 0;
		}
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
		platform_device_register_simple("rtc-efi", 0, NULL, 0);

	/* We register the efi directory at /sys/firmware/efi */
	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
	if (!efi_kobj) {
		pr_err("efi: Firmware registration failed.\n");
		error = -ENOMEM;
		goto err_destroy_wq;
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
		error = generic_ops_register();
		if (error)
			goto err_put;
		error = efivar_ssdt_load();
		if (error)
			pr_err("efi: failed to load SSDT, error %d.\n", error);
		platform_device_register_simple("efivars", 0, NULL, 0);
	}

	BLOCKING_INIT_NOTIFIER_HEAD(&efivar_ops_nh);

	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
	if (error) {
		pr_err("efi: Sysfs attribute export failed with error %d.\n",
		       error);
		goto err_unregister;
	}

	/* and the standard mountpoint for efivarfs */
	error = sysfs_create_mount_point(efi_kobj, "efivars");
	if (error) {
		pr_err("efivars: Subsystem registration failed.\n");
		goto err_remove_group;
	}

	if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
		efi_debugfs_init();

#ifdef CONFIG_EFI_COCO_SECRET
	if (efi.coco_secret != EFI_INVALID_TABLE_ADDR)
		platform_device_register_simple("efi_secret", 0, NULL, 0);
#endif

	return 0;

err_remove_group:
	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
err_unregister:
	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
		generic_ops_unregister();
err_put:
	kobject_put(efi_kobj);
	efi_kobj = NULL;
err_destroy_wq:
	if (efi_rts_wq)
		destroy_workqueue(efi_rts_wq);

	return error;
}

subsys_initcall(efisubsys_init);

void __init efi_find_mirror(void)
{
	efi_memory_desc_t *md;
	u64 mirror_size = 0, total_size = 0;

	if (!efi_enabled(EFI_MEMMAP))
		return;

	for_each_efi_memory_desc(md) {
		unsigned long long start = md->phys_addr;
		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;

		total_size += size;
		if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
			memblock_mark_mirror(start, size);
			mirror_size += size;
		}
	}
	if (mirror_size)
		pr_info("Memory: %lldM/%lldM mirrored memory\n",
			mirror_size>>20, total_size>>20);
}
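
/*
 * Illustrative note (not part of the original source): regions that the
 * firmware tags EFI_MEMORY_MORE_RELIABLE are handed to memblock via
 * memblock_mark_mirror() above; the memory-management code can then prefer
 * them for kernel allocations, e.g. when booting with "kernelcore=mirror".
 */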

/*
 * Find the efi memory descriptor for a given physical address. Given a
 * physical address, determine if it exists within an EFI Memory Map entry,
 * and if so, populate the supplied memory descriptor with the appropriate
 * data.
 */
int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP)) {
		pr_err_once("EFI_MEMMAP is not enabled.\n");
		return -EINVAL;
	}

	if (!out_md) {
		pr_err_once("out_md is null.\n");
		return -EINVAL;
	}

	for_each_efi_memory_desc(md) {
		u64 size;
		u64 end;

		/* skip bogus entries (including empty ones) */
		if ((md->phys_addr & (EFI_PAGE_SIZE - 1)) ||
		    (md->num_pages <= 0) ||
		    (md->num_pages > (U64_MAX - md->phys_addr) >> EFI_PAGE_SHIFT))
			continue;

		size = md->num_pages << EFI_PAGE_SHIFT;
		end = md->phys_addr + size;
		if (phys_addr >= md->phys_addr && phys_addr < end) {
			memcpy(out_md, md, sizeof(*out_md));
			return 0;
		}
	}
	return -ENOENT;
}

extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
	__weak __alias(__efi_mem_desc_lookup);

/*
 * Calculate the highest address of an efi memory descriptor.
 */
u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
{
	u64 size = md->num_pages << EFI_PAGE_SHIFT;
	u64 end = md->phys_addr + size;
	return end;
}

void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}

/**
 * efi_mem_reserve - Reserve an EFI memory region
 * @addr: Physical address to reserve
 * @size: Size of reservation
 *
 * Mark a region as reserved from general kernel allocation and
 * prevent it being released by efi_free_boot_services().
 *
 * This function should be called by drivers once they've parsed EFI
 * configuration tables to figure out where their data lives, e.g.
 * efi_esrt_init().
 */
void __init efi_mem_reserve(phys_addr_t addr, u64 size)
{
	/* efi_mem_reserve() does not work under Xen */
	if (WARN_ON_ONCE(efi_enabled(EFI_PARAVIRT)))
		return;

	if (!memblock_is_region_reserved(addr, size))
		memblock_reserve(addr, size);

	/*
	 * Some architectures (x86) reserve all boot services ranges
	 * until efi_free_boot_services() because of buggy firmware
	 * implementations. This means the above memblock_reserve() is
	 * superfluous on x86 and instead what it needs to do is
	 * ensure the @start, @size is not freed.
	 */
	efi_arch_mem_reserve(addr, size);
}
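
/*
 * Illustrative sketch (not part of the original source): a typical early
 * caller first locates its data via a configuration table pointer and then
 * keeps the backing memory alive across efi_free_boot_services(), roughly as
 * efi_esrt_init() does:
 *
 *	efi_memory_desc_t md;
 *	u64 size = ...;		// size parsed from the table's own header
 *
 *	if (efi_mem_desc_lookup(efi.esrt, &md) == 0)
 *		efi_mem_reserve(efi.esrt, size);
 *
 * The exact size and memory-type checks are the caller's business; the point
 * is that efi_mem_reserve() must run at __init time, before boot services
 * regions are released.
 */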

static const efi_config_table_type_t common_tables[] __initconst = {
	{ACPI_20_TABLE_GUID,			&efi.acpi20,		"ACPI 2.0"	},
	{ACPI_TABLE_GUID,			&efi.acpi,		"ACPI"		},
	{SMBIOS_TABLE_GUID,			&efi.smbios,		"SMBIOS"	},
	{SMBIOS3_TABLE_GUID,			&efi.smbios3,		"SMBIOS 3.0"	},
	{EFI_SYSTEM_RESOURCE_TABLE_GUID,	&efi.esrt,		"ESRT"		},
	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID,	&efi_mem_attr_table,	"MEMATTR"	},
	{LINUX_EFI_RANDOM_SEED_TABLE_GUID,	&efi_rng_seed,		"RNG"		},
	{LINUX_EFI_TPM_EVENT_LOG_GUID,		&efi.tpm_log,		"TPMEventLog"	},
	{EFI_TCG2_FINAL_EVENTS_TABLE_GUID,	&efi.tpm_final_log,	"TPMFinalLog"	},
	{EFI_CC_FINAL_EVENTS_TABLE_GUID,	&efi.tpm_final_log,	"CCFinalLog"	},
	{LINUX_EFI_MEMRESERVE_TABLE_GUID,	&mem_reserve,		"MEMRESERVE"	},
	{LINUX_EFI_INITRD_MEDIA_GUID,		&initrd,		"INITRD"	},
	{EFI_RT_PROPERTIES_TABLE_GUID,		&rt_prop,		"RTPROP"	},
#ifdef CONFIG_EFI_RCI2_TABLE
	{DELLEMC_EFI_RCI2_TABLE_GUID,		&rci2_table_phys	},
#endif
#ifdef CONFIG_LOAD_UEFI_KEYS
	{LINUX_EFI_MOK_VARIABLE_TABLE_GUID,	&efi.mokvar_table,	"MOKvar"	},
#endif
#ifdef CONFIG_EFI_COCO_SECRET
	{LINUX_EFI_COCO_SECRET_AREA_GUID,	&efi.coco_secret,	"CocoSecret"	},
#endif
#ifdef CONFIG_UNACCEPTED_MEMORY
	{LINUX_EFI_UNACCEPTED_MEM_TABLE_GUID,	&efi.unaccepted,	"Unaccepted"	},
#endif
#ifdef CONFIG_EFI_GENERIC_STUB
	{LINUX_EFI_SCREEN_INFO_TABLE_GUID,	&screen_info_table	},
#endif
	{},
};

static __init int match_config_table(const efi_guid_t *guid,
				     unsigned long table,
				     const efi_config_table_type_t *table_types)
{
	int i;

	for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
		if (efi_guidcmp(*guid, table_types[i].guid))
			continue;

		if (!efi_config_table_is_usable(guid, table)) {
			if (table_types[i].name[0])
				pr_cont("(%s=0x%lx unusable) ",
					table_types[i].name, table);
			return 1;
		}

		*(table_types[i].ptr) = table;
		if (table_types[i].name[0])
			pr_cont("%s=0x%lx ", table_types[i].name, table);
		return 1;
	}

	return 0;
}
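
/*
 * Illustrative note (not part of the original source): together with the
 * pr_info("")/pr_cont() calls in efi_config_parse_tables() below, matched
 * tables show up in dmesg as a single line along the lines of
 *
 *	efi: ACPI 2.0=0x... SMBIOS=0x... ESRT=0x... MEMATTR=0x... RNG=0x...
 *
 * (addresses are firmware dependent). A table that fails the usability check
 * is reported as "(NAME=0x... unusable)" and its pointer is left unset.
 */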

/**
 * reserve_unaccepted - Map and reserve unaccepted configuration table
 * @unaccepted: Pointer to unaccepted memory table
 *
 * memblock_add() makes sure that the table is mapped in direct mapping. During
 * normal boot it happens automatically because the table is allocated from
 * usable memory. But during crashkernel boot only memory specifically reserved
 * for crash scenario is mapped. memblock_add() forces the table to be mapped
 * in crashkernel case.
 *
 * Align the range to the nearest page borders. Ranges smaller than page size
 * are not going to be mapped.
 *
 * memblock_reserve() makes sure that future allocations will not touch the
 * table.
 */
static __init void reserve_unaccepted(struct efi_unaccepted_memory *unaccepted)
{
	phys_addr_t start, size;

	start = PAGE_ALIGN_DOWN(efi.unaccepted);
	size = PAGE_ALIGN(sizeof(*unaccepted) + unaccepted->size);

	memblock_add(start, size);
	memblock_reserve(start, size);
}

int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
				   int count,
				   const efi_config_table_type_t *arch_tables)
{
	const efi_config_table_64_t *tbl64 = (void *)config_tables;
	const efi_config_table_32_t *tbl32 = (void *)config_tables;
	const efi_guid_t *guid;
	unsigned long table;
	int i;

	pr_info("");
	for (i = 0; i < count; i++) {
		if (!IS_ENABLED(CONFIG_X86)) {
			guid = &config_tables[i].guid;
			table = (unsigned long)config_tables[i].table;
		} else if (efi_enabled(EFI_64BIT)) {
			guid = &tbl64[i].guid;
			table = tbl64[i].table;

			if (IS_ENABLED(CONFIG_X86_32) &&
			    tbl64[i].table > U32_MAX) {
				pr_cont("\n");
				pr_err("Table located above 4GB, disabling EFI.\n");
				return -EINVAL;
			}
		} else {
			guid = &tbl32[i].guid;
			table = tbl32[i].table;
		}

		if (!match_config_table(guid, table, common_tables) && arch_tables)
			match_config_table(guid, table, arch_tables);
	}
	pr_cont("\n");
	set_bit(EFI_CONFIG_TABLES, &efi.flags);

	if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
		struct linux_efi_random_seed *seed;
		u32 size = 0;

		seed = early_memremap(efi_rng_seed, sizeof(*seed));
		if (seed != NULL) {
			size = min_t(u32, seed->size, SZ_1K); // sanity check
			early_memunmap(seed, sizeof(*seed));
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
		if (size > 0) {
			seed = early_memremap(efi_rng_seed,
					      sizeof(*seed) + size);
			if (seed != NULL) {
				add_bootloader_randomness(seed->bits, size);
				memzero_explicit(seed->bits, size);
				early_memunmap(seed, sizeof(*seed) + size);
			} else {
				pr_err("Could not map UEFI random seed!\n");
			}
		}
	}

	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
		efi_memattr_init();

	efi_tpm_eventlog_init();

	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
		unsigned long prsv = mem_reserve;

		while (prsv) {
			struct linux_efi_memreserve *rsv;
			u8 *p;

			/*
			 * Just map a full page: that is what we will get
			 * anyway, and it permits us to map the entire entry
			 * before knowing its size.
			 */
			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
					   PAGE_SIZE);
			if (p == NULL) {
				pr_err("Could not map UEFI memreserve entry!\n");
				return -ENOMEM;
			}

			rsv = (void *)(p + prsv % PAGE_SIZE);

			/* reserve the entry itself */
			memblock_reserve(prsv,
					 struct_size(rsv, entry, rsv->size));

			for (i = 0; i < atomic_read(&rsv->count); i++) {
				memblock_reserve(rsv->entry[i].base,
						 rsv->entry[i].size);
			}

			prsv = rsv->next;
			early_memunmap(p, PAGE_SIZE);
		}
	}

	if (rt_prop != EFI_INVALID_TABLE_ADDR) {
		efi_rt_properties_table_t *tbl;

		tbl = early_memremap(rt_prop, sizeof(*tbl));
		if (tbl) {
			efi.runtime_supported_mask &= tbl->runtime_services_supported;
			early_memunmap(tbl, sizeof(*tbl));
		}
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
	    initrd != EFI_INVALID_TABLE_ADDR && phys_initrd_size == 0) {
		struct linux_efi_initrd *tbl;

		tbl = early_memremap(initrd, sizeof(*tbl));
		if (tbl) {
			phys_initrd_start = tbl->base;
			phys_initrd_size = tbl->size;
			early_memunmap(tbl, sizeof(*tbl));
		}
	}

	if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY) &&
	    efi.unaccepted != EFI_INVALID_TABLE_ADDR) {
		struct efi_unaccepted_memory *unaccepted;

		unaccepted = early_memremap(efi.unaccepted, sizeof(*unaccepted));
		if (unaccepted) {

			if (unaccepted->version == 1) {
				reserve_unaccepted(unaccepted);
			} else {
				efi.unaccepted = EFI_INVALID_TABLE_ADDR;
			}

			early_memunmap(unaccepted, sizeof(*unaccepted));
		}
	}

	return 0;
}

int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr)
{
	if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
		pr_err("System table signature incorrect!\n");
		return -EINVAL;
	}

	return 0;
}

static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
						size_t size)
{
	const efi_char16_t *ret;

	ret = early_memremap_ro(fw_vendor, size);
	if (!ret)
		pr_err("Could not map the firmware vendor!\n");
	return ret;
}

static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
{
	early_memunmap((void *)fw_vendor, size);
}

void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
				     unsigned long fw_vendor)
{
	char vendor[100] = "unknown";
	const efi_char16_t *c16;
	size_t i;
	u16 rev;

	c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
	if (c16) {
		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
			vendor[i] = c16[i];
		vendor[i] = '\0';

		unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
	}

	rev = (u16)systab_hdr->revision;
	pr_info("EFI v%u.%u", systab_hdr->revision >> 16, rev / 10);

	rev %= 10;
	if (rev)
		pr_cont(".%u", rev);

	pr_cont(" by %s\n", vendor);

	if (IS_ENABLED(CONFIG_X86_64) &&
	    systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
	    !strcmp(vendor, "Apple")) {
		pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
		efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
	}
}
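
/*
 * Illustrative note (not part of the original source): the UEFI revision is
 * encoded as (major << 16) | (minor * 10 + patch), so a revision value of
 * 0x0002001E prints as
 *
 *	efi: EFI v2.3 by <firmware vendor string>
 *
 * with a ".patch" suffix only appended when the patch digit is non-zero.
 */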

static __initdata char memory_type_name[][13] = {
	"Reserved",
	"Loader Code",
	"Loader Data",
	"Boot Code",
	"Boot Data",
	"Runtime Code",
	"Runtime Data",
	"Conventional",
	"Unusable",
	"ACPI Reclaim",
	"ACPI Mem NVS",
	"MMIO",
	"MMIO Port",
	"PAL Code",
	"Persistent",
	"Unaccepted",
};

char * __init efi_md_typeattr_format(char *buf, size_t size,
				     const efi_memory_desc_t *md)
{
	char *pos;
	int type_len;
	u64 attr;

	pos = buf;
	if (md->type >= ARRAY_SIZE(memory_type_name))
		type_len = snprintf(pos, size, "[type=%u", md->type);
	else
		type_len = snprintf(pos, size, "[%-*s",
				    (int)(sizeof(memory_type_name[0]) - 1),
				    memory_type_name[md->type]);
	if (type_len >= size)
		return buf;

	pos += type_len;
	size -= type_len;

	attr = md->attribute;
	if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
		     EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
		     EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
		snprintf(pos, size, "|attr=0x%016llx]",
			 (unsigned long long)attr);
	else
		snprintf(pos, size,
			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
			 attr & EFI_MEMORY_RUNTIME		? "RUN" : "",
			 attr & EFI_MEMORY_MORE_RELIABLE	? "MR"  : "",
			 attr & EFI_MEMORY_CPU_CRYPTO		? "CC"  : "",
			 attr & EFI_MEMORY_SP			? "SP"  : "",
			 attr & EFI_MEMORY_NV			? "NV"  : "",
			 attr & EFI_MEMORY_XP			? "XP"  : "",
			 attr & EFI_MEMORY_RP			? "RP"  : "",
			 attr & EFI_MEMORY_WP			? "WP"  : "",
			 attr & EFI_MEMORY_RO			? "RO"  : "",
			 attr & EFI_MEMORY_UCE			? "UCE" : "",
			 attr & EFI_MEMORY_WB			? "WB"  : "",
			 attr & EFI_MEMORY_WT			? "WT"  : "",
			 attr & EFI_MEMORY_WC			? "WC"  : "",
			 attr & EFI_MEMORY_UC			? "UC"  : "");
	return buf;
}

/*
 * efi_mem_attributes - lookup memmap attributes for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering
 * @phys_addr. Returns the EFI memory attributes if the region
 * was found in the memory map, 0 otherwise.
 */
u64 efi_mem_attributes(unsigned long phys_addr)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return 0;

	for_each_efi_memory_desc(md) {
		if ((md->phys_addr <= phys_addr) &&
		    (phys_addr < (md->phys_addr +
				  (md->num_pages << EFI_PAGE_SHIFT))))
			return md->attribute;
	}
	return 0;
}

/*
 * efi_mem_type - lookup memmap type for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering @phys_addr.
 * Returns the EFI memory type if the region was found in the memory
 * map, -ENOTSUPP if the EFI memory map is unavailable, and -EINVAL
 * otherwise.
 */
int efi_mem_type(unsigned long phys_addr)
{
	const efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return -ENOTSUPP;

	for_each_efi_memory_desc(md) {
		if ((md->phys_addr <= phys_addr) &&
		    (phys_addr < (md->phys_addr +
				  (md->num_pages << EFI_PAGE_SHIFT))))
			return md->type;
	}
	return -EINVAL;
}

int efi_status_to_err(efi_status_t status)
{
	int err;

	switch (status) {
	case EFI_SUCCESS:
		err = 0;
		break;
	case EFI_INVALID_PARAMETER:
		err = -EINVAL;
		break;
	case EFI_OUT_OF_RESOURCES:
		err = -ENOSPC;
		break;
	case EFI_DEVICE_ERROR:
		err = -EIO;
		break;
	case EFI_WRITE_PROTECTED:
		err = -EROFS;
		break;
	case EFI_SECURITY_VIOLATION:
		err = -EACCES;
		break;
	case EFI_NOT_FOUND:
		err = -ENOENT;
		break;
	case EFI_ABORTED:
		err = -EINTR;
		break;
	default:
		err = -EINVAL;
	}

	return err;
}
EXPORT_SYMBOL_GPL(efi_status_to_err);

static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;

static int __init efi_memreserve_map_root(void)
{
	if (mem_reserve == EFI_INVALID_TABLE_ADDR)
		return -ENODEV;

	efi_memreserve_root = memremap(mem_reserve,
				       sizeof(*efi_memreserve_root),
				       MEMREMAP_WB);
	if (WARN_ON_ONCE(!efi_memreserve_root))
		return -ENOMEM;
	return 0;
}

static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
{
	struct resource *res, *parent;
	int ret;

	res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
	if (!res)
		return -ENOMEM;

	res->name = "reserved";
	res->flags = IORESOURCE_MEM;
	res->start = addr;
	res->end = addr + size - 1;

	/* we expect a conflict with a 'System RAM' region */
	parent = request_resource_conflict(&iomem_resource, res);
	ret = parent ? request_resource(parent, res) : 0;

	/*
	 * Given that efi_mem_reserve_iomem() can be called at any
	 * time, only call memblock_reserve() if the architecture
	 * keeps the infrastructure around.
	 */
	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
		memblock_reserve(addr, size);

	return ret;
}

int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
{
	struct linux_efi_memreserve *rsv;
	unsigned long prsv;
	int rc, index;

	if (efi_memreserve_root == (void *)ULONG_MAX)
		return -ENODEV;

	if (!efi_memreserve_root) {
		rc = efi_memreserve_map_root();
		if (rc)
			return rc;
	}

	/* first try to find a slot in an existing linked list entry */
	for (prsv = efi_memreserve_root->next; prsv; ) {
		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
		if (!rsv)
			return -ENOMEM;
		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
		if (index < rsv->size) {
			rsv->entry[index].base = addr;
			rsv->entry[index].size = size;

			memunmap(rsv);
			return efi_mem_reserve_iomem(addr, size);
		}
		prsv = rsv->next;
		memunmap(rsv);
	}

	/* no slot found - allocate a new linked list entry */
	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
	if (!rsv)
		return -ENOMEM;

	rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
	if (rc) {
		free_page((unsigned long)rsv);
		return rc;
	}

	/*
	 * The memremap() call above assumes that a linux_efi_memreserve entry
	 * never crosses a page boundary, so let's ensure that this remains true
	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
	 * using SZ_4K explicitly in the size calculation below.
	 */
	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
	atomic_set(&rsv->count, 1);
	rsv->entry[0].base = addr;
	rsv->entry[0].size = size;

	spin_lock(&efi_mem_reserve_persistent_lock);
	rsv->next = efi_memreserve_root->next;
	efi_memreserve_root->next = __pa(rsv);
	spin_unlock(&efi_mem_reserve_persistent_lock);

	return efi_mem_reserve_iomem(addr, size);
}

static int __init efi_memreserve_root_init(void)
{
	if (efi_memreserve_root)
		return 0;
	if (efi_memreserve_map_root())
		efi_memreserve_root = (void *)ULONG_MAX;
	return 0;
}
early_initcall(efi_memreserve_root_init);

#ifdef CONFIG_KEXEC
static int update_efi_random_seed(struct notifier_block *nb,
				  unsigned long code, void *unused)
{
	struct linux_efi_random_seed *seed;
	u32 size = 0;

	if (!kexec_in_progress)
		return NOTIFY_DONE;

	seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
	if (seed != NULL) {
		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
		memunmap(seed);
	} else {
		pr_err("Could not map UEFI random seed!\n");
	}
	if (size > 0) {
		seed = memremap(efi_rng_seed, sizeof(*seed) + size,
				MEMREMAP_WB);
		if (seed != NULL) {
			seed->size = size;
			get_random_bytes(seed->bits, seed->size);
			memunmap(seed);
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block efi_random_seed_nb = {
	.notifier_call = update_efi_random_seed,
};

static int __init register_update_efi_random_seed(void)
{
	if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
		return 0;
	return register_reboot_notifier(&efi_random_seed_nb);
}
late_initcall(register_update_efi_random_seed);
#endif