// SPDX-License-Identifier: GPL-2.0-only
/*
 * efi.c - EFI subsystem
 *
 * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
 * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
 * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
 *
 * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
 * allowing the efivarfs to be mounted or the efivars module to be loaded.
 * The existence of /sys/firmware/efi may also be used by userspace to
 * determine that the system supports EFI.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/efi.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/kexec.h>
#include <linux/platform_device.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/ucs2_string.h>
#include <linux/memblock.h>
#include <linux/security.h>

#include <asm/early_ioremap.h>

struct efi __read_mostly efi = {
	.runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
	.acpi = EFI_INVALID_TABLE_ADDR,
	.acpi20 = EFI_INVALID_TABLE_ADDR,
	.smbios = EFI_INVALID_TABLE_ADDR,
	.smbios3 = EFI_INVALID_TABLE_ADDR,
	.esrt = EFI_INVALID_TABLE_ADDR,
	.tpm_log = EFI_INVALID_TABLE_ADDR,
	.tpm_final_log = EFI_INVALID_TABLE_ADDR,
};
EXPORT_SYMBOL(efi);

unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;

struct mm_struct efi_mm = {
	.mm_rb = RB_ROOT,
	.mm_users = ATOMIC_INIT(2),
	.mm_count = ATOMIC_INIT(1),
	.mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem),
	.page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
	.mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
	.cpu_bitmap = { [BITS_TO_LONGS(NR_CPUS)] = 0},
};

struct workqueue_struct *efi_rts_wq;

static bool disable_runtime;
static int __init setup_noefi(char *arg)
{
	disable_runtime = true;
	return 0;
}
early_param("noefi", setup_noefi);

bool efi_runtime_disabled(void)
{
	return disable_runtime;
}

bool __pure __efi_soft_reserve_enabled(void)
{
	return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
}

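/*
 * Parse the "efi=" kernel command line options handled here: "debug" sets
 * EFI_DBG, "noruntime" disables EFI runtime services, and "nosoftreserve"
 * disables EFI soft memory reservations (EFI_MEM_NO_SOFT_RESERVE).
 */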
static int __init parse_efi_cmdline(char *str)
{
	if (!str) {
		pr_warn("need at least one option\n");
		return -EINVAL;
	}

	if (parse_option_str(str, "debug"))
		set_bit(EFI_DBG, &efi.flags);

	if (parse_option_str(str, "noruntime"))
		disable_runtime = true;

	if (parse_option_str(str, "nosoftreserve"))
		set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);

	return 0;
}
early_param("efi", parse_efi_cmdline);

struct kobject *efi_kobj;

/*
 * Let's not leave out the systab information that snuck into
 * the efivars driver.
 * Note: do not add more fields to the systab sysfs file, as that breaks
 * the sysfs "one value per file" rule!
 */
static ssize_t systab_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	char *str = buf;

	if (!kobj || !buf)
		return -EINVAL;

	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
	/*
	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
	 * SMBIOS3 entry point shall be preferred, so we list it first to
	 * let applications stop parsing after the first match.
	 */
	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);

	if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86))
		str = efi_systab_show_arch(str);

	return str - buf;
}

static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);

static ssize_t fw_platform_size_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
}

extern __weak struct kobj_attribute efi_attr_fw_vendor;
extern __weak struct kobj_attribute efi_attr_runtime;
extern __weak struct kobj_attribute efi_attr_config_table;
static struct kobj_attribute efi_attr_fw_platform_size =
	__ATTR_RO(fw_platform_size);

static struct attribute *efi_subsys_attrs[] = {
	&efi_attr_systab.attr,
	&efi_attr_fw_platform_size.attr,
	&efi_attr_fw_vendor.attr,
	&efi_attr_runtime.attr,
	&efi_attr_config_table.attr,
	NULL,
};

umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
				   int n)
{
	return attr->mode;
}

static const struct attribute_group efi_subsys_attr_group = {
	.attrs = efi_subsys_attrs,
	.is_visible = efi_attr_is_visible,
};

static struct efivars generic_efivars;
static struct efivar_operations generic_ops;

static int generic_ops_register(void)
{
	generic_ops.get_variable = efi.get_variable;
	generic_ops.set_variable = efi.set_variable;
	generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
	generic_ops.get_next_variable = efi.get_next_variable;
	generic_ops.query_variable_store = efi_query_variable_store;

	return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
}

static void generic_ops_unregister(void)
{
	efivars_unregister(&generic_efivars);
}

#if IS_ENABLED(CONFIG_ACPI)
#define EFIVAR_SSDT_NAME_MAX 16
static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
static int __init efivar_ssdt_setup(char *str)
{
	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);

	if (ret)
		return ret;

	if (strlen(str) < sizeof(efivar_ssdt))
		memcpy(efivar_ssdt, str, strlen(str));
	else
		pr_warn("efivar_ssdt: name too long: %s\n", str);
	return 0;
}
__setup("efivar_ssdt=", efivar_ssdt_setup);

static __init int efivar_ssdt_iter(efi_char16_t *name, efi_guid_t vendor,
				   unsigned long name_size, void *data)
{
	struct efivar_entry *entry;
	struct list_head *list = data;
	char utf8_name[EFIVAR_SSDT_NAME_MAX];
	int limit = min_t(unsigned long, EFIVAR_SSDT_NAME_MAX, name_size);

	ucs2_as_utf8(utf8_name, name, limit - 1);
	if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
		return 0;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return 0;

	memcpy(entry->var.VariableName, name, name_size);
	memcpy(&entry->var.VendorGuid, &vendor, sizeof(efi_guid_t));

	efivar_entry_add(entry, list);

	return 0;
}

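/*
 * Load ACPI SSDT overrides from EFI variables whose name matches the
 * "efivar_ssdt=" parameter: matching variables are collected by
 * efivar_ssdt_iter() and their contents handed to acpi_load_table().
 */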
static __init int efivar_ssdt_load(void)
{
	LIST_HEAD(entries);
	struct efivar_entry *entry, *aux;
	unsigned long size;
	void *data;
	int ret;

	if (!efivar_ssdt[0])
		return 0;

	ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);

	list_for_each_entry_safe(entry, aux, &entries, list) {
		pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt,
			&entry->var.VendorGuid);

		list_del(&entry->list);

		ret = efivar_entry_size(entry, &size);
		if (ret) {
			pr_err("failed to get var size\n");
			goto free_entry;
		}

		data = kmalloc(size, GFP_KERNEL);
		if (!data) {
			ret = -ENOMEM;
			goto free_entry;
		}

		ret = efivar_entry_get(entry, NULL, &size, data);
		if (ret) {
			pr_err("failed to get var data\n");
			goto free_data;
		}

		ret = acpi_load_table(data, NULL);
		if (ret) {
			pr_err("failed to load table: %d\n", ret);
			goto free_data;
		}

		goto free_entry;

free_data:
		kfree(data);

free_entry:
		kfree(entry);
	}

	return ret;
}
#else
static inline int efivar_ssdt_load(void) { return 0; }
#endif

#ifdef CONFIG_DEBUG_FS

#define EFI_DEBUGFS_MAX_BLOBS 32

static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];

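/*
 * Expose the preserved EFI boot services code/data regions as read-only
 * blobs under the "efi" debugfs directory, up to EFI_DEBUGFS_MAX_BLOBS
 * entries.
 */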
static void __init efi_debugfs_init(void)
{
	struct dentry *efi_debugfs;
	efi_memory_desc_t *md;
	char name[32];
	int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
	int i = 0;

	efi_debugfs = debugfs_create_dir("efi", NULL);
	if (IS_ERR_OR_NULL(efi_debugfs))
		return;

	for_each_efi_memory_desc(md) {
		switch (md->type) {
		case EFI_BOOT_SERVICES_CODE:
			snprintf(name, sizeof(name), "boot_services_code%d",
				 type_count[md->type]++);
			break;
		case EFI_BOOT_SERVICES_DATA:
			snprintf(name, sizeof(name), "boot_services_data%d",
				 type_count[md->type]++);
			break;
		default:
			continue;
		}

		if (i >= EFI_DEBUGFS_MAX_BLOBS) {
			pr_warn("More than %d EFI boot service segments, only showing first %d in debugfs\n",
				EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
			break;
		}

		debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
		debugfs_blob[i].data = memremap(md->phys_addr,
						debugfs_blob[i].size,
						MEMREMAP_WB);
		if (!debugfs_blob[i].data)
			continue;

		debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
		i++;
	}
}
#else
static inline void efi_debugfs_init(void) {}
#endif

/*
 * We register the efi subsystem with the firmware subsystem and the
 * efivars subsystem with the efi subsystem, if the system was booted with
 * EFI.
 */
static int __init efisubsys_init(void)
{
	int error;

	if (!efi_enabled(EFI_RUNTIME_SERVICES))
		efi.runtime_supported_mask = 0;

	if (!efi_enabled(EFI_BOOT))
		return 0;

	if (efi.runtime_supported_mask) {
		/*
		 * Since we process only one efi_runtime_service() at a time, an
		 * ordered workqueue (which creates only one execution context)
		 * should suffice for all our needs.
		 */
		efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
		if (!efi_rts_wq) {
			pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
			clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
			efi.runtime_supported_mask = 0;
			return 0;
		}
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
		platform_device_register_simple("rtc-efi", 0, NULL, 0);

	/* We register the efi directory at /sys/firmware/efi */
	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
	if (!efi_kobj) {
		pr_err("efi: Firmware registration failed.\n");
		return -ENOMEM;
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES)) {
		efivar_ssdt_load();
		error = generic_ops_register();
		if (error)
			goto err_put;
		platform_device_register_simple("efivars", 0, NULL, 0);
	}

	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
	if (error) {
		pr_err("efi: Sysfs attribute export failed with error %d.\n",
		       error);
		goto err_unregister;
	}

	error = efi_runtime_map_init(efi_kobj);
	if (error)
		goto err_remove_group;

	/* and the standard mountpoint for efivarfs */
	error = sysfs_create_mount_point(efi_kobj, "efivars");
	if (error) {
		pr_err("efivars: Subsystem registration failed.\n");
		goto err_remove_group;
	}

	if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
		efi_debugfs_init();

	return 0;

err_remove_group:
	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
err_unregister:
	if (efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES))
		generic_ops_unregister();
err_put:
	kobject_put(efi_kobj);
	return error;
}

subsys_initcall(efisubsys_init);

/*
 * Find the efi memory descriptor for a given physical address. Given a
 * physical address, determine if it exists within an EFI Memory Map entry,
 * and if so, populate the supplied memory descriptor with the appropriate
 * data.
 */
int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP)) {
		pr_err_once("EFI_MEMMAP is not enabled.\n");
		return -EINVAL;
	}

	if (!out_md) {
		pr_err_once("out_md is null.\n");
		return -EINVAL;
	}

	for_each_efi_memory_desc(md) {
		u64 size;
		u64 end;

		size = md->num_pages << EFI_PAGE_SHIFT;
		end = md->phys_addr + size;
		if (phys_addr >= md->phys_addr && phys_addr < end) {
			memcpy(out_md, md, sizeof(*out_md));
			return 0;
		}
	}
	return -ENOENT;
}

/*
 * Calculate the highest address of an efi memory descriptor.
 */
u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
{
	u64 size = md->num_pages << EFI_PAGE_SHIFT;
	u64 end = md->phys_addr + size;
	return end;
}

void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}

/**
 * efi_mem_reserve - Reserve an EFI memory region
 * @addr: Physical address to reserve
 * @size: Size of reservation
 *
 * Mark a region as reserved from general kernel allocation and
 * prevent it being released by efi_free_boot_services().
 *
 * This function should be called by drivers once they've parsed EFI
 * configuration tables to figure out where their data lives, e.g.
 * efi_esrt_init().
 */
void __init efi_mem_reserve(phys_addr_t addr, u64 size)
{
	if (!memblock_is_region_reserved(addr, size))
		memblock_reserve(addr, size);

	/*
	 * Some architectures (x86) reserve all boot services ranges
	 * until efi_free_boot_services() because of buggy firmware
	 * implementations. This means the above memblock_reserve() is
	 * superfluous on x86 and instead what it needs to do is
	 * ensure the @addr, @size range is not freed.
	 */
	efi_arch_mem_reserve(addr, size);
}

static const efi_config_table_type_t common_tables[] __initconst = {
	{ACPI_20_TABLE_GUID, "ACPI 2.0", &efi.acpi20},
	{ACPI_TABLE_GUID, "ACPI", &efi.acpi},
	{SMBIOS_TABLE_GUID, "SMBIOS", &efi.smbios},
	{SMBIOS3_TABLE_GUID, "SMBIOS 3.0", &efi.smbios3},
	{EFI_SYSTEM_RESOURCE_TABLE_GUID, "ESRT", &efi.esrt},
	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi_mem_attr_table},
	{LINUX_EFI_RANDOM_SEED_TABLE_GUID, "RNG", &efi_rng_seed},
	{LINUX_EFI_TPM_EVENT_LOG_GUID, "TPMEventLog", &efi.tpm_log},
	{LINUX_EFI_TPM_FINAL_LOG_GUID, "TPMFinalLog", &efi.tpm_final_log},
	{LINUX_EFI_MEMRESERVE_TABLE_GUID, "MEMRESERVE", &mem_reserve},
	{EFI_RT_PROPERTIES_TABLE_GUID, "RTPROP", &rt_prop},
#ifdef CONFIG_EFI_RCI2_TABLE
	{DELLEMC_EFI_RCI2_TABLE_GUID, NULL, &rci2_table_phys},
#endif
	{NULL_GUID, NULL, NULL},
};

static __init int match_config_table(const efi_guid_t *guid,
				     unsigned long table,
				     const efi_config_table_type_t *table_types)
{
	int i;

	if (table_types) {
		for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
			if (!efi_guidcmp(*guid, table_types[i].guid)) {
				*(table_types[i].ptr) = table;
				if (table_types[i].name)
					pr_cont(" %s=0x%lx ",
						table_types[i].name, table);
				return 1;
			}
		}
	}

	return 0;
}

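/*
 * efi_config_parse_tables - parse the firmware's configuration tables
 * @config_tables: array of configuration table entries from the system table
 * @count: number of entries in @config_tables
 * @arch_tables: optional architecture-specific GUID/pointer table, or NULL
 *
 * Record the addresses of all known configuration tables, then process the
 * Linux-specific ones that were discovered: the random seed, the memreserve
 * list and the RT properties table.
 */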
int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
				   int count,
				   const efi_config_table_type_t *arch_tables)
{
	const efi_config_table_64_t *tbl64 = (void *)config_tables;
	const efi_config_table_32_t *tbl32 = (void *)config_tables;
	const efi_guid_t *guid;
	unsigned long table;
	int i;

	pr_info("");
	for (i = 0; i < count; i++) {
		if (!IS_ENABLED(CONFIG_X86)) {
			guid = &config_tables[i].guid;
			table = (unsigned long)config_tables[i].table;
		} else if (efi_enabled(EFI_64BIT)) {
			guid = &tbl64[i].guid;
			table = tbl64[i].table;

			if (IS_ENABLED(CONFIG_X86_32) &&
			    tbl64[i].table > U32_MAX) {
				pr_cont("\n");
				pr_err("Table located above 4GB, disabling EFI.\n");
				return -EINVAL;
			}
		} else {
			guid = &tbl32[i].guid;
			table = tbl32[i].table;
		}

		if (!match_config_table(guid, table, common_tables))
			match_config_table(guid, table, arch_tables);
	}
	pr_cont("\n");
	set_bit(EFI_CONFIG_TABLES, &efi.flags);

	if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
		struct linux_efi_random_seed *seed;
		u32 size = 0;

		seed = early_memremap(efi_rng_seed, sizeof(*seed));
		if (seed != NULL) {
			size = READ_ONCE(seed->size);
			early_memunmap(seed, sizeof(*seed));
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
		if (size > 0) {
			seed = early_memremap(efi_rng_seed,
					      sizeof(*seed) + size);
			if (seed != NULL) {
				pr_notice("seeding entropy pool\n");
				add_bootloader_randomness(seed->bits, size);
				early_memunmap(seed, sizeof(*seed) + size);
			} else {
				pr_err("Could not map UEFI random seed!\n");
			}
		}
	}

	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
		efi_memattr_init();

	efi_tpm_eventlog_init();

	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
		unsigned long prsv = mem_reserve;

		while (prsv) {
			struct linux_efi_memreserve *rsv;
			u8 *p;

			/*
			 * Just map a full page: that is what we will get
			 * anyway, and it permits us to map the entire entry
			 * before knowing its size.
			 */
			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
					   PAGE_SIZE);
			if (p == NULL) {
				pr_err("Could not map UEFI memreserve entry!\n");
				return -ENOMEM;
			}

			rsv = (void *)(p + prsv % PAGE_SIZE);

			/* reserve the entry itself */
			memblock_reserve(prsv, EFI_MEMRESERVE_SIZE(rsv->size));

			for (i = 0; i < atomic_read(&rsv->count); i++) {
				memblock_reserve(rsv->entry[i].base,
						 rsv->entry[i].size);
			}

			prsv = rsv->next;
			early_memunmap(p, PAGE_SIZE);
		}
	}

	if (rt_prop != EFI_INVALID_TABLE_ADDR) {
		efi_rt_properties_table_t *tbl;

		tbl = early_memremap(rt_prop, sizeof(*tbl));
		if (tbl) {
			efi.runtime_supported_mask &= tbl->runtime_services_supported;
			early_memunmap(tbl, sizeof(*tbl));
		}
	}

	return 0;
}

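/*
 * Sanity-check the EFI system table header: verify its signature and warn
 * if the reported revision is older than @min_major_version.
 */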
int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr,
				   int min_major_version)
{
	if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
		pr_err("System table signature incorrect!\n");
		return -EINVAL;
	}

	if ((systab_hdr->revision >> 16) < min_major_version)
		pr_err("Warning: System table version %d.%02d, expected %d.00 or greater!\n",
		       systab_hdr->revision >> 16,
		       systab_hdr->revision & 0xffff,
		       min_major_version);

	return 0;
}

#ifndef CONFIG_IA64
static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
						size_t size)
{
	const efi_char16_t *ret;

	ret = early_memremap_ro(fw_vendor, size);
	if (!ret)
		pr_err("Could not map the firmware vendor!\n");
	return ret;
}

static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
{
	early_memunmap((void *)fw_vendor, size);
}
#else
#define map_fw_vendor(p, s)	__va(p)
#define unmap_fw_vendor(v, s)
#endif

void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
				     unsigned long fw_vendor)
{
	char vendor[100] = "unknown";
	const efi_char16_t *c16;
	size_t i;

	c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
	if (c16) {
		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
			vendor[i] = c16[i];
		vendor[i] = '\0';

		unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
	}

	pr_info("EFI v%u.%.02u by %s\n",
		systab_hdr->revision >> 16,
		systab_hdr->revision & 0xffff,
		vendor);
}

static __initdata char memory_type_name[][20] = {
	"Reserved",
	"Loader Code",
	"Loader Data",
	"Boot Code",
	"Boot Data",
	"Runtime Code",
	"Runtime Data",
	"Conventional Memory",
	"Unusable Memory",
	"ACPI Reclaim Memory",
	"ACPI Memory NVS",
	"Memory Mapped I/O",
	"MMIO Port Space",
	"PAL Code",
	"Persistent Memory",
};

/*
 * efi_md_typeattr_format - format a memory descriptor's type and attributes
 * @buf: output buffer
 * @size: size of @buf in bytes
 * @md: memory descriptor to describe
 *
 * Produce a human readable "[type|attributes]" string for @md, as used when
 * dumping the EFI memory map. Returns @buf.
 */
char * __init efi_md_typeattr_format(char *buf, size_t size,
				     const efi_memory_desc_t *md)
{
	char *pos;
	int type_len;
	u64 attr;

	pos = buf;
	if (md->type >= ARRAY_SIZE(memory_type_name))
		type_len = snprintf(pos, size, "[type=%u", md->type);
	else
		type_len = snprintf(pos, size, "[%-*s",
				    (int)(sizeof(memory_type_name[0]) - 1),
				    memory_type_name[md->type]);
	if (type_len >= size)
		return buf;

	pos += type_len;
	size -= type_len;

	attr = md->attribute;
	if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
		     EFI_MEMORY_NV | EFI_MEMORY_SP |
		     EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
		snprintf(pos, size, "|attr=0x%016llx]",
			 (unsigned long long)attr);
	else
		snprintf(pos, size,
			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
			 attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
			 attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
			 attr & EFI_MEMORY_SP ? "SP" : "",
			 attr & EFI_MEMORY_NV ? "NV" : "",
			 attr & EFI_MEMORY_XP ? "XP" : "",
			 attr & EFI_MEMORY_RP ? "RP" : "",
			 attr & EFI_MEMORY_WP ? "WP" : "",
			 attr & EFI_MEMORY_RO ? "RO" : "",
			 attr & EFI_MEMORY_UCE ? "UCE" : "",
			 attr & EFI_MEMORY_WB ? "WB" : "",
			 attr & EFI_MEMORY_WT ? "WT" : "",
			 attr & EFI_MEMORY_WC ? "WC" : "",
			 attr & EFI_MEMORY_UC ? "UC" : "");
	return buf;
}

/*
 * IA64 has a funky EFI memory map that doesn't work the same way as
 * other architectures.
 */
#ifndef CONFIG_IA64
/*
 * efi_mem_attributes - lookup memmap attributes for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering
 * @phys_addr. Returns the EFI memory attributes if the region
 * was found in the memory map, 0 otherwise.
 */
u64 efi_mem_attributes(unsigned long phys_addr)
{
	efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return 0;

	for_each_efi_memory_desc(md) {
		if ((md->phys_addr <= phys_addr) &&
		    (phys_addr < (md->phys_addr +
				  (md->num_pages << EFI_PAGE_SHIFT))))
			return md->attribute;
	}
	return 0;
}

/*
 * efi_mem_type - lookup memmap type for physical address
 * @phys_addr: the physical address to lookup
 *
 * Search in the EFI memory map for the region covering @phys_addr.
 * Returns the EFI memory type if the region was found in the memory
 * map, -EINVAL otherwise.
 */
int efi_mem_type(unsigned long phys_addr)
{
	const efi_memory_desc_t *md;

	if (!efi_enabled(EFI_MEMMAP))
		return -ENOTSUPP;

	for_each_efi_memory_desc(md) {
		if ((md->phys_addr <= phys_addr) &&
		    (phys_addr < (md->phys_addr +
				  (md->num_pages << EFI_PAGE_SHIFT))))
			return md->type;
	}
	return -EINVAL;
}
#endif

int efi_status_to_err(efi_status_t status)
{
	int err;

	switch (status) {
	case EFI_SUCCESS:
		err = 0;
		break;
	case EFI_INVALID_PARAMETER:
		err = -EINVAL;
		break;
	case EFI_OUT_OF_RESOURCES:
		err = -ENOSPC;
		break;
	case EFI_DEVICE_ERROR:
		err = -EIO;
		break;
	case EFI_WRITE_PROTECTED:
		err = -EROFS;
		break;
	case EFI_SECURITY_VIOLATION:
		err = -EACCES;
		break;
	case EFI_NOT_FOUND:
		err = -ENOENT;
		break;
	case EFI_ABORTED:
		err = -EINTR;
		break;
	default:
		err = -EINVAL;
	}

	return err;
}

static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;

static int __init efi_memreserve_map_root(void)
{
	if (mem_reserve == EFI_INVALID_TABLE_ADDR)
		return -ENODEV;

	efi_memreserve_root = memremap(mem_reserve,
				       sizeof(*efi_memreserve_root),
				       MEMREMAP_WB);
	if (WARN_ON_ONCE(!efi_memreserve_root))
		return -ENOMEM;
	return 0;
}

static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
{
	struct resource *res, *parent;

	res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
	if (!res)
		return -ENOMEM;

	res->name = "reserved";
	res->flags = IORESOURCE_MEM;
	res->start = addr;
	res->end = addr + size - 1;

	/* we expect a conflict with a 'System RAM' region */
	parent = request_resource_conflict(&iomem_resource, res);
	return parent ? request_resource(parent, res) : 0;
}

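/*
 * efi_mem_reserve_persistent - record a memory reservation that survives kexec
 * @addr: physical base address of the region to reserve
 * @size: size of the region in bytes
 *
 * Append the region to the LINUX_EFI_MEMRESERVE table's linked list so that
 * a kexec'd kernel can re-reserve it, and mark it reserved in the iomem
 * resource tree.
 */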
int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
{
	struct linux_efi_memreserve *rsv;
	unsigned long prsv;
	int rc, index;

	if (efi_memreserve_root == (void *)ULONG_MAX)
		return -ENODEV;

	if (!efi_memreserve_root) {
		rc = efi_memreserve_map_root();
		if (rc)
			return rc;
	}

	/* first try to find a slot in an existing linked list entry */
	for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) {
		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
		if (index < rsv->size) {
			rsv->entry[index].base = addr;
			rsv->entry[index].size = size;

			memunmap(rsv);
			return efi_mem_reserve_iomem(addr, size);
		}
		memunmap(rsv);
	}

	/* no slot found - allocate a new linked list entry */
	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
	if (!rsv)
		return -ENOMEM;

	rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
	if (rc) {
		free_page((unsigned long)rsv);
		return rc;
	}

	/*
	 * The memremap() call above assumes that a linux_efi_memreserve entry
	 * never crosses a page boundary, so let's ensure that this remains true
	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
	 * using SZ_4K explicitly in the size calculation below.
	 */
	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
	atomic_set(&rsv->count, 1);
	rsv->entry[0].base = addr;
	rsv->entry[0].size = size;

	spin_lock(&efi_mem_reserve_persistent_lock);
	rsv->next = efi_memreserve_root->next;
	efi_memreserve_root->next = __pa(rsv);
	spin_unlock(&efi_mem_reserve_persistent_lock);

	return efi_mem_reserve_iomem(addr, size);
}

static int __init efi_memreserve_root_init(void)
{
	if (efi_memreserve_root)
		return 0;
	if (efi_memreserve_map_root())
		efi_memreserve_root = (void *)ULONG_MAX;
	return 0;
}
early_initcall(efi_memreserve_root_init);

#ifdef CONFIG_KEXEC
static int update_efi_random_seed(struct notifier_block *nb,
				  unsigned long code, void *unused)
{
	struct linux_efi_random_seed *seed;
	u32 size = 0;

	if (!kexec_in_progress)
		return NOTIFY_DONE;

	seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
	if (seed != NULL) {
		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
		memunmap(seed);
	} else {
		pr_err("Could not map UEFI random seed!\n");
	}
	if (size > 0) {
		seed = memremap(efi_rng_seed, sizeof(*seed) + size,
				MEMREMAP_WB);
		if (seed != NULL) {
			seed->size = size;
			get_random_bytes(seed->bits, seed->size);
			memunmap(seed);
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block efi_random_seed_nb = {
	.notifier_call = update_efi_random_seed,
};

static int __init register_update_efi_random_seed(void)
{
	if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
		return 0;
	return register_reboot_notifier(&efi_random_seed_nb);
}
late_initcall(register_update_efi_random_seed);
#endif