xref: /linux/drivers/firmware/efi/efi.c (revision 7fc2cd2e4b398c57c9cf961cfea05eadbf34c05c)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * efi.c - EFI subsystem
4  *
5  * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
6  * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
7  * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
8  *
9  * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
10  * allowing the efivarfs to be mounted or the efivars module to be loaded.
11  * The existence of /sys/firmware/efi may also be used by userspace to
12  * determine that the system supports EFI.
13  */
14 
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 
17 #include <linux/kobject.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/debugfs.h>
21 #include <linux/device.h>
22 #include <linux/efi.h>
23 #include <linux/of.h>
24 #include <linux/initrd.h>
25 #include <linux/io.h>
26 #include <linux/kexec.h>
27 #include <linux/platform_device.h>
28 #include <linux/random.h>
29 #include <linux/reboot.h>
30 #include <linux/slab.h>
31 #include <linux/acpi.h>
32 #include <linux/ucs2_string.h>
33 #include <linux/memblock.h>
34 #include <linux/security.h>
35 #include <linux/notifier.h>
36 
37 #include <asm/early_ioremap.h>
38 
39 struct efi __read_mostly efi = {
40 	.runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
41 	.acpi			= EFI_INVALID_TABLE_ADDR,
42 	.acpi20			= EFI_INVALID_TABLE_ADDR,
43 	.smbios			= EFI_INVALID_TABLE_ADDR,
44 	.smbios3		= EFI_INVALID_TABLE_ADDR,
45 	.esrt			= EFI_INVALID_TABLE_ADDR,
46 	.tpm_log		= EFI_INVALID_TABLE_ADDR,
47 	.tpm_final_log		= EFI_INVALID_TABLE_ADDR,
48 	.ovmf_debug_log         = EFI_INVALID_TABLE_ADDR,
49 #ifdef CONFIG_LOAD_UEFI_KEYS
50 	.mokvar_table		= EFI_INVALID_TABLE_ADDR,
51 #endif
52 #ifdef CONFIG_EFI_COCO_SECRET
53 	.coco_secret		= EFI_INVALID_TABLE_ADDR,
54 #endif
55 #ifdef CONFIG_UNACCEPTED_MEMORY
56 	.unaccepted		= EFI_INVALID_TABLE_ADDR,
57 #endif
58 };
59 EXPORT_SYMBOL(efi);
60 
61 unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
62 static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
63 static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
64 static unsigned long __initdata initrd = EFI_INVALID_TABLE_ADDR;
65 
66 extern unsigned long screen_info_table;
67 
68 struct mm_struct efi_mm = {
69 	.mm_mt			= MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
70 	.mm_users		= ATOMIC_INIT(2),
71 	.mm_count		= ATOMIC_INIT(1),
72 	.write_protect_seq      = SEQCNT_ZERO(efi_mm.write_protect_seq),
73 	MMAP_LOCK_INITIALIZER(efi_mm)
74 	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
75 	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
76 	.cpu_bitmap		= { [BITS_TO_LONGS(NR_CPUS)] = 0},
77 #ifdef CONFIG_SCHED_MM_CID
78 	.mm_cid.lock		= __RAW_SPIN_LOCK_UNLOCKED(efi_mm.mm_cid.lock),
79 #endif
80 };
81 
82 struct workqueue_struct *efi_rts_wq;
83 
84 static bool disable_runtime = IS_ENABLED(CONFIG_EFI_DISABLE_RUNTIME);
85 static int __init setup_noefi(char *arg)
86 {
87 	disable_runtime = true;
88 	return 0;
89 }
90 early_param("noefi", setup_noefi);
91 
92 bool efi_runtime_disabled(void)
93 {
94 	return disable_runtime;
95 }
96 
97 bool __pure __efi_soft_reserve_enabled(void)
98 {
99 	return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
100 }
101 
102 static int __init parse_efi_cmdline(char *str)
103 {
104 	if (!str) {
105 		pr_warn("need at least one option\n");
106 		return -EINVAL;
107 	}
108 
109 	if (parse_option_str(str, "debug"))
110 		set_bit(EFI_DBG, &efi.flags);
111 
112 	if (parse_option_str(str, "noruntime"))
113 		disable_runtime = true;
114 
115 	if (parse_option_str(str, "runtime"))
116 		disable_runtime = false;
117 
118 	if (parse_option_str(str, "nosoftreserve"))
119 		set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);
120 
121 	return 0;
122 }
123 early_param("efi", parse_efi_cmdline);
124 
125 struct kobject *efi_kobj;
126 
127 /*
128  * Let's not leave out the systab information that snuck into
129  * the efivars driver.
130  * Note: do not add more fields to the systab sysfs file, as that breaks
131  * the sysfs one-value-per-file rule!
132  */
133 static ssize_t systab_show(struct kobject *kobj,
134 			   struct kobj_attribute *attr, char *buf)
135 {
136 	char *str = buf;
137 
138 	if (!kobj || !buf)
139 		return -EINVAL;
140 
141 	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
142 		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
143 	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
144 		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
145 	/*
146 	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
147 	 * SMBIOS3 entry point shall be preferred, so we list it first to
148 	 * let applications stop parsing after the first match.
149 	 */
150 	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
151 		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
152 	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
153 		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
154 
155 	return str - buf;
156 }
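/*
 * Example only: on a system where all four tables are present, reading
 * /sys/firmware/efi/systab yields something like (addresses made up):
 *
 *   ACPI20=0x7ff7e014
 *   ACPI=0x7ff7e000
 *   SMBIOS3=0x7ff7a000
 *   SMBIOS=0x7ff7b000
 *
 * Entries whose table address is EFI_INVALID_TABLE_ADDR are simply omitted.
 */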
157 
158 static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
159 
160 static ssize_t fw_platform_size_show(struct kobject *kobj,
161 				     struct kobj_attribute *attr, char *buf)
162 {
163 	return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
164 }
165 
166 extern __weak struct kobj_attribute efi_attr_fw_vendor;
167 extern __weak struct kobj_attribute efi_attr_runtime;
168 extern __weak struct kobj_attribute efi_attr_config_table;
169 static struct kobj_attribute efi_attr_fw_platform_size =
170 	__ATTR_RO(fw_platform_size);
171 
172 static struct attribute *efi_subsys_attrs[] = {
173 	&efi_attr_systab.attr,
174 	&efi_attr_fw_platform_size.attr,
175 	&efi_attr_fw_vendor.attr,
176 	&efi_attr_runtime.attr,
177 	&efi_attr_config_table.attr,
178 	NULL,
179 };
180 
181 umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
182 				   int n)
183 {
184 	return attr->mode;
185 }
186 
187 static const struct attribute_group efi_subsys_attr_group = {
188 	.attrs = efi_subsys_attrs,
189 	.is_visible = efi_attr_is_visible,
190 };
191 
192 struct blocking_notifier_head efivar_ops_nh;
193 EXPORT_SYMBOL_GPL(efivar_ops_nh);
194 
195 static struct efivars generic_efivars;
196 static struct efivar_operations generic_ops;
197 
198 static bool generic_ops_supported(void)
199 {
200 	unsigned long name_size;
201 	efi_status_t status;
202 	efi_char16_t name;
203 	efi_guid_t guid;
204 
205 	name_size = sizeof(name);
206 
207 	if (!efi.get_next_variable)
208 		return false;
209 	status = efi.get_next_variable(&name_size, &name, &guid);
210 	if (status == EFI_UNSUPPORTED)
211 		return false;
212 
213 	return true;
214 }
215 
216 static int generic_ops_register(void)
217 {
218 	if (!generic_ops_supported())
219 		return 0;
220 
221 	generic_ops.get_variable = efi.get_variable;
222 	generic_ops.get_next_variable = efi.get_next_variable;
223 	generic_ops.query_variable_store = efi_query_variable_store;
224 	generic_ops.query_variable_info = efi.query_variable_info;
225 
226 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
227 		generic_ops.set_variable = efi.set_variable;
228 		generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
229 	}
230 	return efivars_register(&generic_efivars, &generic_ops);
231 }
232 
233 static void generic_ops_unregister(void)
234 {
235 	if (!generic_ops.get_variable)
236 		return;
237 
238 	efivars_unregister(&generic_efivars);
239 }
240 
241 void efivars_generic_ops_register(void)
242 {
243 	generic_ops_register();
244 }
245 EXPORT_SYMBOL_GPL(efivars_generic_ops_register);
246 
247 void efivars_generic_ops_unregister(void)
248 {
249 	generic_ops_unregister();
250 }
251 EXPORT_SYMBOL_GPL(efivars_generic_ops_unregister);
252 
253 #ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
254 #define EFIVAR_SSDT_NAME_MAX	16UL
255 static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
256 static int __init efivar_ssdt_setup(char *str)
257 {
258 	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
259 
260 	if (ret)
261 		return ret;
262 
263 	if (strlen(str) < sizeof(efivar_ssdt))
264 		memcpy(efivar_ssdt, str, strlen(str));
265 	else
266 		pr_warn("efivar_ssdt: name too long: %s\n", str);
267 	return 1;
268 }
269 __setup("efivar_ssdt=", efivar_ssdt_setup);
270 
271 static __init int efivar_ssdt_load(void)
272 {
273 	unsigned long name_size = 256;
274 	efi_char16_t *name = NULL;
275 	efi_status_t status;
276 	efi_guid_t guid;
277 	int ret = 0;
278 
279 	if (!efivar_ssdt[0])
280 		return 0;
281 
282 	name = kzalloc(name_size, GFP_KERNEL);
283 	if (!name)
284 		return -ENOMEM;
285 
286 	for (;;) {
287 		char utf8_name[EFIVAR_SSDT_NAME_MAX];
288 		unsigned long data_size = 0;
289 		void *data;
290 		int limit;
291 
292 		status = efi.get_next_variable(&name_size, name, &guid);
293 		if (status == EFI_NOT_FOUND) {
294 			break;
295 		} else if (status == EFI_BUFFER_TOO_SMALL) {
296 			efi_char16_t *name_tmp =
297 				krealloc(name, name_size, GFP_KERNEL);
298 			if (!name_tmp) {
299 				ret = -ENOMEM;
300 				goto out;
301 			}
302 			name = name_tmp;
303 			continue;
304 		}
305 
306 		limit = min(EFIVAR_SSDT_NAME_MAX, name_size);
307 		ucs2_as_utf8(utf8_name, name, limit - 1);
308 		if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
309 			continue;
310 
311 		pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid);
312 
313 		status = efi.get_variable(name, &guid, NULL, &data_size, NULL);
314 		if (status != EFI_BUFFER_TOO_SMALL || !data_size) {
315 			ret = -EIO;
316 			goto out;
317 		}
318 
319 		data = kmalloc(data_size, GFP_KERNEL);
320 		if (!data) {
321 			ret = -ENOMEM;
322 			goto out;
323 		}
324 
325 		status = efi.get_variable(name, &guid, NULL, &data_size, data);
326 		if (status == EFI_SUCCESS) {
327 			acpi_status acpi_ret = acpi_load_table(data, NULL);
328 			if (ACPI_FAILURE(acpi_ret)) {
329 				pr_err("efivar_ssdt: failed to load table: %u\n",
330 				       acpi_ret);
331 			} else {
332 				/*
333 				 * The @data will be in use by the ACPI engine,
334 				 * do not free it!
335 				 */
336 				continue;
337 			}
338 		} else {
339 			pr_err("efivar_ssdt: failed to get var data: 0x%lx\n", status);
340 		}
341 		kfree(data);
342 	}
343 out:
344 	kfree(name);
345 	return ret;
346 }
347 #else
348 static inline int efivar_ssdt_load(void) { return 0; }
349 #endif
350 
351 #ifdef CONFIG_DEBUG_FS
352 
353 #define EFI_DEBUGFS_MAX_BLOBS 32
354 
355 static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];
356 
357 static void __init efi_debugfs_init(void)
358 {
359 	struct dentry *efi_debugfs;
360 	efi_memory_desc_t *md;
361 	char name[32];
362 	int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
363 	int i = 0;
364 
365 	efi_debugfs = debugfs_create_dir("efi", NULL);
366 	if (IS_ERR(efi_debugfs))
367 		return;
368 
369 	for_each_efi_memory_desc(md) {
370 		switch (md->type) {
371 		case EFI_BOOT_SERVICES_CODE:
372 			snprintf(name, sizeof(name), "boot_services_code%d",
373 				 type_count[md->type]++);
374 			break;
375 		case EFI_BOOT_SERVICES_DATA:
376 			snprintf(name, sizeof(name), "boot_services_data%d",
377 				 type_count[md->type]++);
378 			break;
379 		default:
380 			continue;
381 		}
382 
383 		if (i >= EFI_DEBUGFS_MAX_BLOBS) {
384 			pr_warn("More than %d EFI boot service segments, only showing first %d in debugfs\n",
385 				EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
386 			break;
387 		}
388 
389 		debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
390 		debugfs_blob[i].data = memremap(md->phys_addr,
391 						debugfs_blob[i].size,
392 						MEMREMAP_WB);
393 		if (!debugfs_blob[i].data)
394 			continue;
395 
396 		debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
397 		i++;
398 	}
399 }
400 #else
401 static inline void efi_debugfs_init(void) {}
402 #endif
403 
404 /*
405  * We register the efi subsystem with the firmware subsystem and the
406  * efivars subsystem with the efi subsystem, if the system was booted with
407  * EFI.
408  */
409 static int __init efisubsys_init(void)
410 {
411 	int error;
412 
413 	if (!efi_enabled(EFI_RUNTIME_SERVICES))
414 		efi.runtime_supported_mask = 0;
415 
416 	if (!efi_enabled(EFI_BOOT))
417 		return 0;
418 
419 	if (efi.runtime_supported_mask) {
420 		/*
421 		 * Since we process only one efi_runtime_service() at a time, an
422 		 * ordered workqueue (which creates only one execution context)
423 		 * should suffice for all our needs.
424 		 */
425 		efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
426 		if (!efi_rts_wq) {
427 			pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
428 			clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
429 			efi.runtime_supported_mask = 0;
430 			return 0;
431 		}
432 	}
433 
434 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
435 		platform_device_register_simple("rtc-efi", 0, NULL, 0);
436 
437 	/* We register the efi directory at /sys/firmware/efi */
438 	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
439 	if (!efi_kobj) {
440 		pr_err("efi: Firmware registration failed.\n");
441 		error = -ENOMEM;
442 		goto err_destroy_wq;
443 	}
444 
445 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
446 				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
447 		error = generic_ops_register();
448 		if (error)
449 			goto err_put;
450 		error = efivar_ssdt_load();
451 		if (error)
452 			pr_err("efi: failed to load SSDT, error %d.\n", error);
453 		platform_device_register_simple("efivars", 0, NULL, 0);
454 	}
455 
456 	BLOCKING_INIT_NOTIFIER_HEAD(&efivar_ops_nh);
457 
458 	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
459 	if (error) {
460 		pr_err("efi: Sysfs attribute export failed with error %d.\n",
461 		       error);
462 		goto err_unregister;
463 	}
464 
465 	/* and the standard mountpoint for efivarfs */
466 	error = sysfs_create_mount_point(efi_kobj, "efivars");
467 	if (error) {
468 		pr_err("efivars: Subsystem registration failed.\n");
469 		goto err_remove_group;
470 	}
471 
472 	if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
473 		efi_debugfs_init();
474 
475 #ifdef CONFIG_EFI_COCO_SECRET
476 	if (efi.coco_secret != EFI_INVALID_TABLE_ADDR)
477 		platform_device_register_simple("efi_secret", 0, NULL, 0);
478 #endif
479 
480 	if (IS_ENABLED(CONFIG_OVMF_DEBUG_LOG) &&
481 	    efi.ovmf_debug_log != EFI_INVALID_TABLE_ADDR)
482 		ovmf_log_probe(efi.ovmf_debug_log);
483 
484 	return 0;
485 
486 err_remove_group:
487 	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
488 err_unregister:
489 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
490 				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
491 		generic_ops_unregister();
492 err_put:
493 	kobject_put(efi_kobj);
494 	efi_kobj = NULL;
495 err_destroy_wq:
496 	if (efi_rts_wq)
497 		destroy_workqueue(efi_rts_wq);
498 
499 	return error;
500 }
501 
502 subsys_initcall(efisubsys_init);
503 
504 void __init efi_find_mirror(void)
505 {
506 	efi_memory_desc_t *md;
507 	u64 mirror_size = 0, total_size = 0;
508 
509 	if (!efi_enabled(EFI_MEMMAP))
510 		return;
511 
512 	for_each_efi_memory_desc(md) {
513 		unsigned long long start = md->phys_addr;
514 		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
515 
516 		total_size += size;
517 		if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
518 			memblock_mark_mirror(start, size);
519 			mirror_size += size;
520 		}
521 	}
522 	if (mirror_size)
523 		pr_info("Memory: %lldM/%lldM mirrored memory\n",
524 			mirror_size>>20, total_size>>20);
525 }
526 
527 /*
528  * Find the efi memory descriptor for a given physical address.  Given a
529  * physical address, determine if it exists within an EFI Memory Map entry,
530  * and if so, populate the supplied memory descriptor with the appropriate
531  * data.
532  */
533 int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
534 {
535 	efi_memory_desc_t *md;
536 
537 	if (!efi_enabled(EFI_MEMMAP)) {
538 		pr_err_once("EFI_MEMMAP is not enabled.\n");
539 		return -EINVAL;
540 	}
541 
542 	if (!out_md) {
543 		pr_err_once("out_md is null.\n");
544 		return -EINVAL;
545 	}
546 
547 	for_each_efi_memory_desc(md) {
548 		u64 size;
549 		u64 end;
550 
551 		/* skip bogus entries (including empty ones) */
552 		if ((md->phys_addr & (EFI_PAGE_SIZE - 1)) ||
553 		    (md->num_pages <= 0) ||
554 		    (md->num_pages > (U64_MAX - md->phys_addr) >> EFI_PAGE_SHIFT))
555 			continue;
556 
557 		size = md->num_pages << EFI_PAGE_SHIFT;
558 		end = md->phys_addr + size;
559 		if (phys_addr >= md->phys_addr && phys_addr < end) {
560 			memcpy(out_md, md, sizeof(*out_md));
561 			return 0;
562 		}
563 	}
564 	return -ENOENT;
565 }
566 
567 extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
568 	__weak __alias(__efi_mem_desc_lookup);
569 EXPORT_SYMBOL_GPL(efi_mem_desc_lookup);
570 
571 /*
572  * Calculate the highest address of an efi memory descriptor.
573  */
574 u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
575 {
576 	u64 size = md->num_pages << EFI_PAGE_SHIFT;
577 	u64 end = md->phys_addr + size;
578 	return end;
579 }
580 
581 void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}
582 
583 /**
584  * efi_mem_reserve - Reserve an EFI memory region
585  * @addr: Physical address to reserve
586  * @size: Size of reservation
587  *
588  * Mark a region as reserved from general kernel allocation and
589  * prevent it being released by efi_free_boot_services().
590  *
591  * This function should be called by drivers once they've parsed EFI
592  * configuration tables to figure out where their data lives, e.g.
593  * efi_esrt_init().
594  */
595 void __init efi_mem_reserve(phys_addr_t addr, u64 size)
596 {
597 	/* efi_mem_reserve() does not work under Xen */
598 	if (WARN_ON_ONCE(efi_enabled(EFI_PARAVIRT)))
599 		return;
600 
601 	if (!memblock_is_region_reserved(addr, size))
602 		memblock_reserve(addr, size);
603 
604 	/*
605 	 * Some architectures (x86) reserve all boot services ranges
606 	 * until efi_free_boot_services() because of buggy firmware
607 	 * implementations. This means the above memblock_reserve() is
608 	 * superfluous on x86; what is actually needed here is to ensure
609 	 * that the @addr, @size range is not freed.
610 	 */
611 	efi_arch_mem_reserve(addr, size);
612 }
613 
614 static const efi_config_table_type_t common_tables[] __initconst = {
615 	{ACPI_20_TABLE_GUID,			&efi.acpi20,		"ACPI 2.0"	},
616 	{ACPI_TABLE_GUID,			&efi.acpi,		"ACPI"		},
617 	{SMBIOS_TABLE_GUID,			&efi.smbios,		"SMBIOS"	},
618 	{SMBIOS3_TABLE_GUID,			&efi.smbios3,		"SMBIOS 3.0"	},
619 	{EFI_SYSTEM_RESOURCE_TABLE_GUID,	&efi.esrt,		"ESRT"		},
620 	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID,	&efi_mem_attr_table,	"MEMATTR"	},
621 	{LINUX_EFI_RANDOM_SEED_TABLE_GUID,	&efi_rng_seed,		"RNG"		},
622 	{LINUX_EFI_TPM_EVENT_LOG_GUID,		&efi.tpm_log,		"TPMEventLog"	},
623 	{EFI_TCG2_FINAL_EVENTS_TABLE_GUID,	&efi.tpm_final_log,	"TPMFinalLog"	},
624 	{EFI_CC_FINAL_EVENTS_TABLE_GUID,	&efi.tpm_final_log,	"CCFinalLog"	},
625 	{LINUX_EFI_MEMRESERVE_TABLE_GUID,	&mem_reserve,		"MEMRESERVE"	},
626 	{LINUX_EFI_INITRD_MEDIA_GUID,		&initrd,		"INITRD"	},
627 	{EFI_RT_PROPERTIES_TABLE_GUID,		&rt_prop,		"RTPROP"	},
628 #ifdef CONFIG_OVMF_DEBUG_LOG
629 	{OVMF_MEMORY_LOG_TABLE_GUID,		&efi.ovmf_debug_log,	"OvmfDebugLog"	},
630 #endif
631 #ifdef CONFIG_EFI_RCI2_TABLE
632 	{DELLEMC_EFI_RCI2_TABLE_GUID,		&rci2_table_phys			},
633 #endif
634 #ifdef CONFIG_LOAD_UEFI_KEYS
635 	{LINUX_EFI_MOK_VARIABLE_TABLE_GUID,	&efi.mokvar_table,	"MOKvar"	},
636 #endif
637 #ifdef CONFIG_EFI_COCO_SECRET
638 	{LINUX_EFI_COCO_SECRET_AREA_GUID,	&efi.coco_secret,	"CocoSecret"	},
639 #endif
640 #ifdef CONFIG_UNACCEPTED_MEMORY
641 	{LINUX_EFI_UNACCEPTED_MEM_TABLE_GUID,	&efi.unaccepted,	"Unaccepted"	},
642 #endif
643 #ifdef CONFIG_EFI_GENERIC_STUB
644 	{LINUX_EFI_SCREEN_INFO_TABLE_GUID,	&screen_info_table			},
645 #endif
646 	{},
647 };
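/*
 * The empty "{}" initializer above acts as the NULL_GUID sentinel that
 * terminates the scan loop in match_config_table() below; any arch_tables
 * array passed to efi_config_parse_tables() is expected to end the same way.
 */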
648 
649 static __init int match_config_table(const efi_guid_t *guid,
650 				     unsigned long table,
651 				     const efi_config_table_type_t *table_types)
652 {
653 	int i;
654 
655 	for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
656 		if (efi_guidcmp(*guid, table_types[i].guid))
657 			continue;
658 
659 		if (!efi_config_table_is_usable(guid, table)) {
660 			if (table_types[i].name[0])
661 				pr_cont("(%s=0x%lx unusable) ",
662 					table_types[i].name, table);
663 			return 1;
664 		}
665 
666 		*(table_types[i].ptr) = table;
667 		if (table_types[i].name[0])
668 			pr_cont("%s=0x%lx ", table_types[i].name, table);
669 		return 1;
670 	}
671 
672 	return 0;
673 }
674 
675 /**
676  * reserve_unaccepted - Map and reserve unaccepted configuration table
677  * @unaccepted: Pointer to unaccepted memory table
678  *
679  * memblock_add() makes sure that the table is mapped in direct mapping. During
680  * normal boot it happens automatically because the table is allocated from
681  * usable memory. But during crashkernel boot only memory specifically reserved
682  * for the crash scenario is mapped. memblock_add() forces the table to be
683  * mapped in the crashkernel case.
684  *
685  * Align the range to the nearest page borders. Ranges smaller than page size
686  * are not going to be mapped.
687  *
688  * memblock_reserve() makes sure that future allocations will not touch the
689  * table.
690  */
691 
692 static __init void reserve_unaccepted(struct efi_unaccepted_memory *unaccepted)
693 {
694 	phys_addr_t start, size;
695 
696 	start = PAGE_ALIGN_DOWN(efi.unaccepted);
697 	size = PAGE_ALIGN(sizeof(*unaccepted) + unaccepted->size);
698 
699 	memblock_add(start, size);
700 	memblock_reserve(start, size);
701 }
702 
703 int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
704 				   int count,
705 				   const efi_config_table_type_t *arch_tables)
706 {
707 	const efi_config_table_64_t *tbl64 = (void *)config_tables;
708 	const efi_config_table_32_t *tbl32 = (void *)config_tables;
709 	const efi_guid_t *guid;
710 	unsigned long table;
711 	int i;
712 
713 	pr_info("");
714 	for (i = 0; i < count; i++) {
715 		if (!IS_ENABLED(CONFIG_X86)) {
716 			guid = &config_tables[i].guid;
717 			table = (unsigned long)config_tables[i].table;
718 		} else if (efi_enabled(EFI_64BIT)) {
719 			guid = &tbl64[i].guid;
720 			table = tbl64[i].table;
721 
722 			if (IS_ENABLED(CONFIG_X86_32) &&
723 			    tbl64[i].table > U32_MAX) {
724 				pr_cont("\n");
725 				pr_err("Table located above 4GB, disabling EFI.\n");
726 				return -EINVAL;
727 			}
728 		} else {
729 			guid = &tbl32[i].guid;
730 			table = tbl32[i].table;
731 		}
732 
733 		if (!match_config_table(guid, table, common_tables) && arch_tables)
734 			match_config_table(guid, table, arch_tables);
735 	}
736 	pr_cont("\n");
737 	set_bit(EFI_CONFIG_TABLES, &efi.flags);
738 
739 	if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
740 		struct linux_efi_random_seed *seed;
741 		u32 size = 0;
742 
743 		seed = early_memremap(efi_rng_seed, sizeof(*seed));
744 		if (seed != NULL) {
745 			size = min_t(u32, seed->size, SZ_1K); // sanity check
746 			early_memunmap(seed, sizeof(*seed));
747 		} else {
748 			pr_err("Could not map UEFI random seed!\n");
749 		}
750 		if (size > 0) {
751 			seed = early_memremap(efi_rng_seed,
752 					      sizeof(*seed) + size);
753 			if (seed != NULL) {
754 				add_bootloader_randomness(seed->bits, size);
755 				memzero_explicit(seed->bits, size);
756 				early_memunmap(seed, sizeof(*seed) + size);
757 			} else {
758 				pr_err("Could not map UEFI random seed!\n");
759 			}
760 		}
761 	}
762 
763 	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
764 		efi_memattr_init();
765 
766 	efi_tpm_eventlog_init();
767 
768 	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
769 		unsigned long prsv = mem_reserve;
770 
771 		while (prsv) {
772 			struct linux_efi_memreserve *rsv;
773 			u8 *p;
774 
775 			/*
776 			 * Just map a full page: that is what we will get
777 			 * anyway, and it permits us to map the entire entry
778 			 * before knowing its size.
779 			 */
780 			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
781 					   PAGE_SIZE);
782 			if (p == NULL) {
783 				pr_err("Could not map UEFI memreserve entry!\n");
784 				return -ENOMEM;
785 			}
786 
787 			rsv = (void *)(p + prsv % PAGE_SIZE);
788 
789 			/* reserve the entry itself */
790 			memblock_reserve(prsv,
791 					 struct_size(rsv, entry, rsv->size));
792 
793 			for (i = 0; i < atomic_read(&rsv->count); i++) {
794 				memblock_reserve(rsv->entry[i].base,
795 						 rsv->entry[i].size);
796 			}
797 
798 			prsv = rsv->next;
799 			early_memunmap(p, PAGE_SIZE);
800 		}
801 	}
802 
803 	if (rt_prop != EFI_INVALID_TABLE_ADDR) {
804 		efi_rt_properties_table_t *tbl;
805 
806 		tbl = early_memremap(rt_prop, sizeof(*tbl));
807 		if (tbl) {
808 			efi.runtime_supported_mask &= tbl->runtime_services_supported;
809 			early_memunmap(tbl, sizeof(*tbl));
810 		}
811 	}
812 
813 	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
814 	    initrd != EFI_INVALID_TABLE_ADDR && phys_initrd_size == 0) {
815 		struct linux_efi_initrd *tbl;
816 
817 		tbl = early_memremap(initrd, sizeof(*tbl));
818 		if (tbl) {
819 			phys_initrd_start = tbl->base;
820 			phys_initrd_size = tbl->size;
821 			early_memunmap(tbl, sizeof(*tbl));
822 		}
823 	}
824 
825 	if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY) &&
826 	    efi.unaccepted != EFI_INVALID_TABLE_ADDR) {
827 		struct efi_unaccepted_memory *unaccepted;
828 
829 		unaccepted = early_memremap(efi.unaccepted, sizeof(*unaccepted));
830 		if (unaccepted) {
831 
832 			if (unaccepted->version == 1) {
833 				reserve_unaccepted(unaccepted);
834 			} else {
835 				efi.unaccepted = EFI_INVALID_TABLE_ADDR;
836 			}
837 
838 			early_memunmap(unaccepted, sizeof(*unaccepted));
839 		}
840 	}
841 
842 	return 0;
843 }
844 
845 int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr)
846 {
847 	if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
848 		pr_err("System table signature incorrect!\n");
849 		return -EINVAL;
850 	}
851 
852 	return 0;
853 }
854 
855 static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
856 						size_t size)
857 {
858 	const efi_char16_t *ret;
859 
860 	ret = early_memremap_ro(fw_vendor, size);
861 	if (!ret)
862 		pr_err("Could not map the firmware vendor!\n");
863 	return ret;
864 }
865 
866 static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
867 {
868 	early_memunmap((void *)fw_vendor, size);
869 }
870 
871 void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
872 				     unsigned long fw_vendor)
873 {
874 	char vendor[100] = "unknown";
875 	const efi_char16_t *c16;
876 	size_t i;
877 	u16 rev;
878 
879 	c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
880 	if (c16) {
881 		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
882 			vendor[i] = c16[i];
883 		vendor[i] = '\0';
884 
885 		unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
886 	}
887 
888 	rev = (u16)systab_hdr->revision;
889 	pr_info("EFI v%u.%u", systab_hdr->revision >> 16, rev / 10);
890 
891 	rev %= 10;
892 	if (rev)
893 		pr_cont(".%u", rev);
894 
895 	pr_cont(" by %s\n", vendor);
896 
897 	if (IS_ENABLED(CONFIG_X86_64) &&
898 	    systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
899 	    !strcmp(vendor, "Apple")) {
900 		pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
901 		efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
902 	}
903 }
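/*
 * Revision decoding example: UEFI encodes the minor revision in tens in the
 * low 16 bits of the header revision, so a 2.7 firmware reports
 * (2 << 16) | 70 and is printed above as "EFI v2.7 by <vendor>", while a
 * 2.3.1 firmware reports (2 << 16) | 31 and prints as "EFI v2.3.1 by <vendor>".
 */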
904 
905 static __initdata char memory_type_name[][13] = {
906 	"Reserved",
907 	"Loader Code",
908 	"Loader Data",
909 	"Boot Code",
910 	"Boot Data",
911 	"Runtime Code",
912 	"Runtime Data",
913 	"Conventional",
914 	"Unusable",
915 	"ACPI Reclaim",
916 	"ACPI Mem NVS",
917 	"MMIO",
918 	"MMIO Port",
919 	"PAL Code",
920 	"Persistent",
921 	"Unaccepted",
922 };
923 
924 char * __init efi_md_typeattr_format(char *buf, size_t size,
925 				     const efi_memory_desc_t *md)
926 {
927 	char *pos;
928 	int type_len;
929 	u64 attr;
930 
931 	pos = buf;
932 	if (md->type >= ARRAY_SIZE(memory_type_name))
933 		type_len = snprintf(pos, size, "[type=%u", md->type);
934 	else
935 		type_len = snprintf(pos, size, "[%-*s",
936 				    (int)(sizeof(memory_type_name[0]) - 1),
937 				    memory_type_name[md->type]);
938 	if (type_len >= size)
939 		return buf;
940 
941 	pos += type_len;
942 	size -= type_len;
943 
944 	attr = md->attribute;
945 	if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
946 		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
947 		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
948 		     EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
949 		     EFI_MEMORY_MORE_RELIABLE | EFI_MEMORY_HOT_PLUGGABLE |
950 		     EFI_MEMORY_RUNTIME))
951 		snprintf(pos, size, "|attr=0x%016llx]",
952 			 (unsigned long long)attr);
953 	else
954 		snprintf(pos, size,
955 			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
956 			 attr & EFI_MEMORY_RUNTIME		? "RUN" : "",
957 			 attr & EFI_MEMORY_HOT_PLUGGABLE	? "HP"  : "",
958 			 attr & EFI_MEMORY_MORE_RELIABLE	? "MR"  : "",
959 			 attr & EFI_MEMORY_CPU_CRYPTO   	? "CC"  : "",
960 			 attr & EFI_MEMORY_SP			? "SP"  : "",
961 			 attr & EFI_MEMORY_NV			? "NV"  : "",
962 			 attr & EFI_MEMORY_XP			? "XP"  : "",
963 			 attr & EFI_MEMORY_RP			? "RP"  : "",
964 			 attr & EFI_MEMORY_WP			? "WP"  : "",
965 			 attr & EFI_MEMORY_RO			? "RO"  : "",
966 			 attr & EFI_MEMORY_UCE			? "UCE" : "",
967 			 attr & EFI_MEMORY_WB			? "WB"  : "",
968 			 attr & EFI_MEMORY_WT			? "WT"  : "",
969 			 attr & EFI_MEMORY_WC			? "WC"  : "",
970 			 attr & EFI_MEMORY_UC			? "UC"  : "");
971 	return buf;
972 }
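/*
 * Illustrative output: a Runtime Data descriptor with only the RUNTIME, WB,
 * WT, WC and UC attribute bits set is formatted roughly as
 *
 *   [Runtime Data|RUN|  |  |  |  |  |  |  |  |  |   |WB|WT|WC|UC]
 *
 * whereas any attribute bit outside the known set checked above falls back
 * to the raw "|attr=0x%016llx]" form.
 */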
973 
974 /*
975  * efi_mem_attributes - lookup memmap attributes for physical address
976  * @phys_addr: the physical address to lookup
977  *
978  * Search in the EFI memory map for the region covering
979  * @phys_addr. Returns the EFI memory attributes if the region
980  * was found in the memory map, 0 otherwise.
981  */
982 u64 efi_mem_attributes(unsigned long phys_addr)
983 {
984 	efi_memory_desc_t *md;
985 
986 	if (!efi_enabled(EFI_MEMMAP))
987 		return 0;
988 
989 	for_each_efi_memory_desc(md) {
990 		if ((md->phys_addr <= phys_addr) &&
991 		    (phys_addr < (md->phys_addr +
992 		    (md->num_pages << EFI_PAGE_SHIFT))))
993 			return md->attribute;
994 	}
995 	return 0;
996 }
997 
998 /*
999  * efi_mem_type - lookup memmap type for physical address
1000  * @phys_addr: the physical address to lookup
1001  *
1002  * Search in the EFI memory map for the region covering @phys_addr.
1003  * Returns the EFI memory type if the region was found in the memory
1004  * map, -EINVAL otherwise.
1005  */
1006 int efi_mem_type(unsigned long phys_addr)
1007 {
1008 	const efi_memory_desc_t *md;
1009 
1010 	if (!efi_enabled(EFI_MEMMAP))
1011 		return -ENOTSUPP;
1012 
1013 	for_each_efi_memory_desc(md) {
1014 		if ((md->phys_addr <= phys_addr) &&
1015 		    (phys_addr < (md->phys_addr +
1016 				  (md->num_pages << EFI_PAGE_SHIFT))))
1017 			return md->type;
1018 	}
1019 	return -EINVAL;
1020 }
1021 
1022 int efi_status_to_err(efi_status_t status)
1023 {
1024 	int err;
1025 
1026 	switch (status) {
1027 	case EFI_SUCCESS:
1028 		err = 0;
1029 		break;
1030 	case EFI_INVALID_PARAMETER:
1031 		err = -EINVAL;
1032 		break;
1033 	case EFI_OUT_OF_RESOURCES:
1034 		err = -ENOSPC;
1035 		break;
1036 	case EFI_DEVICE_ERROR:
1037 		err = -EIO;
1038 		break;
1039 	case EFI_WRITE_PROTECTED:
1040 		err = -EROFS;
1041 		break;
1042 	case EFI_SECURITY_VIOLATION:
1043 		err = -EACCES;
1044 		break;
1045 	case EFI_NOT_FOUND:
1046 		err = -ENOENT;
1047 		break;
1048 	case EFI_ABORTED:
1049 		err = -EINTR;
1050 		break;
1051 	default:
1052 		err = -EINVAL;
1053 	}
1054 
1055 	return err;
1056 }
1057 EXPORT_SYMBOL_GPL(efi_status_to_err);
1058 
1059 static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
1060 static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
1061 
1062 static int __init efi_memreserve_map_root(void)
1063 {
1064 	if (mem_reserve == EFI_INVALID_TABLE_ADDR)
1065 		return -ENODEV;
1066 
1067 	efi_memreserve_root = memremap(mem_reserve,
1068 				       sizeof(*efi_memreserve_root),
1069 				       MEMREMAP_WB);
1070 	if (WARN_ON_ONCE(!efi_memreserve_root))
1071 		return -ENOMEM;
1072 	return 0;
1073 }
1074 
1075 static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
1076 {
1077 	struct resource *res, *parent;
1078 	int ret;
1079 
1080 	res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
1081 	if (!res)
1082 		return -ENOMEM;
1083 
1084 	res->name	= "reserved";
1085 	res->flags	= IORESOURCE_MEM;
1086 	res->start	= addr;
1087 	res->end	= addr + size - 1;
1088 
1089 	/* we expect a conflict with a 'System RAM' region */
1090 	parent = request_resource_conflict(&iomem_resource, res);
1091 	ret = parent ? request_resource(parent, res) : 0;
1092 
1093 	/*
1094 	 * Given that efi_mem_reserve_iomem() can be called at any
1095 	 * time, only call memblock_reserve() if the architecture
1096 	 * keeps the infrastructure around.
1097 	 */
1098 	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
1099 		memblock_reserve(addr, size);
1100 
1101 	return ret;
1102 }
1103 
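/*
 * Persistent reservations are recorded in a singly linked list of
 * linux_efi_memreserve blocks (declared in include/linux/efi.h and rooted at
 * the LINUX_EFI_MEMRESERVE_TABLE_GUID config table mapped above): each block
 * carries the physical address of the next block, the capacity of its
 * entry[] array, an atomic count of used slots, and the {base, size} pairs
 * themselves. The function below walks that list looking for a free slot
 * before allocating a new page-sized block.
 */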
1104 int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
1105 {
1106 	struct linux_efi_memreserve *rsv;
1107 	unsigned long prsv;
1108 	int rc, index;
1109 
1110 	if (efi_memreserve_root == (void *)ULONG_MAX)
1111 		return -ENODEV;
1112 
1113 	if (!efi_memreserve_root) {
1114 		rc = efi_memreserve_map_root();
1115 		if (rc)
1116 			return rc;
1117 	}
1118 
1119 	/* first try to find a slot in an existing linked list entry */
1120 	for (prsv = efi_memreserve_root->next; prsv; ) {
1121 		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
1122 		if (!rsv)
1123 			return -ENOMEM;
1124 		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
1125 		if (index < rsv->size) {
1126 			rsv->entry[index].base = addr;
1127 			rsv->entry[index].size = size;
1128 
1129 			memunmap(rsv);
1130 			return efi_mem_reserve_iomem(addr, size);
1131 		}
1132 		prsv = rsv->next;
1133 		memunmap(rsv);
1134 	}
1135 
1136 	/* no slot found - allocate a new linked list entry */
1137 	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
1138 	if (!rsv)
1139 		return -ENOMEM;
1140 
1141 	rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
1142 	if (rc) {
1143 		free_page((unsigned long)rsv);
1144 		return rc;
1145 	}
1146 
1147 	/*
1148 	 * The memremap() call above assumes that a linux_efi_memreserve entry
1149 	 * never crosses a page boundary, so let's ensure that this remains true
1150 	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
1151 	 * using SZ_4K explicitly in the size calculation below.
1152 	 */
1153 	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
1154 	atomic_set(&rsv->count, 1);
1155 	rsv->entry[0].base = addr;
1156 	rsv->entry[0].size = size;
1157 
1158 	spin_lock(&efi_mem_reserve_persistent_lock);
1159 	rsv->next = efi_memreserve_root->next;
1160 	efi_memreserve_root->next = __pa(rsv);
1161 	spin_unlock(&efi_mem_reserve_persistent_lock);
1162 
1163 	return efi_mem_reserve_iomem(addr, size);
1164 }
1165 
1166 static int __init efi_memreserve_root_init(void)
1167 {
1168 	if (efi_memreserve_root)
1169 		return 0;
1170 	if (efi_memreserve_map_root())
1171 		efi_memreserve_root = (void *)ULONG_MAX;
1172 	return 0;
1173 }
1174 early_initcall(efi_memreserve_root_init);
1175 
1176 #ifdef CONFIG_KEXEC
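/*
 * On kexec reboot the rng seed table referenced by efi_rng_seed is handed to
 * the next kernel as-is. The boot-time consumer zeroes the seed after feeding
 * it to add_bootloader_randomness() (see efi_config_parse_tables() above), so
 * this reboot notifier refills it with fresh entropy from get_random_bytes()
 * when kexec_in_progress, ensuring the next kernel does not start from an
 * empty seed.
 */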
1177 static int update_efi_random_seed(struct notifier_block *nb,
1178 				  unsigned long code, void *unused)
1179 {
1180 	struct linux_efi_random_seed *seed;
1181 	u32 size = 0;
1182 
1183 	if (!kexec_in_progress)
1184 		return NOTIFY_DONE;
1185 
1186 	seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
1187 	if (seed != NULL) {
1188 		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
1189 		memunmap(seed);
1190 	} else {
1191 		pr_err("Could not map UEFI random seed!\n");
1192 	}
1193 	if (size > 0) {
1194 		seed = memremap(efi_rng_seed, sizeof(*seed) + size,
1195 				MEMREMAP_WB);
1196 		if (seed != NULL) {
1197 			seed->size = size;
1198 			get_random_bytes(seed->bits, seed->size);
1199 			memunmap(seed);
1200 		} else {
1201 			pr_err("Could not map UEFI random seed!\n");
1202 		}
1203 	}
1204 	return NOTIFY_DONE;
1205 }
1206 
1207 static struct notifier_block efi_random_seed_nb = {
1208 	.notifier_call = update_efi_random_seed,
1209 };
1210 
1211 static int __init register_update_efi_random_seed(void)
1212 {
1213 	if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
1214 		return 0;
1215 	return register_reboot_notifier(&efi_random_seed_nb);
1216 }
1217 late_initcall(register_update_efi_random_seed);
1218 #endif
1219